author    AlmuHS <almuhs@github.com>    2019-04-30 00:20:29 +0200
committer AlmuHS <almuhs@github.com>    2019-04-30 00:20:29 +0200
commit    26a0568f33df5bb772e7260470b1a56494562c3b (patch)
tree      dad3afbb6b6bb30a68f037b67496416dc008ebba
parent    710f8603aa44a919320229e3fad7b6138a85ac9e (diff)
start implementation of cpustack array
-rw-r--r--    i386/i386/cpuboot.S      1
-rw-r--r--    i386/i386/mp_desc.c    499
-rw-r--r--    i386/i386/vm_param.h     3
3 files changed, 272 insertions, 231 deletions
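
In outline: cpuboot.S stops reserving a static AP interrupt stack, mp_desc.c introduces per-CPU main-stack arrays (cpu_stack[], _cpu_stack_top[], plus a cpu_stack_high barrier) that start_other_cpus() fills in, and vm_param.h gains the STACK_SIZE constant. A minimal sketch of the invariant the new arrays are meant to satisfy — inferred from the diff, not asserted anywhere in it (assert() as in kern/assert.h):

    /* Sketch: each AP gets a disjoint STACK_SIZE-byte slice, with the
     * top of every slice exactly STACK_SIZE above its bottom. */
    static void check_cpu_stacks(void)
    {
        int cpu;
        for (cpu = 0; cpu < ncpu; cpu++) {
            if (cpu == master_cpu || !machine_slot[cpu].is_cpu)
                continue;   /* the BSP keeps its bootstrap stack */
            assert(_cpu_stack_top[cpu] == cpu_stack[cpu] + STACK_SIZE);
        }
    }
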
diff --git a/i386/i386/cpuboot.S b/i386/i386/cpuboot.S
index 4809fb57..428277fa 100644
--- a/i386/i386/cpuboot.S
+++ b/i386/i386/cpuboot.S
@@ -98,7 +98,6 @@ _apboot:
pushl stack_ptr
call cpu_ap_main
- .comm _intstack,INTSTACK_SIZE
cli
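
With `.comm _intstack` gone, the AP no longer has a statically reserved stack of its own; the intent (per the mp_desc.c changes below) is that each AP finds its stack through the new per-CPU arrays. A hypothetical lookup using only names from this patch — the helper itself is made up:

    /* Hypothetical helper, not in this patch: an AP maps its local
     * APIC id to its kernel slot and takes its own stack top. */
    static vm_offset_t ap_stack_top(void)
    {
        /* read our local APIC id, as cpu_setup() does */
        unsigned apic_id = (lapic->apic_id.r >> 24) & 0xff;
        int slot = apic2kernel[apic_id];  /* APIC id -> kernel cpu number */
        return _cpu_stack_top[slot];      /* the stack grows down from here */
    }
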
diff --git a/i386/i386/mp_desc.c b/i386/i386/mp_desc.c
index 7c7196ed..8ba01a5f 100644
--- a/i386/i386/mp_desc.c
+++ b/i386/i386/mp_desc.c
@@ -76,6 +76,18 @@ char intstack[]; /* bottom */
char eintstack[]; /* top */
+/*
+ * Addresses of bottom and top of cpu main stacks.
+ */
+vm_offset_t cpu_stack[NCPUS];
+vm_offset_t _cpu_stack_top[NCPUS];
+
+/*
+ * Barrier address.
+ */
+vm_offset_t cpu_stack_high;
+
+
static struct kmutex mp_cpu_boot_lock;
/*
@@ -111,6 +123,7 @@ extern struct real_descriptor ldt[LDTSZ];
/*
* Address of cpu start routine, to skip to protected mode after startup IPI
+ * TODO: Reserve a physical page for this
*/
extern void* *apboot, *apbootend;
#define AP_BOOT_ADDR (0x7000)
@@ -149,76 +162,80 @@ extern int lapic_addr;
struct mp_desc_table *
mp_desc_init(int mycpu)
{
- struct mp_desc_table *mpt;
-
- if (mycpu == master_cpu) {
- /*
- * Master CPU uses the tables built at boot time.
- * Just set the TSS and GDT pointers.
- */
- mp_ktss[mycpu] = (struct task_tss *) &ktss;
- mp_gdt[mycpu] = gdt;
- return 0;
- }
- else {
- /*
- * Other CPUs allocate the table from the bottom of
- * the interrupt stack.
- */
- mpt = (struct mp_desc_table *) interrupt_stack[mycpu];
-
- mp_desc_table[mycpu] = mpt;
- mp_ktss[mycpu] = &mpt->ktss;
- mp_gdt[mycpu] = mpt->gdt;
-
- /*
- * Copy the tables
- */
- memcpy(mpt->idt,
- idt,
- sizeof(idt));
- memcpy(mpt->gdt,
- gdt,
- sizeof(gdt));
- memcpy(mpt->ldt,
- ldt,
- sizeof(ldt));
- memset(&mpt->ktss, 0,
- sizeof(struct task_tss));
-
- /*
- * Fix up the entries in the GDT to point to
- * this LDT and this TSS.
- */
+ struct mp_desc_table *mpt;
+
+ if (mycpu == master_cpu)
+ {
+ /*
+ * Master CPU uses the tables built at boot time.
+ * Just set the TSS and GDT pointers.
+ */
+ mp_ktss[mycpu] = (struct task_tss *) &ktss;
+ mp_gdt[mycpu] = gdt;
+ return 0;
+ }
+ else
+ {
+ /*
+ * Other CPUs allocate the table from the bottom of
+ * the interrupt stack.
+ */
+ mpt = (struct mp_desc_table *) interrupt_stack[mycpu];
+
+ mp_desc_table[mycpu] = mpt;
+ mp_ktss[mycpu] = &mpt->ktss;
+ mp_gdt[mycpu] = mpt->gdt;
+
+ /*
+ * Copy the tables
+ */
+ memcpy(mpt->idt,
+ idt,
+ sizeof(idt));
+ memcpy(mpt->gdt,
+ gdt,
+ sizeof(gdt));
+ memcpy(mpt->ldt,
+ ldt,
+ sizeof(ldt));
+ memset(&mpt->ktss, 0,
+ sizeof(struct task_tss));
+
+ /*
+ * Fix up the entries in the GDT to point to
+ * this LDT and this TSS.
+ */
#ifdef MACH_RING1
- panic("TODO %s:%d\n",__FILE__,__LINE__);
+ panic("TODO %s:%d\n",__FILE__,__LINE__);
#else /* MACH_RING1 */
- fill_descriptor(&mpt->gdt[sel_idx(KERNEL_LDT)],
- (unsigned)&mpt->ldt,
- LDTSZ * sizeof(struct real_descriptor) - 1,
- ACC_P|ACC_PL_K|ACC_LDT, 0);
- fill_descriptor(&mpt->gdt[sel_idx(KERNEL_TSS)],
- (unsigned)&mpt->ktss,
- sizeof(struct task_tss) - 1,
- ACC_P|ACC_PL_K|ACC_TSS, 0);
-
- mpt->ktss.tss.ss0 = KERNEL_DS;
- mpt->ktss.tss.io_bit_map_offset = IOPB_INVAL;
- mpt->ktss.barrier = 0xFF;
+ fill_descriptor(&mpt->gdt[sel_idx(KERNEL_LDT)],
+ (unsigned)&mpt->ldt,
+ LDTSZ * sizeof(struct real_descriptor) - 1,
+ ACC_P|ACC_PL_K|ACC_LDT, 0);
+ fill_descriptor(&mpt->gdt[sel_idx(KERNEL_TSS)],
+ (unsigned)&mpt->ktss,
+ sizeof(struct task_tss) - 1,
+ ACC_P|ACC_PL_K|ACC_TSS, 0);
+
+ mpt->ktss.tss.ss0 = KERNEL_DS;
+ mpt->ktss.tss.io_bit_map_offset = IOPB_INVAL;
+ mpt->ktss.barrier = 0xFF;
#endif /* MACH_RING1 */
- return mpt;
- }
+ return mpt;
+ }
}
-static void send_ipi(unsigned icr_h, unsigned icr_l){
+static void send_ipi(unsigned icr_h, unsigned icr_l)
+{
lapic->icr_high.r = icr_h;
lapic->icr_low.r = icr_l;
}
/*TODO: Add delays between IPIs*/
-void startup_cpu(uint32_t apic_id){
+void startup_cpu(uint32_t apic_id)
+{
unsigned icr_h = 0;
unsigned icr_l = 0;
@@ -234,7 +251,8 @@ void startup_cpu(uint32_t apic_id){
while( ( (lapic->icr_low.r >> 12) & 1) == SEND_PENDING);
//Send INIT De-Assert IPI
- icr_h = 0; icr_l = 0;
+ icr_h = 0;
+ icr_l = 0;
icr_h = (apic_id << 24);
icr_l = (INIT << 8) | (DE_ASSERT << 14) | (LEVEL << 15);
send_ipi(icr_h, icr_l);
@@ -246,7 +264,8 @@ void startup_cpu(uint32_t apic_id){
while( ( (lapic->icr_low.r >> 12) & 1) == SEND_PENDING);
//Send StartUp IPI
- icr_h = 0; icr_l = 0;
+ icr_h = 0;
+ icr_l = 0;
icr_h = (apic_id << 24);
icr_l = (STARTUP << 8) | ((AP_BOOT_ADDR >>12) & 0xff);
send_ipi(icr_h, icr_l);
@@ -258,7 +277,8 @@ void startup_cpu(uint32_t apic_id){
while( ( (lapic->icr_low.r >> 12) & 1) == SEND_PENDING);
//Send second StartUp IPI
- icr_h = 0; icr_l = 0;
+ icr_h = 0;
+ icr_l = 0;
icr_h = (apic_id << 24);
icr_l = (STARTUP << 8) | ((AP_BOOT_ADDR >>12) & 0xff);
send_ipi(icr_h, icr_l);
@@ -272,72 +292,75 @@ void startup_cpu(uint32_t apic_id){
}
int
-cpu_setup(){
+cpu_setup()
+{
- int i = 1;
- while(i < ncpu && (machine_slot[i].running == TRUE)) i++;
+ int i = 1;
+ while(i < ncpu && (machine_slot[i].running == TRUE)) i++;
- unsigned apic_id = (((ApicLocalUnit*)phystokv(lapic_addr))->apic_id.r >> 24) & 0xff;
+ unsigned apic_id = (((ApicLocalUnit*)phystokv(lapic_addr))->apic_id.r >> 24) & 0xff;
- /* panic? */
- if(i >= ncpu)
- return -1;
+ /* panic? */
+ if(i >= ncpu)
+ return -1;
- /*TODO: Move this code to a separate function*/
+ /*TODO: Move this code to a separate function*/
- /* assume Pentium 4, Xeon, or later processors */
+ /* assume Pentium 4, Xeon, or later processors */
- /* Update apic2kernel and machine_slot with the newest apic_id */
- if(apic2kernel[machine_slot[i].apic_id] == i){
- apic2kernel[machine_slot[i].apic_id] = -1;
- }
+ /* Update apic2kernel and machine_slot with the newest apic_id */
+ if(apic2kernel[machine_slot[i].apic_id] == i)
+ {
+ apic2kernel[machine_slot[i].apic_id] = -1;
+ }
- apic2kernel[apic_id] = i;
- machine_slot[i].apic_id = apic_id;
+ apic2kernel[apic_id] = i;
+ machine_slot[i].apic_id = apic_id;
- /* Initialize machine_slot fields with the cpu data */
- machine_slot[i].running = TRUE;
- machine_slot[i].cpu_subtype = CPU_SUBTYPE_AT386;
+ /* Initialize machine_slot fields with the cpu data */
+ machine_slot[i].running = TRUE;
+ machine_slot[i].cpu_subtype = CPU_SUBTYPE_AT386;
- int cpu_type = discover_x86_cpu_type ();
+ int cpu_type = discover_x86_cpu_type ();
- switch (cpu_type)
- {
- default:
- printf("warning: unknown cpu type %d, assuming i386\n", cpu_type);
+ switch (cpu_type)
+ {
+ default:
+ printf("warning: unknown cpu type %d, assuming i386\n", cpu_type);
- case 3:
- machine_slot[i].cpu_type = CPU_TYPE_I386;
- break;
+ case 3:
+ machine_slot[i].cpu_type = CPU_TYPE_I386;
+ break;
- case 4:
- machine_slot[i].cpu_type = CPU_TYPE_I486;
- break;
+ case 4:
+ machine_slot[i].cpu_type = CPU_TYPE_I486;
+ break;
- case 5:
- machine_slot[i].cpu_type = CPU_TYPE_PENTIUM;
- break;
- case 6:
- case 15:
- machine_slot[i].cpu_type = CPU_TYPE_PENTIUMPRO;
- break;
- }
+ case 5:
+ machine_slot[i].cpu_type = CPU_TYPE_PENTIUM;
+ break;
+ case 6:
+ case 15:
+ machine_slot[i].cpu_type = CPU_TYPE_PENTIUMPRO;
+ break;
+ }
- //slave_main(i);
+ //slave_main(i);
- printf("launched first thread of cpu %d\n", i);
+ printf("launched first thread of cpu %d\n", i);
- //printf("cpu %d enabled\n", cpu_number());
+ //printf("cpu %d enabled\n", cpu_number());
- return 0;
+ return 0;
}
int
-cpu_ap_main(){
+cpu_ap_main()
+{
- if(cpu_setup())
+ if(cpu_setup())
goto idle;
idle:
@@ -348,61 +371,66 @@ idle:
/*TODO: Reimplement function to send Startup IPI to cpu*/
kern_return_t intel_startCPU(int slot_num)
{
- /*TODO: Get local APIC from cpu*/
- int lapic_id = machine_slot[slot_num].apic_id;
- unsigned long eFlagsRegister;
-
- kmutex_init(&mp_cpu_boot_lock);
- printf("Trying to enable: %d\n", lapic_id);
-
-
- //assert(lapic != -1);
-
- /*
- * Initialize (or re-initialize) the descriptor tables for this cpu.
- * Propagate processor mode to slave.
- */
- /*cpu_desc_init64(cpu_datap(slot_num));*/
- mp_desc_init(slot_num);
-
- /* Serialize use of the slave boot stack, etc. */
- kmutex_lock(&mp_cpu_boot_lock, FALSE);
-
- /*istate = ml_set_interrupts_enabled(FALSE);*/
- cpu_intr_save(&eFlagsRegister);
- if (slot_num == cpu_number()) {
- /*ml_set_interrupts_enabled(istate);*/
- cpu_intr_restore(eFlagsRegister);
- /*lck_mtx_unlock(&mp_cpu_boot_lock);*/
- kmutex_unlock(&mp_cpu_boot_lock);
- return KERN_SUCCESS;
- }
-
- /*
- * Perform the processor startup sequence with all running
- * processors rendezvous'ed. This is required during periods when
- * the cache-disable bit is set for MTRR/PAT initialization.
- */
- /*mp_rendezvous_no_intrs(start_cpu, (void *) &start_info);*/
- startup_cpu(lapic_id);
- //cpu_up(slot_num);
-
- /*ml_set_interrupts_enabled(istate);*/
- cpu_intr_restore(eFlagsRegister);
- /*lck_mtx_unlock(&mp_cpu_boot_lock);*/
- kmutex_unlock(&mp_cpu_boot_lock);
-
- delay(1000000);
-
- /*if (!cpu_datap(slot_num)->cpu_running) {*/
- if(!machine_slot[slot_num].running){
- printf("Failed to start CPU %02d, rebooting...\n", slot_num);
- halt_cpu();
- return KERN_SUCCESS;
- } else {
- printf("Started cpu %d (lapic id %08x)\n", slot_num, lapic_id);
- return KERN_SUCCESS;
- }
+ /*TODO: Get local APIC from cpu*/
+ int lapic_id = machine_slot[slot_num].apic_id;
+ unsigned long eFlagsRegister;
+
+ kmutex_init(&mp_cpu_boot_lock);
+ printf("Trying to enable: %d\n", lapic_id);
+
+
+ //assert(lapic != -1);
+
+ /* Serialize use of the slave boot stack, etc. */
+ kmutex_lock(&mp_cpu_boot_lock, FALSE);
+
+ /*istate = ml_set_interrupts_enabled(FALSE);*/
+ cpu_intr_save(&eFlagsRegister);
+ if (slot_num == cpu_number())
+ {
+ /*ml_set_interrupts_enabled(istate);*/
+ cpu_intr_restore(eFlagsRegister);
+ /*lck_mtx_unlock(&mp_cpu_boot_lock);*/
+ kmutex_unlock(&mp_cpu_boot_lock);
+ return KERN_SUCCESS;
+ }
+
+ /*
+ * Perform the processor startup sequence with all running
+ * processors rendezvous'ed. This is required during periods when
+ * the cache-disable bit is set for MTRR/PAT initialization.
+ */
+ /*mp_rendezvous_no_intrs(start_cpu, (void *) &start_info);*/
+ startup_cpu(lapic_id);
+ //cpu_up(slot_num);
+
+
+ /*
+ * Initialize (or re-initialize) the descriptor tables for this cpu.
+ * Propagate processor mode to slave.
+ */
+ /*cpu_desc_init64(cpu_datap(slot_num));*/
+ mp_desc_init(slot_num);
+
+ /*ml_set_interrupts_enabled(istate);*/
+ cpu_intr_restore(eFlagsRegister);
+ /*lck_mtx_unlock(&mp_cpu_boot_lock);*/
+ kmutex_unlock(&mp_cpu_boot_lock);
+
+ delay(1000000);
+
+ /*if (!cpu_datap(slot_num)->cpu_running) {*/
+ if(!machine_slot[slot_num].running)
+ {
+ printf("Failed to start CPU %02d, rebooting...\n", slot_num);
+ halt_cpu();
+ return KERN_SUCCESS;
+ }
+ else
+ {
+ printf("Started cpu %d (lapic id %08x)\n", slot_num, lapic_id);
+ return KERN_SUCCESS;
+ }
}
/*
@@ -412,39 +440,42 @@ kern_return_t intel_startCPU(int slot_num)
void
interrupt_stack_alloc(void)
{
- int i;
- vm_offset_t stack_start;
-
-
- /*
- * Allocate an interrupt stack for each CPU except for
- * the master CPU (which uses the bootstrap stack)
- */
- if (!init_alloc_aligned(INTSTACK_SIZE*(ncpu-1), &stack_start))
- panic("not enough memory for interrupt stacks");
- stack_start = phystokv(stack_start);
-
- /*
- * Set up pointers to the top of the interrupt stack.
- */
- for (i = 0; i < ncpu; i++) {
- if (i == master_cpu) {
- interrupt_stack[i] = (vm_offset_t) intstack;
- _int_stack_top[i] = (vm_offset_t) eintstack;
- }
- else if (machine_slot[i].is_cpu) {
- interrupt_stack[i] = stack_start;
- _int_stack_top[i] = stack_start + INTSTACK_SIZE;
-
- stack_start += INTSTACK_SIZE;
- }
- }
-
- /*
- * Set up the barrier address. All thread stacks MUST
- * be above this address.
- */
- int_stack_high = stack_start;
+ int i;
+ vm_offset_t stack_start;
+
+
+ /*
+ * Allocate an interrupt stack for each CPU except for
+ * the master CPU (which uses the bootstrap stack)
+ */
+ if (!init_alloc_aligned(INTSTACK_SIZE*(ncpu-1), &stack_start))
+ panic("not enough memory for interrupt stacks");
+ stack_start = phystokv(stack_start);
+
+ /*
+ * Set up pointers to the top of the interrupt stack.
+ */
+ for (i = 0; i < ncpu; i++)
+ {
+ if (i == master_cpu)
+ {
+ interrupt_stack[i] = (vm_offset_t) intstack;
+ _int_stack_top[i] = (vm_offset_t) eintstack;
+ }
+ else if (machine_slot[i].is_cpu)
+ {
+ interrupt_stack[i] = stack_start;
+ _int_stack_top[i] = stack_start + INTSTACK_SIZE;
+
+ stack_start += INTSTACK_SIZE;
+ }
+ }
+
+ /*
+ * Set up the barrier address. All thread stacks MUST
+ * be above this address.
+ */
+ int_stack_high = stack_start;
}
/* XXX should be adjusted per CPU speed */
@@ -455,67 +486,77 @@ unsigned int simple_lock_pause_count = 0; /* debugging */
void
simple_lock_pause(void)
{
- static volatile int dummy;
- int i;
+ static volatile int dummy;
+ int i;
- simple_lock_pause_count++;
+ simple_lock_pause_count++;
- /*
- * Used in loops that are trying to acquire locks out-of-order.
- */
+ /*
+ * Used in loops that are trying to acquire locks out-of-order.
+ */
- for (i = 0; i < simple_lock_pause_loop; i++)
- dummy++; /* keep the compiler from optimizing the loop away */
+ for (i = 0; i < simple_lock_pause_loop; i++)
+ dummy++; /* keep the compiler from optimizing the loop away */
}
kern_return_t
cpu_control(int cpu, const int *info, unsigned int count)
{
- printf("cpu_control(%d, %p, %d) not implemented\n",
- cpu, info, count);
- return KERN_FAILURE;
+ printf("cpu_control(%d, %p, %d) not implemented\n",
+ cpu, info, count);
+ return KERN_FAILURE;
}
void
interrupt_processor(int cpu)
{
- printf("interrupt cpu %d\n",cpu);
+ printf("interrupt cpu %d\n",cpu);
}
kern_return_t
cpu_start(int cpu)
{
- if (machine_slot[cpu].running)
- return KERN_FAILURE;
+ if (machine_slot[cpu].running)
+ return KERN_FAILURE;
- return intel_startCPU(cpu);
+ return intel_startCPU(cpu);
}
void
start_other_cpus(void)
{
- int cpu;
- printf("found %d cpus\n", ncpu);
- printf("The current cpu is: %d\n", cpu_number());
- int apic_id = lapic->apic_id.r >>24;
-
- //copy start routine
- memcpy((void*)phystokv(AP_BOOT_ADDR), (void*) &apboot, (uint32_t)&apbootend - (uint32_t)&apboot);
-
- //update BSP machine_slot and apic2kernel
- machine_slot[0].apic_id = apic_id;
- apic2kernel[apic_id] = 0;
-
- for (cpu = 0; cpu < ncpu; cpu++){
- if (cpu != cpu_number()){
- //Initialize cpu stack
- #define STACK_SIZE (4096 * 2)
- *stack_ptr = (void*) kalloc(STACK_SIZE);
-
- machine_slot[cpu].running = FALSE;
- cpu_start(cpu);
- }
- }
+ int cpu;
+ vm_offset_t stack_start;
+ int apic_id = lapic->apic_id.r >>24;
+ printf("found %d cpus\n", ncpu);
+ printf("The current cpu is: %d\n", cpu_number());
+
+ //copy start routine
+  /*TODO: Copy the routine into a reserved physical page */
+ memcpy((void*)phystokv(AP_BOOT_ADDR), (void*) &apboot, (uint32_t)&apbootend - (uint32_t)&apboot);
+
+ //update BSP machine_slot and apic2kernel
+ machine_slot[0].apic_id = apic_id;
+ apic2kernel[apic_id] = 0;
+
+  //Allocate all the AP stacks as one contiguous block, STACK_SIZE bytes each
+  if (!init_alloc_aligned(STACK_SIZE*(ncpu-1), &stack_start))
+    panic("not enough memory for cpu stacks");
+  stack_start = phystokv(stack_start);
+
+  for (cpu = 0; cpu < ncpu; cpu++)
+    {
+      if (cpu != cpu_number())
+        {
+          //Initialize this cpu's stack slice
+          //(replaces the old per-cpu kalloc: *stack_ptr = (void*) kalloc(STACK_SIZE);)
+          cpu_stack[cpu] = stack_start;
+          _cpu_stack_top[cpu] = stack_start + STACK_SIZE;
+          stack_start += STACK_SIZE;
+
+          machine_slot[cpu].running = FALSE;
+          cpu_start(cpu);
+        }
+    }
}
#endif /* NCPUS > 1 */
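
The /*TODO: Add delays between IPIs*/ above startup_cpu() refers to the waits the Intel MP spec prescribes between the INIT and STARTUP IPIs. A hedged sketch of the same sequence with those waits added — ASSERT is an assumed constant, the counterpart of the patch's DE_ASSERT, and the 10 ms / 200 us figures come from the spec, not from this patch; delay() takes microseconds elsewhere in this file:

    /* Sketch only, not part of the patch: the INIT + double-SIPI
     * sequence of startup_cpu(), with the MP-spec waits inserted. */
    static void startup_cpu_with_delays(uint32_t apic_id)
    {
        /* INIT assert (ASSERT assumed; the patch shows only DE_ASSERT) */
        send_ipi(apic_id << 24, (INIT << 8) | (ASSERT << 14) | (LEVEL << 15));
        while (((lapic->icr_low.r >> 12) & 1) == SEND_PENDING);

        /* INIT de-assert, then wait ~10 ms */
        send_ipi(apic_id << 24, (INIT << 8) | (DE_ASSERT << 14) | (LEVEL << 15));
        while (((lapic->icr_low.r >> 12) & 1) == SEND_PENDING);
        delay(10000);

        /* two STARTUP IPIs, vector = AP_BOOT_ADDR's page, ~200 us apart */
        send_ipi(apic_id << 24, (STARTUP << 8) | ((AP_BOOT_ADDR >> 12) & 0xff));
        while (((lapic->icr_low.r >> 12) & 1) == SEND_PENDING);
        delay(200);
        send_ipi(apic_id << 24, (STARTUP << 8) | ((AP_BOOT_ADDR >> 12) & 0xff));
        while (((lapic->icr_low.r >> 12) & 1) == SEND_PENDING);
        delay(200);
    }
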
diff --git a/i386/i386/vm_param.h b/i386/i386/vm_param.h
index 7051b7af..c940835b 100644
--- a/i386/i386/vm_param.h
+++ b/i386/i386/vm_param.h
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright (c) 1994 The University of Utah and
* the Computer Systems Laboratory at the University of Utah (CSL).
* All rights reserved.
@@ -75,6 +75,7 @@
#else /* MACH_PV_PAGETABLES */
#define KERNEL_STACK_SIZE (1*I386_PGBYTES)
#define INTSTACK_SIZE (1*I386_PGBYTES)
+#define STACK_SIZE (4096 * 2)
#endif /* MACH_PV_PAGETABLES */
/* interrupt stack size */
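
One nit on the new constant: its neighbours are expressed in pages, so the same value could be written in the header's own units. A sketch, assuming I386_PGBYTES == 4096 as elsewhere on i386:

    #define STACK_SIZE (2*I386_PGBYTES)  /* == 4096 * 2, i.e. 8 KiB per AP stack */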