author    Richard Braun <rbraun@sceen.net>  2016-02-02 03:30:34 +0100
committer Richard Braun <rbraun@sceen.net>  2016-02-02 03:58:19 +0100
commit    5e9f6f52451ccb768d875370bf1769b27ff0041c (patch)
tree      d556eb00b4ce74e08ddf0f8b34a64cd9f3932067
parent    945f51bfe865e122d73986dd8219762450ffc0f3 (diff)
Stack the slab allocator directly on top of the physical allocator
In order to increase the amount of memory available for kernel objects,
without reducing the amount of memory available for user processes, a new
allocation strategy is introduced in this change.

Instead of allocating kernel objects out of kernel virtual memory, the
slab allocator directly uses the direct mapping of physical memory as its
backend. This largely increases the kernel heap, and removes the need for
address translation updates.

In order to allow this strategy, an assumption made by the interrupt code
had to be removed. In addition, kernel stacks are now also allocated
directly from the physical allocator.

* i386/i386/db_trace.c: Include i386at/model_dep.h.
(db_i386_reg_value): Update stack check.
* i386/i386/locore.S (trap_from_kernel, all_intrs, int_from_intstack):
Update interrupt handling.
* i386/i386at/model_dep.c: Include kern/macros.h.
(int_stack, int_stack_base): New variables.
(int_stack_high): Remove variable.
(i386at_init): Update interrupt stack initialization.
* i386/i386at/model_dep.h: Include i386/vm_param.h.
(int_stack_top, int_stack_base): New extern declarations.
(ON_INT_STACK): New macro.
* kern/slab.c: Include vm/vm_page.h.
(KMEM_CF_NO_CPU_POOL, KMEM_CF_NO_RECLAIM): Remove macros.
(kmem_pagealloc, kmem_pagefree, kalloc_pagealloc, kalloc_pagefree): Remove
functions.
(kmem_slab_create): Allocate slab pages directly from the physical
allocator.
(kmem_slab_destroy): Release slab pages directly to the physical
allocator.
(kmem_cache_compute_sizes): Update the slab size computation algorithm to
return a power-of-two suitable for the physical allocator.
(kmem_cache_init): Remove custom allocation function pointers.
(kmem_cache_reap): Remove check on KMEM_CF_NO_RECLAIM.
(slab_init, kalloc_init): Update calls to kmem_cache_init.
(kalloc, kfree): Directly fall back on the physical allocator for big
allocation sizes.
(host_slab_info): Remove checks on defunct flags.
* kern/slab.h (kmem_slab_alloc_fn_t, kmem_slab_free_fn_t): Remove types.
(struct kmem_cache): Add `slab_order' member, remove `slab_alloc_fn' and
`slab_free_fn' members.
(KMEM_CACHE_NOCPUPOOL, KMEM_CACHE_NORECLAIM): Remove macros.
(kmem_cache_init): Update prototype, remove custom allocation functions.
* kern/thread.c (stack_alloc): Allocate stacks from the physical
allocator.
* vm/vm_map.c (vm_map_kentry_cache, kentry_data, kentry_data_size): Remove
variables.
(kentry_pagealloc): Remove function.
(vm_map_init): Update calls to kmem_cache_init, remove initialization of
vm_map_kentry_cache.
(vm_map_create, _vm_map_entry_dispose, vm_map_copyout): Unconditionally
use vm_map_entry_cache.
* vm/vm_map.h (kentry_data, kentry_data_size, kentry_count): Remove extern
declarations.
* vm/vm_page.h (VM_PT_STACK): New page type.
* device/dev_lookup.c (dev_lookup_init): Update calls to kmem_cache_init.
* device/dev_pager.c (dev_pager_hash_init, device_pager_init): Likewise.
* device/ds_routines.c (mach_device_init, mach_device_trap_init): Likewise.
* device/net_io.c (net_io_init): Likewise.
* i386/i386/fpu.c (fpu_module_init): Likewise.
* i386/i386/machine_task.c (machine_task_module_init): Likewise.
* i386/i386/pcb.c (pcb_module_init): Likewise.
* i386/intel/pmap.c (pmap_init): Likewise.
* ipc/ipc_init.c (ipc_bootstrap): Likewise.
* ipc/ipc_marequest.c (ipc_marequest_init): Likewise.
* kern/act.c (global_act_init): Likewise.
* kern/processor.c (pset_sys_init): Likewise.
* kern/rdxtree.c (rdxtree_cache_init): Likewise.
* kern/task.c (task_init): Likewise.
* vm/memory_object_proxy.c (memory_object_proxy_init): Likewise.
* vm/vm_external.c (vm_external_module_initialize): Likewise.
* vm/vm_fault.c (vm_fault_init): Likewise.
* vm/vm_object.c (vm_object_bootstrap): Likewise.
* vm/vm_resident.c (vm_page_module_init): Likewise.
(vm_page_bootstrap): Remove initialization of kentry_data.
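The heart of the change, condensed from the kmem_slab_create and kmem_slab_destroy hunks below: a slab's pages now come straight from the physical allocator and are reached through the direct mapping, so growing a cache never touches the page tables. A minimal sketch of that path (the helper names slab_pages_alloc/slab_pages_free are hypothetical; the real code inlines these calls, and off-slab bookkeeping and verification are omitted):

/* Sketch only, based on the hunks below. */
static void * slab_pages_alloc(unsigned int order)
{
    struct vm_page *page;

    /* 2^order contiguous pages from the direct-mapped segment. */
    page = vm_page_alloc_pa(order, VM_PAGE_SEL_DIRECTMAP, VM_PT_KMEM);

    if (page == NULL)
        return NULL;

    /* The direct mapping turns the physical address into a usable
       kernel virtual address without any pmap update. */
    return (void *)phystokv(vm_page_to_pa(page));
}

static void slab_pages_free(void *addr, unsigned int order)
{
    struct vm_page *page;

    page = vm_page_lookup_pa(kvtophys((vm_offset_t)addr));
    assert(page != NULL);
    vm_page_free_pa(page, order);
}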
-rw-r--r--  device/dev_lookup.c       |   2
-rw-r--r--  device/dev_pager.c        |   4
-rw-r--r--  device/ds_routines.c      |   4
-rw-r--r--  device/net_io.c           |   4
-rw-r--r--  i386/i386/db_trace.c      |   4
-rw-r--r--  i386/i386/fpu.c           |   2
-rw-r--r--  i386/i386/locore.S        |  14
-rw-r--r--  i386/i386/machine_task.c  |   2
-rw-r--r--  i386/i386/pcb.c           |   2
-rw-r--r--  i386/i386at/model_dep.c   |  17
-rw-r--r--  i386/i386at/model_dep.h   |   9
-rw-r--r--  i386/intel/pmap.c         |   4
-rw-r--r--  ipc/ipc_init.c            |   8
-rw-r--r--  ipc/ipc_marequest.c       |   2
-rw-r--r--  kern/act.c                |   2
-rw-r--r--  kern/processor.c          |   2
-rw-r--r--  kern/rdxtree.c            |   2
-rw-r--r--  kern/slab.c               | 140
-rw-r--r--  kern/slab.h               |  23
-rw-r--r--  kern/task.c               |   2
-rw-r--r--  kern/thread.c             |  14
-rw-r--r--  vm/memory_object_proxy.c  |   2
-rw-r--r--  vm/vm_external.c          |   6
-rw-r--r--  vm/vm_fault.c             |   2
-rw-r--r--  vm/vm_map.c               |  60
-rw-r--r--  vm/vm_map.h               |   3
-rw-r--r--  vm/vm_object.c            |   2
-rw-r--r--  vm/vm_page.h              |   3
-rw-r--r--  vm/vm_resident.c          |   8
29 files changed, 124 insertions(+), 225 deletions(-)
diff --git a/device/dev_lookup.c b/device/dev_lookup.c
index a80830c2..9af7508c 100644
--- a/device/dev_lookup.c
+++ b/device/dev_lookup.c
@@ -366,5 +366,5 @@ dev_lookup_init(void)
queue_init(&dev_number_hash_table[i]);
kmem_cache_init(&dev_hdr_cache, "mach_device",
- sizeof(struct mach_device), 0, NULL, NULL, NULL, 0);
+ sizeof(struct mach_device), 0, NULL, 0);
}
diff --git a/device/dev_pager.c b/device/dev_pager.c
index 815473a9..40331706 100644
--- a/device/dev_pager.c
+++ b/device/dev_pager.c
@@ -173,7 +173,7 @@ void dev_pager_hash_init(void)
size = sizeof(struct dev_pager_entry);
kmem_cache_init(&dev_pager_hash_cache, "dev_pager_entry", size, 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
for (i = 0; i < DEV_PAGER_HASH_COUNT; i++)
queue_init(&dev_pager_hashtable[i]);
simple_lock_init(&dev_pager_hash_lock);
@@ -705,7 +705,7 @@ void device_pager_init(void)
*/
size = sizeof(struct dev_pager);
kmem_cache_init(&dev_pager_cache, "dev_pager", size, 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
/*
* Initialize the name port hashing stuff.
diff --git a/device/ds_routines.c b/device/ds_routines.c
index 43ed5b5d..dbff7f89 100644
--- a/device/ds_routines.c
+++ b/device/ds_routines.c
@@ -1554,7 +1554,7 @@ void mach_device_init(void)
device_io_map->wait_for_space = TRUE;
kmem_cache_init(&io_inband_cache, "io_buf_ptr_inband",
- sizeof(io_buf_ptr_inband_t), 0, NULL, NULL, NULL, 0);
+ sizeof(io_buf_ptr_inband_t), 0, NULL, 0);
mach_device_trap_init();
}
@@ -1598,7 +1598,7 @@ static void
mach_device_trap_init(void)
{
kmem_cache_init(&io_trap_cache, "io_req", IOTRAP_REQSIZE, 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
}
/*
diff --git a/device/net_io.c b/device/net_io.c
index 47ef2ea8..99af0b29 100644
--- a/device/net_io.c
+++ b/device/net_io.c
@@ -1495,11 +1495,11 @@ net_io_init(void)
size = sizeof(struct net_rcv_port);
kmem_cache_init(&net_rcv_cache, "net_rcv_port", size, 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
size = sizeof(struct net_hash_entry);
kmem_cache_init(&net_hash_entry_cache, "net_hash_entry", size, 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
size = ikm_plus_overhead(sizeof(struct net_rcv_msg));
net_kmsg_size = round_page(size);
diff --git a/i386/i386/db_trace.c b/i386/i386/db_trace.c
index ec338591..c8789e71 100644
--- a/i386/i386/db_trace.c
+++ b/i386/i386/db_trace.c
@@ -37,6 +37,7 @@
#include <machine/machspl.h>
#include <machine/db_interface.h>
#include <machine/db_trace.h>
+#include <i386at/model_dep.h>
#include <ddb/db_access.h>
#include <ddb/db_command.h>
@@ -129,7 +130,6 @@ db_i386_reg_value(
long *dp = 0;
db_expr_t null_reg = 0;
thread_t thread = ap->thread;
- extern unsigned int_stack_high;
if (db_option(ap->modif, 'u')) {
if (thread == THREAD_NULL) {
@@ -139,7 +139,7 @@ db_i386_reg_value(
if (thread == current_thread()) {
if (ddb_regs.cs & 0x3)
dp = vp->valuep;
- else if (ddb_regs.ebp < int_stack_high)
+ else if (ON_INT_STACK(ddb_regs.ebp))
db_error("cannot get/set user registers in nested interrupt\n");
}
} else {
diff --git a/i386/i386/fpu.c b/i386/i386/fpu.c
index 0f34833e..ddf4c8ed 100644
--- a/i386/i386/fpu.c
+++ b/i386/i386/fpu.c
@@ -189,7 +189,7 @@ fpu_module_init(void)
{
kmem_cache_init(&ifps_cache, "i386_fpsave_state",
sizeof(struct i386_fpsave_state), 16,
- NULL, NULL, NULL, 0);
+ NULL, 0);
}
/*
diff --git a/i386/i386/locore.S b/i386/i386/locore.S
index 8cefbccb..c715d959 100644
--- a/i386/i386/locore.S
+++ b/i386/i386/locore.S
@@ -542,8 +542,10 @@ trap_from_kernel:
#if MACH_KDB || MACH_TTD
movl %esp,%ebx /* save current stack */
- cmpl EXT(int_stack_high),%esp /* on an interrupt stack? */
- jb 1f /* OK if so */
+ movl %esp,%edx /* on an interrupt stack? */
+ and $(~(KERNEL_STACK_SIZE-1)),%edx
+ cmpl EXT(int_stack_base),%edx
+ je 1f /* OK if so */
CPU_NUMBER(%edx) /* get CPU number */
cmpl CX(EXT(kernel_stack),%edx),%esp
@@ -647,8 +649,10 @@ ENTRY(all_intrs)
pushl %edx
cld /* clear direction flag */
- cmpl %ss:EXT(int_stack_high),%esp /* on an interrupt stack? */
- jb int_from_intstack /* if not: */
+ movl %esp,%edx /* on an interrupt stack? */
+ and $(~(KERNEL_STACK_SIZE-1)),%edx
+ cmpl %ss:EXT(int_stack_base),%edx
+ je int_from_intstack /* if not: */
pushl %ds /* save segment registers */
pushl %es
@@ -707,7 +711,7 @@ LEXT(return_to_iret) /* ( label for kdb_kintr and hardclock) */
iret /* return to caller */
int_from_intstack:
- cmpl $EXT(_intstack),%esp /* seemingly looping? */
+ cmpl EXT(int_stack_base),%esp /* seemingly looping? */
jb stack_overflowed /* if not: */
call EXT(interrupt) /* call interrupt routine */
_return_to_iret_i: /* ( label for kdb_kintr) */
diff --git a/i386/i386/machine_task.c b/i386/i386/machine_task.c
index 490b1022..d592838a 100644
--- a/i386/i386/machine_task.c
+++ b/i386/i386/machine_task.c
@@ -38,7 +38,7 @@ void
machine_task_module_init (void)
{
kmem_cache_init (&machine_task_iopb_cache, "i386_task_iopb", IOPB_BYTES, 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
}
diff --git a/i386/i386/pcb.c b/i386/i386/pcb.c
index 3a0eba0f..6b22e4c4 100644
--- a/i386/i386/pcb.c
+++ b/i386/i386/pcb.c
@@ -371,7 +371,7 @@ thread_t switch_context(
void pcb_module_init(void)
{
kmem_cache_init(&pcb_cache, "pcb", sizeof(struct pcb), 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
fpu_module_init();
}
diff --git a/i386/i386at/model_dep.c b/i386/i386at/model_dep.c
index 7638b832..62763ae1 100644
--- a/i386/i386at/model_dep.c
+++ b/i386/i386at/model_dep.c
@@ -47,6 +47,7 @@
#include <kern/cpu_number.h>
#include <kern/debug.h>
#include <kern/mach_clock.h>
+#include <kern/macros.h>
#include <kern/printf.h>
#include <kern/startup.h>
#include <sys/time.h>
@@ -133,8 +134,9 @@ extern char version[];
/* If set, reboot the system on ctrl-alt-delete. */
boolean_t rebootflag = FALSE; /* exported to kdintr */
-/* XX interrupt stack pointer and highwater mark, for locore.S. */
-vm_offset_t int_stack_top, int_stack_high;
+/* Interrupt stack. */
+static char int_stack[KERNEL_STACK_SIZE] __aligned(KERNEL_STACK_SIZE);
+vm_offset_t int_stack_top, int_stack_base;
#ifdef LINUX_DEV
extern void linux_init(void);
@@ -398,11 +400,6 @@ i386at_init(void)
pmap_clear_bootstrap_pagetable((void *)boot_info.pt_base);
#endif /* MACH_PV_PAGETABLES */
- /* Interrupt stacks are allocated in physical memory,
- while kernel stacks are allocated in kernel virtual memory,
- so phys_last_addr serves as a convenient dividing point. */
- int_stack_high = phystokv(phys_last_addr);
-
/*
* Initialize and activate the real i386 protected-mode structures.
*/
@@ -448,10 +445,8 @@ i386at_init(void)
hyp_p2m_init();
#endif /* MACH_XEN */
- /* XXX We'll just use the initialization stack we're already running on
- as the interrupt stack for now. Later this will have to change,
- because the init stack will get freed after bootup. */
- asm("movl %%esp,%0" : "=m" (int_stack_top));
+ int_stack_base = (vm_offset_t)&int_stack;
+ int_stack_top = int_stack_base + KERNEL_STACK_SIZE - 4;
}
/*
diff --git a/i386/i386at/model_dep.h b/i386/i386at/model_dep.h
index aa240320..47551b85 100644
--- a/i386/i386at/model_dep.h
+++ b/i386/i386at/model_dep.h
@@ -19,8 +19,17 @@
#ifndef _MODEL_DEP_H_
#define _MODEL_DEP_H_
+#include <i386/vm_param.h>
#include <mach/vm_prot.h>
+/*
+ * Interrupt stack.
+ */
+extern vm_offset_t int_stack_top, int_stack_base;
+
+/* Check whether P points to the interrupt stack. */
+#define ON_INT_STACK(P) (((P) & ~(KERNEL_STACK_SIZE-1)) == int_stack_base)
+
extern int timemmap(dev_t dev, vm_offset_t off, vm_prot_t prot);
void inittodr(void);
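Because int_stack is declared __aligned(KERNEL_STACK_SIZE) in model_dep.c, rounding any address inside it down to a KERNEL_STACK_SIZE boundary yields int_stack_base, which is exactly what ON_INT_STACK tests. A small usage sketch (addresses hypothetical, kernel context assumed):

    vm_offset_t sp;

    sp = int_stack_base + 0x100;   /* an address somewhere on the interrupt stack */
    assert(ON_INT_STACK(sp));      /* (sp & ~(KERNEL_STACK_SIZE-1)) == int_stack_base */

    sp = int_stack_base - 1;       /* just below the interrupt stack */
    assert(!ON_INT_STACK(sp));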
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index 22e20c97..0771a08d 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -978,9 +978,9 @@ void pmap_init(void)
* and of the physical-to-virtual entries.
*/
s = (vm_size_t) sizeof(struct pmap);
- kmem_cache_init(&pmap_cache, "pmap", s, 0, NULL, NULL, NULL, 0);
+ kmem_cache_init(&pmap_cache, "pmap", s, 0, NULL, 0);
s = (vm_size_t) sizeof(struct pv_entry);
- kmem_cache_init(&pv_list_cache, "pv_entry", s, 0, NULL, NULL, NULL, 0);
+ kmem_cache_init(&pv_list_cache, "pv_entry", s, 0, NULL, 0);
#if NCPUS > 1
/*
diff --git a/ipc/ipc_init.c b/ipc/ipc_init.c
index 2c58a6e4..5ed800f4 100644
--- a/ipc/ipc_init.c
+++ b/ipc/ipc_init.c
@@ -73,16 +73,16 @@ ipc_bootstrap(void)
ipc_port_timestamp_data = 0;
kmem_cache_init(&ipc_space_cache, "ipc_space",
- sizeof(struct ipc_space), 0, NULL, NULL, NULL, 0);
+ sizeof(struct ipc_space), 0, NULL, 0);
kmem_cache_init(&ipc_entry_cache, "ipc_entry",
- sizeof(struct ipc_entry), 0, NULL, NULL, NULL, 0);
+ sizeof(struct ipc_entry), 0, NULL, 0);
kmem_cache_init(&ipc_object_caches[IOT_PORT], "ipc_port",
- sizeof(struct ipc_port), 0, NULL, NULL, NULL, 0);
+ sizeof(struct ipc_port), 0, NULL, 0);
kmem_cache_init(&ipc_object_caches[IOT_PORT_SET], "ipc_pset",
- sizeof(struct ipc_pset), 0, NULL, NULL, NULL, 0);
+ sizeof(struct ipc_pset), 0, NULL, 0);
/* create special spaces */
diff --git a/ipc/ipc_marequest.c b/ipc/ipc_marequest.c
index ded1711d..736db838 100644
--- a/ipc/ipc_marequest.c
+++ b/ipc/ipc_marequest.c
@@ -137,7 +137,7 @@ ipc_marequest_init(void)
}
kmem_cache_init(&ipc_marequest_cache, "ipc_marequest",
- sizeof(struct ipc_marequest), 0, NULL, NULL, NULL, 0);
+ sizeof(struct ipc_marequest), 0, NULL, 0);
}
/*
diff --git a/kern/act.c b/kern/act.c
index 3186f7e9..3819ef32 100644
--- a/kern/act.c
+++ b/kern/act.c
@@ -68,7 +68,7 @@ global_act_init(void)
{
#ifndef ACT_STATIC_KLUDGE
kmem_cache_init(&act_cache, "Act", sizeof(struct Act), 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
#else
int i;
diff --git a/kern/processor.c b/kern/processor.c
index 48e92731..0a88469b 100644
--- a/kern/processor.c
+++ b/kern/processor.c
@@ -109,7 +109,7 @@ void pset_sys_init(void)
* Allocate the cache for processor sets.
*/
kmem_cache_init(&pset_cache, "processor_set",
- sizeof(struct processor_set), 0, NULL, NULL, NULL, 0);
+ sizeof(struct processor_set), 0, NULL, 0);
/*
* Give each processor a control port.
diff --git a/kern/rdxtree.c b/kern/rdxtree.c
index 78868b1f..a23d6e7e 100644
--- a/kern/rdxtree.c
+++ b/kern/rdxtree.c
@@ -124,7 +124,7 @@ void
rdxtree_cache_init(void)
{
kmem_cache_init(&rdxtree_node_cache, "rdxtree_node",
- sizeof(struct rdxtree_node), 0, NULL, NULL, NULL, 0);
+ sizeof(struct rdxtree_node), 0, NULL, 0);
}
#ifdef RDXTREE_ENABLE_NODE_CREATION_FAILURES
diff --git a/kern/slab.c b/kern/slab.c
index 8bc1b06b..a887cbb5 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -87,6 +87,7 @@
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
#include <vm/vm_types.h>
#include <sys/types.h>
@@ -217,9 +218,7 @@
*
* The flags don't change once set and can be tested without locking.
*/
-#define KMEM_CF_NO_CPU_POOL 0x01 /* CPU pool layer disabled */
#define KMEM_CF_SLAB_EXTERNAL 0x02 /* Slab data is off slab */
-#define KMEM_CF_NO_RECLAIM 0x04 /* Slabs are not reclaimable */
#define KMEM_CF_VERIFY 0x08 /* Debugging facilities enabled */
#define KMEM_CF_DIRECT 0x10 /* No buf-to-slab tree lookup */
@@ -384,24 +383,6 @@ static inline void * kmem_bufctl_to_buf(union kmem_bufctl *bufctl,
return (void *)bufctl - cache->bufctl_dist;
}
-static vm_offset_t kmem_pagealloc(vm_size_t size)
-{
- vm_offset_t addr;
- kern_return_t kr;
-
- kr = kmem_alloc_wired(kmem_map, &addr, size);
-
- if (kr != KERN_SUCCESS)
- return 0;
-
- return addr;
-}
-
-static void kmem_pagefree(vm_offset_t ptr, vm_size_t size)
-{
- kmem_free(kmem_map, ptr, size);
-}
-
static void kmem_slab_create_verify(struct kmem_slab *slab,
struct kmem_cache *cache)
{
@@ -430,30 +411,27 @@ static void kmem_slab_create_verify(struct kmem_slab *slab,
static struct kmem_slab * kmem_slab_create(struct kmem_cache *cache,
size_t color)
{
+ struct vm_page *page;
struct kmem_slab *slab;
union kmem_bufctl *bufctl;
size_t buf_size;
unsigned long buffers;
void *slab_buf;
- if (cache->slab_alloc_fn == NULL)
- slab_buf = (void *)kmem_pagealloc(cache->slab_size);
- else
- slab_buf = (void *)cache->slab_alloc_fn(cache->slab_size);
+ page = vm_page_alloc_pa(cache->slab_order,
+ VM_PAGE_SEL_DIRECTMAP,
+ VM_PT_KMEM);
- if (slab_buf == NULL)
+ if (page == NULL)
return NULL;
+ slab_buf = (void *)phystokv(vm_page_to_pa(page));
+
if (cache->flags & KMEM_CF_SLAB_EXTERNAL) {
- assert(!(cache->flags & KMEM_CF_NO_RECLAIM));
slab = (struct kmem_slab *)kmem_cache_alloc(&kmem_slab_cache);
if (slab == NULL) {
- if (cache->slab_free_fn == NULL)
- kmem_pagefree((vm_offset_t)slab_buf, cache->slab_size);
- else
- cache->slab_free_fn((vm_offset_t)slab_buf, cache->slab_size);
-
+ vm_page_free_pa(page, cache->slab_order);
return NULL;
}
} else {
@@ -514,21 +492,19 @@ static void kmem_slab_destroy_verify(struct kmem_slab *slab,
*/
static void kmem_slab_destroy(struct kmem_slab *slab, struct kmem_cache *cache)
{
+ struct vm_page *page;
vm_offset_t slab_buf;
assert(slab->nr_refs == 0);
assert(slab->first_free != NULL);
- assert(!(cache->flags & KMEM_CF_NO_RECLAIM));
if (cache->flags & KMEM_CF_VERIFY)
kmem_slab_destroy_verify(slab, cache);
slab_buf = (vm_offset_t)P2ALIGN((unsigned long)slab->addr, PAGE_SIZE);
-
- if (cache->slab_free_fn == NULL)
- kmem_pagefree(slab_buf, cache->slab_size);
- else
- cache->slab_free_fn(slab_buf, cache->slab_size);
+ page = vm_page_lookup_pa(kvtophys(slab_buf));
+ assert(page != NULL);
+ vm_page_free_pa(page, cache->slab_order);
if (cache->flags & KMEM_CF_SLAB_EXTERNAL)
kmem_cache_free(&kmem_slab_cache, (vm_offset_t)slab);
@@ -702,9 +678,10 @@ static void kmem_cache_error(struct kmem_cache *cache, void *buf, int error,
*/
static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
{
- size_t i, buffers, buf_size, slab_size, free_slab_size, optimal_size = 0;
- size_t waste, waste_min;
- int embed, optimal_embed = 0;
+ size_t i, buffers, buf_size, slab_size, free_slab_size;
+ size_t waste, waste_min, optimal_size = optimal_size;
+ int embed, optimal_embed = optimal_embed;
+ unsigned int slab_order, optimal_order = optimal_order;
buf_size = cache->buf_size;
@@ -716,7 +693,9 @@ static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
do {
i++;
- slab_size = P2ROUND(i * buf_size, PAGE_SIZE);
+
+ slab_order = vm_page_order(i * buf_size);
+ slab_size = PAGE_SIZE << slab_order;
free_slab_size = slab_size;
if (flags & KMEM_CACHE_NOOFFSLAB)
@@ -739,19 +718,19 @@ static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
if (waste <= waste_min) {
waste_min = waste;
+ optimal_order = slab_order;
optimal_size = slab_size;
optimal_embed = embed;
}
} while ((buffers < KMEM_MIN_BUFS_PER_SLAB)
&& (slab_size < KMEM_SLAB_SIZE_THRESHOLD));
- assert(optimal_size > 0);
assert(!(flags & KMEM_CACHE_NOOFFSLAB) || optimal_embed);
+ cache->slab_order = optimal_order;
cache->slab_size = optimal_size;
- slab_size = cache->slab_size - (optimal_embed
- ? sizeof(struct kmem_slab)
- : 0);
+ slab_size = cache->slab_size
+ - (optimal_embed ? sizeof(struct kmem_slab) : 0);
cache->bufs_per_slab = slab_size / buf_size;
cache->color_max = slab_size % buf_size;
@@ -767,9 +746,8 @@ static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
}
void kmem_cache_init(struct kmem_cache *cache, const char *name,
- size_t obj_size, size_t align, kmem_cache_ctor_t ctor,
- kmem_slab_alloc_fn_t slab_alloc_fn,
- kmem_slab_free_fn_t slab_free_fn, int flags)
+ size_t obj_size, size_t align,
+ kmem_cache_ctor_t ctor, int flags)
{
#if SLAB_USE_CPU_POOLS
struct kmem_cpu_pool_type *cpu_pool_type;
@@ -783,15 +761,6 @@ void kmem_cache_init(struct kmem_cache *cache, const char *name,
cache->flags = 0;
#endif /* SLAB_VERIFY */
- if (flags & KMEM_CACHE_NOCPUPOOL)
- cache->flags |= KMEM_CF_NO_CPU_POOL;
-
- if (flags & KMEM_CACHE_NORECLAIM) {
- assert(slab_free_fn == NULL);
- flags |= KMEM_CACHE_NOOFFSLAB;
- cache->flags |= KMEM_CF_NO_RECLAIM;
- }
-
if (flags & KMEM_CACHE_VERIFY)
cache->flags |= KMEM_CF_VERIFY;
@@ -819,8 +788,6 @@ void kmem_cache_init(struct kmem_cache *cache, const char *name,
cache->nr_slabs = 0;
cache->nr_free_slabs = 0;
cache->ctor = ctor;
- cache->slab_alloc_fn = slab_alloc_fn;
- cache->slab_free_fn = slab_free_fn;
strncpy(cache->name, name, sizeof(cache->name));
cache->name[sizeof(cache->name) - 1] = '\0';
cache->buftag_dist = 0;
@@ -908,9 +875,6 @@ static void kmem_cache_reap(struct kmem_cache *cache)
struct list dead_slabs;
unsigned long nr_free_slabs;
- if (cache->flags & KMEM_CF_NO_RECLAIM)
- return;
-
simple_lock(&cache->lock);
list_set_head(&dead_slabs, &cache->free_slabs);
list_init(&cache->free_slabs);
@@ -1297,7 +1261,7 @@ void slab_init(void)
sprintf(name, "kmem_cpu_array_%d", cpu_pool_type->array_size);
size = sizeof(void *) * cpu_pool_type->array_size;
kmem_cache_init(cpu_pool_type->array_cache, name, size,
- cpu_pool_type->array_align, NULL, NULL, NULL, 0);
+ cpu_pool_type->array_align, NULL, 0);
}
#endif /* SLAB_USE_CPU_POOLS */
@@ -1305,25 +1269,7 @@ void slab_init(void)
* Prevent off slab data for the slab cache to avoid infinite recursion.
*/
kmem_cache_init(&kmem_slab_cache, "kmem_slab", sizeof(struct kmem_slab),
- 0, NULL, NULL, NULL, KMEM_CACHE_NOOFFSLAB);
-}
-
-static vm_offset_t kalloc_pagealloc(vm_size_t size)
-{
- vm_offset_t addr;
- kern_return_t kr;
-
- kr = kmem_alloc_wired(kmem_map, &addr, size);
-
- if (kr != KERN_SUCCESS)
- return 0;
-
- return addr;
-}
-
-static void kalloc_pagefree(vm_offset_t ptr, vm_size_t size)
-{
- kmem_free(kmem_map, ptr, size);
+ 0, NULL, KMEM_CACHE_NOOFFSLAB);
}
void kalloc_init(void)
@@ -1335,8 +1281,7 @@ void kalloc_init(void)
for (i = 0; i < ARRAY_SIZE(kalloc_caches); i++) {
sprintf(name, "kalloc_%lu", size);
- kmem_cache_init(&kalloc_caches[i], name, size, 0, NULL,
- kalloc_pagealloc, kalloc_pagefree, 0);
+ kmem_cache_init(&kalloc_caches[i], name, size, 0, NULL, 0);
size <<= 1;
}
}
@@ -1387,8 +1332,18 @@ vm_offset_t kalloc(vm_size_t size)
if ((buf != 0) && (cache->flags & KMEM_CF_VERIFY))
kalloc_verify(cache, buf, size);
- } else
- buf = (void *)kalloc_pagealloc(size);
+ } else {
+ struct vm_page *page;
+
+ page = vm_page_alloc_pa(vm_page_order(size),
+ VM_PAGE_SEL_DIRECTMAP,
+ VM_PT_KERNEL);
+
+ if (page == NULL)
+ return 0;
+
+ buf = (void *)phystokv(vm_page_to_pa(page));
+ }
return (vm_offset_t)buf;
}
@@ -1429,7 +1384,10 @@ void kfree(vm_offset_t data, vm_size_t size)
kmem_cache_free(cache, data);
} else {
- kalloc_pagefree(data, size);
+ struct vm_page *page;
+
+ page = vm_page_lookup_pa(kvtophys(data));
+ vm_page_free_pa(page, vm_page_order(size));
}
}
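For the large-size fall-back in kalloc and kfree above, nothing records the allocation order, so the caller must pass back the same size it allocated with; kfree recomputes the order from it. A usage sketch (BIG_SIZE is a hypothetical stand-in for any size above the largest kalloc cache):

void example_big_alloc(void)
{
    vm_offset_t buf;

    buf = kalloc(BIG_SIZE);    /* too big for the caches: order allocation */

    if (buf == 0)
        return;

    /* Must match the kalloc size so vm_page_order(BIG_SIZE) is the same. */
    kfree(buf, BIG_SIZE);
}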
@@ -1529,12 +1487,8 @@ kern_return_t host_slab_info(host_t host, cache_info_array_t *infop,
list_for_each_entry(&kmem_cache_list, cache, node) {
simple_lock(&cache->lock);
- info[i].flags = ((cache->flags & KMEM_CF_NO_CPU_POOL)
- ? CACHE_FLAGS_NO_CPU_POOL : 0)
- | ((cache->flags & KMEM_CF_SLAB_EXTERNAL)
- ? CACHE_FLAGS_SLAB_EXTERNAL : 0)
- | ((cache->flags & KMEM_CF_NO_RECLAIM)
- ? CACHE_FLAGS_NO_RECLAIM : 0)
+ info[i].flags = ((cache->flags & KMEM_CF_SLAB_EXTERNAL)
+ ? CACHE_FLAGS_SLAB_EXTERNAL : 0)
| ((cache->flags & KMEM_CF_VERIFY)
? CACHE_FLAGS_VERIFY : 0)
| ((cache->flags & KMEM_CF_DIRECT)
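kmem_cache_compute_sizes now sizes slabs in whole power-of-two page orders, since that is the unit the physical allocator hands out. The behaviour assumed of vm_page_order() here is that it returns the smallest order whose span covers the given size; an illustrative equivalent, not the actual implementation:

/* Illustration only: smallest order such that (PAGE_SIZE << order) >= size. */
static unsigned int example_page_order(size_t size)
{
    unsigned int order = 0;

    while (((size_t)PAGE_SIZE << order) < size)
        order++;

    return order;
}

/* With 4 KiB pages: example_page_order(4096) == 0, (4097) == 1, (16384) == 2. */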
diff --git a/kern/slab.h b/kern/slab.h
index 5ff3960e..1ad24d63 100644
--- a/kern/slab.h
+++ b/kern/slab.h
@@ -137,14 +137,6 @@ struct kmem_slab {
typedef void (*kmem_cache_ctor_t)(void *obj);
/*
- * Types for slab allocation/free functions.
- *
- * All addresses and sizes must be page-aligned.
- */
-typedef vm_offset_t (*kmem_slab_alloc_fn_t)(vm_size_t);
-typedef void (*kmem_slab_free_fn_t)(vm_offset_t, vm_size_t);
-
-/*
* Cache name buffer size. The size is chosen so that struct
* kmem_cache fits into two cache lines. The size of a cache line on
* a typical CPU is 64 bytes.
@@ -175,6 +167,7 @@ struct kmem_cache {
struct rbtree active_slabs;
int flags;
size_t bufctl_dist; /* Distance from buffer to bufctl */
+ unsigned int slab_order;
size_t slab_size;
unsigned long bufs_per_slab;
unsigned long nr_objs; /* Number of allocated objects */
@@ -189,8 +182,6 @@ struct kmem_cache {
size_t color_max;
unsigned long nr_bufs; /* Total number of buffers */
unsigned long nr_slabs;
- kmem_slab_alloc_fn_t slab_alloc_fn;
- kmem_slab_free_fn_t slab_free_fn;
char name[KMEM_CACHE_NAME_SIZE];
size_t buftag_dist; /* Distance from buffer to buftag */
size_t redzone_pad; /* Bytes from end of object to redzone word */
@@ -210,19 +201,15 @@ extern vm_map_t kmem_map;
/*
* Cache initialization flags.
*/
-#define KMEM_CACHE_NOCPUPOOL 0x1 /* Don't use the per-cpu pools */
-#define KMEM_CACHE_NOOFFSLAB 0x2 /* Don't allocate external slab data */
-#define KMEM_CACHE_NORECLAIM 0x4 /* Never give slabs back to their source,
- implies KMEM_CACHE_NOOFFSLAB */
-#define KMEM_CACHE_VERIFY 0x8 /* Use debugging facilities */
+#define KMEM_CACHE_NOOFFSLAB 0x1 /* Don't allocate external slab data */
+#define KMEM_CACHE_VERIFY 0x2 /* Use debugging facilities */
/*
* Initialize a cache.
*/
void kmem_cache_init(struct kmem_cache *cache, const char *name,
- size_t obj_size, size_t align, kmem_cache_ctor_t ctor,
- kmem_slab_alloc_fn_t slab_alloc_fn,
- kmem_slab_free_fn_t slab_free_fn, int flags);
+ size_t obj_size, size_t align,
+ kmem_cache_ctor_t ctor, int flags);
/*
* Allocate an object from a cache.
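For reference, a call under the new prototype looks like this (cache and structure names hypothetical; every call site touched by this change passes NULL for the constructor and, except for kmem_slab_cache, 0 for the flags):

struct example_obj {
    int field;
};

static struct kmem_cache example_cache;

void example_module_init(void)
{
    /* No slab_alloc_fn/slab_free_fn parameters anymore: slabs always
       come from the physical allocator. */
    kmem_cache_init(&example_cache, "example_obj",
                    sizeof(struct example_obj), 0, NULL, 0);
}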
diff --git a/kern/task.c b/kern/task.c
index e9e6ba24..0f24e44d 100644
--- a/kern/task.c
+++ b/kern/task.c
@@ -63,7 +63,7 @@ ipc_port_t new_task_notification = NULL;
void task_init(void)
{
kmem_cache_init(&task_cache, "task", sizeof(struct task), 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
eml_init();
machine_task_module_init ();
diff --git a/kern/thread.c b/kern/thread.c
index 3e900792..91e08697 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -195,16 +195,20 @@ kern_return_t stack_alloc(
(void) splx(s);
if (stack == 0) {
- kern_return_t kr;
+ struct vm_page *page;
+
/*
* Kernel stacks should be naturally aligned,
* so that it is easy to find the starting/ending
* addresses of a stack given an address in the middle.
*/
- kr = kmem_alloc_aligned(kmem_map, &stack, KERNEL_STACK_SIZE);
- if (kr != KERN_SUCCESS)
- return kr;
+ page = vm_page_alloc_pa(vm_page_order(KERNEL_STACK_SIZE),
+ VM_PAGE_SEL_DIRECTMAP,
+ VM_PT_STACK);
+ if (page == NULL)
+ return KERN_RESOURCE_SHORTAGE;
+ stack = phystokv(vm_page_to_pa(page));
#if MACH_DEBUG
stack_init(stack);
#endif /* MACH_DEBUG */
@@ -298,7 +302,7 @@ void stack_privilege(
void thread_init(void)
{
kmem_cache_init(&thread_cache, "thread", sizeof(struct thread), 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
/*
* Fill in a template thread for fast initialization.
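Assuming the physical allocator returns order-sized blocks that are naturally aligned, the allocation in stack_alloc above gives exactly the alignment its comment asks for: the base of a kernel stack can be recovered from any address inside it by masking, the same trick locore.S now applies to the interrupt stack. A sketch (the helper name is hypothetical):

static inline vm_offset_t stack_base_of(vm_offset_t addr)
{
    /* Valid only because kernel stacks are KERNEL_STACK_SIZE-aligned. */
    return addr & ~((vm_offset_t)KERNEL_STACK_SIZE - 1);
}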
diff --git a/vm/memory_object_proxy.c b/vm/memory_object_proxy.c
index a64bfcce..01bce2a5 100644
--- a/vm/memory_object_proxy.c
+++ b/vm/memory_object_proxy.c
@@ -64,7 +64,7 @@ void
memory_object_proxy_init (void)
{
kmem_cache_init (&memory_object_proxy_cache, "memory_object_proxy",
- sizeof (struct memory_object_proxy), 0, NULL, NULL, NULL, 0);
+ sizeof (struct memory_object_proxy), 0, NULL, 0);
}
/* Lookup a proxy memory object by its port. */
diff --git a/vm/vm_external.c b/vm/vm_external.c
index 2e2593b1..097a9b12 100644
--- a/vm/vm_external.c
+++ b/vm/vm_external.c
@@ -138,13 +138,13 @@ void vm_external_module_initialize(void)
vm_size_t size = (vm_size_t) sizeof(struct vm_external);
kmem_cache_init(&vm_external_cache, "vm_external", size, 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
kmem_cache_init(&vm_object_small_existence_map_cache,
"small_existence_map", SMALL_SIZE, 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
kmem_cache_init(&vm_object_large_existence_map_cache,
"large_existence_map", LARGE_SIZE, 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
}
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
index 4d674174..09e2c54d 100644
--- a/vm/vm_fault.c
+++ b/vm/vm_fault.c
@@ -105,7 +105,7 @@ extern struct db_watchpoint *db_watchpoint_list;
void vm_fault_init(void)
{
kmem_cache_init(&vm_fault_state_cache, "vm_fault_state",
- sizeof(vm_fault_state_t), 0, NULL, NULL, NULL, 0);
+ sizeof(vm_fault_state_t), 0, NULL, 0);
}
/*
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 3a231de0..0d610621 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -126,7 +126,6 @@ MACRO_END
struct kmem_cache vm_map_cache; /* cache for vm_map structures */
struct kmem_cache vm_map_entry_cache; /* cache for vm_map_entry structures */
-struct kmem_cache vm_map_kentry_cache; /* cache for kernel entry structures */
struct kmem_cache vm_map_copy_cache; /* cache for vm_map_copy structures */
/*
@@ -151,43 +150,16 @@ vm_object_t vm_submap_object = &vm_submap_object_store;
*
* vm_map_cache: used to allocate maps.
* vm_map_entry_cache: used to allocate map entries.
- * vm_map_kentry_cache: used to allocate map entries for the kernel.
- *
- * Kernel map entries are allocated from a special cache, using a custom
- * page allocation function to avoid recursion. It would be difficult
- * (perhaps impossible) for the kernel to allocate more memory to an entry
- * cache when it became empty since the very act of allocating memory
- * implies the creation of a new entry.
*/
-vm_offset_t kentry_data;
-vm_size_t kentry_data_size = KENTRY_DATA_SIZE;
-
-static vm_offset_t kentry_pagealloc(vm_size_t size)
-{
- vm_offset_t result;
-
- if (size > kentry_data_size)
- panic("vm_map: kentry memory exhausted");
-
- result = kentry_data;
- kentry_data += size;
- kentry_data_size -= size;
- return result;
-}
-
void vm_map_init(void)
{
kmem_cache_init(&vm_map_cache, "vm_map", sizeof(struct vm_map), 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
kmem_cache_init(&vm_map_entry_cache, "vm_map_entry",
- sizeof(struct vm_map_entry), 0, NULL, NULL, NULL, 0);
- kmem_cache_init(&vm_map_kentry_cache, "vm_map_kentry",
- sizeof(struct vm_map_entry), 0, NULL, kentry_pagealloc,
- NULL, KMEM_CACHE_NOCPUPOOL | KMEM_CACHE_NOOFFSLAB
- | KMEM_CACHE_NORECLAIM);
+ sizeof(struct vm_map_entry), 0, NULL, 0);
kmem_cache_init(&vm_map_copy_cache, "vm_map_copy",
- sizeof(struct vm_map_copy), 0, NULL, NULL, NULL, 0);
+ sizeof(struct vm_map_copy), 0, NULL, 0);
/*
* Submap object is initialized by vm_object_init.
@@ -261,15 +233,9 @@ vm_map_t vm_map_create(
vm_map_entry_t _vm_map_entry_create(map_header)
const struct vm_map_header *map_header;
{
- kmem_cache_t cache;
vm_map_entry_t entry;
- if (map_header->entries_pageable)
- cache = &vm_map_entry_cache;
- else
- cache = &vm_map_kentry_cache;
-
- entry = (vm_map_entry_t) kmem_cache_alloc(cache);
+ entry = (vm_map_entry_t) kmem_cache_alloc(&vm_map_entry_cache);
if (entry == VM_MAP_ENTRY_NULL)
panic("vm_map_entry_create");
@@ -291,14 +257,9 @@ void _vm_map_entry_dispose(map_header, entry)
const struct vm_map_header *map_header;
vm_map_entry_t entry;
{
- kmem_cache_t cache;
-
- if (map_header->entries_pageable)
- cache = &vm_map_entry_cache;
- else
- cache = &vm_map_kentry_cache;
+ (void)map_header;
- kmem_cache_free(cache, (vm_offset_t) entry);
+ kmem_cache_free(&vm_map_entry_cache, (vm_offset_t) entry);
}
/*
@@ -2539,15 +2500,8 @@ kern_return_t vm_map_copyout(
* Mismatches occur when dealing with the default
* pager.
*/
- kmem_cache_t old_cache;
vm_map_entry_t next, new;
- /*
- * Find the cache that the copies were allocated from
- */
- old_cache = (copy->cpy_hdr.entries_pageable)
- ? &vm_map_entry_cache
- : &vm_map_kentry_cache;
entry = vm_map_copy_first_entry(copy);
/*
@@ -2571,7 +2525,7 @@ kern_return_t vm_map_copyout(
vm_map_copy_last_entry(copy),
new);
next = entry->vme_next;
- kmem_cache_free(old_cache, (vm_offset_t) entry);
+ kmem_cache_free(&vm_map_entry_cache, (vm_offset_t) entry);
entry = next;
}
}
diff --git a/vm/vm_map.h b/vm/vm_map.h
index 9b31f90a..b4ba7c7b 100644
--- a/vm/vm_map.h
+++ b/vm/vm_map.h
@@ -363,9 +363,6 @@ MACRO_END
* Exported procedures that operate on vm_map_t.
*/
-extern vm_offset_t kentry_data;
-extern vm_size_t kentry_data_size;
-extern int kentry_count;
/* Initialize the module */
extern void vm_map_init(void);
diff --git a/vm/vm_object.c b/vm/vm_object.c
index eda03c65..ece3a83c 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -262,7 +262,7 @@ vm_object_t vm_object_allocate(
void vm_object_bootstrap(void)
{
kmem_cache_init(&vm_object_cache, "vm_object",
- sizeof(struct vm_object), 0, NULL, NULL, NULL, 0);
+ sizeof(struct vm_object), 0, NULL, 0);
queue_init(&vm_object_cached_list);
simple_lock_init(&vm_object_cached_lock_data);
diff --git a/vm/vm_page.h b/vm/vm_page.h
index 7607aad0..4e870d82 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -378,7 +378,8 @@ extern unsigned int vm_page_info(
#define VM_PT_TABLE 2 /* Page is part of the page table */
#define VM_PT_PMAP 3 /* Page stores pmap-specific data */
#define VM_PT_KMEM 4 /* Page is part of a kmem slab */
-#define VM_PT_KERNEL 5 /* Type for generic kernel allocations */
+#define VM_PT_STACK 5 /* Type for kernel stack pages */
+#define VM_PT_KERNEL 6 /* Type for generic kernel allocations */
static inline unsigned short
vm_page_type(const struct vm_page *page)
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index 9fd64918..dd1cf9cd 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -193,12 +193,6 @@ void vm_page_bootstrap(
vm_page_free_wanted = 0;
/*
- * Steal memory for the kernel map entries.
- */
-
- kentry_data = pmap_steal_memory(kentry_data_size);
-
- /*
* Allocate (and initialize) the virtual-to-physical
* table hash buckets.
*
@@ -312,7 +306,7 @@ vm_offset_t pmap_steal_memory(
void vm_page_module_init(void)
{
kmem_cache_init(&vm_page_cache, "vm_page", sizeof(struct vm_page), 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
}
/*