path: root/kern
author    Richard Braun <rbraun@sceen.net>  2016-02-02 03:30:34 +0100
committer Richard Braun <rbraun@sceen.net>  2016-02-02 03:58:19 +0100
commit    5e9f6f52451ccb768d875370bf1769b27ff0041c (patch)
tree      d556eb00b4ce74e08ddf0f8b34a64cd9f3932067 /kern
parent    945f51bfe865e122d73986dd8219762450ffc0f3 (diff)
Stack the slab allocator directly on top of the physical allocator
In order to increase the amount of memory available for kernel objects,
without reducing the amount of memory available for user processes, a new
allocation strategy is introduced in this change. Instead of allocating
kernel objects out of kernel virtual memory, the slab allocator directly
uses the direct mapping of physical memory as its backend. This largely
increases the kernel heap and removes the need for address translation
updates. In order to allow this strategy, an assumption made by the
interrupt code had to be removed. In addition, kernel stacks are now also
allocated directly from the physical allocator. (A sketch of the resulting
allocation path follows the change log below.)

* i386/i386/db_trace.c: Include i386at/model_dep.h.
(db_i386_reg_value): Update stack check.
* i386/i386/locore.S (trap_from_kernel, all_intrs, int_from_intstack):
Update interrupt handling.
* i386/i386at/model_dep.c: Include kern/macros.h.
(int_stack, int_stack_base): New variables.
(int_stack_high): Remove variable.
(i386at_init): Update interrupt stack initialization.
* i386/i386at/model_dep.h: Include i386/vm_param.h.
(int_stack_top, int_stack_base): New extern declarations.
(ON_INT_STACK): New macro.
* kern/slab.c: Include vm/vm_page.h.
(KMEM_CF_NO_CPU_POOL, KMEM_CF_NO_RECLAIM): Remove macros.
(kmem_pagealloc, kmem_pagefree, kalloc_pagealloc, kalloc_pagefree):
Remove functions.
(kmem_slab_create): Allocate slab pages directly from the physical
allocator.
(kmem_slab_destroy): Release slab pages directly to the physical
allocator.
(kmem_cache_compute_sizes): Update the slab size computation algorithm
to return a power-of-two suitable for the physical allocator.
(kmem_cache_init): Remove custom allocation function pointers.
(kmem_cache_reap): Remove check on KMEM_CF_NO_RECLAIM.
(slab_init, kalloc_init): Update calls to kmem_cache_init.
(kalloc, kfree): Directly fall back on the physical allocator for big
allocation sizes.
(host_slab_info): Remove checks on defunct flags.
* kern/slab.h (kmem_slab_alloc_fn_t, kmem_slab_free_fn_t): Remove types.
(struct kmem_cache): Add `slab_order' member, remove `slab_alloc_fn'
and `slab_free_fn' members.
(KMEM_CACHE_NOCPUPOOL, KMEM_CACHE_NORECLAIM): Remove macros.
(kmem_cache_init): Update prototype, remove custom allocation functions.
* kern/thread.c (stack_alloc): Allocate stacks from the physical
allocator.
* vm/vm_map.c (vm_map_kentry_cache, kentry_data, kentry_data_size):
Remove variables.
(kentry_pagealloc): Remove function.
(vm_map_init): Update calls to kmem_cache_init, remove initialization
of vm_map_kentry_cache.
(vm_map_create, _vm_map_entry_dispose, vm_map_copyout): Unconditionally
use vm_map_entry_cache.
* vm/vm_map.h (kentry_data, kentry_data_size, kentry_count): Remove
extern declarations.
* vm/vm_page.h (VM_PT_STACK): New page type.
* device/dev_lookup.c (dev_lookup_init): Update calls to
kmem_cache_init.
* device/dev_pager.c (dev_pager_hash_init, device_pager_init): Likewise.
* device/ds_routines.c (mach_device_init, mach_device_trap_init):
Likewise.
* device/net_io.c (net_io_init): Likewise.
* i386/i386/fpu.c (fpu_module_init): Likewise.
* i386/i386/machine_task.c (machine_task_module_init): Likewise.
* i386/i386/pcb.c (pcb_module_init): Likewise.
* i386/intel/pmap.c (pmap_init): Likewise.
* ipc/ipc_init.c (ipc_bootstrap): Likewise.
* ipc/ipc_marequest.c (ipc_marequest_init): Likewise.
* kern/act.c (global_act_init): Likewise.
* kern/processor.c (pset_sys_init): Likewise.
* kern/rdxtree.c (rdxtree_cache_init): Likewise.
* kern/task.c (task_init): Likewise.
* vm/memory_object_proxy.c (memory_object_proxy_init): Likewise.
* vm/vm_external.c (vm_external_module_initialize): Likewise.
* vm/vm_fault.c (vm_fault_init): Likewise.
* vm/vm_object.c (vm_object_bootstrap): Likewise.
* vm/vm_resident.c (vm_page_module_init): Likewise.
(vm_page_bootstrap): Remove initialization of kentry_data.
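
Sketch of the new allocation path: under this strategy, backing memory for
kernel objects reduces to grabbing 2^order contiguous pages from the
direct-mapped physical segment and converting the physical address to a
kernel virtual address arithmetically. This is a minimal sketch, assuming
vm_page_alloc_pa(), vm_page_to_pa(), phystokv(), kvtophys() and
vm_page_lookup_pa() behave as they are used in the hunks below; the
example_* names are hypothetical, not functions from this change.

    #include <vm/vm_page.h>   /* as included by kern/slab.c below */

    /* Allocate 2^order contiguous physical pages from the direct-mapped
     * segment and return their kernel virtual address, or NULL on
     * failure, mirroring kmem_slab_create() below. */
    static void * example_kmem_pages_alloc(unsigned int order)
    {
        struct vm_page *page;

        page = vm_page_alloc_pa(order, VM_PAGE_SEL_DIRECTMAP, VM_PT_KMEM);

        if (page == NULL)
            return NULL;

        /* The direct mapping makes this a pure arithmetic conversion;
         * no address translation update is needed. */
        return (void *)phystokv(vm_page_to_pa(page));
    }

    /* Reverse path, mirroring kmem_slab_destroy() and kfree() below. */
    static void example_kmem_pages_free(void *addr, unsigned int order)
    {
        struct vm_page *page;

        page = vm_page_lookup_pa(kvtophys((vm_offset_t)addr));
        assert(page != NULL);
        vm_page_free_pa(page, order);
    }
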
Diffstat (limited to 'kern')
-rw-r--r--  kern/act.c          2
-rw-r--r--  kern/processor.c    2
-rw-r--r--  kern/rdxtree.c      2
-rw-r--r--  kern/slab.c       140
-rw-r--r--  kern/slab.h        23
-rw-r--r--  kern/task.c         2
-rw-r--r--  kern/thread.c      14
7 files changed, 65 insertions, 120 deletions
diff --git a/kern/act.c b/kern/act.c
index 3186f7e9..3819ef32 100644
--- a/kern/act.c
+++ b/kern/act.c
@@ -68,7 +68,7 @@ global_act_init(void)
{
#ifndef ACT_STATIC_KLUDGE
kmem_cache_init(&act_cache, "Act", sizeof(struct Act), 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
#else
int i;
diff --git a/kern/processor.c b/kern/processor.c
index 48e92731..0a88469b 100644
--- a/kern/processor.c
+++ b/kern/processor.c
@@ -109,7 +109,7 @@ void pset_sys_init(void)
* Allocate the cache for processor sets.
*/
kmem_cache_init(&pset_cache, "processor_set",
- sizeof(struct processor_set), 0, NULL, NULL, NULL, 0);
+ sizeof(struct processor_set), 0, NULL, 0);
/*
* Give each processor a control port.
diff --git a/kern/rdxtree.c b/kern/rdxtree.c
index 78868b1f..a23d6e7e 100644
--- a/kern/rdxtree.c
+++ b/kern/rdxtree.c
@@ -124,7 +124,7 @@ void
rdxtree_cache_init(void)
{
kmem_cache_init(&rdxtree_node_cache, "rdxtree_node",
- sizeof(struct rdxtree_node), 0, NULL, NULL, NULL, 0);
+ sizeof(struct rdxtree_node), 0, NULL, 0);
}
#ifdef RDXTREE_ENABLE_NODE_CREATION_FAILURES
diff --git a/kern/slab.c b/kern/slab.c
index 8bc1b06b..a887cbb5 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -87,6 +87,7 @@
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
#include <vm/vm_types.h>
#include <sys/types.h>
@@ -217,9 +218,7 @@
*
* The flags don't change once set and can be tested without locking.
*/
-#define KMEM_CF_NO_CPU_POOL 0x01 /* CPU pool layer disabled */
#define KMEM_CF_SLAB_EXTERNAL 0x02 /* Slab data is off slab */
-#define KMEM_CF_NO_RECLAIM 0x04 /* Slabs are not reclaimable */
#define KMEM_CF_VERIFY 0x08 /* Debugging facilities enabled */
#define KMEM_CF_DIRECT 0x10 /* No buf-to-slab tree lookup */
@@ -384,24 +383,6 @@ static inline void * kmem_bufctl_to_buf(union kmem_bufctl *bufctl,
return (void *)bufctl - cache->bufctl_dist;
}
-static vm_offset_t kmem_pagealloc(vm_size_t size)
-{
- vm_offset_t addr;
- kern_return_t kr;
-
- kr = kmem_alloc_wired(kmem_map, &addr, size);
-
- if (kr != KERN_SUCCESS)
- return 0;
-
- return addr;
-}
-
-static void kmem_pagefree(vm_offset_t ptr, vm_size_t size)
-{
- kmem_free(kmem_map, ptr, size);
-}
-
static void kmem_slab_create_verify(struct kmem_slab *slab,
struct kmem_cache *cache)
{
@@ -430,30 +411,27 @@ static void kmem_slab_create_verify(struct kmem_slab *slab,
static struct kmem_slab * kmem_slab_create(struct kmem_cache *cache,
size_t color)
{
+ struct vm_page *page;
struct kmem_slab *slab;
union kmem_bufctl *bufctl;
size_t buf_size;
unsigned long buffers;
void *slab_buf;
- if (cache->slab_alloc_fn == NULL)
- slab_buf = (void *)kmem_pagealloc(cache->slab_size);
- else
- slab_buf = (void *)cache->slab_alloc_fn(cache->slab_size);
+ page = vm_page_alloc_pa(cache->slab_order,
+ VM_PAGE_SEL_DIRECTMAP,
+ VM_PT_KMEM);
- if (slab_buf == NULL)
+ if (page == NULL)
return NULL;
+ slab_buf = (void *)phystokv(vm_page_to_pa(page));
+
if (cache->flags & KMEM_CF_SLAB_EXTERNAL) {
- assert(!(cache->flags & KMEM_CF_NO_RECLAIM));
slab = (struct kmem_slab *)kmem_cache_alloc(&kmem_slab_cache);
if (slab == NULL) {
- if (cache->slab_free_fn == NULL)
- kmem_pagefree((vm_offset_t)slab_buf, cache->slab_size);
- else
- cache->slab_free_fn((vm_offset_t)slab_buf, cache->slab_size);
-
+ vm_page_free_pa(page, cache->slab_order);
return NULL;
}
} else {
@@ -514,21 +492,19 @@ static void kmem_slab_destroy_verify(struct kmem_slab *slab,
*/
static void kmem_slab_destroy(struct kmem_slab *slab, struct kmem_cache *cache)
{
+ struct vm_page *page;
vm_offset_t slab_buf;
assert(slab->nr_refs == 0);
assert(slab->first_free != NULL);
- assert(!(cache->flags & KMEM_CF_NO_RECLAIM));
if (cache->flags & KMEM_CF_VERIFY)
kmem_slab_destroy_verify(slab, cache);
slab_buf = (vm_offset_t)P2ALIGN((unsigned long)slab->addr, PAGE_SIZE);
-
- if (cache->slab_free_fn == NULL)
- kmem_pagefree(slab_buf, cache->slab_size);
- else
- cache->slab_free_fn(slab_buf, cache->slab_size);
+ page = vm_page_lookup_pa(kvtophys(slab_buf));
+ assert(page != NULL);
+ vm_page_free_pa(page, cache->slab_order);
if (cache->flags & KMEM_CF_SLAB_EXTERNAL)
kmem_cache_free(&kmem_slab_cache, (vm_offset_t)slab);
@@ -702,9 +678,10 @@ static void kmem_cache_error(struct kmem_cache *cache, void *buf, int error,
*/
static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
{
- size_t i, buffers, buf_size, slab_size, free_slab_size, optimal_size = 0;
- size_t waste, waste_min;
- int embed, optimal_embed = 0;
+ size_t i, buffers, buf_size, slab_size, free_slab_size;
+ size_t waste, waste_min, optimal_size = optimal_size;
+ int embed, optimal_embed = optimal_embed;
+ unsigned int slab_order, optimal_order = optimal_order;
buf_size = cache->buf_size;
@@ -716,7 +693,9 @@ static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
do {
i++;
- slab_size = P2ROUND(i * buf_size, PAGE_SIZE);
+
+ slab_order = vm_page_order(i * buf_size);
+ slab_size = PAGE_SIZE << slab_order;
free_slab_size = slab_size;
if (flags & KMEM_CACHE_NOOFFSLAB)
@@ -739,19 +718,19 @@ static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
if (waste <= waste_min) {
waste_min = waste;
+ optimal_order = slab_order;
optimal_size = slab_size;
optimal_embed = embed;
}
} while ((buffers < KMEM_MIN_BUFS_PER_SLAB)
&& (slab_size < KMEM_SLAB_SIZE_THRESHOLD));
- assert(optimal_size > 0);
assert(!(flags & KMEM_CACHE_NOOFFSLAB) || optimal_embed);
+ cache->slab_order = optimal_order;
cache->slab_size = optimal_size;
- slab_size = cache->slab_size - (optimal_embed
- ? sizeof(struct kmem_slab)
- : 0);
+ slab_size = cache->slab_size
+ - (optimal_embed ? sizeof(struct kmem_slab) : 0);
cache->bufs_per_slab = slab_size / buf_size;
cache->color_max = slab_size % buf_size;
@@ -767,9 +746,8 @@ static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
}
void kmem_cache_init(struct kmem_cache *cache, const char *name,
- size_t obj_size, size_t align, kmem_cache_ctor_t ctor,
- kmem_slab_alloc_fn_t slab_alloc_fn,
- kmem_slab_free_fn_t slab_free_fn, int flags)
+ size_t obj_size, size_t align,
+ kmem_cache_ctor_t ctor, int flags)
{
#if SLAB_USE_CPU_POOLS
struct kmem_cpu_pool_type *cpu_pool_type;
@@ -783,15 +761,6 @@ void kmem_cache_init(struct kmem_cache *cache, const char *name,
cache->flags = 0;
#endif /* SLAB_VERIFY */
- if (flags & KMEM_CACHE_NOCPUPOOL)
- cache->flags |= KMEM_CF_NO_CPU_POOL;
-
- if (flags & KMEM_CACHE_NORECLAIM) {
- assert(slab_free_fn == NULL);
- flags |= KMEM_CACHE_NOOFFSLAB;
- cache->flags |= KMEM_CF_NO_RECLAIM;
- }
-
if (flags & KMEM_CACHE_VERIFY)
cache->flags |= KMEM_CF_VERIFY;
@@ -819,8 +788,6 @@ void kmem_cache_init(struct kmem_cache *cache, const char *name,
cache->nr_slabs = 0;
cache->nr_free_slabs = 0;
cache->ctor = ctor;
- cache->slab_alloc_fn = slab_alloc_fn;
- cache->slab_free_fn = slab_free_fn;
strncpy(cache->name, name, sizeof(cache->name));
cache->name[sizeof(cache->name) - 1] = '\0';
cache->buftag_dist = 0;
@@ -908,9 +875,6 @@ static void kmem_cache_reap(struct kmem_cache *cache)
struct list dead_slabs;
unsigned long nr_free_slabs;
- if (cache->flags & KMEM_CF_NO_RECLAIM)
- return;
-
simple_lock(&cache->lock);
list_set_head(&dead_slabs, &cache->free_slabs);
list_init(&cache->free_slabs);
@@ -1297,7 +1261,7 @@ void slab_init(void)
sprintf(name, "kmem_cpu_array_%d", cpu_pool_type->array_size);
size = sizeof(void *) * cpu_pool_type->array_size;
kmem_cache_init(cpu_pool_type->array_cache, name, size,
- cpu_pool_type->array_align, NULL, NULL, NULL, 0);
+ cpu_pool_type->array_align, NULL, 0);
}
#endif /* SLAB_USE_CPU_POOLS */
@@ -1305,25 +1269,7 @@ void slab_init(void)
* Prevent off slab data for the slab cache to avoid infinite recursion.
*/
kmem_cache_init(&kmem_slab_cache, "kmem_slab", sizeof(struct kmem_slab),
- 0, NULL, NULL, NULL, KMEM_CACHE_NOOFFSLAB);
-}
-
-static vm_offset_t kalloc_pagealloc(vm_size_t size)
-{
- vm_offset_t addr;
- kern_return_t kr;
-
- kr = kmem_alloc_wired(kmem_map, &addr, size);
-
- if (kr != KERN_SUCCESS)
- return 0;
-
- return addr;
-}
-
-static void kalloc_pagefree(vm_offset_t ptr, vm_size_t size)
-{
- kmem_free(kmem_map, ptr, size);
+ 0, NULL, KMEM_CACHE_NOOFFSLAB);
}
void kalloc_init(void)
@@ -1335,8 +1281,7 @@ void kalloc_init(void)
for (i = 0; i < ARRAY_SIZE(kalloc_caches); i++) {
sprintf(name, "kalloc_%lu", size);
- kmem_cache_init(&kalloc_caches[i], name, size, 0, NULL,
- kalloc_pagealloc, kalloc_pagefree, 0);
+ kmem_cache_init(&kalloc_caches[i], name, size, 0, NULL, 0);
size <<= 1;
}
}
@@ -1387,8 +1332,18 @@ vm_offset_t kalloc(vm_size_t size)
if ((buf != 0) && (cache->flags & KMEM_CF_VERIFY))
kalloc_verify(cache, buf, size);
- } else
- buf = (void *)kalloc_pagealloc(size);
+ } else {
+ struct vm_page *page;
+
+ page = vm_page_alloc_pa(vm_page_order(size),
+ VM_PAGE_SEL_DIRECTMAP,
+ VM_PT_KERNEL);
+
+ if (page == NULL)
+ return 0;
+
+ buf = (void *)phystokv(vm_page_to_pa(page));
+ }
return (vm_offset_t)buf;
}
@@ -1429,7 +1384,10 @@ void kfree(vm_offset_t data, vm_size_t size)
kmem_cache_free(cache, data);
} else {
- kalloc_pagefree(data, size);
+ struct vm_page *page;
+
+ page = vm_page_lookup_pa(kvtophys(data));
+ vm_page_free_pa(page, vm_page_order(size));
}
}
@@ -1529,12 +1487,8 @@ kern_return_t host_slab_info(host_t host, cache_info_array_t *infop,
list_for_each_entry(&kmem_cache_list, cache, node) {
simple_lock(&cache->lock);
- info[i].flags = ((cache->flags & KMEM_CF_NO_CPU_POOL)
- ? CACHE_FLAGS_NO_CPU_POOL : 0)
- | ((cache->flags & KMEM_CF_SLAB_EXTERNAL)
- ? CACHE_FLAGS_SLAB_EXTERNAL : 0)
- | ((cache->flags & KMEM_CF_NO_RECLAIM)
- ? CACHE_FLAGS_NO_RECLAIM : 0)
+ info[i].flags = ((cache->flags & KMEM_CF_SLAB_EXTERNAL)
+ ? CACHE_FLAGS_SLAB_EXTERNAL : 0)
| ((cache->flags & KMEM_CF_VERIFY)
? CACHE_FLAGS_VERIFY : 0)
| ((cache->flags & KMEM_CF_DIRECT)
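
Note on the size computation above: kmem_cache_compute_sizes() now selects
a power-of-two slab size expressed as an allocation order. A rough sketch
of what the loop relies on, assuming vm_page_order() returns the smallest
order whose size covers the request; example_vm_page_order is a
hypothetical stand-in, not the real implementation.

    /* Smallest order such that (PAGE_SIZE << order) >= size, as the
     * loop in kmem_cache_compute_sizes() assumes. */
    static unsigned int example_vm_page_order(size_t size)
    {
        unsigned int order = 0;

        while (((size_t)PAGE_SIZE << order) < size)
            order++;

        return order;
    }

    /* E.g. with 4 KiB pages, a 9 KiB request yields order 2, i.e. a
     * 16 KiB slab (PAGE_SIZE << 2). */
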
diff --git a/kern/slab.h b/kern/slab.h
index 5ff3960e..1ad24d63 100644
--- a/kern/slab.h
+++ b/kern/slab.h
@@ -137,14 +137,6 @@ struct kmem_slab {
typedef void (*kmem_cache_ctor_t)(void *obj);
/*
- * Types for slab allocation/free functions.
- *
- * All addresses and sizes must be page-aligned.
- */
-typedef vm_offset_t (*kmem_slab_alloc_fn_t)(vm_size_t);
-typedef void (*kmem_slab_free_fn_t)(vm_offset_t, vm_size_t);
-
-/*
* Cache name buffer size. The size is chosen so that struct
* kmem_cache fits into two cache lines. The size of a cache line on
* a typical CPU is 64 bytes.
@@ -175,6 +167,7 @@ struct kmem_cache {
struct rbtree active_slabs;
int flags;
size_t bufctl_dist; /* Distance from buffer to bufctl */
+ unsigned int slab_order;
size_t slab_size;
unsigned long bufs_per_slab;
unsigned long nr_objs; /* Number of allocated objects */
@@ -189,8 +182,6 @@ struct kmem_cache {
size_t color_max;
unsigned long nr_bufs; /* Total number of buffers */
unsigned long nr_slabs;
- kmem_slab_alloc_fn_t slab_alloc_fn;
- kmem_slab_free_fn_t slab_free_fn;
char name[KMEM_CACHE_NAME_SIZE];
size_t buftag_dist; /* Distance from buffer to buftag */
size_t redzone_pad; /* Bytes from end of object to redzone word */
@@ -210,19 +201,15 @@ extern vm_map_t kmem_map;
/*
* Cache initialization flags.
*/
-#define KMEM_CACHE_NOCPUPOOL 0x1 /* Don't use the per-cpu pools */
-#define KMEM_CACHE_NOOFFSLAB 0x2 /* Don't allocate external slab data */
-#define KMEM_CACHE_NORECLAIM 0x4 /* Never give slabs back to their source,
- implies KMEM_CACHE_NOOFFSLAB */
-#define KMEM_CACHE_VERIFY 0x8 /* Use debugging facilities */
+#define KMEM_CACHE_NOOFFSLAB 0x1 /* Don't allocate external slab data */
+#define KMEM_CACHE_VERIFY 0x2 /* Use debugging facilities */
/*
* Initialize a cache.
*/
void kmem_cache_init(struct kmem_cache *cache, const char *name,
- size_t obj_size, size_t align, kmem_cache_ctor_t ctor,
- kmem_slab_alloc_fn_t slab_alloc_fn,
- kmem_slab_free_fn_t slab_free_fn, int flags);
+ size_t obj_size, size_t align,
+ kmem_cache_ctor_t ctor, int flags);
/*
* Allocate an object from a cache.
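
With the custom allocation hooks gone, every caller uses the shortened
kmem_cache_init() signature shown above. A minimal usage sketch, matching
the updated prototype; struct example_obj and example_cache are
hypothetical names for illustration only.

    struct example_obj {
        int field;
    };

    static struct kmem_cache example_cache;

    void example_cache_setup(void)
    {
        /* name, object size, alignment (0 = default), constructor
         * (NULL = none), flags -- the slab_alloc_fn/slab_free_fn
         * parameters no longer exist. */
        kmem_cache_init(&example_cache, "example_obj",
                        sizeof(struct example_obj), 0, NULL, 0);
    }
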
diff --git a/kern/task.c b/kern/task.c
index e9e6ba24..0f24e44d 100644
--- a/kern/task.c
+++ b/kern/task.c
@@ -63,7 +63,7 @@ ipc_port_t new_task_notification = NULL;
void task_init(void)
{
kmem_cache_init(&task_cache, "task", sizeof(struct task), 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
eml_init();
machine_task_module_init ();
diff --git a/kern/thread.c b/kern/thread.c
index 3e900792..91e08697 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -195,16 +195,20 @@ kern_return_t stack_alloc(
(void) splx(s);
if (stack == 0) {
- kern_return_t kr;
+ struct vm_page *page;
+
/*
* Kernel stacks should be naturally aligned,
* so that it is easy to find the starting/ending
* addresses of a stack given an address in the middle.
*/
- kr = kmem_alloc_aligned(kmem_map, &stack, KERNEL_STACK_SIZE);
- if (kr != KERN_SUCCESS)
- return kr;
+ page = vm_page_alloc_pa(vm_page_order(KERNEL_STACK_SIZE),
+ VM_PAGE_SEL_DIRECTMAP,
+ VM_PT_STACK);
+ if (page == NULL)
+ return KERN_RESOURCE_SHORTAGE;
+ stack = phystokv(vm_page_to_pa(page));
#if MACH_DEBUG
stack_init(stack);
#endif /* MACH_DEBUG */
@@ -298,7 +302,7 @@ void stack_privilege(
void thread_init(void)
{
kmem_cache_init(&thread_cache, "thread", sizeof(struct thread), 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
/*
* Fill in a template thread for fast initialization.
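
Note on the stack_alloc() change above: the kmem_alloc_aligned() call
could be dropped because blocks returned by the physical allocator are
naturally aligned to their order (an assumption of this note, consistent
with the comment kept in the hunk). That alignment is what keeps stack
base recovery a simple mask; a hypothetical sketch, assuming
KERNEL_STACK_SIZE is a power of two:

    /* Recover the base of a kernel stack from any address inside it. */
    static vm_offset_t example_stack_base(vm_offset_t addr)
    {
        return addr & ~((vm_offset_t)KERNEL_STACK_SIZE - 1);
    }
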