author    Richard Braun <rbraun@sceen.net>  2016-02-02 03:30:34 +0100
committer Richard Braun <rbraun@sceen.net>  2016-02-02 03:58:19 +0100
commit    5e9f6f52451ccb768d875370bf1769b27ff0041c (patch)
tree      d556eb00b4ce74e08ddf0f8b34a64cd9f3932067 /vm
parent    945f51bfe865e122d73986dd8219762450ffc0f3 (diff)
Stack the slab allocator directly on top of the physical allocator

In order to increase the amount of memory available for kernel objects,
without reducing the amount of memory available for user processes, a new
allocation strategy is introduced in this change. Instead of allocating
kernel objects out of kernel virtual memory, the slab allocator directly
uses the direct mapping of physical memory as its backend. This largely
increases the kernel heap and removes the need for address translation
updates. In order to allow this strategy, an assumption made by the
interrupt code had to be removed. In addition, kernel stacks are now also
allocated directly from the physical allocator.

* i386/i386/db_trace.c: Include i386at/model_dep.h.
(db_i386_reg_value): Update stack check.
* i386/i386/locore.S (trap_from_kernel, all_intrs, int_from_intstack):
Update interrupt handling.
* i386/i386at/model_dep.c: Include kern/macros.h.
(int_stack, int_stack_base): New variables.
(int_stack_high): Remove variable.
(i386at_init): Update interrupt stack initialization.
* i386/i386at/model_dep.h: Include i386/vm_param.h.
(int_stack_top, int_stack_base): New extern declarations.
(ON_INT_STACK): New macro.
* kern/slab.c: Include vm/vm_page.h.
(KMEM_CF_NO_CPU_POOL, KMEM_CF_NO_RECLAIM): Remove macros.
(kmem_pagealloc, kmem_pagefree, kalloc_pagealloc, kalloc_pagefree):
Remove functions.
(kmem_slab_create): Allocate slab pages directly from the physical
allocator.
(kmem_slab_destroy): Release slab pages directly to the physical
allocator.
(kmem_cache_compute_sizes): Update the slab size computation algorithm
to return a power-of-two suitable for the physical allocator.
(kmem_cache_init): Remove custom allocation function pointers.
(kmem_cache_reap): Remove check on KMEM_CF_NO_RECLAIM.
(slab_init, kalloc_init): Update calls to kmem_cache_init.
(kalloc, kfree): Directly fall back on the physical allocator for big
allocation sizes.
(host_slab_info): Remove checks on defunct flags.
* kern/slab.h (kmem_slab_alloc_fn_t, kmem_slab_free_fn_t): Remove types.
(struct kmem_cache): Add `slab_order' member, remove `slab_alloc_fn'
and `slab_free_fn' members.
(KMEM_CACHE_NOCPUPOOL, KMEM_CACHE_NORECLAIM): Remove macros.
(kmem_cache_init): Update prototype, remove custom allocation
functions.
* kern/thread.c (stack_alloc): Allocate stacks from the physical
allocator.
* vm/vm_map.c (vm_map_kentry_cache, kentry_data, kentry_data_size):
Remove variables.
(kentry_pagealloc): Remove function.
(vm_map_init): Update calls to kmem_cache_init, remove initialization
of vm_map_kentry_cache.
(_vm_map_entry_create, _vm_map_entry_dispose, vm_map_copyout):
Unconditionally use vm_map_entry_cache.
* vm/vm_map.h (kentry_data, kentry_data_size, kentry_count): Remove
extern declarations.
* vm/vm_page.h (VM_PT_STACK): New page type.
* device/dev_lookup.c (dev_lookup_init): Update calls to
kmem_cache_init.
* device/dev_pager.c (dev_pager_hash_init, device_pager_init): Likewise.
* device/ds_routines.c (mach_device_init, mach_device_trap_init):
Likewise.
* device/net_io.c (net_io_init): Likewise.
* i386/i386/fpu.c (fpu_module_init): Likewise.
* i386/i386/machine_task.c (machine_task_module_init): Likewise.
* i386/i386/pcb.c (pcb_module_init): Likewise.
* i386/intel/pmap.c (pmap_init): Likewise.
* ipc/ipc_init.c (ipc_bootstrap): Likewise.
* ipc/ipc_marequest.c (ipc_marequest_init): Likewise.
* kern/act.c (global_act_init): Likewise.
* kern/processor.c (pset_sys_init): Likewise.
* kern/rdxtree.c (rdxtree_cache_init): Likewise.
* kern/task.c (task_init): Likewise.
* vm/memory_object_proxy.c (memory_object_proxy_init): Likewise.
* vm/vm_external.c (vm_external_module_initialize): Likewise.
* vm/vm_fault.c (vm_fault_init): Likewise.
* vm/vm_object.c (vm_object_bootstrap): Likewise.
* vm/vm_resident.c (vm_page_module_init): Likewise.
(vm_page_bootstrap): Remove initialization of kentry_data.
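
The visible effect at every call site is kmem_cache_init losing its two
slab page allocation hooks. A minimal sketch of the interface change,
with parameter roles inferred from the call sites in this diff and the
kern/slab.h notes above (the exact prototype is not shown here):

    /* Before: callers could pass custom slab page alloc/free hooks,
     * used e.g. by the kernel map entry cache to avoid recursion. */
    kmem_cache_init(&vm_map_cache, "vm_map", sizeof(struct vm_map),
                    0 /* align */, NULL /* ctor */,
                    NULL /* slab_alloc_fn */, NULL /* slab_free_fn */,
                    0 /* flags */);

    /* After: slab pages always come straight from the physical
     * allocator, so the two hook parameters (and the NULL
     * placeholders they required) are gone. */
    kmem_cache_init(&vm_map_cache, "vm_map", sizeof(struct vm_map),
                    0 /* align */, NULL /* ctor */, 0 /* flags */);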
Diffstat (limited to 'vm')
-rw-r--r--  vm/memory_object_proxy.c |  2
-rw-r--r--  vm/vm_external.c         |  6
-rw-r--r--  vm/vm_fault.c            |  2
-rw-r--r--  vm/vm_map.c              | 60
-rw-r--r--  vm/vm_map.h              |  3
-rw-r--r--  vm/vm_object.c           |  2
-rw-r--r--  vm/vm_page.h             |  3
-rw-r--r--  vm/vm_resident.c         |  8
8 files changed, 16 insertions(+), 70 deletions(-)
diff --git a/vm/memory_object_proxy.c b/vm/memory_object_proxy.c
index a64bfcce..01bce2a5 100644
--- a/vm/memory_object_proxy.c
+++ b/vm/memory_object_proxy.c
@@ -64,7 +64,7 @@ void
memory_object_proxy_init (void)
{
kmem_cache_init (&memory_object_proxy_cache, "memory_object_proxy",
- sizeof (struct memory_object_proxy), 0, NULL, NULL, NULL, 0);
+ sizeof (struct memory_object_proxy), 0, NULL, 0);
}
/* Lookup a proxy memory object by its port. */
diff --git a/vm/vm_external.c b/vm/vm_external.c
index 2e2593b1..097a9b12 100644
--- a/vm/vm_external.c
+++ b/vm/vm_external.c
@@ -138,13 +138,13 @@ void vm_external_module_initialize(void)
vm_size_t size = (vm_size_t) sizeof(struct vm_external);
kmem_cache_init(&vm_external_cache, "vm_external", size, 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
kmem_cache_init(&vm_object_small_existence_map_cache,
"small_existence_map", SMALL_SIZE, 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
kmem_cache_init(&vm_object_large_existence_map_cache,
"large_existence_map", LARGE_SIZE, 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
}
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
index 4d674174..09e2c54d 100644
--- a/vm/vm_fault.c
+++ b/vm/vm_fault.c
@@ -105,7 +105,7 @@ extern struct db_watchpoint *db_watchpoint_list;
void vm_fault_init(void)
{
kmem_cache_init(&vm_fault_state_cache, "vm_fault_state",
- sizeof(vm_fault_state_t), 0, NULL, NULL, NULL, 0);
+ sizeof(vm_fault_state_t), 0, NULL, 0);
}
/*
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 3a231de0..0d610621 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -126,7 +126,6 @@ MACRO_END
struct kmem_cache vm_map_cache; /* cache for vm_map structures */
struct kmem_cache vm_map_entry_cache; /* cache for vm_map_entry structures */
-struct kmem_cache vm_map_kentry_cache; /* cache for kernel entry structures */
struct kmem_cache vm_map_copy_cache; /* cache for vm_map_copy structures */
/*
@@ -151,43 +150,16 @@ vm_object_t vm_submap_object = &vm_submap_object_store;
*
* vm_map_cache: used to allocate maps.
* vm_map_entry_cache: used to allocate map entries.
- * vm_map_kentry_cache: used to allocate map entries for the kernel.
- *
- * Kernel map entries are allocated from a special cache, using a custom
- * page allocation function to avoid recursion. It would be difficult
- * (perhaps impossible) for the kernel to allocate more memory to an entry
- * cache when it became empty since the very act of allocating memory
- * implies the creation of a new entry.
*/
-vm_offset_t kentry_data;
-vm_size_t kentry_data_size = KENTRY_DATA_SIZE;
-
-static vm_offset_t kentry_pagealloc(vm_size_t size)
-{
- vm_offset_t result;
-
- if (size > kentry_data_size)
- panic("vm_map: kentry memory exhausted");
-
- result = kentry_data;
- kentry_data += size;
- kentry_data_size -= size;
- return result;
-}
-
void vm_map_init(void)
{
kmem_cache_init(&vm_map_cache, "vm_map", sizeof(struct vm_map), 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
kmem_cache_init(&vm_map_entry_cache, "vm_map_entry",
- sizeof(struct vm_map_entry), 0, NULL, NULL, NULL, 0);
- kmem_cache_init(&vm_map_kentry_cache, "vm_map_kentry",
- sizeof(struct vm_map_entry), 0, NULL, kentry_pagealloc,
- NULL, KMEM_CACHE_NOCPUPOOL | KMEM_CACHE_NOOFFSLAB
- | KMEM_CACHE_NORECLAIM);
+ sizeof(struct vm_map_entry), 0, NULL, 0);
kmem_cache_init(&vm_map_copy_cache, "vm_map_copy",
- sizeof(struct vm_map_copy), 0, NULL, NULL, NULL, 0);
+ sizeof(struct vm_map_copy), 0, NULL, 0);
/*
* Submap object is initialized by vm_object_init.
@@ -261,15 +233,9 @@ vm_map_t vm_map_create(
vm_map_entry_t _vm_map_entry_create(map_header)
const struct vm_map_header *map_header;
{
- kmem_cache_t cache;
vm_map_entry_t entry;
- if (map_header->entries_pageable)
- cache = &vm_map_entry_cache;
- else
- cache = &vm_map_kentry_cache;
-
- entry = (vm_map_entry_t) kmem_cache_alloc(cache);
+ entry = (vm_map_entry_t) kmem_cache_alloc(&vm_map_entry_cache);
if (entry == VM_MAP_ENTRY_NULL)
panic("vm_map_entry_create");
@@ -291,14 +257,9 @@ void _vm_map_entry_dispose(map_header, entry)
const struct vm_map_header *map_header;
vm_map_entry_t entry;
{
- kmem_cache_t cache;
-
- if (map_header->entries_pageable)
- cache = &vm_map_entry_cache;
- else
- cache = &vm_map_kentry_cache;
+ (void)map_header;
- kmem_cache_free(cache, (vm_offset_t) entry);
+ kmem_cache_free(&vm_map_entry_cache, (vm_offset_t) entry);
}
/*
@@ -2539,15 +2500,8 @@ kern_return_t vm_map_copyout(
* Mismatches occur when dealing with the default
* pager.
*/
- kmem_cache_t old_cache;
vm_map_entry_t next, new;
- /*
- * Find the cache that the copies were allocated from
- */
- old_cache = (copy->cpy_hdr.entries_pageable)
- ? &vm_map_entry_cache
- : &vm_map_kentry_cache;
entry = vm_map_copy_first_entry(copy);
/*
@@ -2571,7 +2525,7 @@ kern_return_t vm_map_copyout(
vm_map_copy_last_entry(copy),
new);
next = entry->vme_next;
- kmem_cache_free(old_cache, (vm_offset_t) entry);
+ kmem_cache_free(&vm_map_entry_cache, (vm_offset_t) entry);
entry = next;
}
}
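
The kernel entry cache removed above existed to break a bootstrapping
cycle: growing the entry cache meant mapping new slab pages into the
kernel map, which itself consumed a map entry. A sketch of that cycle,
written as an illustrative call chain rather than actual GNU Mach code:

    /*
     * Why vm_map_kentry_cache existed (illustrative call chain):
     *
     *   kmem_cache_alloc(&vm_map_entry_cache)     cache empty, grow it
     *     -> allocate a slab from kernel virtual memory
     *       -> enter the mapping into the kernel map
     *         -> _vm_map_entry_create()           needs a map entry...
     *           -> kmem_cache_alloc(&vm_map_entry_cache)   ...recursion
     *
     * With slabs now taken straight from the physical allocator through
     * the direct mapping, growing a cache never touches the kernel map,
     * so a single vm_map_entry_cache suffices.
     */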
diff --git a/vm/vm_map.h b/vm/vm_map.h
index 9b31f90a..b4ba7c7b 100644
--- a/vm/vm_map.h
+++ b/vm/vm_map.h
@@ -363,9 +363,6 @@ MACRO_END
* Exported procedures that operate on vm_map_t.
*/
-extern vm_offset_t kentry_data;
-extern vm_size_t kentry_data_size;
-extern int kentry_count;
/* Initialize the module */
extern void vm_map_init(void);
diff --git a/vm/vm_object.c b/vm/vm_object.c
index eda03c65..ece3a83c 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -262,7 +262,7 @@ vm_object_t vm_object_allocate(
void vm_object_bootstrap(void)
{
kmem_cache_init(&vm_object_cache, "vm_object",
- sizeof(struct vm_object), 0, NULL, NULL, NULL, 0);
+ sizeof(struct vm_object), 0, NULL, 0);
queue_init(&vm_object_cached_list);
simple_lock_init(&vm_object_cached_lock_data);
diff --git a/vm/vm_page.h b/vm/vm_page.h
index 7607aad0..4e870d82 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -378,7 +378,8 @@ extern unsigned int vm_page_info(
#define VM_PT_TABLE 2 /* Page is part of the page table */
#define VM_PT_PMAP 3 /* Page stores pmap-specific data */
#define VM_PT_KMEM 4 /* Page is part of a kmem slab */
-#define VM_PT_KERNEL 5 /* Type for generic kernel allocations */
+#define VM_PT_STACK 5 /* Page is part of a kernel stack */
+#define VM_PT_KERNEL 6 /* Type for generic kernel allocations */
static inline unsigned short
vm_page_type(const struct vm_page *page)
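
The new VM_PT_STACK type tags pages backing kernel stacks, which the
changelog notes are now taken from the physical allocator in
kern/thread.c (stack_alloc). A hypothetical sketch of such an
allocation; the names vm_page_alloc_pa, vm_page_order,
VM_PAGE_SEL_DIRECTMAP, vm_page_to_pa and phystokv are assumed from the
imported physical allocator and do not appear in this diff:

    /* Hypothetical: grab naturally aligned physical pages, tagged
     * VM_PT_STACK, and reach them through the direct mapping: no
     * kernel map entry and no address translation update needed. */
    static vm_offset_t stack_alloc_sketch(void)
    {
        struct vm_page *page;

        page = vm_page_alloc_pa(vm_page_order(KERNEL_STACK_SIZE),
                                VM_PAGE_SEL_DIRECTMAP, VM_PT_STACK);
        if (page == NULL)
            return 0;    /* caller reports resource shortage */
        return phystokv(vm_page_to_pa(page));
    }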
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index 9fd64918..dd1cf9cd 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -193,12 +193,6 @@ void vm_page_bootstrap(
vm_page_free_wanted = 0;
/*
- * Steal memory for the kernel map entries.
- */
-
- kentry_data = pmap_steal_memory(kentry_data_size);
-
- /*
* Allocate (and initialize) the virtual-to-physical
* table hash buckets.
*
@@ -312,7 +306,7 @@ vm_offset_t pmap_steal_memory(
void vm_page_module_init(void)
{
kmem_cache_init(&vm_page_cache, "vm_page", sizeof(struct vm_page), 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
}
/*