author     Richard Braun <rbraun@sceen.net>    2016-02-02 23:17:20 +0100
committer  Richard Braun <rbraun@sceen.net>    2016-02-02 23:20:32 +0100
commit     44d78061e90e777b51cae8e01eda5c0d3ce63103 (patch)
tree       64af20619a7292834c9d66e8157e0301a8d62f0d /kern
parent     909167b9d05cf896f1e54122183ef8ee9ee70677 (diff)
Fix various memory management errors
A few errors were introduced in the latest changes.

o Add VM_PAGE_WAIT calls around physical allocation attempts in case of
  memory exhaustion.
o Fix stack release.
o Fix memory exhaustion report.
o Fix free page accounting.

* kern/slab.c (kmem_pagealloc, kmem_pagefree): New functions
(kmem_slab_create, kmem_slab_destroy, kalloc, kfree): Use kmem_pagealloc
and kmem_pagefree instead of the raw page allocation functions.
(kmem_cache_compute_sizes): Don't store slab order.
* kern/slab.h (struct kmem_cache): Remove `slab_order' member.
* kern/thread.c (stack_alloc): Call VM_PAGE_WAIT in case of memory
exhaustion.
(stack_collect): Call vm_page_free_contig instead of kmem_free to
release pages.
* vm/vm_page.c (vm_page_seg_alloc): Fix memory exhaustion report.
(vm_page_setup): Don't update vm_page_free_count.
(vm_page_free_pa): Check page parameter.
(vm_page_mem_free): New function.
* vm/vm_page.h (vm_page_free_count): Remove extern declaration.
(vm_page_mem_free): New prototype.
* vm/vm_pageout.c: Update comments not to refer to vm_page_free_count.
(vm_pageout_scan, vm_pageout_continue, vm_pageout): Use vm_page_mem_free
instead of vm_page_free_count, update types accordingly.
* vm/vm_resident.c (vm_page_free_count, vm_page_free_count_minimum):
Remove variables.
(vm_page_free_avail): New variable.
(vm_page_bootstrap, vm_page_grab, vm_page_release, vm_page_grab_contig,
vm_page_free_contig, vm_page_wait): Use vm_page_mem_free instead of
vm_page_free_count, update types accordingly, don't set
vm_page_free_count_minimum.
* vm/vm_user.c (vm_statistics): Likewise.
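The core of the fix is the allocate-or-wait loop now used for contiguous
physical pages: instead of failing outright when memory is exhausted, the
caller blocks in VM_PAGE_WAIT until pages are released, then retries. A
minimal sketch of that pattern, matching the kmem_pagealloc helper added
in the diff below (the comments here are explanatory and not part of the
commit):

    static struct vm_page *
    kmem_pagealloc(vm_size_t size)
    {
        struct vm_page *page;

        for (;;) {
            /* Try to grab physically contiguous pages from the direct map. */
            page = vm_page_grab_contig(size, VM_PAGE_SEL_DIRECTMAP);

            if (page != NULL)
                return page;

            /* Memory exhausted: sleep until pages are freed, then retry. */
            VM_PAGE_WAIT(NULL);
        }
    }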
Diffstat (limited to 'kern')
-rw-r--r--  kern/slab.c    39
-rw-r--r--  kern/slab.h     1
-rw-r--r--  kern/thread.c  28
3 files changed, 44 insertions, 24 deletions
diff --git a/kern/slab.c b/kern/slab.c
index a887cbb5..f1a534a8 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -383,6 +383,27 @@ static inline void * kmem_bufctl_to_buf(union kmem_bufctl *bufctl,
return (void *)bufctl - cache->bufctl_dist;
}
+static struct vm_page *
+kmem_pagealloc(vm_size_t size)
+{
+ struct vm_page *page;
+
+ for (;;) {
+ page = vm_page_grab_contig(size, VM_PAGE_SEL_DIRECTMAP);
+
+ if (page != NULL)
+ return page;
+
+ VM_PAGE_WAIT(NULL);
+ }
+}
+
+static void
+kmem_pagefree(struct vm_page *page, vm_size_t size)
+{
+ vm_page_free_contig(page, size);
+}
+
static void kmem_slab_create_verify(struct kmem_slab *slab,
struct kmem_cache *cache)
{
@@ -418,9 +439,7 @@ static struct kmem_slab * kmem_slab_create(struct kmem_cache *cache,
unsigned long buffers;
void *slab_buf;
- page = vm_page_alloc_pa(cache->slab_order,
- VM_PAGE_SEL_DIRECTMAP,
- VM_PT_KMEM);
+ page = kmem_pagealloc(cache->slab_size);
if (page == NULL)
return NULL;
@@ -431,7 +450,7 @@ static struct kmem_slab * kmem_slab_create(struct kmem_cache *cache,
slab = (struct kmem_slab *)kmem_cache_alloc(&kmem_slab_cache);
if (slab == NULL) {
- vm_page_free_pa(page, cache->slab_order);
+ kmem_pagefree(page, cache->slab_size);
return NULL;
}
} else {
@@ -504,7 +523,7 @@ static void kmem_slab_destroy(struct kmem_slab *slab, struct kmem_cache *cache)
slab_buf = (vm_offset_t)P2ALIGN((unsigned long)slab->addr, PAGE_SIZE);
page = vm_page_lookup_pa(kvtophys(slab_buf));
assert(page != NULL);
- vm_page_free_pa(page, cache->slab_order);
+ kmem_pagefree(page, cache->slab_size);
if (cache->flags & KMEM_CF_SLAB_EXTERNAL)
kmem_cache_free(&kmem_slab_cache, (vm_offset_t)slab);
@@ -681,7 +700,7 @@ static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
size_t i, buffers, buf_size, slab_size, free_slab_size;
size_t waste, waste_min, optimal_size = optimal_size;
int embed, optimal_embed = optimal_embed;
- unsigned int slab_order, optimal_order = optimal_order;
+ unsigned int slab_order;
buf_size = cache->buf_size;
@@ -718,7 +737,6 @@ static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
if (waste <= waste_min) {
waste_min = waste;
- optimal_order = slab_order;
optimal_size = slab_size;
optimal_embed = embed;
}
@@ -727,7 +745,6 @@ static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
assert(!(flags & KMEM_CACHE_NOOFFSLAB) || optimal_embed);
- cache->slab_order = optimal_order;
cache->slab_size = optimal_size;
slab_size = cache->slab_size
- (optimal_embed ? sizeof(struct kmem_slab) : 0);
@@ -1335,9 +1352,7 @@ vm_offset_t kalloc(vm_size_t size)
} else {
struct vm_page *page;
- page = vm_page_alloc_pa(vm_page_order(size),
- VM_PAGE_SEL_DIRECTMAP,
- VM_PT_KERNEL);
+ page = kmem_pagealloc(size);
if (page == NULL)
return 0;
@@ -1387,7 +1402,7 @@ void kfree(vm_offset_t data, vm_size_t size)
struct vm_page *page;
page = vm_page_lookup_pa(kvtophys(data));
- vm_page_free_pa(page, vm_page_order(size));
+ kmem_pagefree(page, size);
}
}
diff --git a/kern/slab.h b/kern/slab.h
index 1ad24d63..c50efd3d 100644
--- a/kern/slab.h
+++ b/kern/slab.h
@@ -167,7 +167,6 @@ struct kmem_cache {
struct rbtree active_slabs;
int flags;
size_t bufctl_dist; /* Distance from buffer to bufctl */
- unsigned int slab_order;
size_t slab_size;
unsigned long bufs_per_slab;
unsigned long nr_objs; /* Number of allocated objects */
diff --git a/kern/thread.c b/kern/thread.c
index 91e08697..d8d6af57 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -197,16 +197,19 @@ kern_return_t stack_alloc(
if (stack == 0) {
struct vm_page *page;
- /*
- * Kernel stacks should be naturally aligned,
- * so that it is easy to find the starting/ending
- * addresses of a stack given an address in the middle.
- */
- page = vm_page_alloc_pa(vm_page_order(KERNEL_STACK_SIZE),
- VM_PAGE_SEL_DIRECTMAP,
- VM_PT_STACK);
- if (page == NULL)
- return KERN_RESOURCE_SHORTAGE;
+ for (;;) {
+ /*
+ * Kernel stacks should be naturally aligned,
+ * so that it is easy to find the starting/ending
+ * addresses of a stack given an address in the middle.
+ */
+ page = vm_page_grab_contig(KERNEL_STACK_SIZE,
+ VM_PAGE_SEL_DIRECTMAP);
+ if (page != NULL)
+ break;
+
+ VM_PAGE_WAIT(NULL);
+ }
stack = phystokv(vm_page_to_pa(page));
#if MACH_DEBUG
@@ -254,6 +257,7 @@ void stack_free(
void stack_collect(void)
{
+ struct vm_page *page;
vm_offset_t stack;
spl_t s;
@@ -269,7 +273,9 @@ void stack_collect(void)
#if MACH_DEBUG
stack_finalize(stack);
#endif /* MACH_DEBUG */
- kmem_free(kmem_map, stack, KERNEL_STACK_SIZE);
+ page = vm_page_lookup_pa(kvtophys(stack));
+ assert(page != NULL);
+ vm_page_free_contig(page, KERNEL_STACK_SIZE);
s = splsched();
stack_lock();