author     Richard Braun <rbraun@sceen.net>   2016-06-29 18:35:03 +0200
committer  Richard Braun <rbraun@sceen.net>   2016-06-29 18:34:44 +0200
commit     4bccd10cfeaf126382467dd90d7339a98989b9d2 (patch)
tree       131f133a3fdda61a10bdfabadf041428c8437015 /kern
parent     09ddf29b02c5087e02f358c2e67e27d17d3a21a7 (diff)
Fix page fault in critical section in the slab allocator
* kern/slab.c (host_slab_info): Use wired kernel memory to build the cache info.
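
Background on the fix: the old code could fill the info array in memory obtained
from kmem_alloc_pageable(), so writing info[i] while holding kmem_cache_list_lock
(a simple lock) could take a page fault inside a critical section. The patch
builds the array in wired memory from kalloc() instead, and only copies it into
pageable memory after the lock is dropped. Because kmem_nr_caches is now read
without the lock, the count is revalidated under the lock and the allocation is
retried if the list changed in the meantime.

The sketch below is a userspace analogue of that retry pattern, not gnumach
code; the names (entry_list, entry_count, snapshot_entries) are hypothetical,
and a C11 atomic stands in for the kernel's deliberately racy unlocked read.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct entry { int value; struct entry *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *entry_list;     /* protected by list_lock */
static atomic_uint entry_count;      /* updated under list_lock */

int snapshot_entries(int **out, unsigned int *out_count)
{
    unsigned int nr, i;
    int *buf;

retry:
    /* Unlocked read: only a hint, revalidated under the lock below. */
    nr = atomic_load(&entry_count);
    buf = malloc(nr * sizeof(*buf)); /* allocate outside the lock
                                        (assumes nr > 0 for brevity) */

    if (buf == NULL)
        return -1;

    pthread_mutex_lock(&list_lock);

    if (nr != atomic_load(&entry_count)) {
        /* The list changed while we were unlocked: start over. */
        pthread_mutex_unlock(&list_lock);
        free(buf);
        goto retry;
    }

    i = 0;

    for (struct entry *e = entry_list; e != NULL; e = e->next)
        buf[i++] = e->value;         /* touches only memory that cannot
                                        fault or block under the lock */

    pthread_mutex_unlock(&list_lock);

    *out = buf;
    *out_count = nr;
    return 0;
}

The patch follows the same shape: kalloc() plays the role of malloc(),
kmem_nr_caches is the revalidated counter, and everything that can fault or
allocate happens outside the simple lock.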
Diffstat (limited to 'kern')
-rw-r--r--  kern/slab.c  64
1 file changed, 36 insertions(+), 28 deletions(-)
diff --git a/kern/slab.c b/kern/slab.c
index 43962e7e..1f8e0005 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -1502,39 +1502,33 @@ kern_return_t host_slab_info(host_t host, cache_info_array_t *infop,
     struct kmem_cache *cache;
     cache_info_t *info;
     unsigned int i, nr_caches;
-    vm_size_t info_size = 0;
+    vm_size_t info_size;
     kern_return_t kr;
 
     if (host == HOST_NULL)
         return KERN_INVALID_HOST;
 
-    /*
-     * Assume the cache list is unaltered once the kernel is ready.
-     */
+    /* Assume the cache list is mostly unaltered once the kernel is ready */
 
-    simple_lock(&kmem_cache_list_lock);
+retry:
+    /* Harmless unsynchronized access, real value checked later */
     nr_caches = kmem_nr_caches;
-    simple_unlock(&kmem_cache_list_lock);
-
-    if (nr_caches <= *infoCntp)
-        info = *infop;
-    else {
-        vm_offset_t info_addr;
-
-        info_size = round_page(nr_caches * sizeof(*info));
-        kr = kmem_alloc_pageable(ipc_kernel_map, &info_addr, info_size);
-
-        if (kr != KERN_SUCCESS)
-            return kr;
-
-        info = (cache_info_t *)info_addr;
-    }
+    info_size = nr_caches * sizeof(*info);
+    info = (cache_info_t *)kalloc(info_size);
 
     if (info == NULL)
         return KERN_RESOURCE_SHORTAGE;
 
     i = 0;
+    simple_lock(&kmem_cache_list_lock);
+
+    if (nr_caches != kmem_nr_caches) {
+        simple_unlock(&kmem_cache_list_lock);
+        kfree((vm_offset_t)info, info_size);
+        goto retry;
+    }
+
     list_for_each_entry(&kmem_cache_list, cache, node) {
         simple_lock(&cache->lock);
         info[i].flags = cache->flags;
@@ -1559,24 +1553,38 @@ kern_return_t host_slab_info(host_t host, cache_info_array_t *infop,
         i++;
     }
 
-    if (info != *infop) {
+    simple_unlock(&kmem_cache_list_lock);
+
+    if (nr_caches <= *infoCntp) {
+        memcpy(*infop, info, info_size);
+    } else {
+        vm_offset_t info_addr;
+        vm_size_t total_size;
         vm_map_copy_t copy;
-        vm_size_t used;
 
-        used = nr_caches * sizeof(*info);
+        kr = kmem_alloc_pageable(ipc_kernel_map, &info_addr, info_size);
+
+        if (kr != KERN_SUCCESS)
+            goto out;
 
-        if (used != info_size)
-            memset((char *)info + used, 0, info_size - used);
+        memcpy((char *)info_addr, info, info_size);
+        total_size = round_page(info_size);
 
-        kr = vm_map_copyin(ipc_kernel_map, (vm_offset_t)info, used, TRUE,
-                           &copy);
+        if (info_size < total_size)
+            memset((char *)(info_addr + info_size),
+                   0, total_size - info_size);
+
+        kr = vm_map_copyin(ipc_kernel_map, info_addr, info_size, TRUE, &copy);
         assert(kr == KERN_SUCCESS);
         *infop = (cache_info_t *)copy;
     }
 
     *infoCntp = nr_caches;
+    kr = KERN_SUCCESS;
+
+out:
+    kfree((vm_offset_t)info, info_size);
 
-    return KERN_SUCCESS;
+    return kr;
 }
 
 #endif /* MACH_DEBUG */
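
One detail of the new copy-out path: when the caller's buffer is too small, the
data is staged in freshly allocated pageable memory, the tail of the last page
is zeroed (presumably so the caller never sees stale bytes from the final
partial page), and the range is handed over through vm_map_copyin(). A minimal
sketch of that padding arithmetic, with an illustrative 4 KiB page size
(PAGE_SIZE and round_page here are local stand-ins, not the Mach definitions):

#include <string.h>

#define PAGE_SIZE 4096UL
#define round_page(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Zero the gap between the used portion of a buffer and the next page
 * boundary, mirroring the memset the patch performs before vm_map_copyin. */
static void zero_pad_to_page(char *base, unsigned long used)
{
    unsigned long total = round_page(used);

    if (used < total)
        memset(base + used, 0, total - used);
}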