author     Samuel Thibault <samuel.thibault@ens-lyon.org>   2020-05-01 02:25:37 +0200
committer  Samuel Thibault <samuel.thibault@ens-lyon.org>   2020-05-01 02:25:37 +0200
commit     ded813a44263103a28e1788489ec0fef7e8d1c7e (patch)
tree       77bc52c178b5eebe2b718e602e9152c1474a47bd
parent     0cf03cdab38bd588390ddecc7086d8bb0f56d71f (diff)
Add kmem_valloc
Functions like vremap need to allocate some virtual addressing space
before making their own mapping.  kmem_alloc_wired can be used for
that, but that wastes memory.

* vm/vm_kern.c (kmem_valloc): New function.
* vm/vm_kern.h (kmem_valloc): New prototype.
* linux/dev/glue/kmem.c (vremap): Call kmem_valloc instead of
  kmem_alloc_wired.  Also check that `offset' is aligned on a page.
-rw-r--r--   linux/dev/glue/kmem.c    4
-rw-r--r--   vm/vm_kern.c            76
-rw-r--r--   vm/vm_kern.h             1
3 files changed, 80 insertions(+), 1 deletion(-)
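To make the "wastes memory" point concrete: kmem_alloc_wired does not just reserve a range, it also allocates, wires, and maps physical pages behind it, which a caller like vremap then immediately shadows with its own device mapping. As an illustrative figure (not one from the commit), remapping a 128 KiB device region on i386 with 4 KiB pages meant the old path wired round_page(128 KiB) / 4 KiB = 32 physical pages that were never used; kmem_valloc reserves the same 32 pages' worth of virtual addressing space without touching a single physical page.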
diff --git a/linux/dev/glue/kmem.c b/linux/dev/glue/kmem.c
index 8e0b5425..509229d1 100644
--- a/linux/dev/glue/kmem.c
+++ b/linux/dev/glue/kmem.c
@@ -574,8 +574,10 @@ vremap (unsigned long offset, unsigned long size)
 {
   vm_offset_t addr;
   kern_return_t ret;
+
+  assert(page_aligned(offset));
 
-  ret = kmem_alloc_wired (kernel_map, &addr, round_page (size));
+  ret = kmem_valloc (kernel_map, &addr, round_page (size));
   if (ret != KERN_SUCCESS)
     return NULL;
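The rest of vremap (outside this hunk) installs the device mapping into the freshly reserved range. A minimal sketch of the overall pattern follows; the pmap_map_bd call and its arguments are an assumption for illustration, since the mapping step is not shown in this hunk:

void *
vremap (unsigned long offset, unsigned long size)
{
  vm_offset_t addr;
  kern_return_t ret;

  /* Device physical addresses must start on a page boundary,
     since the range is mapped page by page.  */
  assert(page_aligned(offset));

  /* Reserve kernel virtual addressing space only; no physical
     pages are allocated or wired behind the range.  */
  ret = kmem_valloc (kernel_map, &addr, round_page (size));
  if (ret != KERN_SUCCESS)
    return NULL;

  /* Assumed mapping step: enter the device pages into the
     reserved range.  pmap_map_bd is illustrative here; the
     actual call vremap uses is outside this hunk.  */
  pmap_map_bd (addr, offset, offset + round_page (size),
	       VM_PROT_READ | VM_PROT_WRITE);
  return (void *) addr;
}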
diff --git a/vm/vm_kern.c b/vm/vm_kern.c
index a84553d6..a7ec0c06 100644
--- a/vm/vm_kern.c
+++ b/vm/vm_kern.c
@@ -432,6 +432,82 @@ retry:
 }
 
 /*
+ *	kmem_valloc:
+ *
+ *	Allocate addressing space in the kernel's address map
+ *	or a submap.  The addressing space does not map anything.
+ */
+
+kern_return_t
+kmem_valloc(
+	vm_map_t	map,
+	vm_offset_t	*addrp,
+	vm_size_t	size)
+{
+	vm_map_entry_t entry;
+	vm_offset_t offset;
+	vm_offset_t addr;
+	unsigned int attempts;
+	kern_return_t kr;
+
+	/*
+	 *	Use the kernel object for wired-down kernel pages.
+	 *	Assume that no region of the kernel object is
+	 *	referenced more than once.  We want vm_map_find_entry
+	 *	to extend an existing entry if possible.
+	 */
+
+	size = round_page(size);
+	attempts = 0;
+
+retry:
+	vm_map_lock(map);
+	kr = vm_map_find_entry(map, &addr, size, (vm_offset_t) 0,
+			       kernel_object, &entry);
+	if (kr != KERN_SUCCESS) {
+		vm_map_unlock(map);
+
+		if (attempts == 0) {
+			attempts++;
+			slab_collect();
+			goto retry;
+		}
+
+		printf_once("no more room for kmem_valloc in %p (%s)\n",
+			    map, map->name);
+		return kr;
+	}
+
+	/*
+	 *	Since we didn't know where the new region would
+	 *	start, we couldn't supply the correct offset into
+	 *	the kernel object.  We only initialize the entry
+	 *	if we aren't extending an existing entry.
+	 */
+
+	offset = addr - VM_MIN_KERNEL_ADDRESS;
+
+	if (entry->object.vm_object == VM_OBJECT_NULL) {
+		vm_object_reference(kernel_object);
+
+		entry->object.vm_object = kernel_object;
+		entry->offset = offset;
+	}
+
+	/*
+	 *	Since we have not given out this address yet,
+	 *	it is safe to unlock the map.
+	 */
+	vm_map_unlock(map);
+
+	/*
+	 *	Return the memory, not zeroed.
+	 */
+	*addrp = addr;
+	return KERN_SUCCESS;
+}
+
+/*
  *	kmem_alloc_wired:
  *
  *	Allocate wired-down memory in the kernel's address map
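kmem_valloc mirrors kmem_alloc_wired up to the point where pages would be allocated: vm_map_find_entry carves the range out of the map (retrying once after slab_collect() if the map is full), the entry is pointed at kernel_object so later allocations can coalesce with it, and the function returns without allocating, wiring, or mapping anything. A hedged usage sketch follows; pairing with kmem_free to release the range is an assumption based on the reservation being an ordinary kernel_object map entry, not something this commit states:

vm_offset_t va;

/* Reserve one page of kernel virtual addressing space.  Nothing
   is mapped at `va' yet, so dereferencing it before installing a
   mapping would fault.  */
if (kmem_valloc (kernel_map, &va, PAGE_SIZE) != KERN_SUCCESS)
	return KERN_RESOURCE_SHORTAGE;

/* ... enter a mapping for `va' through the pmap module ... */

/* Assumed release path: kmem_free removes the map entry and with
   it the reservation.  */
kmem_free (kernel_map, va, PAGE_SIZE);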
diff --git a/vm/vm_kern.h b/vm/vm_kern.h
index 4bd89c49..0cdb19db 100644
--- a/vm/vm_kern.h
+++ b/vm/vm_kern.h
@@ -52,6 +52,7 @@ extern void kmem_init(vm_offset_t, vm_offset_t);
 extern kern_return_t kmem_alloc(vm_map_t, vm_offset_t *, vm_size_t);
 extern kern_return_t kmem_alloc_pageable(vm_map_t, vm_offset_t *,
					  vm_size_t);
+extern kern_return_t kmem_valloc(vm_map_t, vm_offset_t *, vm_size_t);
 extern kern_return_t kmem_alloc_wired(vm_map_t, vm_offset_t *, vm_size_t);
 extern kern_return_t kmem_alloc_aligned(vm_map_t, vm_offset_t *, vm_size_t);
 extern void kmem_free(vm_map_t, vm_offset_t, vm_size_t);