From ded813a44263103a28e1788489ec0fef7e8d1c7e Mon Sep 17 00:00:00 2001
From: Samuel Thibault
Date: Fri, 1 May 2020 02:25:37 +0200
Subject: Add kmem_valloc

Functions like vremap need to allocate some virtual addressing space
before making their own mapping.  kmem_alloc_wired can be used for
that, but it wastes memory: the wired physical pages it allocates are
immediately superseded by the caller's own mapping.

* vm/vm_kern.c (kmem_valloc): New function.
* vm/vm_kern.h (kmem_valloc): New prototype.
* linux/dev/glue/kmem.c (vremap): Call kmem_valloc instead of
kmem_alloc_wired.  Also check that `offset' is aligned on a page.
---
 linux/dev/glue/kmem.c |  4 ++-
 vm/vm_kern.c          | 76 +++++++++++++++++++++++++++++++++++++++++++++++++++
 vm/vm_kern.h          |  1 +
 3 files changed, 80 insertions(+), 1 deletion(-)

diff --git a/linux/dev/glue/kmem.c b/linux/dev/glue/kmem.c
index 8e0b5425..509229d1 100644
--- a/linux/dev/glue/kmem.c
+++ b/linux/dev/glue/kmem.c
@@ -574,8 +574,10 @@ vremap (unsigned long offset, unsigned long size)
 {
   vm_offset_t addr;
   kern_return_t ret;
+
+  assert(page_aligned(offset));
 
-  ret = kmem_alloc_wired (kernel_map, &addr, round_page (size));
+  ret = kmem_valloc (kernel_map, &addr, round_page (size));
   if (ret != KERN_SUCCESS)
     return NULL;
 
diff --git a/vm/vm_kern.c b/vm/vm_kern.c
index a84553d6..a7ec0c06 100644
--- a/vm/vm_kern.c
+++ b/vm/vm_kern.c
@@ -431,6 +431,82 @@ retry:
 	return KERN_SUCCESS;
 }
 
+/*
+ *	kmem_valloc:
+ *
+ *	Allocate addressing space in the kernel's address map
+ *	or a submap.  The addressing space does not map anything.
+ */
+
+kern_return_t
+kmem_valloc(
+	vm_map_t	map,
+	vm_offset_t	*addrp,
+	vm_size_t	size)
+{
+	vm_map_entry_t entry;
+	vm_offset_t offset;
+	vm_offset_t addr;
+	unsigned int attempts;
+	kern_return_t kr;
+
+	/*
+	 *	Use the kernel object for wired-down kernel pages.
+	 *	Assume that no region of the kernel object is
+	 *	referenced more than once.  We want vm_map_find_entry
+	 *	to extend an existing entry if possible.
+	 */
+
+	size = round_page(size);
+	attempts = 0;
+
+retry:
+	vm_map_lock(map);
+	kr = vm_map_find_entry(map, &addr, size, (vm_offset_t) 0,
+			       kernel_object, &entry);
+	if (kr != KERN_SUCCESS) {
+		vm_map_unlock(map);
+
+		if (attempts == 0) {
+			attempts++;
+			slab_collect();
+			goto retry;
+		}
+
+		printf_once("no more room for kmem_valloc in %p (%s)\n",
+			    map, map->name);
+		return kr;
+	}
+
+	/*
+	 *	Since we didn't know where the new region would
+	 *	start, we couldn't supply the correct offset into
+	 *	the kernel object.  We only initialize the entry
+	 *	if we aren't extending an existing entry.
+	 */
+
+	offset = addr - VM_MIN_KERNEL_ADDRESS;
+
+	if (entry->object.vm_object == VM_OBJECT_NULL) {
+		vm_object_reference(kernel_object);
+
+		entry->object.vm_object = kernel_object;
+		entry->offset = offset;
+	}
+
+	/*
+	 *	Since we have not given out this address yet,
+	 *	it is safe to unlock the map.
+	 */
+	vm_map_unlock(map);
+
+	/*
+	 *	Return the memory, not zeroed.
+	 */
+	*addrp = addr;
+	return KERN_SUCCESS;
+}
+
 /*
  *	kmem_alloc_wired:
  *
diff --git a/vm/vm_kern.h b/vm/vm_kern.h
index 4bd89c49..0cdb19db 100644
--- a/vm/vm_kern.h
+++ b/vm/vm_kern.h
@@ -52,6 +52,7 @@ extern void kmem_init(vm_offset_t, vm_offset_t);
 
 extern kern_return_t kmem_alloc(vm_map_t, vm_offset_t *, vm_size_t);
 extern kern_return_t kmem_alloc_pageable(vm_map_t, vm_offset_t *, vm_size_t);
+extern kern_return_t kmem_valloc(vm_map_t, vm_offset_t *, vm_size_t);
 extern kern_return_t kmem_alloc_wired(vm_map_t, vm_offset_t *, vm_size_t);
 extern kern_return_t kmem_alloc_aligned(vm_map_t, vm_offset_t *, vm_size_t);
 extern void kmem_free(vm_map_t, vm_offset_t, vm_size_t);
-- 
cgit v1.2.3
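
Note (not part of the commit): the caller-side pattern this enables looks
roughly like the sketch below, modelled on what vremap does after this
patch.  The function name map_device_regs is hypothetical, and the
pmap_map_bd call and protection flags are assumptions drawn from the
existing vremap code in linux/dev/glue/kmem.c, not something this patch
introduces.

#include <kern/assert.h>
#include <mach/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>

/* Map a page-aligned physical device region into kernel virtual space:
   reserve addressing space with kmem_valloc (no pages are allocated or
   wired), then point the reserved range at the device's pages.  */
static void *
map_device_regs (vm_offset_t phys, vm_size_t size)
{
  vm_offset_t addr;

  assert (page_aligned (phys));

  if (kmem_valloc (kernel_map, &addr, round_page (size)) != KERN_SUCCESS)
    return NULL;

  /* Assumed helper: pmap_map_bd installs wired mappings for a
     physically contiguous range, as the existing vremap does.  */
  (void) pmap_map_bd (addr, phys, phys + round_page (size),
		      VM_PROT_READ | VM_PROT_WRITE);
  return (void *) addr;
}

The point of the patch is visible in the sketch: the reserved range never
receives backing pages from the VM system, so nothing is wasted when the
caller overwrites it with its own mapping.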