author     Richard Braun <rbraun@sceen.net>   2016-12-24 02:31:34 +0100
committer  Richard Braun <rbraun@sceen.net>   2016-12-24 02:31:34 +0100
commit     2b0f19f602e08fd9d37268233b962674fd592634
tree       90a4bfa49483ccdcddbbcd20749de7f864272d64
parent     023401c5b97023670a44059a60eb2a3a11c8a929
VM: add the vm_wire_all call
This call maps the POSIX mlockall and munlockall calls.

* Makefrag.am (include_mach_HEADERS): Add include/mach/vm_wire.h.
* include/mach/gnumach.defs (vm_wire_t): New type.
(vm_wire_all): New routine.
* include/mach/mach_types.h: Include mach/vm_wire.h.
* vm/vm_map.c: Likewise.
(vm_map_enter): Automatically wire new entries if requested.
(vm_map_copyout): Likewise.
(vm_map_pageable_all): New function.
* vm/vm_map.h: Include mach/vm_wire.h.
(struct vm_map): Update description of member `wiring_required'.
(vm_map_pageable_all): New function.
* vm/vm_user.c (vm_wire_all): New function.
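For illustration, a user-space program might drive the new call as in the
sketch below. This is a hypothetical example, not part of the commit: it
assumes the MIG-generated stub for the vm_wire_all routine declared in
include/mach/gnumach.defs, the VM_WIRE_* flags from the new
include/mach/vm_wire.h, and that the caller already holds the privileged
host port (vm_wire_all rejects any other port, as the vm_user.c hunk below
shows).

    #include <mach.h>
    #include <mach/vm_wire.h>

    /* Roughly mlockall(MCL_CURRENT | MCL_FUTURE): wire everything mapped
       now, and wire new mappings as they are created. */
    kern_return_t
    wire_task(mach_port_t host_priv, task_t task)
    {
        return vm_wire_all(host_priv, task, VM_WIRE_CURRENT | VM_WIRE_FUTURE);
    }

    /* Roughly munlockall(): make all mappings pageable again. */
    kern_return_t
    unwire_task(mach_port_t host_priv, task_t task)
    {
        return vm_wire_all(host_priv, task, VM_WIRE_NONE);
    }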
Diffstat (limited to 'vm')

 -rw-r--r--  vm/vm_map.c  | 91
 -rw-r--r--  vm/vm_map.h  |  5
 -rw-r--r--  vm/vm_user.c | 32

 3 files changed, 122 insertions(+), 6 deletions(-)
diff --git a/vm/vm_map.c b/vm/vm_map.c
index c618e63d..855d7997 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -39,6 +39,7 @@
#include <mach/port.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
+#include <mach/vm_wire.h>
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/kalloc.h>
@@ -1108,6 +1109,15 @@ kern_return_t vm_map_enter(
SAVE_HINT(map, new_entry);
+ if (map->wiring_required) {
+ /* Returns with the map read-locked if successful */
+ result = vm_map_pageable(map, start, end, cur_protection, FALSE, FALSE);
+
+ if (result != KERN_SUCCESS) {
+ RETURN(KERN_SUCCESS);
+ }
+ }
+
vm_map_unlock(map);
if ((object != VM_OBJECT_NULL) &&
@@ -1746,6 +1756,69 @@ kern_return_t vm_map_pageable(
}
/*
+ * vm_map_pageable_all:
+ *
+ * Sets the pageability of an entire map. If the VM_WIRE_CURRENT
+ * flag is set, then all current mappings are locked down. If the
+ * VM_WIRE_FUTURE flag is set, then all mappings created after the
+ * call returns are locked down. If no flags are passed
+ * (i.e. VM_WIRE_NONE), all mappings become pageable again, and
+ * future mappings aren't automatically locked down any more.
+ *
+ * The access types of the mappings match their current protection.
+ * Null mappings (with protection VM_PROT_NONE) are updated to track
+ * that they should be wired in case they become accessible.
+ */
+kern_return_t
+vm_map_pageable_all(struct vm_map *map, vm_wire_t flags)
+{
+ boolean_t wiring_required;
+ kern_return_t kr;
+
+ if ((flags & ~VM_WIRE_ALL) != 0) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ vm_map_lock(map);
+
+ if (flags == VM_WIRE_NONE) {
+ map->wiring_required = FALSE;
+
+ /* Returns with the map read-locked if successful */
+ kr = vm_map_pageable(map, map->min_offset, map->max_offset,
+ VM_PROT_NONE, FALSE, FALSE);
+ vm_map_unlock(map);
+ return kr;
+ }
+
+ wiring_required = map->wiring_required;
+
+ if (flags & VM_WIRE_FUTURE) {
+ map->wiring_required = TRUE;
+ }
+
+ if (flags & VM_WIRE_CURRENT) {
+ /* Returns with the map read-locked if successful */
+ kr = vm_map_pageable(map, map->min_offset, map->max_offset,
+ VM_PROT_READ | VM_PROT_WRITE,
+ FALSE, FALSE);
+
+ if (kr != KERN_SUCCESS) {
+ if (flags & VM_WIRE_FUTURE) {
+ map->wiring_required = wiring_required;
+ }
+
+ vm_map_unlock(map);
+ return kr;
+ }
+ }
+
+ vm_map_unlock(map);
+
+ return KERN_SUCCESS;
+}
+
+/*
* vm_map_entry_delete: [ internal use only ]
*
* Deallocate the given entry from the target map.
@@ -2605,6 +2678,7 @@ kern_return_t vm_map_copyout(
vm_offset_t vm_copy_start;
vm_map_entry_t last;
vm_map_entry_t entry;
+ kern_return_t kr;
/*
* Check for null copy object.
@@ -2624,7 +2698,6 @@ kern_return_t vm_map_copyout(
vm_object_t object = copy->cpy_object;
vm_size_t offset = copy->offset;
vm_size_t tmp_size = copy->size;
- kern_return_t kr;
*dst_addr = 0;
kr = vm_map_enter(dst_map, dst_addr, tmp_size,
@@ -2764,11 +2837,19 @@ kern_return_t vm_map_copyout(
vm_map_copy_insert(dst_map, last, copy);
- vm_map_unlock(dst_map);
+ if (dst_map->wiring_required) {
+ /* Returns with the map read-locked if successful */
+ kr = vm_map_pageable(dst_map, start, start + size,
+ VM_PROT_READ | VM_PROT_WRITE,
+ FALSE, FALSE);
- /*
- * XXX If wiring_required, call vm_map_pageable
- */
+ if (kr != KERN_SUCCESS) {
+ vm_map_unlock(dst_map);
+ return kr;
+ }
+ }
+
+ vm_map_unlock(dst_map);
return(KERN_SUCCESS);
}
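The new vm_map_pageable_all is the workhorse here. As a quick reference for
its contract, the hypothetical calls below summarize the flag semantics
(assuming, as the (flags & ~VM_WIRE_ALL) check suggests, that VM_WIRE_ALL is
VM_WIRE_CURRENT | VM_WIRE_FUTURE in the new include/mach/vm_wire.h):

    kern_return_t kr;

    kr = vm_map_pageable_all(map, VM_WIRE_CURRENT); /* wire current mappings only */
    kr = vm_map_pageable_all(map, VM_WIRE_FUTURE);  /* wire future mappings only */
    kr = vm_map_pageable_all(map, VM_WIRE_ALL);     /* both, mlockall-style */
    kr = vm_map_pageable_all(map, VM_WIRE_NONE);    /* unwire all, munlockall-style */

If wiring the current mappings fails, the function rolls wiring_required back,
so a combined VM_WIRE_ALL request takes effect fully or not at all. Note the
asymmetry between the two automatic-wiring sites above: vm_map_enter still
reports KERN_SUCCESS when wiring the new entry fails (the mapping itself was
established), while vm_map_copyout propagates the error to its caller.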
diff --git a/vm/vm_map.h b/vm/vm_map.h
index aa68b92c..87660f31 100644
--- a/vm/vm_map.h
+++ b/vm/vm_map.h
@@ -46,6 +46,7 @@
#include <mach/vm_attributes.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
+#include <mach/vm_wire.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
@@ -191,7 +192,7 @@ struct vm_map {
/* Flags */
unsigned int wait_for_space:1, /* Should callers wait
for space? */
- /* boolean_t */ wiring_required:1; /* All memory wired? */
+ /* boolean_t */ wiring_required:1; /* New mappings are wired? */
unsigned int timestamp; /* Version number */
@@ -492,6 +493,8 @@ static inline void vm_map_set_name(vm_map_t map, const char *name)
extern kern_return_t vm_map_pageable(vm_map_t, vm_offset_t, vm_offset_t,
vm_prot_t, boolean_t, boolean_t);
+extern kern_return_t vm_map_pageable_all(vm_map_t, vm_wire_t);
+
/*
* Submap object. Must be used to create memory to be put
* in a submap by vm_map_submap.
diff --git a/vm/vm_user.c b/vm/vm_user.c
index 46684423..6c1e3d6f 100644
--- a/vm/vm_user.c
+++ b/vm/vm_user.c
@@ -447,3 +447,35 @@ kern_return_t vm_wire(port, map, start, size, access)
return vm_map_pageable(map, trunc_page(start), round_page(start+size),
access, TRUE, TRUE);
}
+
+kern_return_t vm_wire_all(const ipc_port_t port, vm_map_t map, vm_wire_t flags)
+{
+ if (!IP_VALID(port))
+ return KERN_INVALID_HOST;
+
+ ip_lock(port);
+
+ if (!ip_active(port)
+ || (ip_kotype(port) != IKOT_HOST_PRIV)) {
+ ip_unlock(port);
+ return KERN_INVALID_HOST;
+ }
+
+ ip_unlock(port);
+
+ if (map == VM_MAP_NULL) {
+ return KERN_INVALID_TASK;
+ }
+
+ if (flags & ~VM_WIRE_ALL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ /* Check if the range includes a projected buffer;
+ the user is not allowed direct manipulation in that case. */
+ if (projected_buffer_in_range(map, map->min_offset, map->max_offset)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ return vm_map_pageable_all(map, flags);
+}
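The checks above give vm_wire_all a simple error contract, summarized by this
hypothetical caller-side sketch (the port and map variables are placeholders,
and ~VM_WIRE_ALL is just a convenient value guaranteed to contain flag bits
outside VM_WIRE_ALL):

    kern_return_t kr;

    kr = vm_wire_all(not_host_priv, map, VM_WIRE_ALL);     /* KERN_INVALID_HOST */
    kr = vm_wire_all(host_priv, VM_MAP_NULL, VM_WIRE_ALL); /* KERN_INVALID_TASK */
    kr = vm_wire_all(host_priv, map, ~VM_WIRE_ALL);        /* KERN_INVALID_ARGUMENT */
    kr = vm_wire_all(host_priv, map, VM_WIRE_ALL);         /* delegated to
                                                              vm_map_pageable_all */

Maps containing a projected buffer are likewise rejected with
KERN_INVALID_ARGUMENT, since users may not manipulate projected buffers
directly.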