From 2b0f19f602e08fd9d37268233b962674fd592634 Mon Sep 17 00:00:00 2001
From: Richard Braun
Date: Sat, 24 Dec 2016 02:31:34 +0100
Subject: VM: add the vm_wire_all call

This call maps the POSIX mlockall and munlockall calls.

* Makefrag.am (include_mach_HEADERS): Add include/mach/vm_wire.h.
* include/mach/gnumach.defs (vm_wire_t): New type.
(vm_wire_all): New routine.
* include/mach/mach_types.h: Include mach/vm_wire.h.
* vm/vm_map.c: Likewise.
(vm_map_enter): Automatically wire new entries if requested.
(vm_map_copyout): Likewise.
(vm_map_pageable_all): New function.
* vm/vm_map.h: Include mach/vm_wire.h.
(struct vm_map): Update description of member `wiring_required'.
(vm_map_pageable_all): New function.
* vm/vm_user.c (vm_wire_all): New function.
---
 Makefrag.am               |  1 +
 include/mach/gnumach.defs | 15 ++++++++
 include/mach/mach_types.h |  1 +
 include/mach/vm_wire.h    | 30 ++++++++++++++++
 vm/vm_map.c               | 91 ++++++++++++++++++++++++++++++++++++++++++++---
 vm/vm_map.h               |  5 ++-
 vm/vm_user.c              | 32 +++++++++++++++++
 7 files changed, 169 insertions(+), 6 deletions(-)
 create mode 100644 include/mach/vm_wire.h

diff --git a/Makefrag.am b/Makefrag.am
index e001d65d..c16f1c72 100644
--- a/Makefrag.am
+++ b/Makefrag.am
@@ -418,6 +418,7 @@ include_mach_HEADERS = \
 	include/mach/vm_param.h \
 	include/mach/vm_prot.h \
 	include/mach/vm_statistics.h \
+	include/mach/vm_wire.h \
 	include/mach/inline.h \
 	include/mach/xen.h
diff --git a/include/mach/gnumach.defs b/include/mach/gnumach.defs
index 5235df63..b484accc 100644
--- a/include/mach/gnumach.defs
+++ b/include/mach/gnumach.defs
@@ -35,6 +35,8 @@ GNUMACH_IMPORTS
 
 type vm_cache_statistics_data_t = struct[11] of integer_t;
 
+type vm_wire_t = int;
+
 /*
  * Return page cache statistics for the host on which the target task
  * resides.
@@ -136,3 +138,16 @@ simpleroutine gsync_requeue(
 	wake_one : boolean_t;
 	flags : int);
 
+/*
+ * If the VM_WIRE_CURRENT flag is passed, specify that the entire
+ * virtual address space of the target task must not cause page faults.
+ *
+ * If the VM_WIRE_FUTURE flag is passed, automatically wire new
+ * mappings in the address space of the target task.
+ *
+ * If the flags are empty (VM_WIRE_NONE), unwire all mappings.
+ */
+routine vm_wire_all(
+		host : mach_port_t;
+		task : vm_task_t;
+		flags : vm_wire_t);
diff --git a/include/mach/mach_types.h b/include/mach/mach_types.h
index 87684824..65164a99 100644
--- a/include/mach/mach_types.h
+++ b/include/mach/mach_types.h
@@ -53,6 +53,7 @@
 #include 
 #include 
 #include 
+#include <mach/vm_wire.h>
 
 #ifdef MACH_KERNEL
 #include 	/* for task_array_t */
diff --git a/include/mach/vm_wire.h b/include/mach/vm_wire.h
new file mode 100644
index 00000000..1552dfa7
--- /dev/null
+++ b/include/mach/vm_wire.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2017 Free Software Foundation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _MACH_VM_WIRE_H_
+#define _MACH_VM_WIRE_H_
+
+typedef int vm_wire_t;
+
+#define VM_WIRE_NONE 0
+#define VM_WIRE_CURRENT 1
+#define VM_WIRE_FUTURE 2
+
+#define VM_WIRE_ALL (VM_WIRE_CURRENT | VM_WIRE_FUTURE)
+
+#endif /* _MACH_VM_WIRE_H_ */
diff --git a/vm/vm_map.c b/vm/vm_map.c
index c618e63d..855d7997 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -39,6 +39,7 @@
 #include 
 #include 
 #include 
+#include <mach/vm_wire.h>
 #include 
 #include 
 #include 
@@ -1108,6 +1109,15 @@ kern_return_t vm_map_enter(
 
 	SAVE_HINT(map, new_entry);
 
+	if (map->wiring_required) {
+		/* Returns with the map read-locked if successful */
+		result = vm_map_pageable(map, start, end, cur_protection, FALSE, FALSE);
+
+		if (result != KERN_SUCCESS) {
+			RETURN(KERN_SUCCESS);
+		}
+	}
+
 	vm_map_unlock(map);
 
 	if ((object != VM_OBJECT_NULL) &&
@@ -1745,6 +1755,69 @@ kern_return_t vm_map_pageable(
 	return(KERN_SUCCESS);
 }
 
+/*
+ *	vm_map_pageable_all:
+ *
+ *	Sets the pageability of an entire map. If the VM_WIRE_CURRENT
+ *	flag is set, then all current mappings are locked down. If the
+ *	VM_WIRE_FUTURE flag is set, then all mappings created after the
+ *	call returns are locked down. If no flags are passed
+ *	(i.e. VM_WIRE_NONE), all mappings become pageable again, and
+ *	future mappings aren't automatically locked down any more.
+ *
+ *	The access type of the mappings match their current protection.
+ *	Null mappings (with protection PROT_NONE) are updated to track
+ *	that they should be wired in case they become accessible.
+ */
+kern_return_t
+vm_map_pageable_all(struct vm_map *map, vm_wire_t flags)
+{
+	boolean_t wiring_required;
+	kern_return_t kr;
+
+	if ((flags & ~VM_WIRE_ALL) != 0) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	vm_map_lock(map);
+
+	if (flags == VM_WIRE_NONE) {
+		map->wiring_required = FALSE;
+
+		/* Returns with the map read-locked if successful */
+		kr = vm_map_pageable(map, map->min_offset, map->max_offset,
+				     VM_PROT_NONE, FALSE, FALSE);
+		vm_map_unlock(map);
+		return kr;
+	}
+
+	wiring_required = map->wiring_required;
+
+	if (flags & VM_WIRE_FUTURE) {
+		map->wiring_required = TRUE;
+	}
+
+	if (flags & VM_WIRE_CURRENT) {
+		/* Returns with the map read-locked if successful */
+		kr = vm_map_pageable(map, map->min_offset, map->max_offset,
+				     VM_PROT_READ | VM_PROT_WRITE,
+				     FALSE, FALSE);
+
+		if (kr != KERN_SUCCESS) {
+			if (flags & VM_WIRE_FUTURE) {
+				map->wiring_required = wiring_required;
+			}
+
+			vm_map_unlock(map);
+			return kr;
+		}
+	}
+
+	vm_map_unlock(map);
+
+	return KERN_SUCCESS;
+}
+
 /*
  *	vm_map_entry_delete:	[ internal use only ]
  *
@@ -2605,6 +2678,7 @@ kern_return_t vm_map_copyout(
 	vm_offset_t	vm_copy_start;
 	vm_map_entry_t	last;
 	vm_map_entry_t	entry;
+	kern_return_t	kr;
 
 	/*
 	 *	Check for null copy object.
@@ -2624,7 +2698,6 @@ kern_return_t vm_map_copyout(
 		vm_object_t object = copy->cpy_object;
 		vm_size_t offset = copy->offset;
 		vm_size_t tmp_size = copy->size;
-		kern_return_t kr;
 
 		*dst_addr = 0;
 		kr = vm_map_enter(dst_map, dst_addr, tmp_size,
@@ -2764,11 +2837,19 @@ kern_return_t vm_map_copyout(
 
 	vm_map_copy_insert(dst_map, last, copy);
 
-	vm_map_unlock(dst_map);
+	if (dst_map->wiring_required) {
+		/* Returns with the map read-locked if successful */
+		kr = vm_map_pageable(dst_map, start, start + size,
+				     VM_PROT_READ | VM_PROT_WRITE,
+				     FALSE, FALSE);
 
-	/*
-	 *	XXX	If wiring_required, call vm_map_pageable
-	 */
+		if (kr != KERN_SUCCESS) {
+			vm_map_unlock(dst_map);
+			return kr;
+		}
+	}
+
+	vm_map_unlock(dst_map);
 
 	return(KERN_SUCCESS);
 }
diff --git a/vm/vm_map.h b/vm/vm_map.h
index aa68b92c..87660f31 100644
--- a/vm/vm_map.h
+++ b/vm/vm_map.h
@@ -46,6 +46,7 @@
 #include 
 #include 
 #include 
+#include <mach/vm_wire.h>
 #include 
 #include 
 #include 
@@ -191,7 +192,7 @@ struct vm_map {
 
 	/* Flags */
 	unsigned int	wait_for_space:1,	/* Should callers wait for space? */
-	/* boolean_t */	wiring_required:1;	/* All memory wired? */
+	/* boolean_t */	wiring_required:1;	/* New mappings are wired? */
 
 	unsigned int	timestamp;	/* Version number */
 
@@ -492,6 +493,8 @@ static inline void vm_map_set_name(vm_map_t map, const char *name)
 extern kern_return_t vm_map_pageable(vm_map_t, vm_offset_t, vm_offset_t,
 				     vm_prot_t, boolean_t, boolean_t);
 
+extern kern_return_t vm_map_pageable_all(vm_map_t, vm_wire_t);
+
 /*
  *	Submap object.  Must be used to create memory to be put
  *	in a submap by vm_map_submap.
diff --git a/vm/vm_user.c b/vm/vm_user.c
index 46684423..6c1e3d6f 100644
--- a/vm/vm_user.c
+++ b/vm/vm_user.c
@@ -447,3 +447,35 @@ kern_return_t vm_wire(port, map, start, size, access)
 	return vm_map_pageable(map, trunc_page(start), round_page(start+size),
 			       access, TRUE, TRUE);
 }
+
+kern_return_t vm_wire_all(const ipc_port_t port, vm_map_t map, vm_wire_t flags)
+{
+	if (!IP_VALID(port))
+		return KERN_INVALID_HOST;
+
+	ip_lock(port);
+
+	if (!ip_active(port)
+	    || (ip_kotype(port) != IKOT_HOST_PRIV)) {
+		ip_unlock(port);
+		return KERN_INVALID_HOST;
+	}
+
+	ip_unlock(port);
+
+	if (map == VM_MAP_NULL) {
+		return KERN_INVALID_TASK;
+	}
+
+	if (flags & ~VM_WIRE_ALL) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	/*Check if range includes projected buffer;
+	  user is not allowed direct manipulation in that case*/
+	if (projected_buffer_in_range(map, map->min_offset, map->max_offset)) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	return vm_map_pageable_all(map, flags);
+}
-- 
cgit v1.2.3
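
As an illustration of how the new interface is meant to be used, the following is a minimal user-space sketch, not part of the patch itself, that maps the POSIX mlockall()/munlockall() interface onto vm_wire_all. The wrapper names (my_mlockall, my_munlockall), the errno mapping, and the way the host privileged port is obtained are assumptions of the sketch; the vm_wire_all() prototype corresponds to the MIG client stub generated from the routine added to gnumach.defs, and a host privileged port is needed because the vm_wire_all handler in vm/vm_user.c only accepts an IKOT_HOST_PRIV port.

/*
 * Hypothetical user-side wrappers for the new RPC.  Assumes a
 * MIG-generated client stub for vm_wire_all and that the caller has
 * already obtained the host privileged port by some platform-specific
 * means (e.g. get_privileged_ports() on the Hurd).
 */
#include <errno.h>
#include <sys/mman.h>		/* MCL_CURRENT, MCL_FUTURE */
#include <mach.h>		/* mach_task_self, kern_return_t */
#include <mach/vm_wire.h>	/* VM_WIRE_* flags added by this patch */

/* Prototype of the MIG-generated client stub for the gnumach.defs routine. */
extern kern_return_t vm_wire_all (mach_port_t host_priv, mach_port_t task,
				  vm_wire_t flags);

/* mlockall()-style wrapper: wire the calling task's address space. */
int
my_mlockall (mach_port_t host_priv, int mcl_flags)
{
  vm_wire_t flags = VM_WIRE_NONE;
  kern_return_t kr;

  if (mcl_flags & MCL_CURRENT)
    flags |= VM_WIRE_CURRENT;	/* wire every existing mapping now */
  if (mcl_flags & MCL_FUTURE)
    flags |= VM_WIRE_FUTURE;	/* wire mappings created from now on */

  kr = vm_wire_all (host_priv, mach_task_self (), flags);

  switch (kr)
    {
    case KERN_SUCCESS:
      return 0;
    case KERN_INVALID_HOST:	/* not the host privileged port */
      errno = EPERM;
      return -1;
    case KERN_INVALID_ARGUMENT:	/* unknown flag bits */
      errno = EINVAL;
      return -1;
    default:			/* e.g. the wiring itself failed */
      errno = ENOMEM;
      return -1;
    }
}

/* munlockall()-style wrapper: VM_WIRE_NONE unwires all current mappings
   and clears the map's wiring_required flag for future ones. */
int
my_munlockall (mach_port_t host_priv)
{
  kern_return_t kr = vm_wire_all (host_priv, mach_task_self (), VM_WIRE_NONE);
  return kr == KERN_SUCCESS ? 0 : -1;
}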