author    Richard Braun <rbraun@sceen.net>  2016-09-07 00:11:08 +0200
committer Richard Braun <rbraun@sceen.net>  2016-09-07 00:11:08 +0200
commit    e5c7d1c1dda40f8f262e26fed911bfe03027993b
tree      1e5f9bfb86ef80c2fafdce7a1a9214ba955e9f5f
parent    efcecd06abb8f7342723a8916917842840e9264f
Remove map entry pageability property.
Since the replacement of the zone allocator, kernel objects have been
wired in memory. Besides, as of 5e9f6f (Stack the slab allocator
directly on top of the physical allocator), there is a single cache
used to allocate map entries. Those changes make the pageability
attribute of VM maps irrelevant.

* device/ds_routines.c (mach_device_init): Update call to kmem_submap.
* ipc/ipc_init.c (ipc_init): Likewise.
* kern/task.c (task_create): Update call to vm_map_create.
* vm/vm_kern.c (kmem_submap): Remove `pageable' argument. Update call
to vm_map_setup.
(kmem_init): Update call to vm_map_setup.
* vm/vm_kern.h (kmem_submap): Update declaration.
* vm/vm_map.c (vm_map_setup): Remove `pageable' argument. Don't set
`entries_pageable' member.
(vm_map_create): Likewise.
(vm_map_copyout): Don't bother creating copies of page entries with
the right pageability.
(vm_map_copyin): Don't set `entries_pageable' member.
(vm_map_fork): Update call to vm_map_create.
* vm/vm_map.h (struct vm_map_header): Remove `entries_pageable' member.
(vm_map_setup, vm_map_create): Remove `pageable' argument.
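For reference, a minimal sketch of the simplified interfaces after this
change; the prototypes are taken verbatim from the vm/vm_kern.h and
vm/vm_map.h hunks below, while the comments are added here for
illustration only:

    /* Allocate a submap of `parent' covering `size' bytes; map entries
       are always wired now, so no pageability flag is passed. */
    extern void kmem_submap(vm_map_t, vm_map_t, vm_offset_t *,
                            vm_offset_t *, vm_size_t);

    /* Initialize an empty map between the given address bounds. */
    extern void vm_map_setup(vm_map_t, pmap_t, vm_offset_t, vm_offset_t);

    /* Create an empty map between the given address bounds. */
    extern vm_map_t vm_map_create(pmap_t, vm_offset_t, vm_offset_t);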
-rw-r--r--   device/ds_routines.c    2
-rw-r--r--   ipc/ipc_init.c          2
-rw-r--r--   kern/task.c             2
-rw-r--r--   vm/vm_kern.c            8
-rw-r--r--   vm/vm_kern.h            2
-rw-r--r--   vm/vm_map.c            62
-rw-r--r--   vm/vm_map.h             8
7 files changed, 15 insertions, 71 deletions
diff --git a/device/ds_routines.c b/device/ds_routines.c
index 6b6dcb03..445e7ae1 100644
--- a/device/ds_routines.c
+++ b/device/ds_routines.c
@@ -1532,7 +1532,7 @@ void mach_device_init(void)
simple_lock_init(&io_done_list_lock);
kmem_submap(device_io_map, kernel_map, &device_io_min, &device_io_max,
- DEVICE_IO_MAP_SIZE, FALSE);
+ DEVICE_IO_MAP_SIZE);
/*
* If the kernel receives many device_write requests, the
diff --git a/ipc/ipc_init.c b/ipc/ipc_init.c
index 5ed800f4..8e628ad8 100644
--- a/ipc/ipc_init.c
+++ b/ipc/ipc_init.c
@@ -111,7 +111,7 @@ ipc_init(void)
vm_offset_t min, max;
kmem_submap(ipc_kernel_map, kernel_map, &min, &max,
- ipc_kernel_map_size, TRUE);
+ ipc_kernel_map_size);
ipc_host_init();
}
diff --git a/kern/task.c b/kern/task.c
index 673a4378..7dff1249 100644
--- a/kern/task.c
+++ b/kern/task.c
@@ -104,7 +104,7 @@ kern_return_t task_create(
} else {
new_task->map = vm_map_create(pmap_create(0),
round_page(VM_MIN_ADDRESS),
- trunc_page(VM_MAX_ADDRESS), TRUE);
+ trunc_page(VM_MAX_ADDRESS));
vm_map_set_name(new_task->map, new_task->name);
}
diff --git a/vm/vm_kern.c b/vm/vm_kern.c
index 9c0a20b7..81bb1531 100644
--- a/vm/vm_kern.c
+++ b/vm/vm_kern.c
@@ -778,8 +778,7 @@ kmem_submap(
vm_map_t parent,
vm_offset_t *min,
vm_offset_t *max,
- vm_size_t size,
- boolean_t pageable)
+ vm_size_t size)
{
vm_offset_t addr;
kern_return_t kr;
@@ -802,7 +801,7 @@ kmem_submap(
panic("kmem_submap");
pmap_reference(vm_map_pmap(parent));
- vm_map_setup(map, vm_map_pmap(parent), addr, addr + size, pageable);
+ vm_map_setup(map, vm_map_pmap(parent), addr, addr + size);
kr = vm_map_submap(parent, addr, addr + size, map);
if (kr != KERN_SUCCESS)
panic("kmem_submap");
@@ -821,8 +820,7 @@ void kmem_init(
vm_offset_t start,
vm_offset_t end)
{
- vm_map_setup(kernel_map, pmap_kernel(), VM_MIN_KERNEL_ADDRESS, end,
- FALSE);
+ vm_map_setup(kernel_map, pmap_kernel(), VM_MIN_KERNEL_ADDRESS, end);
/*
* Reserve virtual memory allocated up to this time.
diff --git a/vm/vm_kern.h b/vm/vm_kern.h
index fb8ac7f8..4bd89c49 100644
--- a/vm/vm_kern.h
+++ b/vm/vm_kern.h
@@ -57,7 +57,7 @@ extern kern_return_t kmem_alloc_aligned(vm_map_t, vm_offset_t *, vm_size_t);
extern void kmem_free(vm_map_t, vm_offset_t, vm_size_t);
extern void kmem_submap(vm_map_t, vm_map_t, vm_offset_t *,
- vm_offset_t *, vm_size_t, boolean_t);
+ vm_offset_t *, vm_size_t);
extern kern_return_t kmem_io_map_copyout(vm_map_t, vm_offset_t *,
vm_offset_t *, vm_size_t *,
diff --git a/vm/vm_map.c b/vm/vm_map.c
index e73a124a..f52e7c76 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -175,13 +175,11 @@ void vm_map_setup(
vm_map_t map,
pmap_t pmap,
vm_offset_t min,
- vm_offset_t max,
- boolean_t pageable)
+ vm_offset_t max)
{
vm_map_first_entry(map) = vm_map_to_entry(map);
vm_map_last_entry(map) = vm_map_to_entry(map);
map->hdr.nentries = 0;
- map->hdr.entries_pageable = pageable;
rbtree_init(&map->hdr.tree);
rbtree_init(&map->hdr.gap_tree);
@@ -211,8 +209,7 @@ void vm_map_setup(
vm_map_t vm_map_create(
pmap_t pmap,
vm_offset_t min,
- vm_offset_t max,
- boolean_t pageable)
+ vm_offset_t max)
{
vm_map_t result;
@@ -220,7 +217,7 @@ vm_map_t vm_map_create(
if (result == VM_MAP_NULL)
panic("vm_map_create");
- vm_map_setup(result, pmap, min, max, pageable);
+ vm_map_setup(result, pmap, min, max);
return(result);
}
@@ -2275,7 +2272,6 @@ start_pass_1:
/*
* XXXO If there are no permanent objects in the destination,
- * XXXO and the source and destination map entry caches match,
* XXXO and the destination map entry is not shared,
* XXXO then the map entries can be deleted and replaced
* XXXO with those from the copy. The following code is the
@@ -2285,8 +2281,7 @@ start_pass_1:
* XXXO to the above pass and make sure that no wiring is involved.
*/
/*
- * if (!contains_permanent_objects &&
- * copy->cpy_hdr.entries_pageable == dst_map->hdr.entries_pageable) {
+ * if (!contains_permanent_objects) {
*
* *
* * Run over copy and adjust entries. Steal code
@@ -2609,48 +2604,6 @@ kern_return_t vm_map_copyout(
}
/*
- * Since we're going to just drop the map
- * entries from the copy into the destination
- * map, they must come from the same pool.
- */
-
- if (copy->cpy_hdr.entries_pageable != dst_map->hdr.entries_pageable) {
- /*
- * Mismatches occur when dealing with the default
- * pager.
- */
- vm_map_entry_t next, new;
-
- entry = vm_map_copy_first_entry(copy);
-
- /*
- * Reinitialize the copy so that vm_map_copy_entry_link
- * will work.
- */
- copy->cpy_hdr.nentries = 0;
- copy->cpy_hdr.entries_pageable = dst_map->hdr.entries_pageable;
- rbtree_init(&copy->cpy_hdr.tree);
- rbtree_init(&copy->cpy_hdr.gap_tree);
- vm_map_copy_first_entry(copy) =
- vm_map_copy_last_entry(copy) =
- vm_map_copy_to_entry(copy);
-
- /*
- * Copy each entry.
- */
- while (entry != vm_map_copy_to_entry(copy)) {
- new = vm_map_copy_entry_create(copy);
- vm_map_entry_copy_full(new, entry);
- vm_map_copy_entry_link(copy,
- vm_map_copy_last_entry(copy),
- new);
- next = entry->vme_next;
- kmem_cache_free(&vm_map_entry_cache, (vm_offset_t) entry);
- entry = next;
- }
- }
-
- /*
* Adjust the addresses in the copy chain, and
* reset the region attributes.
*/
@@ -3205,7 +3158,6 @@ kern_return_t vm_map_copyin(
vm_map_copy_last_entry(copy) = vm_map_copy_to_entry(copy);
copy->type = VM_MAP_COPY_ENTRY_LIST;
copy->cpy_hdr.nentries = 0;
- copy->cpy_hdr.entries_pageable = TRUE;
rbtree_init(&copy->cpy_hdr.tree);
rbtree_init(&copy->cpy_hdr.gap_tree);
@@ -3522,8 +3474,7 @@ kern_return_t vm_map_copyin_object(
/*
* We drop the object into a special copy object
* that contains the object directly. These copy objects
- * are distinguished by entries_pageable == FALSE
- * and null links.
+ * are distinguished by links.
*/
copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache);
@@ -4163,8 +4114,7 @@ vm_map_t vm_map_fork(vm_map_t old_map)
new_map = vm_map_create(new_pmap,
old_map->min_offset,
- old_map->max_offset,
- old_map->hdr.entries_pageable);
+ old_map->max_offset);
for (
old_entry = vm_map_first_entry(old_map);
diff --git a/vm/vm_map.h b/vm/vm_map.h
index 9e946c5e..dad07139 100644
--- a/vm/vm_map.h
+++ b/vm/vm_map.h
@@ -153,8 +153,6 @@ struct vm_map_header {
struct rbtree gap_tree; /* Sorted tree of gap lists
for allocations */
int nentries; /* Number of entries */
- boolean_t entries_pageable;
- /* are map entries pageable? */
};
/*
@@ -380,11 +378,9 @@ MACRO_END
extern void vm_map_init(void);
/* Initialize an empty map */
-extern void vm_map_setup(vm_map_t, pmap_t, vm_offset_t, vm_offset_t,
- boolean_t);
+extern void vm_map_setup(vm_map_t, pmap_t, vm_offset_t, vm_offset_t);
/* Create an empty map */
-extern vm_map_t vm_map_create(pmap_t, vm_offset_t, vm_offset_t,
- boolean_t);
+extern vm_map_t vm_map_create(pmap_t, vm_offset_t, vm_offset_t);
/* Create a map in the image of an existing map */
extern vm_map_t vm_map_fork(vm_map_t);
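
As a usage sketch, creating a task address space with the reduced
interface now looks like the kern/task.c hunk above; the pmap_create(0)
call and the address bounds come from that hunk, while the local
variable name is illustrative:

    /* Build a new address space for a task. Map entries always come
       from the single wired cache, so no pageability flag exists. */
    vm_map_t map = vm_map_create(pmap_create(0),
                                 round_page(VM_MIN_ADDRESS),
                                 trunc_page(VM_MAX_ADDRESS));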