author    Richard Braun <rbraun@sceen.net>  2016-09-20 22:59:42 +0200
committer Richard Braun <rbraun@sceen.net>  2016-09-21 00:21:08 +0200
commit    783ad37f65384994dfa5387ab3847a8a4d77b90b (patch)
tree      b79f445a9fe53680cca9484fc3e02a32d6859e5c
parent    38aca37c00548f9b31bf17e74ab4a36c73521782 (diff)
Redefine what an external page is
Instead of a "page considered external", which apparently takes into account whether a page is dirty or not, redefine this property to reliably mean "is in an external object". This commit mostly deals with the impact of this change on the page allocation interface. * i386/intel/pmap.c (pmap_page_table_page_alloc): Update call to vm_page_grab. * kern/slab.c (kmem_pagealloc_physmem): Use vm_page_grab instead of vm_page_grab_contig. (kmem_pagefree_physmem): Use vm_page_release instead of vm_page_free_contig. * linux/dev/glue/block.c (alloc_buffer, device_read): Update call to vm_page_grab. * vm/vm_fault.c (vm_fault_page): Update calls to vm_page_grab and vm_page_convert. * vm/vm_map.c (vm_map_copy_steal_pages): Update call to vm_page_grab. * vm/vm_page.h (struct vm_page): Remove `extcounted' member. (vm_page_external_limit, vm_page_external_count): Remove extern declarations. (vm_page_convert, vm_page_grab): Update declarations. (vm_page_release, vm_page_grab_phys_addr): New function declarations. * vm/vm_pageout.c (VM_PAGE_EXTERNAL_LIMIT): Remove macro. (VM_PAGE_EXTERNAL_TARGET): Likewise. (vm_page_external_target): Remove variable. (vm_pageout_scan): Remove specific handling of external pages. (vm_pageout): Don't set vm_page_external_limit and vm_page_external_target. * vm/vm_resident.c (vm_page_external_limit): Remove variable. (vm_page_insert, vm_page_replace, vm_page_remove): Update external page tracking. (vm_page_convert): RemoveĀ `external' parameter. (vm_page_grab): Likewise. Remove specific handling of external pages. (vm_page_grab_phys_addr): Update call to vm_page_grab. (vm_page_release): Remove `external' parameter and remove specific handling of external pages. (vm_page_wait): Remove specific handling of external pages. (vm_page_alloc): Update call to vm_page_grab. (vm_page_free): Update call to vm_page_release. * xen/block.c (device_read): Update call to vm_page_grab. * xen/net.c (device_write): Likewise.
-rw-r--r--  i386/intel/pmap.c       |  2
-rw-r--r--  kern/slab.c             |  4
-rw-r--r--  linux/dev/glue/block.c  |  4
-rw-r--r--  vm/vm_fault.c           |  8
-rw-r--r--  vm/vm_map.c             |  2
-rw-r--r--  vm/vm_page.h            | 19
-rw-r--r--  vm/vm_pageout.c         | 41
-rw-r--r--  vm/vm_resident.c        | 66
-rw-r--r--  xen/block.c             |  2
-rw-r--r--  xen/net.c               |  2
10 files changed, 47 insertions(+), 103 deletions(-)
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index bfc5d76e..096e6fd0 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -1036,7 +1036,7 @@ pmap_page_table_page_alloc(void)
/*
* Allocate a VM page for the level 2 page table entries.
*/
- while ((m = vm_page_grab(FALSE)) == VM_PAGE_NULL)
+ while ((m = vm_page_grab()) == VM_PAGE_NULL)
VM_PAGE_WAIT((void (*)()) 0);
/*
diff --git a/kern/slab.c b/kern/slab.c
index 1f8e0005..9d21c428 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -370,7 +370,7 @@ kmem_pagealloc_physmem(vm_size_t size)
assert(size == PAGE_SIZE);
for (;;) {
- page = vm_page_grab_contig(size, VM_PAGE_SEL_DIRECTMAP);
+ page = vm_page_grab();
if (page != NULL)
break;
@@ -389,7 +389,7 @@ kmem_pagefree_physmem(vm_offset_t addr, vm_size_t size)
assert(size == PAGE_SIZE);
page = vm_page_lookup_pa(kvtophys(addr));
assert(page != NULL);
- vm_page_free_contig(page, size);
+ vm_page_release(page);
}
static vm_offset_t
diff --git a/linux/dev/glue/block.c b/linux/dev/glue/block.c
index 74126ebe..2ecd1b4c 100644
--- a/linux/dev/glue/block.c
+++ b/linux/dev/glue/block.c
@@ -302,7 +302,7 @@ alloc_buffer (int size)
if (! linux_auto_config)
{
- while ((m = vm_page_grab (FALSE)) == 0)
+ while ((m = vm_page_grab ()) == 0)
VM_PAGE_WAIT (0);
d = current_thread ()->pcb->data;
assert (d);
@@ -1483,7 +1483,7 @@ device_read (void *d, ipc_port_t reply_port,
/* Allocate and map pages. */
while (alloc_offset < trunc_page (offset) + len)
{
- while ((m = vm_page_grab (FALSE)) == 0)
+ while ((m = vm_page_grab ()) == 0)
VM_PAGE_WAIT (0);
assert (! m->active && ! m->inactive);
m->busy = TRUE;
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
index 68afc59d..99381efd 100644
--- a/vm/vm_fault.c
+++ b/vm/vm_fault.c
@@ -423,7 +423,7 @@ vm_fault_return_t vm_fault_page(
* need to allocate a real page.
*/
- real_m = vm_page_grab(!object->internal);
+ real_m = vm_page_grab();
if (real_m == VM_PAGE_NULL) {
vm_fault_cleanup(object, first_m);
return(VM_FAULT_MEMORY_SHORTAGE);
@@ -607,7 +607,7 @@ vm_fault_return_t vm_fault_page(
* won't block for pages.
*/
- if (m->fictitious && !vm_page_convert(&m, FALSE)) {
+ if (m->fictitious && !vm_page_convert(&m)) {
VM_PAGE_FREE(m);
vm_fault_cleanup(object, first_m);
return(VM_FAULT_MEMORY_SHORTAGE);
@@ -725,7 +725,7 @@ vm_fault_return_t vm_fault_page(
assert(m->object == object);
first_m = VM_PAGE_NULL;
- if (m->fictitious && !vm_page_convert(&m, !object->internal)) {
+ if (m->fictitious && !vm_page_convert(&m)) {
VM_PAGE_FREE(m);
vm_fault_cleanup(object, VM_PAGE_NULL);
return(VM_FAULT_MEMORY_SHORTAGE);
@@ -810,7 +810,7 @@ vm_fault_return_t vm_fault_page(
/*
* Allocate a page for the copy
*/
- copy_m = vm_page_grab(!first_object->internal);
+ copy_m = vm_page_grab();
if (copy_m == VM_PAGE_NULL) {
RELEASE_PAGE(m);
vm_fault_cleanup(object, first_m);
diff --git a/vm/vm_map.c b/vm/vm_map.c
index b1c1b4e0..249d18a4 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -1921,7 +1921,7 @@ vm_map_copy_steal_pages(vm_map_copy_t copy)
* Page was not stolen, get a new
* one and do the copy now.
*/
- while ((new_m = vm_page_grab(FALSE)) == VM_PAGE_NULL) {
+ while ((new_m = vm_page_grab()) == VM_PAGE_NULL) {
VM_PAGE_WAIT((void(*)()) 0);
}
diff --git a/vm/vm_page.h b/vm/vm_page.h
index 3a1452a7..164ab6d4 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -109,8 +109,7 @@ struct vm_page {
laundry:1, /* page is being cleaned now (P)*/
free:1, /* page is on free list (P) */
reference:1, /* page has been used (P) */
- external:1, /* page considered external (P) */
- extcounted:1, /* page counted in ext counts (P) */
+ external:1, /* page in external object (P) */
busy:1, /* page is in transit (O) */
wanted:1, /* someone is waiting for page (O) */
tabled:1, /* page is in VP table (O) */
@@ -180,16 +179,6 @@ extern
int vm_page_free_reserved; /* How many pages reserved to do pageout */
extern
int vm_page_laundry_count; /* How many pages being laundered? */
-extern
-int vm_page_external_limit; /* Max number of pages for external objects */
-
-/* Only objects marked with the extcounted bit are included in this total.
- Pages which we scan for possible pageout, but which are not actually
- dirty, don't get considered against the external page limits any more
- in this way. */
-extern
-int vm_page_external_count; /* How many pages for external objects? */
-
decl_simple_lock_data(extern,vm_page_queue_lock)/* lock on active and inactive
@@ -212,9 +201,11 @@ extern vm_page_t vm_page_lookup(
vm_object_t object,
vm_offset_t offset);
extern vm_page_t vm_page_grab_fictitious(void);
-extern boolean_t vm_page_convert(vm_page_t *, boolean_t);
+extern boolean_t vm_page_convert(vm_page_t *);
extern void vm_page_more_fictitious(void);
-extern vm_page_t vm_page_grab(boolean_t);
+extern vm_page_t vm_page_grab(void);
+extern void vm_page_release(vm_page_t);
+extern phys_addr_t vm_page_grab_phys_addr(void);
extern vm_page_t vm_page_grab_contig(vm_size_t, unsigned int);
extern void vm_page_free_contig(vm_page_t, vm_size_t);
extern void vm_page_wait(void (*)(void));
diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
index e0bd9880..a36c9905 100644
--- a/vm/vm_pageout.c
+++ b/vm/vm_pageout.c
@@ -109,21 +109,6 @@
#define VM_PAGE_FREE_MIN(free) (100 + (free) * 8 / 100)
#endif /* VM_PAGE_FREE_MIN */
-/* When vm_page_external_count exceeds vm_page_external_limit,
- * allocations of externally paged pages stops.
- */
-
-#ifndef VM_PAGE_EXTERNAL_LIMIT
-#define VM_PAGE_EXTERNAL_LIMIT(free) ((free) / 2)
-#endif /* VM_PAGE_EXTERNAL_LIMIT */
-
-/* Attempt to keep the number of externally paged pages less
- * than vm_pages_external_target.
- */
-#ifndef VM_PAGE_EXTERNAL_TARGET
-#define VM_PAGE_EXTERNAL_TARGET(free) ((free) / 4)
-#endif /* VM_PAGE_EXTERNAL_TARGET */
-
/*
* When the number of free pages falls below vm_page_free_reserved,
* only vm-privileged threads can allocate pages. vm-privilege
@@ -162,8 +147,6 @@
unsigned int vm_pageout_reserved_internal = 0;
unsigned int vm_pageout_reserved_really = 0;
-unsigned int vm_page_external_target = 0;
-
unsigned int vm_pageout_burst_max = 0;
unsigned int vm_pageout_burst_min = 0;
unsigned int vm_pageout_burst_wait = 0; /* milliseconds per page */
@@ -633,7 +616,6 @@ void vm_pageout_scan(void)
simple_lock(&vm_page_queue_free_lock);
free_count = vm_page_mem_free();
if ((free_count >= vm_page_free_target) &&
- (vm_page_external_count <= vm_page_external_target) &&
(vm_page_free_wanted == 0)) {
vm_page_unlock_queues();
break;
@@ -783,21 +765,6 @@ void vm_pageout_scan(void)
if (!m->dirty)
m->dirty = pmap_is_modified(m->phys_addr);
- if (m->external) {
- /* Figure out if we still care about this
- page in the limit of externally managed pages.
- Clean pages don't actually cause system hosage,
- so it's ok to stop considering them as
- "consumers" of memory. */
- if (m->dirty && !m->extcounted) {
- m->extcounted = TRUE;
- vm_page_external_count++;
- } else if (!m->dirty && m->extcounted) {
- m->extcounted = FALSE;
- vm_page_external_count--;
- }
- }
-
/* If we don't actually need more memory, and the page
is not dirty, put it on the tail of the inactive queue
and move on to the next page. */
@@ -954,14 +921,6 @@ void vm_pageout(void)
free_after_reserve = vm_page_mem_free() - vm_page_free_reserved;
- if (vm_page_external_limit == 0)
- vm_page_external_limit =
- VM_PAGE_EXTERNAL_LIMIT (free_after_reserve);
-
- if (vm_page_external_target == 0)
- vm_page_external_target =
- VM_PAGE_EXTERNAL_TARGET (free_after_reserve);
-
if (vm_page_free_min == 0)
vm_page_free_min = vm_page_free_reserved +
VM_PAGE_FREE_MIN(free_after_reserve);
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index fdd25912..eac0f50c 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -154,7 +154,6 @@ int vm_page_free_min = 0;
int vm_page_inactive_target = 0;
int vm_page_free_reserved = 0;
int vm_page_laundry_count = 0;
-int vm_page_external_limit = 0;
/*
@@ -342,6 +341,14 @@ void vm_page_insert(
VM_PAGE_CHECK(mem);
+ assert(!mem->active && !mem->inactive);
+ assert(!mem->external);
+
+ if (!object->internal) {
+ mem->external = TRUE;
+ vm_object_external_pages++;
+ }
+
if (mem->tabled)
panic("vm_page_insert");
@@ -390,10 +397,6 @@ void vm_page_insert(
vm_page_deactivate(last_mem);
}
object->last_alloc = offset;
-
- if (!object->internal) {
- vm_object_external_pages++;
- }
}
/*
@@ -415,6 +418,14 @@ void vm_page_replace(
VM_PAGE_CHECK(mem);
+ assert(!mem->active && !mem->inactive);
+ assert(!mem->external);
+
+ if (!object->internal) {
+ mem->external = TRUE;
+ vm_object_external_pages++;
+ }
+
if (mem->tabled)
panic("vm_page_replace");
@@ -447,7 +458,8 @@ void vm_page_replace(
m->tabled = FALSE;
object->resident_page_count--;
- if (!object->internal) {
+ if (m->external) {
+ m->external = FALSE;
vm_object_external_pages--;
}
@@ -483,10 +495,6 @@ void vm_page_replace(
object->resident_page_count++;
assert(object->resident_page_count != 0);
-
- if (!object->internal) {
- vm_object_external_pages++;
- }
}
/*
@@ -543,7 +551,8 @@ void vm_page_remove(
mem->tabled = FALSE;
- if (!mem->object->internal) {
+ if (mem->external) {
+ mem->external = FALSE;
vm_object_external_pages--;
}
}
@@ -720,9 +729,7 @@ void vm_page_more_fictitious(void)
* The object referenced by *MP must be locked.
*/
-boolean_t vm_page_convert(
- struct vm_page **mp,
- boolean_t external)
+boolean_t vm_page_convert(struct vm_page **mp)
{
struct vm_page *real_m, *fict_m;
vm_object_t object;
@@ -735,7 +742,7 @@ boolean_t vm_page_convert(
assert(!fict_m->active);
assert(!fict_m->inactive);
- real_m = vm_page_grab(external);
+ real_m = vm_page_grab();
if (real_m == VM_PAGE_NULL)
return FALSE;
@@ -766,8 +773,7 @@ boolean_t vm_page_convert(
* Returns VM_PAGE_NULL if the free list is too small.
*/
-vm_page_t vm_page_grab(
- boolean_t external)
+vm_page_t vm_page_grab(void)
{
vm_page_t mem;
@@ -779,9 +785,7 @@ vm_page_t vm_page_grab(
* for externally-managed pages.
*/
- if (((vm_page_mem_free() < vm_page_free_reserved)
- || (external
- && (vm_page_external_count > vm_page_external_limit)))
+ if ((vm_page_mem_free() < vm_page_free_reserved)
&& !current_thread()->vm_privilege) {
simple_unlock(&vm_page_queue_free_lock);
return VM_PAGE_NULL;
@@ -794,11 +798,7 @@ vm_page_t vm_page_grab(
return NULL;
}
- if (external)
- vm_page_external_count++;
-
mem->free = FALSE;
- mem->extcounted = mem->external = external;
simple_unlock(&vm_page_queue_free_lock);
/*
@@ -822,7 +822,7 @@ vm_page_t vm_page_grab(
phys_addr_t vm_page_grab_phys_addr(void)
{
- vm_page_t p = vm_page_grab(FALSE);
+ vm_page_t p = vm_page_grab();
if (p == VM_PAGE_NULL)
return -1;
else
@@ -835,17 +835,14 @@ phys_addr_t vm_page_grab_phys_addr(void)
* Return a page to the free list.
*/
-static void vm_page_release(
- vm_page_t mem,
- boolean_t external)
+void vm_page_release(
+ vm_page_t mem)
{
simple_lock(&vm_page_queue_free_lock);
if (mem->free)
panic("vm_page_release");
mem->free = TRUE;
vm_page_free_pa(mem, 0);
- if (external)
- vm_page_external_count--;
/*
* Check if we should wake up someone waiting for page.
@@ -917,7 +914,6 @@ vm_page_t vm_page_grab_contig(
for (i = 0; i < nr_pages; i++) {
mem[i].free = FALSE;
- mem[i].extcounted = mem[i].external = 0;
}
simple_unlock(&vm_page_queue_free_lock);
@@ -994,8 +990,7 @@ void vm_page_wait(
*/
simple_lock(&vm_page_queue_free_lock);
- if ((vm_page_mem_free() < vm_page_free_target)
- || (vm_page_external_count > vm_page_external_limit)) {
+ if ((vm_page_mem_free() < vm_page_free_target)) {
if (vm_page_free_wanted++ == 0)
thread_wakeup((event_t)&vm_page_free_wanted);
assert_wait((event_t)&vm_page_free_avail, FALSE);
@@ -1026,7 +1021,7 @@ vm_page_t vm_page_alloc(
{
vm_page_t mem;
- mem = vm_page_grab(!object->internal);
+ mem = vm_page_grab();
if (mem == VM_PAGE_NULL)
return VM_PAGE_NULL;
@@ -1082,9 +1077,8 @@ void vm_page_free(
mem->fictitious = TRUE;
vm_page_release_fictitious(mem);
} else {
- int external = mem->external && mem->extcounted;
vm_page_init(mem);
- vm_page_release(mem, external);
+ vm_page_release(mem);
}
}
diff --git a/xen/block.c b/xen/block.c
index 7d6f1ca3..dc922348 100644
--- a/xen/block.c
+++ b/xen/block.c
@@ -457,7 +457,7 @@ device_read (void *d, ipc_port_t reply_port,
/* Allocate pages. */
while (alloc_offset < offset + len)
{
- while ((m = vm_page_grab (FALSE)) == 0)
+ while ((m = vm_page_grab ()) == 0)
VM_PAGE_WAIT (0);
assert (! m->active && ! m->inactive);
m->busy = TRUE;
diff --git a/xen/net.c b/xen/net.c
index 5a3f90d9..296035db 100644
--- a/xen/net.c
+++ b/xen/net.c
@@ -620,7 +620,7 @@ device_write(void *d, ipc_port_t reply_port,
offset = copy->offset & PAGE_MASK;
if (paranoia || copy->cpy_npages == 2) {
/* have to copy :/ */
- while ((m = vm_page_grab(FALSE)) == 0)
+ while ((m = vm_page_grab()) == 0)
VM_PAGE_WAIT (0);
assert (! m->active && ! m->inactive);
m->busy = TRUE;