Diffstat (limited to 'vm/vm_resident.c')
-rw-r--r--	vm/vm_resident.c	316
1 file changed, 50 insertions(+), 266 deletions(-)
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index eac0f50c..e276fe68 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -39,6 +39,7 @@
#include <mach/vm_prot.h>
#include <kern/counters.h>
#include <kern/debug.h>
+#include <kern/list.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <kern/thread.h>
@@ -95,22 +96,13 @@ vm_page_bucket_t *vm_page_buckets; /* Array of buckets */
unsigned long vm_page_bucket_count = 0; /* How big is array? */
unsigned long vm_page_hash_mask; /* Mask for hash function */
-vm_page_t vm_page_queue_fictitious;
+static struct list vm_page_queue_fictitious;
decl_simple_lock_data(,vm_page_queue_free_lock)
-unsigned int vm_page_free_wanted;
int vm_page_fictitious_count;
-int vm_page_external_count;
int vm_object_external_count;
int vm_object_external_pages;
/*
- * This variable isn't directly used. It's merely a placeholder for the
- * address used to synchronize threads waiting for pages to become
- * available. The real value is returned by vm_page_free_mem().
- */
-unsigned int vm_page_free_avail;
-
-/*
* Occasionally, the virtual memory system uses
* resident page structures that do not refer to
* real pages, for example to leave a page with
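
This hunk replaces the hand-rolled singly linked fictitious queue (threaded through pageq.next) with the doubly linked kern/list API pulled in by the new #include above. A minimal sketch of that API as this patch uses it; the demo_page structure and demo() function are hypothetical names, not part of the commit:

#include <kern/list.h>

struct demo_page {
	struct list node;	/* embedded linkage, like vm_page's node */
	int id;
};

static struct list demo_queue;

static void demo(void)
{
	struct demo_page a, b, *p;

	list_init(&demo_queue);			/* becomes an empty list */
	list_insert_head(&demo_queue, &a.node);
	list_insert_head(&demo_queue, &b.node);	/* b is now at the head */

	if (!list_empty(&demo_queue)) {
		p = list_first_entry(&demo_queue, struct demo_page, node);
		list_remove(&p->node);	/* O(1) unlink via back pointer */
	}
}
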
@@ -136,8 +128,6 @@ phys_addr_t vm_page_fictitious_addr = (phys_addr_t) -1;
* defined here, but are shared by the pageout
* module.
*/
-queue_head_t vm_page_queue_active;
-queue_head_t vm_page_queue_inactive;
decl_simple_lock_data(,vm_page_queue_lock)
int vm_page_active_count;
int vm_page_inactive_count;
@@ -149,11 +139,8 @@ int vm_page_wire_count;
* (done here in vm_page_alloc) can trigger the
* pageout daemon.
*/
-int vm_page_free_target = 0;
-int vm_page_free_min = 0;
-int vm_page_inactive_target = 0;
-int vm_page_free_reserved = 0;
int vm_page_laundry_count = 0;
+int vm_page_external_pagedout = 0;
/*
@@ -191,11 +178,7 @@ void vm_page_bootstrap(
simple_lock_init(&vm_page_queue_free_lock);
simple_lock_init(&vm_page_queue_lock);
- vm_page_queue_fictitious = VM_PAGE_NULL;
- queue_init(&vm_page_queue_active);
- queue_init(&vm_page_queue_inactive);
-
- vm_page_free_wanted = 0;
+ list_init(&vm_page_queue_fictitious);
/*
* Allocate (and initialize) the virtual-to-physical
@@ -330,6 +313,7 @@ void vm_page_module_init(void)
* table and object list.
*
* The object and page must be locked.
+ * The free page queue must not be locked.
*/
void vm_page_insert(
@@ -407,6 +391,7 @@ void vm_page_insert(
* and we don't do deactivate-behind.
*
* The object and page must be locked.
+ * The free page queue must not be locked.
*/
void vm_page_replace(
@@ -457,6 +442,7 @@ void vm_page_replace(
listq);
m->tabled = FALSE;
object->resident_page_count--;
+ VM_PAGE_QUEUES_REMOVE(m);
if (m->external) {
m->external = FALSE;
@@ -501,9 +487,10 @@ void vm_page_replace(
* vm_page_remove: [ internal use only ]
*
* Removes the given mem entry from the object/offset-page
- * table and the object page list.
+ * table, the object page list, and the page queues.
*
* The object and page must be locked.
+ * The free page queue must not be locked.
*/
void vm_page_remove(
@@ -551,6 +538,8 @@ void vm_page_remove(
mem->tabled = FALSE;
+ VM_PAGE_QUEUES_REMOVE(mem);
+
if (mem->external) {
mem->external = FALSE;
vm_object_external_pages--;
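
vm_page_replace() and vm_page_remove() now dequeue the page themselves via VM_PAGE_QUEUES_REMOVE, and their lock preconditions gain "the free page queue must not be locked", presumably because the dequeue path can take that lock itself. A hypothetical caller sketch under that assumption (lock helpers as declared in vm/vm_page.h; the exact locking discipline is not spelled out by this patch):

/*
 * Hypothetical caller of the new vm_page_remove(): hold the object
 * lock and the page queues lock, but not vm_page_queue_free_lock,
 * which the dequeue path is assumed to acquire on its own.
 */
vm_object_lock(object);
vm_page_lock_queues();
vm_page_remove(mem);		/* unlinks from hash, object, and queues */
vm_page_unlock_queues();
vm_object_unlock(object);
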
@@ -665,11 +654,15 @@ vm_page_t vm_page_grab_fictitious(void)
vm_page_t m;
simple_lock(&vm_page_queue_free_lock);
- m = vm_page_queue_fictitious;
- if (m != VM_PAGE_NULL) {
- vm_page_fictitious_count--;
- vm_page_queue_fictitious = (vm_page_t) m->pageq.next;
+ if (list_empty(&vm_page_queue_fictitious)) {
+ m = VM_PAGE_NULL;
+ } else {
+ m = list_first_entry(&vm_page_queue_fictitious,
+ struct vm_page, node);
+ assert(m->fictitious);
+ list_remove(&m->node);
m->free = FALSE;
+ vm_page_fictitious_count--;
}
simple_unlock(&vm_page_queue_free_lock);
@@ -689,8 +682,7 @@ static void vm_page_release_fictitious(
if (m->free)
panic("vm_page_release_fictitious");
m->free = TRUE;
- m->pageq.next = (queue_entry_t) vm_page_queue_fictitious;
- vm_page_queue_fictitious = m;
+ list_insert_head(&vm_page_queue_fictitious, &m->node);
vm_page_fictitious_count++;
simple_unlock(&vm_page_queue_free_lock);
}
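
vm_page_grab_fictitious() keeps its contract of returning VM_PAGE_NULL when the queue is empty, so callers still need a fallback. A sketch of the usual retry loop; treating vm_page_more_fictitious() as the refill path is an assumption here:

vm_page_t m;

for (;;) {
	m = vm_page_grab_fictitious();
	if (m != VM_PAGE_NULL)
		break;
	/* Queue exhausted: create more fictitious pages and retry
	 * (assumed refill path). */
	vm_page_more_fictitious();
}
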
@@ -779,18 +771,6 @@ vm_page_t vm_page_grab(void)
simple_lock(&vm_page_queue_free_lock);
- /*
- * Only let privileged threads (involved in pageout)
- * dip into the reserved pool or exceed the limit
- * for externally-managed pages.
- */
-
- if ((vm_page_mem_free() < vm_page_free_reserved)
- && !current_thread()->vm_privilege) {
- simple_unlock(&vm_page_queue_free_lock);
- return VM_PAGE_NULL;
- }
-
mem = vm_page_alloc_pa(0, VM_PAGE_SEL_DIRECTMAP, VM_PT_KERNEL);
if (mem == NULL) {
@@ -801,22 +781,6 @@ vm_page_t vm_page_grab(void)
mem->free = FALSE;
simple_unlock(&vm_page_queue_free_lock);
- /*
- * Decide if we should poke the pageout daemon.
- * We do this if the free count is less than the low
- * water mark, or if the free count is less than the high
- * water mark (but above the low water mark) and the inactive
- * count is less than its target.
- *
- * We don't have the counts locked ... if they change a little,
- * it doesn't really matter.
- */
-
- if ((vm_page_mem_free() < vm_page_free_min) ||
- ((vm_page_mem_free() < vm_page_free_target) &&
- (vm_page_inactive_count < vm_page_inactive_target)))
- thread_wakeup((event_t) &vm_page_free_wanted);
-
return mem;
}
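
With the reserved-pool check and the pageout-daemon wakeup heuristic deleted, vm_page_grab() reduces to a locked call into the physical page allocator. Reassembled from the hunks above; the body of the NULL branch is elided by the diff, so its unlock-and-fail shape is an assumption:

vm_page_t vm_page_grab(void)
{
	vm_page_t	mem;

	simple_lock(&vm_page_queue_free_lock);

	mem = vm_page_alloc_pa(0, VM_PAGE_SEL_DIRECTMAP, VM_PT_KERNEL);

	if (mem == NULL) {
		/* Elided in the diff; assumed to unlock and fail. */
		simple_unlock(&vm_page_queue_free_lock);
		return VM_PAGE_NULL;
	}

	mem->free = FALSE;
	simple_unlock(&vm_page_queue_free_lock);

	return mem;
}
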
@@ -836,38 +800,37 @@ phys_addr_t vm_page_grab_phys_addr(void)
*/
void vm_page_release(
- vm_page_t mem)
+ vm_page_t mem,
+ boolean_t laundry,
+ boolean_t external)
{
simple_lock(&vm_page_queue_free_lock);
if (mem->free)
panic("vm_page_release");
mem->free = TRUE;
vm_page_free_pa(mem, 0);
+ if (laundry) {
+ vm_page_laundry_count--;
- /*
- * Check if we should wake up someone waiting for page.
- * But don't bother waking them unless they can allocate.
- *
- * We wakeup only one thread, to prevent starvation.
- * Because the scheduling system handles wait queues FIFO,
- * if we wakeup all waiting threads, one greedy thread
- * can starve multiple niceguy threads. When the threads
- * all wakeup, the greedy threads runs first, grabs the page,
- * and waits for another page. It will be the first to run
- * when the next page is freed.
- *
- * However, there is a slight danger here.
- * The thread we wake might not use the free page.
- * Then the other threads could wait indefinitely
- * while the page goes unused. To forestall this,
- * the pageout daemon will keep making free pages
- * as long as vm_page_free_wanted is non-zero.
- */
+ if (vm_page_laundry_count == 0) {
+ vm_pageout_resume();
+ }
+ }
+ if (external) {
+
+ /*
+ * If vm_page_external_pagedout is negative,
+ * the pageout daemon isn't expecting to be
+ * notified.
+ */
+
+ if (vm_page_external_pagedout > 0) {
+ vm_page_external_pagedout--;
+ }
- if ((vm_page_free_wanted > 0) &&
- (vm_page_mem_free() >= vm_page_free_reserved)) {
- vm_page_free_wanted--;
- thread_wakeup_one((event_t) &vm_page_free_avail);
+ if (vm_page_external_pagedout == 0) {
+ vm_pageout_resume();
+ }
}
simple_unlock(&vm_page_queue_free_lock);
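
Stripped of diff markers, the new accounting at the tail of vm_page_release() reads as follows (a restatement of the hunk above, not new behavior): releasing the last laundry page, or the last external page the pageout daemon expects, resumes the daemon.

if (laundry) {
	vm_page_laundry_count--;

	if (vm_page_laundry_count == 0) {
		/* Last laundry page: let the pageout daemon continue. */
		vm_pageout_resume();
	}
}

if (external) {
	/*
	 * A negative vm_page_external_pagedout means the pageout
	 * daemon isn't expecting a notification, so the counter is
	 * never driven below zero.
	 */
	if (vm_page_external_pagedout > 0) {
		vm_page_external_pagedout--;
	}

	if (vm_page_external_pagedout == 0) {
		vm_pageout_resume();
	}
}
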
@@ -892,18 +855,6 @@ vm_page_t vm_page_grab_contig(
simple_lock(&vm_page_queue_free_lock);
- /*
- * Only let privileged threads (involved in pageout)
- * dip into the reserved pool or exceed the limit
- * for externally-managed pages.
- */
-
- if (((vm_page_mem_free() - nr_pages) <= vm_page_free_reserved)
- && !current_thread()->vm_privilege) {
- simple_unlock(&vm_page_queue_free_lock);
- return VM_PAGE_NULL;
- }
-
/* TODO Allow caller to pass type */
mem = vm_page_alloc_pa(order, selector, VM_PT_KERNEL);
@@ -918,22 +869,6 @@ vm_page_t vm_page_grab_contig(
simple_unlock(&vm_page_queue_free_lock);
- /*
- * Decide if we should poke the pageout daemon.
- * We do this if the free count is less than the low
- * water mark, or if the free count is less than the high
- * water mark (but above the low water mark) and the inactive
- * count is less than its target.
- *
- * We don't have the counts locked ... if they change a little,
- * it doesn't really matter.
- */
-
- if ((vm_page_mem_free() < vm_page_free_min) ||
- ((vm_page_mem_free() < vm_page_free_target) &&
- (vm_page_inactive_count < vm_page_inactive_target)))
- thread_wakeup((event_t) &vm_page_free_wanted);
-
return mem;
}
@@ -961,52 +896,10 @@ void vm_page_free_contig(vm_page_t mem, vm_size_t size)
vm_page_free_pa(mem, order);
- if ((vm_page_free_wanted > 0) &&
- (vm_page_mem_free() >= vm_page_free_reserved)) {
- vm_page_free_wanted--;
- thread_wakeup_one((event_t) &vm_page_free_avail);
- }
-
simple_unlock(&vm_page_queue_free_lock);
}
/*
- * vm_page_wait:
- *
- * Wait for a page to become available.
- * If there are plenty of free pages, then we don't sleep.
- */
-
-void vm_page_wait(
- void (*continuation)(void))
-{
-
- /*
- * We can't use vm_page_free_reserved to make this
- * determination. Consider: some thread might
- * need to allocate two pages. The first allocation
- * succeeds, the second fails. After the first page is freed,
- * a call to vm_page_wait must really block.
- */
-
- simple_lock(&vm_page_queue_free_lock);
- if ((vm_page_mem_free() < vm_page_free_target)) {
- if (vm_page_free_wanted++ == 0)
- thread_wakeup((event_t)&vm_page_free_wanted);
- assert_wait((event_t)&vm_page_free_avail, FALSE);
- simple_unlock(&vm_page_queue_free_lock);
- if (continuation != 0) {
- counter(c_vm_page_wait_block_user++);
- thread_block(continuation);
- } else {
- counter(c_vm_page_wait_block_kernel++);
- thread_block((void (*)(void)) 0);
- }
- } else
- simple_unlock(&vm_page_queue_free_lock);
-}
-
-/*
* vm_page_alloc:
*
* Allocate and return a memory cell associated
@@ -1046,9 +939,11 @@ void vm_page_free(
if (mem->free)
panic("vm_page_free");
- if (mem->tabled)
+ if (mem->tabled) {
vm_page_remove(mem);
- VM_PAGE_QUEUES_REMOVE(mem);
+ }
+
+ assert(!mem->active && !mem->inactive);
if (mem->wire_count != 0) {
if (!mem->private && !mem->fictitious)
@@ -1056,11 +951,6 @@ void vm_page_free(
mem->wire_count = 0;
}
- if (mem->laundry) {
- vm_page_laundry_count--;
- mem->laundry = FALSE;
- }
-
PAGE_WAKEUP_DONE(mem);
if (mem->absent)
@@ -1077,116 +967,10 @@ void vm_page_free(
mem->fictitious = TRUE;
vm_page_release_fictitious(mem);
} else {
+ boolean_t laundry = mem->laundry;
+ boolean_t external = mem->external;
vm_page_init(mem);
- vm_page_release(mem);
- }
-}
-
-/*
- * vm_page_wire:
- *
- * Mark this page as wired down by yet
- * another map, removing it from paging queues
- * as necessary.
- *
- * The page's object and the page queues must be locked.
- */
-void vm_page_wire(
- vm_page_t mem)
-{
- VM_PAGE_CHECK(mem);
-
- if (mem->wire_count == 0) {
- VM_PAGE_QUEUES_REMOVE(mem);
- if (!mem->private && !mem->fictitious)
- vm_page_wire_count++;
- }
- mem->wire_count++;
-}
-
-/*
- * vm_page_unwire:
- *
- * Release one wiring of this page, potentially
- * enabling it to be paged again.
- *
- * The page's object and the page queues must be locked.
- */
-void vm_page_unwire(
- vm_page_t mem)
-{
- VM_PAGE_CHECK(mem);
-
- if (--mem->wire_count == 0) {
- queue_enter(&vm_page_queue_active, mem, vm_page_t, pageq);
- vm_page_active_count++;
- mem->active = TRUE;
- if (!mem->private && !mem->fictitious)
- vm_page_wire_count--;
- }
-}
-
-/*
- * vm_page_deactivate:
- *
- * Returns the given page to the inactive list,
- * indicating that no physical maps have access
- * to this page. [Used by the physical mapping system.]
- *
- * The page queues must be locked.
- */
-void vm_page_deactivate(
- vm_page_t m)
-{
- VM_PAGE_CHECK(m);
-
- /*
- * This page is no longer very interesting. If it was
- * interesting (active or inactive/referenced), then we
- * clear the reference bit and (re)enter it in the
- * inactive queue. Note wired pages should not have
- * their reference bit cleared.
- */
-
- if (m->active || (m->inactive && m->reference)) {
- if (!m->fictitious && !m->absent)
- pmap_clear_reference(m->phys_addr);
- m->reference = FALSE;
- VM_PAGE_QUEUES_REMOVE(m);
- }
- if (m->wire_count == 0 && !m->inactive) {
- queue_enter(&vm_page_queue_inactive, m, vm_page_t, pageq);
- m->inactive = TRUE;
- vm_page_inactive_count++;
- }
-}
-
-/*
- * vm_page_activate:
- *
- * Put the specified page on the active list (if appropriate).
- *
- * The page queues must be locked.
- */
-
-void vm_page_activate(
- vm_page_t m)
-{
- VM_PAGE_CHECK(m);
-
- if (m->inactive) {
- queue_remove(&vm_page_queue_inactive, m, vm_page_t,
- pageq);
- vm_page_inactive_count--;
- m->inactive = FALSE;
- }
- if (m->wire_count == 0) {
- if (m->active)
- panic("vm_page_activate: already active");
-
- queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
- m->active = TRUE;
- vm_page_active_count++;
+ vm_page_release(mem, laundry, external);
}
}
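
The closing hunk also drops vm_page_wire(), vm_page_unwire(), vm_page_deactivate() and vm_page_activate() from this file, and reworks the tail of vm_page_free(): because vm_page_init() resets the page's flag bits, the laundry and external bits are sampled first and forwarded to vm_page_release() explicitly. Reassembled from the hunk:

} else {
	/* Sample the flags before vm_page_init() resets them. */
	boolean_t laundry = mem->laundry;
	boolean_t external = mem->external;

	vm_page_init(mem);
	vm_page_release(mem, laundry, external);
}
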