summaryrefslogtreecommitdiff
path: root/vm/vm_object.c
diff options
context:
space:
mode:
Diffstat (limited to 'vm/vm_object.c')
-rw-r--r--  vm/vm_object.c  232
1 files changed, 92 insertions, 140 deletions
diff --git a/vm/vm_object.c b/vm/vm_object.c
index ece3a83c..bc301288 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -59,6 +59,11 @@
#include <ddb/db_output.h>
#endif /* MACH_KDB */
+void memory_object_release(
+ ipc_port_t pager,
+ pager_request_t pager_request,
+ ipc_port_t pager_name); /* forward */
+
/*
* Virtual memory objects maintain the actual data
* associated with allocated virtual memory. A given
@@ -159,8 +164,9 @@ vm_object_t kernel_object = &kernel_object_store;
*
* The kernel may choose to terminate objects from this
* queue in order to reclaim storage. The current policy
- * is to permit a fixed maximum number of unreferenced
- * objects (vm_object_cached_max).
+ * is to let memory pressure dynamically adjust the number
+ * of unreferenced objects. The pageout daemon attempts to
+ * collect objects after removing pages from them.
*
* A simple lock (accessed by routines
* vm_object_cache_{lock,lock_try,unlock}) governs the
@@ -176,7 +182,6 @@ vm_object_t kernel_object = &kernel_object_store;
*/
queue_head_t vm_object_cached_list;
int vm_object_cached_count;
-int vm_object_cached_max = 4000; /* may be patched*/
decl_simple_lock_data(,vm_object_cached_lock_data)
@@ -298,6 +303,7 @@ void vm_object_bootstrap(void)
vm_object_template.paging_in_progress = 0;
vm_object_template.can_persist = FALSE;
+ vm_object_template.cached = FALSE;
vm_object_template.internal = TRUE;
vm_object_template.temporary = TRUE;
vm_object_template.alive = TRUE;
@@ -344,6 +350,60 @@ void vm_object_init(void)
}
/*
+ * Object cache management functions.
+ *
+ * Both the cache and the object must be locked
+ * before calling these functions.
+ */
+
+static void vm_object_cache_add(
+ vm_object_t object)
+{
+ assert(!object->cached);
+ queue_enter(&vm_object_cached_list, object, vm_object_t, cached_list);
+ vm_object_cached_count++;
+ vm_object_cached_pages_update(object->resident_page_count);
+ object->cached = TRUE;
+}
+
+static void vm_object_cache_remove(
+ vm_object_t object)
+{
+ assert(object->cached);
+ queue_remove(&vm_object_cached_list, object, vm_object_t, cached_list);
+ vm_object_cached_count--;
+ vm_object_cached_pages_update(-object->resident_page_count);
+ object->cached = FALSE;
+}
+
+void vm_object_collect(
+ register vm_object_t object)
+{
+ vm_object_unlock(object);
+
+ /*
+ * The cache lock must be acquired in the proper order.
+ */
+
+ vm_object_cache_lock();
+ vm_object_lock(object);
+
+ /*
+ * If the object was referenced while the lock was
+ * dropped, cancel the termination.
+ */
+
+ if (!vm_object_collectable(object)) {
+ vm_object_unlock(object);
+ vm_object_cache_unlock();
+ return;
+ }
+
+ vm_object_cache_remove(object);
+ vm_object_terminate(object);
+}
+
+/*
* vm_object_reference:
*
* Gets another reference to the given object.
@@ -403,103 +463,31 @@ void vm_object_deallocate(
/*
* See whether this object can persist. If so, enter
- * it in the cache, then deactivate all of its
- * pages.
+ * it in the cache.
*/
- if (object->can_persist) {
- boolean_t overflow;
-
- /*
- * Enter the object onto the queue
- * of "cached" objects. Remember whether
- * we've caused the queue to overflow,
- * as a hint.
- */
-
- queue_enter(&vm_object_cached_list, object,
- vm_object_t, cached_list);
- overflow = (++vm_object_cached_count > vm_object_cached_max);
- vm_object_cached_pages_update(object->resident_page_count);
+ if (object->can_persist && (object->resident_page_count > 0)) {
+ vm_object_cache_add(object);
vm_object_cache_unlock();
-
- vm_object_deactivate_pages(object);
vm_object_unlock(object);
+ return;
+ }
- /*
- * If we didn't overflow, or if the queue has
- * been reduced back to below the specified
- * minimum, then quit.
- */
- if (!overflow)
- return;
-
- while (TRUE) {
- vm_object_cache_lock();
- if (vm_object_cached_count <=
- vm_object_cached_max) {
- vm_object_cache_unlock();
- return;
- }
-
- /*
- * If we must trim down the queue, take
- * the first object, and proceed to
- * terminate it instead of the original
- * object. Have to wait for pager init.
- * if it's in progress.
- */
- object= (vm_object_t)
- queue_first(&vm_object_cached_list);
- vm_object_lock(object);
-
- if (!(object->pager_created &&
- !object->pager_initialized)) {
-
- /*
- * Ok to terminate, hang on to lock.
- */
- break;
- }
-
- vm_object_assert_wait(object,
- VM_OBJECT_EVENT_INITIALIZED, FALSE);
- vm_object_unlock(object);
- vm_object_cache_unlock();
- thread_block((void (*)()) 0);
-
- /*
- * Continue loop to check if cache still
- * needs to be trimmed.
- */
- }
+ if (object->pager_created &&
+ !object->pager_initialized) {
/*
- * Actually remove object from cache.
+ * Have to wait for initialization.
+ * Put reference back and retry
+ * when it's initialized.
*/
- queue_remove(&vm_object_cached_list, object,
- vm_object_t, cached_list);
- vm_object_cached_count--;
-
- assert(object->ref_count == 0);
- }
- else {
- if (object->pager_created &&
- !object->pager_initialized) {
-
- /*
- * Have to wait for initialization.
- * Put reference back and retry
- * when it's initialized.
- */
- object->ref_count++;
- vm_object_assert_wait(object,
- VM_OBJECT_EVENT_INITIALIZED, FALSE);
- vm_object_unlock(object);
- vm_object_cache_unlock();
- thread_block((void (*)()) 0);
- continue;
- }
+ object->ref_count++;
+ vm_object_assert_wait(object,
+ VM_OBJECT_EVENT_INITIALIZED, FALSE);
+ vm_object_unlock(object);
+ vm_object_cache_unlock();
+ thread_block((void (*)()) 0);
+ continue;
}
/*
@@ -630,6 +618,7 @@ void vm_object_terminate(
assert(object->ref_count == 0);
assert(object->paging_in_progress == 0);
+ assert(!object->cached);
/*
* Throw away port rights... note that they may
@@ -862,28 +851,6 @@ kern_return_t memory_object_destroy(
}
/*
- * vm_object_deactivate_pages
- *
- * Deactivate all pages in the specified object. (Keep its pages
- * in memory even though it is no longer referenced.)
- *
- * The object must be locked.
- */
-void vm_object_deactivate_pages(
- vm_object_t object)
-{
- vm_page_t p;
-
- queue_iterate(&object->memq, p, vm_page_t, listq) {
- vm_page_lock_queues();
- if (!p->busy)
- vm_page_deactivate(p);
- vm_page_unlock_queues();
- }
-}
-
-
-/*
* Routine: vm_object_pmap_protect
*
* Purpose:
@@ -1837,12 +1804,8 @@ vm_object_t vm_object_lookup(
assert(object->alive);
- if (object->ref_count == 0) {
- queue_remove(&vm_object_cached_list, object,
- vm_object_t, cached_list);
- vm_object_cached_count--;
- vm_object_cached_pages_update(-object->resident_page_count);
- }
+ if (object->ref_count == 0)
+ vm_object_cache_remove(object);
object->ref_count++;
vm_object_unlock(object);
@@ -1869,12 +1832,8 @@ vm_object_t vm_object_lookup_name(
assert(object->alive);
- if (object->ref_count == 0) {
- queue_remove(&vm_object_cached_list, object,
- vm_object_t, cached_list);
- vm_object_cached_count--;
- vm_object_cached_pages_update(-object->resident_page_count);
- }
+ if (object->ref_count == 0)
+ vm_object_cache_remove(object);
object->ref_count++;
vm_object_unlock(object);
@@ -1906,12 +1865,8 @@ void vm_object_destroy(
object = (vm_object_t) pager->ip_kobject;
vm_object_lock(object);
- if (object->ref_count == 0) {
- queue_remove(&vm_object_cached_list, object,
- vm_object_t, cached_list);
- vm_object_cached_count--;
- vm_object_cached_pages_update(-object->resident_page_count);
- }
+ if (object->ref_count == 0)
+ vm_object_cache_remove(object);
object->ref_count++;
object->can_persist = FALSE;
@@ -2059,12 +2014,8 @@ restart:
if ((object != VM_OBJECT_NULL) && !must_init) {
vm_object_lock(object);
- if (object->ref_count == 0) {
- queue_remove(&vm_object_cached_list, object,
- vm_object_t, cached_list);
- vm_object_cached_count--;
- vm_object_cached_pages_update(-object->resident_page_count);
- }
+ if (object->ref_count == 0)
+ vm_object_cache_remove(object);
object->ref_count++;
vm_object_unlock(object);
@@ -2573,6 +2524,7 @@ void vm_object_collapse(
);
assert(backing_object->alive);
+ assert(!backing_object->cached);
backing_object->alive = FALSE;
vm_object_unlock(backing_object);
@@ -2701,7 +2653,7 @@ void vm_object_page_remove(
* It balances vm_object_lookup vs iteration.
*/
- if (atop(end - start) < (unsigned)object->resident_page_count/16) {
+ if (atop(end - start) < object->resident_page_count/16) {
vm_object_page_remove_lookup++;
for (; start < end; start += PAGE_SIZE) {
@@ -2928,7 +2880,7 @@ void vm_object_print(
(vm_offset_t) object, (vm_offset_t) object->size,
object->ref_count);
printf("\n");
- iprintf("%d resident pages,", object->resident_page_count);
+ iprintf("%lu resident pages,", object->resident_page_count);
printf(" %d absent pages,", object->absent_count);
printf(" %d paging ops\n", object->paging_in_progress);
indent += 1;