path: root/vm/vm_page.h
Diffstat (limited to 'vm/vm_page.h')
-rw-r--r--  vm/vm_page.h | 114
1 file changed, 62 insertions(+), 52 deletions(-)
diff --git a/vm/vm_page.h b/vm/vm_page.h
index 164ab6d4..eb684c1b 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -40,6 +40,7 @@
#include <vm/vm_object.h>
#include <vm/vm_types.h>
#include <kern/queue.h>
+#include <kern/list.h>
#include <kern/lock.h>
#include <kern/log2.h>
@@ -77,8 +78,7 @@
*/
struct vm_page {
- /* Members used in the vm_page module only */
- struct list node;
+ struct list node; /* page queues or free list (P) */
unsigned short type;
unsigned short seg_index;
unsigned short order;
@@ -90,15 +90,13 @@ struct vm_page {
*/
phys_addr_t phys_addr;
+ queue_chain_t listq; /* all pages in same object (O) */
+ struct vm_page *next; /* VP bucket link (O) */
+
/* We use an empty struct as the delimiter. */
struct {} vm_page_header;
#define VM_PAGE_HEADER_SIZE offsetof(struct vm_page, vm_page_header)
- queue_chain_t pageq; /* queue info for FIFO
- * queue or free list (P) */
- queue_chain_t listq; /* all pages in same object (O) */
- struct vm_page *next; /* VP bucket link (O) */
-
vm_object_t object; /* which object am I in (O,P) */
vm_offset_t offset; /* offset into that object (O,P) */
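
The hunk above replaces the old pageq link with the allocator's node member and moves listq and next in front of the empty vm_page_header delimiter. VM_PAGE_HEADER_SIZE, computed with offsetof, gives the size of everything placed before the delimiter, which lets initialization code reset the remaining, Mach-specific part of the structure in one memset. A minimal sketch of that pattern, assuming a hypothetical helper name (the real code lives in vm/vm_page.c and may differ):

/*
 * Illustration only: wipe the members that follow vm_page_header while
 * leaving the fields before the delimiter (node, type, seg_index,
 * order, phys_addr, listq, next) untouched.  memset comes from the
 * kernel's string.h.
 */
static void
vm_page_init_mach_part(struct vm_page *page)    /* hypothetical name */
{
    memset(&page->vm_page_header, 0,
           sizeof(*page) - VM_PAGE_HEADER_SIZE);
}
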
@@ -136,7 +134,9 @@ struct vm_page {
* some useful check on a page structure.
*/
-#define VM_PAGE_CHECK(mem)
+#define VM_PAGE_CHECK(mem) vm_page_check(mem)
+
+void vm_page_check(const struct vm_page *page);
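
With this hunk, VM_PAGE_CHECK goes from an empty macro to a call into a real consistency-check routine. The body of vm_page_check() is not part of this header; the following is only a sketch of the kind of sanity check such a routine might perform, using fields and constants visible in this file (the specific assertion is an assumption, not the actual implementation):

void
vm_page_check(const struct vm_page *page)
{
    /*
     * Example check: a page still marked free in the physical
     * allocator must never reach the VM object layer.
     */
    if (vm_page_type(page) == VM_PT_FREE)
        panic("vm_page_check: page is on the free list");
}
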
/*
* Each pageable resident page falls into one of three lists:
@@ -155,13 +155,6 @@ struct vm_page {
*/
extern
-vm_page_t vm_page_queue_fictitious; /* fictitious free queue */
-extern
-queue_head_t vm_page_queue_active; /* active memory queue */
-extern
-queue_head_t vm_page_queue_inactive; /* inactive memory queue */
-
-extern
int vm_page_fictitious_count;/* How many fictitious pages are free? */
extern
int vm_page_active_count; /* How many pages are active? */
@@ -170,25 +163,15 @@ int vm_page_inactive_count; /* How many pages are inactive? */
extern
int vm_page_wire_count; /* How many pages are wired? */
extern
-int vm_page_free_target; /* How many do we want free? */
-extern
-int vm_page_free_min; /* When to wakeup pageout */
-extern
-int vm_page_inactive_target;/* How many do we want inactive? */
-extern
-int vm_page_free_reserved; /* How many pages reserved to do pageout */
-extern
int vm_page_laundry_count; /* How many pages being laundered? */
-
+extern
+int vm_page_external_pagedout; /* How many external pages are being paged out? */
decl_simple_lock_data(extern,vm_page_queue_lock)/* lock on active and inactive
page queues */
decl_simple_lock_data(extern,vm_page_queue_free_lock)
/* lock on free page queue */
-extern unsigned int vm_page_free_wanted;
- /* how many threads are waiting for memory */
-
extern phys_addr_t vm_page_fictitious_addr;
/* (fake) phys_addr of fictitious pages */
@@ -204,7 +187,7 @@ extern vm_page_t vm_page_grab_fictitious(void);
extern boolean_t vm_page_convert(vm_page_t *);
extern void vm_page_more_fictitious(void);
extern vm_page_t vm_page_grab(void);
-extern void vm_page_release(vm_page_t);
+extern void vm_page_release(vm_page_t, boolean_t, boolean_t);
extern phys_addr_t vm_page_grab_phys_addr(void);
extern vm_page_t vm_page_grab_contig(vm_size_t, unsigned int);
extern void vm_page_free_contig(vm_page_t, vm_size_t);
@@ -294,22 +277,7 @@ extern unsigned int vm_page_info(
#define vm_page_lock_queues() simple_lock(&vm_page_queue_lock)
#define vm_page_unlock_queues() simple_unlock(&vm_page_queue_lock)
-#define VM_PAGE_QUEUES_REMOVE(mem) \
- MACRO_BEGIN \
- if (mem->active) { \
- queue_remove(&vm_page_queue_active, \
- mem, vm_page_t, pageq); \
- mem->active = FALSE; \
- vm_page_active_count--; \
- } \
- \
- if (mem->inactive) { \
- queue_remove(&vm_page_queue_inactive, \
- mem, vm_page_t, pageq); \
- mem->inactive = FALSE; \
- vm_page_inactive_count--; \
- } \
- MACRO_END
+#define VM_PAGE_QUEUES_REMOVE(mem) vm_page_queues_remove(mem)
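
The open-coded macro becomes a plain function call; the deleted body above still documents its semantics (drop the page from the active or inactive queue and decrement the matching counter). Callers presumably keep invoking it under the page-queue lock, through the vm_page_lock_queues()/vm_page_unlock_queues() wrappers defined just above, e.g. for some vm_page_t m already in hand:

    vm_page_lock_queues();        /* takes vm_page_queue_lock */
    VM_PAGE_QUEUES_REMOVE(m);     /* now expands to vm_page_queues_remove(m) */
    vm_page_unlock_queues();
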
/*
* Copyright (c) 2010-2014 Richard Braun.
@@ -358,18 +326,11 @@ extern unsigned int vm_page_info(
/*
* Page usage types.
- *
- * Failing to allocate pmap pages will cause a kernel panic.
- * TODO Obviously, this needs to be addressed, e.g. with a reserved pool of
- * pages.
*/
#define VM_PT_FREE 0 /* Page unused */
#define VM_PT_RESERVED 1 /* Page reserved at boot time */
#define VM_PT_TABLE 2 /* Page is part of the page table */
-#define VM_PT_PMAP 3 /* Page stores pmap-specific data */
-#define VM_PT_KMEM 4 /* Page is part of a kmem slab */
-#define VM_PT_STACK 5 /* Type for generic kernel allocations */
-#define VM_PT_KERNEL 6 /* Type for generic kernel allocations */
+#define VM_PT_KERNEL 3 /* Type for generic kernel allocations */
static inline unsigned short
vm_page_type(const struct vm_page *page)
@@ -521,4 +482,53 @@ phys_addr_t vm_page_mem_size(void);
*/
unsigned long vm_page_mem_free(void);
+/*
+ * Remove the given page from any page queue it might be in.
+ */
+void vm_page_queues_remove(struct vm_page *page);
+
+/*
+ * Balance physical pages among segments.
+ *
+ * This function should be called first by the pageout daemon
+ * on memory pressure, since balancing alone may free enough
+ * pages, making any other operation, let alone shrinking
+ * caches, unnecessary.
+ *
+ * Return TRUE if balancing made enough free pages for unprivileged
+ * allocations to succeed, in which case pending allocations are resumed.
+ *
+ * This function acquires vm_page_queue_free_lock, which is held on return.
+ */
+boolean_t vm_page_balance(void);
+
+/*
+ * Evict physical pages.
+ *
+ * This function should be called by the pageout daemon after balancing
+ * the segments and shrinking kernel caches.
+ *
+ * Return TRUE if eviction made enough free pages for unprivileged
+ * allocations to succeed, in which case pending allocations are resumed.
+ *
+ * Otherwise, report whether the pageout daemon should wait (some pages
+ * have been paged out) or not (only clean pages have been released).
+ *
+ * This function acquires vm_page_queue_free_lock, which is held on return.
+ */
+boolean_t vm_page_evict(boolean_t *should_wait);
+
+/*
+ * Turn active pages into inactive ones for second-chance LRU
+ * approximation.
+ *
+ * This function should be called by the pageout daemon on memory pressure,
+ * i.e. right before evicting pages.
+ *
+ * XXX This is probably not the best strategy, compared to keeping the
+ * active/inactive ratio in check at all times, but this means less
+ * frequent refills.
+ */
+void vm_page_refill_inactive(void);
+
#endif /* _VM_VM_PAGE_H_ */
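
Taken together, the comments on vm_page_balance(), vm_page_evict() and vm_page_refill_inactive() describe a calling protocol for the pageout daemon: balance the segments first, shrink kernel caches, refill the inactive queue, then evict, releasing vm_page_queue_free_lock after each call that returns with it held. A hedged outline of one pass, based only on those comments (the function name and control flow are invented; the real vm_pageout code in vm/vm_pageout.c differs):

static void
pageout_pass(void)    /* hypothetical */
{
    boolean_t should_wait;

    /* Balancing alone may free enough pages. */
    if (vm_page_balance()) {
        simple_unlock(&vm_page_queue_free_lock);
        return;
    }
    simple_unlock(&vm_page_queue_free_lock);

    /* Shrink kernel caches here (outside the scope of this header). */

    /* Second-chance LRU: refill the inactive queue, then evict. */
    vm_page_refill_inactive();

    if (vm_page_evict(&should_wait)) {
        simple_unlock(&vm_page_queue_free_lock);
        return;
    }
    simple_unlock(&vm_page_queue_free_lock);

    if (should_wait) {
        /* Some pages were paged out; wait before trying again. */
    }
}
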