summary refs log tree commit diff
path: root/vm
diff options
context:
space:
mode:
author    Richard Braun <rbraun@sceen.net>  2016-09-20 21:34:07 +0200
committer Richard Braun <rbraun@sceen.net>  2016-09-21 00:19:59 +0200
commit 66a878640573dd9101e3915db44408b661220038 (patch)
tree   b030d125bc83e9c52b5e915fbe50de17d5eaf2bf /vm
parent 8322083864500f5726f4f04f80427acee4b52c9a (diff)
Remove phys_first_addr and phys_last_addr global variables
The old assumption that all physical memory is directly mapped in kernel space is about to go away. Those variables are directly linked to that assumption. * i386/i386/model_dep.h (phys_first_addr): Remove extern declaration. (phys_last_addr): Likewise. * i386/i386/phys.c (pmap_zero_page): Use VM_PAGE_DIRECTMAP_LIMIT instead of phys_last_addr. (pmap_copy_page, copy_to_phys, copy_from_phys): Likewise. * i386/i386/trap.c (user_trap): Remove check against phys_last_addr. * i386/i386at/biosmem.c (biosmem_bootstrap_common): Don't set phys_last_addr. * i386/i386at/mem.c (memmmap): Use vm_page_lookup_pa to determine if a physical address references physical memory. * i386/i386at/model_dep.c (phys_first_addr): Remove variable. (phys_last_addr): Likewise. (pmap_free_pages, pmap_valid_page): Remove functions. * i386/intel/pmap.c: Include i386at/biosmem.h. (pa_index): Turn into an alias for vm_page_table_index. (pmap_bootstrap): Replace uses of phys_first_addr and phys_last_addr as appropriate. (pmap_virtual_space): Use vm_page_table_size instead of phys_first_addr and phys_last_addr to obtain the number of physical pages. (pmap_verify_free): Remove function. (valid_page): Turn this macro into an inline function and rewrite using vm_page_lookup_pa. (pmap_page_table_page_alloc): Build the pmap VM object using vm_page_table_size to determine its size. (pmap_remove_range, pmap_page_protect, phys_attribute_clear, phys_attribute_test): Turn page indexes into unsigned long integers. (pmap_enter): Likewise. In addition, use either vm_page_lookup_pa or biosmem_directmap_end to determine if a physical address references physical memory. * i386/xen/xen.c (hyp_p2m_init): Use vm_page_table_size instead of phys_last_addr to obtain the number of physical pages. * kern/startup.c (phys_first_addr): Remove extern declaration. (phys_last_addr): Likewise. 
* linux/dev/init/main.c (linux_init): Use vm_page_seg_end with the appropriate segment selector instead of phys_last_addr to determine where high memory starts. * vm/pmap.h: Update requirements description. (pmap_free_pages, pmap_valid_page): Remove declarations. * vm/vm_page.c (vm_page_seg_end, vm_page_boot_table_size, vm_page_table_size, vm_page_table_index): New functions. * vm/vm_page.h (vm_page_seg_end, vm_page_table_size, vm_page_table_index): New function declarations. * vm/vm_resident.c (vm_page_bucket_count, vm_page_hash_mask): Define as unsigned long integers. (vm_page_bootstrap): Compute VP table size based on the page table size instead of the value returned by pmap_free_pages.
Diffstat (limited to 'vm')
-rw-r--r--  vm/pmap.h        | 14
-rw-r--r--  vm/vm_page.c     | 70
-rw-r--r--  vm/vm_page.h     | 15
-rw-r--r--  vm/vm_resident.c |  6
4 files changed, 91 insertions(+), 14 deletions(-)
diff --git a/vm/pmap.h b/vm/pmap.h
index 9bbcdc32..3c1cdcb1 100644
--- a/vm/pmap.h
+++ b/vm/pmap.h
@@ -65,8 +65,6 @@
/* During VM initialization, steal a chunk of memory. */
extern vm_offset_t pmap_steal_memory(vm_size_t);
-/* During VM initialization, report remaining unused physical pages. */
-extern unsigned int pmap_free_pages(void);
/* Initialization, after kernel runs in virtual memory. */
extern void pmap_init(void);
@@ -75,14 +73,10 @@ extern void pmap_init(void);
* If machine/pmap.h defines MACHINE_PAGES, it must implement
* the above functions. The pmap module has complete control.
* Otherwise, it must implement
- * pmap_free_pages
* pmap_virtual_space
* pmap_init
* and vm/vm_resident.c implements pmap_steal_memory using
- * pmap_free_pages, pmap_virtual_space, and pmap_enter.
- *
- * pmap_free_pages may over-estimate the number of unused physical pages.
- * However, for best performance pmap_free_pages should be accurate.
+ * pmap_virtual_space and pmap_enter.
*/
/* During VM initialization, report virtual space available for the kernel. */
@@ -186,8 +180,6 @@ extern kern_return_t pmap_attribute(void);
*/
extern vm_offset_t pmap_grab_page (void);
-extern boolean_t pmap_valid_page(vm_offset_t x);
-
/*
* Make the specified pages (by pmap, offset)
* pageable (or not) as requested.
@@ -200,8 +192,8 @@ extern void pmap_pageable(
/*
* Back-door routine for mapping kernel VM at initialization.
- * Useful for mapping memory outside the range
- * [phys_first_addr, phys_last_addr) (i.e., devices).
+ * Useful for mapping memory outside the range of direct mapped
+ * physical memory (i.e., devices).
* Otherwise like pmap_map.
*/
extern vm_offset_t pmap_map_bd(
diff --git a/vm/vm_page.c b/vm/vm_page.c
index 12e6a5ea..f966e4dc 100644
--- a/vm/vm_page.c
+++ b/vm/vm_page.c
@@ -773,6 +773,76 @@ vm_page_info_all(void)
}
phys_addr_t
+vm_page_seg_end(unsigned int selector)
+{
+ return vm_page_segs[vm_page_select_alloc_seg(selector)].end;
+}
+
+static unsigned long
+vm_page_boot_table_size(void)
+{
+ unsigned long nr_pages;
+ unsigned int i;
+
+ nr_pages = 0;
+
+ for (i = 0; i < vm_page_segs_size; i++) {
+ /* XXX */
+ if (i > VM_PAGE_SEG_DIRECTMAP)
+ continue;
+
+ nr_pages += vm_page_atop(vm_page_boot_seg_size(&vm_page_boot_segs[i]));
+ }
+
+ return nr_pages;
+}
+
+unsigned long
+vm_page_table_size(void)
+{
+ unsigned long nr_pages;
+ unsigned int i;
+
+ if (!vm_page_is_ready) {
+ return vm_page_boot_table_size();
+ }
+
+ nr_pages = 0;
+
+ for (i = 0; i < vm_page_segs_size; i++) {
+ /* XXX */
+ if (i > VM_PAGE_SEG_DIRECTMAP)
+ continue;
+
+ nr_pages += vm_page_atop(vm_page_seg_size(&vm_page_segs[i]));
+ }
+
+ return nr_pages;
+}
+
+unsigned long
+vm_page_table_index(phys_addr_t pa)
+{
+ struct vm_page_seg *seg;
+ unsigned long index;
+ unsigned int i;
+
+ index = 0;
+
+ for (i = 0; i < vm_page_segs_size; i++) {
+ seg = &vm_page_segs[i];
+
+ if ((pa >= seg->start) && (pa < seg->end)) {
+ return index + vm_page_atop(pa - seg->start);
+ }
+
+ index += vm_page_atop(vm_page_seg_size(seg));
+ }
+
+ panic("vm_page: invalid physical address");
+}
+
+phys_addr_t
vm_page_mem_size(void)
{
phys_addr_t total;
diff --git a/vm/vm_page.h b/vm/vm_page.h
index f9682367..ba54b3ab 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -503,6 +503,21 @@ const char * vm_page_seg_name(unsigned int seg_index);
void vm_page_info_all(void);
/*
+ * Return the maximum physical address for a given segment selector.
+ */
+phys_addr_t vm_page_seg_end(unsigned int selector);
+
+/*
+ * Return the total number of physical pages.
+ */
+unsigned long vm_page_table_size(void);
+
+/*
+ * Return the index of a page in the page table.
+ */
+unsigned long vm_page_table_index(phys_addr_t pa);
+
+/*
* Return the total amount of physical memory.
*/
phys_addr_t vm_page_mem_size(void);
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index ed867f54..3dff11bf 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -92,8 +92,8 @@ typedef struct {
} vm_page_bucket_t;
vm_page_bucket_t *vm_page_buckets; /* Array of buckets */
-unsigned int vm_page_bucket_count = 0; /* How big is array? */
-unsigned int vm_page_hash_mask; /* Mask for hash function */
+unsigned long vm_page_bucket_count = 0; /* How big is array? */
+unsigned long vm_page_hash_mask; /* Mask for hash function */
vm_page_t vm_page_queue_fictitious;
decl_simple_lock_data(,vm_page_queue_free_lock)
@@ -209,7 +209,7 @@ void vm_page_bootstrap(
*/
if (vm_page_bucket_count == 0) {
- unsigned int npages = pmap_free_pages();
+ unsigned long npages = vm_page_table_size();
vm_page_bucket_count = 1;
while (vm_page_bucket_count < npages)