author     Richard Braun <rbraun@sceen.net>   2016-09-20 21:34:07 +0200
committer  Richard Braun <rbraun@sceen.net>   2016-09-21 00:19:59 +0200
commit     66a878640573dd9101e3915db44408b661220038 (patch)
tree       b030d125bc83e9c52b5e915fbe50de17d5eaf2bf
parent     8322083864500f5726f4f04f80427acee4b52c9a (diff)
Remove phys_first_addr and phys_last_addr global variables
The old assumption that all physical memory is directly mapped in
kernel space is about to go away. Those variables are directly linked
to that assumption.

* i386/i386/model_dep.h (phys_first_addr): Remove extern declaration.
(phys_last_addr): Likewise.
* i386/i386/phys.c (pmap_zero_page): Use VM_PAGE_DIRECTMAP_LIMIT
instead of phys_last_addr.
(pmap_copy_page, copy_to_phys, copy_from_phys): Likewise.
* i386/i386/trap.c (user_trap): Remove check against phys_last_addr.
* i386/i386at/biosmem.c (biosmem_bootstrap_common): Don't set
phys_last_addr.
* i386/i386at/mem.c (memmmap): Use vm_page_lookup_pa to determine if
a physical address references physical memory.
* i386/i386at/model_dep.c (phys_first_addr): Remove variable.
(phys_last_addr): Likewise.
(pmap_free_pages, pmap_valid_page): Remove functions.
* i386/intel/pmap.c: Include i386at/biosmem.h.
(pa_index): Turn into an alias for vm_page_table_index.
(pmap_bootstrap): Replace uses of phys_first_addr and phys_last_addr
as appropriate.
(pmap_virtual_space): Use vm_page_table_size instead of
phys_first_addr and phys_last_addr to obtain the number of physical
pages.
(pmap_verify_free): Remove function.
(valid_page): Turn this macro into an inline function and rewrite
using vm_page_lookup_pa.
(pmap_page_table_page_alloc): Build the pmap VM object using
vm_page_table_size to determine its size.
(pmap_remove_range, pmap_page_protect, phys_attribute_clear,
phys_attribute_test): Turn page indexes into unsigned long integers.
(pmap_enter): Likewise. In addition, use either vm_page_lookup_pa or
biosmem_directmap_end to determine if a physical address references
physical memory.
* i386/xen/xen.c (hyp_p2m_init): Use vm_page_table_size instead of
phys_last_addr to obtain the number of physical pages.
* kern/startup.c (phys_first_addr): Remove extern declaration.
(phys_last_addr): Likewise.
* linux/dev/init/main.c (linux_init): Use vm_page_seg_end with the
appropriate segment selector instead of phys_last_addr to determine
where high memory starts.
* vm/pmap.h: Update requirements description.
(pmap_free_pages, pmap_valid_page): Remove declarations.
* vm/vm_page.c (vm_page_seg_end, vm_page_boot_table_size,
vm_page_table_size, vm_page_table_index): New functions.
* vm/vm_page.h (vm_page_seg_end, vm_page_table_size,
vm_page_table_index): New function declarations.
* vm/vm_resident.c (vm_page_bucket_count, vm_page_hash_mask): Define
as unsigned long integers.
(vm_page_bootstrap): Compute VP table size based on the page table
size instead of the value returned by pmap_free_pages.
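In short: code that used to compare a physical address against
phys_last_addr must now either consult the VM page table or, before the
table exists, the direct-map boundary. A minimal sketch of the
replacement check, mirroring what pmap_enter does in the diff below;
the helper name pa_is_physmem is hypothetical, not part of the commit:

/* Sketch only: hypothetical helper mirroring the pmap_enter check. */
static boolean_t
pa_is_physmem(phys_addr_t pa)
{
	if (vm_page_ready())
		/* The page table knows every managed RAM segment. */
		return vm_page_lookup_pa(pa) != NULL;

	/* Too early for the page table: fall back to the boundary of
	 * the direct physical mapping reported by biosmem. */
	return pa < biosmem_directmap_end();
}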
-rw-r--r--  i386/i386/model_dep.h    |  7
-rw-r--r--  i386/i386/phys.c         | 10
-rw-r--r--  i386/i386/trap.c         | 10
-rw-r--r--  i386/i386at/biosmem.c    | 15
-rw-r--r--  i386/i386at/mem.c        | 22
-rw-r--r--  i386/i386at/model_dep.c  | 22
-rw-r--r--  i386/intel/pmap.c        | 75
-rw-r--r--  i386/xen/xen.c           |  2
-rw-r--r--  kern/startup.c           |  1
-rw-r--r--  linux/dev/init/main.c    |  2
-rw-r--r--  vm/pmap.h                | 14
-rw-r--r--  vm/vm_page.c             | 70
-rw-r--r--  vm/vm_page.h             | 15
-rw-r--r--  vm/vm_resident.c         |  6
14 files changed, 152 insertions, 119 deletions
diff --git a/i386/i386/model_dep.h b/i386/i386/model_dep.h
index ab2738f8..54aa1ec7 100644
--- a/i386/i386/model_dep.h
+++ b/i386/i386/model_dep.h
@@ -50,11 +50,4 @@ extern void halt_cpu (void) __attribute__ ((noreturn));
*/
extern void halt_all_cpus (boolean_t reboot) __attribute__ ((noreturn));
-/*
- * More-specific code provides these;
- * they indicate the total extent of physical memory
- * that we know about and might ever have to manage.
- */
-extern vm_offset_t phys_first_addr, phys_last_addr;
-
#endif /* _I386AT_MODEL_DEP_H_ */
diff --git a/i386/i386/phys.c b/i386/i386/phys.c
index 8681fba3..fadc5c37 100644
--- a/i386/i386/phys.c
+++ b/i386/i386/phys.c
@@ -52,7 +52,7 @@ pmap_zero_page(vm_offset_t p)
assert(p != vm_page_fictitious_addr);
vm_offset_t v;
pmap_mapwindow_t *map;
- boolean_t mapped = p >= phys_last_addr;
+ boolean_t mapped = p >= VM_PAGE_DIRECTMAP_LIMIT;
if (mapped)
{
@@ -79,8 +79,8 @@ pmap_copy_page(
vm_offset_t src_addr_v, dst_addr_v;
pmap_mapwindow_t *src_map = NULL;
pmap_mapwindow_t *dst_map;
- boolean_t src_mapped = src >= phys_last_addr;
- boolean_t dst_mapped = dst >= phys_last_addr;
+ boolean_t src_mapped = src >= VM_PAGE_DIRECTMAP_LIMIT;
+ boolean_t dst_mapped = dst >= VM_PAGE_DIRECTMAP_LIMIT;
assert(src != vm_page_fictitious_addr);
assert(dst != vm_page_fictitious_addr);
@@ -121,7 +121,7 @@ copy_to_phys(
{
vm_offset_t dst_addr_v;
pmap_mapwindow_t *dst_map;
- boolean_t mapped = dst_addr_p >= phys_last_addr;
+ boolean_t mapped = dst_addr_p >= VM_PAGE_DIRECTMAP_LIMIT;
assert(dst_addr_p != vm_page_fictitious_addr);
assert(pa_to_pte(dst_addr_p + count-1) == pa_to_pte(dst_addr_p));
@@ -153,7 +153,7 @@ copy_from_phys(
{
vm_offset_t src_addr_v;
pmap_mapwindow_t *src_map;
- boolean_t mapped = src_addr_p >= phys_last_addr;
+ boolean_t mapped = src_addr_p >= VM_PAGE_DIRECTMAP_LIMIT;
assert(src_addr_p != vm_page_fictitious_addr);
assert(pa_to_pte(src_addr_p + count-1) == pa_to_pte(src_addr_p));
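The four phys.c routines changed above share one shape: pages below
VM_PAGE_DIRECTMAP_LIMIT are reached through the permanent direct
mapping, higher pages through a temporary window. A condensed sketch
of that shape; pmap_get_mapwindow/pmap_put_mapwindow and the
map->vaddr field are assumed from i386/intel/pmap.h, and the PTE bits
are illustrative:

/* Sketch of the shared pattern (assumed helpers, not committed code). */
void
zero_physical_page(vm_offset_t p)
{
	pmap_mapwindow_t *map = NULL;
	vm_offset_t v;
	boolean_t mapped = p >= VM_PAGE_DIRECTMAP_LIMIT;

	if (mapped) {
		/* High page: borrow a temporary kernel mapping window. */
		map = pmap_get_mapwindow(pa_to_pte(p)
					 | INTEL_PTE_VALID | INTEL_PTE_WRITE);
		v = map->vaddr;
	} else {
		/* Low page: reachable through the permanent direct map. */
		v = phystokv(p);
	}

	memset((void *) v, 0, PAGE_SIZE);

	if (mapped)
		pmap_put_mapwindow(map);
}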
diff --git a/i386/i386/trap.c b/i386/i386/trap.c
index 64705049..d4bdc7f2 100644
--- a/i386/i386/trap.c
+++ b/i386/i386/trap.c
@@ -351,16 +351,6 @@ int user_trap(struct i386_saved_state *regs)
int type;
thread_t thread = current_thread();
- if ((vm_offset_t)thread < phys_last_addr) {
- printf("user_trap: bad thread pointer 0x%p\n", thread);
- printf("trap type %ld, code 0x%lx, va 0x%lx, eip 0x%lx\n",
- regs->trapno, regs->err, regs->cr2, regs->eip);
- asm volatile ("1: hlt; jmp 1b");
- }
-#if 0
-printf("user trap %d error %d sub %08x\n", type, code, subcode);
-#endif
-
type = regs->trapno;
code = 0;
subcode = 0;
diff --git a/i386/i386at/biosmem.c b/i386/i386at/biosmem.c
index 90ae54a9..62be567c 100644
--- a/i386/i386at/biosmem.c
+++ b/i386/i386at/biosmem.c
@@ -650,7 +650,7 @@ biosmem_setup_allocator(const struct multiboot_raw_info *mbi)
static void __boot
biosmem_bootstrap_common(void)
{
- phys_addr_t phys_start, phys_end, last_addr;
+ phys_addr_t phys_start, phys_end;
int error;
biosmem_map_adjust();
@@ -663,7 +663,6 @@ biosmem_bootstrap_common(void)
boot_panic(biosmem_panic_noseg_msg);
biosmem_set_segment(VM_PAGE_SEG_DMA, phys_start, phys_end);
- last_addr = phys_end;
phys_start = VM_PAGE_DMA_LIMIT;
#ifdef VM_PAGE_DMA32_LIMIT
@@ -671,10 +670,9 @@ biosmem_bootstrap_common(void)
error = biosmem_map_find_avail(&phys_start, &phys_end);
if (error)
- goto out;
+ return;
biosmem_set_segment(VM_PAGE_SEG_DMA32, phys_start, phys_end);
- last_addr = phys_end;
phys_start = VM_PAGE_DMA32_LIMIT;
#endif /* VM_PAGE_DMA32_LIMIT */
@@ -682,23 +680,18 @@ biosmem_bootstrap_common(void)
error = biosmem_map_find_avail(&phys_start, &phys_end);
if (error)
- goto out;
+ return;
biosmem_set_segment(VM_PAGE_SEG_DIRECTMAP, phys_start, phys_end);
- last_addr = phys_end;
phys_start = VM_PAGE_DIRECTMAP_LIMIT;
phys_end = VM_PAGE_HIGHMEM_LIMIT;
error = biosmem_map_find_avail(&phys_start, &phys_end);
if (error)
- goto out;
+ return;
biosmem_set_segment(VM_PAGE_SEG_HIGHMEM, phys_start, phys_end);
-
-out:
- /* XXX phys_last_addr must be part of the direct physical mapping */
- phys_last_addr = last_addr;
}
#ifdef MACH_HYP
diff --git a/i386/i386at/mem.c b/i386/i386at/mem.c
index f239afac..eac2549f 100644
--- a/i386/i386at/mem.c
+++ b/i386/i386at/mem.c
@@ -36,12 +36,24 @@ dev_t dev;
vm_offset_t off;
vm_prot_t prot;
{
+ struct vm_page *p;
+
if (off == 0)
return 0;
- else if (off < 0xa0000)
- return -1;
- else if (off >= 0x100000 && off < phys_last_addr)
+
+ /*
+ * The legacy device mappings are included in the page tables and
+ * need their own test.
+ */
+ if (off >= 0xa0000 && off < 0x100000)
+ goto out;
+
+ p = vm_page_lookup_pa(off);
+
+ if (p != NULL) {
return -1;
- else
- return i386_btop(off);
+ }
+
+out:
+ return i386_btop(off);
}
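Spelled out, the new memmmap policy is: offset 0 is allowed, the
legacy VGA/BIOS hole is always mappable, managed RAM is refused, and
any other address (device memory) is mapped 1:1. A condensed
restatement of the committed logic, under a hypothetical name:

/* Condensed restatement of the memmmap decision (sketch only). */
int
mem_offset_to_frame(vm_offset_t off)
{
	if (off == 0)
		return 0;
	if (off >= 0xa0000 && off < 0x100000)
		return i386_btop(off);		/* legacy VGA/BIOS hole */
	if (vm_page_lookup_pa(off) != NULL)
		return -1;			/* managed RAM: refuse */
	return i386_btop(off);			/* other device memory */
}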
diff --git a/i386/i386at/model_dep.c b/i386/i386at/model_dep.c
index ab850442..239f63f4 100644
--- a/i386/i386at/model_dep.c
+++ b/i386/i386at/model_dep.c
@@ -106,11 +106,6 @@ static unsigned elf_shdr_shndx;
#define RESERVED_BIOS 0x10000
-/* These indicate the total extent of physical memory addresses we're using.
- They are page-aligned. */
-vm_offset_t phys_first_addr = 0;
-vm_offset_t phys_last_addr;
-
/* A copy of the multiboot info structure passed by the boot loader. */
#ifdef MACH_XEN
struct start_info boot_info;
@@ -690,11 +685,6 @@ resettodr(void)
writetodc();
}
-unsigned int pmap_free_pages(void)
-{
- return vm_page_atop(phys_last_addr); /* XXX */
-}
-
boolean_t
init_alloc_aligned(vm_size_t size, vm_offset_t *addrp)
{
@@ -717,15 +707,3 @@ pmap_grab_page(void)
panic("Not enough memory to initialize Mach");
return addr;
}
-
-boolean_t pmap_valid_page(vm_offset_t x)
-{
- /* XXX is this OK? What does it matter for? */
- return (((phys_first_addr <= x) && (x < phys_last_addr))
-#ifndef MACH_HYP
- && !(
- ((boot_info.mem_lower * 1024) <= x) &&
- (x < 1024*1024))
-#endif /* MACH_HYP */
- );
-}
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index e85e5480..7cde0931 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -83,6 +83,7 @@
#include <i386/proc_reg.h>
#include <i386/locore.h>
#include <i386/model_dep.h>
+#include <i386at/biosmem.h>
#include <i386at/model_dep.h>
#ifdef MACH_PSEUDO_PHYS
@@ -158,9 +159,9 @@ vm_offset_t kernel_virtual_end;
/*
* Index into pv_head table, its lock bits, and the modify/reference
- * bits starting at phys_first_addr.
+ * bits.
*/
-#define pa_index(pa) (atop(pa - phys_first_addr))
+#define pa_index(pa) vm_page_table_index(pa)
#define pai_to_pvh(pai) (&pv_head_table[pai])
#define lock_pvh_pai(pai) (bit_lock(pai, pv_lock_table))
@@ -499,8 +500,8 @@ vm_offset_t pmap_map(
/*
* Back-door routine for mapping kernel VM at initialization.
- * Useful for mapping memory outside the range
- * [phys_first_addr, phys_last_addr) (i.e., devices).
+ * Useful for mapping memory outside the range of direct mapped
+ * physical memory (i.e., devices).
* Otherwise like pmap_map.
*/
vm_offset_t pmap_map_bd(
@@ -600,8 +601,8 @@ void pmap_bootstrap(void)
* mapped into the kernel address space,
* and extends to a stupid arbitrary limit beyond that.
*/
- kernel_virtual_start = phystokv(phys_last_addr);
- kernel_virtual_end = phystokv(phys_last_addr) + VM_KERNEL_MAP_SIZE;
+ kernel_virtual_start = phystokv(biosmem_directmap_end());
+ kernel_virtual_end = kernel_virtual_start + VM_KERNEL_MAP_SIZE;
if (kernel_virtual_end < kernel_virtual_start
|| kernel_virtual_end > VM_MAX_KERNEL_ADDRESS)
@@ -692,8 +693,7 @@ void pmap_bootstrap(void)
pt_entry_t global = CPU_HAS_FEATURE(CPU_FEATURE_PGE) ? INTEL_PTE_GLOBAL : 0;
/*
- * Map virtual memory for all known physical memory, 1-1,
- * from phys_first_addr to phys_last_addr.
+ * Map virtual memory for all directly mappable physical memory, 1-1,
* Make any mappings completely in the kernel's text segment read-only.
*
* Also allocate some additional all-null page tables afterwards
@@ -702,7 +702,7 @@ void pmap_bootstrap(void)
* to allocate new kernel page tables later.
* XX fix this
*/
- for (va = phystokv(phys_first_addr); va >= phystokv(phys_first_addr) && va < kernel_virtual_end; )
+ for (va = phystokv(0); va >= phystokv(0) && va < kernel_virtual_end; )
{
pt_entry_t *pde = kernel_page_dir + lin2pdenum(kvtolin(va));
pt_entry_t *ptable = (pt_entry_t*)phystokv(pmap_grab_page());
@@ -713,7 +713,7 @@ void pmap_bootstrap(void)
| INTEL_PTE_VALID | INTEL_PTE_WRITE);
/* Initialize the page table. */
- for (pte = ptable; (va < phystokv(phys_last_addr)) && (pte < ptable+NPTES); pte++)
+ for (pte = ptable; (va < phystokv(biosmem_directmap_end())) && (pte < ptable+NPTES); pte++)
{
if ((pte - ptable) < ptenum(va))
{
@@ -937,7 +937,7 @@ void pmap_virtual_space(
*/
void pmap_init(void)
{
- long npages;
+ unsigned long npages;
vm_offset_t addr;
vm_size_t s;
#if NCPUS > 1
@@ -949,7 +949,7 @@ void pmap_init(void)
* the modify bit array, and the pte_page table.
*/
- npages = atop(phys_last_addr - phys_first_addr);
+ npages = vm_page_table_size();
s = (vm_size_t) (sizeof(struct pv_entry) * npages
+ pv_lock_table_size(npages)
+ npages);
@@ -997,31 +997,16 @@ void pmap_init(void)
pmap_initialized = TRUE;
}
-#define valid_page(x) (pmap_initialized && pmap_valid_page(x))
-
-boolean_t pmap_verify_free(vm_offset_t phys)
+static inline boolean_t
+valid_page(phys_addr_t addr)
{
- pv_entry_t pv_h;
- int pai;
- int spl;
- boolean_t result;
+ struct vm_page *p;
- assert(phys != vm_page_fictitious_addr);
if (!pmap_initialized)
- return(TRUE);
-
- if (!pmap_valid_page(phys))
- return(FALSE);
+ return FALSE;
- PMAP_WRITE_LOCK(spl);
-
- pai = pa_index(phys);
- pv_h = pai_to_pvh(pai);
-
- result = (pv_h->pmap == PMAP_NULL);
- PMAP_WRITE_UNLOCK(spl);
-
- return(result);
+ p = vm_page_lookup_pa(addr);
+ return (p != NULL);
}
/*
@@ -1046,7 +1031,7 @@ pmap_page_table_page_alloc(void)
* Allocate it now if it is missing.
*/
if (pmap_object == VM_OBJECT_NULL)
- pmap_object = vm_object_allocate(phys_last_addr - phys_first_addr);
+ pmap_object = vm_object_allocate(vm_page_table_size() * PAGE_SIZE);
/*
* Allocate a VM page for the level 2 page table entries.
@@ -1324,8 +1309,8 @@ void pmap_remove_range(
pt_entry_t *epte)
{
pt_entry_t *cpte;
- int num_removed, num_unwired;
- int pai;
+ unsigned long num_removed, num_unwired;
+ unsigned long pai;
vm_offset_t pa;
#ifdef MACH_PV_PAGETABLES
int n, ii = 0;
@@ -1522,7 +1507,7 @@ void pmap_page_protect(
pv_entry_t pv_h, prev;
pv_entry_t pv_e;
pt_entry_t *pte;
- int pai;
+ unsigned long pai;
pmap_t pmap;
int spl;
boolean_t remove;
@@ -1792,9 +1777,10 @@ void pmap_enter(
vm_prot_t prot,
boolean_t wired)
{
+ boolean_t is_physmem;
pt_entry_t *pte;
pv_entry_t pv_h;
- int i, pai;
+ unsigned long i, pai;
pv_entry_t pv_e;
pt_entry_t template;
int spl;
@@ -1923,6 +1909,11 @@ Retry:
continue;
}
+ if (vm_page_ready())
+ is_physmem = (vm_page_lookup_pa(pa) != NULL);
+ else
+ is_physmem = (pa < biosmem_directmap_end());
+
/*
* Special case if the physical page is already mapped
* at this address.
@@ -1944,7 +1935,7 @@ Retry:
if (prot & VM_PROT_WRITE)
template |= INTEL_PTE_WRITE;
if (machine_slot[cpu_number()].cpu_type >= CPU_TYPE_I486
- && pa >= phys_last_addr)
+ && !is_physmem)
template |= INTEL_PTE_NCACHE|INTEL_PTE_WTHRU;
if (wired)
template |= INTEL_PTE_WIRED;
@@ -2056,7 +2047,7 @@ Retry:
if (prot & VM_PROT_WRITE)
template |= INTEL_PTE_WRITE;
if (machine_slot[cpu_number()].cpu_type >= CPU_TYPE_I486
- && pa >= phys_last_addr)
+ && !is_physmem)
template |= INTEL_PTE_NCACHE|INTEL_PTE_WTHRU;
if (wired)
template |= INTEL_PTE_WIRED;
@@ -2418,7 +2409,7 @@ phys_attribute_clear(
pv_entry_t pv_h;
pv_entry_t pv_e;
pt_entry_t *pte;
- int pai;
+ unsigned long pai;
pmap_t pmap;
int spl;
@@ -2502,7 +2493,7 @@ phys_attribute_test(
pv_entry_t pv_h;
pv_entry_t pv_e;
pt_entry_t *pte;
- int pai;
+ unsigned long pai;
pmap_t pmap;
int spl;
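Because pa_index is now an alias for vm_page_table_index, every
existing pai consumer in this file keeps working unchanged; only the
index type widens to unsigned long. The untouched consumer pattern,
sketched with the macros defined above (unlock_pvh_pai is assumed to
be the bit_unlock counterpart of lock_pvh_pai):

/* Sketch of the pai consumer pattern this commit leaves intact. */
unsigned long pai = pa_index(pa);	/* now vm_page_table_index(pa) */
pv_entry_t pv_h = pai_to_pvh(pai);	/* &pv_head_table[pai] */

lock_pvh_pai(pai);			/* bit_lock in pv_lock_table */
/* ... walk or modify the pv list rooted at pv_h ... */
unlock_pvh_pai(pai);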
diff --git a/i386/xen/xen.c b/i386/xen/xen.c
index 8b015c42..d10ecf39 100644
--- a/i386/xen/xen.c
+++ b/i386/xen/xen.c
@@ -58,7 +58,7 @@ void hypclock_machine_intr(int old_ipl, void *ret_addr, struct i386_interrupt_st
}
void hyp_p2m_init(void) {
- unsigned long nb_pfns = atop(phys_last_addr);
+ unsigned long nb_pfns = vm_page_table_size();
#ifdef MACH_PSEUDO_PHYS
#define P2M_PAGE_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
unsigned long *l3 = (unsigned long *)phystokv(pmap_grab_page()), *l2 = NULL;
diff --git a/kern/startup.c b/kern/startup.c
index c87cbb10..19bd7bf6 100644
--- a/kern/startup.c
+++ b/kern/startup.c
@@ -78,7 +78,6 @@ boolean_t reboot_on_panic = TRUE;
#endif /* NCPUS > 1 */
/* XX */
-extern vm_offset_t phys_first_addr, phys_last_addr;
extern char *kernel_cmdline;
/*
diff --git a/linux/dev/init/main.c b/linux/dev/init/main.c
index d69b3fc7..3740c12c 100644
--- a/linux/dev/init/main.c
+++ b/linux/dev/init/main.c
@@ -104,7 +104,7 @@ linux_init (void)
/*
* Initialize memory size.
*/
- high_memory = phys_last_addr;
+ high_memory = vm_page_seg_end(VM_PAGE_SEL_DIRECTMAP);
init_IRQ ();
linux_sched_init ();
diff --git a/vm/pmap.h b/vm/pmap.h
index 9bbcdc32..3c1cdcb1 100644
--- a/vm/pmap.h
+++ b/vm/pmap.h
@@ -65,8 +65,6 @@
/* During VM initialization, steal a chunk of memory. */
extern vm_offset_t pmap_steal_memory(vm_size_t);
-/* During VM initialization, report remaining unused physical pages. */
-extern unsigned int pmap_free_pages(void);
/* Initialization, after kernel runs in virtual memory. */
extern void pmap_init(void);
@@ -75,14 +73,10 @@ extern void pmap_init(void);
* If machine/pmap.h defines MACHINE_PAGES, it must implement
* the above functions. The pmap module has complete control.
* Otherwise, it must implement
- * pmap_free_pages
* pmap_virtual_space
* pmap_init
* and vm/vm_resident.c implements pmap_steal_memory using
- * pmap_free_pages, pmap_virtual_space, and pmap_enter.
- *
- * pmap_free_pages may over-estimate the number of unused physical pages.
- * However, for best performance pmap_free_pages should be accurate.
+ * pmap_virtual_space and pmap_enter.
*/
/* During VM initialization, report virtual space available for the kernel. */
@@ -186,8 +180,6 @@ extern kern_return_t pmap_attribute(void);
*/
extern vm_offset_t pmap_grab_page (void);
-extern boolean_t pmap_valid_page(vm_offset_t x);
-
/*
* Make the specified pages (by pmap, offset)
* pageable (or not) as requested.
@@ -200,8 +192,8 @@ extern void pmap_pageable(
/*
* Back-door routine for mapping kernel VM at initialization.
- * Useful for mapping memory outside the range
- * [phys_first_addr, phys_last_addr) (i.e., devices).
+ * Useful for mapping memory outside the range of direct mapped
+ * physical memory (i.e., devices).
* Otherwise like pmap_map.
*/
extern vm_offset_t pmap_map_bd(
diff --git a/vm/vm_page.c b/vm/vm_page.c
index 12e6a5ea..f966e4dc 100644
--- a/vm/vm_page.c
+++ b/vm/vm_page.c
@@ -773,6 +773,76 @@ vm_page_info_all(void)
}
phys_addr_t
+vm_page_seg_end(unsigned int selector)
+{
+ return vm_page_segs[vm_page_select_alloc_seg(selector)].end;
+}
+
+static unsigned long
+vm_page_boot_table_size(void)
+{
+ unsigned long nr_pages;
+ unsigned int i;
+
+ nr_pages = 0;
+
+ for (i = 0; i < vm_page_segs_size; i++) {
+ /* XXX */
+ if (i > VM_PAGE_SEG_DIRECTMAP)
+ continue;
+
+ nr_pages += vm_page_atop(vm_page_boot_seg_size(&vm_page_boot_segs[i]));
+ }
+
+ return nr_pages;
+}
+
+unsigned long
+vm_page_table_size(void)
+{
+ unsigned long nr_pages;
+ unsigned int i;
+
+ if (!vm_page_is_ready) {
+ return vm_page_boot_table_size();
+ }
+
+ nr_pages = 0;
+
+ for (i = 0; i < vm_page_segs_size; i++) {
+ /* XXX */
+ if (i > VM_PAGE_SEG_DIRECTMAP)
+ continue;
+
+ nr_pages += vm_page_atop(vm_page_seg_size(&vm_page_segs[i]));
+ }
+
+ return nr_pages;
+}
+
+unsigned long
+vm_page_table_index(phys_addr_t pa)
+{
+ struct vm_page_seg *seg;
+ unsigned long index;
+ unsigned int i;
+
+ index = 0;
+
+ for (i = 0; i < vm_page_segs_size; i++) {
+ seg = &vm_page_segs[i];
+
+ if ((pa >= seg->start) && (pa < seg->end)) {
+ return index + vm_page_atop(pa - seg->start);
+ }
+
+ index += vm_page_atop(vm_page_seg_size(seg));
+ }
+
+ panic("vm_page: invalid physical address");
+}
+
+phys_addr_t
vm_page_mem_size(void)
{
phys_addr_t total;
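vm_page_table_index concatenates the segments in order, so an
address's index is its page offset within its own segment plus the
page counts of all earlier segments. A worked example under a
hypothetical layout (4 KiB pages, two segments):

/* Hypothetical segment layout, for illustration only:
 *   DMA:       [0x00000000, 0x01000000)  ->  4096 pages
 *   DIRECTMAP: [0x01000000, 0x40000000)
 */
assert(vm_page_table_index(0x5000) == 5);	/* 5 pages into DMA */
assert(vm_page_table_index(0x1000000) == 4096);	/* first DIRECTMAP page */
assert(vm_page_table_index(0x1003000) == 4099);	/* 4096 + 3 */
/* An address outside every segment panics ("invalid physical address"). */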
diff --git a/vm/vm_page.h b/vm/vm_page.h
index f9682367..ba54b3ab 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -503,6 +503,21 @@ const char * vm_page_seg_name(unsigned int seg_index);
void vm_page_info_all(void);
/*
+ * Return the maximum physical address for a given segment selector.
+ */
+phys_addr_t vm_page_seg_end(unsigned int selector);
+
+/*
+ * Return the total number of physical pages.
+ */
+unsigned long vm_page_table_size(void);
+
+/*
+ * Return the index of a page in the page table.
+ */
+unsigned long vm_page_table_index(phys_addr_t pa);
+
+/*
* Return the total amount of physical memory.
*/
phys_addr_t vm_page_mem_size(void);
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index ed867f54..3dff11bf 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -92,8 +92,8 @@ typedef struct {
} vm_page_bucket_t;
vm_page_bucket_t *vm_page_buckets; /* Array of buckets */
-unsigned int vm_page_bucket_count = 0; /* How big is array? */
-unsigned int vm_page_hash_mask; /* Mask for hash function */
+unsigned long vm_page_bucket_count = 0; /* How big is array? */
+unsigned long vm_page_hash_mask; /* Mask for hash function */
vm_page_t vm_page_queue_fictitious;
decl_simple_lock_data(,vm_page_queue_free_lock)
@@ -209,7 +209,7 @@ void vm_page_bootstrap(
*/
if (vm_page_bucket_count == 0) {
- unsigned int npages = pmap_free_pages();
+ unsigned long npages = vm_page_table_size();
vm_page_bucket_count = 1;
while (vm_page_bucket_count < npages)
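The excerpt ends inside the sizing loop, but the intent is visible:
grow the bucket count in powers of two until it covers the page count,
so that hashing can reduce to a mask. A standalone sketch of that
sizing rule, assuming the loop body doubles the count (the
continuation is not shown above):

/* Sketch of power-of-two hash-table sizing (assumed continuation):
 * with a power-of-two bucket count, the hash mask is count - 1. */
unsigned long npages = vm_page_table_size();
unsigned long bucket_count = 1;

while (bucket_count < npages)
	bucket_count <<= 1;		/* next power of two >= npages */

unsigned long hash_mask = bucket_count - 1;
/* bucket index = hash(object, offset) & hash_mask */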