author     Richard Braun <rbraun@sceen.net>    2016-09-21 00:36:22 +0200
committer  Richard Braun <rbraun@sceen.net>    2016-09-21 00:36:22 +0200
commit     e29b7797dc2aebcfb00fc08201c31ef0caf5f4d3
tree       2029331346b37e5fe6108d74703d2d5be4e2bcfd
parent     6923672268ae8e51e3cf303314fca196dc369e19
Enable high memory
* i386/i386at/biosmem.c (biosmem_setup): Load the HIGHMEM segment if
  present.
  (biosmem_free_usable): Report high memory as usable.
* vm/vm_page.c (vm_page_boot_table_size, vm_page_table_size,
  vm_page_mem_size, vm_page_mem_free): Scan all segments.
* vm/vm_resident.c (vm_page_grab): Describe allocation strategy with
  regard to the HIGHMEM segment.
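For orientation: the patch presumes the vm_page allocator's layering of
physical memory into segments. A rough sketch of that layering on i386
follows; VM_PAGE_SEG_DIRECTMAP, VM_PAGE_DIRECTMAP_LIMIT and
VM_PAGE_HIGHMEM_LIMIT are taken from the patch, while VM_PAGE_SEG_DMA,
VM_PAGE_SEG_HIGHMEM and the concrete index values are illustrative
assumptions.

    /*
     * Illustrative sketch of the physical segment layering assumed by
     * this patch (index values are assumptions, not copied from
     * vm/vm_page.h).
     */
    #define VM_PAGE_SEG_DMA        0    /* low memory, usable for ISA DMA */
    #define VM_PAGE_SEG_DIRECTMAP  1    /* permanently mapped in kernel space */
    #define VM_PAGE_SEG_HIGHMEM    2    /* beyond the direct-mapped window */

    /*
     * biosmem_load_segment() registers a segment's page range with the
     * allocator up to a clamp; raising that clamp from
     * VM_PAGE_DIRECTMAP_LIMIT to VM_PAGE_HIGHMEM_LIMIT is what exposes
     * high memory, as the first hunk below shows.
     */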
-rw-r--r--   i386/i386at/biosmem.c |  9 +++------
-rw-r--r--   vm/vm_page.c          | 16 ----------------
-rw-r--r--   vm/vm_resident.c      |  9 +++++++++
3 files changed, 12 insertions(+), 22 deletions(-)
diff --git a/i386/i386at/biosmem.c b/i386/i386at/biosmem.c
index 62be567c..a1040207 100644
--- a/i386/i386at/biosmem.c
+++ b/i386/i386at/biosmem.c
@@ -903,9 +903,7 @@ biosmem_setup(void)
             break;
 
         seg = &biosmem_segments[i];
-
-        /* XXX Limit to directmap until highmem is supported */
-        biosmem_load_segment(seg, VM_PAGE_DIRECTMAP_LIMIT);
+        biosmem_load_segment(seg, VM_PAGE_HIGHMEM_LIMIT);
     }
 }
 
@@ -986,9 +984,8 @@ biosmem_free_usable(void)
 
         end = vm_page_trunc(entry->base_addr + entry->length);
 
-        /* XXX Limit to directmap until highmem is supported */
-        if (end > VM_PAGE_DIRECTMAP_LIMIT) {
-            end = VM_PAGE_DIRECTMAP_LIMIT;
+        if (end > VM_PAGE_HIGHMEM_LIMIT) {
+            end = VM_PAGE_HIGHMEM_LIMIT;
         }
 
         if (start < BIOSMEM_BASE)
diff --git a/vm/vm_page.c b/vm/vm_page.c
index 4c11ea7a..2a9f27b2 100644
--- a/vm/vm_page.c
+++ b/vm/vm_page.c
@@ -1620,10 +1620,6 @@ vm_page_boot_table_size(void)
     nr_pages = 0;
 
     for (i = 0; i < vm_page_segs_size; i++) {
-        /* XXX */
-        if (i > VM_PAGE_SEG_DIRECTMAP)
-            continue;
-
         nr_pages += vm_page_atop(vm_page_boot_seg_size(&vm_page_boot_segs[i]));
     }
 
@@ -1643,10 +1639,6 @@ vm_page_table_size(void)
     nr_pages = 0;
 
     for (i = 0; i < vm_page_segs_size; i++) {
-        /* XXX */
-        if (i > VM_PAGE_SEG_DIRECTMAP)
-            continue;
-
         nr_pages += vm_page_atop(vm_page_seg_size(&vm_page_segs[i]));
     }
 
@@ -1684,10 +1676,6 @@ vm_page_mem_size(void)
     total = 0;
 
     for (i = 0; i < vm_page_segs_size; i++) {
-        /* XXX */
-        if (i > VM_PAGE_SEG_DIRECTMAP)
-            continue;
-
         total += vm_page_seg_size(&vm_page_segs[i]);
     }
 
@@ -1703,10 +1691,6 @@ vm_page_mem_free(void)
     total = 0;
 
     for (i = 0; i < vm_page_segs_size; i++) {
-        /* XXX */
-        if (i > VM_PAGE_SEG_DIRECTMAP)
-            continue;
-
         total += vm_page_segs[i].nr_free_pages;
     }
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index e276fe68..e3e34dc3 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -771,6 +771,15 @@ vm_page_t vm_page_grab(void)
 
     simple_lock(&vm_page_queue_free_lock);
 
+    /*
+     * XXX Mach has many modules that merely assume memory is
+     * directly mapped in kernel space. Instead of updating all
+     * users, we assume those which need specific physical memory
+     * properties will wire down their pages, either because
+     * they can't be paged (not part of an object), or with
+     * explicit VM calls. The strategy is then to let memory
+     * pressure balance the physical segments with pageable pages.
+     */
     mem = vm_page_alloc_pa(0, VM_PAGE_SEL_DIRECTMAP, VM_PT_KERNEL);
 
     if (mem == NULL) {
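The comment added above implies a division of labor: vm_page_grab() keeps
serving wired kernel allocations from the direct-mapped segment, while
pageable, object-backed pages may live in any segment, and memory pressure
then balances the segments. A hedged sketch of the pageable side of that
strategy follows; VM_PAGE_SEL_HIGHMEM and VM_PT_OBJECT do not appear in
this patch and are assumed names:

    /*
     * Sketch only: an object-backed (pageable) allocation under the
     * strategy described in the comment above. VM_PAGE_SEL_HIGHMEM is
     * assumed to prefer the HIGHMEM segment and fall back to lower
     * segments; VM_PT_OBJECT is an assumed type tag for pageable pages.
     */
    static vm_page_t
    vm_page_grab_pageable(void)
    {
        vm_page_t mem;

        simple_lock(&vm_page_queue_free_lock);
        /* Pageable pages carry no direct-map requirement, so any
           physical segment will do. */
        mem = vm_page_alloc_pa(0, VM_PAGE_SEL_HIGHMEM, VM_PT_OBJECT);
        simple_unlock(&vm_page_queue_free_lock);

        return mem;
    }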