summary refs log tree commit diff
path: root/vm/vm_user.c
diff options
context:
space:
mode:
Diffstat (limited to 'vm/vm_user.c')
-rw-r--r--  vm/vm_user.c  148
1 file changed, 137 insertions(+), 11 deletions(-)
diff --git a/vm/vm_user.c b/vm/vm_user.c
index ad1fa75d..62aedad3 100644
--- a/vm/vm_user.c
+++ b/vm/vm_user.c
@@ -40,9 +40,11 @@
#include <mach/vm_statistics.h>
#include <mach/vm_cache_statistics.h>
#include <mach/vm_sync.h>
+#include <kern/gnumach.server.h>
#include <kern/host.h>
-#include <kern/task.h>
#include <kern/mach.server.h>
+#include <kern/mach_host.server.h>
+#include <kern/task.h>
#include <vm/vm_fault.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
@@ -338,6 +340,11 @@ kern_return_t vm_map(
if (size == 0)
return KERN_INVALID_ARGUMENT;
+#ifdef USER32
+ if (mask & 0x80000000)
+ mask |= 0xffffffff00000000;
+#endif
+
*address = trunc_page(*address);
size = round_page(size);
@@ -425,12 +432,11 @@ kern_return_t vm_map(
*
* [ To unwire the pages, specify VM_PROT_NONE. ]
*/
-kern_return_t vm_wire(port, map, start, size, access)
- const ipc_port_t port;
- vm_map_t map;
- vm_offset_t start;
- vm_size_t size;
- vm_prot_t access;
+kern_return_t vm_wire(const ipc_port_t port,
+ vm_map_t map,
+ vm_offset_t start,
+ vm_size_t size,
+ vm_prot_t access)
{
boolean_t priv;
@@ -590,6 +596,10 @@ kern_return_t vm_allocate_contiguous(
if (palign == 0)
palign = PAGE_SIZE;
+ /* FIXME: Allows some small alignments less than page size */
+ if ((palign < PAGE_SIZE) && (PAGE_SIZE % palign == 0))
+ palign = PAGE_SIZE;
+
/* FIXME */
if (palign != PAGE_SIZE)
return KERN_INVALID_ARGUMENT;
@@ -597,12 +607,24 @@ kern_return_t vm_allocate_contiguous(
selector = VM_PAGE_SEL_DMA;
if (pmax > VM_PAGE_DMA_LIMIT)
#ifdef VM_PAGE_DMA32_LIMIT
- selector = VM_PAGE_SEL_DMA32;
+#if VM_PAGE_DMA32_LIMIT < VM_PAGE_DIRECTMAP_LIMIT
+ if (pmax <= VM_PAGE_DMA32_LIMIT)
+ selector = VM_PAGE_SEL_DMA32;
if (pmax > VM_PAGE_DMA32_LIMIT)
#endif
- selector = VM_PAGE_SEL_DIRECTMAP;
+#endif
+ if (pmax <= VM_PAGE_DIRECTMAP_LIMIT)
+ selector = VM_PAGE_SEL_DIRECTMAP;
if (pmax > VM_PAGE_DIRECTMAP_LIMIT)
- selector = VM_PAGE_SEL_HIGHMEM;
+#ifdef VM_PAGE_DMA32_LIMIT
+#if VM_PAGE_DMA32_LIMIT > VM_PAGE_DIRECTMAP_LIMIT
+ if (pmax <= VM_PAGE_DMA32_LIMIT)
+ selector = VM_PAGE_SEL_DMA32;
+ if (pmax > VM_PAGE_DMA32_LIMIT)
+#endif
+#endif
+ if (pmax <= VM_PAGE_HIGHMEM_LIMIT)
+ selector = VM_PAGE_SEL_HIGHMEM;
size = vm_page_round(size);
@@ -670,11 +692,115 @@ kern_return_t vm_allocate_contiguous(
return kr;
}
+ for (i = 0; i < vm_page_atop(size); i++)
+ vm_page_unwire(&pages[i]);
+
*result_vaddr = vaddr;
*result_paddr = pages->phys_addr;
assert(*result_paddr >= pmin);
- assert(*result_paddr + size < pmax);
+ assert(*result_paddr + size <= pmax);
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * vm_pages_phys returns information about a region of memory
+ */
+kern_return_t vm_pages_phys(
+ host_t host,
+ vm_map_t map,
+ vm_address_t address,
+ vm_size_t size,
+ rpc_phys_addr_array_t *pagespp,
+ mach_msg_type_number_t *countp)
+{
+ if (host == HOST_NULL)
+ return KERN_INVALID_HOST;
+ if (map == VM_MAP_NULL)
+ return KERN_INVALID_TASK;
+
+ if (!page_aligned(address))
+ return KERN_INVALID_ARGUMENT;
+ if (!page_aligned(size))
+ return KERN_INVALID_ARGUMENT;
+
+ mach_msg_type_number_t count = atop(size), cur;
+ rpc_phys_addr_array_t pagesp = *pagespp;
+ kern_return_t kr;
+
+ if (*countp < count) {
+ vm_offset_t allocated;
+ /* Avoid faults while we keep vm locks */
+ kr = kmem_alloc(ipc_kernel_map, &allocated,
+ count * sizeof(pagesp[0]));
+ if (kr != KERN_SUCCESS)
+ return KERN_RESOURCE_SHORTAGE;
+ pagesp = (rpc_phys_addr_array_t) allocated;
+ }
+
+ for (cur = 0; cur < count; cur++) {
+ vm_map_t cmap; /* current map in traversal */
+ rpc_phys_addr_t paddr;
+ vm_map_entry_t entry; /* entry in current map */
+
+ /* find the entry containing (or following) the address */
+ vm_map_lock_read(map);
+ for (cmap = map;;) {
+ /* cmap is read-locked */
+
+ if (!vm_map_lookup_entry(cmap, address, &entry)) {
+ entry = VM_MAP_ENTRY_NULL;
+ break;
+ }
+
+ if (entry->is_sub_map) {
+ /* move down to the sub map */
+
+ vm_map_t nmap = entry->object.sub_map;
+ vm_map_lock_read(nmap);
+ vm_map_unlock_read(cmap);
+ cmap = nmap;
+ continue;
+ } else {
+ /* Found it */
+ break;
+ }
+ /*NOTREACHED*/
+ }
+
+ paddr = 0;
+ if (entry) {
+ vm_offset_t offset = address - entry->vme_start + entry->offset;
+ vm_object_t object = entry->object.vm_object;
+
+ if (object) {
+ vm_object_lock(object);
+ vm_page_t page = vm_page_lookup(object, offset);
+ if (page) {
+ if (page->phys_addr != (typeof(pagesp[cur])) page->phys_addr)
+ printf("warning: physical address overflow in vm_pages_phys!!\n");
+ else
+ paddr = page->phys_addr;
+ }
+ vm_object_unlock(object);
+ }
+ }
+ vm_map_unlock_read(cmap);
+ pagesp[cur] = paddr;
+
+ address += PAGE_SIZE;
+ }
+
+ if (pagesp != *pagespp) {
+ vm_map_copy_t copy;
+ kr = vm_map_copyin(ipc_kernel_map, (vm_offset_t) pagesp,
+ count * sizeof(pagesp[0]), TRUE, &copy);
+ assert(kr == KERN_SUCCESS);
+ *pagespp = (rpc_phys_addr_array_t) copy;
+ }
+
+ *countp = count;
return KERN_SUCCESS;
}