-rw-r--r--  doc/mach.texi                               24
-rwxr-xr-x  i386/include/mach/i386/machine_types.defs    5
-rw-r--r--  i386/include/mach/i386/vm_types.h             5
-rw-r--r--  include/mach/gnumach.defs                    24
-rw-r--r--  vm/vm_user.c                                126
5 files changed, 184 insertions, 0 deletions
diff --git a/doc/mach.texi b/doc/mach.texi
index dcf0f93c..91ec96ee 100644
--- a/doc/mach.texi
+++ b/doc/mach.texi
@@ -3056,6 +3056,30 @@ specified and @code{KERN_NO_SPACE} if there was not enough space left to
satisfy the request.
@end deftypefun
+@deftypefun kern_return_t vm_allocate_contiguous (@w{host_priv_t @var{host_priv}}, @w{vm_task_t @var{target_task}}, @w{vm_address_t *@var{vaddr}}, @w{phys_addr_t *@var{paddr}}, @w{vm_size_t @var{size}}, @w{phys_addr_t @var{pmin}}, @w{phys_addr_t @var{pmax}}, @w{phys_addr_t @var{palign}})
+The function @code{vm_allocate_contiguous} allocates a contiguous region of
+physical memory, placing a virtual mapping of the physical pages in the
+specified @var{target_task}'s address space.
+
+The virtual space will be allocated wherever it is available. The virtual
+address at which the physical memory is mapped will be returned in
+@var{vaddr}. The physical address of the start of the allocated memory
+will be returned in @var{paddr}.
+
+@var{size} is the number of bytes to allocate (rounded by the system in a
+machine-dependent way to an integral number of virtual pages).
+
+Constraints can be placed on the physical address to cope with hardware
+limitations on physical memory access, e.g.@: for DMA. @var{pmin} is the
+lowest physical address at which the allocated memory may start. @var{pmax}
+is the exclusive upper bound on the physical addresses used, i.e.@: the byte
+just past the highest acceptable address. @var{palign} is the alignment
+restriction, which must be a power of two.
+
+The function returns @code{KERN_SUCCESS} if the memory was successfully
+allocated, @code{KERN_RESOURCE_SHORTAGE} if there was not enough physical memory
+left to satisfy the request, and @code{KERN_NO_SPACE} if there was not enough
+virtual space left to satisfy the request.
+@end deftypefun
@node Memory Deallocation
@section Memory Deallocation
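
A minimal user-space sketch of calling vm_allocate_contiguous as documented
above, assuming the MIG-generated stub for gnumach.defs is available (the
<mach/gnumach.h> header name and the use of the Hurd's get_privileged_ports()
to obtain the privileged host port are assumptions, not part of this change):

    /* Allocate a 64 KiB DMA buffer that must lie entirely below 4 GiB.  */
    #include <stdio.h>
    #include <mach.h>
    #include <mach/gnumach.h>   /* assumed location of the generated stub */
    #include <hurd.h>

    int
    main (void)
    {
      mach_port_t host_priv;
      vm_address_t vaddr;
      rpc_phys_addr_t paddr;
      kern_return_t kr;

      if (get_privileged_ports (&host_priv, NULL) != 0)
        {
          fprintf (stderr, "cannot get the privileged host port\n");
          return 1;
        }

      /* pmax is exclusive: 1ULL << 32 keeps the buffer below 4 GiB.
         pmin = 0 and palign = 0 match the current implementation's limits.  */
      kr = vm_allocate_contiguous (host_priv, mach_task_self (),
                                   &vaddr, &paddr, 64 * 1024,
                                   0, 1ULL << 32, 0);
      if (kr != KERN_SUCCESS)
        {
          fprintf (stderr, "vm_allocate_contiguous: %d\n", kr);
          return 1;
        }

      printf ("virtual 0x%lx -> physical 0x%llx\n",
              (unsigned long) vaddr, (unsigned long long) paddr);
      return 0;
    }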
diff --git a/i386/include/mach/i386/machine_types.defs b/i386/include/mach/i386/machine_types.defs
index 6ff93dbd..dfbc521e 100755
--- a/i386/include/mach/i386/machine_types.defs
+++ b/i386/include/mach/i386/machine_types.defs
@@ -58,4 +58,9 @@ type natural_t = uint32_t;
*/
type integer_t = int32_t;
+/*
+ * Physical address size
+ */
+type rpc_phys_addr_t = uint64_t;
+
#endif /* _MACHINE_MACHINE_TYPES_DEFS_ */
diff --git a/i386/include/mach/i386/vm_types.h b/i386/include/mach/i386/vm_types.h
index 28609e7c..29b9e1e6 100644
--- a/i386/include/mach/i386/vm_types.h
+++ b/i386/include/mach/i386/vm_types.h
@@ -72,11 +72,16 @@ typedef vm_offset_t * vm_offset_array_t;
/*
* A type for physical addresses.
*/
+#ifdef MACH_KERNEL
#ifdef PAE
typedef unsigned long long phys_addr_t;
#else /* PAE */
typedef unsigned long phys_addr_t;
#endif /* PAE */
+#else
+typedef unsigned long long phys_addr_t;
+#endif
+typedef unsigned long long rpc_phys_addr_t;
/*
* A vm_size_t is the proper type for e.g.
diff --git a/include/mach/gnumach.defs b/include/mach/gnumach.defs
index 97ab573c..d423a10a 100644
--- a/include/mach/gnumach.defs
+++ b/include/mach/gnumach.defs
@@ -165,3 +165,27 @@ routine vm_msync(
address : vm_address_t;
size : vm_size_t;
sync_flags : vm_sync_t);
+
+/*
+ * This routine is meant for allocating DMA buffers.
+ * It returns a contiguous chunk of physical memory together with its
+ * physical address, in addition to the virtual address.
+ * Physical memory range limits and an alignment can be specified.
+ * NB:
+ * pmax is defined as the byte after the maximum address,
+ * e.g. 0x100000000 for a 4GiB limit.
+ */
+/* XXX
+ * Future work: the RPC should return a special
+ * memory object (similar to device_map() ), which can then be mapped into
+ * the process address space with vm_map() like any other memory object.
+ */
+routine vm_allocate_contiguous(
+ host_priv : host_priv_t;
+ target_task : vm_task_t;
+ out vaddr : vm_address_t;
+ out paddr : rpc_phys_addr_t;
+ size : vm_size_t;
+ pmin : rpc_phys_addr_t;
+ pmax : rpc_phys_addr_t;
+ palign : rpc_phys_addr_t);
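
As a numeric illustration of the pmax convention described in the comment
above (the limit names are made up for this sketch; note that with the
implementation in vm_user.c below, pmin must currently be 0 and palign must
be 0 or the page size):

    /* pmax is exclusive: the byte after the highest acceptable address.  */
    #define PHYS_LIMIT_16MIB 0x1000000ULL    /* buffer entirely below 16 MiB */
    #define PHYS_LIMIT_4GIB  0x100000000ULL  /* buffer entirely below 4 GiB  */

    /* e.g. for a 32-bit-capable DMA engine:
       vm_allocate_contiguous (host_priv, task, &vaddr, &paddr, size,
                               0, PHYS_LIMIT_4GIB, 0);  */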
diff --git a/vm/vm_user.c b/vm/vm_user.c
index f6fb1a41..4d5728c8 100644
--- a/vm/vm_user.c
+++ b/vm/vm_user.c
@@ -533,3 +533,129 @@ kern_return_t vm_msync(
return vm_map_msync(map, (vm_offset_t) address, size, sync_flags);
}
+
+/*
+ * vm_allocate_contiguous allocates "zero fill" physical memory and maps
+ * it into the specified map.
+ */
+/* TODO: respect physical alignment (palign)
+ * and minimum physical address (pmin)
+ */
+kern_return_t vm_allocate_contiguous(
+ host_t host_priv,
+ vm_map_t map,
+ vm_address_t *result_vaddr,
+ rpc_phys_addr_t *result_paddr,
+ vm_size_t size,
+ rpc_phys_addr_t pmin,
+ rpc_phys_addr_t pmax,
+ rpc_phys_addr_t palign)
+{
+ vm_size_t alloc_size;
+ unsigned int npages;
+ unsigned int i;
+ unsigned int order;
+ unsigned int selector;
+ vm_page_t pages;
+ vm_object_t object;
+ kern_return_t kr;
+ vm_address_t vaddr;
+
+ if (host_priv == HOST_NULL)
+ return KERN_INVALID_HOST;
+
+ if (map == VM_MAP_NULL)
+ return KERN_INVALID_TASK;
+
+ /* FIXME */
+ if (pmin != 0)
+ return KERN_INVALID_ARGUMENT;
+
+ if (palign == 0)
+ palign = PAGE_SIZE;
+
+ /* FIXME */
+ if (palign != PAGE_SIZE)
+ return KERN_INVALID_ARGUMENT;
+
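+	/* Start from the most constrained page segment and widen the
+	   selector only when pmax lies beyond its physical limit.  */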
+ selector = VM_PAGE_SEL_DMA;
+ if (pmax > VM_PAGE_DMA_LIMIT)
+#ifdef VM_PAGE_DMA32_LIMIT
+ selector = VM_PAGE_SEL_DMA32;
+ if (pmax > VM_PAGE_DMA32_LIMIT)
+#endif
+ selector = VM_PAGE_SEL_DIRECTMAP;
+ if (pmax > VM_PAGE_DIRECTMAP_LIMIT)
+ selector = VM_PAGE_SEL_HIGHMEM;
+
+ size = vm_page_round(size);
+
+ if (size == 0)
+ return KERN_INVALID_ARGUMENT;
+
+ object = vm_object_allocate(size);
+
+ if (object == NULL)
+ return KERN_RESOURCE_SHORTAGE;
+
+ /*
+ * XXX The page allocator returns blocks with a power-of-two size.
+ * The requested size may not be a power-of-two, requiring some
+ * work to release back the pages that aren't needed.
+ */
+ order = vm_page_order(size);
+ alloc_size = (1 << (order + PAGE_SHIFT));
+ npages = vm_page_atop(alloc_size);
+
+ pages = vm_page_grab_contig(alloc_size, selector);
+
+ if (pages == NULL) {
+ vm_object_deallocate(object);
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ vm_object_lock(object);
+ vm_page_lock_queues();
+
+ for (i = 0; i < vm_page_atop(size); i++) {
+ /*
+ * XXX We can safely handle contiguous pages as an array,
+ * but this relies on knowing the implementation of the
+ * page allocator.
+ */
+ pages[i].busy = FALSE;
+ vm_page_insert(&pages[i], object, vm_page_ptoa(i));
+ vm_page_wire(&pages[i]);
+ }
+
+ vm_page_unlock_queues();
+ vm_object_unlock(object);
+
+ for (i = vm_page_atop(size); i < npages; i++) {
+ vm_page_release(&pages[i], FALSE, FALSE);
+ }
+
+ vaddr = 0;
+ kr = vm_map_enter(map, &vaddr, size, 0, TRUE, object, 0, FALSE,
+ VM_PROT_READ | VM_PROT_WRITE,
+ VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_DEFAULT);
+
+ if (kr != KERN_SUCCESS) {
+ vm_object_deallocate(object);
+ return kr;
+ }
+
+ kr = vm_map_pageable(map, vaddr, vaddr + size,
+ VM_PROT_READ | VM_PROT_WRITE,
+ TRUE, TRUE);
+
+ if (kr != KERN_SUCCESS) {
+ vm_map_remove(map, vaddr, vaddr + size);
+ return kr;
+ }
+
+ *result_vaddr = vaddr;
+ *result_paddr = pages->phys_addr;
+
+ return KERN_SUCCESS;
+}
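
To make the power-of-two rounding mentioned in the XXX comment concrete,
here is a small stand-alone sketch; page_order() is a stand-in written for
this illustration, not the kernel's vm_page_order() macro. A 5-page request
is served from an 8-page block, and the 3 trailing pages are released.

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Smallest order such that (1 << order) pages cover nr_pages.  */
    static unsigned int
    page_order (unsigned long nr_pages)
    {
      unsigned int order = 0;

      while ((1UL << order) < nr_pages)
        order++;
      return order;
    }

    int
    main (void)
    {
      unsigned long size = 5 * PAGE_SIZE;            /* requested size */
      unsigned int order = page_order (size >> PAGE_SHIFT);
      unsigned long alloc_size = 1UL << (order + PAGE_SHIFT);
      unsigned long npages = alloc_size >> PAGE_SHIFT;
      unsigned long used = size >> PAGE_SHIFT;

      /* Prints: order 3: grab 8 pages, keep 5, release 3 */
      printf ("order %u: grab %lu pages, keep %lu, release %lu\n",
              order, npages, used, npages - used);
      return 0;
    }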