summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSamuel Thibault <samuel.thibault@ens-lyon.org>2024-01-30 19:58:18 +0100
committerSamuel Thibault <samuel.thibault@ens-lyon.org>2024-01-30 19:59:18 +0100
commit0f9822d2ab9881ebb601b25ab3f381bbb6197b05 (patch)
tree23c0dd493b6ac33b17032b416e5b3a76c605940b
parent366954d50be00d085d44a8c461a80f2d0b663224 (diff)
Add vm_pages_phys
For rumpdisk to efficiently determine the physical address — both for checking whether it is below 4GiB, and for giving it to the disk driver — we need a gnumach primitive, one that is not conditioned by MACH_VM_DEBUG the way mach_vm_region_info and mach_vm_object_pages_phys are.
-rw-r--r--doc/mach.texi17
-rwxr-xr-xi386/include/mach/i386/machine_types.defs1
-rw-r--r--i386/include/mach/i386/vm_types.h1
-rw-r--r--include/mach/gnumach.defs10
-rw-r--r--vm/vm_user.c100
5 files changed, 129 insertions, 0 deletions
diff --git a/doc/mach.texi b/doc/mach.texi
index 76bf68f7..f35fc6bb 100644
--- a/doc/mach.texi
+++ b/doc/mach.texi
@@ -3010,6 +3010,7 @@ the kernel.
* Memory Attributes:: Tweaking memory regions.
* Mapping Memory Objects:: How to map memory objects.
* Memory Statistics:: How to get statistics about memory usage.
+* Memory physical addresses:: How to get physical addresses of memory.
@end menu
@node Memory Allocation
@@ -3514,6 +3515,22 @@ constant for the life of the task.
@end deftypefun
+@node Memory physical addresses
+@section Memory physical addresses
+
+@deftypefun kern_return_t vm_pages_phys (@w{host_t @var{host}}, @w{vm_task_t @var{target_task}}, @w{vm_address_t @var{address}}, @w{vm_size_t @var{size}}, @w{rpc_phys_addr_array_t *@var{pages}}, @w{mach_msg_type_number_t *@var{pagesCnt}})
+The function @code{vm_pages_phys} retrieves the physical addresses of the
+specified region (@var{size} bytes starting from @var{address}) of
+@var{target_task}'s virtual address space.
+
+Both @var{address} and @var{size} have to be aligned on @code{vm_page_size}.
+
+@var{pages} is an array of @code{rpc_phys_addr_t} that is supplied by the
+caller and returned filled with the physical addresses of the pages.
+@var{pagesCnt} is supplied as the maximum number of elements in the
+@var{pages} array. On return, it contains the actual number of physical
+addresses in @var{pages}.
+
+
@node External Memory Management
@chapter External Memory Management
diff --git a/i386/include/mach/i386/machine_types.defs b/i386/include/mach/i386/machine_types.defs
index 3d540be9..76c7dcf9 100755
--- a/i386/include/mach/i386/machine_types.defs
+++ b/i386/include/mach/i386/machine_types.defs
@@ -102,5 +102,6 @@ type long_integer_t = rpc_long_integer_t
* Physical address size
*/
type rpc_phys_addr_t = uint64_t;
+type rpc_phys_addr_array_t = array[] of rpc_phys_addr_t;
#endif /* _MACHINE_MACHINE_TYPES_DEFS_ */
diff --git a/i386/include/mach/i386/vm_types.h b/i386/include/mach/i386/vm_types.h
index bd07ef26..8f528ae1 100644
--- a/i386/include/mach/i386/vm_types.h
+++ b/i386/include/mach/i386/vm_types.h
@@ -94,6 +94,7 @@ typedef unsigned long phys_addr_t;
typedef unsigned long long phys_addr_t;
#endif
typedef unsigned long long rpc_phys_addr_t;
+typedef rpc_phys_addr_t *rpc_phys_addr_array_t;
/*
* A vm_size_t is the proper type for e.g.
diff --git a/include/mach/gnumach.defs b/include/mach/gnumach.defs
index 05101a48..6252de96 100644
--- a/include/mach/gnumach.defs
+++ b/include/mach/gnumach.defs
@@ -197,3 +197,13 @@ routine vm_allocate_contiguous(
simpleroutine task_set_essential(
task : task_t;
essential : boolean_t);
+
+/*
+ * Returns physical addresses of a region of memory
+ */
+routine vm_pages_phys(
+ host_priv : host_priv_t;
+ target_task : vm_task_t;
+ vaddr : vm_address_t;
+ size : vm_size_t;
+ out pages : rpc_phys_addr_array_t);
diff --git a/vm/vm_user.c b/vm/vm_user.c
index 08cc17a4..1a0ec802 100644
--- a/vm/vm_user.c
+++ b/vm/vm_user.c
@@ -700,3 +700,103 @@ kern_return_t vm_allocate_contiguous(
return KERN_SUCCESS;
}
+
+/*
+ * vm_pages_phys returns information about a region of memory
+ */
+kern_return_t vm_pages_phys(
+ host_t host,
+ vm_map_t map,
+ vm_address_t address,
+ vm_size_t size,
+ rpc_phys_addr_array_t *pagespp,
+ mach_msg_type_number_t *countp)
+{
+ if (host == HOST_NULL)
+ return KERN_INVALID_HOST;
+ if (map == VM_MAP_NULL)
+ return KERN_INVALID_TASK;
+
+ if (!page_aligned(address))
+ return KERN_INVALID_ARGUMENT;
+ if (!page_aligned(size))
+ return KERN_INVALID_ARGUMENT;
+
+ mach_msg_type_number_t count = atop(size), cur;
+ rpc_phys_addr_array_t pagesp = *pagespp;
+ kern_return_t kr;
+
+ if (*countp < count) {
+ vm_offset_t allocated;
+ kr = kmem_alloc_pageable(ipc_kernel_map, &allocated,
+ count * sizeof(pagesp[0]));
+ if (kr != KERN_SUCCESS)
+ return KERN_RESOURCE_SHORTAGE;
+ pagesp = (rpc_phys_addr_array_t) allocated;
+ }
+
+ for (cur = 0; cur < count; cur++) {
+ vm_map_t cmap; /* current map in traversal */
+ rpc_phys_addr_t paddr;
+ vm_map_entry_t entry; /* entry in current map */
+
+ /* find the entry containing (or following) the address */
+ vm_map_lock_read(map);
+ for (cmap = map;;) {
+ /* cmap is read-locked */
+
+ if (!vm_map_lookup_entry(cmap, address, &entry)) {
+ entry = VM_MAP_ENTRY_NULL;
+ break;
+ }
+
+ if (entry->is_sub_map) {
+ /* move down to the sub map */
+
+ vm_map_t nmap = entry->object.sub_map;
+ vm_map_lock_read(nmap);
+ vm_map_unlock_read(cmap);
+ cmap = nmap;
+ continue;
+ } else {
+ /* Found it */
+ break;
+ }
+ /*NOTREACHED*/
+ }
+
+ paddr = 0;
+ if (entry) {
+ vm_offset_t offset = address - entry->vme_start + entry->offset;
+ vm_object_t object = entry->object.vm_object;
+
+ if (object) {
+ vm_object_lock(object);
+ vm_page_t page = vm_page_lookup(object, offset);
+ if (page) {
+ if (page->phys_addr != (typeof(pagesp[cur])) page->phys_addr)
+ printf("warning: physical address overflow in vm_pages_phys!!\n");
+ else
+ paddr = page->phys_addr;
+ }
+ vm_object_unlock(object);
+ }
+ }
+ vm_map_unlock_read(cmap);
+ pagesp[cur] = paddr;
+
+ address += PAGE_SIZE;
+ }
+
+ if (pagesp != *pagespp) {
+ vm_map_copy_t copy;
+ kr = vm_map_copyin(ipc_kernel_map, (vm_offset_t) pagesp,
+ count * sizeof(pagesp[0]), TRUE, &copy);
+ assert(kr == KERN_SUCCESS);
+ *pagespp = (rpc_phys_addr_array_t) copy;
+ }
+
+ *countp = count;
+
+ return KERN_SUCCESS;
+}