path: root/kern/gsync.c
Diffstat (limited to 'kern/gsync.c')
-rw-r--r--  kern/gsync.c  19
1 file changed, 6 insertions(+), 13 deletions(-)
diff --git a/kern/gsync.c b/kern/gsync.c
index e73a6cf0..31b564ca 100644
--- a/kern/gsync.c
+++ b/kern/gsync.c
@@ -134,11 +134,12 @@ probe_address (vm_map_t map, vm_offset_t addr,
vm_prot_t rprot;
boolean_t wired_p;
- if (vm_map_lookup (&map, addr, prot, &ver,
+ if (vm_map_lookup (&map, addr, prot, TRUE, &ver,
&vap->obj, &vap->off, &rprot, &wired_p) != KERN_SUCCESS)
return (-1);
else if ((rprot & prot) != prot)
{
+ vm_map_unlock_read (map);
vm_object_unlock (vap->obj);
return (-1);
}
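The new fourth argument (TRUE) evidently asks vm_map_lookup to leave the map read-locked when the lookup succeeds, which is why the protection-mismatch path now has to drop the map read lock in addition to the object lock. A condensed sketch of probe_address with this hunk applied (the parameter list and anything outside the hunk are assumptions, not the literal source):

static int
probe_address (vm_map_t map, vm_offset_t addr,
  vm_prot_t prot, struct vm_args *vap)
{
  vm_map_version_t ver;
  vm_prot_t rprot;
  boolean_t wired_p;

  /* TRUE: keep the map read-locked on a successful lookup.  */
  if (vm_map_lookup (&map, addr, prot, TRUE, &ver,
      &vap->obj, &vap->off, &rprot, &wired_p) != KERN_SUCCESS)
    return (-1);
  else if ((rprot & prot) != prot)
    {
      /* The lookup succeeded, so both the map read lock and the
       * object lock are held; release them before failing.  */
      vm_map_unlock_read (map);
      vm_object_unlock (vap->obj);
      return (-1);
    }

  /* Success: the map stays read-locked and the object stays locked
   * for the caller.  */
  return (0);
}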
@@ -227,18 +228,13 @@ kern_return_t gsync_wait (task_t task, vm_offset_t addr,
else if (addr % sizeof (int) != 0)
return (KERN_INVALID_ADDRESS);
- vm_map_lock_read (task->map);
-
struct gsync_waiter w;
struct vm_args va;
boolean_t remote = task != current_task ();
int bucket = gsync_prepare_key (task, addr, flags, &w.key, &va);
if (bucket < 0)
- {
- vm_map_unlock_read (task->map);
- return (KERN_INVALID_ADDRESS);
- }
+ return (KERN_INVALID_ADDRESS);
else if (remote)
/* The VM object is returned locked. However, we are about to acquire
* a sleeping lock for a bucket, so we must not hold any simple
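With vm_map_lookup keeping the map locked itself, gsync_wait no longer takes the map read lock up front: a failed gsync_prepare_key returns with no locks held, so the error path is a plain return, while a successful one returns with the map read-locked and the VM object locked. A condensed sketch of the resulting prologue (everything outside the hunk is abridged):

  struct gsync_waiter w;
  struct vm_args va;
  boolean_t remote = task != current_task ();
  int bucket = gsync_prepare_key (task, addr, flags, &w.key, &va);

  if (bucket < 0)
    /* Failed lookup: the map read lock is not held, nothing to undo.  */
    return (KERN_INVALID_ADDRESS);
  else if (remote)
    /* Successful lookup: the map is read-locked and the object is
     * locked.  Pin the object before taking the sleeping bucket lock,
     * since no simple locks may be held across it.  */
    vm_object_reference_locked (va.obj);

  /* ... the object lock is dropped and the map read lock released
   * further down, outside this hunk ... */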
@@ -354,17 +350,12 @@ kern_return_t gsync_wake (task_t task,
else if (addr % sizeof (int) != 0)
return (KERN_INVALID_ADDRESS);
- vm_map_lock_read (task->map);
-
union gsync_key key;
struct vm_args va;
int bucket = gsync_prepare_key (task, addr, flags, &key, &va);
if (bucket < 0)
- {
- vm_map_unlock_read (task->map);
- return (KERN_INVALID_ADDRESS);
- }
+ return (KERN_INVALID_ADDRESS);
else if (current_task () != task && (flags & GSYNC_MUTATE) != 0)
/* See above on why we do this. */
vm_object_reference_locked (va.obj);
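gsync_wake follows the same pattern; the only difference is that the object is pinned only for a remote wake that must also write the word (GSYNC_MUTATE). A minimal sketch of the changed prologue, with added comments:

  union gsync_key key;
  struct vm_args va;
  int bucket = gsync_prepare_key (task, addr, flags, &key, &va);

  if (bucket < 0)
    /* Failed lookup: no locks held.  */
    return (KERN_INVALID_ADDRESS);
  else if (current_task () != task && (flags & GSYNC_MUTATE) != 0)
    /* A remote mutating wake will touch the word after the simple
     * locks are dropped, so keep the object alive with a reference
     * taken while it is still locked.  */
    vm_object_reference_locked (va.obj);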
@@ -437,6 +428,7 @@ kern_return_t gsync_requeue (task_t task, vm_offset_t src,
int src_bkt = gsync_prepare_key (task, src, flags, &src_k, &va);
if (src_bkt < 0)
return (KERN_INVALID_ADDRESS);
+ vm_map_unlock_read (task->map);
/* Unlock the VM object before the second lookup. */
vm_object_unlock (va.obj);
@@ -444,6 +436,7 @@ kern_return_t gsync_requeue (task_t task, vm_offset_t src,
int dst_bkt = gsync_prepare_key (task, dst, flags, &dst_k, &va);
if (dst_bkt < 0)
return (KERN_INVALID_ADDRESS);
+ vm_map_unlock_read (task->map);
/* We never create any temporary mappings in 'requeue', so we
* can unlock the VM object right now. */
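In gsync_requeue, each successful lookup now also leaves the map read-locked, and since requeue only needs the two keys and never creates temporary mappings, both the map lock and the object lock can be dropped immediately after each gsync_prepare_key. A condensed sketch of the two lookups with these hunks applied (declarations abridged):

  union gsync_key src_k, dst_k;
  struct vm_args va;

  int src_bkt = gsync_prepare_key (task, src, flags, &src_k, &va);
  if (src_bkt < 0)
    return (KERN_INVALID_ADDRESS);

  /* The lookup left the map read-locked; drop it together with the
   * object lock before looking up the destination address.  */
  vm_map_unlock_read (task->map);
  vm_object_unlock (va.obj);

  int dst_bkt = gsync_prepare_key (task, dst, flags, &dst_k, &va);
  if (dst_bkt < 0)
    return (KERN_INVALID_ADDRESS);

  /* Same again: requeue never creates temporary mappings, so nothing
   * needs to stay locked here.  */
  vm_map_unlock_read (task->map);
  vm_object_unlock (va.obj);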