Diffstat (limited to 'kern/thread.c')
-rw-r--r--	kern/thread.c	181
1 file changed, 102 insertions, 79 deletions
diff --git a/kern/thread.c b/kern/thread.c
index 87be9231..1f47553f 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -57,9 +57,11 @@
#include <kern/slab.h>
#include <kern/mach_clock.h>
#include <vm/vm_kern.h>
+#include <vm/vm_user.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_port.h>
#include <ipc/mach_msg.h>
+#include <ipc/mach_port.h>
#include <machine/machspl.h> /* for splsched */
#include <machine/pcb.h>
#include <machine/thread.h> /* for MACHINE_STACK */
@@ -72,15 +74,10 @@ struct kmem_cache thread_cache;
queue_head_t reaper_queue;
decl_simple_lock_data(, reaper_lock)
-extern void pcb_module_init(void);
-
/* private */
struct thread thread_template;
#if MACH_DEBUG
-void stack_init(vm_offset_t stack); /* forward */
-void stack_finalize(vm_offset_t stack); /* forward */
-
#define STACK_MARKER 0xdeadbeefU
boolean_t stack_check_usage = FALSE;
decl_simple_lock_data(, stack_usage_lock)
@@ -127,10 +124,6 @@ vm_offset_t stack_free_list; /* splsched only */
unsigned int stack_free_count = 0; /* splsched only */
unsigned int stack_free_limit = 1; /* patchable */
-unsigned int stack_alloc_hits = 0; /* debugging */
-unsigned int stack_alloc_misses = 0; /* debugging */
-unsigned int stack_alloc_max = 0; /* debugging */
-
/*
* The next field is at the base of the stack,
* so the low end is left unsullied.
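The free-list link mentioned in this comment presumably lives in the topmost word of the stack region (its base, since kernel stacks grow downward), along the lines of the sketch below; the exact macro is an assumption, not part of this diff.

	#define stack_next(stack) \
		(*((vm_offset_t *) ((stack) + KERNEL_STACK_SIZE) - 1))

Keeping the link at the high end leaves the low addresses untouched, which is what the MACH_DEBUG stack-usage marking further down relies on.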
@@ -149,7 +142,7 @@ boolean_t stack_alloc_try(
thread_t thread,
void (*resume)(thread_t))
{
- register vm_offset_t stack;
+ vm_offset_t stack;
stack_lock();
stack = stack_free_list;
@@ -163,10 +156,10 @@ boolean_t stack_alloc_try(
if (stack != 0) {
stack_attach(thread, stack, resume);
- stack_alloc_hits++;
+ counter(c_stack_alloc_hits++);
return TRUE;
} else {
- stack_alloc_misses++;
+ counter(c_stack_alloc_misses++);
return FALSE;
}
}
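The raw hit/miss counters are replaced here by the counter() wrapper, which is assumed to follow the usual MACH_COUNTERS pattern and compile to nothing when counters are not configured, roughly:

	#if	MACH_COUNTERS
	#define counter(expr)		expr
	#else	/* MACH_COUNTERS */
	#define counter(expr)
	#endif	/* MACH_COUNTERS */

The c_stack_alloc_max update in stack_free() below gets an explicit #if MACH_COUNTERS guard instead, since it is a compare-and-store rather than a single expression.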
@@ -178,7 +171,7 @@ boolean_t stack_alloc_try(
* May block.
*/
-void stack_alloc(
+kern_return_t stack_alloc(
thread_t thread,
void (*resume)(thread_t))
{
@@ -202,15 +195,15 @@ void stack_alloc(
(void) splx(s);
if (stack == 0) {
+ kern_return_t kr;
/*
* Kernel stacks should be naturally aligned,
* so that it is easy to find the starting/ending
* addresses of a stack given an address in the middle.
*/
-
- if (kmem_alloc_aligned(kmem_map, &stack, KERNEL_STACK_SIZE)
- != KERN_SUCCESS)
- panic("stack_alloc");
+ kr = kmem_alloc_aligned(kmem_map, &stack, KERNEL_STACK_SIZE);
+ if (kr != KERN_SUCCESS)
+ return kr;
#if MACH_DEBUG
stack_init(stack);
@@ -218,6 +211,7 @@ void stack_alloc(
}
stack_attach(thread, stack, resume);
+ return KERN_SUCCESS;
}
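With stack_alloc() now returning a kern_return_t, callers can propagate an allocation failure instead of hitting the old panic. The natural alignment that the comment asks for is what lets the rest of the kernel recover a stack's bounds from any address inside it by masking; a minimal sketch, assuming KERNEL_STACK_SIZE is a power of two:

	/* Sketch only: not part of the patch. */
	static inline vm_offset_t
	stack_base_of(vm_offset_t addr)
	{
		return addr & ~((vm_offset_t) KERNEL_STACK_SIZE - 1);
	}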
/*
@@ -230,7 +224,7 @@ void stack_alloc(
void stack_free(
thread_t thread)
{
- register vm_offset_t stack;
+ vm_offset_t stack;
stack = stack_detach(thread);
@@ -238,8 +232,11 @@ void stack_free(
stack_lock();
stack_next(stack) = stack_free_list;
stack_free_list = stack;
- if (++stack_free_count > stack_alloc_max)
- stack_alloc_max = stack_free_count;
+ stack_free_count += 1;
+#if MACH_COUNTERS
+ if (stack_free_count > c_stack_alloc_max)
+ c_stack_alloc_max = stack_free_count;
+#endif /* MACH_COUNTERS */
stack_unlock();
}
}
@@ -253,7 +250,7 @@ void stack_free(
void stack_collect(void)
{
- register vm_offset_t stack;
+ vm_offset_t stack;
spl_t s;
s = splsched();
@@ -285,7 +282,7 @@ void stack_collect(void)
*/
void stack_privilege(
- register thread_t thread)
+ thread_t thread)
{
/*
* This implementation only works for the current thread.
@@ -398,11 +395,11 @@ void thread_init(void)
}
kern_return_t thread_create(
- register task_t parent_task,
+ task_t parent_task,
thread_t *child_thread) /* OUT */
{
- register thread_t new_thread;
- register processor_set_t pset;
+ thread_t new_thread;
+ processor_set_t pset;
if (parent_task == TASK_NULL)
return KERN_INVALID_ARGUMENT;
@@ -533,7 +530,6 @@ kern_return_t thread_create(
#endif /* HW_FOOTPRINT */
#if MACH_PCSAMPLE
- new_thread->pc_sample.buffer = 0;
new_thread->pc_sample.seqno = 0;
new_thread->pc_sample.sampletypes = 0;
#endif /* MACH_PCSAMPLE */
@@ -576,11 +572,11 @@ kern_return_t thread_create(
unsigned int thread_deallocate_stack = 0;
void thread_deallocate(
- register thread_t thread)
+ thread_t thread)
{
spl_t s;
- register task_t task;
- register processor_set_t pset;
+ task_t task;
+ processor_set_t pset;
time_value_t user_time, system_time;
@@ -711,7 +707,7 @@ void thread_deallocate(
}
void thread_reference(
- register thread_t thread)
+ thread_t thread)
{
spl_t s;
@@ -745,10 +741,10 @@ void thread_reference(
* since it needs a kernel stack to execute.)
*/
kern_return_t thread_terminate(
- register thread_t thread)
+ thread_t thread)
{
- register thread_t cur_thread = current_thread();
- register task_t cur_task;
+ thread_t cur_thread = current_thread();
+ task_t cur_task;
spl_t s;
if (thread == THREAD_NULL)
@@ -851,6 +847,28 @@ kern_return_t thread_terminate(
return KERN_SUCCESS;
}
+kern_return_t thread_terminate_release(
+ thread_t thread,
+ task_t task,
+ mach_port_t thread_name,
+ mach_port_t reply_port,
+ vm_offset_t address,
+ vm_size_t size)
+{
+ if (task == NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ mach_port_deallocate(task->itk_space, thread_name);
+
+ if (reply_port != MACH_PORT_NULL)
+ mach_port_destroy(task->itk_space, reply_port);
+
+ if ((address != 0) || (size != 0))
+ vm_deallocate(task->map, address, size);
+
+ return thread_terminate(thread);
+}
+
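The new thread_terminate_release() lets a thread hand over its last resources, its own port name, an optional reply port, and its stack, in the same call that terminates it, so user code never keeps running on memory it has just freed. A usage sketch from the user side, assuming the MIG stub mirrors the kernel routine's argument order (the helper name is hypothetical):

	/*
	 * Sketch only: exiting_thread_self_destroy() and the stub's
	 * argument order are assumptions, not part of this diff.
	 */
	static void
	exiting_thread_self_destroy(mach_port_t self, vm_offset_t stack_base,
				    vm_size_t stack_size)
	{
		thread_terminate_release(self, mach_task_self(),
					 self,		 /* name to deallocate */
					 MACH_PORT_NULL, /* no reply port */
					 stack_base, stack_size);
		/* Not reached once the kernel processes the call. */
	}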
/*
* thread_force_terminate:
*
@@ -860,9 +878,9 @@ kern_return_t thread_terminate(
*/
void
thread_force_terminate(
- register thread_t thread)
+ thread_t thread)
{
- boolean_t deallocate_here = FALSE;
+ boolean_t deallocate_here;
spl_t s;
ipc_thread_disable(thread);
@@ -902,11 +920,11 @@ thread_force_terminate(
*
*/
kern_return_t thread_halt(
- register thread_t thread,
+ thread_t thread,
boolean_t must_halt)
{
- register thread_t cur_thread = current_thread();
- register kern_return_t ret;
+ thread_t cur_thread = current_thread();
+ kern_return_t ret;
spl_t s;
if (thread == cur_thread)
@@ -950,7 +968,7 @@ kern_return_t thread_halt(
* operation can never cause a deadlock.)
*/
if (cur_thread->ast & AST_HALT) {
- thread_wakeup_with_result((event_t)&cur_thread->wake_active,
+ thread_wakeup_with_result(TH_EV_WAKE_ACTIVE(cur_thread),
THREAD_INTERRUPTED);
thread_unlock(thread);
thread_unlock(cur_thread);
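Here and in the hunks that follow, sleeps and wake-ups key on TH_EV_WAKE_ACTIVE()/TH_EV_STATE() instead of the addresses of individual struct thread fields. The macros are presumably defined to derive distinct event values from the thread pointer itself, for example:

	/* Assumed shape of the event-key macros; not defined in this file. */
	#define TH_EV_WAKE_ACTIVE(t)	((event_t) ((vm_offset_t) (t) + 0))
	#define TH_EV_STATE(t)		((event_t) ((vm_offset_t) (t) + 1))

Whatever the exact encoding, assert_wait()/thread_wakeup() pairs now key on the thread rather than on the layout of struct thread, so the pairing survives changes to the structure.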
@@ -988,7 +1006,7 @@ kern_return_t thread_halt(
*/
while ((thread->ast & AST_HALT) && (!(thread->state & TH_HALTED))) {
thread->wake_active = TRUE;
- thread_sleep((event_t) &thread->wake_active,
+ thread_sleep(TH_EV_WAKE_ACTIVE(thread),
simple_lock_addr(thread->lock), TRUE);
if (thread->state & TH_HALTED) {
@@ -1027,7 +1045,7 @@ kern_return_t thread_halt(
s = splsched();
thread_lock(thread);
thread_ast_clear(thread, AST_HALT);
- thread_wakeup_with_result((event_t)&thread->wake_active,
+ thread_wakeup_with_result(TH_EV_WAKE_ACTIVE(thread),
THREAD_INTERRUPTED);
thread_unlock(thread);
(void) splx(s);
@@ -1105,7 +1123,7 @@ kern_return_t thread_halt(
}
}
-void walking_zombie(void)
+void __attribute__((noreturn)) walking_zombie(void)
{
panic("the zombie walks!");
}
@@ -1116,7 +1134,7 @@ void walking_zombie(void)
*/
void thread_halt_self(void)
{
- register thread_t thread = current_thread();
+ thread_t thread = current_thread();
spl_t s;
if (thread->ast & AST_TERMINATE) {
@@ -1131,7 +1149,7 @@ void thread_halt_self(void)
s = splsched();
simple_lock(&reaper_lock);
- enqueue_tail(&reaper_queue, (queue_entry_t) thread);
+ enqueue_tail(&reaper_queue, &(thread->links));
simple_unlock(&reaper_lock);
thread_lock(thread);
@@ -1170,7 +1188,7 @@ void thread_halt_self(void)
* suspends is maintained.
*/
void thread_hold(
- register thread_t thread)
+ thread_t thread)
{
spl_t s;
@@ -1192,11 +1210,11 @@ void thread_hold(
*/
kern_return_t
thread_dowait(
- register thread_t thread,
+ thread_t thread,
boolean_t must_halt)
{
- register boolean_t need_wakeup;
- register kern_return_t ret = KERN_SUCCESS;
+ boolean_t need_wakeup;
+ kern_return_t ret = KERN_SUCCESS;
spl_t s;
if (thread == current_thread())
@@ -1266,7 +1284,7 @@ thread_dowait(
* Check for failure if interrupted.
*/
thread->wake_active = TRUE;
- thread_sleep((event_t) &thread->wake_active,
+ thread_sleep(TH_EV_WAKE_ACTIVE(thread),
simple_lock_addr(thread->lock), TRUE);
thread_lock(thread);
if ((current_thread()->wait_result != THREAD_AWAKENED) &&
@@ -1290,13 +1308,13 @@ thread_dowait(
(void) splx(s);
if (need_wakeup)
- thread_wakeup((event_t) &thread->wake_active);
+ thread_wakeup(TH_EV_WAKE_ACTIVE(thread));
return ret;
}
void thread_release(
- register thread_t thread)
+ thread_t thread)
{
spl_t s;
@@ -1315,9 +1333,9 @@ void thread_release(
}
kern_return_t thread_suspend(
- register thread_t thread)
+ thread_t thread)
{
- register boolean_t hold;
+ boolean_t hold;
spl_t spl;
if (thread == THREAD_NULL)
@@ -1328,7 +1346,7 @@ kern_return_t thread_suspend(
thread_lock(thread);
/* Wait for thread to get interruptible */
while (thread->state & TH_UNINT) {
- assert_wait(&thread->state, TRUE);
+ assert_wait(TH_EV_STATE(thread), TRUE);
thread_unlock(thread);
thread_block(NULL);
thread_lock(thread);
@@ -1361,9 +1379,9 @@ kern_return_t thread_suspend(
kern_return_t thread_resume(
- register thread_t thread)
+ thread_t thread)
{
- register kern_return_t ret;
+ kern_return_t ret;
spl_t s;
if (thread == THREAD_NULL)
@@ -1399,7 +1417,7 @@ kern_return_t thread_resume(
* Return thread's machine-dependent state.
*/
kern_return_t thread_get_state(
- register thread_t thread,
+ thread_t thread,
int flavor,
thread_state_t old_state, /* pointer to OUT array */
natural_t *old_state_count) /*IN/OUT*/
@@ -1423,7 +1441,7 @@ kern_return_t thread_get_state(
* Change thread's machine-dependent state.
*/
kern_return_t thread_set_state(
- register thread_t thread,
+ thread_t thread,
int flavor,
thread_state_t new_state,
natural_t new_state_count)
@@ -1444,7 +1462,7 @@ kern_return_t thread_set_state(
}
kern_return_t thread_info(
- register thread_t thread,
+ thread_t thread,
int flavor,
thread_info_t thread_info_out, /* pointer to OUT array */
natural_t *thread_info_count) /*IN/OUT*/
@@ -1456,7 +1474,7 @@ kern_return_t thread_info(
return KERN_INVALID_ARGUMENT;
if (flavor == THREAD_BASIC_INFO) {
- register thread_basic_info_t basic_info;
+ thread_basic_info_t basic_info;
/* Allow *thread_info_count to be one smaller than the
usual amount, because creation_time is a new member
@@ -1542,7 +1560,7 @@ kern_return_t thread_info(
return KERN_SUCCESS;
}
else if (flavor == THREAD_SCHED_INFO) {
- register thread_sched_info_t sched_info;
+ thread_sched_info_t sched_info;
if (*thread_info_count < THREAD_SCHED_INFO_COUNT) {
return KERN_INVALID_ARGUMENT;
@@ -1584,7 +1602,7 @@ kern_return_t thread_info(
}
kern_return_t thread_abort(
- register thread_t thread)
+ thread_t thread)
{
if (thread == THREAD_NULL || thread == current_thread()) {
return KERN_INVALID_ARGUMENT;
@@ -1649,9 +1667,13 @@ thread_t kernel_thread(
continuation_t start,
void * arg)
{
+ kern_return_t kr;
thread_t thread;
- (void) thread_create(task, &thread);
+ kr = thread_create(task, &thread);
+ if (kr != KERN_SUCCESS)
+ return THREAD_NULL;
+
/* release "extra" ref that thread_create gave us */
thread_deallocate(thread);
thread_start(thread, start);
@@ -1675,10 +1697,10 @@ thread_t kernel_thread(
* This kernel thread runs forever looking for threads to destroy
* (when they request that they be destroyed, of course).
*/
-void reaper_thread_continue(void)
+void __attribute__((noreturn)) reaper_thread_continue(void)
{
for (;;) {
- register thread_t thread;
+ thread_t thread;
spl_t s;
s = splsched();
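The body of the reaper loop is not shown by this hunk; it presumably drains the queue that thread_halt_self() fills above and then blocks on its own continuation, roughly as sketched here (the exact teardown calls are assumptions):

	/* Sketch of the remainder of the loop; not part of the patch. */
	simple_lock(&reaper_lock);
	while (!queue_empty(&reaper_queue)) {
		queue_remove_first(&reaper_queue, thread, thread_t, links);
		simple_unlock(&reaper_lock);
		(void) splx(s);

		(void) thread_dowait(thread, TRUE);	/* may block */
		thread_deallocate(thread);		/* may block */

		s = splsched();
		simple_lock(&reaper_lock);
	}
	assert_wait((event_t) &reaper_queue, FALSE);	/* before unlocking,
							   so no wakeup is lost */
	simple_unlock(&reaper_lock);
	(void) splx(s);
	thread_block(reaper_thread_continue);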
@@ -1793,12 +1815,12 @@ thread_unfreeze(
void
thread_doassign(
- register thread_t thread,
- register processor_set_t new_pset,
+ thread_t thread,
+ processor_set_t new_pset,
boolean_t release_freeze)
{
- register processor_set_t pset;
- register boolean_t old_empty, new_empty;
+ processor_set_t pset;
+ boolean_t old_empty, new_empty;
boolean_t recompute_pri = FALSE;
spl_t s;
@@ -1956,6 +1978,9 @@ kern_return_t thread_get_assignment(
thread_t thread,
processor_set_t *pset)
{
+ if (thread == THREAD_NULL)
+ return KERN_INVALID_ARGUMENT;
+
*pset = thread->processor_set;
pset_reference(*pset);
return KERN_SUCCESS;
@@ -2101,8 +2126,8 @@ thread_policy(
int data)
{
#if MACH_FIXPRI
- register kern_return_t ret = KERN_SUCCESS;
- register int temp;
+ kern_return_t ret = KERN_SUCCESS;
+ int temp;
spl_t s;
#endif /* MACH_FIXPRI */
@@ -2218,7 +2243,6 @@ thread_wire(
void thread_collect_scan(void)
{
-#if 0
register thread_t thread, prev_thread;
processor_set_t pset, prev_pset;
@@ -2271,7 +2295,6 @@ void thread_collect_scan(void)
thread_deallocate(prev_thread);
if (prev_pset != PROCESSOR_SET_NULL)
pset_deallocate(prev_pset);
-#endif /* 0 */
}
boolean_t thread_collect_allowed = TRUE;
@@ -2305,7 +2328,7 @@ void consider_thread_collect(void)
#if MACH_DEBUG
vm_size_t stack_usage(
- register vm_offset_t stack)
+ vm_offset_t stack)
{
int i;
@@ -2322,7 +2345,7 @@ vm_size_t stack_usage(
*/
void stack_init(
- register vm_offset_t stack)
+ vm_offset_t stack)
{
if (stack_check_usage) {
int i;
@@ -2338,7 +2361,7 @@ void stack_init(
*/
void stack_finalize(
- register vm_offset_t stack)
+ vm_offset_t stack)
{
if (stack_check_usage) {
vm_size_t used = stack_usage(stack);
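These MACH_DEBUG helpers implement the stack high-water-mark check: stack_init() paints a fresh stack with STACK_MARKER (defined near the top of this file), and stack_usage() reports how much of the paint was overwritten. A condensed sketch of the idea, with the scan direction assumed (stacks grow downward, so usage appears at the high addresses first):

	/* Sketch only: a hypothetical stand-alone version of the scan. */
	static vm_size_t
	stack_high_water(vm_offset_t stack)
	{
		vm_offset_t p;

		for (p = stack; p < stack + KERNEL_STACK_SIZE;
		     p += sizeof(unsigned int))
			if (*(unsigned int *) p != STACK_MARKER)
				break;
		return (vm_size_t) (stack + KERNEL_STACK_SIZE - p);
	}

stack_finalize() runs the same check when a stack is handed back, so the worst case observed over the system's lifetime can be recorded.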
@@ -2431,8 +2454,8 @@ kern_return_t processor_set_stack_usage(
vm_size_t maxusage;
vm_offset_t maxstack;
- register thread_t *threads;
- register thread_t tmp_thread;
+ thread_t *threads;
+ thread_t tmp_thread;
unsigned int actual; /* this many things */
unsigned int i;
@@ -2550,7 +2573,7 @@ kern_return_t processor_set_stack_usage(
void
thread_stats(void)
{
- register thread_t thread;
+ thread_t thread;
int total = 0, rpcreply = 0;
queue_iterate(&default_pset.threads, thread, thread_t, pset_threads) {