/*
 * Mach Operating System
 * Copyright (c) 1994-1988 Carnegie Mellon University.
 * Copyright (c) 1993,1994 The University of Utah and
 * the Computer Systems Laboratory (CSL).
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
 * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
 * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
 * THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	mach_clock.c
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1986
 *
 *	Clock primitives.
 */

#include <string.h>

#include <mach/boolean.h>
#include <mach/machine.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <kern/counters.h>
#include "cpu_number.h"
#include <kern/debug.h>
#include <kern/host.h>
#include <kern/lock.h>
#include <kern/mach_clock.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/time_stamp.h>
#include <kern/timer.h>
#include <kern/priority.h>
#include <vm/vm_kern.h>
#include <sys/time.h>
#include <device/io_req.h>
#include <machine/mach_param.h>	/* HZ */
#include <machine/machspl.h>
#include <machine/model_dep.h>

#if MACH_PCSAMPLE
#include <kern/pc_sample.h>
#endif

int		hz = HZ;		/* number of ticks per second */
int		tick = (1000000 / HZ);	/* number of usec per tick */
time_value_t	time = { 0, 0 };	/* time since bootup (uncorrected) */
unsigned long	elapsed_ticks = 0;	/* ticks elapsed since bootup */

int		timedelta = 0;
int		tickdelta = 0;

#if	HZ > 500
unsigned	tickadj = 1;		/* can adjust HZ usecs per second */
#else
unsigned	tickadj = 500 / HZ;	/* can adjust 500 usecs per second */
#endif
unsigned	bigadj = 1000000;	/* adjust 10*tickadj if adjustment
					   > bigadj */

/*
 *	This update protocol, with a check value, allows
 *		do {
 *			secs = mtime->seconds;
 *			__sync_synchronize();
 *			usecs = mtime->microseconds;
 *			__sync_synchronize();
 *		} while (secs != mtime->check_seconds);
 *	to read the time correctly.
 */

volatile mapped_time_value_t *mtime = 0;

#define update_mapped_time(time)				\
MACRO_BEGIN							\
	if (mtime != 0) {					\
		mtime->check_seconds = (time)->seconds;		\
		__sync_synchronize();				\
		mtime->microseconds = (time)->microseconds;	\
		__sync_synchronize();				\
		mtime->seconds = (time)->seconds;		\
	}							\
MACRO_END

#define read_mapped_time(time)					\
MACRO_BEGIN							\
	do {							\
		time->seconds = mtime->seconds;			\
		__sync_synchronize();				\
		time->microseconds = mtime->microseconds;	\
		__sync_synchronize();				\
	} while (time->seconds != mtime->check_seconds);	\
MACRO_END

decl_simple_lock_data(,	timer_lock)	/* lock for the timeout queue
					   and elapsed_ticks */
timer_elt_data_t	timer_head;	/* ordered list of timeouts */
					/* (doubles as end-of-list) */
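/*
 *	For illustration: with elapsed_ticks = 100 and two pending
 *	timeouts due at ticks 103 and 110, the circular queue is
 *
 *		timer_head -> [ticks 103] -> [ticks 110] -> timer_head
 *
 *	Since timer_head.ticks is ~0 (the maximum value), any scan that
 *	stops at the first element whose ticks exceed a threshold always
 *	terminates at the head itself, so no separate end-of-list test
 *	is needed.
 */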
/*
 *	Handle clock interrupts.
 *
 *	The clock interrupt is assumed to be called at a (more or less)
 *	constant rate.  The rate must be identical on all CPUS (XXX - fix).
 *
 *	Usec is the number of microseconds that have elapsed since the
 *	last clock tick.  It may be constant or computed, depending on
 *	the accuracy of the hardware clock.
 *
 */
void clock_interrupt(
	int		usec,		/* microseconds per tick */
	boolean_t	usermode,	/* executing user code */
	boolean_t	basepri,	/* at base priority */
	vm_offset_t	pc)		/* address of interrupted instruction */
{
	int		my_cpu = cpu_number();
	thread_t	thread = current_thread();

	counter(c_clock_ticks++);
	counter(c_threads_total += c_threads_current);
	counter(c_stacks_total += c_stacks_current);

#if	STAT_TIME
	/*
	 *	Increment the thread time, if using
	 *	statistical timing.
	 */
	if (usermode) {
	    timer_bump(&thread->user_timer, usec);
	}
	else {
	    /* Only bump timer if threads are initialized */
	    if (thread)
		timer_bump(&thread->system_timer, usec);
	}
#endif	/* STAT_TIME */

	/*
	 *	Increment the CPU time statistics.
	 */
	{
	    int		state;

	    if (usermode)
		state = CPU_STATE_USER;
	    else if (!cpu_idle(my_cpu))
		state = CPU_STATE_SYSTEM;
	    else
		state = CPU_STATE_IDLE;

	    machine_slot[my_cpu].cpu_ticks[state]++;

	    /*
	     *	Adjust the thread's priority and check for
	     *	quantum expiration.
	     */
	    thread_quantum_update(my_cpu, thread, 1, state);
	}

#if	MACH_PCSAMPLE
	/*
	 * Take a sample of pc for the user if required.
	 * This had better be MP safe.  It might be interesting
	 * to keep track of cpu in the sample.
	 */
#ifndef MACH_KERNSAMPLE
	if (usermode)
#endif
	{
	    if (thread)
		take_pc_sample_macro(thread, SAMPLED_PC_PERIODIC, usermode, pc);
	}
#endif	/* MACH_PCSAMPLE */

	/*
	 *	Time-of-day and time-out list are updated only
	 *	on the master CPU.
	 */
	if (my_cpu == master_cpu) {

	    spl_t	s;
	    timer_elt_t	telt;
	    boolean_t	needsoft = FALSE;

#if	TS_FORMAT == 1
	    /*
	     *	Increment the tick count for the timestamping routine.
	     */
	    ts_tick_count++;
#endif	/* TS_FORMAT == 1 */

	    /*
	     *	Update the tick count since bootup, and handle
	     *	timeouts.
	     */
	    s = splsched();
	    simple_lock(&timer_lock);

	    elapsed_ticks++;

	    telt = (timer_elt_t)queue_first(&timer_head.chain);
	    if (telt->ticks <= elapsed_ticks)
		needsoft = TRUE;
	    simple_unlock(&timer_lock);
	    splx(s);

	    /*
	     *	Increment the time-of-day clock.
	     */
	    if (timedelta == 0) {
		time_value_add_usec(&time, usec);
	    }
	    else {
		int	delta;

		if (timedelta < 0) {
		    if (usec > tickdelta) {
			delta = usec - tickdelta;
			timedelta += tickdelta;
		    } else {
			/* Not enough time has passed; defer the
			 * overflowing correction until later and keep
			 * only a one-microsecond delta for this tick. */
			delta = 1;
			timedelta += usec - 1;
		    }
		}
		else {
		    delta = usec + tickdelta;
		    timedelta -= tickdelta;
		}
		time_value_add_usec(&time, delta);
	    }
	    update_mapped_time(&time);

	    /*
	     *	Schedule soft-interrupt for timeout if needed
	     */
	    if (needsoft) {
		if (basepri) {
		    (void) splsoftclock();
		    softclock();
		}
		else {
		    setsoftclock();
		}
	    }
	}
}
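/*
 *	Worked example of the slew above (a sketch, assuming HZ = 100,
 *	so usec = 10000 and tickadj = 500 / HZ = 5):  after
 *	host_adjust_time() (below) queues an adjustment of +1500 usec,
 *	tickdelta is 5 and timedelta is 1500.  Each tick then advances
 *	the clock by 10005 usec and decrements timedelta by 5, so the
 *	full correction is absorbed over 300 ticks (3 seconds) without
 *	the clock ever jumping or running backwards.
 */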
/*
 *	There is a nasty race between softclock and reset_timeout.
 *	For example, scheduling code looks at timer_set and calls
 *	reset_timeout, thinking the timer is set.  However, softclock
 *	has already removed the timer but hasn't called thread_timeout
 *	yet.
 *
 *	Interim solution:  We initialize timers after pulling
 *	them out of the queue, so a race with reset_timeout won't
 *	hurt.  The timeout functions (eg, thread_timeout,
 *	thread_depress_timeout) check timer_set/depress_priority
 *	to see if the timer has been cancelled and if so do nothing.
 *
 *	This still isn't correct.  For example, softclock pulls a
 *	timer off the queue, then thread_go resets timer_set (but
 *	reset_timeout does nothing), then thread_set_timeout puts the
 *	timer back on the queue and sets timer_set, then
 *	thread_timeout finally runs and clears timer_set, then
 *	thread_set_timeout tries to put the timer on the queue again
 *	and corrupts it.
 */

void softclock(void)
{
	/*
	 *	Handle timeouts.
	 */
	spl_t		s;
	timer_elt_t	telt;
	void		(*fcn)( void * param );
	void		*param;

	while (TRUE) {
	    s = splsched();
	    simple_lock(&timer_lock);
	    telt = (timer_elt_t) queue_first(&timer_head.chain);
	    if (telt->ticks > elapsed_ticks) {
		simple_unlock(&timer_lock);
		splx(s);
		break;
	    }
	    fcn = telt->fcn;
	    param = telt->param;

	    remqueue(&timer_head.chain, (queue_entry_t)telt);
	    telt->set = TELT_UNSET;
	    simple_unlock(&timer_lock);
	    splx(s);

	    assert(fcn != 0);
	    (*fcn)(param);
	}
}

/*
 *	Set timeout.
 *
 *	Parameters:
 *		telt		timer element.  Function and param are
 *				already set.
 *		interval	time-out interval, in ticks (units of
 *				1/hz second).
 */
void set_timeout(
	timer_elt_t	telt,	/* already loaded */
	unsigned int	interval)
{
	spl_t		s;
	timer_elt_t	next;

	s = splsched();
	simple_lock(&timer_lock);

	interval += elapsed_ticks;

	for (next = (timer_elt_t)queue_first(&timer_head.chain);
	     ;
	     next = (timer_elt_t)queue_next((queue_entry_t)next)) {

	    if (next->ticks > interval)
		break;
	}
	telt->ticks = interval;
	/*
	 * Insert new timer element before 'next'
	 * (after 'next'->prev)
	 */
	insque((queue_entry_t) telt, ((queue_entry_t)next)->prev);
	telt->set = TELT_SET;

	simple_unlock(&timer_lock);
	splx(s);
}

boolean_t reset_timeout(timer_elt_t telt)
{
	spl_t	s;

	s = splsched();
	simple_lock(&timer_lock);
	if (telt->set) {
	    remqueue(&timer_head.chain, (queue_entry_t)telt);
	    telt->set = TELT_UNSET;
	    simple_unlock(&timer_lock);
	    splx(s);
	    return TRUE;
	}
	else {
	    simple_unlock(&timer_lock);
	    splx(s);
	    return FALSE;
	}
}
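/*
 *	Typical use of a private timer element, as recommended for new
 *	code below (a minimal sketch; my_timer, my_callback and my_arg
 *	are illustrative names):
 *
 *		static timer_elt_data_t my_timer;
 *
 *		my_timer.fcn = my_callback;
 *		my_timer.param = my_arg;
 *		set_timeout(&my_timer, 5 * hz);
 *			-- my_callback(my_arg) runs in ~5 seconds
 *
 *		if (reset_timeout(&my_timer))
 *			-- the timeout was still pending and is now
 *			   cancelled
 *
 *	The caller owns the element's storage: set_timeout only links it
 *	into the shared queue, and softclock unlinks it again before
 *	calling fcn.
 */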
void init_timeout(void)
{
	simple_lock_init(&timer_lock);
	queue_init(&timer_head.chain);
	timer_head.ticks = ~0;	/* MAXUINT - sentinel */

	elapsed_ticks = 0;
}

/*
 * We record timestamps using the boot-time clock.  We keep track of
 * the boot-time clock by storing the difference to the real-time
 * clock.
 */
struct time_value clock_boottime_offset;

/*
 * Update the offset of the boot-time clock from the real-time clock.
 * This function must be called when the real-time clock is updated.
 * This function must be called at SPLHIGH.
 */
void
clock_boottime_update(struct time_value *new_time)
{
	struct time_value delta = time;
	time_value_sub(&delta, new_time);
	time_value_add(&clock_boottime_offset, &delta);
}

/*
 * Record a timestamp in STAMP.  Records values in the boot-time clock
 * frame.
 */
void
record_time_stamp(time_value_t *stamp)
{
	read_mapped_time(stamp);
	time_value_add(stamp, &clock_boottime_offset);
}

/*
 * Read a timestamp in STAMP into RESULT.  Returns values in the
 * real-time clock frame.
 */
void
read_time_stamp(time_value_t *stamp, time_value_t *result)
{
	*result = *stamp;
	time_value_sub(result, &clock_boottime_offset);
}

/*
 *	Read the time.
 */
kern_return_t
host_get_time(host, current_time)
	const host_t	host;
	time_value_t	*current_time;	/* OUT */
{
	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	read_mapped_time(current_time);
	return (KERN_SUCCESS);
}

/*
 *	Set the time.  Only available to privileged users.
 */
kern_return_t
host_set_time(host, new_time)
	const host_t	host;
	time_value_t	new_time;
{
	spl_t	s;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

#if	NCPUS > 1
	/*
	 * Switch to the master CPU to synchronize correctly.
	 */
	thread_bind(current_thread(), master_processor);
	if (current_processor() != master_processor)
	    thread_block((void (*)) 0);
#endif	/* NCPUS > 1 */

	s = splhigh();
	clock_boottime_update(&new_time);
	time = new_time;
	update_mapped_time(&time);
	resettodr();
	splx(s);

#if	NCPUS > 1
	/*
	 * Switch off the master CPU.
	 */
	thread_bind(current_thread(), PROCESSOR_NULL);
#endif	/* NCPUS > 1 */

	return (KERN_SUCCESS);
}

/*
 *	Adjust the time gradually.
 */
kern_return_t
host_adjust_time(host, new_adjustment, old_adjustment)
	const host_t	host;
	time_value_t	new_adjustment;
	time_value_t	*old_adjustment;	/* OUT */
{
	time_value_t	oadj;
	unsigned int	ndelta;
	spl_t		s;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	ndelta = new_adjustment.seconds * 1000000
		+ new_adjustment.microseconds;

#if	NCPUS > 1
	thread_bind(current_thread(), master_processor);
	if (current_processor() != master_processor)
	    thread_block((void (*)) 0);
#endif	/* NCPUS > 1 */

	s = splclock();

	oadj.seconds = timedelta / 1000000;
	oadj.microseconds = timedelta % 1000000;

	if (timedelta == 0) {
	    if (ndelta > bigadj)
		tickdelta = 10 * tickadj;
	    else
		tickdelta = tickadj;
	}
	/* Round the adjustment down to a multiple of tickdelta. */
	if (ndelta % tickdelta)
	    ndelta = ndelta / tickdelta * tickdelta;

	timedelta = ndelta;

	splx(s);

#if	NCPUS > 1
	thread_bind(current_thread(), PROCESSOR_NULL);
#endif	/* NCPUS > 1 */

	*old_adjustment = oadj;

	return (KERN_SUCCESS);
}

void
mapable_time_init(void)
{
	if (kmem_alloc_wired(kernel_map, (vm_offset_t *) &mtime, PAGE_SIZE)
	    != KERN_SUCCESS)
		panic("mapable_time_init");
	memset((void *) mtime, 0, PAGE_SIZE);
	update_mapped_time(&time);
}

int timeopen(dev_t dev, int flag, io_req_t ior)
{
	return (0);
}

void timeclose(dev_t dev, int flag)
{
	return;
}

/*
 *	Compatibility for device drivers.
 *	New code should use set_timeout/reset_timeout and private timers.
 *	This code can't use a cache to allocate timers, because
 *	it can be called from interrupt handlers.
 */

#define NTIMERS		20

timer_elt_data_t timeout_timers[NTIMERS];

/*
 *	Set timeout.
 *
 *	fcn:		function to call
 *	param:		parameter to pass to function
 *	interval:	timeout interval, in ticks (units of 1/hz second).
 */
void timeout(
	void	(*fcn)(void *param),
	void	*param,
	int	interval)
{
	spl_t		s;
	timer_elt_t	elt;

	s = splsched();
	simple_lock(&timer_lock);
	for (elt = &timeout_timers[0]; elt < &timeout_timers[NTIMERS]; elt++)
	    if (elt->set == TELT_UNSET)
		break;
	if (elt == &timeout_timers[NTIMERS])
	    panic("timeout");
	elt->fcn = fcn;
	elt->param = param;
	elt->set = TELT_ALLOC;
	simple_unlock(&timer_lock);
	splx(s);

	set_timeout(elt, (unsigned int)interval);
}

/*
 * Returns a boolean indicating whether the timeout element was found
 * and removed.
 */
boolean_t untimeout(fcn, param)
	void		(*fcn)( void * param );
	const void	*param;
{
	spl_t		s;
	timer_elt_t	elt;

	s = splsched();
	simple_lock(&timer_lock);
	queue_iterate(&timer_head.chain, elt, timer_elt_t, chain) {

	    if ((fcn == elt->fcn) && (param == elt->param)) {
		/*
		 *	Found it.
		 */
		remqueue(&timer_head.chain, (queue_entry_t)elt);
		elt->set = TELT_UNSET;

		simple_unlock(&timer_lock);
		splx(s);
		return (TRUE);
	    }
	}
	simple_unlock(&timer_lock);
	splx(s);
	return (FALSE);
}
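/*
 *	Typical driver use of the compatibility interface (a minimal
 *	sketch; my_watchdog and sc are illustrative names):
 *
 *		timeout(my_watchdog, (void *) sc, hz / 2);
 *			-- schedules my_watchdog(sc) after ~half a second
 *
 *		(void) untimeout(my_watchdog, (void *) sc);
 *			-- cancels it, if it has not yet fired
 *
 *	Unlike set_timeout with a caller-owned element, this interface
 *	draws timer elements from the static timeout_timers[] pool, so
 *	at most NTIMERS timeouts can be pending at once; running out of
 *	elements panics the kernel.
 */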