kern_timeout.c (211616) | kern_timeout.c (212541) |
---|---|
1/*- 2 * Copyright (c) 1982, 1986, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. --- 21 unchanged lines hidden (view full) --- 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * From: @(#)kern_clock.c 8.5 (Berkeley) 1/21/94 35 */ 36 37#include <sys/cdefs.h> | 1/*- 2 * Copyright (c) 1982, 1986, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. --- 21 unchanged lines hidden (view full) --- 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * From: @(#)kern_clock.c 8.5 (Berkeley) 1/21/94 35 */ 36 37#include <sys/cdefs.h> |
38__FBSDID("$FreeBSD: head/sys/kern/kern_timeout.c 211616 2010-08-22 11:18:57Z rpaulo $"); | 38__FBSDID("$FreeBSD: head/sys/kern/kern_timeout.c 212541 2010-09-13 07:25:35Z mav $"); |
39 40#include "opt_kdtrace.h" 41 42#include <sys/param.h> 43#include <sys/systm.h> 44#include <sys/bus.h> 45#include <sys/callout.h> 46#include <sys/condvar.h> --- 59 unchanged lines hidden (view full) --- 106 struct callout_list cc_callfree; 107 struct callout *cc_next; 108 struct callout *cc_curr; 109 void *cc_cookie; 110 int cc_ticks; 111 int cc_softticks; 112 int cc_cancel; 113 int cc_waiting; | 39 40#include "opt_kdtrace.h" 41 42#include <sys/param.h> 43#include <sys/systm.h> 44#include <sys/bus.h> 45#include <sys/callout.h> 46#include <sys/condvar.h> --- 59 unchanged lines hidden (view full) --- 106 struct callout_list cc_callfree; 107 struct callout *cc_next; 108 struct callout *cc_curr; 109 void *cc_cookie; 110 int cc_ticks; 111 int cc_softticks; 112 int cc_cancel; 113 int cc_waiting; |
114 int cc_firsttick; |
|
114}; 115 116#ifdef SMP 117struct callout_cpu cc_cpu[MAXCPU]; 118#define CC_CPU(cpu) (&cc_cpu[(cpu)]) 119#define CC_SELF() CC_CPU(PCPU_GET(cpuid)) 120#else 121struct callout_cpu cc_cpu; 122#define CC_CPU(cpu) &cc_cpu 123#define CC_SELF() &cc_cpu 124#endif 125#define CC_LOCK(cc) mtx_lock_spin(&(cc)->cc_lock) 126#define CC_UNLOCK(cc) mtx_unlock_spin(&(cc)->cc_lock) 127 128static int timeout_cpu; | 115}; 116 117#ifdef SMP 118struct callout_cpu cc_cpu[MAXCPU]; 119#define CC_CPU(cpu) (&cc_cpu[(cpu)]) 120#define CC_SELF() CC_CPU(PCPU_GET(cpuid)) 121#else 122struct callout_cpu cc_cpu; 123#define CC_CPU(cpu) &cc_cpu 124#define CC_SELF() &cc_cpu 125#endif 126#define CC_LOCK(cc) mtx_lock_spin(&(cc)->cc_lock) 127#define CC_UNLOCK(cc) mtx_unlock_spin(&(cc)->cc_lock) 128 129static int timeout_cpu; |
/*
 * Optional hook fired when a callout is inserted that is due earlier than
 * the cached cc_firsttick for its cpu, passing the target cpu and the
 * relative number of ticks until the new earliest event.  Presumably this
 * lets an event-timer driver reprogram its hardware -- TODO confirm which
 * subsystem registers it.  Defaults to NULL until someone assigns it.
 * NOTE(review): the visible call site dereferences this pointer without a
 * NULL check; confirm every caller guards against the unset (NULL) state.
 */
void (*callout_new_inserted)(int cpu, int ticks) = NULL;
|
129 130MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures"); 131 132/** 133 * Locked by cc_lock: 134 * cc_curr - If a callout is in progress, it is curr_callout. 135 * If curr_callout is non-NULL, threads waiting in 136 * callout_drain() will be woken up as soon as the --- 118 unchanged lines hidden (view full) --- 255 256 /* 257 * Process callouts at a very low cpu priority, so we don't keep the 258 * relatively high clock interrupt priority any longer than necessary. 259 */ 260 need_softclock = 0; 261 cc = CC_SELF(); 262 mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET); | 131 132MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures"); 133 134/** 135 * Locked by cc_lock: 136 * cc_curr - If a callout is in progress, it is curr_callout. 137 * If curr_callout is non-NULL, threads waiting in 138 * callout_drain() will be woken up as soon as the --- 118 unchanged lines hidden (view full) --- 257 258 /* 259 * Process callouts at a very low cpu priority, so we don't keep the 260 * relatively high clock interrupt priority any longer than necessary. 261 */ 262 need_softclock = 0; 263 cc = CC_SELF(); 264 mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET); |
263 cc->cc_ticks++; | 265 cc->cc_firsttick = cc->cc_ticks = ticks; |
264 for (; (cc->cc_softticks - cc->cc_ticks) <= 0; cc->cc_softticks++) { 265 bucket = cc->cc_softticks & callwheelmask; 266 if (!TAILQ_EMPTY(&cc->cc_callwheel[bucket])) { 267 need_softclock = 1; 268 break; 269 } 270 } 271 mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET); 272 /* 273 * swi_sched acquires the thread lock, so we don't want to call it 274 * with cc_lock held; incorrect locking order. 275 */ 276 if (need_softclock) 277 swi_sched(cc->cc_cookie, 0); 278} 279 | 266 for (; (cc->cc_softticks - cc->cc_ticks) <= 0; cc->cc_softticks++) { 267 bucket = cc->cc_softticks & callwheelmask; 268 if (!TAILQ_EMPTY(&cc->cc_callwheel[bucket])) { 269 need_softclock = 1; 270 break; 271 } 272 } 273 mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET); 274 /* 275 * swi_sched acquires the thread lock, so we don't want to call it 276 * with cc_lock held; incorrect locking order. 277 */ 278 if (need_softclock) 279 swi_sched(cc->cc_cookie, 0); 280} 281 |
/*
 * Scan forward from the current tick through the callout wheel looking for
 * the first bucket that holds a pending callout, and return how many ticks
 * away that bucket is.  The scan is bounded by both ncallout and hz/8, so
 * the result is a conservative "no event sooner than this" estimate rather
 * than an exact answer.  Takes and releases this cpu's callout lock, and
 * caches the computed first-event tick in cc_firsttick as a side effect.
 */
int
callout_tickstofirst(void)
{
	struct callout_cpu *cc;
	struct callout *c;
	struct callout_tailq *sc;
	int curticks;
	int skip = 1;	/* minimum lookahead is one tick; never report "now" */

	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
	curticks = cc->cc_ticks;
	while( skip < ncallout && skip < hz/8 ) {
		sc = &cc->cc_callwheel[ (curticks+skip) & callwheelmask ];
		/* search scanning ticks */
		TAILQ_FOREACH( c, sc, c_links.tqe ){
			/*
			 * NOTE(review): the window test compares against
			 * curticks + ncallout (the number of callout
			 * structures), not curticks + skip (the bucket being
			 * scanned); an entry hashed to this bucket but due a
			 * full wheel revolution later could stop the scan
			 * early.  Also "c->c_time > 0" looks incorrect once
			 * the tick counter wraps negative -- confirm intent.
			 * The "c &&" test is redundant inside TAILQ_FOREACH.
			 */
			if (c && (c->c_time <= curticks + ncallout)
			    && (c->c_time > 0))
				goto out;
		}
		skip++;
	}
out:
	/* Remember the earliest tick we found (or the scan bound). */
	cc->cc_firsttick = curticks + skip;
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	return (skip);
}
|
280static struct callout_cpu * 281callout_lock(struct callout *c) 282{ 283 struct callout_cpu *cc; 284 int cpu; 285 286 for (;;) { 287 cpu = c->c_cpu; --- 346 unchanged lines hidden (view full) --- 634 } 635 636 if (to_ticks <= 0) 637 to_ticks = 1; 638 639 c->c_arg = arg; 640 c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING); 641 c->c_func = ftn; | 310static struct callout_cpu * 311callout_lock(struct callout *c) 312{ 313 struct callout_cpu *cc; 314 int cpu; 315 316 for (;;) { 317 cpu = c->c_cpu; --- 346 unchanged lines hidden (view full) --- 664 } 665 666 if (to_ticks <= 0) 667 to_ticks = 1; 668 669 c->c_arg = arg; 670 c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING); 671 c->c_func = ftn; |
642 c->c_time = cc->cc_ticks + to_ticks; | 672 c->c_time = ticks + to_ticks; |
643 TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask], 644 c, c_links.tqe); | 673 TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask], 674 c, c_links.tqe); |
675 if ((c->c_time - cc->cc_firsttick) < 0) { 676 cc->cc_firsttick = c->c_time; 677 (*callout_new_inserted)(cpu, 678 to_ticks + (ticks - cc->cc_ticks)); 679 } |
|
645 CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d", 646 cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks); 647 CC_UNLOCK(cc); 648 649 return (cancelled); 650} 651 652/* --- 250 unchanged lines hidden --- | 680 CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d", 681 cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks); 682 CC_UNLOCK(cc); 683 684 return (cancelled); 685} 686 687/* --- 250 unchanged lines hidden --- |