cyclic.c revision 209059
1/* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License, Version 1.0 only 6 * (the "License"). You may not use this file except in compliance 7 * with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or http://www.opensolaris.org/os/licensing. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 * 22 * Portions Copyright 2008 John Birrell <jb@freebsd.org> 23 * 24 * $FreeBSD: head/sys/cddl/dev/cyclic/cyclic.c 209059 2010-06-11 18:46:34Z jhb $ 25 * 26 * This is a simplified version of the cyclic timer subsystem from 27 * OpenSolaris. In the FreeBSD version, we don't use interrupt levels. 28 */ 29 30/* 31 * Copyright 2004 Sun Microsystems, Inc. All rights reserved. 32 * Use is subject to license terms. 33 */ 34 35/* 36 * The Cyclic Subsystem 37 * -------------------- 38 * 39 * Prehistory 40 * 41 * Historically, most computer architectures have specified interval-based 42 * timer parts (e.g. SPARCstation's counter/timer; Intel's i8254). While 43 * these parts deal in relative (i.e. not absolute) time values, they are 44 * typically used by the operating system to implement the abstraction of 45 * absolute time. As a result, these parts cannot typically be reprogrammed 46 * without introducing error in the system's notion of time. 47 * 48 * Starting in about 1994, chip architectures began specifying high resolution 49 * timestamp registers. As of this writing (1999), all major chip families 50 * (UltraSPARC, PentiumPro, MIPS, PowerPC, Alpha) have high resolution 51 * timestamp registers, and two (UltraSPARC and MIPS) have added the capacity 52 * to interrupt based on timestamp values. These timestamp-compare registers 53 * present a time-based interrupt source which can be reprogrammed arbitrarily 54 * often without introducing error. Given the low cost of implementing such a 55 * timestamp-compare register (and the tangible benefit of eliminating 56 * discrete timer parts), it is reasonable to expect that future chip 57 * architectures will adopt this feature. 58 * 59 * The cyclic subsystem has been designed to take advantage of chip 60 * architectures with the capacity to interrupt based on absolute, high 61 * resolution values of time. 62 * 63 * Subsystem Overview 64 * 65 * The cyclic subsystem is a low-level kernel subsystem designed to provide 66 * arbitrarily high resolution, per-CPU interval timers (to avoid colliding 67 * with existing terms, we dub such an interval timer a "cyclic"). 68 * Alternatively, a cyclic may be specified to be "omnipresent", denoting 69 * firing on all online CPUs. 70 * 71 * Cyclic Subsystem Interface Overview 72 * ----------------------------------- 73 * 74 * The cyclic subsystem has interfaces with the kernel at-large, with other 75 * kernel subsystems (e.g. the processor management subsystem, the checkpoint 76 * resume subsystem) and with the platform (the cyclic backend). 
Each 77 * of these interfaces is given a brief synopsis here, and is described 78 * in full above the interface's implementation. 79 * 80 * The following diagram displays the cyclic subsystem's interfaces to 81 * other kernel components. The arrows denote a "calls" relationship, with 82 * the large arrow indicating the cyclic subsystem's consumer interface. 83 * Each arrow is labeled with the section in which the corresponding 84 * interface is described. 85 * 86 * Kernel at-large consumers 87 * -----------++------------ 88 * || 89 * || 90 * _||_ 91 * \ / 92 * \/ 93 * +---------------------+ 94 * | | 95 * | Cyclic subsystem |<----------- Other kernel subsystems 96 * | | 97 * +---------------------+ 98 * ^ | 99 * | | 100 * | | 101 * | v 102 * +---------------------+ 103 * | | 104 * | Cyclic backend | 105 * | (platform specific) | 106 * | | 107 * +---------------------+ 108 * 109 * 110 * Kernel At-Large Interfaces 111 * 112 * cyclic_add() <-- Creates a cyclic 113 * cyclic_add_omni() <-- Creates an omnipresent cyclic 114 * cyclic_remove() <-- Removes a cyclic 115 * 116 * Backend Interfaces 117 * 118 * cyclic_init() <-- Initializes the cyclic subsystem 119 * cyclic_fire() <-- Interrupt entry point 120 * 121 * The backend-supplied interfaces (through the cyc_backend structure) are 122 * documented in detail in <sys/cyclic_impl.h> 123 * 124 * 125 * Cyclic Subsystem Implementation Overview 126 * ---------------------------------------- 127 * 128 * The cyclic subsystem is designed to minimize interference between cyclics 129 * on different CPUs. Thus, all of the cyclic subsystem's data structures 130 * hang off of a per-CPU structure, cyc_cpu. 131 * 132 * Each cyc_cpu has a power-of-two sized array of cyclic structures (the 133 * cyp_cyclics member of the cyc_cpu structure). If cyclic_add() is called 134 * and there does not exist a free slot in the cyp_cyclics array, the size of 135 * the array will be doubled. The array will never shrink. Cyclics are 136 * referred to by their index in the cyp_cyclics array, which is of type 137 * cyc_index_t. 138 * 139 * The cyclics are kept sorted by expiration time in the cyc_cpu's heap. The 140 * heap is keyed by cyclic expiration time, with parents expiring earlier 141 * than their children. 142 * 143 * Heap Management 144 * 145 * The heap is managed primarily by cyclic_fire(). Upon entry, cyclic_fire() 146 * compares the root cyclic's expiration time to the current time. If the 147 * expiration time is in the past, cyclic_expire() is called on the root 148 * cyclic. Upon return from cyclic_expire(), the cyclic's new expiration time 149 * is derived by adding its interval to its old expiration time, and a 150 * downheap operation is performed. After the downheap, cyclic_fire() 151 * examines the (potentially changed) root cyclic, repeating the 152 * cyclic_expire()/add interval/cyclic_downheap() sequence until the root 153 * cyclic has an expiration time in the future. This expiration time 154 * (guaranteed to be the earliest in the heap) is then communicated to the 155 * backend via cyb_reprogram. Optimal backends will next call cyclic_fire() 156 * shortly after the root cyclic's expiration time. 157 * 158 * To allow efficient, deterministic downheap operations, we implement the 159 * heap as an array (the cyp_heap member of the cyc_cpu structure), with each 160 * element containing an index into the CPU's cyp_cyclics array. 161 * 162 * The heap is laid out in the array according to the following: 163 * 164 * 1. 
The root of the heap is always in the 0th element of the heap array 165 * 2. The left and right children of the nth element are element 166 * (((n + 1) << 1) - 1) and element ((n + 1) << 1), respectively. 167 * 168 * This layout is standard (see, e.g., Cormen's "Algorithms"); the proof 169 * that these constraints correctly lay out a heap (or indeed, any binary 170 * tree) is trivial and left to the reader. 171 * 172 * To see the heap by example, assume our cyclics array has the following 173 * members (at time t): 174 * 175 * cy_handler cy_expire 176 * --------------------------------------------- 177 * [ 0] clock() t+10000000 178 * [ 1] deadman() t+1000000000 179 * [ 2] clock_highres_fire() t+100 180 * [ 3] clock_highres_fire() t+1000 181 * [ 4] clock_highres_fire() t+500 182 * [ 5] (free) -- 183 * [ 6] (free) -- 184 * [ 7] (free) -- 185 * 186 * The heap array could be: 187 * 188 * [0] [1] [2] [3] [4] [5] [6] [7] 189 * +-----+-----+-----+-----+-----+-----+-----+-----+ 190 * | | | | | | | | | 191 * | 2 | 3 | 4 | 0 | 1 | x | x | x | 192 * | | | | | | | | | 193 * +-----+-----+-----+-----+-----+-----+-----+-----+ 194 * 195 * Graphically, this array corresponds to the following (excuse the ASCII art): 196 * 197 * 2 198 * | 199 * +------------------+------------------+ 200 * 3 4 201 * | 202 * +---------+--------+ 203 * 0 1 204 * 205 * Note that the heap is laid out by layer: all nodes at a given depth are 206 * stored in consecutive elements of the array. Moreover, layers of 207 * consecutive depths are in adjacent element ranges. This property 208 * guarantees high locality of reference during downheap operations. 209 * Specifically, we are guaranteed that we can downheap to a depth of 210 * 211 * lg (cache_line_size / sizeof (cyc_index_t)) 212 * 213 * nodes with at most one cache miss. On UltraSPARC (64 byte e-cache line 214 * size), this corresponds to a depth of four nodes. Thus, if there are 215 * fewer than sixteen cyclics in the heap, downheaps on UltraSPARC miss at 216 * most once in the e-cache. 217 * 218 * Downheaps are required to compare siblings as they proceed down the 219 * heap. For downheaps proceeding beyond the one-cache-miss depth, every 220 * access to a left child could potentially miss in the cache. However, 221 * if we assume 222 * 223 * (cache_line_size / sizeof (cyc_index_t)) > 2, 224 * 225 * then all siblings are guaranteed to be on the same cache line. Thus, the 226 * miss on the left child will guarantee a hit on the right child; downheaps 227 * will incur at most one cache miss per layer beyond the one-cache-miss 228 * depth. The total number of cache misses for heap management during a 229 * downheap operation is thus bounded by 230 * 231 * lg (n) - lg (cache_line_size / sizeof (cyc_index_t)) 232 * 233 * Traditional pointer-based heaps are implemented without regard to 234 * locality. Downheaps can thus incur two cache misses per layer (one for 235 * each child), but at most one cache miss at the root. This yields a bound 236 * of 237 * 238 * 2 * lg (n) - 1 239 * 240 * on the total cache misses. 241 * 242 * This difference may seem theoretically trivial (the difference is, after 243 * all, constant), but can become substantial in practice -- especially for 244 * caches with very large cache lines and high miss penalties (e.g. TLBs). 245 * 246 * Heaps must always be full, balanced trees. Heap management must therefore 247 * track the next point-of-insertion into the heap. In pointer-based heaps, 248 * recomputing this point takes O(lg (n)). 
Given the layout of the 249 * array-based implementation, however, the next point-of-insertion is 250 * always: 251 * 252 * heap[number_of_elements] 253 * 254 * We exploit this property by implementing the free-list in the unused 255 * heap elements. Heap insertion, therefore, consists only of filling in 256 * the cyclic at cyp_cyclics[cyp_heap[number_of_elements]], incrementing 257 * the number of elements, and performing an upheap. Heap deletion consists 258 * of decrementing the number of elements, swapping the to-be-deleted element 259 * with the element at cyp_heap[number_of_elements], and downheaping. 260 * 261 * Filling in more details in our earlier example: 262 * 263 * +--- free list head 264 * | 265 * V 266 * 267 * [0] [1] [2] [3] [4] [5] [6] [7] 268 * +-----+-----+-----+-----+-----+-----+-----+-----+ 269 * | | | | | | | | | 270 * | 2 | 3 | 4 | 0 | 1 | 5 | 6 | 7 | 271 * | | | | | | | | | 272 * +-----+-----+-----+-----+-----+-----+-----+-----+ 273 * 274 * To insert into this heap, we would just need to fill in the cyclic at 275 * cyp_cyclics[5], bump the number of elements (from 5 to 6) and perform 276 * an upheap. 277 * 278 * If we wanted to remove, say, cyp_cyclics[3], we would first scan for it 279 * in the cyp_heap, and discover it at cyp_heap[1]. We would then decrement 280 * the number of elements (from 5 to 4), swap cyp_heap[1] with cyp_heap[4], 281 * and perform a downheap from cyp_heap[1]. The linear scan is required 282 * because the cyclic does not keep a backpointer into the heap. This makes 283 * heap manipulation (e.g. downheaps) faster at the expense of removal 284 * operations. 285 * 286 * Expiry processing 287 * 288 * As alluded to above, cyclic_expire() is called by cyclic_fire() to expire 289 * a cyclic. Cyclic subsystem consumers are guaranteed that for an arbitrary 290 * time t in the future, their cyclic handler will have been called 291 * (t - cyt_when) / cyt_interval times. cyclic_expire() simply needs to call 292 * the handler. 293 * 294 * Resizing 295 * 296 * All of the discussion thus far has assumed a static number of cyclics. 297 * Obviously, static limitations are not practical; we need the capacity 298 * to resize our data structures dynamically. 299 * 300 * We resize our data structures lazily, and only on a per-CPU basis. 301 * The size of the data structures always doubles and never shrinks. We 302 * serialize adds (and thus resizes) on cpu_lock; we never need to deal 303 * with concurrent resizes. Resizes should be rare; they may induce jitter 304 * on the CPU being resized, but should not affect cyclic operation on other 305 * CPUs. 306 * 307 * Two key cyc_cpu data structures need to be resized: the cyclics array 308 * and the heap array. Resizing is relatively straightforward: 309 * 310 * 1. The new, larger arrays are allocated in cyclic_expand() (called 311 * from cyclic_add()). 312 * 2. The contents of the old arrays are copied into the new arrays. 313 * 3. The old cyclics array is bzero()'d. 314 * 4. The pointers are updated. 315 * 316 * Removals 317 * 318 * Cyclic removals should be rare. To simplify the implementation (and to 319 * allow optimization for the cyclic_fire()/cyclic_expire() 320 * path), we force removals and adds to serialize on cpu_lock.
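 *
 * Heap indexing macros
 *
 * As a concrete footnote to the heap layout described above: the
 * parent/child index computations are performed via macros from
 * <sys/cyclic_impl.h>. The definitions sketched here are simply what the
 * layout rules above imply; that header remains the authoritative source:
 *
 *	CYC_HEAP_PARENT(ndx)	(((ndx) - 1) >> 1)
 *	CYC_HEAP_LEFT(ndx)	((((ndx) + 1) << 1) - 1)
 *	CYC_HEAP_RIGHT(ndx)	(((ndx) + 1) << 1)
 *
 * In the example heap pictured earlier, CYC_HEAP_LEFT(1) is 3 and
 * CYC_HEAP_RIGHT(1) is 4: the children of cyp_heap[1] (cyclic 3) are
 * cyp_heap[3] (cyclic 0) and cyp_heap[4] (cyclic 1), matching the
 * ASCII art.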
321 * 322 */ 323#include <sys/cdefs.h> 324#include <sys/param.h> 325#include <sys/conf.h> 326#include <sys/kernel.h> 327#include <sys/lock.h> 328#include <sys/sx.h> 329#include <sys/cyclic_impl.h> 330#include <sys/module.h> 331#include <sys/systm.h> 332#include <sys/atomic.h> 333#include <sys/kmem.h> 334#include <sys/cmn_err.h> 335#include <sys/dtrace_bsd.h> 336#include <machine/cpu.h> 337 338static kmem_cache_t *cyclic_id_cache; 339static cyc_id_t *cyclic_id_head; 340static cyc_backend_t cyclic_backend; 341 342MALLOC_DEFINE(M_CYCLIC, "cyclic", "Cyclic timer subsystem"); 343 344/* 345 * Returns 1 if the upheap propagated to the root, 0 if it did not. This 346 * allows the caller to reprogram the backend only when the root has been 347 * modified. 348 */ 349static int 350cyclic_upheap(cyc_cpu_t *cpu, cyc_index_t ndx) 351{ 352 cyclic_t *cyclics; 353 cyc_index_t *heap; 354 cyc_index_t heap_parent, heap_current = ndx; 355 cyc_index_t parent, current; 356 357 if (heap_current == 0) 358 return (1); 359 360 heap = cpu->cyp_heap; 361 cyclics = cpu->cyp_cyclics; 362 heap_parent = CYC_HEAP_PARENT(heap_current); 363 364 for (;;) { 365 current = heap[heap_current]; 366 parent = heap[heap_parent]; 367 368 /* 369 * We have an expiration time later than our parent; we're 370 * done. 371 */ 372 if (cyclics[current].cy_expire >= cyclics[parent].cy_expire) 373 return (0); 374 375 /* 376 * We need to swap with our parent, and continue up the heap. 377 */ 378 heap[heap_parent] = current; 379 heap[heap_current] = parent; 380 381 /* 382 * If we just reached the root, we're done. 383 */ 384 if (heap_parent == 0) 385 return (1); 386 387 heap_current = heap_parent; 388 heap_parent = CYC_HEAP_PARENT(heap_current); 389 } 390} 391 392static void 393cyclic_downheap(cyc_cpu_t *cpu, cyc_index_t ndx) 394{ 395 cyclic_t *cyclics = cpu->cyp_cyclics; 396 cyc_index_t *heap = cpu->cyp_heap; 397 398 cyc_index_t heap_left, heap_right, heap_me = ndx; 399 cyc_index_t left, right, me; 400 cyc_index_t nelems = cpu->cyp_nelems; 401 402 for (;;) { 403 /* 404 * If we don't have a left child (i.e., we're a leaf), we're 405 * done. 406 */ 407 if ((heap_left = CYC_HEAP_LEFT(heap_me)) >= nelems) 408 return; 409 410 left = heap[heap_left]; 411 me = heap[heap_me]; 412 413 heap_right = CYC_HEAP_RIGHT(heap_me); 414 415 /* 416 * Even if we don't have a right child, we still need to compare 417 * our expiration time against that of our left child. 418 */ 419 if (heap_right >= nelems) 420 goto comp_left; 421 422 right = heap[heap_right]; 423 424 /* 425 * We have both a left and a right child. We need to compare 426 * the expiration times of the children to determine which 427 * expires earlier. 428 */ 429 if (cyclics[right].cy_expire < cyclics[left].cy_expire) { 430 /* 431 * Our right child is the earlier of our children. 432 * We'll now compare our expiration time to its; if 433 * ours is the earlier, we're done. 434 */ 435 if (cyclics[me].cy_expire <= cyclics[right].cy_expire) 436 return; 437 438 /* 439 * Our right child expires earlier than we do; swap 440 * with our right child, and descend right. 441 */ 442 heap[heap_right] = me; 443 heap[heap_me] = right; 444 heap_me = heap_right; 445 continue; 446 } 447 448comp_left: 449 /* 450 * Our left child is the earlier of our children (or we have 451 * no right child). We'll now compare our expiration time 452 * to its; if ours is the earlier, we're done. 
453 */ 454 if (cyclics[me].cy_expire <= cyclics[left].cy_expire) 455 return; 456 457 /* 458 * Our left child expires earlier than we do; swap with our 459 * left child, and descend left. 460 */ 461 heap[heap_left] = me; 462 heap[heap_me] = left; 463 heap_me = heap_left; 464 } 465} 466 467static void 468cyclic_expire(cyc_cpu_t *cpu, cyc_index_t ndx, cyclic_t *cyclic) 469{ 470 cyc_func_t handler = cyclic->cy_handler; 471 void *arg = cyclic->cy_arg; 472 473 (*handler)(arg); 474} 475 476static void 477cyclic_enable_xcall(void *v) 478{ 479 cyc_xcallarg_t *argp = v; 480 cyc_cpu_t *cpu = argp->cyx_cpu; 481 cyc_backend_t *be = cpu->cyp_backend; 482 483 be->cyb_enable(be->cyb_arg); 484} 485 486static void 487cyclic_enable(cyc_cpu_t *cpu) 488{ 489 cyc_backend_t *be = cpu->cyp_backend; 490 cyc_xcallarg_t arg; 491 492 arg.cyx_cpu = cpu; 493 494 /* Cross call to the target CPU */ 495 be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu, cyclic_enable_xcall, &arg); 496} 497 498static void 499cyclic_disable_xcall(void *v) 500{ 501 cyc_xcallarg_t *argp = v; 502 cyc_cpu_t *cpu = argp->cyx_cpu; 503 cyc_backend_t *be = cpu->cyp_backend; 504 505 be->cyb_disable(be->cyb_arg); 506} 507 508static void 509cyclic_disable(cyc_cpu_t *cpu) 510{ 511 cyc_backend_t *be = cpu->cyp_backend; 512 cyc_xcallarg_t arg; 513 514 arg.cyx_cpu = cpu; 515 516 /* Cross call to the target CPU */ 517 be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu, cyclic_disable_xcall, &arg); 518} 519 520static void 521cyclic_reprogram_xcall(void *v) 522{ 523 cyc_xcallarg_t *argp = v; 524 cyc_cpu_t *cpu = argp->cyx_cpu; 525 cyc_backend_t *be = cpu->cyp_backend; 526 527 be->cyb_reprogram(be->cyb_arg, argp->cyx_exp); 528} 529 530static void 531cyclic_reprogram(cyc_cpu_t *cpu, hrtime_t exp) 532{ 533 cyc_backend_t *be = cpu->cyp_backend; 534 cyc_xcallarg_t arg; 535 536 arg.cyx_cpu = cpu; 537 arg.cyx_exp = exp; 538 539 /* Cross call to the target CPU */ 540 be->cyb_xcall(be->cyb_arg, cpu->cyp_cpu, cyclic_reprogram_xcall, &arg); 541} 542 543/* 544 * cyclic_fire(cpu_t *) 545 * 546 * Overview 547 * 548 * cyclic_fire() is the cyclic subsystem's interrupt handler. 549 * Called by the cyclic backend. 550 * 551 * Arguments and notes 552 * 553 * The only argument is the CPU on which the interrupt is executing; 554 * backends must call into cyclic_fire() on the specified CPU. 555 * 556 * cyclic_fire() may be called spuriously without ill effect. Optimal 557 * backends will call into cyclic_fire() at or shortly after the time 558 * requested via cyb_reprogram(). However, calling cyclic_fire() 559 * arbitrarily late will only manifest latency bubbles; the correctness 560 * of the cyclic subsystem does not rely on the timeliness of the backend. 561 * 562 * cyclic_fire() is wait-free; it will not block or spin. 563 * 564 * Return values 565 * 566 * None. 567 * 568 */ 569static void 570cyclic_fire(cpu_t *c) 571{ 572 cyc_cpu_t *cpu = c->cpu_cyclic; 573 574 mtx_lock_spin(&cpu->cyp_mtx); 575 576 cyc_index_t *heap = cpu->cyp_heap; 577 cyclic_t *cyclic, *cyclics = cpu->cyp_cyclics; 578 hrtime_t now = gethrtime(); 579 hrtime_t exp; 580 581 if (cpu->cyp_nelems == 0) { 582 /* This is a spurious fire. 
*/ 583 mtx_unlock_spin(&cpu->cyp_mtx); 584 return; 585 } 586 587 for (;;) { 588 cyc_index_t ndx = heap[0]; 589 590 cyclic = &cyclics[ndx]; 591 592 ASSERT(!(cyclic->cy_flags & CYF_FREE)); 593 594 if ((exp = cyclic->cy_expire) > now) 595 break; 596 597 cyclic_expire(cpu, ndx, cyclic); 598 599 /* 600 * If this cyclic will be set to next expire in the distant 601 * past, we have one of two situations: 602 * 603 * a) This is the first firing of a cyclic which had 604 * cy_expire set to 0. 605 * 606 * b) We are tragically late for a cyclic -- most likely 607 * due to being in the debugger. 608 * 609 * In either case, we set the new expiration time to be the 610 * next interval boundary. This assures that the 611 * expiration time modulo the interval is invariant. 612 * 613 * We arbitrarily define "distant" to be one second (one second 614 * is chosen because it's shorter than any foray to the 615 * debugger while still being longer than any legitimate 616 * stretch). 617 */ 618 exp += cyclic->cy_interval; 619 620 if (now - exp > NANOSEC) { 621 hrtime_t interval = cyclic->cy_interval; 622 623 exp += ((now - exp) / interval + 1) * interval; 624 } 625 626 cyclic->cy_expire = exp; 627 cyclic_downheap(cpu, 0); 628 } 629 630 /* 631 * Now we have a cyclic in the root slot which isn't in the past; 632 * reprogram the interrupt source. 633 */ 634 cyclic_reprogram(cpu, exp); 635 636 mtx_unlock_spin(&cpu->cyp_mtx); 637} 638 639/* 640 * cyclic_expand() doubles the size of this CPU's cyclics and heap arrays. 641 * The per-CPU spin lock is dropped while the new arrays are allocated. 642 */ 643static void 644cyclic_expand(cyc_cpu_t *cpu) 645{ 646 cyc_index_t new_size, old_size, i; 647 cyc_index_t *new_heap, *old_heap; 648 cyclic_t *new_cyclics, *old_cyclics; 649 650 ASSERT(MUTEX_HELD(&cpu_lock)); 651 652 if ((new_size = ((old_size = cpu->cyp_size) << 1)) == 0) 653 new_size = CY_DEFAULT_PERCPU; 654 655 /* 656 * Check that the new_size is a power of 2. 657 */ 658 ASSERT(((new_size - 1) & new_size) == 0); 659 660 /* Unlock the mutex while allocating memory so we can wait... */ 661 mtx_unlock_spin(&cpu->cyp_mtx); 662 663 new_heap = malloc(sizeof(cyc_index_t) * new_size, M_CYCLIC, M_WAITOK); 664 new_cyclics = malloc(sizeof(cyclic_t) * new_size, M_CYCLIC, M_ZERO | M_WAITOK); 665 666 /* Grab the lock again now we've got the memory... */ 667 mtx_lock_spin(&cpu->cyp_mtx); 668 669 /* Check if another thread beat us while the mutex was unlocked. */ 670 if (old_size != cpu->cyp_size) { 671 /* Oh well, he won. */ 672 mtx_unlock_spin(&cpu->cyp_mtx); 673 674 free(new_heap, M_CYCLIC); 675 free(new_cyclics, M_CYCLIC); 676 677 mtx_lock_spin(&cpu->cyp_mtx); 678 return; 679 } 680 681 old_heap = cpu->cyp_heap; 682 old_cyclics = cpu->cyp_cyclics; 683 684 bcopy(cpu->cyp_heap, new_heap, sizeof (cyc_index_t) * old_size); 685 bcopy(old_cyclics, new_cyclics, sizeof (cyclic_t) * old_size); 686 687 /* 688 * Set up the free list, and set all of the new cyclics to be CYF_FREE. 689 */ 690 for (i = old_size; i < new_size; i++) { 691 new_heap[i] = i; 692 new_cyclics[i].cy_flags = CYF_FREE; 693 } 694 695 /* 696 * We can go ahead and plow the value of cyp_heap and cyp_cyclics; 697 * cyclic_expand() has kept a copy.
698 */ 699 cpu->cyp_heap = new_heap; 700 cpu->cyp_cyclics = new_cyclics; 701 cpu->cyp_size = new_size; 702 703 if (old_cyclics != NULL) { 704 ASSERT(old_heap != NULL); 705 ASSERT(old_size != 0); 706 mtx_unlock_spin(&cpu->cyp_mtx); 707 708 free(old_cyclics, M_CYCLIC); 709 free(old_heap, M_CYCLIC); 710 711 mtx_lock_spin(&cpu->cyp_mtx); 712 } 713} 714 715static cyc_index_t 716cyclic_add_here(cyc_cpu_t *cpu, cyc_handler_t *hdlr, 717 cyc_time_t *when, uint16_t flags) 718{ 719 cyc_index_t ndx, nelems; 720 cyclic_t *cyclic; 721 722 ASSERT(MUTEX_HELD(&cpu_lock)); 723 724 mtx_lock_spin(&cpu->cyp_mtx); 725 726 ASSERT(!(cpu->cyp_cpu->cpu_flags & CPU_OFFLINE)); 727 ASSERT(when->cyt_when >= 0 && when->cyt_interval > 0); 728 729 while (cpu->cyp_nelems == cpu->cyp_size) 730 cyclic_expand(cpu); 731 732 ASSERT(cpu->cyp_nelems < cpu->cyp_size); 733 734 nelems = cpu->cyp_nelems++; 735 736 if (nelems == 0) 737 /* 738 * If this is the first element, we need to enable the 739 * backend on this CPU. 740 */ 741 cyclic_enable(cpu); 742 743 ndx = cpu->cyp_heap[nelems]; 744 cyclic = &cpu->cyp_cyclics[ndx]; 745 746 ASSERT(cyclic->cy_flags == CYF_FREE); 747 cyclic->cy_interval = when->cyt_interval; 748 749 if (when->cyt_when == 0) 750 cyclic->cy_expire = gethrtime() + cyclic->cy_interval; 751 else 752 cyclic->cy_expire = when->cyt_when; 753 754 cyclic->cy_handler = hdlr->cyh_func; 755 cyclic->cy_arg = hdlr->cyh_arg; 756 cyclic->cy_flags = flags; 757 758 if (cyclic_upheap(cpu, nelems)) { 759 hrtime_t exp = cyclic->cy_expire; 760 761 /* 762 * If our upheap propagated to the root, we need to 763 * reprogram the interrupt source. 764 */ 765 cyclic_reprogram(cpu, exp); 766 } 767 768 mtx_unlock_spin(&cpu->cyp_mtx); 769 770 return (ndx); 771} 772 773 774static int 775cyclic_remove_here(cyc_cpu_t *cpu, cyc_index_t ndx, cyc_time_t *when, int wait) 776{ 777 cyc_index_t nelems, i; 778 cyclic_t *cyclic; 779 cyc_index_t *heap, last; 780 781 ASSERT(MUTEX_HELD(&cpu_lock)); 782 ASSERT(wait == CY_WAIT || wait == CY_NOWAIT); 783 784 mtx_lock_spin(&cpu->cyp_mtx); 785 786 heap = cpu->cyp_heap; 787 788 nelems = cpu->cyp_nelems; 789 790 cyclic = &cpu->cyp_cyclics[ndx]; 791 792 /* 793 * Grab the current expiration time. If this cyclic is being 794 * removed as part of a juggling operation, the expiration time 795 * will be used when the cyclic is added to the new CPU. 796 */ 797 if (when != NULL) { 798 when->cyt_when = cyclic->cy_expire; 799 when->cyt_interval = cyclic->cy_interval; 800 } 801 802 cyclic->cy_flags = CYF_FREE; 803 804 for (i = 0; i < nelems; i++) { 805 if (heap[i] == ndx) 806 break; 807 } 808 809 if (i == nelems) 810 panic("attempt to remove non-existent cyclic"); 811 812 cpu->cyp_nelems = --nelems; 813 814 if (nelems == 0) 815 /* 816 * If we just removed the last element, then we need to 817 * disable the backend on this CPU. 818 */ 819 cyclic_disable(cpu); 820 821 if (i == nelems) 822 /* 823 * If we just removed the last element of the heap, then 824 * we don't have to downheap. 825 */ 826 goto done; 827 828 /* 829 * Swap the last element of the heap with the one we want to 830 * remove, and downheap (this has the implicit effect of putting 831 * the newly freed element on the free list). 832 */ 833 heap[i] = (last = heap[nelems]); 834 heap[nelems] = ndx; 835 836 if (i == 0) 837 cyclic_downheap(cpu, 0); 838 else { 839 if (cyclic_upheap(cpu, i) == 0) { 840 /* 841 * The upheap didn't propagate to the root; if it 842 * didn't propagate at all, we need to downheap. 
843 */ 844 if (heap[i] == last) 845 cyclic_downheap(cpu, i); 846 goto done; 847 } 848 } 849 850 /* 851 * We're here because we changed the root; we need to reprogram 852 * the clock source. 853 */ 854 cyclic = &cpu->cyp_cyclics[heap[0]]; 855 856 ASSERT(nelems != 0); 857 cyclic_reprogram(cpu, cyclic->cy_expire); 858 859done: 860 mtx_unlock_spin(&cpu->cyp_mtx); 861 862 return (1); 863} 864 865static void 866cyclic_configure(cpu_t *c) 867{ 868 cyc_cpu_t *cpu = malloc(sizeof(cyc_cpu_t), M_CYCLIC, M_ZERO | M_WAITOK); 869 cyc_backend_t *nbe = malloc(sizeof(cyc_backend_t), M_CYCLIC, M_ZERO | M_WAITOK); 870 871 ASSERT(MUTEX_HELD(&cpu_lock)); 872 873 if (cyclic_id_cache == NULL) 874 cyclic_id_cache = kmem_cache_create("cyclic_id_cache", 875 sizeof (cyc_id_t), 0, NULL, NULL, NULL, NULL, NULL, 0); 876 877 cpu->cyp_cpu = c; 878 879 cpu->cyp_size = 1; 880 cpu->cyp_heap = malloc(sizeof(cyc_index_t), M_CYCLIC, M_ZERO | M_WAITOK); 881 cpu->cyp_cyclics = malloc(sizeof(cyclic_t), M_CYCLIC, M_ZERO | M_WAITOK); 882 cpu->cyp_cyclics->cy_flags = CYF_FREE; 883 884 mtx_init(&cpu->cyp_mtx, "cyclic cpu", NULL, MTX_SPIN); 885 886 /* 887 * Setup the backend for this CPU. 888 */ 889 bcopy(&cyclic_backend, nbe, sizeof (cyc_backend_t)); 890 if (nbe->cyb_configure != NULL) 891 nbe->cyb_arg = nbe->cyb_configure(c); 892 cpu->cyp_backend = nbe; 893 894 /* 895 * On platforms where stray interrupts may be taken during startup, 896 * the CPU's cpu_cyclic pointer serves as an indicator that the 897 * cyclic subsystem for this CPU is prepared to field interrupts. 898 */ 899 membar_producer(); 900 901 c->cpu_cyclic = cpu; 902} 903 904static void 905cyclic_unconfigure(cpu_t *c) 906{ 907 cyc_cpu_t *cpu = c->cpu_cyclic; 908 cyc_backend_t *be = cpu->cyp_backend; 909 cyb_arg_t bar = be->cyb_arg; 910 911 ASSERT(MUTEX_HELD(&cpu_lock)); 912 913 c->cpu_cyclic = NULL; 914 915 /* 916 * Let the backend know that the CPU is being yanked, and free up 917 * the backend structure. 918 */ 919 if (be->cyb_unconfigure != NULL) 920 be->cyb_unconfigure(bar); 921 free(be, M_CYCLIC); 922 cpu->cyp_backend = NULL; 923 924 mtx_destroy(&cpu->cyp_mtx); 925 926 /* Finally, clean up our remaining dynamic structures. 
*/ 927 free(cpu->cyp_cyclics, M_CYCLIC); 928 free(cpu->cyp_heap, M_CYCLIC); 929 free(cpu, M_CYCLIC); 930} 931 932static void 933cyclic_omni_start(cyc_id_t *idp, cyc_cpu_t *cpu) 934{ 935 cyc_omni_handler_t *omni = &idp->cyi_omni_hdlr; 936 cyc_omni_cpu_t *ocpu = malloc(sizeof(cyc_omni_cpu_t), M_CYCLIC, M_WAITOK); 937 cyc_handler_t hdlr; 938 cyc_time_t when; 939 940 ASSERT(MUTEX_HELD(&cpu_lock)); 941 ASSERT(idp->cyi_cpu == NULL); 942 943 hdlr.cyh_func = NULL; 944 hdlr.cyh_arg = NULL; 945 946 when.cyt_when = 0; 947 when.cyt_interval = 0; 948 949 omni->cyo_online(omni->cyo_arg, cpu->cyp_cpu, &hdlr, &when); 950 951 ASSERT(hdlr.cyh_func != NULL); 952 ASSERT(when.cyt_when >= 0 && when.cyt_interval > 0); 953 954 ocpu->cyo_cpu = cpu; 955 ocpu->cyo_arg = hdlr.cyh_arg; 956 ocpu->cyo_ndx = cyclic_add_here(cpu, &hdlr, &when, 0); 957 ocpu->cyo_next = idp->cyi_omni_list; 958 idp->cyi_omni_list = ocpu; 959} 960 961static void 962cyclic_omni_stop(cyc_id_t *idp, cyc_cpu_t *cpu) 963{ 964 cyc_omni_handler_t *omni = &idp->cyi_omni_hdlr; 965 cyc_omni_cpu_t *ocpu = idp->cyi_omni_list, *prev = NULL; 966 967 ASSERT(MUTEX_HELD(&cpu_lock)); 968 ASSERT(idp->cyi_cpu == NULL); 969 ASSERT(ocpu != NULL); 970 971 while (ocpu != NULL && ocpu->cyo_cpu != cpu) { 972 prev = ocpu; 973 ocpu = ocpu->cyo_next; 974 } 975 976 /* 977 * We _must_ have found a cyc_omni_cpu which corresponds to this 978 * CPU -- the definition of an omnipresent cyclic is that it runs 979 * on all online CPUs. 980 */ 981 ASSERT(ocpu != NULL); 982 983 if (prev == NULL) { 984 idp->cyi_omni_list = ocpu->cyo_next; 985 } else { 986 prev->cyo_next = ocpu->cyo_next; 987 } 988 989 (void) cyclic_remove_here(ocpu->cyo_cpu, ocpu->cyo_ndx, NULL, CY_WAIT); 990 991 /* 992 * The cyclic has been removed from this CPU; time to call the 993 * omnipresent offline handler. 994 */ 995 if (omni->cyo_offline != NULL) 996 omni->cyo_offline(omni->cyo_arg, cpu->cyp_cpu, ocpu->cyo_arg); 997 998 free(ocpu, M_CYCLIC); 999} 1000 1001static cyc_id_t * 1002cyclic_new_id(void) 1003{ 1004 cyc_id_t *idp; 1005 1006 ASSERT(MUTEX_HELD(&cpu_lock)); 1007 1008 idp = kmem_cache_alloc(cyclic_id_cache, KM_SLEEP); 1009 1010 /* 1011 * The cyi_cpu field of the cyc_id_t structure tracks the CPU 1012 * associated with the cyclic. If and only if this field is NULL, the 1013 * cyc_id_t is an omnipresent cyclic. Note that cyi_omni_list may be 1014 * NULL for an omnipresent cyclic while the cyclic is being created 1015 * or destroyed. 1016 */ 1017 idp->cyi_cpu = NULL; 1018 idp->cyi_ndx = 0; 1019 1020 idp->cyi_next = cyclic_id_head; 1021 idp->cyi_prev = NULL; 1022 idp->cyi_omni_list = NULL; 1023 1024 if (cyclic_id_head != NULL) { 1025 ASSERT(cyclic_id_head->cyi_prev == NULL); 1026 cyclic_id_head->cyi_prev = idp; 1027 } 1028 1029 cyclic_id_head = idp; 1030 1031 return (idp); 1032} 1033 1034/* 1035 * cyclic_id_t cyclic_add(cyc_handler_t *, cyc_time_t *) 1036 * 1037 * Overview 1038 * 1039 * cyclic_add() will create an unbound cyclic with the specified handler and 1040 * interval. The cyclic will run on a CPU which both has interrupts enabled 1041 * and is in the system CPU partition.
1042 * 1043 * Arguments and notes 1044 * 1045 * As its first argument, cyclic_add() takes a cyc_handler, which has the 1046 * following members: 1047 * 1048 * cyc_func_t cyh_func <-- Cyclic handler 1049 * void *cyh_arg <-- Argument to cyclic handler 1050 * 1051 * In addition to a cyc_handler, cyclic_add() takes a cyc_time, which 1052 * has the following members: 1053 * 1054 * hrtime_t cyt_when <-- Absolute time, in nanoseconds since boot, at 1055 * which to start firing 1056 * hrtime_t cyt_interval <-- Length of interval, in nanoseconds 1057 * 1058 * gethrtime() is the time source for nanoseconds since boot. If cyt_when 1059 * is set to 0, the cyclic will start to fire when cyt_interval next 1060 * divides the number of nanoseconds since boot. 1061 * 1062 * The cyt_interval field _must_ be filled in by the caller; one-shots are 1063 * _not_ explicitly supported by the cyclic subsystem (cyclic_add() will 1064 * assert that cyt_interval is non-zero). The maximum value for either 1065 * field is INT64_MAX; the caller is responsible for assuring that 1066 * cyt_when + cyt_interval <= INT64_MAX. Neither field may be negative. 1067 * 1068 * For an arbitrary time t in the future, the cyclic handler is guaranteed 1069 * to have been called (t - cyt_when) / cyt_interval times. This will 1070 * be true even if interrupts have been disabled for periods greater than 1071 * cyt_interval nanoseconds. In order to compensate for such periods, 1072 * the cyclic handler may be called a finite number of times with an 1073 * arbitrarily small interval. 1074 * 1075 * The cyclic subsystem will not enforce any lower bound on the interval; 1076 * if the interval is less than the time required to process an interrupt, 1077 * the CPU will wedge. It's the responsibility of the caller to assure that 1078 * either the value of the interval is sane, or that its caller has 1079 * sufficient privilege to deny service (i.e. its caller is root). 1080 * 1081 * Return value 1082 * 1083 * cyclic_add() returns a cyclic_id_t, which is guaranteed to be a value 1084 * other than CYCLIC_NONE. cyclic_add() cannot fail. 1085 * 1086 * Caller's context 1087 * 1088 * cpu_lock must be held by the caller, and the caller must not be in 1089 * interrupt context. cyclic_add() will perform a KM_SLEEP kernel 1090 * memory allocation, so the usual rules (e.g. p_lock cannot be held) 1091 * apply. A cyclic may be added even in the presence of CPUs that have 1092 * not been configured with respect to the cyclic subsystem, but only 1093 * configured CPUs will be eligible to run the new cyclic. 1094 * 1095 * Cyclic handler's context 1096 * 1097 * Cyclic handlers are executed from the interrupt context provided by 1098 * the cyclic backend (this FreeBSD port does not use the Solaris notion 1099 * of interrupt levels). The usual context rules apply. 1100 * 1101 * A cyclic handler may not grab ANY locks held by the caller of either 1102 * cyclic_add() or cyclic_remove(); the implementation of these functions 1103 * may require blocking on cyclic handler completion. 1104 * Moreover, cyclic handlers may not make any call back into the cyclic 1105 * subsystem.
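 *
 * Usage sketch
 *
 * The fragment below is an illustrative sketch only; the handler name,
 * the counter it increments and the one-second interval are hypothetical
 * and are not part of this subsystem:
 *
 *	static uint64_t my_counter;
 *
 *	static void
 *	my_tick(void *arg)
 *	{
 *		(*(uint64_t *)arg)++;
 *	}
 *
 *	cyc_handler_t hdlr;
 *	cyc_time_t when;
 *	cyclic_id_t id;
 *
 *	hdlr.cyh_func = my_tick;
 *	hdlr.cyh_arg = &my_counter;
 *	when.cyt_when = 0;
 *	when.cyt_interval = NANOSEC;
 *
 *	mutex_enter(&cpu_lock);
 *	id = cyclic_add(&hdlr, &when);
 *	mutex_exit(&cpu_lock);
 *
 * The cyclic is later torn down with cyclic_remove(id), again with
 * cpu_lock held.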
1106 */ 1107cyclic_id_t 1108cyclic_add(cyc_handler_t *hdlr, cyc_time_t *when) 1109{ 1110 cyc_id_t *idp = cyclic_new_id(); 1111 solaris_cpu_t *c = &solaris_cpu[curcpu]; 1112 1113 ASSERT(MUTEX_HELD(&cpu_lock)); 1114 ASSERT(when->cyt_when >= 0 && when->cyt_interval > 0); 1115 1116 idp->cyi_cpu = c->cpu_cyclic; 1117 idp->cyi_ndx = cyclic_add_here(idp->cyi_cpu, hdlr, when, 0); 1118 1119 return ((uintptr_t)idp); 1120} 1121 1122/* 1123 * cyclic_id_t cyclic_add_omni(cyc_omni_handler_t *) 1124 * 1125 * Overview 1126 * 1127 * cyclic_add_omni() will create an omnipresent cyclic with the specified 1128 * online and offline handlers. Omnipresent cyclics run on all online 1129 * CPUs, including CPUs which have unbound interrupts disabled. 1130 * 1131 * Arguments 1132 * 1133 * As its only argument, cyclic_add_omni() takes a cyc_omni_handler, which 1134 * has the following members: 1135 * 1136 * void (*cyo_online)() <-- Online handler 1137 * void (*cyo_offline)() <-- Offline handler 1138 * void *cyo_arg <-- Argument to be passed to on/offline handlers 1139 * 1140 * Online handler 1141 * 1142 * The cyo_online member is a pointer to a function which has the following 1143 * four arguments: 1144 * 1145 * void * <-- Argument (cyo_arg) 1146 * cpu_t * <-- Pointer to CPU about to be onlined 1147 * cyc_handler_t * <-- Pointer to cyc_handler_t; must be filled in 1148 * by omni online handler 1149 * cyc_time_t * <-- Pointer to cyc_time_t; must be filled in by 1150 * omni online handler 1151 * 1152 * The omni cyclic online handler is always called _before_ the omni 1153 * cyclic begins to fire on the specified CPU. As the above argument 1154 * description implies, the online handler must fill in the two structures 1155 * passed to it: the cyc_handler_t and the cyc_time_t. These are the 1156 * same two structures passed to cyclic_add(), outlined above. This 1157 * allows the omni cyclic to have maximum flexibility; different CPUs may 1158 * optionally 1159 * 1160 * (a) have different intervals 1161 * (b) be explicitly in or out of phase with one another 1162 * (c) have different handlers 1163 * (d) have different handler arguments 1164 * (e) fire at different levels 1165 * 1166 * Of these, (e) seems somewhat dubious, but is nonetheless allowed. 1167 * 1168 * The omni online handler is called in the same context as cyclic_add(), 1169 * and has the same liberties: omni online handlers may perform KM_SLEEP 1170 * kernel memory allocations, and may grab locks which are also acquired 1171 * by cyclic handlers. However, omni cyclic online handlers may _not_ 1172 * call back into the cyclic subsystem, and should be generally careful 1173 * about calling into arbitrary kernel subsystems. 1174 * 1175 * Offline handler 1176 * 1177 * The cyo_offline member is a pointer to a function which has the following 1178 * three arguments: 1179 * 1180 * void * <-- Argument (cyo_arg) 1181 * cpu_t * <-- Pointer to CPU about to be offlined 1182 * void * <-- CPU's cyclic argument (that is, value 1183 * to which cyh_arg member of the cyc_handler_t 1184 * was set in the omni online handler) 1185 * 1186 * The omni cyclic offline handler is always called _after_ the omni 1187 * cyclic has ceased firing on the specified CPU. Its purpose is to 1188 * allow cleanup of any resources dynamically allocated in the omni cyclic 1189 * online handler. The context of the offline handler is identical to 1190 * that of the online handler; the same constraints and liberties apply. 1191 * 1192 * The offline handler is optional; it may be NULL. 
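 *
 * Usage sketch
 *
 * The pair of handlers below is an illustrative sketch only; the names,
 * the per-CPU counter and the choice of M_TEMP for the allocation are
 * hypothetical:
 *
 *	static void
 *	my_percpu_tick(void *arg)
 *	{
 *		(*(uint64_t *)arg)++;
 *	}
 *
 *	static void
 *	my_online(void *arg, cpu_t *c, cyc_handler_t *hdlr, cyc_time_t *when)
 *	{
 *		hdlr->cyh_func = my_percpu_tick;
 *		hdlr->cyh_arg = malloc(sizeof (uint64_t), M_TEMP,
 *		    M_ZERO | M_WAITOK);
 *		when->cyt_when = 0;
 *		when->cyt_interval = NANOSEC;
 *	}
 *
 *	static void
 *	my_offline(void *arg, cpu_t *c, void *oarg)
 *	{
 *		free(oarg, M_TEMP);
 *	}
 *
 *	cyc_omni_handler_t omni;
 *	cyclic_id_t id;
 *
 *	omni.cyo_online = my_online;
 *	omni.cyo_offline = my_offline;
 *	omni.cyo_arg = NULL;
 *
 *	mutex_enter(&cpu_lock);
 *	id = cyclic_add_omni(&omni);
 *	mutex_exit(&cpu_lock);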
1193 * 1194 * Return value 1195 * 1196 * cyclic_add_omni() returns a cyclic_id_t, which is guaranteed to be a 1197 * value other than CYCLIC_NONE. cyclic_add_omni() cannot fail. 1198 * 1199 * Caller's context 1200 * 1201 * The caller's context is identical to that of cyclic_add(), specified 1202 * above. 1203 */ 1204cyclic_id_t 1205cyclic_add_omni(cyc_omni_handler_t *omni) 1206{ 1207 cyc_id_t *idp = cyclic_new_id(); 1208 cyc_cpu_t *cpu; 1209 cpu_t *c; 1210 int i; 1211 1212 ASSERT(MUTEX_HELD(&cpu_lock)); 1213 ASSERT(omni != NULL && omni->cyo_online != NULL); 1214 1215 idp->cyi_omni_hdlr = *omni; 1216 1217 for (i = 0; i < MAXCPU; i++) { 1218 if (pcpu_find(i) == NULL) 1219 continue; 1220 1221 c = &solaris_cpu[i]; 1222 1223 if ((cpu = c->cpu_cyclic) == NULL) 1224 continue; 1225 1226 cyclic_omni_start(idp, cpu); 1227 } 1228 1229 /* 1230 * We must have found at least one online CPU on which to run 1231 * this cyclic. 1232 */ 1233 ASSERT(idp->cyi_omni_list != NULL); 1234 ASSERT(idp->cyi_cpu == NULL); 1235 1236 return ((uintptr_t)idp); 1237} 1238 1239/* 1240 * void cyclic_remove(cyclic_id_t) 1241 * 1242 * Overview 1243 * 1244 * cyclic_remove() will remove the specified cyclic from the system. 1245 * 1246 * Arguments and notes 1247 * 1248 * The only argument is a cyclic_id returned from either cyclic_add() or 1249 * cyclic_add_omni(). 1250 * 1251 * By the time cyclic_remove() returns, the caller is guaranteed that the 1252 * removed cyclic handler has completed execution (this is the same 1253 * semantic that untimeout() provides). As a result, cyclic_remove() may 1254 * need to block, waiting for the removed cyclic to complete execution. 1255 * This leads to an important constraint on the caller: no lock may be 1256 * held across cyclic_remove() that also may be acquired by a cyclic 1257 * handler. 1258 * 1259 * Return value 1260 * 1261 * None; cyclic_remove() always succeeds. 1262 * 1263 * Caller's context 1264 * 1265 * cpu_lock must be held by the caller, and the caller must not be in 1266 * interrupt context. The caller may not hold any locks which are also 1267 * grabbed by any cyclic handler. See "Arguments and notes", above. 1268 */ 1269void 1270cyclic_remove(cyclic_id_t id) 1271{ 1272 cyc_id_t *idp = (cyc_id_t *)id; 1273 cyc_id_t *prev = idp->cyi_prev, *next = idp->cyi_next; 1274 cyc_cpu_t *cpu = idp->cyi_cpu; 1275 1276 ASSERT(MUTEX_HELD(&cpu_lock)); 1277 1278 if (cpu != NULL) { 1279 (void) cyclic_remove_here(cpu, idp->cyi_ndx, NULL, CY_WAIT); 1280 } else { 1281 ASSERT(idp->cyi_omni_list != NULL); 1282 while (idp->cyi_omni_list != NULL) 1283 cyclic_omni_stop(idp, idp->cyi_omni_list->cyo_cpu); 1284 } 1285 1286 if (prev != NULL) { 1287 ASSERT(cyclic_id_head != idp); 1288 prev->cyi_next = next; 1289 } else { 1290 ASSERT(cyclic_id_head == idp); 1291 cyclic_id_head = next; 1292 } 1293 1294 if (next != NULL) 1295 next->cyi_prev = prev; 1296 1297 kmem_cache_free(cyclic_id_cache, idp); 1298} 1299 1300static void 1301cyclic_init(cyc_backend_t *be) 1302{ 1303 ASSERT(MUTEX_HELD(&cpu_lock)); 1304 1305 /* 1306 * Copy the passed cyc_backend into the backend template. This must 1307 * be done before the CPU can be configured. 1308 */ 1309 bcopy(be, &cyclic_backend, sizeof (cyc_backend_t)); 1310 1311 cyclic_configure(&solaris_cpu[curcpu]); 1312} 1313 1314/* 1315 * It is assumed that cyclic_mp_init() is called some time after cyclic 1316 * init (and therefore, after cpu0 has been initialized). 
We grab cpu_lock, 1317 * find the already initialized CPU, and initialize every other CPU with the 1318 * same backend. 1319 */ 1320static void 1321cyclic_mp_init(void) 1322{ 1323 cpu_t *c; 1324 int i; 1325 1326 mutex_enter(&cpu_lock); 1327 1328 for (i = 0; i <= mp_maxid; i++) { 1329 if (pcpu_find(i) == NULL) 1330 continue; 1331 1332 c = &solaris_cpu[i]; 1333 1334 if (c->cpu_cyclic == NULL) 1335 cyclic_configure(c); 1336 } 1337 1338 mutex_exit(&cpu_lock); 1339} 1340 1341static void 1342cyclic_uninit(void) 1343{ 1344 cpu_t *c; 1345 int id; 1346 1347 CPU_FOREACH(id) { 1348 c = &solaris_cpu[id]; 1349 1350 if (c->cpu_cyclic == NULL) 1351 continue; 1352 1353 cyclic_unconfigure(c); 1354 } 1355 1356 if (cyclic_id_cache != NULL) 1357 kmem_cache_destroy(cyclic_id_cache); 1358} 1359 1360#include "cyclic_machdep.c" 1361 1362/* 1363 * Cyclic subsystem initialisation. 1364 */ 1365static void 1366cyclic_load(void *dummy) 1367{ 1368 mutex_enter(&cpu_lock); 1369 1370 /* Initialise the machine-dependent backend. */ 1371 cyclic_machdep_init(); 1372 1373 mutex_exit(&cpu_lock); 1374} 1375 1376SYSINIT(cyclic_register, SI_SUB_CYCLIC, SI_ORDER_SECOND, cyclic_load, NULL); 1377 1378static void 1379cyclic_unload(void) 1380{ 1381 mutex_enter(&cpu_lock); 1382 1383 /* Uninitialise the machine-dependent backend. */ 1384 cyclic_machdep_uninit(); 1385 1386 mutex_exit(&cpu_lock); 1387} 1388 1389SYSUNINIT(cyclic_unregister, SI_SUB_CYCLIC, SI_ORDER_SECOND, cyclic_unload, NULL); 1390 1391/* ARGSUSED */ 1392static int 1393cyclic_modevent(module_t mod __unused, int type, void *data __unused) 1394{ 1395 int error = 0; 1396 1397 switch (type) { 1398 case MOD_LOAD: 1399 break; 1400 1401 case MOD_UNLOAD: 1402 break; 1403 1404 case MOD_SHUTDOWN: 1405 break; 1406 1407 default: 1408 error = EOPNOTSUPP; 1409 break; 1410 1411 } 1412 return (error); 1413} 1414 1415DEV_MODULE(cyclic, cyclic_modevent, NULL); 1416MODULE_VERSION(cyclic, 1); 1417MODULE_DEPEND(cyclic, opensolaris, 1, 1, 1); 1418