/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *	    Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/nmi.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>

#include "rcutree.h"

/* Data structures. */

static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];

#define RCU_STATE_INITIALIZER(structname) { \
	.level = { &structname.node[0] }, \
	.levelcnt = { \
		NUM_RCU_LVL_0,  /* root of hierarchy. */ \
		NUM_RCU_LVL_1, \
		NUM_RCU_LVL_2, \
		NUM_RCU_LVL_3, \
		NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \
	}, \
	.signaled = RCU_GP_IDLE, \
	.gpnum = -300, \
	.completed = -300, \
	.onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname.onofflock), \
	.orphan_cbs_list = NULL, \
	.orphan_cbs_tail = &structname.orphan_cbs_list, \
	.orphan_qlen = 0, \
	.fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname.fqslock), \
	.n_force_qs = 0, \
	.n_force_qs_ngp = 0, \
	.name = #structname, \
}

struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state);
DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);

struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);

int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(struct rcu_state *rsp)
{
	return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
}
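/*
 * Illustrative note (not in the original source): ->gpnum is advanced by
 * rcu_start_gp() and ->completed catches up in rcu_report_qs_rsp(), so
 * the two fields differ exactly while a grace period is in flight:
 *
 *	rsp->gpnum == rsp->completed		idle, rcu_gp_in_progress() == 0
 *	rcu_start_gp():  rsp->gpnum++		rcu_gp_in_progress() == 1
 *	rcu_report_qs_rsp():  rsp->completed = rsp->gpnum	idle again
 */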
/*
 * Note a quiescent state.  Because we do not need to know
 * how many quiescent states passed, just if there was at least
 * one since the start of the grace period, this just sets a flag.
 */
void rcu_sched_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);

	rdp->passed_quiesc_completed = rdp->gpnum - 1;
	barrier();
	rdp->passed_quiesc = 1;
}

void rcu_bh_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);

	rdp->passed_quiesc_completed = rdp->gpnum - 1;
	barrier();
	rdp->passed_quiesc = 1;
}

/*
 * Note a context switch.  This is a quiescent state for RCU-sched,
 * and requires special handling for preemptible RCU.
 */
void rcu_note_context_switch(int cpu)
{
	rcu_sched_qs(cpu);
	rcu_preempt_note_context_switch(cpu);
}

#ifdef CONFIG_NO_HZ
DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
	.dynticks_nesting = 1,
	.dynticks = 1,
};
#endif /* #ifdef CONFIG_NO_HZ */

static int blimit = 10;		/* Maximum callbacks per softirq. */
static int qhimark = 10000;	/* If this many pending, ignore blimit. */
static int qlowmark = 100;	/* Once only this many pending, use blimit. */

module_param(blimit, int, 0);
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);

static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
static int rcu_pending(int cpu);

/*
 * Return the number of RCU-sched batches processed thus far for debug & stats.
 */
long rcu_batches_completed_sched(void)
{
	return rcu_sched_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);

/*
 * Return the number of RCU BH batches processed thus far for debug & stats.
 */
long rcu_batches_completed_bh(void)
{
	return rcu_bh_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);

/*
 * Force a quiescent state for RCU BH.
 */
void rcu_bh_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_bh_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);

/*
 * Force a quiescent state for RCU-sched.
 */
void rcu_sched_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_sched_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);

/*
 * Does the CPU have callbacks ready to be invoked?
 */
static int
cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
{
	return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL];
}

/*
 * Does the current CPU require a not-yet-started grace period?
 */
static int
cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
{
	return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
}

/*
 * Return the root node of the specified rcu_state structure.
 */
static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
{
	return &rsp->node[0];
}
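/*
 * Illustrative note (not in the original source): rdp->nxtlist is one
 * singly linked list carved into four segments by the rdp->nxttail[]
 * tail pointers:
 *
 *	nxtlist -> [done] -> [wait] -> [next-ready] -> [next] -> NULL
 *	       ^DONE_TAIL ^WAIT_TAIL ^NEXT_READY_TAIL ^NEXT_TAIL
 *
 * cpu_has_callbacks_ready_to_invoke() above simply tests whether the
 * [done] segment is non-empty, that is, whether nxttail[RCU_DONE_TAIL]
 * no longer points at the list header itself.
 */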
#ifdef CONFIG_SMP

/*
 * If the specified CPU is offline, tell the caller that it is in
 * a quiescent state.  Otherwise, whack it with a reschedule IPI.
 * Grace periods can end up waiting on an offline CPU when that
 * CPU is in the process of coming online -- it will be added to the
 * rcu_node bitmasks before it actually makes it online.  The same thing
 * can happen while a CPU is in the process of coming online.  Because this
 * race is quite rare, we check for it after detecting that the grace
 * period has been delayed rather than checking each and every CPU
 * each and every time we start a new grace period.
 */
static int rcu_implicit_offline_qs(struct rcu_data *rdp)
{
	/*
	 * If the CPU is offline, it is in a quiescent state.  We can
	 * trust its state not to change because interrupts are disabled.
	 */
	if (cpu_is_offline(rdp->cpu)) {
		rdp->offline_fqs++;
		return 1;
	}

	/* If preemptable RCU, no point in sending reschedule IPI. */
	if (rdp->preemptable)
		return 0;

	/* The CPU is online, so send it a reschedule IPI. */
	if (rdp->cpu != smp_processor_id())
		smp_send_reschedule(rdp->cpu);
	else
		set_need_resched();
	rdp->resched_ipi++;
	return 0;
}

#endif /* #ifdef CONFIG_SMP */

#ifdef CONFIG_NO_HZ

/**
 * rcu_enter_nohz - inform RCU that current CPU is entering nohz
 *
 * Enter nohz mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in nohz mode, a possibility
 * handled by rcu_irq_enter() and rcu_irq_exit()).
 */
void rcu_enter_nohz(void)
{
	unsigned long flags;
	struct rcu_dynticks *rdtp;

	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
	local_irq_save(flags);
	rdtp = &__get_cpu_var(rcu_dynticks);
	rdtp->dynticks++;
	rdtp->dynticks_nesting--;
	WARN_ON_ONCE(rdtp->dynticks & 0x1);
	local_irq_restore(flags);
}

/*
 * rcu_exit_nohz - inform RCU that current CPU is leaving nohz
 *
 * Exit nohz mode, in other words, -enter- the mode in which RCU
 * read-side critical sections normally occur.
 */
void rcu_exit_nohz(void)
{
	unsigned long flags;
	struct rcu_dynticks *rdtp;

	local_irq_save(flags);
	rdtp = &__get_cpu_var(rcu_dynticks);
	rdtp->dynticks++;
	rdtp->dynticks_nesting++;
	WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
	local_irq_restore(flags);
	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
}

/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle with dynamic ticks active, and there is no
 * irq handler running, this updates rdtp->dynticks_nmi to let the
 * RCU grace-period handling know that the CPU is active.
 */
void rcu_nmi_enter(void)
{
	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);

	if (rdtp->dynticks & 0x1)
		return;
	rdtp->dynticks_nmi++;
	WARN_ON_ONCE(!(rdtp->dynticks_nmi & 0x1));
	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
}

/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If the CPU was idle with dynamic ticks active, and there is no
 * irq handler running, this updates rdtp->dynticks_nmi to let the
 * RCU grace-period handling know that the CPU is no longer active.
 */
void rcu_nmi_exit(void)
{
	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);

	if (rdtp->dynticks & 0x1)
		return;
	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
	rdtp->dynticks_nmi++;
	WARN_ON_ONCE(rdtp->dynticks_nmi & 0x1);
}
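/*
 * Illustrative note (not in the original source): ->dynticks and
 * ->dynticks_nmi follow an even/odd protocol -- odd while the CPU is
 * active and even while it is dynticks-idle.  A sample sequence for one
 * CPU, starting from the initializer value of 1:
 *
 *	boot:			dynticks == 1	(odd, active)
 *	rcu_enter_nohz():	dynticks == 2	(even, idle)
 *	rcu_irq_enter():	dynticks == 3	(odd, in handler)
 *	rcu_irq_exit():		dynticks == 4	(even, idle again)
 *
 * force_quiescent_state() compares snapshots of these counters to infer
 * quiescent states without waking sleeping CPUs.
 */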
/**
 * rcu_irq_enter - inform RCU of entry to hard irq context
 *
 * If the CPU was idle with dynamic ticks active, this updates the
 * rdtp->dynticks to let the RCU handling know that the CPU is active.
 */
void rcu_irq_enter(void)
{
	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);

	if (rdtp->dynticks_nesting++)
		return;
	rdtp->dynticks++;
	WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
}

/**
 * rcu_irq_exit - inform RCU of exit from hard irq context
 *
 * If the CPU was idle with dynamic ticks active, update rdtp->dynticks
 * to let the RCU handling know that the CPU is going back to idle
 * with no ticks.
 */
void rcu_irq_exit(void)
{
	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);

	if (--rdtp->dynticks_nesting)
		return;
	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
	rdtp->dynticks++;
	WARN_ON_ONCE(rdtp->dynticks & 0x1);

	/* If the interrupt queued a callback, get out of dyntick mode. */
	if (__get_cpu_var(rcu_sched_data).nxtlist ||
	    __get_cpu_var(rcu_bh_data).nxtlist)
		set_need_resched();
}

#ifdef CONFIG_SMP

/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit them with an implicit quiescent state.  Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
	int ret;
	int snap;
	int snap_nmi;

	snap = rdp->dynticks->dynticks;
	snap_nmi = rdp->dynticks->dynticks_nmi;
	smp_mb(); /* Order sampling of snap with end of grace period. */
	rdp->dynticks_snap = snap;
	rdp->dynticks_nmi_snap = snap_nmi;
	ret = ((snap & 0x1) == 0) && ((snap_nmi & 0x1) == 0);
	if (ret)
		rdp->dynticks_fqs++;
	return ret;
}

/*
 * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks
 * idle state since the last call to dyntick_save_progress_counter()
 * for this same CPU.
 */
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
	long curr;
	long curr_nmi;
	long snap;
	long snap_nmi;

	curr = rdp->dynticks->dynticks;
	snap = rdp->dynticks_snap;
	curr_nmi = rdp->dynticks->dynticks_nmi;
	snap_nmi = rdp->dynticks_nmi_snap;
	smp_mb(); /* force ordering with cpu entering/leaving dynticks. */

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq/NMI handlers, then we can safely pretend that the CPU
	 * already acknowledged the request to pass through a quiescent
	 * state.  Either way, that CPU cannot possibly be in an RCU
	 * read-side critical section that started before the beginning
	 * of the current RCU grace period.
	 */
	if ((curr != snap || (curr & 0x1) == 0) &&
	    (curr_nmi != snap_nmi || (curr_nmi & 0x1) == 0)) {
		rdp->dynticks_fqs++;
		return 1;
	}

	/* Go check for the CPU being offline. */
	return rcu_implicit_offline_qs(rdp);
}

#endif /* #ifdef CONFIG_SMP */

#else /* #ifdef CONFIG_NO_HZ */

#ifdef CONFIG_SMP

static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
	return 0;
}

static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
	return rcu_implicit_offline_qs(rdp);
}

#endif /* #ifdef CONFIG_SMP */

#endif /* #else #ifdef CONFIG_NO_HZ */

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

int rcu_cpu_stall_panicking __read_mostly;

static void record_gp_stall_check_time(struct rcu_state *rsp)
{
	rsp->gp_start = jiffies;
	rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
}

static void print_other_cpu_stall(struct rcu_state *rsp)
{
	int cpu;
	long delta;
	unsigned long flags;
	struct rcu_node *rnp = rcu_get_root(rsp);

	/* Only let one CPU complain about others per time interval. */

	raw_spin_lock_irqsave(&rnp->lock, flags);
	delta = jiffies - rsp->jiffies_stall;
	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;

	/*
	 * Now rat on any tasks that got kicked up to the root rcu_node
	 * due to CPU offlining.
	 */
	rcu_print_task_stall(rnp);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);

	/* OK, time to rat on our buddy... */

	printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks: {",
	       rsp->name);
	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave(&rnp->lock, flags);
		rcu_print_task_stall(rnp);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		if (rnp->qsmask == 0)
			continue;
		for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
			if (rnp->qsmask & (1UL << cpu))
				printk(" %d", rnp->grplo + cpu);
	}
	printk("} (detected by %d, t=%ld jiffies)\n",
	       smp_processor_id(), (long)(jiffies - rsp->gp_start));
	trigger_all_cpu_backtrace();

	/* If so configured, complain about tasks blocking the grace period. */

	rcu_print_detail_task_stall(rsp);

	force_quiescent_state(rsp, 0);  /* Kick them all. */
}

static void print_cpu_stall(struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_node *rnp = rcu_get_root(rsp);

	printk(KERN_ERR "INFO: %s detected stall on CPU %d (t=%lu jiffies)\n",
	       rsp->name, smp_processor_id(), jiffies - rsp->gp_start);
	trigger_all_cpu_backtrace();

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall))
		rsp->jiffies_stall =
			jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);

	set_need_resched();  /* kick ourselves to get things going. */
}

static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
{
	long delta;
	struct rcu_node *rnp;

	if (rcu_cpu_stall_panicking)
		return;
	delta = jiffies - rsp->jiffies_stall;
	rnp = rdp->mynode;
	if ((rnp->qsmask & rdp->grpmask) && delta >= 0) {

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(rsp);

	} else if (rcu_gp_in_progress(rsp) && delta >= RCU_STALL_RAT_DELAY) {

		/* They had two time units to dump stack, so complain. */
		print_other_cpu_stall(rsp);
	}
}

static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_panicking = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static void __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

static void record_gp_stall_check_time(struct rcu_state *rsp)
{
}

static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
{
}

static void __init check_cpu_stall_init(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Update CPU-local rcu_data state to record the newly noticed grace period.
 * This is used both when we started the grace period and when we notice
 * that someone else started the grace period.  The caller must hold the
 * ->lock of the leaf rcu_node structure corresponding to the current CPU,
 * and must have irqs disabled.
 */
static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
{
	if (rdp->gpnum != rnp->gpnum) {
		rdp->qs_pending = 1;
		rdp->passed_quiesc = 0;
		rdp->gpnum = rnp->gpnum;
	}
}

static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long flags;
	struct rcu_node *rnp;

	local_irq_save(flags);
	rnp = rdp->mynode;
	if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */
	    !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
		local_irq_restore(flags);
		return;
	}
	__note_new_gpnum(rsp, rnp, rdp);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Did someone else start a new RCU grace period since we last
 * checked?  Update local state appropriately if so.  Must be called
 * on the CPU corresponding to rdp.
 */
static int
check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long flags;
	int ret = 0;

	local_irq_save(flags);
	if (rdp->gpnum != rsp->gpnum) {
		note_new_gpnum(rsp, rdp);
		ret = 1;
	}
	local_irq_restore(flags);
	return ret;
}

/*
 * Advance this CPU's callbacks, but only if the current grace period
 * has ended.  This may be called only from the CPU to whom the rdp
 * belongs.  In addition, the corresponding leaf rcu_node structure's
 * ->lock must be held by the caller, with irqs disabled.
 */
static void
__rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
{
	/* Did another grace period end? */
	if (rdp->completed != rnp->completed) {

		/* Advance callbacks.  No harm if list empty. */
		rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
		rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
		rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];

		/* Remember that we saw this grace-period completion. */
		rdp->completed = rnp->completed;
	}
}
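/*
 * Illustrative note (not in the original source): the advancement in
 * __rcu_process_gp_end() slides the segment boundaries toward the list
 * head, so callbacks that were waiting on the just-completed grace
 * period ([wait] segment) become [done] and eligible for invocation by
 * rcu_do_batch(), while [next-ready] callbacks move into [wait] for the
 * upcoming grace period.
 */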
/*
 * Advance this CPU's callbacks, but only if the current grace period
 * has ended.  This may be called only from the CPU to whom the rdp
 * belongs.
 */
static void
rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long flags;
	struct rcu_node *rnp;

	local_irq_save(flags);
	rnp = rdp->mynode;
	if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */
	    !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
		local_irq_restore(flags);
		return;
	}
	__rcu_process_gp_end(rsp, rnp, rdp);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Do per-CPU grace-period initialization for running CPU.  The caller
 * must hold the lock of the leaf rcu_node structure corresponding to
 * this CPU.
 */
static void
rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
{
	/* Prior grace period ended, so advance callbacks for current CPU. */
	__rcu_process_gp_end(rsp, rnp, rdp);

	/*
	 * Because this CPU just now started the new grace period, we know
	 * that all of its callbacks will be covered by this upcoming grace
	 * period, even the ones that were registered arbitrarily recently.
	 * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
	 *
	 * Other CPUs cannot be sure exactly when the grace period started.
	 * Therefore, their recently registered callbacks must pass through
	 * an additional RCU_NEXT_READY stage, so that they will be handled
	 * by the next RCU grace period.
	 */
	rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
	rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];

	/* Set state so that this CPU will detect the next quiescent state. */
	__note_new_gpnum(rsp, rnp, rdp);
}

/*
 * Start a new RCU grace period if warranted, re-initializing the hierarchy
 * in preparation for detecting the next grace period.  The caller must hold
 * the root node's ->lock, which is released before return.  Hard irqs must
 * be disabled.
 */
static void
rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
	__releases(rcu_get_root(rsp)->lock)
{
	struct rcu_data *rdp = rsp->rda[smp_processor_id()];
	struct rcu_node *rnp = rcu_get_root(rsp);

	if (!cpu_needs_another_gp(rsp, rdp) || rsp->fqs_active) {
		if (cpu_needs_another_gp(rsp, rdp))
			rsp->fqs_need_gp = 1;
		if (rnp->completed == rsp->completed) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			return;
		}
		raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */

		/*
		 * Propagate new ->completed value to rcu_node structures
		 * so that other CPUs don't have to wait until the start
		 * of the next grace period to process their callbacks.
		 */
		rcu_for_each_node_breadth_first(rsp, rnp) {
			raw_spin_lock(&rnp->lock); /* irqs already disabled. */
			rnp->completed = rsp->completed;
			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
		}
		local_irq_restore(flags);
		return;
	}

	/* Advance to a new grace period and initialize state. */
	rsp->gpnum++;
	WARN_ON_ONCE(rsp->signaled == RCU_GP_INIT);
	rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
	record_gp_stall_check_time(rsp);

	/* Special-case the common single-level case. */
	if (NUM_RCU_NODES == 1) {
		rcu_preempt_check_blocked_tasks(rnp);
		rnp->qsmask = rnp->qsmaskinit;
		rnp->gpnum = rsp->gpnum;
		rnp->completed = rsp->completed;
		rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
		rcu_start_gp_per_cpu(rsp, rnp, rdp);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}

	raw_spin_unlock(&rnp->lock); /* leave irqs disabled. */


	/* Exclude any concurrent CPU-hotplug operations. */
	raw_spin_lock(&rsp->onofflock); /* irqs already disabled. */

	/*
	 * Set the quiescent-state-needed bits in all the rcu_node
	 * structures for all currently online CPUs in breadth-first
	 * order, starting from the root rcu_node structure.  This
	 * operation relies on the layout of the hierarchy within the
	 * rsp->node[] array.  Note that other CPUs will access only
	 * the leaves of the hierarchy, which still indicate that no
	 * grace period is in progress, at least until the corresponding
	 * leaf node has been initialized.  In addition, we have excluded
	 * CPU-hotplug operations.
	 *
	 * Note that the grace period cannot complete until we finish
	 * the initialization process, as there will be at least one
	 * qsmask bit set in the root node until that time, namely the
	 * one corresponding to this CPU, due to the fact that we have
	 * irqs disabled.
	 */
	rcu_for_each_node_breadth_first(rsp, rnp) {
		raw_spin_lock(&rnp->lock);	/* irqs already disabled. */
		rcu_preempt_check_blocked_tasks(rnp);
		rnp->qsmask = rnp->qsmaskinit;
		rnp->gpnum = rsp->gpnum;
		rnp->completed = rsp->completed;
		if (rnp == rdp->mynode)
			rcu_start_gp_per_cpu(rsp, rnp, rdp);
		raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	}

	rnp = rcu_get_root(rsp);
	raw_spin_lock(&rnp->lock);		/* irqs already disabled. */
	rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
	raw_spin_unlock(&rnp->lock);		/* irqs remain disabled. */
	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
}

/*
 * Report a full set of quiescent states to the specified rcu_state
 * data structure.  This involves cleaning up after the prior grace
 * period and letting rcu_start_gp() start up the next grace period
 * if one is needed.  Note that the caller must hold rnp->lock, as
 * required by rcu_start_gp(), which will release it.
 */
static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
	__releases(rcu_get_root(rsp)->lock)
{
	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
	rsp->completed = rsp->gpnum;
	rsp->signaled = RCU_GP_IDLE;
	rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
}

/*
 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
 * Allows quiescent states for a group of CPUs to be reported at one go
 * to the specified rcu_node structure, though all the CPUs in the group
 * must be represented by the same rcu_node structure (which need not be
 * a leaf rcu_node structure, though it often will be).  That structure's
 * lock must be held upon entry, and it is released before return.
 */
static void
rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
		  struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	struct rcu_node *rnp_c;

	/* Walk up the rcu_node hierarchy. */
	for (;;) {
		if (!(rnp->qsmask & mask)) {

			/* Our bit has already been cleared, so done. */
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			return;
		}
		rnp->qsmask &= ~mask;
		if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {

			/* Other bits still set at this level, so done. */
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			return;
		}
		mask = rnp->grpmask;
		if (rnp->parent == NULL) {

			/* No more levels.  Exit loop holding root lock. */

			break;
		}
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		rnp_c = rnp;
		rnp = rnp->parent;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		WARN_ON_ONCE(rnp_c->qsmask);
	}

	/*
	 * Get here if we are the last CPU to pass through a quiescent
	 * state for this grace period.  Invoke rcu_report_qs_rsp()
	 * to clean up and start the next grace period if one is needed.
	 */
	rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
}

/*
 * Record a quiescent state for the specified CPU to that CPU's rcu_data
 * structure.  This must be either called from the specified CPU, or
 * called when the specified CPU is known to be offline (and when it is
 * also known that no other CPU is concurrently trying to help the offline
 * CPU).  The lastcomp argument is used to make sure we are still in the
 * grace period of interest.  We don't want to end the current grace period
 * based on quiescent states detected in an earlier grace period!
 */
static void
rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
{
	unsigned long flags;
	unsigned long mask;
	struct rcu_node *rnp;

	rnp = rdp->mynode;
	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (lastcomp != rnp->completed) {

		/*
		 * Someone beat us to it for this grace period, so leave.
		 * The race with GP start is resolved by the fact that we
		 * hold the leaf rcu_node lock, so that the per-CPU bits
		 * cannot yet be initialized -- so we would simply find our
		 * CPU's bit already cleared in rcu_report_qs_rnp() if this
		 * race occurred.
		 */
		rdp->passed_quiesc = 0;	/* try again later! */
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	mask = rdp->grpmask;
	if ((rnp->qsmask & mask) == 0) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else {
		rdp->qs_pending = 0;

		/*
		 * This GP can't end until cpu checks in, so all of our
		 * callbacks can be processed during the next GP.
		 */
		rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];

		rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */
	}
}
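/*
 * Illustrative note (not in the original source): in a two-level tree,
 * a CPU's quiescent state first clears its bit in its leaf rcu_node's
 * ->qsmask.  Only if that was the leaf's last set bit does the loop in
 * rcu_report_qs_rnp() climb to the root and clear the leaf's bit there,
 * and only if that was the root's last set bit does the grace period
 * end via rcu_report_qs_rsp().  Quiescent states thus coalesce on the
 * way up, keeping contention on the root lock low.
 */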
/*
 * Check to see if there is a new grace period of which this CPU
 * is not yet aware, and if so, set up local rcu_data state for it.
 * Otherwise, see if this CPU has just passed through its first
 * quiescent state for this grace period, and record that fact if so.
 */
static void
rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
{
	/* If there is now a new grace period, record and return. */
	if (check_for_new_grace_period(rsp, rdp))
		return;

	/*
	 * Does this CPU still need to do its part for current grace period?
	 * If no, return and let the other CPUs do their part as well.
	 */
	if (!rdp->qs_pending)
		return;

	/*
	 * Was there a quiescent state since the beginning of the grace
	 * period?  If no, then exit and wait for the next call.
	 */
	if (!rdp->passed_quiesc)
		return;

	/*
	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
	 * judge of that).
	 */
	rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Move a dying CPU's RCU callbacks to the ->orphan_cbs_list for the
 * specified flavor of RCU.  The callbacks will be adopted by the next
 * _rcu_barrier() invocation or by the CPU_DEAD notifier, whichever
 * comes first.  Because this is invoked from the CPU_DYING notifier,
 * irqs are already disabled.
 */
static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
{
	int i;
	struct rcu_data *rdp = rsp->rda[smp_processor_id()];

	if (rdp->nxtlist == NULL)
		return;  /* irqs disabled, so comparison is stable. */
	raw_spin_lock(&rsp->onofflock);  /* irqs already disabled. */
	*rsp->orphan_cbs_tail = rdp->nxtlist;
	rsp->orphan_cbs_tail = rdp->nxttail[RCU_NEXT_TAIL];
	rdp->nxtlist = NULL;
	for (i = 0; i < RCU_NEXT_SIZE; i++)
		rdp->nxttail[i] = &rdp->nxtlist;
	rsp->orphan_qlen += rdp->qlen;
	rdp->qlen = 0;
	raw_spin_unlock(&rsp->onofflock);  /* irqs remain disabled. */
}

/*
 * Adopt previously orphaned RCU callbacks.
 */
static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_data *rdp;

	raw_spin_lock_irqsave(&rsp->onofflock, flags);
	rdp = rsp->rda[smp_processor_id()];
	if (rsp->orphan_cbs_list == NULL) {
		raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
		return;
	}
	*rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list;
	rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_tail;
	rdp->qlen += rsp->orphan_qlen;
	rsp->orphan_cbs_list = NULL;
	rsp->orphan_cbs_tail = &rsp->orphan_cbs_list;
	rsp->orphan_qlen = 0;
	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
}

/*
 * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy
 * and move all callbacks from the outgoing CPU to the current one.
 */
static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
{
	unsigned long flags;
	unsigned long mask;
	int need_report = 0;
	struct rcu_data *rdp = rsp->rda[cpu];
	struct rcu_node *rnp;

	/* Exclude any attempts to start a new grace period. */
	raw_spin_lock_irqsave(&rsp->onofflock, flags);

	/* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
	rnp = rdp->mynode;	/* this is the outgoing CPU's rnp. */
	mask = rdp->grpmask;	/* rnp->grplo is constant. */
	do {
		raw_spin_lock(&rnp->lock);	/* irqs already disabled. */
		rnp->qsmaskinit &= ~mask;
		if (rnp->qsmaskinit != 0) {
			if (rnp != rdp->mynode)
				raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
			break;
		}
		if (rnp == rdp->mynode)
			need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
		else
			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
		mask = rnp->grpmask;
		rnp = rnp->parent;
	} while (rnp != NULL);

	/*
	 * We still hold the leaf rcu_node structure lock here, and
	 * irqs are still disabled.  The reason for this subterfuge is
	 * that invoking rcu_report_unblock_qs_rnp() with ->onofflock
	 * held leads to deadlock.
	 */
	raw_spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
	rnp = rdp->mynode;
	if (need_report & RCU_OFL_TASKS_NORM_GP)
		rcu_report_unblock_qs_rnp(rnp, flags);
	else
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	if (need_report & RCU_OFL_TASKS_EXP_GP)
		rcu_report_exp_rnp(rsp, rnp);

	rcu_adopt_orphan_cbs(rsp);
}

/*
 * Remove the specified CPU from the RCU hierarchy and move any pending
 * callbacks that it might have to the current CPU.  This code assumes
 * that at least one CPU in the system will remain running at all times.
 * Any attempt to offline -all- CPUs is likely to strand RCU callbacks.
 */
static void rcu_offline_cpu(int cpu)
{
	__rcu_offline_cpu(cpu, &rcu_sched_state);
	__rcu_offline_cpu(cpu, &rcu_bh_state);
	rcu_preempt_offline_cpu(cpu);
}

#else /* #ifdef CONFIG_HOTPLUG_CPU */

static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
{
}

static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
{
}

static void rcu_offline_cpu(int cpu)
{
}

#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Invoke any RCU callbacks that have made it to the end of their grace
 * period.  Throttle as specified by rdp->blimit.
 */
static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long flags;
	struct rcu_head *next, *list, **tail;
	int count;

	/* If no callbacks are ready, just return. */
	if (!cpu_has_callbacks_ready_to_invoke(rdp))
		return;

	/*
	 * Extract the list of ready callbacks, disabling to prevent
	 * races with call_rcu() from interrupt handlers.
	 */
	local_irq_save(flags);
	list = rdp->nxtlist;
	rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
	*rdp->nxttail[RCU_DONE_TAIL] = NULL;
	tail = rdp->nxttail[RCU_DONE_TAIL];
	for (count = RCU_NEXT_SIZE - 1; count >= 0; count--)
		if (rdp->nxttail[count] == rdp->nxttail[RCU_DONE_TAIL])
			rdp->nxttail[count] = &rdp->nxtlist;
	local_irq_restore(flags);

	/* Invoke callbacks. */
	count = 0;
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		list->func(list);
		list = next;
		if (++count >= rdp->blimit)
			break;
	}

	local_irq_save(flags);

	/* Update count, and requeue any remaining callbacks. */
	rdp->qlen -= count;
	if (list != NULL) {
		*tail = rdp->nxtlist;
		rdp->nxtlist = list;
		for (count = 0; count < RCU_NEXT_SIZE; count++)
			if (&rdp->nxtlist == rdp->nxttail[count])
				rdp->nxttail[count] = tail;
			else
				break;
	}

	/* Reinstate batch limit if we have worked down the excess. */
	if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
		rdp->blimit = blimit;

	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
	if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
		rdp->qlen_last_fqs_check = 0;
		rdp->n_force_qs_snap = rsp->n_force_qs;
	} else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
		rdp->qlen_last_fqs_check = rdp->qlen;

	local_irq_restore(flags);

	/* Re-raise the RCU softirq if there are callbacks remaining. */
	if (cpu_has_callbacks_ready_to_invoke(rdp))
		raise_softirq(RCU_SOFTIRQ);
}
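/*
 * Illustrative note (not in the original source): with the default
 * module parameters above, rcu_do_batch() invokes at most blimit (10)
 * callbacks per pass.  Should a CPU fall more than qhimark (10000)
 * callbacks behind, __call_rcu() below lifts the limit to LONG_MAX so
 * the backlog can drain, and the limit is restored here only once the
 * queue is down to qlowmark (100) -- hysteresis that avoids flapping
 * between the two modes.
 */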
/*
 * Check to see if this CPU is in a non-context-switch quiescent state
 * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
 * Also schedule the RCU softirq handler.
 *
 * This function must be called with hardirqs disabled.  It is normally
 * invoked from the scheduling-clock interrupt.  If rcu_pending returns
 * false, there is no point in invoking rcu_check_callbacks().
 */
void rcu_check_callbacks(int cpu, int user)
{
	if (user ||
	    (idle_cpu(cpu) && rcu_scheduler_active &&
	     !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {

		/*
		 * Get here if this CPU took its interrupt from user
		 * mode or from the idle loop, and if this is not a
		 * nested interrupt.  In this case, the CPU is in
		 * a quiescent state, so note it.
		 *
		 * No memory barrier is required here because both
		 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
		 * variables that other CPUs neither access nor modify,
		 * at least not while the corresponding CPU is online.
		 */

		rcu_sched_qs(cpu);
		rcu_bh_qs(cpu);

	} else if (!in_softirq()) {

		/*
		 * Get here if this CPU did not take its interrupt from
		 * softirq, in other words, if it is not interrupting
		 * an rcu_bh read-side critical section.  This is an _bh
		 * critical section, so note it.
		 */

		rcu_bh_qs(cpu);
	}
	rcu_preempt_check_callbacks(cpu);
	if (rcu_pending(cpu))
		raise_softirq(RCU_SOFTIRQ);
}

#ifdef CONFIG_SMP

/*
 * Scan the leaf rcu_node structures, processing dyntick state for any that
 * have not yet encountered a quiescent state, using the function specified.
 * The caller must have suppressed start of new grace periods.
 */
static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
{
	unsigned long bit;
	int cpu;
	unsigned long flags;
	unsigned long mask;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rsp, rnp) {
		mask = 0;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		if (!rcu_gp_in_progress(rsp)) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			return;
		}
		if (rnp->qsmask == 0) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			continue;
		}
		cpu = rnp->grplo;
		bit = 1;
		for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
			if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu]))
				mask |= bit;
		}
		if (mask != 0) {

			/* rcu_report_qs_rnp() releases rnp->lock. */
			rcu_report_qs_rnp(mask, rsp, rnp, flags);
			continue;
		}
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}

/*
 * Force quiescent states on reluctant CPUs, and also detect which
 * CPUs are in dyntick-idle mode.
 */
static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
{
	unsigned long flags;
	struct rcu_node *rnp = rcu_get_root(rsp);

	if (!rcu_gp_in_progress(rsp))
		return;  /* No grace period in progress, nothing to force. */
	if (!raw_spin_trylock_irqsave(&rsp->fqslock, flags)) {
		rsp->n_force_qs_lh++; /* Inexact, can lose counts.  Tough! */
		return;	/* Someone else is already on the job. */
	}
	if (relaxed && ULONG_CMP_GE(rsp->jiffies_force_qs, jiffies))
		goto unlock_fqs_ret; /* no emergency and done recently. */
	rsp->n_force_qs++;
	raw_spin_lock(&rnp->lock);  /* irqs already disabled */
	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
	if (!rcu_gp_in_progress(rsp)) {
		rsp->n_force_qs_ngp++;
		raw_spin_unlock(&rnp->lock);  /* irqs remain disabled */
		goto unlock_fqs_ret;  /* no GP in progress, time updated. */
	}
	rsp->fqs_active = 1;
	switch (rsp->signaled) {
	case RCU_GP_IDLE:
	case RCU_GP_INIT:

		break; /* grace period idle or initializing, ignore. */

	case RCU_SAVE_DYNTICK:
		if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK)
			break; /* So gcc recognizes the dead code. */

		raw_spin_unlock(&rnp->lock);  /* irqs remain disabled */

		/* Record dyntick-idle state. */
		force_qs_rnp(rsp, dyntick_save_progress_counter);
		raw_spin_lock(&rnp->lock);  /* irqs already disabled */
		if (rcu_gp_in_progress(rsp))
			rsp->signaled = RCU_FORCE_QS;
		break;

	case RCU_FORCE_QS:

		/* Check dyntick-idle state, send IPI to laggards. */
		raw_spin_unlock(&rnp->lock);  /* irqs remain disabled */
		force_qs_rnp(rsp, rcu_implicit_dynticks_qs);

		/* Leave state in case more forcing is required. */

		raw_spin_lock(&rnp->lock);  /* irqs already disabled */
		break;
	}
	rsp->fqs_active = 0;
	if (rsp->fqs_need_gp) {
		raw_spin_unlock(&rsp->fqslock); /* irqs remain disabled */
		rsp->fqs_need_gp = 0;
		rcu_start_gp(rsp, flags); /* releases rnp->lock */
		return;
	}
	raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
unlock_fqs_ret:
	raw_spin_unlock_irqrestore(&rsp->fqslock, flags);
}

#else /* #ifdef CONFIG_SMP */

static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
{
	set_need_resched();
}

#endif /* #else #ifdef CONFIG_SMP */

/*
 * This does the RCU processing work from softirq context for the
 * specified rcu_state and rcu_data structures.  This may be called
 * only from the CPU to whom the rdp belongs.
 */
static void
__rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long flags;

	WARN_ON_ONCE(rdp->beenonline == 0);

	/*
	 * If an RCU GP has gone long enough, go check for dyntick
	 * idle CPUs and, if needed, send resched IPIs.
	 */
	if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
		force_quiescent_state(rsp, 1);

	/*
	 * Advance callbacks in response to end of earlier grace
	 * period that some other CPU ended.
	 */
	rcu_process_gp_end(rsp, rdp);

	/* Update RCU state based on any recent quiescent states. */
	rcu_check_quiescent_state(rsp, rdp);

	/* Does this CPU require a not-yet-started grace period? */
	if (cpu_needs_another_gp(rsp, rdp)) {
		raw_spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags);
		rcu_start_gp(rsp, flags);  /* releases above lock */
	}

	/* If there are callbacks ready, invoke them. */
	rcu_do_batch(rsp, rdp);
}

/*
 * Do softirq processing for the current CPU.
 */
static void rcu_process_callbacks(struct softirq_action *unused)
{
	/*
	 * Memory references from any prior RCU read-side critical sections
	 * executed by the interrupted code must be seen before any RCU
	 * grace-period manipulations below.
	 */
	smp_mb(); /* See above block comment. */
	__rcu_process_callbacks(&rcu_sched_state,
				&__get_cpu_var(rcu_sched_data));
	__rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
	rcu_preempt_process_callbacks();

	/*
	 * Memory references from any later RCU read-side critical sections
	 * executed by the interrupted code must be seen after any RCU
	 * grace-period manipulations above.
	 */
	smp_mb(); /* See above block comment. */

	/* If we are last CPU on way to dyntick-idle mode, accelerate it. */
	rcu_needs_cpu_flush();
}

static void
__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
	   struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_data *rdp;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	smp_mb(); /* Ensure RCU update seen before callback registry. */

	/*
	 * Opportunistically note grace-period endings and beginnings.
	 * Note that we might see a beginning right after we see an
	 * end, but never vice versa, since this CPU has to pass through
	 * a quiescent state betweentimes.
	 */
	local_irq_save(flags);
	rdp = rsp->rda[smp_processor_id()];
	rcu_process_gp_end(rsp, rdp);
	check_for_new_grace_period(rsp, rdp);

	/* Add the callback to our list. */
	*rdp->nxttail[RCU_NEXT_TAIL] = head;
	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;

	/* Start a new grace period if one not already started. */
	if (!rcu_gp_in_progress(rsp)) {
		unsigned long nestflag;
		struct rcu_node *rnp_root = rcu_get_root(rsp);

		raw_spin_lock_irqsave(&rnp_root->lock, nestflag);
		rcu_start_gp(rsp, nestflag);  /* releases rnp_root->lock. */
	}

	/*
	 * Force the grace period if too many callbacks or too long waiting.
	 * Enforce hysteresis, and don't invoke force_quiescent_state()
	 * if some other CPU has recently done so.  Also, don't bother
	 * invoking force_quiescent_state() if the newly enqueued callback
	 * is the only one waiting for a grace period to complete.
	 */
	if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
		rdp->blimit = LONG_MAX;
		if (rsp->n_force_qs == rdp->n_force_qs_snap &&
		    *rdp->nxttail[RCU_DONE_TAIL] != head)
			force_quiescent_state(rsp, 0);
		rdp->n_force_qs_snap = rsp->n_force_qs;
		rdp->qlen_last_fqs_check = rdp->qlen;
	} else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
		force_quiescent_state(rsp, 1);
	local_irq_restore(flags);
}

/*
 * Queue an RCU-sched callback for invocation after a grace period.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_state);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);

/*
 * Queue an RCU callback for invocation after a quicker grace period.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_bh_state);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
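/*
 * Illustrative usage sketch (not part of the original file): a typical
 * caller embeds the rcu_head in its own structure and recovers the
 * enclosing object in the callback via container_of().  The names below
 * are hypothetical.
 *
 *	struct foo {
 *		struct list_head list;
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		struct foo *fp = container_of(head, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	static void foo_remove(struct foo *fp)
 *	{
 *		list_del_rcu(&fp->list);   (unlink under update-side lock)
 *		call_rcu_sched(&fp->rcu, foo_reclaim);
 *	}
 */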
/**
 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-sched
 * grace period has elapsed, in other words after all currently executing
 * rcu-sched read-side critical sections have completed.  These read-side
 * critical sections are delimited by rcu_read_lock_sched() and
 * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
 * local_irq_disable(), and so on may be used in place of
 * rcu_read_lock_sched().
 *
 * This means that all preempt_disable code sequences, including NMI and
 * hardware-interrupt handlers, in progress on entry will have completed
 * before this primitive returns.  However, this does not guarantee that
 * softirq handlers will have completed, since in some kernels, these
 * handlers can run in process context, and can block.
 *
 * This primitive provides the guarantees made by the (now removed)
 * synchronize_kernel() API.  In contrast, synchronize_rcu() only
 * guarantees that rcu_read_lock() sections will have completed.
 * In "classic RCU", these two guarantees happen to be one and
 * the same, but can differ in realtime RCU implementations.
 */
void synchronize_sched(void)
{
	struct rcu_synchronize rcu;

	if (rcu_blocking_is_gp())
		return;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu_sched(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(synchronize_sched);

/**
 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu_bh grace
 * period has elapsed, in other words after all currently executing rcu_bh
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
 * and may be nested.
 */
void synchronize_rcu_bh(void)
{
	struct rcu_synchronize rcu;

	if (rcu_blocking_is_gp())
		return;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu_bh(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
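/*
 * Illustrative usage sketch (not part of the original file): the
 * blocking update-side pattern replaces a published pointer and waits
 * for pre-existing readers before freeing the old version.  The names
 * below are hypothetical.
 *
 *	static void foo_update(struct foo *new_fp)
 *	{
 *		struct foo *old_fp;
 *
 *		spin_lock(&foo_lock);
 *		old_fp = foo_ptr;
 *		rcu_assign_pointer(foo_ptr, new_fp);
 *		spin_unlock(&foo_lock);
 *		synchronize_sched();   (waits out rcu_read_lock_sched() readers)
 *		kfree(old_fp);
 *	}
 */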
/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, for the specified type of RCU, returning 1 if so.
 * The checks are in order of increasing expense: checks that can be
 * carried out against CPU-local state are performed first.  However,
 * we must check for CPU stalls first, else we might not get a chance.
 */
static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
{
	struct rcu_node *rnp = rdp->mynode;

	rdp->n_rcu_pending++;

	/* Check for CPU stalls, if enabled. */
	check_cpu_stall(rsp, rdp);

	/* Is the RCU core waiting for a quiescent state from this CPU? */
	if (rdp->qs_pending && !rdp->passed_quiesc) {

		/*
		 * If force_quiescent_state() coming soon and this CPU
		 * needs a quiescent state, and this is either RCU-sched
		 * or RCU-bh, force a local reschedule.
		 */
		rdp->n_rp_qs_pending++;
		if (!rdp->preemptable &&
		    ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1,
				 jiffies))
			set_need_resched();
	} else if (rdp->qs_pending && rdp->passed_quiesc) {
		rdp->n_rp_report_qs++;
		return 1;
	}

	/* Does this CPU have callbacks ready to invoke? */
	if (cpu_has_callbacks_ready_to_invoke(rdp)) {
		rdp->n_rp_cb_ready++;
		return 1;
	}

	/* Has RCU gone idle with this CPU needing another grace period? */
	if (cpu_needs_another_gp(rsp, rdp)) {
		rdp->n_rp_cpu_needs_gp++;
		return 1;
	}

	/* Has another RCU grace period completed? */
	if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
		rdp->n_rp_gp_completed++;
		return 1;
	}

	/* Has a new RCU grace period started? */
	if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */
		rdp->n_rp_gp_started++;
		return 1;
	}

	/* Has an RCU GP gone long enough to send resched IPIs &c? */
	if (rcu_gp_in_progress(rsp) &&
	    ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) {
		rdp->n_rp_need_fqs++;
		return 1;
	}

	/* nothing to do */
	rdp->n_rp_need_nothing++;
	return 0;
}

/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, returning 1 if so.  This function is part of the
 * RCU implementation; it is -not- an exported member of the RCU API.
 */
static int rcu_pending(int cpu)
{
	return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) ||
	       __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) ||
	       rcu_preempt_pending(cpu);
}

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.
 */
static int rcu_needs_cpu_quick_check(int cpu)
{
	/* RCU callbacks either ready or pending? */
	return per_cpu(rcu_sched_data, cpu).nxtlist ||
	       per_cpu(rcu_bh_data, cpu).nxtlist ||
	       rcu_preempt_needs_cpu(cpu);
}

static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
static atomic_t rcu_barrier_cpu_count;
static DEFINE_MUTEX(rcu_barrier_mutex);
static struct completion rcu_barrier_completion;

static void rcu_barrier_callback(struct rcu_head *notused)
{
	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
		complete(&rcu_barrier_completion);
}

/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */
static void rcu_barrier_func(void *type)
{
	int cpu = smp_processor_id();
	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
	void (*call_rcu_func)(struct rcu_head *head,
			      void (*func)(struct rcu_head *head));

	atomic_inc(&rcu_barrier_cpu_count);
	call_rcu_func = type;
	call_rcu_func(head, rcu_barrier_callback);
}

/*
 * Orchestrate the specified type of RCU barrier, waiting for all
 * RCU callbacks of the specified type to complete.
 */
static void _rcu_barrier(struct rcu_state *rsp,
			 void (*call_rcu_func)(struct rcu_head *head,
					       void (*func)(struct rcu_head *head)))
{
	BUG_ON(in_interrupt());
	/* Take mutex to serialize concurrent rcu_barrier() requests. */
	mutex_lock(&rcu_barrier_mutex);
	init_completion(&rcu_barrier_completion);
	/*
	 * Initialize rcu_barrier_cpu_count to 1, then invoke
	 * rcu_barrier_func() on each CPU, so that each CPU also has
	 * incremented rcu_barrier_cpu_count.  Only then is it safe to
	 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
	 * might complete its grace period before all of the other CPUs
	 * did their increment, causing this function to return too
	 * early.
	 */
	atomic_set(&rcu_barrier_cpu_count, 1);
	preempt_disable(); /* stop CPU_DYING from filling orphan_cbs_list */
	rcu_adopt_orphan_cbs(rsp);
	on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1);
	preempt_enable(); /* CPU_DYING can again fill orphan_cbs_list */
	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
		complete(&rcu_barrier_completion);
	wait_for_completion(&rcu_barrier_completion);
	mutex_unlock(&rcu_barrier_mutex);
}

/**
 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
 */
void rcu_barrier_bh(void)
{
	_rcu_barrier(&rcu_bh_state, call_rcu_bh);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);

/**
 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
 */
void rcu_barrier_sched(void)
{
	_rcu_barrier(&rcu_sched_state, call_rcu_sched);
}
EXPORT_SYMBOL_GPL(rcu_barrier_sched);

/*
 * Do boot-time initialization of a CPU's per-CPU RCU data.
 */
static void __init
rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
{
	unsigned long flags;
	int i;
	struct rcu_data *rdp = rsp->rda[cpu];
	struct rcu_node *rnp = rcu_get_root(rsp);

	/* Set up local state, ensuring consistent view of global state. */
	raw_spin_lock_irqsave(&rnp->lock, flags);
	rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
	rdp->nxtlist = NULL;
	for (i = 0; i < RCU_NEXT_SIZE; i++)
		rdp->nxttail[i] = &rdp->nxtlist;
	rdp->qlen = 0;
#ifdef CONFIG_NO_HZ
	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
#endif /* #ifdef CONFIG_NO_HZ */
	rdp->cpu = cpu;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Initialize a CPU's per-CPU RCU data.  Note that only one online or
 * offline event can be happening at a given time.  Note also that we
 * can accept some slop in the rsp->completed access due to the fact
 * that this CPU cannot possibly have any RCU callbacks in flight yet.
 */
static void __cpuinit
rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
{
	unsigned long flags;
	unsigned long mask;
	struct rcu_data *rdp = rsp->rda[cpu];
	struct rcu_node *rnp = rcu_get_root(rsp);

	/* Set up local state, ensuring consistent view of global state. */
	raw_spin_lock_irqsave(&rnp->lock, flags);
	rdp->passed_quiesc = 0;  /* We could be racing with new GP, */
	rdp->qs_pending = 1;	 /* so set up to respond to current GP. */
	rdp->beenonline = 1;	 /* We have now been online. */
	rdp->preemptable = preemptable;
	rdp->qlen_last_fqs_check = 0;
	rdp->n_force_qs_snap = rsp->n_force_qs;
	rdp->blimit = blimit;
	raw_spin_unlock(&rnp->lock);		/* irqs remain disabled. */

	/*
	 * A new grace period might start here.  If so, we won't be part
	 * of it, but that is OK, as we are currently in a quiescent state.
	 */
/*
 * Initialize a CPU's per-CPU RCU data.  Note that only one online or
 * offline event can be happening at a given time.  Note also that we
 * can accept some slop in the rsp->completed access due to the fact
 * that this CPU cannot possibly have any RCU callbacks in flight yet.
 */
static void __cpuinit
rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
{
	unsigned long flags;
	unsigned long mask;
	struct rcu_data *rdp = rsp->rda[cpu];
	struct rcu_node *rnp = rcu_get_root(rsp);

	/* Set up local state, ensuring consistent view of global state. */
	raw_spin_lock_irqsave(&rnp->lock, flags);
	rdp->passed_quiesc = 0;	 /* We could be racing with new GP, */
	rdp->qs_pending = 1;	 /* so set up to respond to current GP. */
	rdp->beenonline = 1;	 /* We have now been online. */
	rdp->preemptable = preemptable;
	rdp->qlen_last_fqs_check = 0;
	rdp->n_force_qs_snap = rsp->n_force_qs;
	rdp->blimit = blimit;
	raw_spin_unlock(&rnp->lock);		/* irqs remain disabled. */

	/*
	 * A new grace period might start here.  If so, we won't be part
	 * of it, but that is OK, as we are currently in a quiescent state.
	 */

	/* Exclude any attempts to start a new GP on large systems. */
	raw_spin_lock(&rsp->onofflock);		/* irqs already disabled. */

	/* Add CPU to rcu_node bitmasks. */
	rnp = rdp->mynode;
	mask = rdp->grpmask;
	do {
		/* Exclude any attempts to start a new GP on small systems. */
		raw_spin_lock(&rnp->lock);	/* irqs already disabled. */
		rnp->qsmaskinit |= mask;
		mask = rnp->grpmask;
		if (rnp == rdp->mynode) {
			rdp->gpnum = rnp->completed; /* if GP in progress... */
			rdp->completed = rnp->completed;
			rdp->passed_quiesc_completed = rnp->completed - 1;
		}
		raw_spin_unlock(&rnp->lock);	/* irqs already disabled. */
		rnp = rnp->parent;
	} while (rnp != NULL && !(rnp->qsmaskinit & mask));

	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
}

static void __cpuinit rcu_online_cpu(int cpu)
{
	rcu_init_percpu_data(cpu, &rcu_sched_state, 0);
	rcu_init_percpu_data(cpu, &rcu_bh_state, 0);
	rcu_preempt_init_percpu_data(cpu);
}

/*
 * Handle CPU online/offline notification events.
 */
static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		rcu_online_cpu(cpu);
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		/*
		 * preempt_disable() in _rcu_barrier() prevents stop_machine(),
		 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
		 * returns, all online cpus have queued rcu_barrier_func().
		 * The dying CPU clears its cpu_online_mask bit and
		 * moves all of its RCU callbacks to ->orphan_cbs_list
		 * in the context of stop_machine(), so subsequent calls
		 * to _rcu_barrier() will adopt these callbacks and only
		 * then queue rcu_barrier_func() on all remaining CPUs.
		 */
		rcu_send_cbs_to_orphanage(&rcu_bh_state);
		rcu_send_cbs_to_orphanage(&rcu_sched_state);
		rcu_preempt_send_cbs_to_orphanage();
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		rcu_offline_cpu(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

/*
 * This function is invoked towards the end of the scheduler's initialization
 * process.  Before this is called, the idle task might contain
 * RCU read-side critical sections (during which time, this idle
 * task is booting the system).  After this function is called, the
 * idle tasks are prohibited from containing RCU read-side critical
 * sections.  This function also enables RCU lockdep checking.
 */
void rcu_scheduler_starting(void)
{
	WARN_ON(num_online_cpus() != 1);
	WARN_ON(nr_context_switches() > 0);
	rcu_scheduler_active = 1;
}
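/*
 * A concrete (hypothetical) example of the balanced computation below:
 * with NR_CPUS == 6 and a two-level tree whose ->levelcnt is { 1, 2 },
 * the leaf pass computes levelspread[1] = (6 + 2 - 1) / 2 = 3 (three
 * CPUs per leaf rcu_node) and the root pass computes
 * levelspread[0] = (2 + 1 - 1) / 1 = 2 (two leaves under the root), so
 * the six CPUs are spread evenly rather than packed 4 + 2 as the
 * CONFIG_RCU_FANOUT_EXACT variant would do with a fanout of 4.
 */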
/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
 */
#ifdef CONFIG_RCU_FANOUT_EXACT
static void __init rcu_init_levelspread(struct rcu_state *rsp)
{
	int i;

	for (i = NUM_RCU_LVLS - 1; i >= 0; i--)
		rsp->levelspread[i] = CONFIG_RCU_FANOUT;
}
#else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
static void __init rcu_init_levelspread(struct rcu_state *rsp)
{
	int ccur;
	int cprv;
	int i;

	cprv = NR_CPUS;
	for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
		ccur = rsp->levelcnt[i];
		rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
		cprv = ccur;
	}
}
#endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */

/*
 * Helper function for rcu_init() that initializes one rcu_state structure.
 */
static void __init rcu_init_one(struct rcu_state *rsp)
{
	static char *buf[] = { "rcu_node_level_0",
			       "rcu_node_level_1",
			       "rcu_node_level_2",
			       "rcu_node_level_3" };  /* Match MAX_RCU_LVLS */
	int cpustride = 1;
	int i;
	int j;
	struct rcu_node *rnp;

	BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */

	/* Initialize the level-tracking arrays. */

	for (i = 1; i < NUM_RCU_LVLS; i++)
		rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
	rcu_init_levelspread(rsp);

	/* Initialize the elements themselves, starting from the leaves. */

	for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
		cpustride *= rsp->levelspread[i];
		rnp = rsp->level[i];
		for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
			raw_spin_lock_init(&rnp->lock);
			lockdep_set_class_and_name(&rnp->lock,
						   &rcu_node_class[i], buf[i]);
			rnp->gpnum = 0;
			rnp->qsmask = 0;
			rnp->qsmaskinit = 0;
			rnp->grplo = j * cpustride;
			rnp->grphi = (j + 1) * cpustride - 1;
			if (rnp->grphi >= NR_CPUS)
				rnp->grphi = NR_CPUS - 1;
			if (i == 0) {
				rnp->grpnum = 0;
				rnp->grpmask = 0;
				rnp->parent = NULL;
			} else {
				rnp->grpnum = j % rsp->levelspread[i - 1];
				rnp->grpmask = 1UL << rnp->grpnum;
				rnp->parent = rsp->level[i - 1] +
					      j / rsp->levelspread[i - 1];
			}
			rnp->level = i;
			INIT_LIST_HEAD(&rnp->blocked_tasks[0]);
			INIT_LIST_HEAD(&rnp->blocked_tasks[1]);
			INIT_LIST_HEAD(&rnp->blocked_tasks[2]);
			INIT_LIST_HEAD(&rnp->blocked_tasks[3]);
		}
	}

	rnp = rsp->level[NUM_RCU_LVLS - 1];
	for_each_possible_cpu(i) {
		while (i > rnp->grphi)
			rnp++;
		rsp->rda[i]->mynode = rnp;
		rcu_boot_init_percpu_data(i, rsp);
	}
}

/*
 * Helper macro for rcu_init() and __rcu_init_preempt().  To be used
 * nowhere else!  Assigns leaf node pointers into each CPU's rcu_data
 * structure.
 */
#define RCU_INIT_FLAVOR(rsp, rcu_data) \
do { \
	int i; \
	\
	for_each_possible_cpu(i) { \
		(rsp)->rda[i] = &per_cpu(rcu_data, i); \
	} \
	rcu_init_one(rsp); \
} while (0)
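/*
 * Illustration only, not built: RCU_INIT_FLAVOR(&rcu_sched_state,
 * rcu_sched_data) expands to roughly the body of the hypothetical
 * function below, wiring each CPU's per-CPU rcu_data into the flavor's
 * ->rda[] array before the rcu_node tree is initialized.
 */
#if 0
static void example_init_sched_flavor(void)	/* hypothetical name */
{
	int i;

	for_each_possible_cpu(i)
		rcu_sched_state.rda[i] = &per_cpu(rcu_sched_data, i);
	rcu_init_one(&rcu_sched_state);
}
#endif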
void __init rcu_init(void)
{
	int cpu;

	rcu_bootup_announce();
	RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data);
	RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data);
	__rcu_init_preempt();
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);

	/*
	 * We don't need protection against CPU-hotplug here because
	 * this is called early in boot, before either interrupts
	 * or the scheduler are operational.
	 */
	cpu_notifier(rcu_cpu_notify, 0);
	for_each_online_cpu(cpu)
		rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
	check_cpu_stall_init();
}

#include "rcutree_plugin.h"