Lines Matching defs:rdp

153 static void rcu_report_exp_rdp(struct rcu_data *rdp);
155 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
156 static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
157 static bool rcu_rdp_cpu_online(struct rcu_data *rdp);
239 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
241 if (rcu_segcblist_is_enabled(&rdp->cblist))
242 return rcu_segcblist_n_cbs(&rdp->cblist);
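The lookup at line 239 shows the per-CPU access idiom that recurs throughout this listing: per_cpu_ptr(&rcu_data, cpu) to reach another CPU's rcu_data, and this_cpu_ptr(&rcu_data) (as at lines 573, 636, and elsewhere) for the local CPU. Below is a minimal userspace sketch of the shape of lines 239-242, with the per-CPU machinery reduced to a plain array and all names invented for illustration.

    #define NR_CPUS 4

    struct fake_rcu_data {                  /* stand-in for struct rcu_data */
            long n_cbs;                     /* stand-in for the segcblist count */
            int cblist_enabled;
    };

    static struct fake_rcu_data fake_rcu_data[NR_CPUS];    /* the "per-CPU" storage */

    /* Shape of lines 239-242: report 0 when the callback list is disabled. */
    static long fake_get_n_cbs_cpu(int cpu)
    {
            struct fake_rcu_data *rdp = &fake_rcu_data[cpu];    /* per_cpu_ptr() analogue */

            if (rdp->cblist_enabled)
                    return rdp->n_cbs;
            return 0;
    }
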
322 static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
324 return snap != rcu_dynticks_snap(rdp->cpu);
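Lines 322-324 compare a previously recorded snapshot of the per-CPU dynticks counter against its current value; a change implies the CPU has passed through an extended quiescent state since the snapshot was taken (the snapshot itself is recorded at line 772). Here is a small, self-contained sketch of that snapshot-then-compare idiom using C11 atomics; the counter and helper names are hypothetical stand-ins.

    #include <stdatomic.h>
    #include <stdbool.h>

    static _Atomic unsigned int fake_dynticks;  /* opaque counter bumped on EQS transitions */

    /* Record the current value, as dyntick_save_progress_counter() does at line 772. */
    static unsigned int fake_dynticks_snap(void)
    {
            return atomic_load_explicit(&fake_dynticks, memory_order_acquire);
    }

    /* Shape of line 324: any change since the snapshot means an EQS was passed through. */
    static bool fake_in_eqs_since(unsigned int snap)
    {
            return snap != fake_dynticks_snap();
    }
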
507 static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
573 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
582 if (do_nocb_deferred_wakeup(rdp) && need_resched()) {
636 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
645 if (!tick_nohz_full_cpu(rdp->cpu) ||
646 !READ_ONCE(rdp->rcu_urgent_qs) ||
647 READ_ONCE(rdp->rcu_forced_tick)) {
659 raw_spin_lock_rcu_node(rdp->mynode);
660 if (READ_ONCE(rdp->rcu_urgent_qs) && !rdp->rcu_forced_tick) {
663 WRITE_ONCE(rdp->rcu_forced_tick, true);
664 tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
666 raw_spin_unlock_rcu_node(rdp->mynode);
693 static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
695 raw_lockdep_assert_held_rcu_node(rdp->mynode);
696 WRITE_ONCE(rdp->rcu_urgent_qs, false);
697 WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
698 if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
699 tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
700 WRITE_ONCE(rdp->rcu_forced_tick, false);
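Lines 659-666 latch the scheduling-clock tick dependency at most once per urgent quiescent-state request (guarded by rcu_forced_tick under the node lock), and lines 693-700 drop both the urgency flags and the tick dependency once a quiescent state arrives. The following is a simplified, single-threaded sketch of that latch/unlatch pairing, with invented names and the tick calls reduced to comments.

    #include <stdbool.h>

    struct fake_cpu_state {
            bool urgent_qs;         /* stand-in for rdp->rcu_urgent_qs */
            bool forced_tick;       /* stand-in for rdp->rcu_forced_tick */
    };

    /* Latch the tick dependency at most once while a QS is urgently needed
     * (compare lines 660-664). */
    static void fake_force_tick_once(struct fake_cpu_state *s)
    {
            if (s->urgent_qs && !s->forced_tick) {
                    s->forced_tick = true;
                    /* tick_dep_set_cpu() would be called here in the kernel. */
            }
    }

    /* Drop urgency and the tick dependency once a QS is reported
     * (compare lines 696-700). */
    static void fake_disable_urgency(struct fake_cpu_state *s)
    {
            s->urgent_qs = false;
            if (s->forced_tick) {
                    /* tick_dep_clear_cpu() would be called here in the kernel. */
                    s->forced_tick = false;
            }
    }
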
755 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
758 if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
760 WRITE_ONCE(rdp->gpwrap, true);
761 if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
762 rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
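rcu_gpnum_ovf() (lines 755-762) defends against the grace-period sequence counter wrapping: it compares sequence numbers in a wraparound-safe way and, when needed, pushes rcu_iw_gp_seq a quarter of the counter space ahead of the node's gp_seq. A worked userspace example of wraparound-safe ordering follows, assuming the conventional definition of ULONG_CMP_LT (an assumption, since the macro is defined outside the lines shown here).

    #include <assert.h>
    #include <limits.h>

    /* Assumed definition of the wraparound-safe "a is before b" comparison. */
    #define ULONG_CMP_LT(a, b)      (ULONG_MAX / 2 < (a) - (b))

    int main(void)
    {
            unsigned long before = ULONG_MAX - 10;  /* sequence just prior to wrapping */
            unsigned long after  = 5;               /* sequence shortly after wrapping */

            /* Numerically after < before, yet it is "later" modulo the counter width. */
            assert(ULONG_CMP_LT(before, after));
            assert(!ULONG_CMP_LT(after, before));
            return 0;
    }
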
770 static int dyntick_save_progress_counter(struct rcu_data *rdp)
772 rdp->dynticks_snap = rcu_dynticks_snap(rdp->cpu);
773 if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
774 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
775 rcu_gpnum_ovf(rdp->mynode, rdp);
791 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
795 struct rcu_node *rnp = rdp->mynode;
805 if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
806 trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
807 rcu_gpnum_ovf(rnp, rdp);
829 if (WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp))) {
839 __func__, rdp->cpu, ".o"[rcu_rdp_cpu_online(rdp)],
840 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_state,
841 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_state);
857 if (!READ_ONCE(rdp->rcu_need_heavy_qs) &&
861 WRITE_ONCE(rdp->rcu_need_heavy_qs, true);
863 smp_store_release(&rdp->rcu_urgent_qs, true);
865 WRITE_ONCE(rdp->rcu_urgent_qs, true);
876 if (tick_nohz_full_cpu(rdp->cpu) &&
877 (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
879 WRITE_ONCE(rdp->rcu_urgent_qs, true);
880 WRITE_ONCE(rdp->last_fqs_resched, jiffies);
893 READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
894 WRITE_ONCE(rdp->last_fqs_resched, jiffies);
898 !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
899 (rnp->ffmask & rdp->grpmask)) {
900 rdp->rcu_iw_pending = true;
901 rdp->rcu_iw_gp_seq = rnp->gp_seq;
902 irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
905 if (rcu_cpu_stall_cputime && rdp->snap_record.gp_seq != rdp->gp_seq) {
906 int cpu = rdp->cpu;
912 rsrp = &rdp->snap_record;
916 rsrp->nr_hardirqs = kstat_cpu_irqs_sum(rdp->cpu);
917 rsrp->nr_softirqs = kstat_cpu_softirqs_sum(rdp->cpu);
918 rsrp->nr_csw = nr_context_switches_cpu(rdp->cpu);
920 rsrp->gp_seq = rdp->gp_seq;
928 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
939 * @rdp: The rcu_data corresponding to the CPU from which to start.
952 static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
968 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
976 trace_rcu_this_gp(rnp, rdp, gp_seq_req,
988 trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
1000 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
1003 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
1007 trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
1016 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1030 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1035 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
1111 static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1116 rcu_lockdep_assert_cblist_protected(rdp);
1120 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1123 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc"));
1136 if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
1137 ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
1140 if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1145 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc"));
1158 struct rcu_data *rdp)
1163 rcu_lockdep_assert_cblist_protected(rdp);
1165 if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
1167 (void)rcu_segcblist_accelerate(&rdp->cblist, c);
1171 needwake = rcu_accelerate_cbs(rnp, rdp);
1187 static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1189 rcu_lockdep_assert_cblist_protected(rdp);
1193 if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1200 rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
1203 return rcu_accelerate_cbs(rnp, rdp);
1211 struct rcu_data *rdp)
1213 rcu_lockdep_assert_cblist_protected(rdp);
1218 WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
1241 static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1245 const bool offloaded = rcu_rdp_is_offloaded(rdp);
1249 if (rdp->gp_seq == rnp->gp_seq)
1253 if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1254 unlikely(READ_ONCE(rdp->gpwrap))) {
1256 ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
1257 rdp->core_needs_qs = false;
1258 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
1261 ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
1262 if (rdp->core_needs_qs)
1263 rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
1267 if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1268 unlikely(READ_ONCE(rdp->gpwrap))) {
1275 need_qs = !!(rnp->qsmask & rdp->grpmask);
1276 rdp->cpu_no_qs.b.norm = need_qs;
1277 rdp->core_needs_qs = need_qs;
1278 zero_cpu_stall_ticks(rdp);
1280 rdp->gp_seq = rnp->gp_seq; /* Remember new grace-period state. */
1281 if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1282 WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1283 if (IS_ENABLED(CONFIG_PROVE_RCU) && READ_ONCE(rdp->gpwrap))
1284 WRITE_ONCE(rdp->last_sched_clock, jiffies);
1285 WRITE_ONCE(rdp->gpwrap, false);
1286 rcu_gpnum_ovf(rnp, rdp);
1290 static void note_gp_changes(struct rcu_data *rdp)
1297 rnp = rdp->mynode;
1298 if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1299 !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1304 needwake = __note_gp_changes(rnp, rdp);
1756 struct rcu_data *rdp;
1878 rdp = this_cpu_ptr(&rcu_data);
1882 if (rnp == rdp->mynode)
1883 (void)__note_gp_changes(rnp, rdp);
2062 struct rcu_data *rdp;
2103 rdp = this_cpu_ptr(&rcu_data);
2104 if (rnp == rdp->mynode)
2105 needgp = __note_gp_changes(rnp, rdp) || needgp;
2111 rdp = per_cpu_ptr(&rcu_data, cpu);
2112 check_cb_ovld_locked(rdp, rnp);
2130 rdp = this_cpu_ptr(&rcu_data);
2132 trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
2137 offloaded = rcu_rdp_is_offloaded(rdp);
2138 if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
2348 rcu_report_qs_rdp(struct rcu_data *rdp)
2355 WARN_ON_ONCE(rdp->cpu != smp_processor_id());
2356 rnp = rdp->mynode;
2358 if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
2359 rdp->gpwrap) {
2367 rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */
2371 mask = rdp->grpmask;
2372 rdp->core_needs_qs = false;
2382 if (!rcu_rdp_is_offloaded(rdp)) {
2388 WARN_ON_ONCE(rcu_accelerate_cbs(rnp, rdp));
2389 } else if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
2397 rcu_disable_urgency_upon_qs(rdp);
2402 rcu_nocb_lock_irqsave(rdp, flags);
2403 rcu_accelerate_cbs_unlocked(rnp, rdp);
2404 rcu_nocb_unlock_irqrestore(rdp, flags);
2416 rcu_check_quiescent_state(struct rcu_data *rdp)
2419 note_gp_changes(rdp);
2425 if (!rdp->core_needs_qs)
2432 if (rdp->cpu_no_qs.b.norm)
2439 rcu_report_qs_rdp(rdp);
2456 * period. Throttle as specified by rdp->blimit.
2458 static void rcu_do_batch(struct rcu_data *rdp)
2473 if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2475 rcu_segcblist_n_cbs(&rdp->cblist), 0);
2477 !rcu_segcblist_empty(&rdp->cblist),
2479 rcu_is_callbacks_kthread(rdp));
2494 rcu_nocb_lock_irqsave(rdp, flags);
2496 pending = rcu_segcblist_get_seglen(&rdp->cblist, RCU_DONE_TAIL);
2499 bl = max(rdp->blimit, pending >> div);
2500 if ((in_serving_softirq() || rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING) &&
2511 rcu_segcblist_n_cbs(&rdp->cblist), bl);
2512 rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2513 if (rcu_rdp_is_offloaded(rdp))
2514 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2516 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued"));
2517 rcu_nocb_unlock_irqrestore(rdp, flags);
2561 if (rdp->rcu_cpu_kthread_status == RCU_KTHREAD_RUNNING &&
2563 rdp->rcu_cpu_has_work = 1;
2569 rcu_nocb_lock_irqsave(rdp, flags);
2570 rdp->n_cbs_invoked += count;
2572 is_idle_task(current), rcu_is_callbacks_kthread(rdp));
2575 rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2576 rcu_segcblist_add_len(&rdp->cblist, -count);
2579 count = rcu_segcblist_n_cbs(&rdp->cblist);
2580 if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2581 rdp->blimit = blimit;
2584 if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2585 rdp->qlen_last_fqs_check = 0;
2586 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2587 } else if (count < rdp->qlen_last_fqs_check - qhimark)
2588 rdp->qlen_last_fqs_check = count;
2594 empty = rcu_segcblist_empty(&rdp->cblist);
2598 WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0);
2599 WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0);
2601 rcu_nocb_unlock_irqrestore(rdp, flags);
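rcu_do_batch() (line 2458 onward) extracts the ready callbacks and invokes them in batches bounded by a limit derived from rdp->blimit (lines 2496-2500), then re-tunes blimit and the force-quiescent-state bookkeeping afterward (lines 2579-2588) so no single pass runs unbounded work. Below is a minimal, self-contained userspace sketch of that batch-limited drain; the list type and helper names are invented for illustration.

    #include <stdio.h>
    #include <stdlib.h>

    struct fake_cb {
            struct fake_cb *next;
            void (*func)(struct fake_cb *);
    };

    static void fake_free_cb(struct fake_cb *cb)
    {
            free(cb);
    }

    /* Invoke at most 'bl' callbacks from *listp; return how many ran. */
    static long fake_do_batch(struct fake_cb **listp, long bl)
    {
            long count = 0;
            struct fake_cb *cb;

            while ((cb = *listp) != NULL && count < bl) {
                    *listp = cb->next;      /* dequeue before invoking */
                    cb->func(cb);           /* the callback may free its own memory */
                    count++;
            }
            return count;                   /* the caller re-tunes its limits from this */
    }

    int main(void)
    {
            struct fake_cb *head = NULL;

            for (int i = 0; i < 10; i++) {
                    struct fake_cb *cb = malloc(sizeof(*cb));

                    cb->func = fake_free_cb;
                    cb->next = head;
                    head = cb;
            }
            /* Remaining callbacks would be drained on later passes. */
            printf("invoked %ld of 10 queued callbacks this pass\n",
                   fake_do_batch(&head, 4));
            return 0;
    }
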
2652 static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2682 struct rcu_data *rdp;
2685 rdp = per_cpu_ptr(&rcu_data, cpu);
2686 ret = f(rdp);
2688 mask |= rdp->grpmask;
2689 rcu_disable_urgency_upon_qs(rdp);
2692 rsmask |= rdp->grpmask;
2758 struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2759 struct rcu_node *rnp = rdp->mynode;
2777 const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist);
2782 WARN_ON_ONCE(!rdp->beenonline);
2793 rcu_check_quiescent_state(rdp);
2797 rcu_segcblist_is_enabled(&rdp->cblist) && do_batch) {
2798 rcu_nocb_lock_irqsave(rdp, flags);
2799 if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2800 rcu_accelerate_cbs_unlocked(rnp, rdp);
2801 rcu_nocb_unlock_irqrestore(rdp, flags);
2804 rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2807 if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2809 rcu_do_batch(rdp);
2811 if (rcu_segcblist_ready_cbs(&rdp->cblist))
2816 do_nocb_deferred_wakeup(rdp);
2821 queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
2938 static void rcutree_enqueue(struct rcu_data *rdp, struct rcu_head *head, rcu_callback_t func)
2940 rcu_segcblist_enqueue(&rdp->cblist, head);
2944 rcu_segcblist_n_cbs(&rdp->cblist));
2947 rcu_segcblist_n_cbs(&rdp->cblist));
2948 trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
2954 static void call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2957 rcutree_enqueue(rdp, head, func);
2976 if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2977 rdp->qlen_last_fqs_check + qhimark)) {
2980 note_gp_changes(rdp);
2984 rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
2987 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
2988 if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap &&
2989 rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2991 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
2992 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
3010 static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
3015 if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
3016 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
3018 WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
3033 static void check_cb_ovld(struct rcu_data *rdp)
3035 struct rcu_node *const rnp = rdp->mynode;
3038 ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
3039 !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
3042 check_cb_ovld_locked(rdp, rnp);
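check_cb_ovld_locked() (lines 3010-3018) sets or clears this CPU's bit in the node's cbovldmask according to whether its callback count has reached the overload threshold, and check_cb_ovld() (lines 3033-3042) avoids taking the node lock when the bit already agrees with the count. A small sketch of that threshold-tracked bitmask pattern follows, with invented names standing in for the rcu_node and rcu_data fields.

    #include <stdbool.h>

    struct fake_node {
            unsigned long cbovldmask;       /* one bit per CPU hanging off this node */
    };

    struct fake_cpu {
            struct fake_node *mynode;
            unsigned long grpmask;          /* this CPU's bit within mynode */
            long n_cbs;
    };

    /* Shape of lines 3015-3018: the bit tracks "count has reached the threshold". */
    static void fake_check_ovld_locked(struct fake_cpu *cp, long threshold)
    {
            if (cp->n_cbs >= threshold)
                    cp->mynode->cbovldmask |= cp->grpmask;
            else
                    cp->mynode->cbovldmask &= ~cp->grpmask;
    }

    /* Shape of lines 3038-3039: skip the slow path when the bit already agrees. */
    static bool fake_ovld_bit_agrees(struct fake_cpu *cp, long threshold)
    {
            return (cp->n_cbs >= threshold) ==
                   !!(cp->mynode->cbovldmask & cp->grpmask);
    }
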
3052 struct rcu_data *rdp;
3074 rdp = this_cpu_ptr(&rcu_data);
3078 if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
3084 if (rcu_segcblist_empty(&rdp->cblist))
3085 rcu_segcblist_init(&rdp->cblist);
3088 check_cb_ovld(rdp);
3090 if (unlikely(rcu_rdp_is_offloaded(rdp)))
3091 call_rcu_nocb(rdp, head, func, flags, lazy);
3093 call_rcu_core(rdp, head, func, flags);
4085 struct rcu_data *rdp;
4090 rdp = this_cpu_ptr(&rcu_data);
4091 rnp = rdp->mynode;
4099 needwake = rcu_start_this_gp(rnp, rdp, rcu_seq_snap(&rcu_state.gp_seq));
4303 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
4304 struct rcu_node *rnp = rdp->mynode;
4309 check_cpu_stall(rdp);
4312 if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE))
4321 if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
4325 if (!rcu_rdp_is_offloaded(rdp) &&
4326 rcu_segcblist_ready_cbs(&rdp->cblist))
4330 if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
4331 !rcu_rdp_is_offloaded(rdp) &&
4332 !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
4336 if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
4337 unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
4377 * If needed, entrain an rcu_barrier() callback on rdp->cblist.
4379 static void rcu_barrier_entrain(struct rcu_data *rdp)
4382 unsigned long lseq = READ_ONCE(rdp->barrier_seq_snap);
4390 rdp->barrier_head.func = rcu_barrier_callback;
4391 debug_rcu_head_queue(&rdp->barrier_head);
4392 rcu_nocb_lock(rdp);
4398 was_alldone = rcu_rdp_is_offloaded(rdp) && !rcu_segcblist_pend_cbs(&rdp->cblist);
4399 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies, false));
4400 wake_nocb = was_alldone && rcu_segcblist_pend_cbs(&rdp->cblist);
4401 if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
4404 debug_rcu_head_unqueue(&rdp->barrier_head);
4407 rcu_nocb_unlock(rdp);
4409 wake_nocb_gp(rdp, false);
4410 smp_store_release(&rdp->barrier_seq_snap, gseq);
4419 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4422 WARN_ON_ONCE(cpu != rdp->cpu);
4425 rcu_barrier_entrain(rdp);
4442 struct rcu_data *rdp;
4481 rdp = per_cpu_ptr(&rcu_data, cpu);
4483 if (smp_load_acquire(&rdp->barrier_seq_snap) == gseq)
4486 if (!rcu_segcblist_n_cbs(&rdp->cblist)) {
4487 WRITE_ONCE(rdp->barrier_seq_snap, gseq);
4492 if (!rcu_rdp_cpu_online(rdp)) {
4493 rcu_barrier_entrain(rdp);
4494 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
4504 WARN_ON_ONCE(READ_ONCE(rdp->barrier_seq_snap) != gseq);
4523 rdp = per_cpu_ptr(&rcu_data, cpu);
4525 WRITE_ONCE(rdp->barrier_seq_snap, gseq);
4625 static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
4627 return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
4632 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4634 return rcu_rdp_cpu_online(rdp);
4654 struct rcu_data *rdp;
4660 rdp = this_cpu_ptr(&rcu_data);
4663 * in rcutree_report_cpu_starting() and thus has an excuse for rdp->grpmask
4668 if (rcu_rdp_cpu_online(rdp) || arch_spin_is_locked(&rcu_state.ofl_lock))
4764 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4767 rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
4768 INIT_WORK(&rdp->strict_work, strict_work_handler);
4771 rdp->barrier_seq_snap = rcu_state.barrier_sequence;
4772 rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
4773 rdp->rcu_ofl_gp_state = RCU_GP_CLEANED;
4774 rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
4775 rdp->rcu_onl_gp_state = RCU_GP_CLEANED;
4776 rdp->last_sched_clock = jiffies;
4777 rdp->cpu = cpu;
4778 rcu_boot_init_nocb_percpu_data(rdp);
4855 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4860 rdp->qlen_last_fqs_check = 0;
4861 rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
4862 rdp->blimit = blimit;
4870 if (!rcu_segcblist_is_enabled(&rdp->cblist))
4871 rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */
4878 rnp = rdp->mynode;
4880 rdp->gp_seq = READ_ONCE(rnp->gp_seq);
4881 rdp->gp_seq_needed = rdp->gp_seq;
4882 rdp->cpu_no_qs.b.norm = true;
4883 rdp->core_needs_qs = false;
4884 rdp->rcu_iw_pending = false;
4885 rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler);
4886 rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
4887 trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
4914 struct rcu_data *rdp;
4918 rdp = per_cpu_ptr(&rcu_data, cpu);
4919 rnp = rdp->mynode;
4963 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4965 return smp_load_acquire(&rdp->beenonline);
4975 struct rcu_data *rdp;
4978 rdp = per_cpu_ptr(&rcu_data, cpu);
4979 rnp = rdp->mynode;
4981 rnp->ffmask |= rdp->grpmask;
5010 struct rcu_data *rdp;
5015 rdp = per_cpu_ptr(&rcu_data, cpu);
5016 if (rdp->cpu_started)
5018 rdp->cpu_started = true;
5020 rnp = rdp->mynode;
5021 mask = rdp->grpmask;
5033 rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
5034 rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
5035 rdp->rcu_onl_gp_state = READ_ONCE(rcu_state.gp_state);
5043 rcu_disable_urgency_upon_qs(rdp);
5050 smp_store_release(&rdp->beenonline, true);
5068 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
5069 struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
5077 do_nocb_deferred_wakeup(rdp);
5082 mask = rdp->grpmask;
5085 rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
5086 rdp->rcu_ofl_gp_state = READ_ONCE(rcu_state.gp_state);
5089 rcu_disable_urgency_upon_qs(rdp);
5096 rdp->cpu_started = false;
5110 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
5113 if (rcu_rdp_is_offloaded(rdp) ||
5114 rcu_segcblist_empty(&rdp->cblist))
5118 WARN_ON_ONCE(rcu_rdp_cpu_online(rdp));
5119 rcu_barrier_entrain(rdp);
5126 needwake = rcu_advance_cbs(my_rnp, rdp) ||
5128 rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
5131 rcu_segcblist_disable(&rdp->cblist);
5145 WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
5146 !rcu_segcblist_empty(&rdp->cblist),
5148 cpu, rcu_segcblist_n_cbs(&rdp->cblist),
5149 rcu_segcblist_first_cb(&rdp->cblist));
5174 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
5175 struct rcu_node *rnp = rdp->mynode;
5177 blkd = !!(READ_ONCE(rnp->qsmask) & rdp->grpmask);
5190 struct rcu_data *rdp;
5193 rdp = per_cpu_ptr(&rcu_data, cpu);
5194 rnp = rdp->mynode;
5196 rnp->ffmask &= ~rdp->grpmask;
5240 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
5265 rcu_spawn_rnp_kthreads(rdp->mynode);