Lines matching refs:rnp (each entry below is the source line number followed by the matching source line)

13 static int rcu_print_task_exp_stall(struct rcu_node *rnp);
14 static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp);
83 struct rcu_node *rnp;
95 rcu_for_each_leaf_node(rnp) {
96 raw_spin_lock_irqsave_rcu_node(rnp, flags);
97 if (rnp->expmaskinit == rnp->expmaskinitnext) {
98 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
103 oldmask = rnp->expmaskinit;
104 rnp->expmaskinit = rnp->expmaskinitnext;
105 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
112 mask = rnp->grpmask;
113 rnp_up = rnp->parent;
136 struct rcu_node *rnp;
139 rcu_for_each_node_breadth_first(rnp) {
140 raw_spin_lock_irqsave_rcu_node(rnp, flags);
141 WARN_ON_ONCE(rnp->expmask);
142 WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
143 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
153 static bool sync_rcu_exp_done(struct rcu_node *rnp)
155 raw_lockdep_assert_held_rcu_node(rnp);
156 return READ_ONCE(rnp->exp_tasks) == NULL &&
157 READ_ONCE(rnp->expmask) == 0;
164 static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
169 raw_spin_lock_irqsave_rcu_node(rnp, flags);
170 ret = sync_rcu_exp_done(rnp);
171 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
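
The sync_rcu_exp_done() and sync_rcu_exp_done_unlocked() fragments above show a recurring pairing: a core predicate that requires the rcu_node lock to be held, plus a thin wrapper that acquires and releases that lock around a single call. A minimal user-space sketch of the pairing, using a pthread mutex in place of the rcu_node lock; struct xnode and the xnode_done*() names are hypothetical, not kernel identifiers.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the relevant struct rcu_node fields. */
struct xnode {
	pthread_mutex_t lock;
	unsigned long expmask;	/* CPUs still blocking the expedited GP */
	void *exp_tasks;	/* first blocked reader, or NULL */
};

/* Core predicate: caller must hold ->lock (cf. sync_rcu_exp_done()). */
static bool xnode_done(struct xnode *np)
{
	return np->exp_tasks == NULL && np->expmask == 0;
}

/* Wrapper: take ->lock only for the check (cf. sync_rcu_exp_done_unlocked()). */
static bool xnode_done_unlocked(struct xnode *np)
{
	bool ret;

	pthread_mutex_lock(&np->lock);
	ret = xnode_done(np);
	pthread_mutex_unlock(&np->lock);
	return ret;
}

int main(void)
{
	struct xnode n = { .lock = PTHREAD_MUTEX_INITIALIZER, .expmask = 0x1 };

	printf("done? %d\n", xnode_done_unlocked(&n));	/* 0: CPU 0 still pending */
	n.expmask = 0;
	printf("done? %d\n", xnode_done_unlocked(&n));	/* 1: nothing left to wait for */
	return 0;
}
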
184 static void __rcu_report_exp_rnp(struct rcu_node *rnp,
186 __releases(rnp->lock)
190 raw_lockdep_assert_held_rcu_node(rnp);
192 if (!sync_rcu_exp_done(rnp)) {
193 if (!rnp->expmask)
194 rcu_initiate_boost(rnp, flags);
196 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
199 if (rnp->parent == NULL) {
200 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
206 mask = rnp->grpmask;
207 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
208 rnp = rnp->parent;
209 raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
210 WARN_ON_ONCE(!(rnp->expmask & mask));
211 WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
219 static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
223 raw_spin_lock_irqsave_rcu_node(rnp, flags);
224 __rcu_report_exp_rnp(rnp, wake, flags);
231 static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
238 raw_spin_lock_irqsave_rcu_node(rnp, flags);
239 if (!(rnp->expmask & mask)) {
240 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
243 WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
244 for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
251 __rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
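
The __rcu_report_exp_rnp() and rcu_report_exp_cpu_mult() fragments above implement bottom-up reporting: CPU bits are cleared from a leaf's ->expmask, and whenever a level's mask empties, that node's ->grpmask bit is cleared in its parent, repeating until a still-busy level or the root is reached. A simplified, single-threaded user-space analogue of that upward walk; struct rnode and report_exp() are hypothetical names, and the kernel's handling of blocked readers, priority boosting, locking, and wakeups is omitted.

#include <assert.h>
#include <stdio.h>

/* Hypothetical tree node; nothing here is the kernel's actual layout. */
struct rnode {
	struct rnode *parent;
	unsigned long grpmask;	/* this node's bit in parent->expmask */
	unsigned long expmask;	/* children (or CPUs) not yet quiescent */
};

/*
 * Clear @mask at @np and keep propagating upward while each level
 * empties, stopping at a still-busy level or at the root (the shape of
 * the loop in __rcu_report_exp_rnp(), minus locking and wakeups).
 */
static void report_exp(struct rnode *np, unsigned long mask)
{
	for (;;) {
		np->expmask &= ~mask;
		if (np->expmask || !np->parent)
			return;			/* level still busy, or reached root */
		mask = np->grpmask;		/* clear our bit in the parent next */
		np = np->parent;
	}
}

int main(void)
{
	struct rnode root = { .expmask = 0x3 };			/* two leaves */
	struct rnode leaf = { .parent = &root, .grpmask = 0x1,
			      .expmask = 0x5 };			/* CPUs 0 and 2 */

	report_exp(&leaf, 0x1);		/* CPU 0 done: leaf still waits on CPU 2 */
	assert(root.expmask == 0x3);
	report_exp(&leaf, 0x4);		/* CPU 2 done: leaf empties, root bit clears */
	assert(root.expmask == 0x2);
	printf("root.expmask = %#lx\n", root.expmask);
	return 0;
}

Climbing only when a level empties is what keeps reporting cheap: most reports take a single leaf lock, and only the final report for the whole tree reaches the root.
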
284 struct rcu_node *rnp = rdp->mynode;
288 if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
289 (rnp == rnp_root ||
301 for (; rnp != NULL; rnp = rnp->parent) {
306 spin_lock(&rnp->exp_lock);
307 if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {
310 spin_unlock(&rnp->exp_lock);
311 trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
312 rnp->grplo, rnp->grphi,
314 wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
318 WRITE_ONCE(rnp->exp_seq_rq, s); /* Followers can wait on us. */
319 spin_unlock(&rnp->exp_lock);
320 trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
321 rnp->grplo, rnp->grphi, TPS("nxtlvl"));
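
The exp_funnel_lock() fragments above show funnel locking: each requester walks from its leaf toward the root, and at each level either finds that a grace period covering its sequence number s has already been requested there (so it can stop and wait) or records s in ->exp_seq_rq and climbs, so at most one task per needed grace period reaches the root. A condensed sketch of that per-level decision under stated assumptions: struct fnode and funnel_lock() are hypothetical, a plain >= stands in for the wraparound-safe ULONG_CMP_GE(), and the per-node wait queues and tracepoints are left out.

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

/* Hypothetical per-level record of the newest grace period requested here. */
struct fnode {
	struct fnode *parent;
	pthread_mutex_t exp_lock;
	unsigned long exp_seq_rq;
};

/*
 * Walk from @np toward the root.  Return true if the caller must drive
 * the grace period for sequence @s itself, false if a covering request
 * already passed through some level (so the caller just waits for it).
 */
static bool funnel_lock(struct fnode *np, unsigned long s)
{
	for (; np; np = np->parent) {
		pthread_mutex_lock(&np->exp_lock);
		if (np->exp_seq_rq >= s) {
			pthread_mutex_unlock(&np->exp_lock);
			return false;	/* someone ahead of us covers @s */
		}
		np->exp_seq_rq = s;	/* followers at this level wait on us */
		pthread_mutex_unlock(&np->exp_lock);
	}
	return true;			/* reached the root: we drive the GP */
}

int main(void)
{
	struct fnode root = { .exp_lock = PTHREAD_MUTEX_INITIALIZER };
	struct fnode leaf = { .parent = &root,
			      .exp_lock = PTHREAD_MUTEX_INITIALIZER };

	assert(funnel_lock(&leaf, 8));	/* first requester climbs to the root */
	assert(!funnel_lock(&leaf, 8));	/* later requester stops at the leaf */
	return 0;
}

The payoff is that contention collapses near the leaves: concurrent requesters for the same grace period stop at the first level where an earlier requester already recorded the sequence number, so the upper levels are rarely touched.
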
345 struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);
347 raw_spin_lock_irqsave_rcu_node(rnp, flags);
351 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
357 !(rnp->qsmaskinitnext & mask)) {
367 mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
374 if (rcu_preempt_has_tasks(rnp))
375 WRITE_ONCE(rnp->exp_tasks, rnp->blkd_tasks.next);
376 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
379 for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
400 raw_spin_lock_irqsave_rcu_node(rnp, flags);
401 if ((rnp->qsmaskinitnext & mask) &&
402 (rnp->expmask & mask)) {
404 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
410 if (rnp->expmask & mask)
412 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
416 rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
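
The sync_rcu_exp_select_node_cpus() fragments above partition a leaf's ->expmask into CPUs that can be reported quiescent immediately (collected in mask_ofl_test) and CPUs that must be sent an IPI (mask_ofl_ipi = expmask & ~mask_ofl_test). A toy sketch of that partition; cpu_already_quiescent() and the sample masks are made up, and the real test also snapshots the CPU's idle/extended-quiescent-state status rather than just an online mask.

#include <stdio.h>

/*
 * Hypothetical predicate: the CPU is already quiescent (here: simply
 * offline), so no IPI is needed.
 */
static int cpu_already_quiescent(int cpu, unsigned long online_mask)
{
	return !(online_mask & (1UL << cpu));
}

int main(void)
{
	unsigned long expmask = 0x2f;	/* CPUs 0-3 and 5 owe a quiescent state */
	unsigned long online  = 0x27;	/* CPUs 0-2 and 5 are online */
	unsigned long mask_ofl_test = 0, mask_ofl_ipi;
	int cpu;

	/* Partition expmask: report immediately vs. send an IPI. */
	for (cpu = 0; cpu < 8; cpu++) {
		if (!(expmask & (1UL << cpu)))
			continue;
		if (cpu_already_quiescent(cpu, online))
			mask_ofl_test |= 1UL << cpu;
	}
	mask_ofl_ipi = expmask & ~mask_ofl_test;

	printf("report now: %#lx, send IPIs: %#lx\n", mask_ofl_test, mask_ofl_ipi);
	return 0;
}
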
434 static inline bool rcu_exp_par_worker_started(struct rcu_node *rnp)
436 return !!READ_ONCE(rnp->exp_kworker);
439 static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp)
441 kthread_init_work(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
447 kthread_queue_work(READ_ONCE(rnp->exp_kworker), &rnp->rew.rew_work);
450 static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp)
452 kthread_flush_work(&rnp->rew.rew_work);
478 struct rcu_node *rnp;
485 rcu_for_each_leaf_node(rnp) {
486 rnp->exp_need_flush = false;
487 if (!READ_ONCE(rnp->expmask))
489 if (!rcu_exp_par_worker_started(rnp) ||
491 rcu_is_last_leaf_node(rnp)) {
493 sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
496 sync_rcu_exp_select_cpus_queue_work(rnp);
497 rnp->exp_need_flush = true;
501 rcu_for_each_leaf_node(rnp)
502 if (rnp->exp_need_flush)
503 sync_rcu_exp_select_cpus_flush_work(rnp);
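
The sync_rcu_exp_select_cpus() fragments above fan the per-leaf CPU selection out to one worker per busy leaf, run the last leaf (or any leaf whose worker is unavailable) inline, and only then flush the queued work, so the leaves are processed in parallel. A loose pthread-based analogue of that queue-then-flush pattern; struct leaf, process_leaf(), and the inline-last-leaf shortcut are illustrative stand-ins, not the kernel's kthread_worker machinery.

#include <pthread.h>
#include <stdio.h>

#define NLEAVES 4

/* Hypothetical leaf with some pending selection work. */
struct leaf {
	pthread_t worker;
	int id;
	int pending;
	int need_flush;		/* a worker was queued for this leaf */
};

/* Stand-in for sync_rcu_exp_select_node_cpus(). */
static void process_leaf(struct leaf *lp)
{
	printf("selecting CPUs for leaf %d\n", lp->id);
}

static void *leaf_worker(void *arg)
{
	process_leaf(arg);
	return NULL;
}

int main(void)
{
	struct leaf leaves[NLEAVES] = {
		{ .id = 0, .pending = 1 }, { .id = 1 },
		{ .id = 2, .pending = 1 }, { .id = 3, .pending = 1 },
	};
	int i;

	/* Pass 1: hand each busy leaf to its own worker; run the last
	 * leaf inline rather than queueing it. */
	for (i = 0; i < NLEAVES; i++) {
		if (!leaves[i].pending)
			continue;		/* cf. "if (!READ_ONCE(rnp->expmask))" */
		if (i == NLEAVES - 1) {
			process_leaf(&leaves[i]);
			continue;
		}
		pthread_create(&leaves[i].worker, NULL, leaf_worker, &leaves[i]);
		leaves[i].need_flush = 1;
	}

	/* Pass 2: wait ("flush") only for leaves that actually queued a worker. */
	for (i = 0; i < NLEAVES; i++)
		if (leaves[i].need_flush)
			pthread_join(leaves[i].worker, NULL);
	return 0;
}
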
539 struct rcu_node *rnp;
549 rcu_for_each_leaf_node(rnp) {
550 raw_spin_lock_irqsave_rcu_node(rnp, flags);
551 mask = READ_ONCE(rnp->expmask);
552 for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
560 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
580 rcu_for_each_leaf_node(rnp) {
581 ndetected += rcu_print_task_exp_stall(rnp);
582 for_each_leaf_node_possible_cpu(rnp, cpu) {
585 mask = leaf_node_cpu_bit(rnp, cpu);
586 if (!(READ_ONCE(rnp->expmask) & mask))
592 "o."[!!(rdp->grpmask & rnp->expmaskinit)],
593 "N."[!!(rdp->grpmask & rnp->expmaskinitnext)],
603 rcu_for_each_node_breadth_first(rnp) {
604 if (rnp == rnp_root)
606 if (sync_rcu_exp_done_unlocked(rnp))
609 rnp->level, rnp->grplo, rnp->grphi,
610 data_race(rnp->expmask),
611 ".T"[!!data_race(rnp->exp_tasks)]);
615 rcu_for_each_leaf_node(rnp) {
616 for_each_leaf_node_possible_cpu(rnp, cpu) {
617 mask = leaf_node_cpu_bit(rnp, cpu);
618 if (!(READ_ONCE(rnp->expmask) & mask))
624 rcu_exp_print_detail_task_stall_rnp(rnp);
639 struct rcu_node *rnp;
650 rcu_for_each_node_breadth_first(rnp) {
651 if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
652 spin_lock(&rnp->exp_lock);
654 if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
655 WRITE_ONCE(rnp->exp_seq_rq, s);
656 spin_unlock(&rnp->exp_lock);
659 wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
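
The rcu_exp_wait_wake() fragment above, like the wait_event() at line 954, indexes one of four per-node wait queues with rcu_seq_ctr(s) & 0x3, so waiters for successive expedited grace periods are segregated onto different queues. A tiny sketch of the sequence arithmetic being assumed; SEQ_STATE_BITS and the helper names are hypothetical, and the value 2 is an assumption about how many low state bits the counter sits above (cf. RCU_SEQ_CTR_SHIFT in kernel/rcu/rcu.h).

#include <assert.h>

/*
 * Hypothetical analogue of rcu_seq_ctr(): the low SEQ_STATE_BITS of a
 * sequence number carry grace-period state, the rest is a counter.
 */
#define SEQ_STATE_BITS	2

static unsigned long seq_ctr(unsigned long s)
{
	return s >> SEQ_STATE_BITS;
}

/* Which of the four per-node wait queues does sequence @s map to? */
static unsigned int exp_wq_index(unsigned long s)
{
	return seq_ctr(s) & 0x3;
}

int main(void)
{
	/* Successive grace periods land on different queues, so a wakeup
	 * for one grace period is not consumed by waiters for the next. */
	assert(exp_wq_index(1UL << SEQ_STATE_BITS) !=
	       exp_wq_index(2UL << SEQ_STATE_BITS));
	return 0;
}
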
692 struct rcu_node *rnp = rdp->mynode;
725 raw_spin_lock_irqsave_rcu_node(rnp, flags);
726 if (rnp->expmask & rdp->grpmask) {
730 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
748 static int rcu_print_task_exp_stall(struct rcu_node *rnp)
754 raw_spin_lock_irqsave_rcu_node(rnp, flags);
755 if (!rnp->exp_tasks) {
756 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
759 t = list_entry(rnp->exp_tasks->prev,
761 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
765 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
774 static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp)
781 raw_spin_lock_irqsave_rcu_node(rnp, flags);
782 if (!READ_ONCE(rnp->exp_tasks)) {
783 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
786 t = list_entry(rnp->exp_tasks->prev,
788 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
796 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
815 struct rcu_node *rnp = rdp->mynode;
818 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
836 struct rcu_node *rnp;
839 rnp = rdp->mynode;
842 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
866 static int rcu_print_task_exp_stall(struct rcu_node *rnp)
876 static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp)
906 struct rcu_node *rnp;
953 rnp = rcu_get_root();
954 wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
971 struct rcu_node *rnp = container_of(wp, struct rcu_node, exp_poll_wq);
974 raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
975 s = rnp->exp_seq_poll_rq;
976 rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
977 raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
986 raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
987 s = rnp->exp_seq_poll_rq;
989 rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
990 raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
1006 struct rcu_node *rnp;
1011 rnp = rdp->mynode;
1013 raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
1016 rnp->exp_seq_poll_rq = s;
1017 queue_work(rcu_gp_wq, &rnp->exp_poll_wq);
1021 raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
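
The last two groups of fragments (sync_rcu_do_polled_gp() and start_poll_synchronize_rcu_expedited()) show the polled-interface hand-off: the requester records the sequence number it needs in ->exp_seq_poll_rq under ->exp_poll_lock and queues ->exp_poll_wq, and the worker later consumes the request, resets the field to RCU_GET_STATE_COMPLETED, and drives the grace period if it is still needed. A small user-space sketch of that record/consume hand-off; struct poll_state, poll_request(), poll_consume(), and REQ_NONE are hypothetical names, and the actual queue_work() and grace-period driving are only noted in comments.

#include <pthread.h>

#define REQ_NONE	(~0UL)		/* stands in for RCU_GET_STATE_COMPLETED */

/* Hypothetical per-node polling state. */
struct poll_state {
	pthread_mutex_t lock;
	unsigned long seq_rq;		/* requested sequence, or REQ_NONE */
};

/* Requester side: record the sequence that must complete; the real code
 * then queues the worker (queue_work() on ->exp_poll_wq above). */
static void poll_request(struct poll_state *ps, unsigned long s)
{
	pthread_mutex_lock(&ps->lock);
	ps->seq_rq = s;
	pthread_mutex_unlock(&ps->lock);
}

/* Worker side: consume the request and reset the slot; the real worker
 * then drives an expedited grace period if the request is still needed. */
static unsigned long poll_consume(struct poll_state *ps)
{
	unsigned long s;

	pthread_mutex_lock(&ps->lock);
	s = ps->seq_rq;
	ps->seq_rq = REQ_NONE;
	pthread_mutex_unlock(&ps->lock);
	return s;
}

int main(void)
{
	struct poll_state ps = { .lock = PTHREAD_MUTEX_INITIALIZER,
				 .seq_rq = REQ_NONE };
	unsigned long s;

	poll_request(&ps, 12);		/* caller records what it needs */
	s = poll_consume(&ps);		/* worker picks it up and resets */
	return !(s == 12 && ps.seq_rq == REQ_NONE);
}
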