Lines matching defs:rnp — a cross-reference listing of the struct rcu_node pointer rnp through the RCU preemption and priority-boosting code. Each entry below is a source line number followed by the matching source line.

105 static void rcu_report_exp_rnp(struct rcu_node *rnp, bool wake);
151 static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
152 __releases(rnp->lock) /* But leaves rrupts disabled. */
154 int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
155 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
156 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +
157 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
160 raw_lockdep_assert_held_rcu_node(rnp);
161 WARN_ON_ONCE(rdp->mynode != rnp);
162 WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
164 WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask &
185 list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
203 list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
216 list_add(&t->rcu_node_entry, rnp->exp_tasks);
227 list_add(&t->rcu_node_entry, rnp->gp_tasks);
243 if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) {
244 WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry);
245 WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq);
247 if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
248 WRITE_ONCE(rnp->exp_tasks, &t->rcu_node_entry);
250 !(rnp->qsmask & rdp->grpmask));
252 !(rnp->expmask & rdp->grpmask));
253 raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */
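
The rcu_preempt_ctxt_queue() lines above (151-253) fold four independent conditions into a blkd_state bitmask and then pick one of four insertion points on ->blkd_tasks (head, tail, at *exp_tasks, or at *gp_tasks) so that tasks blocking the expedited grace period, the normal grace period, and neither stay in contiguous segments. Below is a minimal user-space sketch of just the bitmask construction; the flag values are illustrative stand-ins rather than the kernel's definitions, and the full placement switch is not reproduced.

    /* Sketch only: mirrors the blkd_state computation at lines 154-157,
     * with illustrative flag values. */
    #include <stdbool.h>
    #include <stdio.h>

    #define RCU_GP_TASKS  0x8  /* an earlier task already blocks the normal GP    */
    #define RCU_EXP_TASKS 0x4  /* an earlier task already blocks the expedited GP */
    #define RCU_GP_BLKD   0x2  /* this CPU still owes the normal GP a QS          */
    #define RCU_EXP_BLKD  0x1  /* this CPU still owes the expedited GP a QS       */

    static int blkd_state(bool gp_tasks, bool exp_tasks,
                          bool owes_gp_qs, bool owes_exp_qs)
    {
        return (gp_tasks    ? RCU_GP_TASKS  : 0) +
               (exp_tasks   ? RCU_EXP_TASKS : 0) +
               (owes_gp_qs  ? RCU_GP_BLKD   : 0) +
               (owes_exp_qs ? RCU_EXP_BLKD  : 0);
    }

    int main(void)
    {
        /* Preempted while the CPU blocks both GPs, no earlier blockers queued. */
        printf("blkd_state = 0x%x\n", blkd_state(false, false, true, true));
        return 0;
    }
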
308 * rnp->gp_tasks becomes NULL.
316 struct rcu_node *rnp;
325 rnp = rdp->mynode;
326 raw_spin_lock_rcu_node(rnp);
328 t->rcu_blocked_node = rnp;
339 (rnp->qsmask & rdp->grpmask)
340 ? rnp->gp_seq
341 : rcu_seq_snap(&rnp->gp_seq));
342 rcu_preempt_ctxt_queue(rnp, rdp);
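
The context-switch lines above (316-342) record which grace period the newly blocked task can hold up: if the CPU's bit is still set in rnp->qsmask the task is charged to the current GP, otherwise only to a later one via rcu_seq_snap(). A toy model of that choice, treating gp_seq as a plain counter (the real gp_seq also packs state bits into its low-order bits):

    #include <stdio.h>

    /* Which grace-period number does this blocked task hold up? */
    static unsigned long blocked_gp_seq(unsigned long qsmask, unsigned long grpmask,
                                        unsigned long gp_seq)
    {
        return (qsmask & grpmask) ? gp_seq      /* still blocks the current GP   */
                                  : gp_seq + 1; /* roughly: a GP not yet started */
    }

    int main(void)
    {
        printf("%lu\n", blocked_gp_seq(0x1, 0x1, 100)); /* 100: the current GP */
        printf("%lu\n", blocked_gp_seq(0x0, 0x1, 100)); /* 101: a later GP     */
        return 0;
    }
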
369 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
371 return READ_ONCE(rnp->gp_tasks) != NULL;
441 struct rcu_node *rnp)
446 if (np == &rnp->blkd_tasks)
455 static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
457 return !list_empty(&rnp->blkd_tasks);
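
rcu_next_node_entry() and rcu_preempt_has_tasks() (lines 441-457) both rely on ->blkd_tasks being a circular list whose head is a sentinel: reaching the head again means "no further blocked task". A self-contained sketch using a minimal list type of my own, not the kernel's list_head:

    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    static void list_add_tail_min(struct list_head *entry, struct list_head *head)
    {
        entry->prev = head->prev;
        entry->next = head;
        head->prev->next = entry;
        head->prev = entry;
    }

    /* Next blocked task after cur, or NULL once the sentinel head is reached. */
    static struct list_head *next_node_entry(struct list_head *cur,
                                             struct list_head *head)
    {
        struct list_head *np = cur->next;

        return np == head ? NULL : np;
    }

    int main(void)
    {
        struct list_head blkd_tasks = { &blkd_tasks, &blkd_tasks };
        struct list_head a, b;

        list_add_tail_min(&a, &blkd_tasks);
        list_add_tail_min(&b, &blkd_tasks);
        printf("after a is b:   %d\n", next_node_entry(&a, &blkd_tasks) == &b);
        printf("after b is end: %d\n", next_node_entry(&b, &blkd_tasks) == NULL);
        return 0;
    }
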
474 struct rcu_node *rnp;
517 rnp = t->rcu_blocked_node;
518 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
519 WARN_ON_ONCE(rnp != t->rcu_blocked_node);
520 WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
521 empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
522 WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq &&
523 (!empty_norm || rnp->qsmask));
524 empty_exp = sync_rcu_exp_done(rnp);
526 np = rcu_next_node_entry(t, rnp);
530 rnp->gp_seq, t->pid);
531 if (&t->rcu_node_entry == rnp->gp_tasks)
532 WRITE_ONCE(rnp->gp_tasks, np);
533 if (&t->rcu_node_entry == rnp->exp_tasks)
534 WRITE_ONCE(rnp->exp_tasks, np);
536 /* Snapshot ->boost_mtx ownership w/rnp->lock held. */
537 drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx.rtmutex) == t;
538 if (&t->rcu_node_entry == rnp->boost_tasks)
539 WRITE_ONCE(rnp->boost_tasks, np);
545 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
548 empty_exp_now = sync_rcu_exp_done(rnp);
549 if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
551 rnp->gp_seq,
552 0, rnp->qsmask,
553 rnp->level,
554 rnp->grplo,
555 rnp->grphi,
556 !!rnp->gp_tasks);
557 rcu_report_unblock_qs_rnp(rnp, flags);
559 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
567 rcu_report_exp_rnp(rnp, true);
571 rt_mutex_futex_unlock(&rnp->boost_mtx.rtmutex);
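
The unblock path above (lines 517-571) removes the departing task from ->blkd_tasks and, when that task is the one ->gp_tasks, ->exp_tasks, or ->boost_tasks points at, advances the pointer to the next entry np (possibly NULL), then reports quiescent states if the relevant segment just emptied. Here is a sketch of only the pointer-advance step, again with a stand-in list type:

    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    /* If a segment pointer names the departing entry, move it to the next
     * entry; np is NULL when no later entry blocks that grace period. */
    static void advance_if_departing(struct list_head **segp,
                                     struct list_head *departing,
                                     struct list_head *np)
    {
        if (*segp == departing)
            *segp = np;
    }

    int main(void)
    {
        struct list_head first, second;
        struct list_head *gp_tasks = &first;   /* first entry blocking the GP           */
        struct list_head *exp_tasks = &second; /* first entry blocking the expedited GP */

        /* "first" leaves its read-side critical section; "second" is next. */
        advance_if_departing(&gp_tasks, &first, &second);
        advance_if_departing(&exp_tasks, &first, &second); /* no match: untouched */

        printf("gp_tasks moved:      %d\n", gp_tasks == &second);
        printf("exp_tasks unchanged: %d\n", exp_tasks == &second);
        return 0;
    }
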
642 struct rcu_node *rnp = rdp->mynode;
645 (rdp->grpmask & READ_ONCE(rnp->expmask)) ||
647 ((rdp->grpmask & READ_ONCE(rnp->qsmask)) || t->rcu_blocked_node)) ||
688 * invoked -before- updating this rnp's ->gp_seq.
693 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
698 raw_lockdep_assert_held_rcu_node(rnp);
699 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
700 dump_blkd_tasks(rnp, 10);
701 if (rcu_preempt_has_tasks(rnp) &&
702 (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
703 WRITE_ONCE(rnp->gp_tasks, rnp->blkd_tasks.next);
704 t = container_of(rnp->gp_tasks, struct task_struct,
707 rnp->gp_seq, t->pid);
709 WARN_ON_ONCE(rnp->qsmask);
778 dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
786 raw_lockdep_assert_held_rcu_node(rnp);
788 __func__, rnp->grplo, rnp->grphi, rnp->level,
789 (long)READ_ONCE(rnp->gp_seq), (long)rnp->completedqs);
790 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
794 __func__, READ_ONCE(rnp->gp_tasks), data_race(rnp->boost_tasks),
795 READ_ONCE(rnp->exp_tasks));
798 list_for_each(lhp, &rnp->blkd_tasks) {
804 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
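
dump_blkd_tasks() (lines 778-804) prints the node's segment pointers, climbs ->parent for the ancestors, and walks ->blkd_tasks for at most ncheck entries so a corrupted list cannot turn the diagnostic into an endless loop. A sketch of the bounded walk, with a stand-in list type:

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    /* Print at most ncheck entries; a damaged list cannot wedge the dump. */
    static void dump_bounded(struct list_head *head, int ncheck)
    {
        struct list_head *lhp;
        int i = 0;

        for (lhp = head->next; lhp != head; lhp = lhp->next) {
            printf(" %p", (void *)lhp);
            if (++i >= ncheck)
                break;
        }
        printf("\n");
    }

    int main(void)
    {
        struct list_head head = { &head, &head };   /* empty list */

        dump_bounded(&head, 10);
        return 0;
    }
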
913 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
921 static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
955 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
957 WARN_ON_ONCE(rnp->qsmask);
995 dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
997 WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
1047 static int rcu_boost(struct rcu_node *rnp)
1053 if (READ_ONCE(rnp->exp_tasks) == NULL &&
1054 READ_ONCE(rnp->boost_tasks) == NULL)
1057 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1063 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
1064 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1074 if (rnp->exp_tasks != NULL)
1075 tb = rnp->exp_tasks;
1077 tb = rnp->boost_tasks;
1087 * Note that task t must acquire rnp->lock to remove itself from
1090 * stay around at least until we drop rnp->lock. Note that
1091 * rnp->lock also resolves races between our priority boosting
1096 rt_mutex_init_proxy_locked(&rnp->boost_mtx.rtmutex, t);
1097 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1099 rt_mutex_lock(&rnp->boost_mtx);
1100 rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */
1101 rnp->n_boosts++;
1103 return READ_ONCE(rnp->exp_tasks) != NULL ||
1104 READ_ONCE(rnp->boost_tasks) != NULL;
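
rcu_boost() (lines 1047-1104) first peeks at ->exp_tasks and ->boost_tasks without the lock, takes the lock only if either is set, rechecks, and prefers the task blocking an expedited grace period; it then proxy-locks ->boost_mtx so the chosen reader inherits the booster's priority. The sketch below models only the lockless-check / locked-recheck / preference part, using C11 atomics and a pthread mutex as stand-ins for READ_ONCE() and the rcu_node lock; the rt-mutex proxy-locking is not reproduced.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct node {
        pthread_mutex_t lock;
        _Atomic(void *) exp_tasks;
        _Atomic(void *) boost_tasks;
    };

    static bool maybe_boost(struct node *rnp)
    {
        void *tb;

        /* Lockless peek: skip the lock entirely when there is nothing to boost. */
        if (!atomic_load(&rnp->exp_tasks) && !atomic_load(&rnp->boost_tasks))
            return false;

        pthread_mutex_lock(&rnp->lock);
        /* Recheck under the lock: both lists may have emptied meanwhile. */
        if (!atomic_load(&rnp->exp_tasks) && !atomic_load(&rnp->boost_tasks)) {
            pthread_mutex_unlock(&rnp->lock);
            return false;
        }
        /* Prefer a reader blocking an expedited grace period. */
        tb = atomic_load(&rnp->exp_tasks);
        if (!tb)
            tb = atomic_load(&rnp->boost_tasks);
        pthread_mutex_unlock(&rnp->lock);

        printf("would boost the reader behind entry %p\n", tb);
        /* More passes are needed while either pointer remains set. */
        return atomic_load(&rnp->exp_tasks) || atomic_load(&rnp->boost_tasks);
    }

    int main(void)
    {
        struct node rnp = { .lock = PTHREAD_MUTEX_INITIALIZER };
        int blocked_reader;

        atomic_store(&rnp.boost_tasks, (void *)&blocked_reader);
        while (maybe_boost(&rnp))
            atomic_store(&rnp.boost_tasks, NULL);  /* pretend the reader finished */
        return 0;
    }
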
1112 struct rcu_node *rnp = (struct rcu_node *)arg;
1118 WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_WAITING);
1120 rcu_wait(READ_ONCE(rnp->boost_tasks) ||
1121 READ_ONCE(rnp->exp_tasks));
1123 WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_RUNNING);
1124 more2boost = rcu_boost(rnp);
1130 WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_YIELDING);
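
The rcu_boost_kthread() lines above (1112-1130) show the per-node kthread's loop: advertise WAITING, sleep until ->boost_tasks or ->exp_tasks is set, advertise RUNNING, boost until no more work remains, and occasionally advertise YIELDING to give up the CPU. The single-threaded simulation below captures only that shape; the real code waits with rcu_wait() and applies a yield condition that is simplified away here.

    #include <stdbool.h>
    #include <stdio.h>

    enum kthread_status { KTHREAD_WAITING, KTHREAD_RUNNING, KTHREAD_YIELDING };

    struct node {
        enum kthread_status status;
        bool boost_tasks, exp_tasks;
    };

    /* Stand-in for rcu_boost(): pretend one pass clears all the work. */
    static bool boost_pass(struct node *rnp)
    {
        rnp->boost_tasks = rnp->exp_tasks = false;
        return rnp->boost_tasks || rnp->exp_tasks;   /* more boosting needed? */
    }

    static void boost_kthread_step(struct node *rnp)
    {
        rnp->status = KTHREAD_WAITING;               /* nothing to do yet         */
        if (!rnp->boost_tasks && !rnp->exp_tasks)
            return;                                  /* would sleep in rcu_wait() */

        rnp->status = KTHREAD_RUNNING;
        while (boost_pass(rnp))
            ;                                        /* keep boosting             */
        rnp->status = KTHREAD_YIELDING;              /* advertise a yield         */
    }

    int main(void)
    {
        struct node rnp = { .boost_tasks = true };

        boost_kthread_step(&rnp);
        printf("status=%d, work left=%d\n", rnp.status,
               rnp.boost_tasks || rnp.exp_tasks);
        return 0;
    }
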
1148 * The caller must hold rnp->lock, which this function releases.
1152 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1153 __releases(rnp->lock)
1155 raw_lockdep_assert_held_rcu_node(rnp);
1156 if (!rnp->boost_kthread_task ||
1157 (!rcu_preempt_blocked_readers_cgp(rnp) && !rnp->exp_tasks)) {
1158 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1161 if (rnp->exp_tasks != NULL ||
1162 (rnp->gp_tasks != NULL &&
1163 rnp->boost_tasks == NULL &&
1164 rnp->qsmask == 0 &&
1165 (!time_after(rnp->boost_time, jiffies) || rcu_state.cbovld ||
1167 if (rnp->exp_tasks == NULL)
1168 WRITE_ONCE(rnp->boost_tasks, rnp->gp_tasks);
1169 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1170 rcu_wake_cond(rnp->boost_kthread_task,
1171 READ_ONCE(rnp->boost_kthread_status));
1173 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
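
rcu_initiate_boost() (lines 1152-1173) bails out when there is no boost kthread or nothing is blocked, boosts immediately when a task blocks an expedited grace period, and otherwise starts boosting only once every CPU in the node has reported its quiescent state (qsmask == 0), no boosting is already under way, and the boost delay has expired or callbacks are overloaded. The predicate below models just the conditions visible in the listing (the continuation elided at line 1166 is not filled in); the names and the time comparison are simplified stand-ins.

    #include <stdbool.h>
    #include <stdio.h>

    struct node_state {
        bool has_kthread;       /* rnp->boost_kthread_task != NULL           */
        bool exp_tasks;         /* a reader blocks an expedited grace period */
        bool gp_tasks;          /* a reader blocks the normal grace period   */
        bool boost_tasks;       /* boosting is already under way             */
        unsigned long qsmask;   /* CPUs in this node still owing a QS        */
        long now, boost_time;   /* stand-ins for jiffies and rnp->boost_time */
        bool cb_overloaded;     /* stand-in for rcu_state.cbovld             */
    };

    static bool should_start_boosting(const struct node_state *s)
    {
        if (!s->has_kthread || (!s->gp_tasks && !s->exp_tasks))
            return false;                  /* nobody to boost, or no kthread */
        if (s->exp_tasks)
            return true;                   /* expedited blockers: boost now  */
        return s->gp_tasks && !s->boost_tasks && !s->qsmask &&
               (s->now >= s->boost_time || s->cb_overloaded);
    }

    int main(void)
    {
        struct node_state s = {
            .has_kthread = true, .gp_tasks = true,
            .qsmask = 0, .boost_time = 100, .now = 150,
        };

        printf("%d\n", should_start_boosting(&s));  /* 1: the delay has elapsed */
        return 0;
    }
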
1182 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1184 rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
1191 static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
1194 int rnp_index = rnp - rcu_get_root();
1198 if (rnp->boost_kthread_task)
1201 t = kthread_create(rcu_boost_kthread, (void *)rnp,
1206 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1207 rnp->boost_kthread_task = t;
1208 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
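
rcu_spawn_one_boost_kthread() (lines 1191-1208) creates the per-node boost kthread at most once and publishes the task pointer under the node lock. Below is a user-space analogue of that spawn-once-and-publish pattern, with pthreads standing in for kthread_create() and the rcu_node lock; it is a sketch, not the kernel's code.

    #include <pthread.h>
    #include <stdio.h>

    struct node {
        pthread_mutex_t lock;
        pthread_t worker;
        int have_worker;
    };

    static void *boost_worker(void *arg)
    {
        (void)arg;               /* would loop boosting blocked readers */
        return NULL;
    }

    static void spawn_one_boost_worker(struct node *rnp)
    {
        pthread_t t;

        if (rnp->have_worker)                        /* already spawned */
            return;
        if (pthread_create(&t, NULL, boost_worker, rnp))
            return;                                  /* creation failed */

        pthread_mutex_lock(&rnp->lock);              /* publish under the node lock */
        rnp->worker = t;
        rnp->have_worker = 1;
        pthread_mutex_unlock(&rnp->lock);
    }

    int main(void)
    {
        struct node rnp = { .lock = PTHREAD_MUTEX_INITIALIZER };

        spawn_one_boost_worker(&rnp);
        if (rnp.have_worker)
            pthread_join(rnp.worker, NULL);
        printf("spawned: %d\n", rnp.have_worker);
        return 0;
    }
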
1214 static struct task_struct *rcu_boost_task(struct rcu_node *rnp)
1216 return READ_ONCE(rnp->boost_kthread_task);
1221 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1222 __releases(rnp->lock)
1224 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1227 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1231 static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
1235 static struct task_struct *rcu_boost_task(struct rcu_node *rnp)