• Home
  • History
  • Annotate
  • Raw
  • Download
  • only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/kernel/

Lines Matching defs:rsp

93 static int rcu_gp_in_progress(struct rcu_state *rsp)
95 return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
146 static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
198 cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
200 return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
206 static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
208 return &rsp->node[0];
455 static void record_gp_stall_check_time(struct rcu_state *rsp)
457 rsp->gp_start = jiffies;
458 rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
461 static void print_other_cpu_stall(struct rcu_state *rsp)
466 struct rcu_node *rnp = rcu_get_root(rsp);
471 delta = jiffies - rsp->jiffies_stall;
472 if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
476 rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
488 rsp->name);
489 rcu_for_each_leaf_node(rsp, rnp) {
500 smp_processor_id(), (long)(jiffies - rsp->gp_start));
505 rcu_print_detail_task_stall(rsp);
507 force_quiescent_state(rsp, 0); /* Kick them all. */
510 static void print_cpu_stall(struct rcu_state *rsp)
513 struct rcu_node *rnp = rcu_get_root(rsp);
516 rsp->name, smp_processor_id(), jiffies - rsp->gp_start);
520 if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall))
521 rsp->jiffies_stall =
528 static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
535 delta = jiffies - rsp->jiffies_stall;
540 print_cpu_stall(rsp);
542 } else if (rcu_gp_in_progress(rsp) && delta >= RCU_STALL_RAT_DELAY) {
545 print_other_cpu_stall(rsp);
566 static void record_gp_stall_check_time(struct rcu_state *rsp)
570 static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
587 static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
596 static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
608 __note_new_gpnum(rsp, rnp, rdp);
618 check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
624 if (rdp->gpnum != rsp->gpnum) {
625 note_new_gpnum(rsp, rdp);
639 __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
660 rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
672 __rcu_process_gp_end(rsp, rnp, rdp);
682 rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
685 __rcu_process_gp_end(rsp, rnp, rdp);
702 __note_new_gpnum(rsp, rnp, rdp);
712 rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
713 __releases(rcu_get_root(rsp)->lock)
715 struct rcu_data *rdp = rsp->rda[smp_processor_id()];
716 struct rcu_node *rnp = rcu_get_root(rsp);
718 if (!cpu_needs_another_gp(rsp, rdp) || rsp->fqs_active) {
719 if (cpu_needs_another_gp(rsp, rdp))
720 rsp->fqs_need_gp = 1;
721 if (rnp->completed == rsp->completed) {
732 rcu_for_each_node_breadth_first(rsp, rnp) {
734 rnp->completed = rsp->completed;
742 rsp->gpnum++;
743 WARN_ON_ONCE(rsp->signaled == RCU_GP_INIT);
744 rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
745 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
746 record_gp_stall_check_time(rsp);
752 rnp->gpnum = rsp->gpnum;
753 rnp->completed = rsp->completed;
754 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
755 rcu_start_gp_per_cpu(rsp, rnp, rdp);
764 raw_spin_lock(&rsp->onofflock); /* irqs already disabled. */
771 * rsp->node[] array. Note that other CPUs will access only
783 rcu_for_each_node_breadth_first(rsp, rnp) {
787 rnp->gpnum = rsp->gpnum;
788 rnp->completed = rsp->completed;
790 rcu_start_gp_per_cpu(rsp, rnp, rdp);
794 rnp = rcu_get_root(rsp);
796 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
798 raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
808 static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
809 __releases(rcu_get_root(rsp)->lock)
811 WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
812 rsp->completed = rsp->gpnum;
813 rsp->signaled = RCU_GP_IDLE;
814 rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
826 rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
866 rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
879 rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
913 rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */
924 rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
927 if (check_for_new_grace_period(rsp, rdp))
948 rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed);
960 static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
963 struct rcu_data *rdp = rsp->rda[smp_processor_id()];
967 raw_spin_lock(&rsp->onofflock); /* irqs already disabled. */
968 *rsp->orphan_cbs_tail = rdp->nxtlist;
969 rsp->orphan_cbs_tail = rdp->nxttail[RCU_NEXT_TAIL];
973 rsp->orphan_qlen += rdp->qlen;
975 raw_spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
981 static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
986 raw_spin_lock_irqsave(&rsp->onofflock, flags);
987 rdp = rsp->rda[smp_processor_id()];
988 if (rsp->orphan_cbs_list == NULL) {
989 raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
992 *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list;
993 rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_tail;
994 rdp->qlen += rsp->orphan_qlen;
995 rsp->orphan_cbs_list = NULL;
996 rsp->orphan_cbs_tail = &rsp->orphan_cbs_list;
997 rsp->orphan_qlen = 0;
998 raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
1005 static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
1010 struct rcu_data *rdp = rsp->rda[cpu];
1014 raw_spin_lock_irqsave(&rsp->onofflock, flags);
1028 need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
1041 raw_spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
1048 rcu_report_exp_rnp(rsp, rnp);
1050 rcu_adopt_orphan_cbs(rsp);
1068 static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
1072 static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
1086 static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
1143 rdp->n_force_qs_snap = rsp->n_force_qs;
1207 static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
1215 rcu_for_each_leaf_node(rsp, rnp) {
1218 if (!rcu_gp_in_progress(rsp)) {
1229 if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu]))
1235 rcu_report_qs_rnp(mask, rsp, rnp, flags);
1246 static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1249 struct rcu_node *rnp = rcu_get_root(rsp);
1251 if (!rcu_gp_in_progress(rsp))
1253 if (!raw_spin_trylock_irqsave(&rsp->fqslock, flags)) {
1254 rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */
1257 if (relaxed && ULONG_CMP_GE(rsp->jiffies_force_qs, jiffies))
1259 rsp->n_force_qs++;
1261 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
1262 if (!rcu_gp_in_progress(rsp)) {
1263 rsp->n_force_qs_ngp++;
1267 rsp->fqs_active = 1;
1268 switch (rsp->signaled) {
1281 force_qs_rnp(rsp, dyntick_save_progress_counter);
1283 if (rcu_gp_in_progress(rsp))
1284 rsp->signaled = RCU_FORCE_QS;
1291 force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
1298 rsp->fqs_active = 0;
1299 if (rsp->fqs_need_gp) {
1300 raw_spin_unlock(&rsp->fqslock); /* irqs remain disabled */
1301 rsp->fqs_need_gp = 0;
1302 rcu_start_gp(rsp, flags); /* releases rnp->lock */
1307 raw_spin_unlock_irqrestore(&rsp->fqslock, flags);
1312 static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1325 __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
1335 if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
1336 force_quiescent_state(rsp, 1);
1342 rcu_process_gp_end(rsp, rdp);
1345 rcu_check_quiescent_state(rsp, rdp);
1348 if (cpu_needs_another_gp(rsp, rdp)) {
1349 raw_spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags);
1350 rcu_start_gp(rsp, flags); /* releases above lock */
1354 rcu_do_batch(rsp, rdp);
1387 struct rcu_state *rsp)
1405 rdp = rsp->rda[smp_processor_id()];
1406 rcu_process_gp_end(rsp, rdp);
1407 check_for_new_grace_period(rsp, rdp);
1414 if (!rcu_gp_in_progress(rsp)) {
1416 struct rcu_node *rnp_root = rcu_get_root(rsp);
1419 rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */
1431 if (rsp->n_force_qs == rdp->n_force_qs_snap &&
1433 force_quiescent_state(rsp, 0);
1434 rdp->n_force_qs_snap = rsp->n_force_qs;
1436 } else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
1437 force_quiescent_state(rsp, 1);
1532 static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
1539 check_cpu_stall(rsp, rdp);
1551 ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1,
1566 if (cpu_needs_another_gp(rsp, rdp)) {
1584 if (rcu_gp_in_progress(rsp) &&
1585 ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) {
1650 static void _rcu_barrier(struct rcu_state *rsp,
1669 rcu_adopt_orphan_cbs(rsp);
1700 rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
1704 struct rcu_data *rdp = rsp->rda[cpu];
1705 struct rcu_node *rnp = rcu_get_root(rsp);
1724 * can accept some slop in the rsp->completed access due to the fact
1728 rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
1732 struct rcu_data *rdp = rsp->rda[cpu];
1733 struct rcu_node *rnp = rcu_get_root(rsp);
1742 rdp->n_force_qs_snap = rsp->n_force_qs;
1752 raw_spin_lock(&rsp->onofflock); /* irqs already disabled. */
1771 raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
1842 static void __init rcu_init_levelspread(struct rcu_state *rsp)
1847 rsp->levelspread[i] = CONFIG_RCU_FANOUT;
1850 static void __init rcu_init_levelspread(struct rcu_state *rsp)
1858 ccur = rsp->levelcnt[i];
1859 rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
1868 static void __init rcu_init_one(struct rcu_state *rsp)
1884 rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
1885 rcu_init_levelspread(rsp);
1890 cpustride *= rsp->levelspread[i];
1891 rnp = rsp->level[i];
1892 for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
1908 rnp->grpnum = j % rsp->levelspread[i - 1];
1910 rnp->parent = rsp->level[i - 1] +
1911 j / rsp->levelspread[i - 1];
1921 rnp = rsp->level[NUM_RCU_LVLS - 1];
1925 rsp->rda[i]->mynode = rnp;
1926 rcu_boot_init_percpu_data(i, rsp);
1935 #define RCU_INIT_FLAVOR(rsp, rcu_data) \
1940 (rsp)->rda[i] = &per_cpu(rcu_data, i); \
1942 rcu_init_one(rsp); \