• Home
  • History
  • Annotate
  • Raw
  • Download
  • only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/kernel/

Lines Matching refs:rdp

105 	struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
107 rdp->passed_quiesc_completed = rdp->gpnum - 1;
109 rdp->passed_quiesc = 1;
114 struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
116 rdp->passed_quiesc_completed = rdp->gpnum - 1;
118 rdp->passed_quiesc = 1;
189 cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
191 return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL];
198 cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
200 return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
224 static int rcu_implicit_offline_qs(struct rcu_data *rdp)
230 if (cpu_is_offline(rdp->cpu)) {
231 rdp->offline_fqs++;
236 if (rdp->preemptable)
240 if (rdp->cpu != smp_processor_id())
241 smp_send_reschedule(rdp->cpu);
244 rdp->resched_ipi++;
350 * If the CPU was idle with dynamic ticks active, update the rdp->dynticks
377 static int dyntick_save_progress_counter(struct rcu_data *rdp)
383 snap = rdp->dynticks->dynticks;
384 snap_nmi = rdp->dynticks->dynticks_nmi;
386 rdp->dynticks_snap = snap;
387 rdp->dynticks_nmi_snap = snap_nmi;
390 rdp->dynticks_fqs++;
400 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
407 curr = rdp->dynticks->dynticks;
408 snap = rdp->dynticks_snap;
409 curr_nmi = rdp->dynticks->dynticks_nmi;
410 snap_nmi = rdp->dynticks_nmi_snap;
423 rdp->dynticks_fqs++;
428 return rcu_implicit_offline_qs(rdp);
437 static int dyntick_save_progress_counter(struct rcu_data *rdp)
442 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
444 return rcu_implicit_offline_qs(rdp);
528 static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
536 rnp = rdp->mynode;
537 if ((rnp->qsmask & rdp->grpmask) && delta >= 0) {
570 static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
587 static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
589 if (rdp->gpnum != rnp->gpnum) {
590 rdp->qs_pending = 1;
591 rdp->passed_quiesc = 0;
592 rdp->gpnum = rnp->gpnum;
596 static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
602 rnp = rdp->mynode;
603 if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */
608 __note_new_gpnum(rsp, rnp, rdp);
615 * on the CPU corresponding to rdp.
618 check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
624 if (rdp->gpnum != rsp->gpnum) {
625 note_new_gpnum(rsp, rdp);
634 * has ended. This may be called only from the CPU to whom the rdp
639 __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
642 if (rdp->completed != rnp->completed) {
645 rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
646 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
647 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
650 rdp->completed = rnp->completed;
656 * has ended. This may be called only from the CPU to whom the rdp
660 rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
666 rnp = rdp->mynode;
667 if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */
672 __rcu_process_gp_end(rsp, rnp, rdp);
682 rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
685 __rcu_process_gp_end(rsp, rnp, rdp);
698 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
699 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
702 __note_new_gpnum(rsp, rnp, rdp);
715 struct rcu_data *rdp = rsp->rda[smp_processor_id()];
718 if (!cpu_needs_another_gp(rsp, rdp) || rsp->fqs_active) {
719 if (cpu_needs_another_gp(rsp, rdp))
755 rcu_start_gp_per_cpu(rsp, rnp, rdp);
789 if (rnp == rdp->mynode)
790 rcu_start_gp_per_cpu(rsp, rnp, rdp);
879 rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
885 rnp = rdp->mynode;
897 rdp->passed_quiesc = 0; /* try again later! */
901 mask = rdp->grpmask;
905 rdp->qs_pending = 0;
911 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
924 rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
927 if (check_for_new_grace_period(rsp, rdp))
934 if (!rdp->qs_pending)
941 if (!rdp->passed_quiesc)
948 rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed);
963 struct rcu_data *rdp = rsp->rda[smp_processor_id()];
965 if (rdp->nxtlist == NULL)
968 *rsp->orphan_cbs_tail = rdp->nxtlist;
969 rsp->orphan_cbs_tail = rdp->nxttail[RCU_NEXT_TAIL];
970 rdp->nxtlist = NULL;
972 rdp->nxttail[i] = &rdp->nxtlist;
973 rsp->orphan_qlen += rdp->qlen;
974 rdp->qlen = 0;
984 struct rcu_data *rdp;
987 rdp = rsp->rda[smp_processor_id()];
992 *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list;
993 rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_tail;
994 rdp->qlen += rsp->orphan_qlen;
1010 struct rcu_data *rdp = rsp->rda[cpu];
1017 rnp = rdp->mynode; /* this is the outgoing CPU's rnp. */
1018 mask = rdp->grpmask; /* rnp->grplo is constant. */
1023 if (rnp != rdp->mynode)
1027 if (rnp == rdp->mynode)
1028 need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
1042 rnp = rdp->mynode;
1084 * period. Throttle as specified by rdp->blimit.
1086 static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
1093 if (!cpu_has_callbacks_ready_to_invoke(rdp))
1101 list = rdp->nxtlist;
1102 rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
1103 *rdp->nxttail[RCU_DONE_TAIL] = NULL;
1104 tail = rdp->nxttail[RCU_DONE_TAIL];
1106 if (rdp->nxttail[count] == rdp->nxttail[RCU_DONE_TAIL])
1107 rdp->nxttail[count] = &rdp->nxtlist;
1118 if (++count >= rdp->blimit)
1125 rdp->qlen -= count;
1127 *tail = rdp->nxtlist;
1128 rdp->nxtlist = list;
1130 if (&rdp->nxtlist == rdp->nxttail[count])
1131 rdp->nxttail[count] = tail;
1137 if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
1138 rdp->blimit = blimit;
1141 if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
1142 rdp->qlen_last_fqs_check = 0;
1143 rdp->n_force_qs_snap = rsp->n_force_qs;
1144 } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
1145 rdp->qlen_last_fqs_check = rdp->qlen;
1150 if (cpu_has_callbacks_ready_to_invoke(rdp))
1322 * only from the CPU to whom the rdp belongs.
1325 __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
1329 WARN_ON_ONCE(rdp->beenonline == 0);
1342 rcu_process_gp_end(rsp, rdp);
1345 rcu_check_quiescent_state(rsp, rdp);
1348 if (cpu_needs_another_gp(rsp, rdp)) {
1354 rcu_do_batch(rsp, rdp);
1390 struct rcu_data *rdp;
1405 rdp = rsp->rda[smp_processor_id()];
1406 rcu_process_gp_end(rsp, rdp);
1407 check_for_new_grace_period(rsp, rdp);
1410 *rdp->nxttail[RCU_NEXT_TAIL] = head;
1411 rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
1429 if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
1430 rdp->blimit = LONG_MAX;
1431 if (rsp->n_force_qs == rdp->n_force_qs_snap &&
1432 *rdp->nxttail[RCU_DONE_TAIL] != head)
1434 rdp->n_force_qs_snap = rsp->n_force_qs;
1435 rdp->qlen_last_fqs_check = rdp->qlen;
1532 static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
1534 struct rcu_node *rnp = rdp->mynode;
1536 rdp->n_rcu_pending++;
1539 check_cpu_stall(rsp, rdp);
1542 if (rdp->qs_pending && !rdp->passed_quiesc) {
1549 rdp->n_rp_qs_pending++;
1550 if (!rdp->preemptable &&
1554 } else if (rdp->qs_pending && rdp->passed_quiesc) {
1555 rdp->n_rp_report_qs++;
1560 if (cpu_has_callbacks_ready_to_invoke(rdp)) {
1561 rdp->n_rp_cb_ready++;
1566 if (cpu_needs_another_gp(rsp, rdp)) {
1567 rdp->n_rp_cpu_needs_gp++;
1572 if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
1573 rdp->n_rp_gp_completed++;
1578 if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */
1579 rdp->n_rp_gp_started++;
1586 rdp->n_rp_need_fqs++;
1591 rdp->n_rp_need_nothing++;
1704 struct rcu_data *rdp = rsp->rda[cpu];
1709 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
1710 rdp->nxtlist = NULL;
1712 rdp->nxttail[i] = &rdp->nxtlist;
1713 rdp->qlen = 0;
1715 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
1717 rdp->cpu = cpu;
1732 struct rcu_data *rdp = rsp->rda[cpu];
1737 rdp->passed_quiesc = 0; /* We could be racing with new GP, */
1738 rdp->qs_pending = 1; /* so set up to respond to current GP. */
1739 rdp->beenonline = 1; /* We have now been online. */
1740 rdp->preemptable = preemptable;
1741 rdp->qlen_last_fqs_check = 0;
1742 rdp->n_force_qs_snap = rsp->n_force_qs;
1743 rdp->blimit = blimit;
1755 rnp = rdp->mynode;
1756 mask = rdp->grpmask;
1762 if (rnp == rdp->mynode) {
1763 rdp->gpnum = rnp->completed; /* if GP in progress... */
1764 rdp->completed = rnp->completed;
1765 rdp->passed_quiesc_completed = rnp->completed - 1;