Lines Matching refs:ssp

76 static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
122 static void init_srcu_struct_data(struct srcu_struct *ssp)
134 sdp = per_cpu_ptr(ssp->sda, cpu);
138 sdp->srcu_gp_seq_needed = ssp->srcu_sup->srcu_gp_seq;
139 sdp->srcu_gp_seq_needed_exp = ssp->srcu_sup->srcu_gp_seq;
144 sdp->ssp = ssp;
164 static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags)
176 ssp->srcu_sup->node = kcalloc(rcu_num_nodes, sizeof(*ssp->srcu_sup->node), gfp_flags);
177 if (!ssp->srcu_sup->node)
181 ssp->srcu_sup->level[0] = &ssp->srcu_sup->node[0];
183 ssp->srcu_sup->level[i] = ssp->srcu_sup->level[i - 1] + num_rcu_lvl[i - 1];
187 srcu_for_each_node_breadth_first(ssp, snp) {
198 if (snp == &ssp->srcu_sup->node[0]) {
205 if (snp == ssp->srcu_sup->level[level + 1])
207 snp->srcu_parent = ssp->srcu_sup->level[level - 1] +
208 (snp - ssp->srcu_sup->level[level]) /
217 snp_first = ssp->srcu_sup->level[level];
219 sdp = per_cpu_ptr(ssp->sda, cpu);
228 smp_store_release(&ssp->srcu_sup->srcu_size_state, SRCU_SIZE_WAIT_BARRIER);
237 static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
240 ssp->srcu_sup = kzalloc(sizeof(*ssp->srcu_sup), GFP_KERNEL);
241 if (!ssp->srcu_sup)
244 spin_lock_init(&ACCESS_PRIVATE(ssp->srcu_sup, lock));
245 ssp->srcu_sup->srcu_size_state = SRCU_SIZE_SMALL;
246 ssp->srcu_sup->node = NULL;
247 mutex_init(&ssp->srcu_sup->srcu_cb_mutex);
248 mutex_init(&ssp->srcu_sup->srcu_gp_mutex);
249 ssp->srcu_idx = 0;
250 ssp->srcu_sup->srcu_gp_seq = 0;
251 ssp->srcu_sup->srcu_barrier_seq = 0;
252 mutex_init(&ssp->srcu_sup->srcu_barrier_mutex);
253 atomic_set(&ssp->srcu_sup->srcu_barrier_cpu_cnt, 0);
254 INIT_DELAYED_WORK(&ssp->srcu_sup->work, process_srcu);
255 ssp->srcu_sup->sda_is_static = is_static;
257 ssp->sda = alloc_percpu(struct srcu_data);
258 if (!ssp->sda)
260 init_srcu_struct_data(ssp);
261 ssp->srcu_sup->srcu_gp_seq_needed_exp = 0;
262 ssp->srcu_sup->srcu_last_gp_end = ktime_get_mono_fast_ns();
263 if (READ_ONCE(ssp->srcu_sup->srcu_size_state) == SRCU_SIZE_SMALL && SRCU_SIZING_IS_INIT()) {
264 if (!init_srcu_struct_nodes(ssp, GFP_ATOMIC))
266 WRITE_ONCE(ssp->srcu_sup->srcu_size_state, SRCU_SIZE_BIG);
268 ssp->srcu_sup->srcu_ssp = ssp;
269 smp_store_release(&ssp->srcu_sup->srcu_gp_seq_needed, 0); /* Init done. */
274 free_percpu(ssp->sda);
275 ssp->sda = NULL;
279 kfree(ssp->srcu_sup);
280 ssp->srcu_sup = NULL;
287 int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
291 debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
292 lockdep_init_map(&ssp->dep_map, name, key, 0);
293 return init_srcu_struct_fields(ssp, false);
301 * @ssp: structure to initialize.
307 int init_srcu_struct(struct srcu_struct *ssp)
309 return init_srcu_struct_fields(ssp, false);
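For orientation, a minimal sketch of how a caller might set up a dynamically initialized srcu_struct; the names my_srcu, my_example_init, and the module wiring are hypothetical illustrations, not taken from the listed file:

#include <linux/module.h>
#include <linux/srcu.h>

static struct srcu_struct my_srcu;              /* hypothetical example structure */

static int __init my_example_init(void)
{
        /* Runtime initialization; statically allocated users can instead say DEFINE_SRCU(my_srcu). */
        return init_srcu_struct(&my_srcu);
}
module_init(my_example_init);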
318 static void __srcu_transition_to_big(struct srcu_struct *ssp)
320 lockdep_assert_held(&ACCESS_PRIVATE(ssp->srcu_sup, lock));
321 smp_store_release(&ssp->srcu_sup->srcu_size_state, SRCU_SIZE_ALLOC);
327 static void srcu_transition_to_big(struct srcu_struct *ssp)
332 if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) != SRCU_SIZE_SMALL)
334 spin_lock_irqsave_rcu_node(ssp->srcu_sup, flags);
335 if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) != SRCU_SIZE_SMALL) {
336 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
339 __srcu_transition_to_big(ssp);
340 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
347 static void spin_lock_irqsave_check_contention(struct srcu_struct *ssp)
351 if (!SRCU_SIZING_IS_CONTEND() || ssp->srcu_sup->srcu_size_state)
354 if (ssp->srcu_sup->srcu_size_jiffies != j) {
355 ssp->srcu_sup->srcu_size_jiffies = j;
356 ssp->srcu_sup->srcu_n_lock_retries = 0;
358 if (++ssp->srcu_sup->srcu_n_lock_retries <= small_contention_lim)
360 __srcu_transition_to_big(ssp);
371 struct srcu_struct *ssp = sdp->ssp;
375 spin_lock_irqsave_rcu_node(ssp->srcu_sup, *flags);
376 spin_lock_irqsave_check_contention(ssp);
377 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, *flags);
387 static void spin_lock_irqsave_ssp_contention(struct srcu_struct *ssp, unsigned long *flags)
389 if (spin_trylock_irqsave_rcu_node(ssp->srcu_sup, *flags))
391 spin_lock_irqsave_rcu_node(ssp->srcu_sup, *flags);
392 spin_lock_irqsave_check_contention(ssp);
399 * to each update-side SRCU primitive. Use ssp->lock, which -is-
403 static void check_init_srcu_struct(struct srcu_struct *ssp)
408 if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_sup->srcu_gp_seq_needed))) /*^^^*/
410 spin_lock_irqsave_rcu_node(ssp->srcu_sup, flags);
411 if (!rcu_seq_state(ssp->srcu_sup->srcu_gp_seq_needed)) {
412 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
415 init_srcu_struct_fields(ssp, true);
416 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
423 static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
429 struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
440 static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
447 struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
454 "Mixed NMI-safe readers for srcu_struct at %ps.\n", ssp);
462 static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
466 unlocks = srcu_readers_unlock_idx(ssp, idx);
537 return srcu_readers_lock_idx(ssp, idx) == unlocks;
543 * @ssp: srcu_struct whose active readers (those holding srcu_read_lock) are to be counted.
549 static bool srcu_readers_active(struct srcu_struct *ssp)
555 struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
616 static unsigned long srcu_get_delay(struct srcu_struct *ssp)
621 struct srcu_usage *sup = ssp->srcu_sup;
641 * @ssp: structure to clean up.
646 void cleanup_srcu_struct(struct srcu_struct *ssp)
649 struct srcu_usage *sup = ssp->srcu_sup;
651 if (WARN_ON(!srcu_get_delay(ssp)))
653 if (WARN_ON(srcu_readers_active(ssp)))
657 struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
666 WARN_ON(srcu_readers_active(ssp))) {
668 __func__, ssp, rcu_seq_state(READ_ONCE(sup->srcu_gp_seq)),
676 free_percpu(ssp->sda);
677 ssp->sda = NULL;
679 ssp->srcu_sup = NULL;
688 void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe)
696 sdp = raw_cpu_ptr(ssp->sda);
712 int __srcu_read_lock(struct srcu_struct *ssp)
716 idx = READ_ONCE(ssp->srcu_idx) & 0x1;
717 this_cpu_inc(ssp->sda->srcu_lock_count[idx].counter);
728 void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
731 this_cpu_inc(ssp->sda->srcu_unlock_count[idx].counter);
742 int __srcu_read_lock_nmisafe(struct srcu_struct *ssp)
745 struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);
747 idx = READ_ONCE(ssp->srcu_idx) & 0x1;
759 void __srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx)
761 struct srcu_data *sdp = raw_cpu_ptr(ssp->sda);
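__srcu_read_lock()/__srcu_read_unlock() (and the _nmisafe variants above) are normally reached through the srcu_read_lock()/srcu_read_unlock() wrappers. A hypothetical reader built on them might look like the sketch below, where my_srcu, my_shared_ptr, and struct my_data are illustrative assumptions:

struct my_data {
        struct rcu_head rh;                     /* used by the updater sketches further down */
        int value;
};

static struct my_data __rcu *my_shared_ptr;     /* hypothetical SRCU-protected pointer */

static int read_value(void)
{
        struct my_data *p;
        int idx, ret = -1;

        idx = srcu_read_lock(&my_srcu);         /* bumps this CPU's srcu_lock_count[idx] */
        p = srcu_dereference(my_shared_ptr, &my_srcu);
        if (p)
                ret = p->value;
        srcu_read_unlock(&my_srcu, idx);        /* bumps this CPU's srcu_unlock_count[idx] */
        return ret;
}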
773 static void srcu_gp_start(struct srcu_struct *ssp)
777 lockdep_assert_held(&ACCESS_PRIVATE(ssp->srcu_sup, lock));
778 WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed));
779 WRITE_ONCE(ssp->srcu_sup->srcu_gp_start, jiffies);
780 WRITE_ONCE(ssp->srcu_sup->srcu_n_exp_nodelay, 0);
782 rcu_seq_start(&ssp->srcu_sup->srcu_gp_seq);
783 state = rcu_seq_state(ssp->srcu_sup->srcu_gp_seq);
821 static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
829 srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
842 static void srcu_gp_end(struct srcu_struct *ssp)
856 struct srcu_usage *sup = ssp->srcu_sup;
880 srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, get_boot_cpu_id()),
884 srcu_for_each_node_breadth_first(ssp, snp) {
902 srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
909 sdp = per_cpu_ptr(ssp->sda, cpu);
926 srcu_gp_start(ssp);
928 srcu_reschedule(ssp, 0);
936 init_srcu_struct_nodes(ssp, GFP_KERNEL);
949 static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
958 if (WARN_ON_ONCE(rcu_seq_done(&ssp->srcu_sup->srcu_gp_seq, s)) ||
970 spin_lock_irqsave_ssp_contention(ssp, &flags);
971 if (ULONG_CMP_LT(ssp->srcu_sup->srcu_gp_seq_needed_exp, s))
972 WRITE_ONCE(ssp->srcu_sup->srcu_gp_seq_needed_exp, s);
973 spin_unlock_irqrestore_rcu_node(ssp->srcu_sup, flags);
989 static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
998 struct srcu_usage *sup = ssp->srcu_sup;
1022 srcu_funnel_exp_start(ssp, snp, s);
1035 spin_lock_irqsave_ssp_contention(ssp, &flags);
1050 srcu_gp_start(ssp);
1059 !!srcu_get_delay(ssp));
1071 static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
1075 curdelay = !srcu_get_delay(ssp);
1078 if (srcu_readers_active_idx_check(ssp, idx))
1091 static void srcu_flip(struct srcu_struct *ssp)
1123 WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1); // Flip the counter.
1157 static bool srcu_might_be_idle(struct srcu_struct *ssp)
1165 check_init_srcu_struct(ssp);
1167 sdp = raw_cpu_ptr(ssp->sda);
1183 tlast = READ_ONCE(ssp->srcu_sup->srcu_last_gp_end);
1189 curseq = rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq);
1191 if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_sup->srcu_gp_seq_needed)))
1194 if (curseq != rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq))
1209 static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
1221 check_init_srcu_struct(ssp);
1227 idx = __srcu_read_lock_nmisafe(ssp);
1228 ss_state = smp_load_acquire(&ssp->srcu_sup->srcu_size_state);
1230 sdp = per_cpu_ptr(ssp->sda, get_boot_cpu_id());
1232 sdp = raw_cpu_ptr(ssp->sda);
1272 s = rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq);
1275 rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
1302 srcu_funnel_gp_start(ssp, sdp, s, do_norm);
1304 srcu_funnel_exp_start(ssp, sdp_mynode, s);
1305 __srcu_read_unlock_nmisafe(ssp, idx);
1337 static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
1347 (void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
1352 * @ssp: srcu_struct on which to queue the callback
1367 void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
1370 __call_srcu(ssp, rhp, func, true);
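As a hedged usage sketch (not code from this file), an updater might queue reclamation through call_srcu() as follows; struct my_data, my_shared_ptr, and my_lock are assumptions carried over from the reader sketch above:

static DEFINE_MUTEX(my_lock);                   /* hypothetical update-side lock */

static void my_data_free(struct rcu_head *rhp)
{
        kfree(container_of(rhp, struct my_data, rh));
}

static void replace_data(struct my_data *newp)
{
        struct my_data *old;

        mutex_lock(&my_lock);
        old = rcu_dereference_protected(my_shared_ptr, lockdep_is_held(&my_lock));
        rcu_assign_pointer(my_shared_ptr, newp);
        mutex_unlock(&my_lock);

        if (old)
                call_srcu(&my_srcu, &old->rh, my_data_free);    /* freed after an SRCU grace period */
}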
1377 static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
1381 srcu_lock_sync(&ssp->dep_map);
1383 RCU_LOCKDEP_WARN(lockdep_is_held(ssp) ||
1392 check_init_srcu_struct(ssp);
1395 __call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
1411 * @ssp: srcu_struct with which to synchronize.
1419 void synchronize_srcu_expedited(struct srcu_struct *ssp)
1421 __synchronize_srcu(ssp, rcu_gp_is_normal());
1427 * @ssp: srcu_struct with which to synchronize.
1472 void synchronize_srcu(struct srcu_struct *ssp)
1474 if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
1475 synchronize_srcu_expedited(ssp);
1477 __synchronize_srcu(ssp, true);
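The synchronous counterpart, again only a sketch under the same assumed names:

        struct my_data *old;

        /* Unpublish, wait for every pre-existing SRCU reader, then free directly. */
        mutex_lock(&my_lock);
        old = rcu_dereference_protected(my_shared_ptr, lockdep_is_held(&my_lock));
        RCU_INIT_POINTER(my_shared_ptr, NULL);
        mutex_unlock(&my_lock);

        synchronize_srcu(&my_srcu);     /* may sleep; must not be called from a matching read-side section */
        kfree(old);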
1483 * @ssp: srcu_struct to provide cookie for.
1491 unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
1496 return rcu_seq_snap(&ssp->srcu_sup->srcu_gp_seq);
1502 * @ssp: srcu_struct to provide cookie for.
1510 unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
1512 return srcu_gp_start_if_needed(ssp, NULL, true);
1518 * @ssp: srcu_struct to provide cookie for.
1541 bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
1543 if (!rcu_seq_done(&ssp->srcu_sup->srcu_gp_seq, cookie))
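Taken together, these three functions form a polling interface; a brief hypothetical sketch, reusing the names from the earlier examples:

        unsigned long cookie;

        /* get_state_synchronize_srcu() only snapshots the grace-period state;
         * start_poll_synchronize_srcu() also starts a grace period if one is needed. */
        cookie = start_poll_synchronize_srcu(&my_srcu);

        /* ... do other work instead of blocking ... */

        if (!poll_state_synchronize_srcu(&my_srcu, cookie))
                synchronize_srcu(&my_srcu);     /* not done yet: fall back to blocking */
        kfree(old);                             /* safe either way */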
1558 struct srcu_struct *ssp;
1561 ssp = sdp->ssp;
1562 if (atomic_dec_and_test(&ssp->srcu_sup->srcu_barrier_cpu_cnt))
1563 complete(&ssp->srcu_sup->srcu_barrier_completion);
1574 static void srcu_barrier_one_cpu(struct srcu_struct *ssp, struct srcu_data *sdp)
1577 atomic_inc(&ssp->srcu_sup->srcu_barrier_cpu_cnt);
1583 atomic_dec(&ssp->srcu_sup->srcu_barrier_cpu_cnt);
1590 * @ssp: srcu_struct on which to wait for in-flight callbacks.
1592 void srcu_barrier(struct srcu_struct *ssp)
1596 unsigned long s = rcu_seq_snap(&ssp->srcu_sup->srcu_barrier_seq);
1598 check_init_srcu_struct(ssp);
1599 mutex_lock(&ssp->srcu_sup->srcu_barrier_mutex);
1600 if (rcu_seq_done(&ssp->srcu_sup->srcu_barrier_seq, s)) {
1602 mutex_unlock(&ssp->srcu_sup->srcu_barrier_mutex);
1605 rcu_seq_start(&ssp->srcu_sup->srcu_barrier_seq);
1606 init_completion(&ssp->srcu_sup->srcu_barrier_completion);
1609 atomic_set(&ssp->srcu_sup->srcu_barrier_cpu_cnt, 1);
1611 idx = __srcu_read_lock_nmisafe(ssp);
1612 if (smp_load_acquire(&ssp->srcu_sup->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
1613 srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, get_boot_cpu_id()));
1616 srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
1617 __srcu_read_unlock_nmisafe(ssp, idx);
1620 if (atomic_dec_and_test(&ssp->srcu_sup->srcu_barrier_cpu_cnt))
1621 complete(&ssp->srcu_sup->srcu_barrier_completion);
1622 wait_for_completion(&ssp->srcu_sup->srcu_barrier_completion);
1624 rcu_seq_end(&ssp->srcu_sup->srcu_barrier_seq);
1625 mutex_unlock(&ssp->srcu_sup->srcu_barrier_mutex);
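Expanding the exit path from the first sketch, srcu_barrier() lets a module drain previously queued callbacks before tearing the structure down; unregister_my_hooks() is a hypothetical placeholder for whatever stops new call_srcu() invocations:

static void __exit my_example_exit(void)
{
        unregister_my_hooks();                  /* hypothetical: no further call_srcu() after this */
        srcu_barrier(&my_srcu);                 /* wait for all previously queued SRCU callbacks to run */
        cleanup_srcu_struct(&my_srcu);          /* now safe: no readers, no pending callbacks */
}
module_exit(my_example_exit);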
1631 * @ssp: srcu_struct on which to report batch completion.
1636 unsigned long srcu_batches_completed(struct srcu_struct *ssp)
1638 return READ_ONCE(ssp->srcu_idx);
1647 static void srcu_advance_state(struct srcu_struct *ssp)
1651 mutex_lock(&ssp->srcu_sup->srcu_gp_mutex);
1663 idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_sup->srcu_gp_seq)); /* ^^^ */
1665 spin_lock_irq_rcu_node(ssp->srcu_sup);
1666 if (ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed)) {
1667 WARN_ON_ONCE(rcu_seq_state(ssp->srcu_sup->srcu_gp_seq));
1668 spin_unlock_irq_rcu_node(ssp->srcu_sup);
1669 mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
1672 idx = rcu_seq_state(READ_ONCE(ssp->srcu_sup->srcu_gp_seq));
1674 srcu_gp_start(ssp);
1675 spin_unlock_irq_rcu_node(ssp->srcu_sup);
1677 mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
1682 if (rcu_seq_state(READ_ONCE(ssp->srcu_sup->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
1683 idx = 1 ^ (ssp->srcu_idx & 1);
1684 if (!try_check_zero(ssp, idx, 1)) {
1685 mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
1688 srcu_flip(ssp);
1689 spin_lock_irq_rcu_node(ssp->srcu_sup);
1690 rcu_seq_set_state(&ssp->srcu_sup->srcu_gp_seq, SRCU_STATE_SCAN2);
1691 ssp->srcu_sup->srcu_n_exp_nodelay = 0;
1692 spin_unlock_irq_rcu_node(ssp->srcu_sup);
1695 if (rcu_seq_state(READ_ONCE(ssp->srcu_sup->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
1701 idx = 1 ^ (ssp->srcu_idx & 1);
1702 if (!try_check_zero(ssp, idx, 2)) {
1703 mutex_unlock(&ssp->srcu_sup->srcu_gp_mutex);
1706 ssp->srcu_sup->srcu_n_exp_nodelay = 0;
1707 srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */
1724 struct srcu_struct *ssp;
1728 ssp = sdp->ssp;
1733 rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq));
1778 static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
1782 spin_lock_irq_rcu_node(ssp->srcu_sup);
1783 if (ULONG_CMP_GE(ssp->srcu_sup->srcu_gp_seq, ssp->srcu_sup->srcu_gp_seq_needed)) {
1784 if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_sup->srcu_gp_seq))) {
1788 } else if (!rcu_seq_state(ssp->srcu_sup->srcu_gp_seq)) {
1790 srcu_gp_start(ssp);
1792 spin_unlock_irq_rcu_node(ssp->srcu_sup);
1795 queue_delayed_work(rcu_gp_wq, &ssp->srcu_sup->work, delay);
1805 struct srcu_struct *ssp;
1809 ssp = sup->srcu_ssp;
1811 srcu_advance_state(ssp);
1812 curdelay = srcu_get_delay(ssp);
1826 srcu_reschedule(ssp, curdelay);
1830 struct srcu_struct *ssp, int *flags,
1836 *gp_seq = rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq);
1853 void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
1858 int ss_state = READ_ONCE(ssp->srcu_sup->srcu_size_state);
1861 idx = ssp->srcu_idx & 0x1;
1865 tt, tf, rcu_seq_current(&ssp->srcu_sup->srcu_gp_seq), ss_state,
1867 if (!ssp->sda) {
1878 sdp = per_cpu_ptr(ssp->sda, cpu);
1902 srcu_transition_to_big(ssp);
1958 struct srcu_struct *ssp;
1962 ssp = *(sspp++);
1963 ssp->sda = alloc_percpu(struct srcu_data);
1964 if (WARN_ON_ONCE(!ssp->sda))
1974 struct srcu_struct *ssp;
1978 ssp = *(sspp++);
1979 if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_sup->srcu_gp_seq_needed)) &&
1980 !WARN_ON_ONCE(!ssp->srcu_sup->sda_is_static))
1981 cleanup_srcu_struct(ssp);
1982 if (!WARN_ON(srcu_readers_active(ssp)))
1983 free_percpu(ssp->sda);