1/* SPDX-License-Identifier: GPL-2.0+ */
2/*
3 * Sleepable Read-Copy Update mechanism for mutual exclusion,
4 *	tiny variant.
5 *
6 * Copyright (C) IBM Corporation, 2017
7 *
8 * Author: Paul McKenney <paulmck@linux.ibm.com>
9 */
10
11#ifndef _LINUX_SRCU_TINY_H
12#define _LINUX_SRCU_TINY_H
13
14#include <linux/swait.h>
15
/*
 * Per-instance Tiny SRCU state: a pair of reader-nesting counters (one
 * per grace-period phase selected by bit 0x2 of ->srcu_idx), grace-period
 * bookkeeping, and a singly linked callback list drained by the
 * srcu_drive_gp() workqueue handler attached to ->srcu_work.
 */
struct srcu_struct {
	short srcu_lock_nesting[2];	/* srcu_read_lock() nesting depth. */
	u8 srcu_gp_running;		/* GP workqueue running? */
	u8 srcu_gp_waiting;		/* GP waiting for readers? */
	unsigned long srcu_idx;		/* Current reader array element in bit 0x2. */
	unsigned long srcu_idx_max;	/* Furthest future srcu_idx request. */
	struct swait_queue_head srcu_wq;
					/* Last srcu_read_unlock() wakes GP. */
	struct rcu_head *srcu_cb_head;	/* Pending callbacks: Head. */
	struct rcu_head **srcu_cb_tail;	/* Pending callbacks: Tail. */
	struct work_struct srcu_work;	/* For driving grace periods. */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
};
31
32void srcu_drive_gp(struct work_struct *wp);
33
/*
 * Static initializer for struct srcu_struct.  The callback list starts
 * empty (->srcu_cb_tail points at ->srcu_cb_head) and srcu_drive_gp() is
 * pre-installed as the grace-period work handler.  The two trailing
 * arguments are unused here; Tree SRCU's version of this macro needs
 * them for its per-CPU data, so the signatures must match.
 */
#define __SRCU_STRUCT_INIT(name, __ignored, ___ignored)			\
{									\
	.srcu_wq = __SWAIT_QUEUE_HEAD_INITIALIZER(name.srcu_wq),	\
	.srcu_cb_tail = &name.srcu_cb_head,				\
	.srcu_work = __WORK_INITIALIZER(name.srcu_work, srcu_drive_gp),	\
	__SRCU_DEP_MAP_INIT(name)					\
}
41
42/*
43 * This odd _STATIC_ arrangement is needed for API compatibility with
44 * Tree SRCU, which needs some per-CPU data.
45 */
46#define DEFINE_SRCU(name) \
47	struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name)
48#define DEFINE_STATIC_SRCU(name) \
49	static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name)
50
// Dummy structure for srcu_notifier_head.  Tiny SRCU keeps no separate
// usage state, so this empty placeholder (and its empty initializer)
// exists only so code shared with Tree SRCU has a type to embed.
struct srcu_usage { };
#define __SRCU_USAGE_INIT(name) { }
54
55void synchronize_srcu(struct srcu_struct *ssp);
56
57/*
58 * Counts the new reader in the appropriate per-CPU element of the
59 * srcu_struct.  Can be invoked from irq/bh handlers, but the matching
60 * __srcu_read_unlock() must be in the same handler instance.  Returns an
61 * index that must be passed to the matching srcu_read_unlock().
62 */
63static inline int __srcu_read_lock(struct srcu_struct *ssp)
64{
65	int idx;
66
67	idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
68	WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1);
69	return idx;
70}
71
/*
 * Expedited grace-period wait.  Tiny SRCU draws no distinction between
 * normal and expedited grace periods, so this is a plain pass-through
 * to synchronize_srcu().
 */
static inline void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
	synchronize_srcu(ssp);
}
76
/*
 * Wait for all previously queued SRCU callbacks on @ssp to be invoked.
 * Implemented as a full grace-period wait; NOTE(review): presumably all
 * pending callbacks have run once a Tiny SRCU grace period completes —
 * confirm against srcu_drive_gp() in srcutiny.c.
 */
static inline void srcu_barrier(struct srcu_struct *ssp)
{
	synchronize_srcu(ssp);
}
81
82/* Defined here to avoid size increase for non-torture kernels. */
83static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
84					    char *tt, char *tf)
85{
86	int idx;
87
88	idx = ((data_race(READ_ONCE(ssp->srcu_idx)) + 1) & 0x2) >> 1;
89	pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd) gp: %lu->%lu\n",
90		 tt, tf, idx,
91		 data_race(READ_ONCE(ssp->srcu_lock_nesting[!idx])),
92		 data_race(READ_ONCE(ssp->srcu_lock_nesting[idx])),
93		 data_race(READ_ONCE(ssp->srcu_idx)),
94		 data_race(READ_ONCE(ssp->srcu_idx_max)));
95}
96
97#endif
98