/*-
 * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
 * Copyright (c) 2017-2021 Hans Petter Selasky (hselasky@freebsd.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/kdb.h>

#include <ck_epoch.h>

#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/llist.h>
#include <linux/irq_work.h>

/*
 * When CONFIG_NO_RCU_SKIP is defined, LinuxKPI RCU locks and asserts
 * are not skipped during panic().
 */
#ifdef CONFIG_NO_RCU_SKIP
#define	RCU_SKIP(void) 0
#else
#define	RCU_SKIP(void)	unlikely(SCHEDULER_STOPPED() || kdb_active)
#endif

struct callback_head {
	union {
		STAILQ_ENTRY(callback_head) entry;
		struct llist_node node;
	};
	rcu_callback_t func;
};

struct linux_epoch_head {
	struct llist_head cb_head;
	struct task task;
} __aligned(CACHE_LINE_SIZE);

struct linux_epoch_record {
	ck_epoch_record_t epoch_record;
	TAILQ_HEAD(, task_struct) ts_head;
	int cpuid;
	int type;
} __aligned(CACHE_LINE_SIZE);

/*
 * Verify that "struct rcu_head" is big enough to hold "struct
 * callback_head". This has been done to avoid having to add special
 * compile flags for including ck_epoch.h to all clients of the
 * LinuxKPI.
 */
CTASSERT(sizeof(struct rcu_head) == sizeof(struct callback_head));

/*
 * Verify that "rcu_section[0]" has the same size as
 * "ck_epoch_section_t". This has been done to avoid having to add
 * special compile flags for including ck_epoch.h to all clients of
 * the LinuxKPI.
 */
CTASSERT(sizeof(((struct task_struct *)0)->rcu_section[0]) ==
    sizeof(ck_epoch_section_t));

/*
 * Verify that "epoch_record" is at the beginning of "struct
 * linux_epoch_record":
 */
CTASSERT(offsetof(struct linux_epoch_record, epoch_record) == 0);

CTASSERT(TS_RCU_TYPE_MAX == RCU_TYPE_MAX);

static ck_epoch_t linux_epoch[RCU_TYPE_MAX];
static struct linux_epoch_head linux_epoch_head[RCU_TYPE_MAX];
DPCPU_DEFINE_STATIC(struct linux_epoch_record, linux_epoch_record[RCU_TYPE_MAX]);

static void linux_rcu_cleaner_func(void *, int);

static void
linux_rcu_runtime_init(void *arg __unused)
{
	struct linux_epoch_head *head;
	int i;
	int j;

	for (j = 0; j != RCU_TYPE_MAX; j++) {
		ck_epoch_init(&linux_epoch[j]);

		head = &linux_epoch_head[j];

		TASK_INIT(&head->task, 0, linux_rcu_cleaner_func, head);
		init_llist_head(&head->cb_head);

		CPU_FOREACH(i) {
			struct linux_epoch_record *record;

			record = &DPCPU_ID_GET(i, linux_epoch_record[j]);

			record->cpuid = i;
			record->type = j;
			ck_epoch_register(&linux_epoch[j],
			    &record->epoch_record, NULL);
			TAILQ_INIT(&record->ts_head);
		}
	}
}
SYSINIT(linux_rcu_runtime, SI_SUB_CPU, SI_ORDER_ANY, linux_rcu_runtime_init, NULL);

static void
linux_rcu_cleaner_func(void *context, int pending __unused)
{
	struct linux_epoch_head *head = context;
	struct callback_head *rcu;
	STAILQ_HEAD(, callback_head) tmp_head;
	struct llist_node *node, *next;
	uintptr_t offset;

	/* move the current callbacks into our own queue */
	STAILQ_INIT(&tmp_head);
	llist_for_each_safe(node, next, llist_del_all(&head->cb_head)) {
		rcu = container_of(node, struct callback_head, node);
		/* re-reverse the list to restore chronological order */
		STAILQ_INSERT_HEAD(&tmp_head, rcu, entry);
	}

	/* synchronize */
	linux_synchronize_rcu(head - linux_epoch_head);

	/* dispatch all callbacks, if any */
	while ((rcu = STAILQ_FIRST(&tmp_head)) != NULL) {
		STAILQ_REMOVE_HEAD(&tmp_head, entry);

		offset = (uintptr_t)rcu->func;

		/*
		 * Small "func" values are not function pointers, but
		 * the offset of the rcu_head within the enclosing
		 * object, as stored by kfree_rcu(). In that case free
		 * the enclosing object directly.
		 */
		if (offset < LINUX_KFREE_RCU_OFFSET_MAX)
			kfree((char *)rcu - offset);
		else
			rcu->func((struct rcu_head *)rcu);
	}
}

void
linux_rcu_read_lock(unsigned type)
{
	struct linux_epoch_record *record;
	struct task_struct *ts;

	MPASS(type < RCU_TYPE_MAX);

	if (RCU_SKIP())
		return;

	ts = current;

	/* assert valid refcount */
	MPASS(ts->rcu_recurse[type] != INT_MAX);

	if (++(ts->rcu_recurse[type]) != 1)
		return;

	/*
	 * Pin the thread to the current CPU so that the unlock code
	 * gets the same per-CPU epoch record:
	 */
	sched_pin();

	record = &DPCPU_GET(linux_epoch_record[type]);

	/*
	 * Use a critical section to prevent recursion inside
	 * ck_epoch_begin(). Otherwise this function supports
	 * recursion.
	 */
	critical_enter();
	ck_epoch_begin(&record->epoch_record,
	    (ck_epoch_section_t *)&ts->rcu_section[type]);
	TAILQ_INSERT_TAIL(&record->ts_head, ts, rcu_entry[type]);
	critical_exit();
}

void
linux_rcu_read_unlock(unsigned type)
{
	struct linux_epoch_record *record;
	struct task_struct *ts;

	MPASS(type < RCU_TYPE_MAX);

	if (RCU_SKIP())
		return;

	ts = current;

	/* assert valid refcount */
	MPASS(ts->rcu_recurse[type] > 0);

	if (--(ts->rcu_recurse[type]) != 0)
		return;

	record = &DPCPU_GET(linux_epoch_record[type]);

	/*
	 * Use a critical section to prevent recursion inside
	 * ck_epoch_end(). Otherwise this function supports recursion.
	 */
	critical_enter();
	ck_epoch_end(&record->epoch_record,
	    (ck_epoch_section_t *)&ts->rcu_section[type]);
	TAILQ_REMOVE(&record->ts_head, ts, rcu_entry[type]);
	critical_exit();

	sched_unpin();
}

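/*
 * Illustrative read-side sketch (hypothetical consumer code, not part
 * of this file): "struct foo" and foo_lookup() are made-up names. A
 * LinuxKPI client typically brackets a lockless list traversal with
 * rcu_read_lock()/rcu_read_unlock(), which map onto
 * linux_rcu_read_lock(RCU_TYPE_REGULAR) and
 * linux_rcu_read_unlock(RCU_TYPE_REGULAR) above:
 *
 *	struct foo {
 *		int key;
 *		int value;
 *		struct list_head entry;
 *		struct rcu_head rcu;
 *	};
 *
 *	static int
 *	foo_lookup(struct list_head *head, int key)
 *	{
 *		struct foo *f;
 *		int value = -1;
 *
 *		rcu_read_lock();
 *		list_for_each_entry_rcu(f, head, entry) {
 *			if (f->key == key) {
 *				value = f->value;
 *				break;
 *			}
 *		}
 *		rcu_read_unlock();
 *
 *		return (value);
 *	}
 */
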
static void
linux_synchronize_rcu_cb(ck_epoch_t *epoch __unused,
    ck_epoch_record_t *epoch_record, void *arg __unused)
{
	struct linux_epoch_record *record =
	    container_of(epoch_record, struct linux_epoch_record, epoch_record);
	struct thread *td = curthread;
	struct task_struct *ts;

	/* check if blocked on the current CPU */
	if (record->cpuid == PCPU_GET(cpuid)) {
		bool is_sleeping = false;
		u_char prio = 0;

		/*
		 * Find the lowest-priority or sleeping thread which
		 * is blocking synchronization on this CPU core. All
		 * the threads in the queue are CPU-pinned and cannot
		 * go anywhere while the current thread is locked.
		 */
		TAILQ_FOREACH(ts, &record->ts_head, rcu_entry[record->type]) {
			if (ts->task_thread->td_priority > prio)
				prio = ts->task_thread->td_priority;
			is_sleeping |= (ts->task_thread->td_inhibitors != 0);
		}

		if (is_sleeping) {
			thread_unlock(td);
			pause("W", 1);
			thread_lock(td);
		} else {
			/* set new thread priority */
			sched_prio(td, prio);
			/* task switch */
			mi_switch(SW_VOL | SWT_RELINQUISH);
			/*
			 * It is important that the thread lock is
			 * dropped while yielding, to allow other
			 * threads to acquire the lock pointed to by
			 * TDQ_LOCKPTR(td). Currently mi_switch() will
			 * unlock the thread lock before returning;
			 * otherwise a deadlock-like situation might
			 * occur.
			 */
			thread_lock(td);
		}
	} else {
		/*
		 * To avoid spinning, move execution to the other CPU
		 * which is blocking synchronization. Set the highest
		 * thread priority so that the code gets run. The
		 * thread priority will be restored later.
		 */
		sched_prio(td, 0);
		sched_bind(td, record->cpuid);
	}
}

void
linux_synchronize_rcu(unsigned type)
{
	struct thread *td;
	int was_bound;
	int old_cpu;
	int old_pinned;
	u_char old_prio;

	MPASS(type < RCU_TYPE_MAX);

	if (RCU_SKIP())
		return;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "linux_synchronize_rcu() can sleep");

	td = curthread;
	DROP_GIANT();

	/*
	 * Synchronizing RCU might change the CPU core this function
	 * is running on. Save current values:
	 */
	thread_lock(td);

	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	old_prio = td->td_priority;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;
	sched_bind(td, old_cpu);

	ck_epoch_synchronize_wait(&linux_epoch[type],
	    &linux_synchronize_rcu_cb, NULL);

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	/* restore thread priority */
	sched_prio(td, old_prio);
	thread_unlock(td);

	PICKUP_GIANT();
}

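/*
 * Illustrative update-side sketch (hypothetical consumer code, not
 * part of this file), continuing the "struct foo" example above: an
 * updater unlinks the element under its own lock, waits for all
 * readers currently inside rcu_read_lock() sections by calling
 * synchronize_rcu(), and only then frees the memory:
 *
 *	static void
 *	foo_remove(spinlock_t *lock, struct foo *f)
 *	{
 *		spin_lock(lock);
 *		list_del_rcu(&f->entry);
 *		spin_unlock(lock);
 *
 *		synchronize_rcu();
 *		kfree(f);
 *	}
 */
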
void
linux_rcu_barrier(unsigned type)
{
	struct linux_epoch_head *head;

	MPASS(type < RCU_TYPE_MAX);

	/*
	 * This function is not obligated to wait for a grace period.
	 * It only waits for RCU callbacks that have already been posted.
	 * If there are no RCU callbacks posted, rcu_barrier() can return
	 * immediately.
	 */
	head = &linux_epoch_head[type];

	/* wait for callbacks to complete */
	taskqueue_drain(linux_irq_work_tq, &head->task);
}

void
linux_call_rcu(unsigned type, struct rcu_head *context, rcu_callback_t func)
{
	struct callback_head *rcu;
	struct linux_epoch_head *head;

	MPASS(type < RCU_TYPE_MAX);

	rcu = (struct callback_head *)context;
	head = &linux_epoch_head[type];

	rcu->func = func;
	llist_add(&rcu->node, &head->cb_head);
	taskqueue_enqueue(linux_irq_work_tq, &head->task);
}

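/*
 * Illustrative deferred-free sketch (hypothetical consumer code, not
 * part of this file): call_rcu() queues the callback onto "cb_head"
 * above and the callback later runs from the linux_irq_work_tq
 * taskqueue, after linux_rcu_cleaner_func() has waited for a grace
 * period. kfree_rcu() is the variant which stores the offset of the
 * rcu_head instead of a function pointer, handled by the
 * LINUX_KFREE_RCU_OFFSET_MAX check above:
 *
 *	static void
 *	foo_free_cb(struct rcu_head *head)
 *	{
 *		struct foo *f = container_of(head, struct foo, rcu);
 *
 *		kfree(f);
 *	}
 *
 *	static void
 *	foo_remove_deferred(struct foo *f)
 *	{
 *		list_del_rcu(&f->entry);
 *		call_rcu(&f->rcu, foo_free_cb);
 *	}
 */
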
int
init_srcu_struct(struct srcu_struct *srcu)
{
	return (0);
}

void
cleanup_srcu_struct(struct srcu_struct *srcu)
{
}

int
srcu_read_lock(struct srcu_struct *srcu)
{
	linux_rcu_read_lock(RCU_TYPE_SLEEPABLE);
	return (0);
}

void
srcu_read_unlock(struct srcu_struct *srcu, int key __unused)
{
	linux_rcu_read_unlock(RCU_TYPE_SLEEPABLE);
}

void
synchronize_srcu(struct srcu_struct *srcu)
{
	linux_synchronize_rcu(RCU_TYPE_SLEEPABLE);
}

void
srcu_barrier(struct srcu_struct *srcu)
{
	linux_rcu_barrier(RCU_TYPE_SLEEPABLE);
}

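/*
 * Illustrative SRCU sketch (hypothetical consumer code, not part of
 * this file): "foo_srcu" and foo_read() are made-up names. SRCU
 * read-side sections use the sleepable RCU type above, whose
 * grace-period code tolerates readers that sleep. The key returned by
 * srcu_read_lock() must be passed back to srcu_read_unlock(), and
 * init_srcu_struct() is expected to have been called first:
 *
 *	static struct srcu_struct foo_srcu;
 *
 *	static int
 *	foo_read(struct foo *f)
 *	{
 *		int idx;
 *		int value;
 *
 *		idx = srcu_read_lock(&foo_srcu);
 *		value = f->value;
 *		srcu_read_unlock(&foo_srcu, idx);
 *
 *		return (value);
 *	}
 */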