/*-
 * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
 * Copyright (c) 2017-2020 Hans Petter Selasky (hselasky@freebsd.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/compat/linuxkpi/common/src/linux_rcu.c 364391 2020-08-19 13:35:32Z hselasky $");

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/kdb.h>

#include <ck_epoch.h>

#include <linux/rcupdate.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/compat.h>

/*
 * When CONFIG_NO_RCU_SKIP is defined, LinuxKPI RCU locks and asserts
 * are not skipped during panic().
 */
#ifdef CONFIG_NO_RCU_SKIP
#define	RCU_SKIP(void) 0
#else
#define	RCU_SKIP(void)	unlikely(SCHEDULER_STOPPED() || kdb_active)
#endif

struct callback_head {
	STAILQ_ENTRY(callback_head) entry;
	rcu_callback_t func;
};

struct linux_epoch_head {
	STAILQ_HEAD(, callback_head) cb_head;
	struct mtx lock;
	struct task task;
} __aligned(CACHE_LINE_SIZE);

struct linux_epoch_record {
	ck_epoch_record_t epoch_record;
	TAILQ_HEAD(, task_struct) ts_head;
	int cpuid;
	int type;
} __aligned(CACHE_LINE_SIZE);

/*
 * Verify that "struct rcu_head" is big enough to hold "struct
 * callback_head". This is done to avoid having to add special compile
 * flags for including ck_epoch.h to all clients of the LinuxKPI.
 */
CTASSERT(sizeof(struct rcu_head) == sizeof(struct callback_head));

/*
 * Verify that "epoch_record" is at the beginning of "struct
 * linux_epoch_record":
 */
CTASSERT(offsetof(struct linux_epoch_record, epoch_record) == 0);

CTASSERT(TS_RCU_TYPE_MAX == RCU_TYPE_MAX);

static ck_epoch_t linux_epoch[RCU_TYPE_MAX];
static struct linux_epoch_head linux_epoch_head[RCU_TYPE_MAX];
static DPCPU_DEFINE(struct linux_epoch_record, linux_epoch_record[RCU_TYPE_MAX]);

static void linux_rcu_cleaner_func(void *, int);

static void
linux_rcu_runtime_init(void *arg __unused)
{
	struct linux_epoch_head *head;
	int i;
	int j;

	for (j = 0; j != RCU_TYPE_MAX; j++) {
		ck_epoch_init(&linux_epoch[j]);

		head = &linux_epoch_head[j];

		mtx_init(&head->lock, "LRCU-HEAD", NULL, MTX_DEF);
		TASK_INIT(&head->task, 0, linux_rcu_cleaner_func, head);
		STAILQ_INIT(&head->cb_head);

		CPU_FOREACH(i) {
			struct linux_epoch_record *record;

			record = &DPCPU_ID_GET(i, linux_epoch_record[j]);

			record->cpuid = i;
			record->type = j;
			ck_epoch_register(&linux_epoch[j],
			    &record->epoch_record, NULL);
			TAILQ_INIT(&record->ts_head);
		}
	}
}
SYSINIT(linux_rcu_runtime, SI_SUB_CPU, SI_ORDER_ANY, linux_rcu_runtime_init, NULL);

static void
linux_rcu_runtime_uninit(void *arg __unused)
{
	struct linux_epoch_head *head;
	int j;

	for (j = 0; j != RCU_TYPE_MAX; j++) {
		head = &linux_epoch_head[j];

		mtx_destroy(&head->lock);
	}
}
SYSUNINIT(linux_rcu_runtime, SI_SUB_LOCK, SI_ORDER_SECOND, linux_rcu_runtime_uninit, NULL);

static void
linux_rcu_cleaner_func(void *context, int pending __unused)
{
	struct linux_epoch_head *head;
	struct callback_head *rcu;
	STAILQ_HEAD(, callback_head) tmp_head;
	uintptr_t offset;

	linux_set_current(curthread);

	head = context;

	/* move current callbacks into a local queue */
	mtx_lock(&head->lock);
	STAILQ_INIT(&tmp_head);
	STAILQ_CONCAT(&tmp_head, &head->cb_head);
	mtx_unlock(&head->lock);

	/* synchronize */
	linux_synchronize_rcu(head - linux_epoch_head);

	/* dispatch all callbacks, if any */
	while ((rcu = STAILQ_FIRST(&tmp_head)) != NULL) {
		STAILQ_REMOVE_HEAD(&tmp_head, entry);

		offset = (uintptr_t)rcu->func;

		if (offset < LINUX_KFREE_RCU_OFFSET_MAX)
			kfree((char *)rcu - offset);
		else
			rcu->func((struct rcu_head *)rcu);
	}
}
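
/*
 * Note on the LINUX_KFREE_RCU_OFFSET_MAX test in the cleaner above:
 * kfree_rcu() does not pass a real callback.  Instead it encodes the
 * byte offset of the "rcu_head" member within its enclosing structure
 * as the function pointer.  A minimal caller-side sketch of the idea,
 * using a hypothetical "struct foo" (illustrative only; the actual
 * macro lives in <linux/rcupdate.h>):
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	// roughly what kfree_rcu(p, rcu) expands to:
 *	call_rcu(&p->rcu, (rcu_callback_t)offsetof(struct foo, rcu));
 *
 * Because structure offsets are always smaller than any valid function
 * pointer, the cleaner can tell the two cases apart and simply
 * kfree() the enclosing allocation.
 */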

void
linux_rcu_read_lock(unsigned type)
{
	struct linux_epoch_record *record;
	struct task_struct *ts;

	MPASS(type < RCU_TYPE_MAX);

	if (RCU_SKIP())
		return;

	/*
	 * Pin thread to current CPU so that the unlock code gets the
	 * same per-CPU epoch record:
	 */
	sched_pin();

	record = &DPCPU_GET(linux_epoch_record[type]);
	ts = current;

	/*
	 * Use a critical section to prevent recursion inside
	 * ck_epoch_begin(). Apart from that, this function supports
	 * recursion.
	 */
	critical_enter();
	ck_epoch_begin(&record->epoch_record, NULL);
	ts->rcu_recurse[type]++;
	if (ts->rcu_recurse[type] == 1)
		TAILQ_INSERT_TAIL(&record->ts_head, ts, rcu_entry[type]);
	critical_exit();
}

void
linux_rcu_read_unlock(unsigned type)
{
	struct linux_epoch_record *record;
	struct task_struct *ts;

	MPASS(type < RCU_TYPE_MAX);

	if (RCU_SKIP())
		return;

	record = &DPCPU_GET(linux_epoch_record[type]);
	ts = current;

	/*
	 * Use a critical section to prevent recursion inside
	 * ck_epoch_end(). Apart from that, this function supports
	 * recursion.
	 */
	critical_enter();
	ck_epoch_end(&record->epoch_record, NULL);
	ts->rcu_recurse[type]--;
	if (ts->rcu_recurse[type] == 0)
		TAILQ_REMOVE(&record->ts_head, ts, rcu_entry[type]);
	critical_exit();

	sched_unpin();
}
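
/*
 * The two functions above implement the read side of the LinuxKPI RCU
 * primitives.  A minimal reader sketch, assuming a hypothetical
 * RCU-protected pointer "foo_ptr" published elsewhere with
 * rcu_assign_pointer() (illustrative only, not part of this file):
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(foo_ptr);
 *	if (p != NULL)
 *		handle_foo(p);		// hypothetical consumer
 *	rcu_read_unlock();
 *
 * In the LinuxKPI headers, rcu_read_lock()/rcu_read_unlock() are
 * expected to map onto linux_rcu_read_lock()/linux_rcu_read_unlock()
 * with RCU_TYPE_REGULAR; the read-side section is pinned to one CPU.
 */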

static void
linux_synchronize_rcu_cb(ck_epoch_t *epoch __unused, ck_epoch_record_t *epoch_record, void *arg __unused)
{
	struct linux_epoch_record *record =
	    container_of(epoch_record, struct linux_epoch_record, epoch_record);
	struct thread *td = curthread;
	struct task_struct *ts;

	/* check if blocked on the current CPU */
	if (record->cpuid == PCPU_GET(cpuid)) {
		bool is_sleeping = false;
		u_char prio = 0;

		/*
		 * Find the lowest-priority or sleeping thread which
		 * is blocking synchronization on this CPU core. All
		 * the threads in the queue are CPU-pinned and cannot
		 * go anywhere while the current thread is locked.
		 */
		TAILQ_FOREACH(ts, &record->ts_head, rcu_entry[record->type]) {
			if (ts->task_thread->td_priority > prio)
				prio = ts->task_thread->td_priority;
			is_sleeping |= (ts->task_thread->td_inhibitors != 0);
		}

		if (is_sleeping) {
			thread_unlock(td);
			pause("W", 1);
			thread_lock(td);
		} else {
			/* set new thread priority */
			sched_prio(td, prio);
			/* task switch */
			mi_switch(SW_VOL | SWT_RELINQUISH, NULL);

			/*
			 * Release the thread lock while yielding to
			 * allow other threads to acquire the lock
			 * pointed to by TDQ_LOCKPTR(td). Otherwise a
			 * deadlock-like situation might happen.
			 */
			thread_unlock(td);
			thread_lock(td);
		}
	} else {
		/*
		 * To avoid spinning, move execution to the other CPU
		 * which is blocking synchronization. Set the highest
		 * thread priority so that code gets run. The thread
		 * priority will be restored later.
		 */
		sched_prio(td, 0);
		sched_bind(td, record->cpuid);
	}
}

void
linux_synchronize_rcu(unsigned type)
{
	struct thread *td;
	int was_bound;
	int old_cpu;
	int old_pinned;
	u_char old_prio;

	MPASS(type < RCU_TYPE_MAX);

	if (RCU_SKIP())
		return;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "linux_synchronize_rcu() can sleep");

	td = curthread;
	DROP_GIANT();

	/*
	 * Synchronizing RCU might change the CPU core this function
	 * is running on. Save current values:
	 */
	thread_lock(td);

	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	old_prio = td->td_priority;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;
	sched_bind(td, old_cpu);

	ck_epoch_synchronize_wait(&linux_epoch[type],
	    &linux_synchronize_rcu_cb, NULL);

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	/* restore thread priority */
	sched_prio(td, old_prio);
	thread_unlock(td);

	PICKUP_GIANT();
}
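
/*
 * A minimal updater-side sketch using the function above, with the
 * same hypothetical "foo_ptr" as in the reader example (illustrative
 * only, not part of this file):
 *
 *	struct foo *old, *new;
 *
 *	new = kmalloc(sizeof(*new), GFP_KERNEL);
 *	old = foo_ptr;
 *	rcu_assign_pointer(foo_ptr, new);
 *	synchronize_rcu();	// wait for all pre-existing readers
 *	kfree(old);		// safe: no reader can still see "old"
 *
 * synchronize_rcu() may sleep and must not be called from within an
 * RCU read-side section.
 */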

void
linux_rcu_barrier(unsigned type)
{
	struct linux_epoch_head *head;

	MPASS(type < RCU_TYPE_MAX);

	linux_synchronize_rcu(type);

	head = &linux_epoch_head[type];

	/* wait for callbacks to complete */
	taskqueue_drain(taskqueue_fast, &head->task);
}

void
linux_call_rcu(unsigned type, struct rcu_head *context, rcu_callback_t func)
{
	struct callback_head *rcu;
	struct linux_epoch_head *head;

	MPASS(type < RCU_TYPE_MAX);

	rcu = (struct callback_head *)context;
	head = &linux_epoch_head[type];

	mtx_lock(&head->lock);
	rcu->func = func;
	STAILQ_INSERT_TAIL(&head->cb_head, rcu, entry);
	taskqueue_enqueue(taskqueue_fast, &head->task);
	mtx_unlock(&head->lock);
}
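
/*
 * A minimal deferred-free sketch built on linux_call_rcu() via the
 * call_rcu() macro, assuming a hypothetical "struct foo" with an
 * embedded rcu_head (illustrative only, not part of this file):
 *
 *	static void
 *	foo_free_cb(struct rcu_head *head)
 *	{
 *		struct foo *p = container_of(head, struct foo, rcu);
 *
 *		kfree(p);
 *	}
 *
 *	call_rcu(&p->rcu, foo_free_cb);
 *
 * The callback runs from taskqueue_fast context after a grace period,
 * so it must not assume that any of the caller's locks are held.
 */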

int
init_srcu_struct(struct srcu_struct *srcu)
{
	return (0);
}

void
cleanup_srcu_struct(struct srcu_struct *srcu)
{
}

int
srcu_read_lock(struct srcu_struct *srcu)
{
	linux_rcu_read_lock(RCU_TYPE_SLEEPABLE);
	return (0);
}

void
srcu_read_unlock(struct srcu_struct *srcu, int key __unused)
{
	linux_rcu_read_unlock(RCU_TYPE_SLEEPABLE);
}

void
synchronize_srcu(struct srcu_struct *srcu)
{
	linux_synchronize_rcu(RCU_TYPE_SLEEPABLE);
}

void
srcu_barrier(struct srcu_struct *srcu)
{
	linux_rcu_barrier(RCU_TYPE_SLEEPABLE);
}
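
/*
 * A minimal sleepable-RCU (SRCU) sketch using the wrappers above,
 * assuming a hypothetical "struct srcu_struct foo_srcu" initialized
 * with init_srcu_struct() (illustrative only, not part of this file):
 *
 *	int idx;
 *
 *	idx = srcu_read_lock(&foo_srcu);
 *	// read-side section; sleeping readers are tolerated here
 *	srcu_read_unlock(&foo_srcu, idx);
 *
 *	synchronize_srcu(&foo_srcu);	// wait for sleepable readers
 *
 * In this implementation the "srcu" argument and the returned index
 * are unused; all SRCU users share the single RCU_TYPE_SLEEPABLE
 * epoch.
 */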