/*-
 * Copyright (c) 2016 Matthew Macy (mmacy@mattmacy.io)
 * Copyright (c) 2017-2021 Hans Petter Selasky (hselasky@freebsd.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/compat/linuxkpi/common/src/linux_rcu.c 369549 2021-04-06 10:30:29Z hselasky $");

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/kdb.h>

#include <ck_epoch.h>

#include <linux/rcupdate.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/compat.h>

/*
 * By defining CONFIG_NO_RCU_SKIP, LinuxKPI RCU locks and asserts will
 * not be skipped during panic().
 */
#ifdef CONFIG_NO_RCU_SKIP
#define	RCU_SKIP(void) 0
#else
#define	RCU_SKIP(void) unlikely(SCHEDULER_STOPPED() || kdb_active)
#endif

struct callback_head {
	STAILQ_ENTRY(callback_head) entry;
	rcu_callback_t func;
};

struct linux_epoch_head {
	STAILQ_HEAD(, callback_head) cb_head;
	struct mtx lock;
	struct task task;
} __aligned(CACHE_LINE_SIZE);

struct linux_epoch_record {
	ck_epoch_record_t epoch_record;
	TAILQ_HEAD(, task_struct) ts_head;
	int cpuid;
	int type;
} __aligned(CACHE_LINE_SIZE);

/*
 * Verify that "struct rcu_head" is big enough to hold "struct
 * callback_head". This has been done to avoid having to add special
 * compile flags for including ck_epoch.h to all clients of the
 * LinuxKPI.
 */
CTASSERT(sizeof(struct rcu_head) == sizeof(struct callback_head));

/*
 * Verify that "rcu_section[0]" has the same size as
 * "ck_epoch_section_t". This has been done to avoid having to add
 * special compile flags for including ck_epoch.h to all clients of
 * the LinuxKPI.
 */
CTASSERT(sizeof(((struct task_struct *)0)->rcu_section[0]) ==
    sizeof(ck_epoch_section_t));

/*
 * Verify that "epoch_record" is at the beginning of "struct
 * linux_epoch_record":
 */
CTASSERT(offsetof(struct linux_epoch_record, epoch_record) == 0);

CTASSERT(TS_RCU_TYPE_MAX == RCU_TYPE_MAX);

static ck_epoch_t linux_epoch[RCU_TYPE_MAX];
static struct linux_epoch_head linux_epoch_head[RCU_TYPE_MAX];
static DPCPU_DEFINE(struct linux_epoch_record, linux_epoch_record[RCU_TYPE_MAX]);

static void linux_rcu_cleaner_func(void *, int);

static void
linux_rcu_runtime_init(void *arg __unused)
{
	struct linux_epoch_head *head;
	int i;
	int j;

	for (j = 0; j != RCU_TYPE_MAX; j++) {
		ck_epoch_init(&linux_epoch[j]);

		head = &linux_epoch_head[j];

		mtx_init(&head->lock, "LRCU-HEAD", NULL, MTX_DEF);
		TASK_INIT(&head->task, 0, linux_rcu_cleaner_func, head);
		STAILQ_INIT(&head->cb_head);

		CPU_FOREACH(i) {
			struct linux_epoch_record *record;

			record = &DPCPU_ID_GET(i, linux_epoch_record[j]);

			record->cpuid = i;
			record->type = j;
			ck_epoch_register(&linux_epoch[j],
			    &record->epoch_record, NULL);
			TAILQ_INIT(&record->ts_head);
		}
	}
}
SYSINIT(linux_rcu_runtime, SI_SUB_CPU, SI_ORDER_ANY, linux_rcu_runtime_init, NULL);

static void
linux_rcu_runtime_uninit(void *arg __unused)
{
	struct linux_epoch_head *head;
	int j;

	for (j = 0; j != RCU_TYPE_MAX; j++) {
		head = &linux_epoch_head[j];

		mtx_destroy(&head->lock);
	}
}
SYSUNINIT(linux_rcu_runtime, SI_SUB_LOCK, SI_ORDER_SECOND, linux_rcu_runtime_uninit, NULL);

static void
linux_rcu_cleaner_func(void *context, int pending __unused)
{
	struct linux_epoch_head *head;
	struct callback_head *rcu;
	STAILQ_HEAD(, callback_head) tmp_head;
	uintptr_t offset;

	linux_set_current(curthread);

	head = context;

	/* move current callbacks into own queue */
	mtx_lock(&head->lock);
	STAILQ_INIT(&tmp_head);
	STAILQ_CONCAT(&tmp_head, &head->cb_head);
	mtx_unlock(&head->lock);

	/* synchronize */
	linux_synchronize_rcu(head - linux_epoch_head);

	/* dispatch all callbacks, if any */
	while ((rcu = STAILQ_FIRST(&tmp_head)) != NULL) {
		STAILQ_REMOVE_HEAD(&tmp_head, entry);

		offset = (uintptr_t)rcu->func;

		if (offset < LINUX_KFREE_RCU_OFFSET_MAX)
			kfree((char *)rcu - offset);
		else
			rcu->func((struct rcu_head *)rcu);
	}
}
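
/*
 * Editorial note, illustrative only (not part of the original source):
 * the dispatch loop above distinguishes real callbacks from kfree_rcu()
 * requests by value.  The assumption here is that the LinuxKPI
 * kfree_rcu(ptr, member) stores offsetof(typeof(*ptr), member) in the
 * "func" field instead of a function pointer, which is what the
 * LINUX_KFREE_RCU_OFFSET_MAX test implies.  A minimal caller sketch,
 * with the hypothetical names "struct foo" and "foo_release":
 *
 *	struct foo {
 *		int value;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void
 *	foo_release(struct foo *fp)
 *	{
 *		// Defers kfree(fp) until after a grace period; the
 *		// member offset of "rcu" is queued in place of a
 *		// callback and decoded by linux_rcu_cleaner_func().
 *		kfree_rcu(fp, rcu);
 *	}
 */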

void
linux_rcu_read_lock(unsigned type)
{
	struct linux_epoch_record *record;
	struct task_struct *ts;

	MPASS(type < RCU_TYPE_MAX);

	if (RCU_SKIP())
		return;

	ts = current;

	/* assert valid refcount */
	MPASS(ts->rcu_recurse[type] != INT_MAX);

	if (++(ts->rcu_recurse[type]) != 1)
		return;

	/*
	 * Pin thread to current CPU so that the unlock code gets the
	 * same per-CPU epoch record:
	 */
	sched_pin();

	record = &DPCPU_GET(linux_epoch_record[type]);

	/*
	 * Use a critical section to prevent recursion inside
	 * ck_epoch_begin(). Apart from that, this function supports
	 * recursion.
	 */
	critical_enter();
	ck_epoch_begin(&record->epoch_record,
	    (ck_epoch_section_t *)&ts->rcu_section[type]);
	TAILQ_INSERT_TAIL(&record->ts_head, ts, rcu_entry[type]);
	critical_exit();
}

void
linux_rcu_read_unlock(unsigned type)
{
	struct linux_epoch_record *record;
	struct task_struct *ts;

	MPASS(type < RCU_TYPE_MAX);

	if (RCU_SKIP())
		return;

	ts = current;

	/* assert valid refcount */
	MPASS(ts->rcu_recurse[type] > 0);

	if (--(ts->rcu_recurse[type]) != 0)
		return;

	record = &DPCPU_GET(linux_epoch_record[type]);

	/*
	 * Use a critical section to prevent recursion inside
	 * ck_epoch_end(). Apart from that, this function supports
	 * recursion.
	 */
	critical_enter();
	ck_epoch_end(&record->epoch_record,
	    (ck_epoch_section_t *)&ts->rcu_section[type]);
	TAILQ_REMOVE(&record->ts_head, ts, rcu_entry[type]);
	critical_exit();

	sched_unpin();
}
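
/*
 * Editorial note, illustrative only (not part of the original source):
 * a minimal reader-side sketch, assuming the usual LinuxKPI mapping in
 * <linux/rcupdate.h> where rcu_read_lock() and rcu_read_unlock() expand
 * to linux_rcu_read_lock(RCU_TYPE_REGULAR) and
 * linux_rcu_read_unlock(RCU_TYPE_REGULAR).  The names "foo_list",
 * "struct foo" and "use_foo" are hypothetical:
 *
 *	struct foo *fp;
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(fp, &foo_list, entry) {
 *		// Read-side sections may nest; only the outermost
 *		// lock/unlock pair pins the thread and enters the
 *		// per-CPU ck_epoch section.
 *		use_foo(fp);
 *	}
 *	rcu_read_unlock();
 */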

static void
linux_synchronize_rcu_cb(ck_epoch_t *epoch __unused, ck_epoch_record_t *epoch_record, void *arg __unused)
{
	struct linux_epoch_record *record =
	    container_of(epoch_record, struct linux_epoch_record, epoch_record);
	struct thread *td = curthread;
	struct task_struct *ts;

	/* check if blocked on the current CPU */
	if (record->cpuid == PCPU_GET(cpuid)) {
		bool is_sleeping = 0;
		u_char prio = 0;

		/*
		 * Find the lowest priority or sleeping thread which
		 * is blocking synchronization on this CPU core. All
		 * the threads in the queue are CPU-pinned and cannot
		 * go anywhere while the current thread is locked.
		 */
		TAILQ_FOREACH(ts, &record->ts_head, rcu_entry[record->type]) {
			if (ts->task_thread->td_priority > prio)
				prio = ts->task_thread->td_priority;
			is_sleeping |= (ts->task_thread->td_inhibitors != 0);
		}

		if (is_sleeping) {
			thread_unlock(td);
			pause("W", 1);
			thread_lock(td);
		} else {
			/* set new thread priority */
			sched_prio(td, prio);
			/* task switch */
			mi_switch(SW_VOL | SWT_RELINQUISH, NULL);

			/*
			 * Release the thread lock while yielding to
			 * allow other threads to acquire the lock
			 * pointed to by TDQ_LOCKPTR(td). Otherwise a
			 * deadlock-like situation might happen.
			 */
			thread_unlock(td);
			thread_lock(td);
		}
	} else {
		/*
		 * To avoid spinning, move execution to the other CPU
		 * which is blocking synchronization. Set the highest
		 * thread priority so that the code gets run. The
		 * thread priority will be restored later.
		 */
		sched_prio(td, 0);
		sched_bind(td, record->cpuid);
	}
}

void
linux_synchronize_rcu(unsigned type)
{
	struct thread *td;
	int was_bound;
	int old_cpu;
	int old_pinned;
	u_char old_prio;

	MPASS(type < RCU_TYPE_MAX);

	if (RCU_SKIP())
		return;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "linux_synchronize_rcu() can sleep");

	td = curthread;
	DROP_GIANT();

	/*
	 * Synchronizing RCU might change the CPU core this function
	 * is running on. Save current values:
	 */
	thread_lock(td);

	old_cpu = PCPU_GET(cpuid);
	old_pinned = td->td_pinned;
	old_prio = td->td_priority;
	was_bound = sched_is_bound(td);
	sched_unbind(td);
	td->td_pinned = 0;
	sched_bind(td, old_cpu);

	ck_epoch_synchronize_wait(&linux_epoch[type],
	    &linux_synchronize_rcu_cb, NULL);

	/* restore CPU binding, if any */
	if (was_bound != 0) {
		sched_bind(td, old_cpu);
	} else {
		/* get thread back to initial CPU, if any */
		if (old_pinned != 0)
			sched_bind(td, old_cpu);
		sched_unbind(td);
	}
	/* restore pinned after bind */
	td->td_pinned = old_pinned;

	/* restore thread priority */
	sched_prio(td, old_prio);
	thread_unlock(td);

	PICKUP_GIANT();
}

void
linux_rcu_barrier(unsigned type)
{
	struct linux_epoch_head *head;

	MPASS(type < RCU_TYPE_MAX);

	linux_synchronize_rcu(type);

	head = &linux_epoch_head[type];

	/* wait for callbacks to complete */
	taskqueue_drain(taskqueue_fast, &head->task);
}

void
linux_call_rcu(unsigned type, struct rcu_head *context, rcu_callback_t func)
{
	struct callback_head *rcu;
	struct linux_epoch_head *head;

	MPASS(type < RCU_TYPE_MAX);

	rcu = (struct callback_head *)context;
	head = &linux_epoch_head[type];

	mtx_lock(&head->lock);
	rcu->func = func;
	STAILQ_INSERT_TAIL(&head->cb_head, rcu, entry);
	taskqueue_enqueue(taskqueue_fast, &head->task);
	mtx_unlock(&head->lock);
}

int
init_srcu_struct(struct srcu_struct *srcu)
{
	return (0);
}

void
cleanup_srcu_struct(struct srcu_struct *srcu)
{
}

int
srcu_read_lock(struct srcu_struct *srcu)
{
	linux_rcu_read_lock(RCU_TYPE_SLEEPABLE);
	return (0);
}

void
srcu_read_unlock(struct srcu_struct *srcu, int key __unused)
{
	linux_rcu_read_unlock(RCU_TYPE_SLEEPABLE);
}

void
synchronize_srcu(struct srcu_struct *srcu)
{
	linux_synchronize_rcu(RCU_TYPE_SLEEPABLE);
}

void
srcu_barrier(struct srcu_struct *srcu)
{
	linux_rcu_barrier(RCU_TYPE_SLEEPABLE);
}
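
/*
 * Editorial note, illustrative only (not part of the original source):
 * a minimal SRCU usage sketch against the wrappers above.  As the code
 * shows, every srcu_struct shares the single RCU_TYPE_SLEEPABLE domain,
 * srcu_read_lock() always returns a dummy key of 0, and
 * synchronize_srcu() waits for all sleepable read-side sections rather
 * than only those of the given srcu_struct.  "my_srcu" is hypothetical:
 *
 *	static struct srcu_struct my_srcu;
 *	int idx;
 *
 *	init_srcu_struct(&my_srcu);
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	// ... sleepable read-side section ...
 *	srcu_read_unlock(&my_srcu, idx);
 *
 *	synchronize_srcu(&my_srcu);	// wait for readers to drain
 *	cleanup_srcu_struct(&my_srcu);
 */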