/*	$NetBSD: subr_psref.c,v 1.2 2016/04/11 13:18:13 riastradh Exp $	*/

/*-
 * Copyright (c) 2016 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Passive references
 *
 *	Passive references are references to objects that guarantee the
 *	object will not be destroyed until the reference is released.
 *
 *	Passive references require no interprocessor synchronization to
 *	acquire or release.  However, destroying the target of passive
 *	references requires expensive interprocessor synchronization --
 *	xcalls to determine on which CPUs the object is still in use.
 *
 *	Passive references may be held only on a single CPU and by a
 *	single LWP.  They require the caller to allocate a little stack
 *	space, a struct psref object.  Sleeping while a passive
 *	reference is held is allowed, provided that the owner's LWP is
 *	bound to a CPU -- e.g., the owner is a softint or a bound
 *	kthread.  However, sleeping should be kept to a short duration,
 *	e.g. sleeping on an adaptive lock.
 *
 *	Passive references serve as an intermediate stage between
 *	reference counting and passive serialization (pserialize(9)):
 *
 *	- If you need references to transfer from CPU to CPU or LWP to
 *	  LWP, or if you need long-term references, you must use
 *	  reference counting, e.g. with atomic operations or locks,
 *	  which incurs interprocessor synchronization for every use --
 *	  cheaper than an xcall, but not scalable.
 *
 *	- If all users *guarantee* that they will not sleep, then it is
 *	  not necessary to use passive references: you may as well just
 *	  use the even cheaper pserialize(9), because you have
 *	  satisfied the requirements of a pserialize read section.
 */
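
/*
 * Illustrative usage (a hypothetical caller; the names frob, f_target,
 * and frob_class are invented for this sketch and are not part of this
 * file):
 *
 *	struct frob {
 *		struct psref_target	f_target;
 *		...
 *	};
 *
 *	static struct psref_class *frob_class;
 *
 *	frob_class = psref_class_create("frob", IPL_SOFTNET);
 *
 *	// In a softint or CPU-bound LWP, look up f under pserialize(9)
 *	// and pin it with a passive reference across a short sleep.
 *	struct psref psref;
 *	psref_acquire(&psref, &f->f_target, frob_class);
 *	...use f, possibly sleeping briefly...
 *	psref_release(&psref, &f->f_target, frob_class);
 */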

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_psref.c,v 1.2 2016/04/11 13:18:13 riastradh Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/percpu.h>
#include <sys/psref.h>
#include <sys/queue.h>
#include <sys/xcall.h>

LIST_HEAD(psref_head, psref);

/*
 * struct psref_class
 *
 *	Private global state for a class of passive reference targets.
 *	Opaque to callers.
 */
struct psref_class {
	kmutex_t		prc_lock;
	kcondvar_t		prc_cv;
	struct percpu		*prc_percpu; /* struct psref_cpu */
	ipl_cookie_t		prc_iplcookie;
};

/*
 * struct psref_cpu
 *
 *	Private per-CPU state for a class of passive reference targets.
 *	Not exposed by the API.
 */
struct psref_cpu {
	struct psref_head	pcpu_head;
};

/*
 * psref_class_create(name, ipl)
 *
 *	Create a new passive reference class, with the given wchan name
 *	and ipl.
 */
struct psref_class *
psref_class_create(const char *name, int ipl)
{
	struct psref_class *class;

	ASSERT_SLEEPABLE();

	class = kmem_alloc(sizeof(*class), KM_SLEEP);
	if (class == NULL)
		goto fail0;

	class->prc_percpu = percpu_alloc(sizeof(struct psref_cpu));
	if (class->prc_percpu == NULL)
		goto fail1;

	mutex_init(&class->prc_lock, MUTEX_DEFAULT, ipl);
	cv_init(&class->prc_cv, name);
	class->prc_iplcookie = makeiplcookie(ipl);

	return class;

fail1:	kmem_free(class, sizeof(*class));
fail0:	return NULL;
}

#ifdef DIAGNOSTIC
static void
psref_cpu_drained_p(void *p, void *cookie, struct cpu_info *ci __unused)
{
	const struct psref_cpu *pcpu = p;
	bool *retp = cookie;

	if (!LIST_EMPTY(&pcpu->pcpu_head))
		*retp = false;
}

static bool
psref_class_drained_p(const struct psref_class *prc)
{
	bool ret = true;

	percpu_foreach(prc->prc_percpu, &psref_cpu_drained_p, &ret);

	return ret;
}
#endif	/* DIAGNOSTIC */

/*
 * psref_class_destroy(class)
 *
 *	Destroy a passive reference class and free memory associated
 *	with it.  All targets in this class must have been drained and
 *	destroyed already.
 */
void
psref_class_destroy(struct psref_class *class)
{

	KASSERT(psref_class_drained_p(class));

	cv_destroy(&class->prc_cv);
	mutex_destroy(&class->prc_lock);
	percpu_free(class->prc_percpu, sizeof(struct psref_cpu));
	kmem_free(class, sizeof(*class));
}

/*
 * psref_target_init(target, class)
 *
 *	Initialize a passive reference target in the specified class.
 *	The caller is responsible for issuing a membar_producer after
 *	psref_target_init and before exposing a pointer to the target
 *	to other CPUs.
 */
void
psref_target_init(struct psref_target *target,
    struct psref_class *class)
{

	target->prt_class = class;
	target->prt_draining = false;
}
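
/*
 * For example, a hypothetical publisher might initialize and expose a
 * target like so (f, f_target, frob_class, and frob_list are invented
 * names for this sketch):
 *
 *	psref_target_init(&f->f_target, frob_class);
 *	membar_producer();
 *	...insert f into the pserialize(9)-protected frob_list...
 */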

/*
 * psref_acquire(psref, target, class)
 *
 *	Acquire a passive reference to the specified target, which must
 *	be in the specified class.
 *
 *	The caller must guarantee that the target will not be destroyed
 *	before psref_acquire returns.
 *
 *	The caller must additionally guarantee that it will not switch
 *	CPUs before releasing the passive reference, either by
 *	disabling kpreemption and avoiding sleeps, or by being in a
 *	softint or in an LWP bound to a CPU.
 */
void
psref_acquire(struct psref *psref, const struct psref_target *target,
    struct psref_class *class)
{
	struct psref_cpu *pcpu;
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);
	KASSERTMSG(!target->prt_draining, "psref target already destroyed: %p",
	    target);

	/* Block interrupts and acquire the current CPU's reference list. */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

	/* Record our reference. */
	LIST_INSERT_HEAD(&pcpu->pcpu_head, psref, psref_entry);
	psref->psref_target = target;
	psref->psref_lwp = curlwp;
	psref->psref_cpu = curcpu();

	/* Release the CPU list and restore interrupts. */
	percpu_putref(class->prc_percpu);
	splx(s);
}

/*
 * psref_release(psref, target, class)
 *
 *	Release a passive reference to the specified target, which must
 *	be in the specified class.
 *
 *	The caller must not have switched CPUs or LWPs since acquiring
 *	the passive reference.
 */
void
psref_release(struct psref *psref, const struct psref_target *target,
    struct psref_class *class)
{
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Make sure the psref looks sensible. */
	KASSERTMSG((psref->psref_target == target),
	    "passive reference target mismatch: %p (ref) != %p (expected)",
	    psref->psref_target, target);
	KASSERTMSG((psref->psref_lwp == curlwp),
	    "passive reference transferred from lwp %p to lwp %p",
	    psref->psref_lwp, curlwp);
	KASSERTMSG((psref->psref_cpu == curcpu()),
	    "passive reference transferred from CPU %u to CPU %u",
	    cpu_index(psref->psref_cpu), cpu_index(curcpu()));

	/*
	 * Block interrupts and remove the psref from the current CPU's
	 * list.  No need to percpu_getref or get the head of the list,
	 * and the caller guarantees that we are bound to a CPU anyway
	 * (as does blocking interrupts).
	 */
	s = splraiseipl(class->prc_iplcookie);
	LIST_REMOVE(psref, psref_entry);
	splx(s);

	/* If someone is waiting for users to drain, notify 'em. */
	if (__predict_false(target->prt_draining))
		cv_broadcast(&class->prc_cv);
}
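
/*
 * A minimal sketch of the acquire/release discipline for an LWP that
 * is not bound to a CPU (hypothetical caller; f and frob_class are
 * invented names): kpreemption stays disabled for the whole hold, so
 * no sleeping is permitted in between.
 *
 *	struct psref psref;
 *
 *	kpreempt_disable();
 *	psref_acquire(&psref, &f->f_target, frob_class);
 *	...use f without sleeping...
 *	psref_release(&psref, &f->f_target, frob_class);
 *	kpreempt_enable();
 */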

/*
 * psref_copy(pto, pfrom, class)
 *
 *	Copy a passive reference from pfrom, which must be in the
 *	specified class, to pto.  Both pfrom and pto must later be
 *	released with psref_release.
 *
 *	The caller must not have switched CPUs or LWPs since acquiring
 *	pfrom, and must not switch CPUs or LWPs before releasing both
 *	pfrom and pto.
 */
void
psref_copy(struct psref *pto, const struct psref *pfrom,
    struct psref_class *class)
{
	struct psref_cpu *pcpu;
	int s;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((pto != pfrom),
	    "can't copy passive reference to itself: %p",
	    pto);

	/* Make sure the pfrom reference looks sensible. */
	KASSERTMSG((pfrom->psref_lwp == curlwp),
	    "passive reference transferred from lwp %p to lwp %p",
	    pfrom->psref_lwp, curlwp);
	KASSERTMSG((pfrom->psref_cpu == curcpu()),
	    "passive reference transferred from CPU %u to CPU %u",
	    cpu_index(pfrom->psref_cpu), cpu_index(curcpu()));
	KASSERTMSG((pfrom->psref_target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    pfrom->psref_target->prt_class, class);

	/* Block interrupts and acquire the current CPU's reference list. */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

	/* Record the new reference. */
	LIST_INSERT_HEAD(&pcpu->pcpu_head, pto, psref_entry);
	pto->psref_target = pfrom->psref_target;
	pto->psref_lwp = curlwp;
	pto->psref_cpu = curcpu();

	/* Release the CPU list and restore interrupts. */
	percpu_putref(class->prc_percpu);
	splx(s);
}
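
/*
 * For example, a hypothetical caller might duplicate a reference so
 * that two pieces of code can release independently (psref is a held
 * reference, and frob_class is an invented name):
 *
 *	struct psref psref2;
 *	psref_copy(&psref2, &psref, frob_class);
 *	...hand &psref2 to code that will psref_release it later,
 *	   on the same CPU and LWP...
 */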

/*
 * struct psreffed
 *
 *	Global state for draining a psref target.
 */
struct psreffed {
	struct psref_class	*class;
	struct psref_target	*target;
	bool			ret;
};

static void
psreffed_p_xc(void *cookie0, void *cookie1 __unused)
{
	struct psreffed *P = cookie0;

	/*
	 * If we hold a psref to the target, then answer true.
	 *
	 * This is the only dynamic decision that may be made with
	 * psref_held.
	 *
	 * No need to lock anything here: every write transitions from
	 * false to true, so there can be no conflicting writes.  No
	 * need for a memory barrier here because P->ret is read only
	 * after xc_wait, which has already issued any necessary memory
	 * barriers.
	 */
	if (psref_held(P->target, P->class))
		P->ret = true;
}

static bool
psreffed_p(struct psref_target *target, struct psref_class *class)
{
	struct psreffed P = {
		.class = class,
		.target = target,
		.ret = false,
	};

	/* Ask all CPUs to say whether they hold a psref to the target. */
	xc_wait(xc_broadcast(0, &psreffed_p_xc, &P, NULL));

	return P.ret;
}

/*
 * psref_target_destroy(target, class)
 *
 *	Destroy a passive reference target.  Waits for all existing
 *	references to drain.  Caller must guarantee no new references
 *	will be acquired once it calls psref_target_destroy, e.g. by
 *	removing the target from a global list first.  May sleep.
 */
void
psref_target_destroy(struct psref_target *target, struct psref_class *class)
{

	ASSERT_SLEEPABLE();

	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Request psref_release to notify us when done. */
	KASSERTMSG(!target->prt_draining, "psref target already destroyed: %p",
	    target);
	target->prt_draining = true;

	/* Wait until there are no more references on any CPU. */
	while (psreffed_p(target, class)) {
		/*
		 * This enter/wait/exit business looks wrong, but it is
		 * both necessary, because psreffed_p performs a
		 * low-priority xcall and hence cannot run while a
		 * mutex is locked, and OK, because the wait is timed
		 * -- explicit wakeups are only an optimization.
		 */
		mutex_enter(&class->prc_lock);
		(void)cv_timedwait(&class->prc_cv, &class->prc_lock, 1);
		mutex_exit(&class->prc_lock);
	}

	/* No more references.  Cause subsequent psref_acquire to kassert. */
	target->prt_class = NULL;
}

/*
 * psref_held(target, class)
 *
 *	True if the current CPU holds a passive reference to target,
 *	false otherwise.  May be used only inside assertions.
 */
bool
psref_held(const struct psref_target *target, struct psref_class *class)
{
	const struct psref_cpu *pcpu;
	const struct psref *psref;
	int s;
	bool held = false;

	KASSERTMSG((kpreempt_disabled() || cpu_softintr_p() ||
		ISSET(curlwp->l_pflag, LP_BOUND)),
	    "passive references are CPU-local,"
	    " but preemption is enabled and the caller is not"
	    " in a softint or CPU-bound LWP");
	KASSERTMSG((target->prt_class == class),
	    "mismatched psref target class: %p (ref) != %p (expected)",
	    target->prt_class, class);

	/* Block interrupts and acquire the current CPU's reference list. */
	s = splraiseipl(class->prc_iplcookie);
	pcpu = percpu_getref(class->prc_percpu);

	/* Search through all the references on this CPU. */
	LIST_FOREACH(psref, &pcpu->pcpu_head, psref_entry) {
		/* Sanity-check the reference. */
		KASSERTMSG((psref->psref_lwp == curlwp),
		    "passive reference transferred from lwp %p to lwp %p",
		    psref->psref_lwp, curlwp);
		KASSERTMSG((psref->psref_cpu == curcpu()),
		    "passive reference transferred from CPU %u to CPU %u",
		    cpu_index(psref->psref_cpu), cpu_index(curcpu()));

		/* If it matches, stop here and answer yes. */
		if (psref->psref_target == target) {
			held = true;
			break;
		}
	}

	/* Release the CPU list and restore interrupts. */
	percpu_putref(class->prc_percpu);
	splx(s);

	return held;
}
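
/*
 * For example, a hypothetical owner tearing down an object might
 * unpublish it and then drain references (f, frob_list, frob_class,
 * and frob_psz are invented names; frob_psz is a pserialize(9)
 * instance protecting frob_list):
 *
 *	...remove f from frob_list...
 *	pserialize_perform(frob_psz);	// no new lookups can see f
 *	psref_target_destroy(&f->f_target, frob_class);
 *	kmem_free(f, sizeof(*f));
 */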