/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module holds the global variables and functions used to maintain
 * lock_object structures.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/kern/subr_lock.c 323870 2017-09-21 19:24:11Z marius $");

#include "opt_ddb.h"
#include "opt_mprof.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <machine/cpufunc.h>
#include <machine/cpu.h>

CTASSERT(LOCK_CLASS_MAX == 15);

/*
 * Table of all known lock classes.  An entry's array index is the class
 * index that lock_init() encodes into lo_flags (via LO_CLASSSHIFT), so
 * the order of this table is part of the lock_object ABI.
 */
struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = {
	&lock_class_mtx_spin,
	&lock_class_mtx_sleep,
	&lock_class_sx,
	&lock_class_rm,
	&lock_class_rm_sleepable,
	&lock_class_rw,
	&lock_class_lockmgr,
};

/*
 * Initialize a lock_object: record its class index and name, mark it
 * initialized, and register it with witness.  'type' is the witness
 * type name; when NULL the lock's own name is used instead.
 */
void
lock_init(struct lock_object *lock, struct lock_class *class, const char *name,
    const char *type, int flags)
{
	int i;

	/* Check for double-init and zero object. */
	KASSERT(flags & LO_NEW || !lock_initalized(lock),
	    ("lock \"%s\" %p already initialized", name, lock));

	/* Look up lock class to find its index. */
	for (i = 0; i < LOCK_CLASS_MAX; i++)
		if (lock_classes[i] == class) {
			lock->lo_flags = i << LO_CLASSSHIFT;
			break;
		}
	KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class));

	/* Initialize the lock object. */
	lock->lo_name = name;
	lock->lo_flags |= flags | LO_INITIALIZED;
	LOCK_LOG_INIT(lock, 0);
	WITNESS_INIT(lock, (type != NULL) ? type : name);
}

/*
 * Tear down a lock_object: deregister from witness and clear the
 * initialized flag so a later re-init (or double-destroy) is caught
 * by the KASSERTs above.
 */
void
lock_destroy(struct lock_object *lock)
{

	KASSERT(lock_initalized(lock), ("lock %p is not initialized", lock));
	WITNESS_DESTROY(lock);
	LOCK_LOG_DESTROY(lock, 0);
	lock->lo_flags &= ~LO_INITIALIZED;
}

/*
 * Spin-wait with escalating backoff for contended lock acquisition.
 * The per-attempt delay grows by lc->step up to lc->max; the actual
 * number of cpu_spinwait() iterations is pseudo-randomized by taking
 * cpu_ticks() modulo the current delay (floored at lc->min) so that
 * contending CPUs desynchronize instead of retrying in lockstep.
 */
void
lock_delay(struct lock_delay_arg *la)
{
	u_int i, delay, backoff, min, max;
	struct lock_delay_config *lc = la->config;

	delay = la->delay;

	if (delay == 0)
		delay = lc->initial;
	else {
		delay += lc->step;
		max = lc->max;
		if (delay > max)
			delay = max;
	}

	backoff = cpu_ticks() % delay;
	min = lc->min;
	if (backoff < min)
		backoff = min;
	for (i = 0; i < backoff; i++)
		cpu_spinwait();

	la->delay = delay;
	la->spin_cnt += backoff;
}

#ifdef DDB
/*
 * DDB "show lock <addr>" command: print the class and name of the
 * lock_object at 'addr', then defer to the class-specific ddb hook.
 */
DB_SHOW_COMMAND(lock, db_show_lock)
{
	struct lock_object *lock;
	struct lock_class *class;

	if (!have_addr)
		return;
	lock = (struct lock_object *)addr;
	if (LO_CLASSINDEX(lock) > LOCK_CLASS_MAX) {
		db_printf("Unknown lock class: %d\n", LO_CLASSINDEX(lock));
		return;
	}
	class = LOCK_CLASS(lock);
	db_printf(" class: %s\n", class->lc_name);
	db_printf(" name: %s\n", lock->lo_name);
	class->lc_ddb_show(lock);
}
#endif

#ifdef LOCK_PROFILING

/*
 * One object per-thread for each lock the thread owns.  Tracks individual
 * lock instances.
 */
struct lock_profile_object {
	LIST_ENTRY(lock_profile_object) lpo_link;
	struct lock_object *lpo_obj;	/* lock being tracked */
	const char	*lpo_file;	/* acquisition file */
	int		lpo_line;	/* acquisition line */
	uint16_t	lpo_ref;	/* recursion depth on this lock */
	uint16_t	lpo_cnt;	/* acquisitions folded into this obj */
	uint64_t	lpo_acqtime;	/* acquisition timestamp (ns) */
	uint64_t	lpo_waittime;	/* time spent waiting before acq (ns) */
	u_int		lpo_contest_locking;
};

/*
 * One lock_prof for each (file, line, lock object) triple.
 */
struct lock_prof {
	SLIST_ENTRY(lock_prof) link;
	struct lock_class *class;
	const char	*file;
	const char	*name;
	int		line;
	int		ticks;		/* last tick this entry was summed at */
	uintmax_t	cnt_wait_max;
	uintmax_t	cnt_max;
	uintmax_t	cnt_tot;
	uintmax_t	cnt_wait;
	uintmax_t	cnt_cur;
	uintmax_t	cnt_contest_locking;
};

SLIST_HEAD(lphead, lock_prof);

#define	LPROF_HASH_SIZE		4096
#define	LPROF_HASH_MASK		(LPROF_HASH_SIZE - 1)
#define	LPROF_CACHE_SIZE	4096

/*
 * Array of objects and profs for each type of object for each cpu.  Spinlocks
 * are handled separately because a thread may be preempted and acquire a
 * spinlock while in the lock profiling code of a non-spinlock.  In this way
 * we only need a critical section to protect the per-cpu lists.
 */
struct lock_prof_type {
	struct lphead		lpt_lpalloc;	/* free lock_prof entries */
	struct lpohead		lpt_lpoalloc;	/* free lock_profile_objects */
	struct lphead		lpt_hash[LPROF_HASH_SIZE];
	struct lock_prof	lpt_prof[LPROF_CACHE_SIZE];
	struct lock_profile_object lpt_objs[LPROF_CACHE_SIZE];
};

struct lock_prof_cpu {
	struct lock_prof_type	lpc_types[2]; /* One for spin one for other. */
};

struct lock_prof_cpu *lp_cpu[MAXCPU];

volatile int lock_prof_enable = 0;
static volatile int lock_prof_resetting;

#define	LPROF_SBUF_SIZE		256

static int lock_prof_rejected;	/* records dropped: free lists exhausted */
static int lock_prof_skipspin;	/* sysctl: don't profile spinlocks */
static int lock_prof_skipcount;	/* sysctl: sample 1 in N acquisitions */

#ifndef USE_CPU_NANOSECONDS
/*
 * Convert binuptime() into a nanosecond count (see bintime2timespec()).
 */
uint64_t
nanoseconds(void)
{
	struct bintime bt;
	uint64_t ns;

	binuptime(&bt);
	/* From bintime2timespec */
	ns = bt.sec * (uint64_t)1000000000;
	ns += ((uint64_t)1000000000 * (uint32_t)(bt.frac >> 32)) >> 32;
	return (ns);
}
#endif

/*
 * Seed a lock_prof_type's free lists with its statically-sized caches of
 * lock_prof and lock_profile_object entries.
 */
static void
lock_prof_init_type(struct lock_prof_type *type)
{
	int i;

	SLIST_INIT(&type->lpt_lpalloc);
	LIST_INIT(&type->lpt_lpoalloc);
	for (i = 0; i < LPROF_CACHE_SIZE; i++) {
		SLIST_INSERT_HEAD(&type->lpt_lpalloc, &type->lpt_prof[i],
		    link);
		LIST_INSERT_HEAD(&type->lpt_lpoalloc, &type->lpt_objs[i],
		    lpo_link);
	}
}

/*
 * SYSINIT hook: allocate and initialize the per-CPU profiling state once
 * all CPUs are known (SI_SUB_SMP).
 */
static void
lock_prof_init(void *arg)
{
	int cpu;

	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		lp_cpu[cpu] = malloc(sizeof(*lp_cpu[cpu]), M_DEVBUF,
		    M_WAITOK | M_ZERO);
		lock_prof_init_type(&lp_cpu[cpu]->lpc_types[0]);
		lock_prof_init_type(&lp_cpu[cpu]->lpc_types[1]);
	}
}
SYSINIT(lockprof, SI_SUB_SMP, SI_ORDER_ANY, lock_prof_init, NULL);

static void
lock_prof_reset_wait(void)
{

	/*
	 * Spin relinquishing our cpu so that quiesce_all_cpus may
	 * complete.
	 */
	while (lock_prof_resetting)
		sched_relinquish(curthread);
}

/*
 * Zero all per-CPU profiling state.  Profiling is disabled for the
 * duration and all CPUs are quiesced so no CPU is inside the profiling
 * fast path while the structures are being rebuilt.
 */
static void
lock_prof_reset(void)
{
	struct lock_prof_cpu *lpc;
	int enabled, i, cpu;

	/*
	 * We not only race with acquiring and releasing locks but also
	 * thread exit. To be certain that threads exit without valid head
	 * pointers they must see resetting set before enabled is cleared.
	 * Otherwise a lock may not be removed from a per-thread list due
	 * to disabled being set but not wait for reset() to remove it below.
	 */
	atomic_store_rel_int(&lock_prof_resetting, 1);
	enabled = lock_prof_enable;
	lock_prof_enable = 0;
	quiesce_all_cpus("profreset", 0);
	/*
	 * Some objects may have migrated between CPUs.  Clear all links
	 * before we zero the structures.  Some items may still be linked
	 * into per-thread lists as well.
	 */
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		lpc = lp_cpu[cpu];
		for (i = 0; i < LPROF_CACHE_SIZE; i++) {
			LIST_REMOVE(&lpc->lpc_types[0].lpt_objs[i], lpo_link);
			LIST_REMOVE(&lpc->lpc_types[1].lpt_objs[i], lpo_link);
		}
	}
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		lpc = lp_cpu[cpu];
		bzero(lpc, sizeof(*lpc));
		lock_prof_init_type(&lpc->lpc_types[0]);
		lock_prof_init_type(&lpc->lpc_types[1]);
	}
	atomic_store_rel_int(&lock_prof_resetting, 0);
	lock_prof_enable = enabled;
}

/*
 * Format one aggregated lock_prof record as a row of the stats sysctl
 * output.  Times are reported in microseconds (counters are kept in ns,
 * hence the / 1000); leading "../" components are stripped from the file
 * name.
 */
static void
lock_prof_output(struct lock_prof *lp, struct sbuf *sb)
{
	const char *p;

	for (p = lp->file; p != NULL && strncmp(p, "../", 3) == 0; p += 3);
	sbuf_printf(sb,
	    "%8ju %9ju %11ju %11ju %11ju %6ju %6ju %2ju %6ju %s:%d (%s:%s)\n",
	    lp->cnt_max / 1000, lp->cnt_wait_max / 1000, lp->cnt_tot / 1000,
	    lp->cnt_wait / 1000, lp->cnt_cur,
	    lp->cnt_cur == 0 ? (uintmax_t)0 :
	    lp->cnt_tot / (lp->cnt_cur * 1000),
	    lp->cnt_cur == 0 ? (uintmax_t)0 :
	    lp->cnt_wait / (lp->cnt_cur * 1000),
	    (uintmax_t)0, lp->cnt_contest_locking,
	    p, lp->line, lp->class->lc_name, lp->name);
}

/*
 * Accumulate into *dst the counters of every per-CPU lock_prof entry
 * matching 'match' (same file/line/name, same hash bucket).  Each summed
 * entry has its ticks stamped with 't' so lock_prof_type_stats() does not
 * emit it again during the same dump pass.
 */
static void
lock_prof_sum(struct lock_prof *match, struct lock_prof *dst, int hash,
    int spin, int t)
{
	struct lock_prof_type *type;
	struct lock_prof *l;
	int cpu;

	dst->file = match->file;
	dst->line = match->line;
	dst->class = match->class;
	dst->name = match->name;

	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (lp_cpu[cpu] == NULL)
			continue;
		type = &lp_cpu[cpu]->lpc_types[spin];
		SLIST_FOREACH(l, &type->lpt_hash[hash], link) {
			if (l->ticks == t)
				continue;
			if (l->file != match->file || l->line != match->line ||
			    l->name != match->name)
				continue;
			l->ticks = t;
			if (l->cnt_max > dst->cnt_max)
				dst->cnt_max = l->cnt_max;
			if (l->cnt_wait_max > dst->cnt_wait_max)
				dst->cnt_wait_max = l->cnt_wait_max;
			dst->cnt_tot += l->cnt_tot;
			dst->cnt_wait += l->cnt_wait;
			dst->cnt_cur += l->cnt_cur;
			dst->cnt_contest_locking += l->cnt_contest_locking;
		}
	}

}

/*
 * Walk one lock_prof_type's hash table, summing each not-yet-visited
 * entry across CPUs and emitting one output row per unique
 * (file, line, name) triple.
 */
static void
lock_prof_type_stats(struct lock_prof_type *type, struct sbuf *sb, int spin,
    int t)
{
	struct lock_prof *l;
	int i;

	for (i = 0; i < LPROF_HASH_SIZE; ++i) {
		SLIST_FOREACH(l, &type->lpt_hash[i], link) {
			struct lock_prof lp = {};

			if (l->ticks == t)
				continue;
			lock_prof_sum(l, &lp, i, spin, t);
			lock_prof_output(&lp, sb);
		}
	}
}

/*
 * Sysctl handler for debug.lock.prof.stats: dump all profiling records.
 * Profiling is temporarily disabled and CPUs quiesced so the per-CPU
 * tables are stable while being read.
 */
static int
dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int error, cpu, t;
	int enabled;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = sbuf_new_for_sysctl(NULL, NULL, LPROF_SBUF_SIZE, req);
	sbuf_printf(sb, "\n%8s %9s %11s %11s %11s %6s %6s %2s %6s %s\n",
	    "max", "wait_max", "total", "wait_total", "count", "avg", "wait_avg", "cnt_hold", "cnt_lock", "name");
	enabled = lock_prof_enable;
	lock_prof_enable = 0;
	quiesce_all_cpus("profstat", 0);
	t = ticks;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (lp_cpu[cpu] == NULL)
			continue;
		lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[0], sb, 0, t);
		lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[1], sb, 1, t);
	}
	lock_prof_enable = enabled;

	error = sbuf_finish(sb);
	/* Output a trailing NUL. */
	if (error == 0)
		error = SYSCTL_OUT(req, "", 1);
	sbuf_delete(sb);
	return (error);
}

/*
 * Sysctl handler for debug.lock.prof.enable: toggle profiling.  Stats
 * are reset on the off->on transition so a new run starts clean.
 */
static int
enable_lock_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = lock_prof_enable;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == lock_prof_enable)
		return (0);
	if (v == 1)
		lock_prof_reset();
	lock_prof_enable = !!v;

	return (0);
}

/*
 * Sysctl handler for debug.lock.prof.reset: writing any non-zero value
 * clears all accumulated statistics.
 */
static int
reset_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	lock_prof_reset();

	return (0);
}

/*
 * Find (or allocate from this CPU's free list) the lock_prof record for
 * the (lock name, file, line) triple.  Returns NULL and bumps
 * lock_prof_rejected when the free list is exhausted.  Caller must be in
 * a critical section (per-CPU state).
 */
static struct lock_prof *
lock_profile_lookup(struct lock_object *lo, int spin, const char *file,
    int line)
{
	const char *unknown = "(unknown)";
	struct lock_prof_type *type;
	struct lock_prof *lp;
	struct lphead *head;
	const char *p;
	u_int hash;

	p = file;
	if (p == NULL || *p == '\0')
		p = unknown;
	hash = (uintptr_t)lo->lo_name * 31 + (uintptr_t)p * 31 + line;
	hash &= LPROF_HASH_MASK;
	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
	head = &type->lpt_hash[hash];
	SLIST_FOREACH(lp, head, link) {
		if (lp->line == line && lp->file == p &&
		    lp->name == lo->lo_name)
			return (lp);

	}
	lp = SLIST_FIRST(&type->lpt_lpalloc);
	if (lp == NULL) {
		lock_prof_rejected++;
		return (lp);
	}
	SLIST_REMOVE_HEAD(&type->lpt_lpalloc, link);
	lp->file = p;
	lp->line = line;
	lp->class = LOCK_CLASS(lo);
	lp->name = lo->lo_name;
	SLIST_INSERT_HEAD(&type->lpt_hash[hash], lp, link);
	return (lp);
}

/*
 * Find (or allocate from this CPU's free list) the current thread's
 * lock_profile_object for this (lock, file, line).  New objects are
 * linked onto the thread's td_lprof list; NULL is returned (and
 * lock_prof_rejected bumped) when the free list is exhausted.
 */
static struct lock_profile_object *
lock_profile_object_lookup(struct lock_object *lo, int spin, const char *file,
    int line)
{
	struct lock_profile_object *l;
	struct lock_prof_type *type;
	struct lpohead *head;

	head = &curthread->td_lprof[spin];
	LIST_FOREACH(l, head, lpo_link)
		if (l->lpo_obj == lo && l->lpo_file == file &&
		    l->lpo_line == line)
			return (l);
	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
	l = LIST_FIRST(&type->lpt_lpoalloc);
	if (l == NULL) {
		lock_prof_rejected++;
		return (NULL);
	}
	LIST_REMOVE(l, lpo_link);
	l->lpo_obj = lo;
	l->lpo_file = file;
	l->lpo_line = line;
	l->lpo_cnt = 0;
	LIST_INSERT_HEAD(head, l, lpo_link);

	return (l);
}

/*
 * Record a successful lock acquisition: stamp the acquisition time and
 * any contention wait time on the per-thread lock_profile_object.
 * Honors the skipcount/skipspin sampling knobs; on recursive acquires
 * (lpo_ref > 1) the original timestamp is kept.
 */
void
lock_profile_obtain_lock_success(struct lock_object *lo, int contested,
    uint64_t waittime, const char *file, int line)
{
	static int lock_prof_count;
	struct lock_profile_object *l;
	int spin;

	if (SCHEDULER_STOPPED())
		return;

	/* don't reset the timer when/if recursing */
	if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
		return;
	if (lock_prof_skipcount &&
	    (++lock_prof_count % lock_prof_skipcount) != 0)
		return;
	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
	if (spin && lock_prof_skipspin == 1)
		return;
	critical_enter();
	/* Recheck enabled now that we're in a critical section. */
	if (lock_prof_enable == 0)
		goto out;
	l = lock_profile_object_lookup(lo, spin, file, line);
	if (l == NULL)
		goto out;
	l->lpo_cnt++;
	if (++l->lpo_ref > 1)
		goto out;
	l->lpo_contest_locking = contested;
	l->lpo_acqtime = nanoseconds();
	if (waittime && (l->lpo_acqtime > waittime))
		l->lpo_waittime = l->lpo_acqtime - waittime;
	else
		l->lpo_waittime = 0;
out:
	critical_exit();
}

/*
 * Called from thread exit: wait for any in-progress reset to finish so
 * the thread's td_lprof lists are no longer referenced, then assert
 * they are empty (every acquired lock should have been released).
 */
void
lock_profile_thread_exit(struct thread *td)
{
#ifdef INVARIANTS
	struct lock_profile_object *l;

	MPASS(curthread->td_critnest == 0);
#endif
	/*
	 * If lock profiling was disabled we have to wait for reset to
	 * clear our pointers before we can exit safely.
	 */
	lock_prof_reset_wait();
#ifdef INVARIANTS
	LIST_FOREACH(l, &td->td_lprof[0], lpo_link)
		printf("thread still holds lock acquired at %s:%d\n",
		    l->lpo_file, l->lpo_line);
	LIST_FOREACH(l, &td->td_lprof[1], lpo_link)
		printf("thread still holds lock acquired at %s:%d\n",
		    l->lpo_file, l->lpo_line);
#endif
	MPASS(LIST_FIRST(&td->td_lprof[0]) == NULL);
	MPASS(LIST_FIRST(&td->td_lprof[1]) == NULL);
}

/*
 * Record a lock release: fold the hold time, wait time, and contention
 * counts of the thread's lock_profile_object into the matching
 * lock_prof record, then return the object to this CPU's free list.
 * Recursive releases (lpo_ref still > 0) only drop the reference.
 */
void
lock_profile_release_lock(struct lock_object *lo)
{
	struct lock_profile_object *l;
	struct lock_prof_type *type;
	struct lock_prof *lp;
	uint64_t curtime, holdtime;
	struct lpohead *head;
	int spin;

	if (SCHEDULER_STOPPED())
		return;
	if (lo->lo_flags & LO_NOPROFILE)
		return;
	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
	head = &curthread->td_lprof[spin];
	if (LIST_FIRST(head) == NULL)
		return;
	critical_enter();
	/* Recheck enabled now that we're in a critical section. */
	if (lock_prof_enable == 0 && lock_prof_resetting == 1)
		goto out;
	/*
	 * If lock profiling is not enabled we still want to remove the
	 * lpo from our queue.
	 */
	LIST_FOREACH(l, head, lpo_link)
		if (l->lpo_obj == lo)
			break;
	if (l == NULL)
		goto out;
	if (--l->lpo_ref > 0)
		goto out;
	lp = lock_profile_lookup(lo, spin, l->lpo_file, l->lpo_line);
	if (lp == NULL)
		goto release;
	curtime = nanoseconds();
	if (curtime < l->lpo_acqtime)
		goto release;
	holdtime = curtime - l->lpo_acqtime;

	/*
	 * Record if the lock has been held longer now than ever
	 * before.
	 */
	if (holdtime > lp->cnt_max)
		lp->cnt_max = holdtime;
	if (l->lpo_waittime > lp->cnt_wait_max)
		lp->cnt_wait_max = l->lpo_waittime;
	lp->cnt_tot += holdtime;
	lp->cnt_wait += l->lpo_waittime;
	lp->cnt_contest_locking += l->lpo_contest_locking;
	lp->cnt_cur += l->lpo_cnt;
release:
	LIST_REMOVE(l, lpo_link);
	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
	LIST_INSERT_HEAD(&type->lpt_lpoalloc, l, lpo_link);
out:
	critical_exit();
}

static SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD, NULL, "lock debugging");
static SYSCTL_NODE(_debug_lock, OID_AUTO, prof, CTLFLAG_RD, NULL,
    "lock profiling");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipspin, CTLFLAG_RW,
    &lock_prof_skipspin, 0, "Skip profiling on spinlocks.");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipcount, CTLFLAG_RW,
    &lock_prof_skipcount, 0, "Sample approximately every N lock acquisitions.");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &lock_prof_rejected, 0, "Number of rejected profiling records");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_lock_prof_stats, "A", "Lock profiling statistics");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_lock_prof_stats, "I", "Reset lock profiling statistics");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_lock_prof, "I", "Enable lock profiling");

#endif