/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
28154484Sjhb */ 29154484Sjhb 30154484Sjhb/* 31154484Sjhb * This module holds the global variables and functions used to maintain 32154484Sjhb * lock_object structures. 33154484Sjhb */ 34154484Sjhb 35154484Sjhb#include <sys/cdefs.h> 36154484Sjhb__FBSDID("$FreeBSD: head/sys/kern/subr_lock.c 243046 2012-11-15 00:51:57Z jeff $"); 37154484Sjhb 38154485Sjhb#include "opt_ddb.h" 39164159Skmacy#include "opt_mprof.h" 40154485Sjhb 41154484Sjhb#include <sys/param.h> 42154484Sjhb#include <sys/systm.h> 43174629Sjeff#include <sys/kernel.h> 44154484Sjhb#include <sys/ktr.h> 45154484Sjhb#include <sys/lock.h> 46174629Sjeff#include <sys/lock_profile.h> 47174629Sjeff#include <sys/malloc.h> 48189845Sjeff#include <sys/mutex.h> 49174629Sjeff#include <sys/pcpu.h> 50174629Sjeff#include <sys/proc.h> 51164159Skmacy#include <sys/sbuf.h> 52189845Sjeff#include <sys/sched.h> 53174629Sjeff#include <sys/smp.h> 54164159Skmacy#include <sys/sysctl.h> 55154484Sjhb 56154484Sjhb#ifdef DDB 57154484Sjhb#include <ddb/ddb.h> 58154484Sjhb#endif 59154484Sjhb 60174629Sjeff#include <machine/cpufunc.h> 61174629Sjeff 62154484SjhbCTASSERT(LOCK_CLASS_MAX == 15); 63154484Sjhb 64154484Sjhbstruct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = { 65154484Sjhb &lock_class_mtx_spin, 66154484Sjhb &lock_class_mtx_sleep, 67154484Sjhb &lock_class_sx, 68173444Sups &lock_class_rm, 69154941Sjhb &lock_class_rw, 70164246Skmacy &lock_class_lockmgr, 71154484Sjhb}; 72154484Sjhb 73154484Sjhbvoid 74154484Sjhblock_init(struct lock_object *lock, struct lock_class *class, const char *name, 75154484Sjhb const char *type, int flags) 76154484Sjhb{ 77154484Sjhb int i; 78154484Sjhb 79154484Sjhb /* Check for double-init and zero object. */ 80154484Sjhb KASSERT(!lock_initalized(lock), ("lock \"%s\" %p already initialized", 81154484Sjhb name, lock)); 82154484Sjhb 83154484Sjhb /* Look up lock class to find its index. 
*/ 84154484Sjhb for (i = 0; i < LOCK_CLASS_MAX; i++) 85154484Sjhb if (lock_classes[i] == class) { 86154484Sjhb lock->lo_flags = i << LO_CLASSSHIFT; 87154484Sjhb break; 88154484Sjhb } 89154484Sjhb KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class)); 90154484Sjhb 91154484Sjhb /* Initialize the lock object. */ 92154484Sjhb lock->lo_name = name; 93154484Sjhb lock->lo_flags |= flags | LO_INITIALIZED; 94154484Sjhb LOCK_LOG_INIT(lock, 0); 95179025Sattilio WITNESS_INIT(lock, (type != NULL) ? type : name); 96154484Sjhb} 97154484Sjhb 98154484Sjhbvoid 99154484Sjhblock_destroy(struct lock_object *lock) 100154484Sjhb{ 101154484Sjhb 102154484Sjhb KASSERT(lock_initalized(lock), ("lock %p is not initialized", lock)); 103154484Sjhb WITNESS_DESTROY(lock); 104154484Sjhb LOCK_LOG_DESTROY(lock, 0); 105154484Sjhb lock->lo_flags &= ~LO_INITIALIZED; 106154484Sjhb} 107154484Sjhb 108154484Sjhb#ifdef DDB 109154484SjhbDB_SHOW_COMMAND(lock, db_show_lock) 110154484Sjhb{ 111154484Sjhb struct lock_object *lock; 112154484Sjhb struct lock_class *class; 113154484Sjhb 114154484Sjhb if (!have_addr) 115154484Sjhb return; 116154484Sjhb lock = (struct lock_object *)addr; 117154484Sjhb if (LO_CLASSINDEX(lock) > LOCK_CLASS_MAX) { 118154484Sjhb db_printf("Unknown lock class: %d\n", LO_CLASSINDEX(lock)); 119154484Sjhb return; 120154484Sjhb } 121154484Sjhb class = LOCK_CLASS(lock); 122154484Sjhb db_printf(" class: %s\n", class->lc_name); 123154484Sjhb db_printf(" name: %s\n", lock->lo_name); 124154484Sjhb class->lc_ddb_show(lock); 125154484Sjhb} 126154484Sjhb#endif 127164159Skmacy 128164159Skmacy#ifdef LOCK_PROFILING 129174629Sjeff 130174629Sjeff/* 131174629Sjeff * One object per-thread for each lock the thread owns. Tracks individual 132174629Sjeff * lock instances. 
133174629Sjeff */ 134174629Sjeffstruct lock_profile_object { 135174629Sjeff LIST_ENTRY(lock_profile_object) lpo_link; 136174629Sjeff struct lock_object *lpo_obj; 137174629Sjeff const char *lpo_file; 138174629Sjeff int lpo_line; 139174629Sjeff uint16_t lpo_ref; 140174629Sjeff uint16_t lpo_cnt; 141209390Sed uint64_t lpo_acqtime; 142209390Sed uint64_t lpo_waittime; 143174629Sjeff u_int lpo_contest_locking; 144174629Sjeff}; 145174629Sjeff 146174629Sjeff/* 147174629Sjeff * One lock_prof for each (file, line, lock object) triple. 148174629Sjeff */ 149174629Sjeffstruct lock_prof { 150174629Sjeff SLIST_ENTRY(lock_prof) link; 151175010Sjeff struct lock_class *class; 152174629Sjeff const char *file; 153174629Sjeff const char *name; 154174629Sjeff int line; 155174629Sjeff int ticks; 156180852Skmacy uintmax_t cnt_wait_max; 157174629Sjeff uintmax_t cnt_max; 158174629Sjeff uintmax_t cnt_tot; 159174629Sjeff uintmax_t cnt_wait; 160174629Sjeff uintmax_t cnt_cur; 161174629Sjeff uintmax_t cnt_contest_locking; 162174629Sjeff}; 163174629Sjeff 164174629SjeffSLIST_HEAD(lphead, lock_prof); 165174629Sjeff 166174629Sjeff#define LPROF_HASH_SIZE 4096 167174629Sjeff#define LPROF_HASH_MASK (LPROF_HASH_SIZE - 1) 168174629Sjeff#define LPROF_CACHE_SIZE 4096 169174629Sjeff 170174629Sjeff/* 171174629Sjeff * Array of objects and profs for each type of object for each cpu. Spinlocks 172215034Sbrucec * are handled separately because a thread may be preempted and acquire a 173174629Sjeff * spinlock while in the lock profiling code of a non-spinlock. In this way 174174629Sjeff * we only need a critical section to protect the per-cpu lists. 
175174629Sjeff */ 176174629Sjeffstruct lock_prof_type { 177174629Sjeff struct lphead lpt_lpalloc; 178174629Sjeff struct lpohead lpt_lpoalloc; 179174629Sjeff struct lphead lpt_hash[LPROF_HASH_SIZE]; 180174629Sjeff struct lock_prof lpt_prof[LPROF_CACHE_SIZE]; 181174629Sjeff struct lock_profile_object lpt_objs[LPROF_CACHE_SIZE]; 182174629Sjeff}; 183174629Sjeff 184174629Sjeffstruct lock_prof_cpu { 185174629Sjeff struct lock_prof_type lpc_types[2]; /* One for spin one for other. */ 186174629Sjeff}; 187174629Sjeff 188174629Sjeffstruct lock_prof_cpu *lp_cpu[MAXCPU]; 189174629Sjeff 190189845Sjeffvolatile int lock_prof_enable = 0; 191189845Sjeffstatic volatile int lock_prof_resetting; 192174629Sjeff 193212750Smdf#define LPROF_SBUF_SIZE 256 194174629Sjeff 195174629Sjeffstatic int lock_prof_rejected; 196174629Sjeffstatic int lock_prof_skipspin; 197174629Sjeffstatic int lock_prof_skipcount; 198174629Sjeff 199174629Sjeff#ifndef USE_CPU_NANOSECONDS 200209390Seduint64_t 201174629Sjeffnanoseconds(void) 202164159Skmacy{ 203174629Sjeff struct bintime bt; 204209390Sed uint64_t ns; 205164159Skmacy 206174629Sjeff binuptime(&bt); 207174629Sjeff /* From bintime2timespec */ 208209390Sed ns = bt.sec * (uint64_t)1000000000; 209174629Sjeff ns += ((uint64_t)1000000000 * (uint32_t)(bt.frac >> 32)) >> 32; 210174629Sjeff return (ns); 211174629Sjeff} 212174629Sjeff#endif 213174629Sjeff 214174629Sjeffstatic void 215174629Sjefflock_prof_init_type(struct lock_prof_type *type) 216174629Sjeff{ 217174629Sjeff int i; 218174629Sjeff 219174629Sjeff SLIST_INIT(&type->lpt_lpalloc); 220174629Sjeff LIST_INIT(&type->lpt_lpoalloc); 221174629Sjeff for (i = 0; i < LPROF_CACHE_SIZE; i++) { 222174629Sjeff SLIST_INSERT_HEAD(&type->lpt_lpalloc, &type->lpt_prof[i], 223174629Sjeff link); 224174629Sjeff LIST_INSERT_HEAD(&type->lpt_lpoalloc, &type->lpt_objs[i], 225174629Sjeff lpo_link); 226174629Sjeff } 227174629Sjeff} 228174629Sjeff 229174629Sjeffstatic void 230174629Sjefflock_prof_init(void *arg) 231174629Sjeff{ 
232174629Sjeff int cpu; 233174629Sjeff 234174629Sjeff for (cpu = 0; cpu <= mp_maxid; cpu++) { 235174629Sjeff lp_cpu[cpu] = malloc(sizeof(*lp_cpu[cpu]), M_DEVBUF, 236174629Sjeff M_WAITOK | M_ZERO); 237174629Sjeff lock_prof_init_type(&lp_cpu[cpu]->lpc_types[0]); 238174629Sjeff lock_prof_init_type(&lp_cpu[cpu]->lpc_types[1]); 239174629Sjeff } 240174629Sjeff} 241174629SjeffSYSINIT(lockprof, SI_SUB_SMP, SI_ORDER_ANY, lock_prof_init, NULL); 242174629Sjeff 243174629Sjeffstatic void 244189845Sjefflock_prof_reset_wait(void) 245189845Sjeff{ 246189845Sjeff 247189845Sjeff /* 248243046Sjeff * Spin relinquishing our cpu so that quiesce_all_cpus may 249243046Sjeff * complete. 250189845Sjeff */ 251189845Sjeff while (lock_prof_resetting) 252189845Sjeff sched_relinquish(curthread); 253189845Sjeff} 254189845Sjeff 255189845Sjeffstatic void 256174629Sjefflock_prof_reset(void) 257174629Sjeff{ 258174629Sjeff struct lock_prof_cpu *lpc; 259174629Sjeff int enabled, i, cpu; 260174629Sjeff 261189845Sjeff /* 262189845Sjeff * We not only race with acquiring and releasing locks but also 263189845Sjeff * thread exit. To be certain that threads exit without valid head 264189845Sjeff * pointers they must see resetting set before enabled is cleared. 265189845Sjeff * Otherwise a lock may not be removed from a per-thread list due 266189845Sjeff * to disabled being set but not wait for reset() to remove it below. 267189845Sjeff */ 268189845Sjeff atomic_store_rel_int(&lock_prof_resetting, 1); 269174629Sjeff enabled = lock_prof_enable; 270174629Sjeff lock_prof_enable = 0; 271243046Sjeff quiesce_all_cpus("profreset", 0); 272189845Sjeff /* 273189845Sjeff * Some objects may have migrated between CPUs. Clear all links 274189845Sjeff * before we zero the structures. Some items may still be linked 275189845Sjeff * into per-thread lists as well. 
276189845Sjeff */ 277174629Sjeff for (cpu = 0; cpu <= mp_maxid; cpu++) { 278174629Sjeff lpc = lp_cpu[cpu]; 279174629Sjeff for (i = 0; i < LPROF_CACHE_SIZE; i++) { 280174629Sjeff LIST_REMOVE(&lpc->lpc_types[0].lpt_objs[i], lpo_link); 281174629Sjeff LIST_REMOVE(&lpc->lpc_types[1].lpt_objs[i], lpo_link); 282174629Sjeff } 283189845Sjeff } 284189845Sjeff for (cpu = 0; cpu <= mp_maxid; cpu++) { 285189845Sjeff lpc = lp_cpu[cpu]; 286174629Sjeff bzero(lpc, sizeof(*lpc)); 287174629Sjeff lock_prof_init_type(&lpc->lpc_types[0]); 288174629Sjeff lock_prof_init_type(&lpc->lpc_types[1]); 289174629Sjeff } 290189845Sjeff atomic_store_rel_int(&lock_prof_resetting, 0); 291174629Sjeff lock_prof_enable = enabled; 292174629Sjeff} 293174629Sjeff 294174629Sjeffstatic void 295174629Sjefflock_prof_output(struct lock_prof *lp, struct sbuf *sb) 296174629Sjeff{ 297174629Sjeff const char *p; 298174629Sjeff 299174629Sjeff for (p = lp->file; p != NULL && strncmp(p, "../", 3) == 0; p += 3); 300174629Sjeff sbuf_printf(sb, 301180852Skmacy "%8ju %9ju %11ju %11ju %11ju %6ju %6ju %2ju %6ju %s:%d (%s:%s)\n", 302180852Skmacy lp->cnt_max / 1000, lp->cnt_wait_max / 1000, lp->cnt_tot / 1000, 303174629Sjeff lp->cnt_wait / 1000, lp->cnt_cur, 304174629Sjeff lp->cnt_cur == 0 ? (uintmax_t)0 : 305174629Sjeff lp->cnt_tot / (lp->cnt_cur * 1000), 306174629Sjeff lp->cnt_cur == 0 ? 
(uintmax_t)0 : 307174629Sjeff lp->cnt_wait / (lp->cnt_cur * 1000), 308174629Sjeff (uintmax_t)0, lp->cnt_contest_locking, 309175010Sjeff p, lp->line, lp->class->lc_name, lp->name); 310174629Sjeff} 311174629Sjeff 312174629Sjeffstatic void 313174629Sjefflock_prof_sum(struct lock_prof *match, struct lock_prof *dst, int hash, 314174629Sjeff int spin, int t) 315174629Sjeff{ 316174629Sjeff struct lock_prof_type *type; 317174629Sjeff struct lock_prof *l; 318174629Sjeff int cpu; 319174629Sjeff 320174629Sjeff dst->file = match->file; 321174629Sjeff dst->line = match->line; 322175010Sjeff dst->class = match->class; 323174629Sjeff dst->name = match->name; 324174629Sjeff 325174629Sjeff for (cpu = 0; cpu <= mp_maxid; cpu++) { 326174629Sjeff if (lp_cpu[cpu] == NULL) 327174629Sjeff continue; 328174629Sjeff type = &lp_cpu[cpu]->lpc_types[spin]; 329174629Sjeff SLIST_FOREACH(l, &type->lpt_hash[hash], link) { 330174629Sjeff if (l->ticks == t) 331174629Sjeff continue; 332174629Sjeff if (l->file != match->file || l->line != match->line || 333175010Sjeff l->name != match->name) 334174629Sjeff continue; 335174629Sjeff l->ticks = t; 336174629Sjeff if (l->cnt_max > dst->cnt_max) 337174629Sjeff dst->cnt_max = l->cnt_max; 338180852Skmacy if (l->cnt_wait_max > dst->cnt_wait_max) 339180852Skmacy dst->cnt_wait_max = l->cnt_wait_max; 340174629Sjeff dst->cnt_tot += l->cnt_tot; 341174629Sjeff dst->cnt_wait += l->cnt_wait; 342174629Sjeff dst->cnt_cur += l->cnt_cur; 343174629Sjeff dst->cnt_contest_locking += l->cnt_contest_locking; 344174629Sjeff } 345174629Sjeff } 346167012Skmacy 347174629Sjeff} 348174629Sjeff 349174629Sjeffstatic void 350174629Sjefflock_prof_type_stats(struct lock_prof_type *type, struct sbuf *sb, int spin, 351174629Sjeff int t) 352174629Sjeff{ 353174629Sjeff struct lock_prof *l; 354174629Sjeff int i; 355174629Sjeff 356174629Sjeff for (i = 0; i < LPROF_HASH_SIZE; ++i) { 357174629Sjeff SLIST_FOREACH(l, &type->lpt_hash[i], link) { 358174629Sjeff struct lock_prof lp = {}; 
359174629Sjeff 360174629Sjeff if (l->ticks == t) 361174629Sjeff continue; 362174629Sjeff lock_prof_sum(l, &lp, i, spin, t); 363174629Sjeff lock_prof_output(&lp, sb); 364174629Sjeff } 365174629Sjeff } 366174629Sjeff} 367174629Sjeff 368174629Sjeffstatic int 369174629Sjeffdump_lock_prof_stats(SYSCTL_HANDLER_ARGS) 370174629Sjeff{ 371174629Sjeff struct sbuf *sb; 372174629Sjeff int error, cpu, t; 373175010Sjeff int enabled; 374174629Sjeff 375217916Smdf error = sysctl_wire_old_buffer(req, 0); 376217916Smdf if (error != 0) 377217916Smdf return (error); 378212750Smdf sb = sbuf_new_for_sysctl(NULL, NULL, LPROF_SBUF_SIZE, req); 379180852Skmacy sbuf_printf(sb, "\n%8s %9s %11s %11s %11s %6s %6s %2s %6s %s\n", 380180852Skmacy "max", "wait_max", "total", "wait_total", "count", "avg", "wait_avg", "cnt_hold", "cnt_lock", "name"); 381175010Sjeff enabled = lock_prof_enable; 382175010Sjeff lock_prof_enable = 0; 383243046Sjeff quiesce_all_cpus("profstat", 0); 384174629Sjeff t = ticks; 385174629Sjeff for (cpu = 0; cpu <= mp_maxid; cpu++) { 386174629Sjeff if (lp_cpu[cpu] == NULL) 387174629Sjeff continue; 388174629Sjeff lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[0], sb, 0, t); 389174629Sjeff lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[1], sb, 1, t); 390174629Sjeff } 391175010Sjeff lock_prof_enable = enabled; 392174629Sjeff 393212750Smdf error = sbuf_finish(sb); 394212750Smdf /* Output a trailing NUL. 
*/ 395212750Smdf if (error == 0) 396212750Smdf error = SYSCTL_OUT(req, "", 1); 397174629Sjeff sbuf_delete(sb); 398174629Sjeff return (error); 399174629Sjeff} 400174629Sjeff 401174629Sjeffstatic int 402174629Sjeffenable_lock_prof(SYSCTL_HANDLER_ARGS) 403174629Sjeff{ 404174629Sjeff int error, v; 405174629Sjeff 406174629Sjeff v = lock_prof_enable; 407174629Sjeff error = sysctl_handle_int(oidp, &v, v, req); 408174629Sjeff if (error) 409174629Sjeff return (error); 410174629Sjeff if (req->newptr == NULL) 411174629Sjeff return (error); 412174629Sjeff if (v == lock_prof_enable) 413174629Sjeff return (0); 414174629Sjeff if (v == 1) 415174629Sjeff lock_prof_reset(); 416174629Sjeff lock_prof_enable = !!v; 417174629Sjeff 418174629Sjeff return (0); 419174629Sjeff} 420174629Sjeff 421174629Sjeffstatic int 422174629Sjeffreset_lock_prof_stats(SYSCTL_HANDLER_ARGS) 423174629Sjeff{ 424174629Sjeff int error, v; 425174629Sjeff 426174629Sjeff v = 0; 427174629Sjeff error = sysctl_handle_int(oidp, &v, 0, req); 428174629Sjeff if (error) 429174629Sjeff return (error); 430174629Sjeff if (req->newptr == NULL) 431174629Sjeff return (error); 432174629Sjeff if (v == 0) 433174629Sjeff return (0); 434174629Sjeff lock_prof_reset(); 435174629Sjeff 436174629Sjeff return (0); 437174629Sjeff} 438174629Sjeff 439174629Sjeffstatic struct lock_prof * 440174629Sjefflock_profile_lookup(struct lock_object *lo, int spin, const char *file, 441174629Sjeff int line) 442174629Sjeff{ 443174629Sjeff const char *unknown = "(unknown)"; 444174629Sjeff struct lock_prof_type *type; 445174629Sjeff struct lock_prof *lp; 446174629Sjeff struct lphead *head; 447174629Sjeff const char *p; 448174629Sjeff u_int hash; 449174629Sjeff 450174629Sjeff p = file; 451174629Sjeff if (p == NULL || *p == '\0') 452174629Sjeff p = unknown; 453174629Sjeff hash = (uintptr_t)lo->lo_name * 31 + (uintptr_t)p * 31 + line; 454174629Sjeff hash &= LPROF_HASH_MASK; 455174629Sjeff type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin]; 456174629Sjeff head = 
&type->lpt_hash[hash]; 457174629Sjeff SLIST_FOREACH(lp, head, link) { 458174629Sjeff if (lp->line == line && lp->file == p && 459174629Sjeff lp->name == lo->lo_name) 460174629Sjeff return (lp); 461174629Sjeff 462174629Sjeff } 463174629Sjeff lp = SLIST_FIRST(&type->lpt_lpalloc); 464174629Sjeff if (lp == NULL) { 465174629Sjeff lock_prof_rejected++; 466174629Sjeff return (lp); 467174629Sjeff } 468174629Sjeff SLIST_REMOVE_HEAD(&type->lpt_lpalloc, link); 469174629Sjeff lp->file = p; 470174629Sjeff lp->line = line; 471175010Sjeff lp->class = LOCK_CLASS(lo); 472174629Sjeff lp->name = lo->lo_name; 473174629Sjeff SLIST_INSERT_HEAD(&type->lpt_hash[hash], lp, link); 474174629Sjeff return (lp); 475174629Sjeff} 476174629Sjeff 477174629Sjeffstatic struct lock_profile_object * 478174629Sjefflock_profile_object_lookup(struct lock_object *lo, int spin, const char *file, 479174629Sjeff int line) 480174629Sjeff{ 481174629Sjeff struct lock_profile_object *l; 482174629Sjeff struct lock_prof_type *type; 483174629Sjeff struct lpohead *head; 484174629Sjeff 485174629Sjeff head = &curthread->td_lprof[spin]; 486174629Sjeff LIST_FOREACH(l, head, lpo_link) 487174629Sjeff if (l->lpo_obj == lo && l->lpo_file == file && 488174629Sjeff l->lpo_line == line) 489174629Sjeff return (l); 490174629Sjeff type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin]; 491174629Sjeff l = LIST_FIRST(&type->lpt_lpoalloc); 492174629Sjeff if (l == NULL) { 493174629Sjeff lock_prof_rejected++; 494174629Sjeff return (NULL); 495174629Sjeff } 496174629Sjeff LIST_REMOVE(l, lpo_link); 497174629Sjeff l->lpo_obj = lo; 498174629Sjeff l->lpo_file = file; 499174629Sjeff l->lpo_line = line; 500174629Sjeff l->lpo_cnt = 0; 501174629Sjeff LIST_INSERT_HEAD(head, l, lpo_link); 502174629Sjeff 503174629Sjeff return (l); 504174629Sjeff} 505174629Sjeff 506174629Sjeffvoid 507174629Sjefflock_profile_obtain_lock_success(struct lock_object *lo, int contested, 508174629Sjeff uint64_t waittime, const char *file, int line) 509174629Sjeff{ 
510174629Sjeff static int lock_prof_count; 511174629Sjeff struct lock_profile_object *l; 512174629Sjeff int spin; 513174629Sjeff 514228424Savg if (SCHEDULER_STOPPED()) 515228424Savg return; 516228424Savg 517174629Sjeff /* don't reset the timer when/if recursing */ 518174629Sjeff if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE)) 519174629Sjeff return; 520174629Sjeff if (lock_prof_skipcount && 521175150Skris (++lock_prof_count % lock_prof_skipcount) != 0) 522174629Sjeff return; 523176013Sattilio spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0; 524174629Sjeff if (spin && lock_prof_skipspin == 1) 525174629Sjeff return; 526189845Sjeff critical_enter(); 527189845Sjeff /* Recheck enabled now that we're in a critical section. */ 528189845Sjeff if (lock_prof_enable == 0) 529189845Sjeff goto out; 530174629Sjeff l = lock_profile_object_lookup(lo, spin, file, line); 531174629Sjeff if (l == NULL) 532189845Sjeff goto out; 533174629Sjeff l->lpo_cnt++; 534174629Sjeff if (++l->lpo_ref > 1) 535189845Sjeff goto out; 536174629Sjeff l->lpo_contest_locking = contested; 537168315Skmacy l->lpo_acqtime = nanoseconds(); 538168315Skmacy if (waittime && (l->lpo_acqtime > waittime)) 539168315Skmacy l->lpo_waittime = l->lpo_acqtime - waittime; 540168315Skmacy else 541168315Skmacy l->lpo_waittime = 0; 542189845Sjeffout: 543189845Sjeff critical_exit(); 544164159Skmacy} 545164159Skmacy 546174629Sjeffvoid 547189845Sjefflock_profile_thread_exit(struct thread *td) 548189845Sjeff{ 549189845Sjeff#ifdef INVARIANTS 550189845Sjeff struct lock_profile_object *l; 551189845Sjeff 552189845Sjeff MPASS(curthread->td_critnest == 0); 553189845Sjeff#endif 554189845Sjeff /* 555189845Sjeff * If lock profiling was disabled we have to wait for reset to 556189845Sjeff * clear our pointers before we can exit safely. 
557189845Sjeff */ 558189845Sjeff lock_prof_reset_wait(); 559189845Sjeff#ifdef INVARIANTS 560189845Sjeff LIST_FOREACH(l, &td->td_lprof[0], lpo_link) 561189845Sjeff printf("thread still holds lock acquired at %s:%d\n", 562189845Sjeff l->lpo_file, l->lpo_line); 563189845Sjeff LIST_FOREACH(l, &td->td_lprof[1], lpo_link) 564189845Sjeff printf("thread still holds lock acquired at %s:%d\n", 565189845Sjeff l->lpo_file, l->lpo_line); 566189845Sjeff#endif 567189845Sjeff MPASS(LIST_FIRST(&td->td_lprof[0]) == NULL); 568189845Sjeff MPASS(LIST_FIRST(&td->td_lprof[1]) == NULL); 569189845Sjeff} 570189845Sjeff 571189845Sjeffvoid 572174629Sjefflock_profile_release_lock(struct lock_object *lo) 573164159Skmacy{ 574174629Sjeff struct lock_profile_object *l; 575174629Sjeff struct lock_prof_type *type; 576174629Sjeff struct lock_prof *lp; 577209390Sed uint64_t curtime, holdtime; 578174629Sjeff struct lpohead *head; 579174629Sjeff int spin; 580164159Skmacy 581228424Savg if (SCHEDULER_STOPPED()) 582228424Savg return; 583189845Sjeff if (lo->lo_flags & LO_NOPROFILE) 584174629Sjeff return; 585176013Sattilio spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0; 586174629Sjeff head = &curthread->td_lprof[spin]; 587189845Sjeff if (LIST_FIRST(head) == NULL) 588189845Sjeff return; 589174629Sjeff critical_enter(); 590189845Sjeff /* Recheck enabled now that we're in a critical section. */ 591189845Sjeff if (lock_prof_enable == 0 && lock_prof_resetting == 1) 592189845Sjeff goto out; 593189845Sjeff /* 594189845Sjeff * If lock profiling is not enabled we still want to remove the 595189845Sjeff * lpo from our queue. 
596189845Sjeff */ 597174629Sjeff LIST_FOREACH(l, head, lpo_link) 598174629Sjeff if (l->lpo_obj == lo) 599174629Sjeff break; 600174629Sjeff if (l == NULL) 601174629Sjeff goto out; 602174629Sjeff if (--l->lpo_ref > 0) 603174629Sjeff goto out; 604174629Sjeff lp = lock_profile_lookup(lo, spin, l->lpo_file, l->lpo_line); 605174629Sjeff if (lp == NULL) 606174629Sjeff goto release; 607209247Savg curtime = nanoseconds(); 608209247Savg if (curtime < l->lpo_acqtime) 609174629Sjeff goto release; 610209247Savg holdtime = curtime - l->lpo_acqtime; 611209247Savg 612174629Sjeff /* 613174629Sjeff * Record if the lock has been held longer now than ever 614174629Sjeff * before. 615174629Sjeff */ 616174629Sjeff if (holdtime > lp->cnt_max) 617174629Sjeff lp->cnt_max = holdtime; 618180852Skmacy if (l->lpo_waittime > lp->cnt_wait_max) 619180852Skmacy lp->cnt_wait_max = l->lpo_waittime; 620174629Sjeff lp->cnt_tot += holdtime; 621174629Sjeff lp->cnt_wait += l->lpo_waittime; 622174629Sjeff lp->cnt_contest_locking += l->lpo_contest_locking; 623174629Sjeff lp->cnt_cur += l->lpo_cnt; 624174629Sjeffrelease: 625174629Sjeff LIST_REMOVE(l, lpo_link); 626174629Sjeff type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin]; 627174629Sjeff LIST_INSERT_HEAD(&type->lpt_lpoalloc, l, lpo_link); 628174629Sjeffout: 629174629Sjeff critical_exit(); 630174629Sjeff} 631164159Skmacy 632227309Sedstatic SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD, NULL, "lock debugging"); 633227309Sedstatic SYSCTL_NODE(_debug_lock, OID_AUTO, prof, CTLFLAG_RD, NULL, 634227309Sed "lock profiling"); 635174629SjeffSYSCTL_INT(_debug_lock_prof, OID_AUTO, skipspin, CTLFLAG_RW, 636174629Sjeff &lock_prof_skipspin, 0, "Skip profiling on spinlocks."); 637174629SjeffSYSCTL_INT(_debug_lock_prof, OID_AUTO, skipcount, CTLFLAG_RW, 638174629Sjeff &lock_prof_skipcount, 0, "Sample approximately every N lock acquisitions."); 639174629SjeffSYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD, 640174629Sjeff &lock_prof_rejected, 0, "Number of 
rejected profiling records"); 641174629SjeffSYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD, 642174629Sjeff NULL, 0, dump_lock_prof_stats, "A", "Lock profiling statistics"); 643174629SjeffSYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW, 644174629Sjeff NULL, 0, reset_lock_prof_stats, "I", "Reset lock profiling statistics"); 645174629SjeffSYSCTL_PROC(_debug_lock_prof, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW, 646174629Sjeff NULL, 0, enable_lock_prof, "I", "Enable lock profiling"); 647164159Skmacy 648164159Skmacy#endif 649