/* subr_lock.c revision 228424 */
1154484Sjhb/*- 2154484Sjhb * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org> 3154484Sjhb * All rights reserved. 4154484Sjhb * 5154484Sjhb * Redistribution and use in source and binary forms, with or without 6154484Sjhb * modification, are permitted provided that the following conditions 7154484Sjhb * are met: 8154484Sjhb * 1. Redistributions of source code must retain the above copyright 9154484Sjhb * notice, this list of conditions and the following disclaimer. 10154484Sjhb * 2. Redistributions in binary form must reproduce the above copyright 11154484Sjhb * notice, this list of conditions and the following disclaimer in the 12154484Sjhb * documentation and/or other materials provided with the distribution. 13154484Sjhb * 3. Neither the name of the author nor the names of any co-contributors 14154484Sjhb * may be used to endorse or promote products derived from this software 15154484Sjhb * without specific prior written permission. 16154484Sjhb * 17154484Sjhb * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18154484Sjhb * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19154484Sjhb * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20154484Sjhb * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21154484Sjhb * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22154484Sjhb * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23154484Sjhb * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24154484Sjhb * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25154484Sjhb * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26154484Sjhb * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27154484Sjhb * SUCH DAMAGE. 
28154484Sjhb */ 29154484Sjhb 30154484Sjhb/* 31154484Sjhb * This module holds the global variables and functions used to maintain 32154484Sjhb * lock_object structures. 33154484Sjhb */ 34154484Sjhb 35154484Sjhb#include <sys/cdefs.h> 36154484Sjhb__FBSDID("$FreeBSD: head/sys/kern/subr_lock.c 228424 2011-12-11 21:02:01Z avg $"); 37154484Sjhb 38154485Sjhb#include "opt_ddb.h" 39164159Skmacy#include "opt_mprof.h" 40154485Sjhb 41154484Sjhb#include <sys/param.h> 42154484Sjhb#include <sys/systm.h> 43174629Sjeff#include <sys/kernel.h> 44154484Sjhb#include <sys/ktr.h> 45154484Sjhb#include <sys/lock.h> 46174629Sjeff#include <sys/lock_profile.h> 47174629Sjeff#include <sys/malloc.h> 48189845Sjeff#include <sys/mutex.h> 49174629Sjeff#include <sys/pcpu.h> 50174629Sjeff#include <sys/proc.h> 51164159Skmacy#include <sys/sbuf.h> 52189845Sjeff#include <sys/sched.h> 53174629Sjeff#include <sys/smp.h> 54164159Skmacy#include <sys/sysctl.h> 55154484Sjhb 56154484Sjhb#ifdef DDB 57154484Sjhb#include <ddb/ddb.h> 58154484Sjhb#endif 59154484Sjhb 60174629Sjeff#include <machine/cpufunc.h> 61174629Sjeff 62154484SjhbCTASSERT(LOCK_CLASS_MAX == 15); 63154484Sjhb 64154484Sjhbstruct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = { 65154484Sjhb &lock_class_mtx_spin, 66154484Sjhb &lock_class_mtx_sleep, 67154484Sjhb &lock_class_sx, 68173444Sups &lock_class_rm, 69154941Sjhb &lock_class_rw, 70164246Skmacy &lock_class_lockmgr, 71154484Sjhb}; 72154484Sjhb 73154484Sjhbvoid 74154484Sjhblock_init(struct lock_object *lock, struct lock_class *class, const char *name, 75154484Sjhb const char *type, int flags) 76154484Sjhb{ 77154484Sjhb int i; 78154484Sjhb 79154484Sjhb /* Check for double-init and zero object. */ 80154484Sjhb KASSERT(!lock_initalized(lock), ("lock \"%s\" %p already initialized", 81154484Sjhb name, lock)); 82154484Sjhb 83154484Sjhb /* Look up lock class to find its index. 
*/ 84154484Sjhb for (i = 0; i < LOCK_CLASS_MAX; i++) 85154484Sjhb if (lock_classes[i] == class) { 86154484Sjhb lock->lo_flags = i << LO_CLASSSHIFT; 87154484Sjhb break; 88154484Sjhb } 89154484Sjhb KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class)); 90154484Sjhb 91154484Sjhb /* Initialize the lock object. */ 92154484Sjhb lock->lo_name = name; 93154484Sjhb lock->lo_flags |= flags | LO_INITIALIZED; 94154484Sjhb LOCK_LOG_INIT(lock, 0); 95179025Sattilio WITNESS_INIT(lock, (type != NULL) ? type : name); 96154484Sjhb} 97154484Sjhb 98154484Sjhbvoid 99154484Sjhblock_destroy(struct lock_object *lock) 100154484Sjhb{ 101154484Sjhb 102154484Sjhb KASSERT(lock_initalized(lock), ("lock %p is not initialized", lock)); 103154484Sjhb WITNESS_DESTROY(lock); 104154484Sjhb LOCK_LOG_DESTROY(lock, 0); 105154484Sjhb lock->lo_flags &= ~LO_INITIALIZED; 106154484Sjhb} 107154484Sjhb 108154484Sjhb#ifdef DDB 109154484SjhbDB_SHOW_COMMAND(lock, db_show_lock) 110154484Sjhb{ 111154484Sjhb struct lock_object *lock; 112154484Sjhb struct lock_class *class; 113154484Sjhb 114154484Sjhb if (!have_addr) 115154484Sjhb return; 116154484Sjhb lock = (struct lock_object *)addr; 117154484Sjhb if (LO_CLASSINDEX(lock) > LOCK_CLASS_MAX) { 118154484Sjhb db_printf("Unknown lock class: %d\n", LO_CLASSINDEX(lock)); 119154484Sjhb return; 120154484Sjhb } 121154484Sjhb class = LOCK_CLASS(lock); 122154484Sjhb db_printf(" class: %s\n", class->lc_name); 123154484Sjhb db_printf(" name: %s\n", lock->lo_name); 124154484Sjhb class->lc_ddb_show(lock); 125154484Sjhb} 126154484Sjhb#endif 127164159Skmacy 128164159Skmacy#ifdef LOCK_PROFILING 129174629Sjeff 130174629Sjeff/* 131174629Sjeff * One object per-thread for each lock the thread owns. Tracks individual 132174629Sjeff * lock instances. 
133174629Sjeff */ 134174629Sjeffstruct lock_profile_object { 135174629Sjeff LIST_ENTRY(lock_profile_object) lpo_link; 136174629Sjeff struct lock_object *lpo_obj; 137174629Sjeff const char *lpo_file; 138174629Sjeff int lpo_line; 139174629Sjeff uint16_t lpo_ref; 140174629Sjeff uint16_t lpo_cnt; 141209390Sed uint64_t lpo_acqtime; 142209390Sed uint64_t lpo_waittime; 143174629Sjeff u_int lpo_contest_locking; 144174629Sjeff}; 145174629Sjeff 146174629Sjeff/* 147174629Sjeff * One lock_prof for each (file, line, lock object) triple. 148174629Sjeff */ 149174629Sjeffstruct lock_prof { 150174629Sjeff SLIST_ENTRY(lock_prof) link; 151175010Sjeff struct lock_class *class; 152174629Sjeff const char *file; 153174629Sjeff const char *name; 154174629Sjeff int line; 155174629Sjeff int ticks; 156180852Skmacy uintmax_t cnt_wait_max; 157174629Sjeff uintmax_t cnt_max; 158174629Sjeff uintmax_t cnt_tot; 159174629Sjeff uintmax_t cnt_wait; 160174629Sjeff uintmax_t cnt_cur; 161174629Sjeff uintmax_t cnt_contest_locking; 162174629Sjeff}; 163174629Sjeff 164174629SjeffSLIST_HEAD(lphead, lock_prof); 165174629Sjeff 166174629Sjeff#define LPROF_HASH_SIZE 4096 167174629Sjeff#define LPROF_HASH_MASK (LPROF_HASH_SIZE - 1) 168174629Sjeff#define LPROF_CACHE_SIZE 4096 169174629Sjeff 170174629Sjeff/* 171174629Sjeff * Array of objects and profs for each type of object for each cpu. Spinlocks 172215034Sbrucec * are handled separately because a thread may be preempted and acquire a 173174629Sjeff * spinlock while in the lock profiling code of a non-spinlock. In this way 174174629Sjeff * we only need a critical section to protect the per-cpu lists. 
175174629Sjeff */ 176174629Sjeffstruct lock_prof_type { 177174629Sjeff struct lphead lpt_lpalloc; 178174629Sjeff struct lpohead lpt_lpoalloc; 179174629Sjeff struct lphead lpt_hash[LPROF_HASH_SIZE]; 180174629Sjeff struct lock_prof lpt_prof[LPROF_CACHE_SIZE]; 181174629Sjeff struct lock_profile_object lpt_objs[LPROF_CACHE_SIZE]; 182174629Sjeff}; 183174629Sjeff 184174629Sjeffstruct lock_prof_cpu { 185174629Sjeff struct lock_prof_type lpc_types[2]; /* One for spin one for other. */ 186174629Sjeff}; 187174629Sjeff 188174629Sjeffstruct lock_prof_cpu *lp_cpu[MAXCPU]; 189174629Sjeff 190189845Sjeffvolatile int lock_prof_enable = 0; 191189845Sjeffstatic volatile int lock_prof_resetting; 192174629Sjeff 193212750Smdf#define LPROF_SBUF_SIZE 256 194174629Sjeff 195174629Sjeffstatic int lock_prof_rejected; 196174629Sjeffstatic int lock_prof_skipspin; 197174629Sjeffstatic int lock_prof_skipcount; 198174629Sjeff 199174629Sjeff#ifndef USE_CPU_NANOSECONDS 200209390Seduint64_t 201174629Sjeffnanoseconds(void) 202164159Skmacy{ 203174629Sjeff struct bintime bt; 204209390Sed uint64_t ns; 205164159Skmacy 206174629Sjeff binuptime(&bt); 207174629Sjeff /* From bintime2timespec */ 208209390Sed ns = bt.sec * (uint64_t)1000000000; 209174629Sjeff ns += ((uint64_t)1000000000 * (uint32_t)(bt.frac >> 32)) >> 32; 210174629Sjeff return (ns); 211174629Sjeff} 212174629Sjeff#endif 213174629Sjeff 214174629Sjeffstatic void 215174629Sjefflock_prof_init_type(struct lock_prof_type *type) 216174629Sjeff{ 217174629Sjeff int i; 218174629Sjeff 219174629Sjeff SLIST_INIT(&type->lpt_lpalloc); 220174629Sjeff LIST_INIT(&type->lpt_lpoalloc); 221174629Sjeff for (i = 0; i < LPROF_CACHE_SIZE; i++) { 222174629Sjeff SLIST_INSERT_HEAD(&type->lpt_lpalloc, &type->lpt_prof[i], 223174629Sjeff link); 224174629Sjeff LIST_INSERT_HEAD(&type->lpt_lpoalloc, &type->lpt_objs[i], 225174629Sjeff lpo_link); 226174629Sjeff } 227174629Sjeff} 228174629Sjeff 229174629Sjeffstatic void 230174629Sjefflock_prof_init(void *arg) 231174629Sjeff{ 
232174629Sjeff int cpu; 233174629Sjeff 234174629Sjeff for (cpu = 0; cpu <= mp_maxid; cpu++) { 235174629Sjeff lp_cpu[cpu] = malloc(sizeof(*lp_cpu[cpu]), M_DEVBUF, 236174629Sjeff M_WAITOK | M_ZERO); 237174629Sjeff lock_prof_init_type(&lp_cpu[cpu]->lpc_types[0]); 238174629Sjeff lock_prof_init_type(&lp_cpu[cpu]->lpc_types[1]); 239174629Sjeff } 240174629Sjeff} 241174629SjeffSYSINIT(lockprof, SI_SUB_SMP, SI_ORDER_ANY, lock_prof_init, NULL); 242174629Sjeff 243189845Sjeff/* 244189845Sjeff * To be certain that lock profiling has idled on all cpus before we 245189845Sjeff * reset, we schedule the resetting thread on all active cpus. Since 246189845Sjeff * all operations happen within critical sections we can be sure that 247189845Sjeff * it is safe to zero the profiling structures. 248189845Sjeff */ 249174629Sjeffstatic void 250189845Sjefflock_prof_idle(void) 251189845Sjeff{ 252189845Sjeff struct thread *td; 253189845Sjeff int cpu; 254189845Sjeff 255189845Sjeff td = curthread; 256189845Sjeff thread_lock(td); 257209059Sjhb CPU_FOREACH(cpu) { 258189845Sjeff sched_bind(td, cpu); 259189845Sjeff } 260189845Sjeff sched_unbind(td); 261189845Sjeff thread_unlock(td); 262189845Sjeff} 263189845Sjeff 264189845Sjeffstatic void 265189845Sjefflock_prof_reset_wait(void) 266189845Sjeff{ 267189845Sjeff 268189845Sjeff /* 269189845Sjeff * Spin relinquishing our cpu so that lock_prof_idle may 270189845Sjeff * run on it. 271189845Sjeff */ 272189845Sjeff while (lock_prof_resetting) 273189845Sjeff sched_relinquish(curthread); 274189845Sjeff} 275189845Sjeff 276189845Sjeffstatic void 277174629Sjefflock_prof_reset(void) 278174629Sjeff{ 279174629Sjeff struct lock_prof_cpu *lpc; 280174629Sjeff int enabled, i, cpu; 281174629Sjeff 282189845Sjeff /* 283189845Sjeff * We not only race with acquiring and releasing locks but also 284189845Sjeff * thread exit. To be certain that threads exit without valid head 285189845Sjeff * pointers they must see resetting set before enabled is cleared. 
286189845Sjeff * Otherwise a lock may not be removed from a per-thread list due 287189845Sjeff * to disabled being set but not wait for reset() to remove it below. 288189845Sjeff */ 289189845Sjeff atomic_store_rel_int(&lock_prof_resetting, 1); 290174629Sjeff enabled = lock_prof_enable; 291174629Sjeff lock_prof_enable = 0; 292189845Sjeff lock_prof_idle(); 293189845Sjeff /* 294189845Sjeff * Some objects may have migrated between CPUs. Clear all links 295189845Sjeff * before we zero the structures. Some items may still be linked 296189845Sjeff * into per-thread lists as well. 297189845Sjeff */ 298174629Sjeff for (cpu = 0; cpu <= mp_maxid; cpu++) { 299174629Sjeff lpc = lp_cpu[cpu]; 300174629Sjeff for (i = 0; i < LPROF_CACHE_SIZE; i++) { 301174629Sjeff LIST_REMOVE(&lpc->lpc_types[0].lpt_objs[i], lpo_link); 302174629Sjeff LIST_REMOVE(&lpc->lpc_types[1].lpt_objs[i], lpo_link); 303174629Sjeff } 304189845Sjeff } 305189845Sjeff for (cpu = 0; cpu <= mp_maxid; cpu++) { 306189845Sjeff lpc = lp_cpu[cpu]; 307174629Sjeff bzero(lpc, sizeof(*lpc)); 308174629Sjeff lock_prof_init_type(&lpc->lpc_types[0]); 309174629Sjeff lock_prof_init_type(&lpc->lpc_types[1]); 310174629Sjeff } 311189845Sjeff atomic_store_rel_int(&lock_prof_resetting, 0); 312174629Sjeff lock_prof_enable = enabled; 313174629Sjeff} 314174629Sjeff 315174629Sjeffstatic void 316174629Sjefflock_prof_output(struct lock_prof *lp, struct sbuf *sb) 317174629Sjeff{ 318174629Sjeff const char *p; 319174629Sjeff 320174629Sjeff for (p = lp->file; p != NULL && strncmp(p, "../", 3) == 0; p += 3); 321174629Sjeff sbuf_printf(sb, 322180852Skmacy "%8ju %9ju %11ju %11ju %11ju %6ju %6ju %2ju %6ju %s:%d (%s:%s)\n", 323180852Skmacy lp->cnt_max / 1000, lp->cnt_wait_max / 1000, lp->cnt_tot / 1000, 324174629Sjeff lp->cnt_wait / 1000, lp->cnt_cur, 325174629Sjeff lp->cnt_cur == 0 ? (uintmax_t)0 : 326174629Sjeff lp->cnt_tot / (lp->cnt_cur * 1000), 327174629Sjeff lp->cnt_cur == 0 ? 
(uintmax_t)0 : 328174629Sjeff lp->cnt_wait / (lp->cnt_cur * 1000), 329174629Sjeff (uintmax_t)0, lp->cnt_contest_locking, 330175010Sjeff p, lp->line, lp->class->lc_name, lp->name); 331174629Sjeff} 332174629Sjeff 333174629Sjeffstatic void 334174629Sjefflock_prof_sum(struct lock_prof *match, struct lock_prof *dst, int hash, 335174629Sjeff int spin, int t) 336174629Sjeff{ 337174629Sjeff struct lock_prof_type *type; 338174629Sjeff struct lock_prof *l; 339174629Sjeff int cpu; 340174629Sjeff 341174629Sjeff dst->file = match->file; 342174629Sjeff dst->line = match->line; 343175010Sjeff dst->class = match->class; 344174629Sjeff dst->name = match->name; 345174629Sjeff 346174629Sjeff for (cpu = 0; cpu <= mp_maxid; cpu++) { 347174629Sjeff if (lp_cpu[cpu] == NULL) 348174629Sjeff continue; 349174629Sjeff type = &lp_cpu[cpu]->lpc_types[spin]; 350174629Sjeff SLIST_FOREACH(l, &type->lpt_hash[hash], link) { 351174629Sjeff if (l->ticks == t) 352174629Sjeff continue; 353174629Sjeff if (l->file != match->file || l->line != match->line || 354175010Sjeff l->name != match->name) 355174629Sjeff continue; 356174629Sjeff l->ticks = t; 357174629Sjeff if (l->cnt_max > dst->cnt_max) 358174629Sjeff dst->cnt_max = l->cnt_max; 359180852Skmacy if (l->cnt_wait_max > dst->cnt_wait_max) 360180852Skmacy dst->cnt_wait_max = l->cnt_wait_max; 361174629Sjeff dst->cnt_tot += l->cnt_tot; 362174629Sjeff dst->cnt_wait += l->cnt_wait; 363174629Sjeff dst->cnt_cur += l->cnt_cur; 364174629Sjeff dst->cnt_contest_locking += l->cnt_contest_locking; 365174629Sjeff } 366174629Sjeff } 367167012Skmacy 368174629Sjeff} 369174629Sjeff 370174629Sjeffstatic void 371174629Sjefflock_prof_type_stats(struct lock_prof_type *type, struct sbuf *sb, int spin, 372174629Sjeff int t) 373174629Sjeff{ 374174629Sjeff struct lock_prof *l; 375174629Sjeff int i; 376174629Sjeff 377174629Sjeff for (i = 0; i < LPROF_HASH_SIZE; ++i) { 378174629Sjeff SLIST_FOREACH(l, &type->lpt_hash[i], link) { 379174629Sjeff struct lock_prof lp = {}; 
380174629Sjeff 381174629Sjeff if (l->ticks == t) 382174629Sjeff continue; 383174629Sjeff lock_prof_sum(l, &lp, i, spin, t); 384174629Sjeff lock_prof_output(&lp, sb); 385174629Sjeff } 386174629Sjeff } 387174629Sjeff} 388174629Sjeff 389174629Sjeffstatic int 390174629Sjeffdump_lock_prof_stats(SYSCTL_HANDLER_ARGS) 391174629Sjeff{ 392174629Sjeff struct sbuf *sb; 393174629Sjeff int error, cpu, t; 394175010Sjeff int enabled; 395174629Sjeff 396217916Smdf error = sysctl_wire_old_buffer(req, 0); 397217916Smdf if (error != 0) 398217916Smdf return (error); 399212750Smdf sb = sbuf_new_for_sysctl(NULL, NULL, LPROF_SBUF_SIZE, req); 400180852Skmacy sbuf_printf(sb, "\n%8s %9s %11s %11s %11s %6s %6s %2s %6s %s\n", 401180852Skmacy "max", "wait_max", "total", "wait_total", "count", "avg", "wait_avg", "cnt_hold", "cnt_lock", "name"); 402175010Sjeff enabled = lock_prof_enable; 403175010Sjeff lock_prof_enable = 0; 404189845Sjeff lock_prof_idle(); 405174629Sjeff t = ticks; 406174629Sjeff for (cpu = 0; cpu <= mp_maxid; cpu++) { 407174629Sjeff if (lp_cpu[cpu] == NULL) 408174629Sjeff continue; 409174629Sjeff lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[0], sb, 0, t); 410174629Sjeff lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[1], sb, 1, t); 411174629Sjeff } 412175010Sjeff lock_prof_enable = enabled; 413174629Sjeff 414212750Smdf error = sbuf_finish(sb); 415212750Smdf /* Output a trailing NUL. 
*/ 416212750Smdf if (error == 0) 417212750Smdf error = SYSCTL_OUT(req, "", 1); 418174629Sjeff sbuf_delete(sb); 419174629Sjeff return (error); 420174629Sjeff} 421174629Sjeff 422174629Sjeffstatic int 423174629Sjeffenable_lock_prof(SYSCTL_HANDLER_ARGS) 424174629Sjeff{ 425174629Sjeff int error, v; 426174629Sjeff 427174629Sjeff v = lock_prof_enable; 428174629Sjeff error = sysctl_handle_int(oidp, &v, v, req); 429174629Sjeff if (error) 430174629Sjeff return (error); 431174629Sjeff if (req->newptr == NULL) 432174629Sjeff return (error); 433174629Sjeff if (v == lock_prof_enable) 434174629Sjeff return (0); 435174629Sjeff if (v == 1) 436174629Sjeff lock_prof_reset(); 437174629Sjeff lock_prof_enable = !!v; 438174629Sjeff 439174629Sjeff return (0); 440174629Sjeff} 441174629Sjeff 442174629Sjeffstatic int 443174629Sjeffreset_lock_prof_stats(SYSCTL_HANDLER_ARGS) 444174629Sjeff{ 445174629Sjeff int error, v; 446174629Sjeff 447174629Sjeff v = 0; 448174629Sjeff error = sysctl_handle_int(oidp, &v, 0, req); 449174629Sjeff if (error) 450174629Sjeff return (error); 451174629Sjeff if (req->newptr == NULL) 452174629Sjeff return (error); 453174629Sjeff if (v == 0) 454174629Sjeff return (0); 455174629Sjeff lock_prof_reset(); 456174629Sjeff 457174629Sjeff return (0); 458174629Sjeff} 459174629Sjeff 460174629Sjeffstatic struct lock_prof * 461174629Sjefflock_profile_lookup(struct lock_object *lo, int spin, const char *file, 462174629Sjeff int line) 463174629Sjeff{ 464174629Sjeff const char *unknown = "(unknown)"; 465174629Sjeff struct lock_prof_type *type; 466174629Sjeff struct lock_prof *lp; 467174629Sjeff struct lphead *head; 468174629Sjeff const char *p; 469174629Sjeff u_int hash; 470174629Sjeff 471174629Sjeff p = file; 472174629Sjeff if (p == NULL || *p == '\0') 473174629Sjeff p = unknown; 474174629Sjeff hash = (uintptr_t)lo->lo_name * 31 + (uintptr_t)p * 31 + line; 475174629Sjeff hash &= LPROF_HASH_MASK; 476174629Sjeff type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin]; 477174629Sjeff head = 
&type->lpt_hash[hash]; 478174629Sjeff SLIST_FOREACH(lp, head, link) { 479174629Sjeff if (lp->line == line && lp->file == p && 480174629Sjeff lp->name == lo->lo_name) 481174629Sjeff return (lp); 482174629Sjeff 483174629Sjeff } 484174629Sjeff lp = SLIST_FIRST(&type->lpt_lpalloc); 485174629Sjeff if (lp == NULL) { 486174629Sjeff lock_prof_rejected++; 487174629Sjeff return (lp); 488174629Sjeff } 489174629Sjeff SLIST_REMOVE_HEAD(&type->lpt_lpalloc, link); 490174629Sjeff lp->file = p; 491174629Sjeff lp->line = line; 492175010Sjeff lp->class = LOCK_CLASS(lo); 493174629Sjeff lp->name = lo->lo_name; 494174629Sjeff SLIST_INSERT_HEAD(&type->lpt_hash[hash], lp, link); 495174629Sjeff return (lp); 496174629Sjeff} 497174629Sjeff 498174629Sjeffstatic struct lock_profile_object * 499174629Sjefflock_profile_object_lookup(struct lock_object *lo, int spin, const char *file, 500174629Sjeff int line) 501174629Sjeff{ 502174629Sjeff struct lock_profile_object *l; 503174629Sjeff struct lock_prof_type *type; 504174629Sjeff struct lpohead *head; 505174629Sjeff 506174629Sjeff head = &curthread->td_lprof[spin]; 507174629Sjeff LIST_FOREACH(l, head, lpo_link) 508174629Sjeff if (l->lpo_obj == lo && l->lpo_file == file && 509174629Sjeff l->lpo_line == line) 510174629Sjeff return (l); 511174629Sjeff type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin]; 512174629Sjeff l = LIST_FIRST(&type->lpt_lpoalloc); 513174629Sjeff if (l == NULL) { 514174629Sjeff lock_prof_rejected++; 515174629Sjeff return (NULL); 516174629Sjeff } 517174629Sjeff LIST_REMOVE(l, lpo_link); 518174629Sjeff l->lpo_obj = lo; 519174629Sjeff l->lpo_file = file; 520174629Sjeff l->lpo_line = line; 521174629Sjeff l->lpo_cnt = 0; 522174629Sjeff LIST_INSERT_HEAD(head, l, lpo_link); 523174629Sjeff 524174629Sjeff return (l); 525174629Sjeff} 526174629Sjeff 527174629Sjeffvoid 528174629Sjefflock_profile_obtain_lock_success(struct lock_object *lo, int contested, 529174629Sjeff uint64_t waittime, const char *file, int line) 530174629Sjeff{ 
531174629Sjeff static int lock_prof_count; 532174629Sjeff struct lock_profile_object *l; 533174629Sjeff int spin; 534174629Sjeff 535228424Savg if (SCHEDULER_STOPPED()) 536228424Savg return; 537228424Savg 538174629Sjeff /* don't reset the timer when/if recursing */ 539174629Sjeff if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE)) 540174629Sjeff return; 541174629Sjeff if (lock_prof_skipcount && 542175150Skris (++lock_prof_count % lock_prof_skipcount) != 0) 543174629Sjeff return; 544176013Sattilio spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0; 545174629Sjeff if (spin && lock_prof_skipspin == 1) 546174629Sjeff return; 547189845Sjeff critical_enter(); 548189845Sjeff /* Recheck enabled now that we're in a critical section. */ 549189845Sjeff if (lock_prof_enable == 0) 550189845Sjeff goto out; 551174629Sjeff l = lock_profile_object_lookup(lo, spin, file, line); 552174629Sjeff if (l == NULL) 553189845Sjeff goto out; 554174629Sjeff l->lpo_cnt++; 555174629Sjeff if (++l->lpo_ref > 1) 556189845Sjeff goto out; 557174629Sjeff l->lpo_contest_locking = contested; 558168315Skmacy l->lpo_acqtime = nanoseconds(); 559168315Skmacy if (waittime && (l->lpo_acqtime > waittime)) 560168315Skmacy l->lpo_waittime = l->lpo_acqtime - waittime; 561168315Skmacy else 562168315Skmacy l->lpo_waittime = 0; 563189845Sjeffout: 564189845Sjeff critical_exit(); 565164159Skmacy} 566164159Skmacy 567174629Sjeffvoid 568189845Sjefflock_profile_thread_exit(struct thread *td) 569189845Sjeff{ 570189845Sjeff#ifdef INVARIANTS 571189845Sjeff struct lock_profile_object *l; 572189845Sjeff 573189845Sjeff MPASS(curthread->td_critnest == 0); 574189845Sjeff#endif 575189845Sjeff /* 576189845Sjeff * If lock profiling was disabled we have to wait for reset to 577189845Sjeff * clear our pointers before we can exit safely. 
578189845Sjeff */ 579189845Sjeff lock_prof_reset_wait(); 580189845Sjeff#ifdef INVARIANTS 581189845Sjeff LIST_FOREACH(l, &td->td_lprof[0], lpo_link) 582189845Sjeff printf("thread still holds lock acquired at %s:%d\n", 583189845Sjeff l->lpo_file, l->lpo_line); 584189845Sjeff LIST_FOREACH(l, &td->td_lprof[1], lpo_link) 585189845Sjeff printf("thread still holds lock acquired at %s:%d\n", 586189845Sjeff l->lpo_file, l->lpo_line); 587189845Sjeff#endif 588189845Sjeff MPASS(LIST_FIRST(&td->td_lprof[0]) == NULL); 589189845Sjeff MPASS(LIST_FIRST(&td->td_lprof[1]) == NULL); 590189845Sjeff} 591189845Sjeff 592189845Sjeffvoid 593174629Sjefflock_profile_release_lock(struct lock_object *lo) 594164159Skmacy{ 595174629Sjeff struct lock_profile_object *l; 596174629Sjeff struct lock_prof_type *type; 597174629Sjeff struct lock_prof *lp; 598209390Sed uint64_t curtime, holdtime; 599174629Sjeff struct lpohead *head; 600174629Sjeff int spin; 601164159Skmacy 602228424Savg if (SCHEDULER_STOPPED()) 603228424Savg return; 604189845Sjeff if (lo->lo_flags & LO_NOPROFILE) 605174629Sjeff return; 606176013Sattilio spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0; 607174629Sjeff head = &curthread->td_lprof[spin]; 608189845Sjeff if (LIST_FIRST(head) == NULL) 609189845Sjeff return; 610174629Sjeff critical_enter(); 611189845Sjeff /* Recheck enabled now that we're in a critical section. */ 612189845Sjeff if (lock_prof_enable == 0 && lock_prof_resetting == 1) 613189845Sjeff goto out; 614189845Sjeff /* 615189845Sjeff * If lock profiling is not enabled we still want to remove the 616189845Sjeff * lpo from our queue. 
617189845Sjeff */ 618174629Sjeff LIST_FOREACH(l, head, lpo_link) 619174629Sjeff if (l->lpo_obj == lo) 620174629Sjeff break; 621174629Sjeff if (l == NULL) 622174629Sjeff goto out; 623174629Sjeff if (--l->lpo_ref > 0) 624174629Sjeff goto out; 625174629Sjeff lp = lock_profile_lookup(lo, spin, l->lpo_file, l->lpo_line); 626174629Sjeff if (lp == NULL) 627174629Sjeff goto release; 628209247Savg curtime = nanoseconds(); 629209247Savg if (curtime < l->lpo_acqtime) 630174629Sjeff goto release; 631209247Savg holdtime = curtime - l->lpo_acqtime; 632209247Savg 633174629Sjeff /* 634174629Sjeff * Record if the lock has been held longer now than ever 635174629Sjeff * before. 636174629Sjeff */ 637174629Sjeff if (holdtime > lp->cnt_max) 638174629Sjeff lp->cnt_max = holdtime; 639180852Skmacy if (l->lpo_waittime > lp->cnt_wait_max) 640180852Skmacy lp->cnt_wait_max = l->lpo_waittime; 641174629Sjeff lp->cnt_tot += holdtime; 642174629Sjeff lp->cnt_wait += l->lpo_waittime; 643174629Sjeff lp->cnt_contest_locking += l->lpo_contest_locking; 644174629Sjeff lp->cnt_cur += l->lpo_cnt; 645174629Sjeffrelease: 646174629Sjeff LIST_REMOVE(l, lpo_link); 647174629Sjeff type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin]; 648174629Sjeff LIST_INSERT_HEAD(&type->lpt_lpoalloc, l, lpo_link); 649174629Sjeffout: 650174629Sjeff critical_exit(); 651174629Sjeff} 652164159Skmacy 653227309Sedstatic SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD, NULL, "lock debugging"); 654227309Sedstatic SYSCTL_NODE(_debug_lock, OID_AUTO, prof, CTLFLAG_RD, NULL, 655227309Sed "lock profiling"); 656174629SjeffSYSCTL_INT(_debug_lock_prof, OID_AUTO, skipspin, CTLFLAG_RW, 657174629Sjeff &lock_prof_skipspin, 0, "Skip profiling on spinlocks."); 658174629SjeffSYSCTL_INT(_debug_lock_prof, OID_AUTO, skipcount, CTLFLAG_RW, 659174629Sjeff &lock_prof_skipcount, 0, "Sample approximately every N lock acquisitions."); 660174629SjeffSYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD, 661174629Sjeff &lock_prof_rejected, 0, "Number of 
rejected profiling records"); 662174629SjeffSYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD, 663174629Sjeff NULL, 0, dump_lock_prof_stats, "A", "Lock profiling statistics"); 664174629SjeffSYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW, 665174629Sjeff NULL, 0, reset_lock_prof_stats, "I", "Reset lock profiling statistics"); 666174629SjeffSYSCTL_PROC(_debug_lock_prof, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW, 667174629Sjeff NULL, 0, enable_lock_prof, "I", "Enable lock profiling"); 668164159Skmacy 669164159Skmacy#endif 670