/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module holds the global variables and functions used to maintain
 * lock_object structures.
30154484Sjhb */ 31154484Sjhb 32154484Sjhb#include <sys/cdefs.h> 33154484Sjhb__FBSDID("$FreeBSD$"); 34154484Sjhb 35154485Sjhb#include "opt_ddb.h" 36164159Skmacy#include "opt_mprof.h" 37154485Sjhb 38154484Sjhb#include <sys/param.h> 39154484Sjhb#include <sys/systm.h> 40174629Sjeff#include <sys/kernel.h> 41154484Sjhb#include <sys/ktr.h> 42154484Sjhb#include <sys/lock.h> 43174629Sjeff#include <sys/lock_profile.h> 44174629Sjeff#include <sys/malloc.h> 45189845Sjeff#include <sys/mutex.h> 46174629Sjeff#include <sys/pcpu.h> 47174629Sjeff#include <sys/proc.h> 48164159Skmacy#include <sys/sbuf.h> 49189845Sjeff#include <sys/sched.h> 50174629Sjeff#include <sys/smp.h> 51164159Skmacy#include <sys/sysctl.h> 52154484Sjhb 53154484Sjhb#ifdef DDB 54154484Sjhb#include <ddb/ddb.h> 55154484Sjhb#endif 56154484Sjhb 57174629Sjeff#include <machine/cpufunc.h> 58174629Sjeff 59154484SjhbCTASSERT(LOCK_CLASS_MAX == 15); 60154484Sjhb 61154484Sjhbstruct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = { 62154484Sjhb &lock_class_mtx_spin, 63154484Sjhb &lock_class_mtx_sleep, 64154484Sjhb &lock_class_sx, 65173444Sups &lock_class_rm, 66256001Sjhb &lock_class_rm_sleepable, 67154941Sjhb &lock_class_rw, 68164246Skmacy &lock_class_lockmgr, 69154484Sjhb}; 70154484Sjhb 71154484Sjhbvoid 72154484Sjhblock_init(struct lock_object *lock, struct lock_class *class, const char *name, 73154484Sjhb const char *type, int flags) 74154484Sjhb{ 75154484Sjhb int i; 76154484Sjhb 77154484Sjhb /* Check for double-init and zero object. */ 78154484Sjhb KASSERT(!lock_initalized(lock), ("lock \"%s\" %p already initialized", 79154484Sjhb name, lock)); 80154484Sjhb 81154484Sjhb /* Look up lock class to find its index. */ 82154484Sjhb for (i = 0; i < LOCK_CLASS_MAX; i++) 83154484Sjhb if (lock_classes[i] == class) { 84154484Sjhb lock->lo_flags = i << LO_CLASSSHIFT; 85154484Sjhb break; 86154484Sjhb } 87154484Sjhb KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class)); 88154484Sjhb 89154484Sjhb /* Initialize the lock object. 
*/ 90154484Sjhb lock->lo_name = name; 91154484Sjhb lock->lo_flags |= flags | LO_INITIALIZED; 92154484Sjhb LOCK_LOG_INIT(lock, 0); 93179025Sattilio WITNESS_INIT(lock, (type != NULL) ? type : name); 94154484Sjhb} 95154484Sjhb 96154484Sjhbvoid 97154484Sjhblock_destroy(struct lock_object *lock) 98154484Sjhb{ 99154484Sjhb 100154484Sjhb KASSERT(lock_initalized(lock), ("lock %p is not initialized", lock)); 101154484Sjhb WITNESS_DESTROY(lock); 102154484Sjhb LOCK_LOG_DESTROY(lock, 0); 103154484Sjhb lock->lo_flags &= ~LO_INITIALIZED; 104154484Sjhb} 105154484Sjhb 106154484Sjhb#ifdef DDB 107154484SjhbDB_SHOW_COMMAND(lock, db_show_lock) 108154484Sjhb{ 109154484Sjhb struct lock_object *lock; 110154484Sjhb struct lock_class *class; 111154484Sjhb 112154484Sjhb if (!have_addr) 113154484Sjhb return; 114154484Sjhb lock = (struct lock_object *)addr; 115154484Sjhb if (LO_CLASSINDEX(lock) > LOCK_CLASS_MAX) { 116154484Sjhb db_printf("Unknown lock class: %d\n", LO_CLASSINDEX(lock)); 117154484Sjhb return; 118154484Sjhb } 119154484Sjhb class = LOCK_CLASS(lock); 120154484Sjhb db_printf(" class: %s\n", class->lc_name); 121154484Sjhb db_printf(" name: %s\n", lock->lo_name); 122154484Sjhb class->lc_ddb_show(lock); 123154484Sjhb} 124154484Sjhb#endif 125164159Skmacy 126164159Skmacy#ifdef LOCK_PROFILING 127174629Sjeff 128174629Sjeff/* 129174629Sjeff * One object per-thread for each lock the thread owns. Tracks individual 130174629Sjeff * lock instances. 131174629Sjeff */ 132174629Sjeffstruct lock_profile_object { 133174629Sjeff LIST_ENTRY(lock_profile_object) lpo_link; 134174629Sjeff struct lock_object *lpo_obj; 135174629Sjeff const char *lpo_file; 136174629Sjeff int lpo_line; 137174629Sjeff uint16_t lpo_ref; 138174629Sjeff uint16_t lpo_cnt; 139209390Sed uint64_t lpo_acqtime; 140209390Sed uint64_t lpo_waittime; 141174629Sjeff u_int lpo_contest_locking; 142174629Sjeff}; 143174629Sjeff 144174629Sjeff/* 145174629Sjeff * One lock_prof for each (file, line, lock object) triple. 
146174629Sjeff */ 147174629Sjeffstruct lock_prof { 148174629Sjeff SLIST_ENTRY(lock_prof) link; 149175010Sjeff struct lock_class *class; 150174629Sjeff const char *file; 151174629Sjeff const char *name; 152174629Sjeff int line; 153174629Sjeff int ticks; 154180852Skmacy uintmax_t cnt_wait_max; 155174629Sjeff uintmax_t cnt_max; 156174629Sjeff uintmax_t cnt_tot; 157174629Sjeff uintmax_t cnt_wait; 158174629Sjeff uintmax_t cnt_cur; 159174629Sjeff uintmax_t cnt_contest_locking; 160174629Sjeff}; 161174629Sjeff 162174629SjeffSLIST_HEAD(lphead, lock_prof); 163174629Sjeff 164174629Sjeff#define LPROF_HASH_SIZE 4096 165174629Sjeff#define LPROF_HASH_MASK (LPROF_HASH_SIZE - 1) 166174629Sjeff#define LPROF_CACHE_SIZE 4096 167174629Sjeff 168174629Sjeff/* 169174629Sjeff * Array of objects and profs for each type of object for each cpu. Spinlocks 170215034Sbrucec * are handled separately because a thread may be preempted and acquire a 171174629Sjeff * spinlock while in the lock profiling code of a non-spinlock. In this way 172174629Sjeff * we only need a critical section to protect the per-cpu lists. 173174629Sjeff */ 174174629Sjeffstruct lock_prof_type { 175174629Sjeff struct lphead lpt_lpalloc; 176174629Sjeff struct lpohead lpt_lpoalloc; 177174629Sjeff struct lphead lpt_hash[LPROF_HASH_SIZE]; 178174629Sjeff struct lock_prof lpt_prof[LPROF_CACHE_SIZE]; 179174629Sjeff struct lock_profile_object lpt_objs[LPROF_CACHE_SIZE]; 180174629Sjeff}; 181174629Sjeff 182174629Sjeffstruct lock_prof_cpu { 183174629Sjeff struct lock_prof_type lpc_types[2]; /* One for spin one for other. 
*/ 184174629Sjeff}; 185174629Sjeff 186174629Sjeffstruct lock_prof_cpu *lp_cpu[MAXCPU]; 187174629Sjeff 188189845Sjeffvolatile int lock_prof_enable = 0; 189189845Sjeffstatic volatile int lock_prof_resetting; 190174629Sjeff 191212750Smdf#define LPROF_SBUF_SIZE 256 192174629Sjeff 193174629Sjeffstatic int lock_prof_rejected; 194174629Sjeffstatic int lock_prof_skipspin; 195174629Sjeffstatic int lock_prof_skipcount; 196174629Sjeff 197174629Sjeff#ifndef USE_CPU_NANOSECONDS 198209390Seduint64_t 199174629Sjeffnanoseconds(void) 200164159Skmacy{ 201174629Sjeff struct bintime bt; 202209390Sed uint64_t ns; 203164159Skmacy 204174629Sjeff binuptime(&bt); 205174629Sjeff /* From bintime2timespec */ 206209390Sed ns = bt.sec * (uint64_t)1000000000; 207174629Sjeff ns += ((uint64_t)1000000000 * (uint32_t)(bt.frac >> 32)) >> 32; 208174629Sjeff return (ns); 209174629Sjeff} 210174629Sjeff#endif 211174629Sjeff 212174629Sjeffstatic void 213174629Sjefflock_prof_init_type(struct lock_prof_type *type) 214174629Sjeff{ 215174629Sjeff int i; 216174629Sjeff 217174629Sjeff SLIST_INIT(&type->lpt_lpalloc); 218174629Sjeff LIST_INIT(&type->lpt_lpoalloc); 219174629Sjeff for (i = 0; i < LPROF_CACHE_SIZE; i++) { 220174629Sjeff SLIST_INSERT_HEAD(&type->lpt_lpalloc, &type->lpt_prof[i], 221174629Sjeff link); 222174629Sjeff LIST_INSERT_HEAD(&type->lpt_lpoalloc, &type->lpt_objs[i], 223174629Sjeff lpo_link); 224174629Sjeff } 225174629Sjeff} 226174629Sjeff 227174629Sjeffstatic void 228174629Sjefflock_prof_init(void *arg) 229174629Sjeff{ 230174629Sjeff int cpu; 231174629Sjeff 232174629Sjeff for (cpu = 0; cpu <= mp_maxid; cpu++) { 233174629Sjeff lp_cpu[cpu] = malloc(sizeof(*lp_cpu[cpu]), M_DEVBUF, 234174629Sjeff M_WAITOK | M_ZERO); 235174629Sjeff lock_prof_init_type(&lp_cpu[cpu]->lpc_types[0]); 236174629Sjeff lock_prof_init_type(&lp_cpu[cpu]->lpc_types[1]); 237174629Sjeff } 238174629Sjeff} 239174629SjeffSYSINIT(lockprof, SI_SUB_SMP, SI_ORDER_ANY, lock_prof_init, NULL); 240174629Sjeff 241189845Sjeff/* 242189845Sjeff 
* To be certain that lock profiling has idled on all cpus before we 243189845Sjeff * reset, we schedule the resetting thread on all active cpus. Since 244189845Sjeff * all operations happen within critical sections we can be sure that 245189845Sjeff * it is safe to zero the profiling structures. 246189845Sjeff */ 247174629Sjeffstatic void 248189845Sjefflock_prof_idle(void) 249189845Sjeff{ 250189845Sjeff struct thread *td; 251189845Sjeff int cpu; 252189845Sjeff 253189845Sjeff td = curthread; 254189845Sjeff thread_lock(td); 255209059Sjhb CPU_FOREACH(cpu) { 256189845Sjeff sched_bind(td, cpu); 257189845Sjeff } 258189845Sjeff sched_unbind(td); 259189845Sjeff thread_unlock(td); 260189845Sjeff} 261189845Sjeff 262189845Sjeffstatic void 263189845Sjefflock_prof_reset_wait(void) 264189845Sjeff{ 265189845Sjeff 266189845Sjeff /* 267189845Sjeff * Spin relinquishing our cpu so that lock_prof_idle may 268189845Sjeff * run on it. 269189845Sjeff */ 270189845Sjeff while (lock_prof_resetting) 271189845Sjeff sched_relinquish(curthread); 272189845Sjeff} 273189845Sjeff 274189845Sjeffstatic void 275174629Sjefflock_prof_reset(void) 276174629Sjeff{ 277174629Sjeff struct lock_prof_cpu *lpc; 278174629Sjeff int enabled, i, cpu; 279174629Sjeff 280189845Sjeff /* 281189845Sjeff * We not only race with acquiring and releasing locks but also 282189845Sjeff * thread exit. To be certain that threads exit without valid head 283189845Sjeff * pointers they must see resetting set before enabled is cleared. 284189845Sjeff * Otherwise a lock may not be removed from a per-thread list due 285189845Sjeff * to disabled being set but not wait for reset() to remove it below. 286189845Sjeff */ 287189845Sjeff atomic_store_rel_int(&lock_prof_resetting, 1); 288174629Sjeff enabled = lock_prof_enable; 289174629Sjeff lock_prof_enable = 0; 290189845Sjeff lock_prof_idle(); 291189845Sjeff /* 292189845Sjeff * Some objects may have migrated between CPUs. Clear all links 293189845Sjeff * before we zero the structures. 
Some items may still be linked 294189845Sjeff * into per-thread lists as well. 295189845Sjeff */ 296174629Sjeff for (cpu = 0; cpu <= mp_maxid; cpu++) { 297174629Sjeff lpc = lp_cpu[cpu]; 298174629Sjeff for (i = 0; i < LPROF_CACHE_SIZE; i++) { 299174629Sjeff LIST_REMOVE(&lpc->lpc_types[0].lpt_objs[i], lpo_link); 300174629Sjeff LIST_REMOVE(&lpc->lpc_types[1].lpt_objs[i], lpo_link); 301174629Sjeff } 302189845Sjeff } 303189845Sjeff for (cpu = 0; cpu <= mp_maxid; cpu++) { 304189845Sjeff lpc = lp_cpu[cpu]; 305174629Sjeff bzero(lpc, sizeof(*lpc)); 306174629Sjeff lock_prof_init_type(&lpc->lpc_types[0]); 307174629Sjeff lock_prof_init_type(&lpc->lpc_types[1]); 308174629Sjeff } 309189845Sjeff atomic_store_rel_int(&lock_prof_resetting, 0); 310174629Sjeff lock_prof_enable = enabled; 311174629Sjeff} 312174629Sjeff 313174629Sjeffstatic void 314174629Sjefflock_prof_output(struct lock_prof *lp, struct sbuf *sb) 315174629Sjeff{ 316174629Sjeff const char *p; 317174629Sjeff 318174629Sjeff for (p = lp->file; p != NULL && strncmp(p, "../", 3) == 0; p += 3); 319174629Sjeff sbuf_printf(sb, 320180852Skmacy "%8ju %9ju %11ju %11ju %11ju %6ju %6ju %2ju %6ju %s:%d (%s:%s)\n", 321180852Skmacy lp->cnt_max / 1000, lp->cnt_wait_max / 1000, lp->cnt_tot / 1000, 322174629Sjeff lp->cnt_wait / 1000, lp->cnt_cur, 323174629Sjeff lp->cnt_cur == 0 ? (uintmax_t)0 : 324174629Sjeff lp->cnt_tot / (lp->cnt_cur * 1000), 325174629Sjeff lp->cnt_cur == 0 ? 
(uintmax_t)0 : 326174629Sjeff lp->cnt_wait / (lp->cnt_cur * 1000), 327174629Sjeff (uintmax_t)0, lp->cnt_contest_locking, 328175010Sjeff p, lp->line, lp->class->lc_name, lp->name); 329174629Sjeff} 330174629Sjeff 331174629Sjeffstatic void 332174629Sjefflock_prof_sum(struct lock_prof *match, struct lock_prof *dst, int hash, 333174629Sjeff int spin, int t) 334174629Sjeff{ 335174629Sjeff struct lock_prof_type *type; 336174629Sjeff struct lock_prof *l; 337174629Sjeff int cpu; 338174629Sjeff 339174629Sjeff dst->file = match->file; 340174629Sjeff dst->line = match->line; 341175010Sjeff dst->class = match->class; 342174629Sjeff dst->name = match->name; 343174629Sjeff 344174629Sjeff for (cpu = 0; cpu <= mp_maxid; cpu++) { 345174629Sjeff if (lp_cpu[cpu] == NULL) 346174629Sjeff continue; 347174629Sjeff type = &lp_cpu[cpu]->lpc_types[spin]; 348174629Sjeff SLIST_FOREACH(l, &type->lpt_hash[hash], link) { 349174629Sjeff if (l->ticks == t) 350174629Sjeff continue; 351174629Sjeff if (l->file != match->file || l->line != match->line || 352175010Sjeff l->name != match->name) 353174629Sjeff continue; 354174629Sjeff l->ticks = t; 355174629Sjeff if (l->cnt_max > dst->cnt_max) 356174629Sjeff dst->cnt_max = l->cnt_max; 357180852Skmacy if (l->cnt_wait_max > dst->cnt_wait_max) 358180852Skmacy dst->cnt_wait_max = l->cnt_wait_max; 359174629Sjeff dst->cnt_tot += l->cnt_tot; 360174629Sjeff dst->cnt_wait += l->cnt_wait; 361174629Sjeff dst->cnt_cur += l->cnt_cur; 362174629Sjeff dst->cnt_contest_locking += l->cnt_contest_locking; 363174629Sjeff } 364174629Sjeff } 365167012Skmacy 366174629Sjeff} 367174629Sjeff 368174629Sjeffstatic void 369174629Sjefflock_prof_type_stats(struct lock_prof_type *type, struct sbuf *sb, int spin, 370174629Sjeff int t) 371174629Sjeff{ 372174629Sjeff struct lock_prof *l; 373174629Sjeff int i; 374174629Sjeff 375174629Sjeff for (i = 0; i < LPROF_HASH_SIZE; ++i) { 376174629Sjeff SLIST_FOREACH(l, &type->lpt_hash[i], link) { 377174629Sjeff struct lock_prof lp = {}; 
378174629Sjeff 379174629Sjeff if (l->ticks == t) 380174629Sjeff continue; 381174629Sjeff lock_prof_sum(l, &lp, i, spin, t); 382174629Sjeff lock_prof_output(&lp, sb); 383174629Sjeff } 384174629Sjeff } 385174629Sjeff} 386174629Sjeff 387174629Sjeffstatic int 388174629Sjeffdump_lock_prof_stats(SYSCTL_HANDLER_ARGS) 389174629Sjeff{ 390174629Sjeff struct sbuf *sb; 391174629Sjeff int error, cpu, t; 392175010Sjeff int enabled; 393174629Sjeff 394217916Smdf error = sysctl_wire_old_buffer(req, 0); 395217916Smdf if (error != 0) 396217916Smdf return (error); 397212750Smdf sb = sbuf_new_for_sysctl(NULL, NULL, LPROF_SBUF_SIZE, req); 398180852Skmacy sbuf_printf(sb, "\n%8s %9s %11s %11s %11s %6s %6s %2s %6s %s\n", 399180852Skmacy "max", "wait_max", "total", "wait_total", "count", "avg", "wait_avg", "cnt_hold", "cnt_lock", "name"); 400175010Sjeff enabled = lock_prof_enable; 401175010Sjeff lock_prof_enable = 0; 402189845Sjeff lock_prof_idle(); 403174629Sjeff t = ticks; 404174629Sjeff for (cpu = 0; cpu <= mp_maxid; cpu++) { 405174629Sjeff if (lp_cpu[cpu] == NULL) 406174629Sjeff continue; 407174629Sjeff lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[0], sb, 0, t); 408174629Sjeff lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[1], sb, 1, t); 409174629Sjeff } 410175010Sjeff lock_prof_enable = enabled; 411174629Sjeff 412212750Smdf error = sbuf_finish(sb); 413212750Smdf /* Output a trailing NUL. 
*/ 414212750Smdf if (error == 0) 415212750Smdf error = SYSCTL_OUT(req, "", 1); 416174629Sjeff sbuf_delete(sb); 417174629Sjeff return (error); 418174629Sjeff} 419174629Sjeff 420174629Sjeffstatic int 421174629Sjeffenable_lock_prof(SYSCTL_HANDLER_ARGS) 422174629Sjeff{ 423174629Sjeff int error, v; 424174629Sjeff 425174629Sjeff v = lock_prof_enable; 426174629Sjeff error = sysctl_handle_int(oidp, &v, v, req); 427174629Sjeff if (error) 428174629Sjeff return (error); 429174629Sjeff if (req->newptr == NULL) 430174629Sjeff return (error); 431174629Sjeff if (v == lock_prof_enable) 432174629Sjeff return (0); 433174629Sjeff if (v == 1) 434174629Sjeff lock_prof_reset(); 435174629Sjeff lock_prof_enable = !!v; 436174629Sjeff 437174629Sjeff return (0); 438174629Sjeff} 439174629Sjeff 440174629Sjeffstatic int 441174629Sjeffreset_lock_prof_stats(SYSCTL_HANDLER_ARGS) 442174629Sjeff{ 443174629Sjeff int error, v; 444174629Sjeff 445174629Sjeff v = 0; 446174629Sjeff error = sysctl_handle_int(oidp, &v, 0, req); 447174629Sjeff if (error) 448174629Sjeff return (error); 449174629Sjeff if (req->newptr == NULL) 450174629Sjeff return (error); 451174629Sjeff if (v == 0) 452174629Sjeff return (0); 453174629Sjeff lock_prof_reset(); 454174629Sjeff 455174629Sjeff return (0); 456174629Sjeff} 457174629Sjeff 458174629Sjeffstatic struct lock_prof * 459174629Sjefflock_profile_lookup(struct lock_object *lo, int spin, const char *file, 460174629Sjeff int line) 461174629Sjeff{ 462174629Sjeff const char *unknown = "(unknown)"; 463174629Sjeff struct lock_prof_type *type; 464174629Sjeff struct lock_prof *lp; 465174629Sjeff struct lphead *head; 466174629Sjeff const char *p; 467174629Sjeff u_int hash; 468174629Sjeff 469174629Sjeff p = file; 470174629Sjeff if (p == NULL || *p == '\0') 471174629Sjeff p = unknown; 472174629Sjeff hash = (uintptr_t)lo->lo_name * 31 + (uintptr_t)p * 31 + line; 473174629Sjeff hash &= LPROF_HASH_MASK; 474174629Sjeff type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin]; 475174629Sjeff head = 
&type->lpt_hash[hash]; 476174629Sjeff SLIST_FOREACH(lp, head, link) { 477174629Sjeff if (lp->line == line && lp->file == p && 478174629Sjeff lp->name == lo->lo_name) 479174629Sjeff return (lp); 480174629Sjeff 481174629Sjeff } 482174629Sjeff lp = SLIST_FIRST(&type->lpt_lpalloc); 483174629Sjeff if (lp == NULL) { 484174629Sjeff lock_prof_rejected++; 485174629Sjeff return (lp); 486174629Sjeff } 487174629Sjeff SLIST_REMOVE_HEAD(&type->lpt_lpalloc, link); 488174629Sjeff lp->file = p; 489174629Sjeff lp->line = line; 490175010Sjeff lp->class = LOCK_CLASS(lo); 491174629Sjeff lp->name = lo->lo_name; 492174629Sjeff SLIST_INSERT_HEAD(&type->lpt_hash[hash], lp, link); 493174629Sjeff return (lp); 494174629Sjeff} 495174629Sjeff 496174629Sjeffstatic struct lock_profile_object * 497174629Sjefflock_profile_object_lookup(struct lock_object *lo, int spin, const char *file, 498174629Sjeff int line) 499174629Sjeff{ 500174629Sjeff struct lock_profile_object *l; 501174629Sjeff struct lock_prof_type *type; 502174629Sjeff struct lpohead *head; 503174629Sjeff 504174629Sjeff head = &curthread->td_lprof[spin]; 505174629Sjeff LIST_FOREACH(l, head, lpo_link) 506174629Sjeff if (l->lpo_obj == lo && l->lpo_file == file && 507174629Sjeff l->lpo_line == line) 508174629Sjeff return (l); 509174629Sjeff type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin]; 510174629Sjeff l = LIST_FIRST(&type->lpt_lpoalloc); 511174629Sjeff if (l == NULL) { 512174629Sjeff lock_prof_rejected++; 513174629Sjeff return (NULL); 514174629Sjeff } 515174629Sjeff LIST_REMOVE(l, lpo_link); 516174629Sjeff l->lpo_obj = lo; 517174629Sjeff l->lpo_file = file; 518174629Sjeff l->lpo_line = line; 519174629Sjeff l->lpo_cnt = 0; 520174629Sjeff LIST_INSERT_HEAD(head, l, lpo_link); 521174629Sjeff 522174629Sjeff return (l); 523174629Sjeff} 524174629Sjeff 525174629Sjeffvoid 526174629Sjefflock_profile_obtain_lock_success(struct lock_object *lo, int contested, 527174629Sjeff uint64_t waittime, const char *file, int line) 528174629Sjeff{ 
529174629Sjeff static int lock_prof_count; 530174629Sjeff struct lock_profile_object *l; 531174629Sjeff int spin; 532174629Sjeff 533235404Savg if (SCHEDULER_STOPPED()) 534235404Savg return; 535235404Savg 536174629Sjeff /* don't reset the timer when/if recursing */ 537174629Sjeff if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE)) 538174629Sjeff return; 539174629Sjeff if (lock_prof_skipcount && 540175150Skris (++lock_prof_count % lock_prof_skipcount) != 0) 541174629Sjeff return; 542176013Sattilio spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0; 543174629Sjeff if (spin && lock_prof_skipspin == 1) 544174629Sjeff return; 545189845Sjeff critical_enter(); 546189845Sjeff /* Recheck enabled now that we're in a critical section. */ 547189845Sjeff if (lock_prof_enable == 0) 548189845Sjeff goto out; 549174629Sjeff l = lock_profile_object_lookup(lo, spin, file, line); 550174629Sjeff if (l == NULL) 551189845Sjeff goto out; 552174629Sjeff l->lpo_cnt++; 553174629Sjeff if (++l->lpo_ref > 1) 554189845Sjeff goto out; 555174629Sjeff l->lpo_contest_locking = contested; 556168315Skmacy l->lpo_acqtime = nanoseconds(); 557168315Skmacy if (waittime && (l->lpo_acqtime > waittime)) 558168315Skmacy l->lpo_waittime = l->lpo_acqtime - waittime; 559168315Skmacy else 560168315Skmacy l->lpo_waittime = 0; 561189845Sjeffout: 562189845Sjeff critical_exit(); 563164159Skmacy} 564164159Skmacy 565174629Sjeffvoid 566189845Sjefflock_profile_thread_exit(struct thread *td) 567189845Sjeff{ 568189845Sjeff#ifdef INVARIANTS 569189845Sjeff struct lock_profile_object *l; 570189845Sjeff 571189845Sjeff MPASS(curthread->td_critnest == 0); 572189845Sjeff#endif 573189845Sjeff /* 574189845Sjeff * If lock profiling was disabled we have to wait for reset to 575189845Sjeff * clear our pointers before we can exit safely. 
576189845Sjeff */ 577189845Sjeff lock_prof_reset_wait(); 578189845Sjeff#ifdef INVARIANTS 579189845Sjeff LIST_FOREACH(l, &td->td_lprof[0], lpo_link) 580189845Sjeff printf("thread still holds lock acquired at %s:%d\n", 581189845Sjeff l->lpo_file, l->lpo_line); 582189845Sjeff LIST_FOREACH(l, &td->td_lprof[1], lpo_link) 583189845Sjeff printf("thread still holds lock acquired at %s:%d\n", 584189845Sjeff l->lpo_file, l->lpo_line); 585189845Sjeff#endif 586189845Sjeff MPASS(LIST_FIRST(&td->td_lprof[0]) == NULL); 587189845Sjeff MPASS(LIST_FIRST(&td->td_lprof[1]) == NULL); 588189845Sjeff} 589189845Sjeff 590189845Sjeffvoid 591174629Sjefflock_profile_release_lock(struct lock_object *lo) 592164159Skmacy{ 593174629Sjeff struct lock_profile_object *l; 594174629Sjeff struct lock_prof_type *type; 595174629Sjeff struct lock_prof *lp; 596209390Sed uint64_t curtime, holdtime; 597174629Sjeff struct lpohead *head; 598174629Sjeff int spin; 599164159Skmacy 600235404Savg if (SCHEDULER_STOPPED()) 601235404Savg return; 602189845Sjeff if (lo->lo_flags & LO_NOPROFILE) 603174629Sjeff return; 604176013Sattilio spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0; 605174629Sjeff head = &curthread->td_lprof[spin]; 606189845Sjeff if (LIST_FIRST(head) == NULL) 607189845Sjeff return; 608174629Sjeff critical_enter(); 609189845Sjeff /* Recheck enabled now that we're in a critical section. */ 610189845Sjeff if (lock_prof_enable == 0 && lock_prof_resetting == 1) 611189845Sjeff goto out; 612189845Sjeff /* 613189845Sjeff * If lock profiling is not enabled we still want to remove the 614189845Sjeff * lpo from our queue. 
615189845Sjeff */ 616174629Sjeff LIST_FOREACH(l, head, lpo_link) 617174629Sjeff if (l->lpo_obj == lo) 618174629Sjeff break; 619174629Sjeff if (l == NULL) 620174629Sjeff goto out; 621174629Sjeff if (--l->lpo_ref > 0) 622174629Sjeff goto out; 623174629Sjeff lp = lock_profile_lookup(lo, spin, l->lpo_file, l->lpo_line); 624174629Sjeff if (lp == NULL) 625174629Sjeff goto release; 626209247Savg curtime = nanoseconds(); 627209247Savg if (curtime < l->lpo_acqtime) 628174629Sjeff goto release; 629209247Savg holdtime = curtime - l->lpo_acqtime; 630209247Savg 631174629Sjeff /* 632174629Sjeff * Record if the lock has been held longer now than ever 633174629Sjeff * before. 634174629Sjeff */ 635174629Sjeff if (holdtime > lp->cnt_max) 636174629Sjeff lp->cnt_max = holdtime; 637180852Skmacy if (l->lpo_waittime > lp->cnt_wait_max) 638180852Skmacy lp->cnt_wait_max = l->lpo_waittime; 639174629Sjeff lp->cnt_tot += holdtime; 640174629Sjeff lp->cnt_wait += l->lpo_waittime; 641174629Sjeff lp->cnt_contest_locking += l->lpo_contest_locking; 642174629Sjeff lp->cnt_cur += l->lpo_cnt; 643174629Sjeffrelease: 644174629Sjeff LIST_REMOVE(l, lpo_link); 645174629Sjeff type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin]; 646174629Sjeff LIST_INSERT_HEAD(&type->lpt_lpoalloc, l, lpo_link); 647174629Sjeffout: 648174629Sjeff critical_exit(); 649174629Sjeff} 650164159Skmacy 651248085Smariusstatic SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD, NULL, "lock debugging"); 652248085Smariusstatic SYSCTL_NODE(_debug_lock, OID_AUTO, prof, CTLFLAG_RD, NULL, 653248085Smarius "lock profiling"); 654174629SjeffSYSCTL_INT(_debug_lock_prof, OID_AUTO, skipspin, CTLFLAG_RW, 655174629Sjeff &lock_prof_skipspin, 0, "Skip profiling on spinlocks."); 656174629SjeffSYSCTL_INT(_debug_lock_prof, OID_AUTO, skipcount, CTLFLAG_RW, 657174629Sjeff &lock_prof_skipcount, 0, "Sample approximately every N lock acquisitions."); 658174629SjeffSYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD, 659174629Sjeff &lock_prof_rejected, 0, 
"Number of rejected profiling records"); 660174629SjeffSYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD, 661174629Sjeff NULL, 0, dump_lock_prof_stats, "A", "Lock profiling statistics"); 662174629SjeffSYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW, 663174629Sjeff NULL, 0, reset_lock_prof_stats, "I", "Reset lock profiling statistics"); 664174629SjeffSYSCTL_PROC(_debug_lock_prof, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW, 665174629Sjeff NULL, 0, enable_lock_prof, "I", "Enable lock profiling"); 666164159Skmacy 667164159Skmacy#endif 668