subr_lock.c revision 175010
/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module holds the global variables and functions used to maintain
 * lock_object structures.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_lock.c 175010 2007-12-31 03:45:51Z jeff $");

#include "opt_ddb.h"
#include "opt_mprof.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker_set.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <machine/cpufunc.h>

CTASSERT(LOCK_CLASS_MAX == 15);

struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = {
	&lock_class_mtx_spin,
	&lock_class_mtx_sleep,
	&lock_class_sx,
	&lock_class_rm,
	&lock_class_rw,
	&lock_class_lockmgr,
};

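/*
 * Initialize a lock object: record its lock class index in lo_flags, set
 * the name and type, and register the lock with WITNESS.
 */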
void
lock_init(struct lock_object *lock, struct lock_class *class, const char *name,
    const char *type, int flags)
{
	int i;

	/* Check for double-init and zero object. */
	KASSERT(!lock_initalized(lock), ("lock \"%s\" %p already initialized",
	    name, lock));

	/* Look up lock class to find its index. */
	for (i = 0; i < LOCK_CLASS_MAX; i++)
		if (lock_classes[i] == class) {
			lock->lo_flags = i << LO_CLASSSHIFT;
			break;
		}
	KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class));

	/* Initialize the lock object. */
	lock->lo_name = name;
	lock->lo_type = type != NULL ? type : name;
	lock->lo_flags |= flags | LO_INITIALIZED;
	LOCK_LOG_INIT(lock, 0);
	WITNESS_INIT(lock);
}

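/*
 * Tear down a lock object: unregister it from WITNESS and clear
 * LO_INITIALIZED so a later re-init does not trip the double-init assert.
 */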
void
lock_destroy(struct lock_object *lock)
{

	KASSERT(lock_initalized(lock), ("lock %p is not initialized", lock));
	WITNESS_DESTROY(lock);
	LOCK_LOG_DESTROY(lock, 0);
	lock->lo_flags &= ~LO_INITIALIZED;
}

#ifdef DDB
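/*
 * ddb "show lock <address>" command: print the class, name and type of the
 * lock object at the given address, then defer to the class-specific
 * lc_ddb_show routine for the rest.
 */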
DB_SHOW_COMMAND(lock, db_show_lock)
{
	struct lock_object *lock;
	struct lock_class *class;

	if (!have_addr)
		return;
	lock = (struct lock_object *)addr;
	if (LO_CLASSINDEX(lock) > LOCK_CLASS_MAX) {
		db_printf("Unknown lock class: %d\n", LO_CLASSINDEX(lock));
		return;
	}
	class = LOCK_CLASS(lock);
	db_printf(" class: %s\n", class->lc_name);
	db_printf(" name: %s\n", lock->lo_name);
	if (lock->lo_type && lock->lo_type != lock->lo_name)
		db_printf(" type: %s\n", lock->lo_type);
	class->lc_ddb_show(lock);
}
#endif

#ifdef LOCK_PROFILING

/*
 * One object per-thread for each lock the thread owns.  Tracks individual
 * lock instances.
 */
struct lock_profile_object {
	LIST_ENTRY(lock_profile_object) lpo_link;
	struct lock_object *lpo_obj;
	const char	*lpo_file;
	int		lpo_line;
	uint16_t	lpo_ref;
	uint16_t	lpo_cnt;
	u_int64_t	lpo_acqtime;
	u_int64_t	lpo_waittime;
	u_int		lpo_contest_locking;
};

/*
 * One lock_prof for each (file, line, lock object) triple.
 */
struct lock_prof {
	SLIST_ENTRY(lock_prof) link;
	struct lock_class *class;
	const char	*file;
	const char	*name;
	int		line;
	int		ticks;
	uintmax_t	cnt_max;
	uintmax_t	cnt_tot;
	uintmax_t	cnt_wait;
	uintmax_t	cnt_cur;
	uintmax_t	cnt_contest_locking;
};

SLIST_HEAD(lphead, lock_prof);

#define	LPROF_HASH_SIZE		4096
#define	LPROF_HASH_MASK		(LPROF_HASH_SIZE - 1)
#define	LPROF_CACHE_SIZE	4096

/*
 * Array of objects and profs for each type of object for each cpu.  Spinlocks
 * are handled separately because a thread may be preempted and acquire a
 * spinlock while in the lock profiling code of a non-spinlock.  In this way
 * we only need a critical section to protect the per-cpu lists.
 */
struct lock_prof_type {
	struct lphead		lpt_lpalloc;
	struct lpohead		lpt_lpoalloc;
	struct lphead		lpt_hash[LPROF_HASH_SIZE];
	struct lock_prof	lpt_prof[LPROF_CACHE_SIZE];
	struct lock_profile_object lpt_objs[LPROF_CACHE_SIZE];
};

struct lock_prof_cpu {
	struct lock_prof_type	lpc_types[2]; /* One for spin, one for other. */
};

struct lock_prof_cpu *lp_cpu[MAXCPU];

int lock_prof_enable = 0;

/* SWAG: sbuf size = avg stat. line size * number of locks */
#define LPROF_SBUF_SIZE		256 * 400

static int lock_prof_rejected;
static int lock_prof_skipspin;
static int lock_prof_skipcount;

#ifndef USE_CPU_NANOSECONDS
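/*
 * Return the current uptime in nanoseconds, derived from binuptime().
 */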
u_int64_t
nanoseconds(void)
{
	struct bintime bt;
	u_int64_t ns;

	binuptime(&bt);
	/* From bintime2timespec */
	ns = bt.sec * (u_int64_t)1000000000;
	ns += ((uint64_t)1000000000 * (uint32_t)(bt.frac >> 32)) >> 32;
	return (ns);
}
#endif

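/*
 * Seed a type's free lists with its statically sized arrays of lock_prof
 * and lock_profile_object entries.
 */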
static void
lock_prof_init_type(struct lock_prof_type *type)
{
	int i;

	SLIST_INIT(&type->lpt_lpalloc);
	LIST_INIT(&type->lpt_lpoalloc);
	for (i = 0; i < LPROF_CACHE_SIZE; i++) {
		SLIST_INSERT_HEAD(&type->lpt_lpalloc, &type->lpt_prof[i],
		    link);
		LIST_INSERT_HEAD(&type->lpt_lpoalloc, &type->lpt_objs[i],
		    lpo_link);
	}
}

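/*
 * Allocate and initialize the per-cpu profiling state; run via SYSINIT
 * once the other CPUs have been launched.
 */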
static void
lock_prof_init(void *arg)
{
	int cpu;

	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		lp_cpu[cpu] = malloc(sizeof(*lp_cpu[cpu]), M_DEVBUF,
		    M_WAITOK | M_ZERO);
		lock_prof_init_type(&lp_cpu[cpu]->lpc_types[0]);
		lock_prof_init_type(&lp_cpu[cpu]->lpc_types[1]);
	}
}
SYSINIT(lockprof, SI_SUB_SMP, SI_ORDER_ANY, lock_prof_init, NULL);

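/*
 * Disable profiling briefly, wait for threads still updating the per-cpu
 * state to drain, then zero and re-initialize every cpu's tables before
 * restoring the previous enable setting.
 */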
static void
lock_prof_reset(void)
{
	struct lock_prof_cpu *lpc;
	int enabled, i, cpu;

	enabled = lock_prof_enable;
	lock_prof_enable = 0;
	pause("lpreset", hz / 10);
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		lpc = lp_cpu[cpu];
		for (i = 0; i < LPROF_CACHE_SIZE; i++) {
			LIST_REMOVE(&lpc->lpc_types[0].lpt_objs[i], lpo_link);
			LIST_REMOVE(&lpc->lpc_types[1].lpt_objs[i], lpo_link);
		}
		bzero(lpc, sizeof(*lpc));
		lock_prof_init_type(&lpc->lpc_types[0]);
		lock_prof_init_type(&lpc->lpc_types[1]);
	}
	lock_prof_enable = enabled;
}

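/*
 * Emit one aggregated lock_prof record as a line of the stats output.
 * Accumulated times are converted from nanoseconds to microseconds and
 * leading "../" components are stripped from the file name.
 */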
static void
lock_prof_output(struct lock_prof *lp, struct sbuf *sb)
{
	const char *p;

	for (p = lp->file; p != NULL && strncmp(p, "../", 3) == 0; p += 3);
	sbuf_printf(sb,
	    "%6ju %12ju %12ju %11ju %5ju %5ju %12ju %12ju %s:%d (%s:%s)\n",
	    lp->cnt_max / 1000, lp->cnt_tot / 1000,
	    lp->cnt_wait / 1000, lp->cnt_cur,
	    lp->cnt_cur == 0 ? (uintmax_t)0 :
	    lp->cnt_tot / (lp->cnt_cur * 1000),
	    lp->cnt_cur == 0 ? (uintmax_t)0 :
	    lp->cnt_wait / (lp->cnt_cur * 1000),
	    (uintmax_t)0, lp->cnt_contest_locking,
	    p, lp->line, lp->class->lc_name, lp->name);
}

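/*
 * Accumulate every per-cpu record matching 'match' (same file, line and
 * name) into 'dst', tagging each record visited with this pass's ticks
 * value so it is only reported once.
 */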
static void
lock_prof_sum(struct lock_prof *match, struct lock_prof *dst, int hash,
    int spin, int t)
{
	struct lock_prof_type *type;
	struct lock_prof *l;
	int cpu;

	dst->file = match->file;
	dst->line = match->line;
	dst->class = match->class;
	dst->name = match->name;

	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (lp_cpu[cpu] == NULL)
			continue;
		type = &lp_cpu[cpu]->lpc_types[spin];
		SLIST_FOREACH(l, &type->lpt_hash[hash], link) {
			if (l->ticks == t)
				continue;
			if (l->file != match->file || l->line != match->line ||
			    l->name != match->name)
				continue;
			l->ticks = t;
			if (l->cnt_max > dst->cnt_max)
				dst->cnt_max = l->cnt_max;
			dst->cnt_tot += l->cnt_tot;
			dst->cnt_wait += l->cnt_wait;
			dst->cnt_cur += l->cnt_cur;
			dst->cnt_contest_locking += l->cnt_contest_locking;
		}
	}
}

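/*
 * Walk one type's hash table and output an aggregated line for each record
 * that has not already been reported during this pass.
 */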
static void
lock_prof_type_stats(struct lock_prof_type *type, struct sbuf *sb, int spin,
    int t)
{
	struct lock_prof *l;
	int i;

	for (i = 0; i < LPROF_HASH_SIZE; ++i) {
		SLIST_FOREACH(l, &type->lpt_hash[i], link) {
			struct lock_prof lp = {};

			if (l->ticks == t)
				continue;
			lock_prof_sum(l, &lp, i, spin, t);
			lock_prof_output(&lp, sb);
			if (sbuf_overflowed(sb))
				return;
		}
	}
}

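/*
 * Handler for the debug.lock.prof.stats sysctl: profiling is paused while
 * the per-cpu tables are summed into an sbuf, growing the buffer and
 * restarting if it overflows.
 */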
static int
dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
	static int multiplier = 1;
	struct sbuf *sb;
	int error, cpu, t;
	int enabled;

retry_sbufops:
	sb = sbuf_new(NULL, NULL, LPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
	sbuf_printf(sb, "\n%6s %12s %12s %11s %5s %5s %12s %12s %s\n",
	    "max", "total", "wait_total", "count", "avg", "wait_avg", "cnt_hold", "cnt_lock", "name");
	enabled = lock_prof_enable;
	lock_prof_enable = 0;
	pause("lpreset", hz / 10);
	t = ticks;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (lp_cpu[cpu] == NULL)
			continue;
		lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[0], sb, 0, t);
		lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[1], sb, 1, t);
		if (sbuf_overflowed(sb)) {
			sbuf_delete(sb);
			multiplier++;
			goto retry_sbufops;
		}
	}
	lock_prof_enable = enabled;

	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}

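/*
 * Handler for the debug.lock.prof.enable sysctl; writing 1 also resets the
 * accumulated statistics before profiling is turned on.
 */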
static int
enable_lock_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = lock_prof_enable;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == lock_prof_enable)
		return (0);
	if (v == 1)
		lock_prof_reset();
	lock_prof_enable = !!v;

	return (0);
}

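/*
 * Handler for the debug.lock.prof.reset sysctl; writing a non-zero value
 * clears all accumulated statistics.
 */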
static int
reset_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	lock_prof_reset();

	return (0);
}

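/*
 * Find the current cpu's lock_prof record for this (lock name, file, line)
 * triple, allocating a new one from the per-cpu free list and hashing it
 * in if none exists yet.
 */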
static struct lock_prof *
lock_profile_lookup(struct lock_object *lo, int spin, const char *file,
    int line)
{
	const char *unknown = "(unknown)";
	struct lock_prof_type *type;
	struct lock_prof *lp;
	struct lphead *head;
	const char *p;
	u_int hash;

	p = file;
	if (p == NULL || *p == '\0')
		p = unknown;
	hash = (uintptr_t)lo->lo_name * 31 + (uintptr_t)p * 31 + line;
	hash &= LPROF_HASH_MASK;
	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
	head = &type->lpt_hash[hash];
	SLIST_FOREACH(lp, head, link) {
		if (lp->line == line && lp->file == p &&
		    lp->name == lo->lo_name)
			return (lp);
	}
	lp = SLIST_FIRST(&type->lpt_lpalloc);
	if (lp == NULL) {
		lock_prof_rejected++;
		return (lp);
	}
	SLIST_REMOVE_HEAD(&type->lpt_lpalloc, link);
	lp->file = p;
	lp->line = line;
	lp->class = LOCK_CLASS(lo);
	lp->name = lo->lo_name;
	SLIST_INSERT_HEAD(&type->lpt_hash[hash], lp, link);
	return (lp);
}

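/*
 * Find the curthread's lock_profile_object tracking this acquisition site,
 * or allocate one from the current cpu's free list (under a critical
 * section) and link it onto the thread's list.
 */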
static struct lock_profile_object *
lock_profile_object_lookup(struct lock_object *lo, int spin, const char *file,
    int line)
{
	struct lock_profile_object *l;
	struct lock_prof_type *type;
	struct lpohead *head;

	head = &curthread->td_lprof[spin];
	LIST_FOREACH(l, head, lpo_link)
		if (l->lpo_obj == lo && l->lpo_file == file &&
		    l->lpo_line == line)
			return (l);
	critical_enter();
	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
	l = LIST_FIRST(&type->lpt_lpoalloc);
	if (l == NULL) {
		lock_prof_rejected++;
		critical_exit();
		return (NULL);
	}
	LIST_REMOVE(l, lpo_link);
	critical_exit();
	l->lpo_obj = lo;
	l->lpo_file = file;
	l->lpo_line = line;
	l->lpo_cnt = 0;
	LIST_INSERT_HEAD(head, l, lpo_link);

	return (l);
}

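/*
 * Called when a profiled lock has been acquired: record the acquisition
 * time and any time spent waiting in the thread's lock_profile_object.
 * Recursive acquisitions only bump the reference count so the timer is
 * not reset.
 */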
void
lock_profile_obtain_lock_success(struct lock_object *lo, int contested,
    uint64_t waittime, const char *file, int line)
{
	static int lock_prof_count;
	struct lock_profile_object *l;
	int spin;

	/* don't reset the timer when/if recursing */
	if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
		return;
	if (lock_prof_skipcount &&
	    (++lock_prof_count % lock_prof_skipcount) == 0)
		return;
	spin = LOCK_CLASS(lo) == &lock_class_mtx_spin;
	if (spin && lock_prof_skipspin == 1)
		return;
	l = lock_profile_object_lookup(lo, spin, file, line);
	if (l == NULL)
		return;
	l->lpo_cnt++;
	if (++l->lpo_ref > 1)
		return;
	l->lpo_contest_locking = contested;
	l->lpo_acqtime = nanoseconds();
	if (waittime && (l->lpo_acqtime > waittime))
		l->lpo_waittime = l->lpo_acqtime - waittime;
	else
		l->lpo_waittime = 0;
}

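/*
 * Called when a profiled lock is released: fold the hold and wait times
 * from the thread's lock_profile_object into the per-cpu lock_prof record
 * and return the object to its free list.
 */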
void
lock_profile_release_lock(struct lock_object *lo)
{
	struct lock_profile_object *l;
	struct lock_prof_type *type;
	struct lock_prof *lp;
	u_int64_t holdtime;
	struct lpohead *head;
	int spin;

	if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
		return;
	spin = LOCK_CLASS(lo) == &lock_class_mtx_spin;
	head = &curthread->td_lprof[spin];
	critical_enter();
	LIST_FOREACH(l, head, lpo_link)
		if (l->lpo_obj == lo)
			break;
	if (l == NULL)
		goto out;
	if (--l->lpo_ref > 0)
		goto out;
	lp = lock_profile_lookup(lo, spin, l->lpo_file, l->lpo_line);
	if (lp == NULL)
		goto release;
	/* holdtime is unsigned; guard against time going backwards. */
	holdtime = nanoseconds();
	if (holdtime < l->lpo_acqtime)
		goto release;
	holdtime -= l->lpo_acqtime;
	/*
	 * Record if the lock has been held longer now than ever
	 * before.
	 */
	if (holdtime > lp->cnt_max)
		lp->cnt_max = holdtime;
	lp->cnt_tot += holdtime;
	lp->cnt_wait += l->lpo_waittime;
	lp->cnt_contest_locking += l->lpo_contest_locking;
	lp->cnt_cur += l->lpo_cnt;
release:
	LIST_REMOVE(l, lpo_link);
	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
	LIST_INSERT_HEAD(&type->lpt_lpoalloc, l, lpo_link);
out:
	critical_exit();
}

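/*
 * Sysctl knobs, e.g. (on a kernel built with LOCK_PROFILING):
 *	sysctl debug.lock.prof.enable=1
 *	sysctl debug.lock.prof.stats
 *	sysctl debug.lock.prof.reset=1
 */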
SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD, NULL, "lock debugging");
SYSCTL_NODE(_debug_lock, OID_AUTO, prof, CTLFLAG_RD, NULL, "lock profiling");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipspin, CTLFLAG_RW,
    &lock_prof_skipspin, 0, "Skip profiling on spinlocks.");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipcount, CTLFLAG_RW,
    &lock_prof_skipcount, 0, "Sample approximately every N lock acquisitions.");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &lock_prof_rejected, 0, "Number of rejected profiling records");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_lock_prof_stats, "A", "Lock profiling statistics");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_lock_prof_stats, "I", "Reset lock profiling statistics");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_lock_prof, "I", "Enable lock profiling");

#endif