/*-
 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This module holds the global variables and functions used to maintain
 * lock_object structures.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/kern/subr_lock.c 367457 2020-11-07 18:10:59Z dim $");

#include "opt_ddb.h"
#include "opt_mprof.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <machine/cpufunc.h>

SDT_PROVIDER_DEFINE(lock);
SDT_PROBE_DEFINE1(lock, , , starvation, "u_int");

CTASSERT(LOCK_CLASS_MAX == 15);

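/*
 * Table of all lock classes.  lock_init() stores a lock's index into this
 * table in its lo_flags (shifted by LO_CLASSSHIFT), and LOCK_CLASS() uses
 * that index to map a lock_object back to its class.
 */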
struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = {
	&lock_class_mtx_spin,
	&lock_class_mtx_sleep,
	&lock_class_sx,
	&lock_class_rm,
	&lock_class_rm_sleepable,
	&lock_class_rw,
	&lock_class_lockmgr,
};

void
lock_init(struct lock_object *lock, struct lock_class *class, const char *name,
    const char *type, int flags)
{
	int i;

	/* Check for double-init and zero object. */
	KASSERT(flags & LO_NEW || !lock_initialized(lock),
	    ("lock \"%s\" %p already initialized", name, lock));

	/* Look up lock class to find its index. */
	for (i = 0; i < LOCK_CLASS_MAX; i++)
		if (lock_classes[i] == class) {
			lock->lo_flags = i << LO_CLASSSHIFT;
			break;
		}
	KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class));

	/* Initialize the lock object. */
	lock->lo_name = name;
	lock->lo_flags |= flags | LO_INITIALIZED;
	LOCK_LOG_INIT(lock, 0);
	WITNESS_INIT(lock, (type != NULL) ? type : name);
}

void
lock_destroy(struct lock_object *lock)
{

	KASSERT(lock_initialized(lock), ("lock %p is not initialized", lock));
	WITNESS_DESTROY(lock);
	LOCK_LOG_DESTROY(lock, 0);
	lock->lo_flags &= ~LO_INITIALIZED;
}

static SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD, NULL, "lock debugging");
static SYSCTL_NODE(_debug_lock, OID_AUTO, delay, CTLFLAG_RD, NULL,
    "lock delay");

static u_int __read_mostly starvation_limit = 131072;
SYSCTL_INT(_debug_lock_delay, OID_AUTO, starvation_limit, CTLFLAG_RW,
    &starvation_limit, 0, "");

static u_int __read_mostly restrict_starvation = 0;
SYSCTL_INT(_debug_lock_delay, OID_AUTO, restrict_starvation, CTLFLAG_RW,
    &restrict_starvation, 0, "");

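/*
 * Exponential backoff for spinning lock acquisition paths.  Each call
 * doubles la->delay (capped at the configured maximum) and busy-waits that
 * many cpu_spinwait() iterations.  Once the accumulated spin count exceeds
 * starvation_limit, the lock starvation SDT probe fires and, if
 * restrict_starvation is set, the delay is clamped back to the base value
 * so a starving waiter stops backing off further.
 *
 * A caller's spin loop looks roughly like this (sketch only; "trylock"
 * stands in for the lock-specific acquire attempt and "delay_config" for
 * the lock type's lock_delay_config):
 *
 *	struct lock_delay_arg lda;
 *
 *	lock_delay_arg_init(&lda, &delay_config);
 *	while (!trylock(lock))
 *		lock_delay(&lda);
 */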
void
lock_delay(struct lock_delay_arg *la)
{
	struct lock_delay_config *lc = la->config;
	u_int i;

	la->delay <<= 1;
	if (__predict_false(la->delay > lc->max))
		la->delay = lc->max;

	for (i = la->delay; i > 0; i--)
		cpu_spinwait();

	la->spin_cnt += la->delay;
	if (__predict_false(la->spin_cnt > starvation_limit)) {
		SDT_PROBE1(lock, , , starvation, la->delay);
		if (restrict_starvation)
			la->delay = lc->base;
	}
}

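/*
 * Return the smallest power of two strictly greater than "val".
 */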
static u_int
lock_roundup_2(u_int val)
{
	u_int res;

	for (res = 1; res <= val; res <<= 1)
		continue;

	return (res);
}

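/*
 * Default delay tuning: scale the initial backoff with the CPU count and
 * allow it to grow to 1024 times that.  For example, with mp_ncpus == 8,
 * lock_roundup_2() returns 16, giving base == 4 and max == 4096 iterations.
 */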
void
lock_delay_default_init(struct lock_delay_config *lc)
{

	lc->base = lock_roundup_2(mp_ncpus) / 4;
	lc->max = lc->base * 1024;
}

#ifdef DDB
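/*
 * ddb "show lock <address>" command: decode a lock_object and hand it to
 * its lock class' lc_ddb_show routine for the class-specific details.
 */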
DB_SHOW_COMMAND(lock, db_show_lock)
{
	struct lock_object *lock;
	struct lock_class *class;

	if (!have_addr)
		return;
	lock = (struct lock_object *)addr;
	if (LO_CLASSINDEX(lock) > LOCK_CLASS_MAX) {
		db_printf("Unknown lock class: %d\n", LO_CLASSINDEX(lock));
		return;
	}
	class = LOCK_CLASS(lock);
	db_printf(" class: %s\n", class->lc_name);
	db_printf(" name: %s\n", lock->lo_name);
	class->lc_ddb_show(lock);
}
#endif

#ifdef LOCK_PROFILING

/*
 * One object per-thread for each lock the thread owns.  Tracks individual
 * lock instances.
 */
struct lock_profile_object {
	LIST_ENTRY(lock_profile_object) lpo_link;
	struct lock_object *lpo_obj;
	const char	*lpo_file;
	int		lpo_line;
	uint16_t	lpo_ref;
	uint16_t	lpo_cnt;
	uint64_t	lpo_acqtime;
	uint64_t	lpo_waittime;
	u_int		lpo_contest_locking;
};

/*
 * One lock_prof for each (file, line, lock object) triple.
 */
struct lock_prof {
	SLIST_ENTRY(lock_prof) link;
	struct lock_class *class;
	const char	*file;
	const char	*name;
	int		line;
	int		ticks;
	uintmax_t	cnt_wait_max;
	uintmax_t	cnt_max;
	uintmax_t	cnt_tot;
	uintmax_t	cnt_wait;
	uintmax_t	cnt_cur;
	uintmax_t	cnt_contest_locking;
};

SLIST_HEAD(lphead, lock_prof);

#define	LPROF_HASH_SIZE		4096
#define	LPROF_HASH_MASK		(LPROF_HASH_SIZE - 1)
#define	LPROF_CACHE_SIZE	4096
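/*
 * LPROF_CACHE_SIZE lock_prof and lock_profile_object entries are
 * preallocated per CPU for each type (spin and non-spin).  Once a free list
 * is exhausted, further acquisitions are counted in lock_prof_rejected
 * instead of being profiled.
 */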

/*
 * Array of objects and profs for each type of object for each cpu.  Spinlocks
 * are handled separately because a thread may be preempted and acquire a
 * spinlock while in the lock profiling code of a non-spinlock.  In this way
 * we only need a critical section to protect the per-cpu lists.
 */
struct lock_prof_type {
	struct lphead		lpt_lpalloc;
	struct lpohead		lpt_lpoalloc;
	struct lphead		lpt_hash[LPROF_HASH_SIZE];
	struct lock_prof	lpt_prof[LPROF_CACHE_SIZE];
	struct lock_profile_object lpt_objs[LPROF_CACHE_SIZE];
};

struct lock_prof_cpu {
	struct lock_prof_type	lpc_types[2]; /* One for spin one for other. */
};

struct lock_prof_cpu *lp_cpu[MAXCPU];

volatile int __read_mostly lock_prof_enable;
static volatile int lock_prof_resetting;

#define LPROF_SBUF_SIZE		256

static int lock_prof_rejected;
static int lock_prof_skipspin;
static int lock_prof_skipcount;

#ifndef USE_CPU_NANOSECONDS
uint64_t
nanoseconds(void)
{
	struct bintime bt;
	uint64_t ns;

	binuptime(&bt);
	/* From bintime2timespec */
	ns = bt.sec * (uint64_t)1000000000;
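	/*
	 * bt.frac is a 64-bit binary fraction of one second; scale its
	 * upper 32 bits to nanoseconds and shift the product back down.
	 */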
	ns += ((uint64_t)1000000000 * (uint32_t)(bt.frac >> 32)) >> 32;
	return (ns);
}
#endif

static void
lock_prof_init_type(struct lock_prof_type *type)
{
	int i;

	SLIST_INIT(&type->lpt_lpalloc);
	LIST_INIT(&type->lpt_lpoalloc);
	for (i = 0; i < LPROF_CACHE_SIZE; i++) {
		SLIST_INSERT_HEAD(&type->lpt_lpalloc, &type->lpt_prof[i],
		    link);
		LIST_INSERT_HEAD(&type->lpt_lpoalloc, &type->lpt_objs[i],
		    lpo_link);
	}
}

static void
lock_prof_init(void *arg)
{
	int cpu;

	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		lp_cpu[cpu] = malloc(sizeof(*lp_cpu[cpu]), M_DEVBUF,
		    M_WAITOK | M_ZERO);
		lock_prof_init_type(&lp_cpu[cpu]->lpc_types[0]);
		lock_prof_init_type(&lp_cpu[cpu]->lpc_types[1]);
	}
}
SYSINIT(lockprof, SI_SUB_SMP, SI_ORDER_ANY, lock_prof_init, NULL);

static void
lock_prof_reset_wait(void)
{

	/*
	 * Spin relinquishing our cpu so that quiesce_all_cpus may
	 * complete.
	 */
	while (lock_prof_resetting)
		sched_relinquish(curthread);
}

static void
lock_prof_reset(void)
{
	struct lock_prof_cpu *lpc;
	int enabled, i, cpu;

	/*
	 * We race not only with threads acquiring and releasing locks but
	 * also with thread exit.  To be certain that exiting threads do not
	 * leave entries on their per-thread lists, they must see resetting
	 * set before enabled is cleared.  Otherwise a lock may not be
	 * removed from a per-thread list: the release path skips the removal
	 * because profiling is disabled, but the exiting thread does not
	 * wait for the reset below to remove it either.
	 */
	atomic_store_rel_int(&lock_prof_resetting, 1);
	enabled = lock_prof_enable;
	lock_prof_enable = 0;
	quiesce_all_cpus("profreset", 0);
	/*
	 * Some objects may have migrated between CPUs.  Clear all links
	 * before we zero the structures.  Some items may still be linked
	 * into per-thread lists as well.
	 */
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		lpc = lp_cpu[cpu];
		for (i = 0; i < LPROF_CACHE_SIZE; i++) {
			LIST_REMOVE(&lpc->lpc_types[0].lpt_objs[i], lpo_link);
			LIST_REMOVE(&lpc->lpc_types[1].lpt_objs[i], lpo_link);
		}
	}
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		lpc = lp_cpu[cpu];
		bzero(lpc, sizeof(*lpc));
		lock_prof_init_type(&lpc->lpc_types[0]);
		lock_prof_init_type(&lpc->lpc_types[1]);
	}
	atomic_store_rel_int(&lock_prof_resetting, 0);
	lock_prof_enable = enabled;
}

static void
lock_prof_output(struct lock_prof *lp, struct sbuf *sb)
{
	const char *p;

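	/* Skip any leading "../" components in the file name. */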
	for (p = lp->file; p != NULL && strncmp(p, "../", 3) == 0; p += 3);
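	/*
	 * Times are accumulated in nanoseconds; print them as microseconds.
	 * The cnt_hold column is not maintained and always prints as 0.
	 */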
	sbuf_printf(sb,
	    "%8ju %9ju %11ju %11ju %11ju %6ju %6ju %2ju %6ju %s:%d (%s:%s)\n",
	    lp->cnt_max / 1000, lp->cnt_wait_max / 1000, lp->cnt_tot / 1000,
	    lp->cnt_wait / 1000, lp->cnt_cur,
	    lp->cnt_cur == 0 ? (uintmax_t)0 :
	    lp->cnt_tot / (lp->cnt_cur * 1000),
	    lp->cnt_cur == 0 ? (uintmax_t)0 :
	    lp->cnt_wait / (lp->cnt_cur * 1000),
	    (uintmax_t)0, lp->cnt_contest_locking,
	    p, lp->line, lp->class->lc_name, lp->name);
}

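/*
 * Fold all per-CPU lock_prof entries matching "match" (same name, file and
 * line) into "dst".  Each entry visited is stamped with the current ticks
 * value "t" so that lock_prof_type_stats() does not report it again during
 * this pass.
 */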
static void
lock_prof_sum(struct lock_prof *match, struct lock_prof *dst, int hash,
    int spin, int t)
{
	struct lock_prof_type *type;
	struct lock_prof *l;
	int cpu;

	dst->file = match->file;
	dst->line = match->line;
	dst->class = match->class;
	dst->name = match->name;

	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (lp_cpu[cpu] == NULL)
			continue;
		type = &lp_cpu[cpu]->lpc_types[spin];
		SLIST_FOREACH(l, &type->lpt_hash[hash], link) {
			if (l->ticks == t)
				continue;
			if (l->file != match->file || l->line != match->line ||
			    l->name != match->name)
				continue;
			l->ticks = t;
			if (l->cnt_max > dst->cnt_max)
				dst->cnt_max = l->cnt_max;
			if (l->cnt_wait_max > dst->cnt_wait_max)
				dst->cnt_wait_max = l->cnt_wait_max;
			dst->cnt_tot += l->cnt_tot;
			dst->cnt_wait += l->cnt_wait;
			dst->cnt_cur += l->cnt_cur;
			dst->cnt_contest_locking += l->cnt_contest_locking;
		}
	}
}

static void
lock_prof_type_stats(struct lock_prof_type *type, struct sbuf *sb, int spin,
    int t)
{
	struct lock_prof *l;
	int i;

	for (i = 0; i < LPROF_HASH_SIZE; ++i) {
		SLIST_FOREACH(l, &type->lpt_hash[i], link) {
			struct lock_prof lp = {};

			if (l->ticks == t)
				continue;
			lock_prof_sum(l, &lp, i, spin, t);
			lock_prof_output(&lp, sb);
		}
	}
}

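/*
 * Sysctl handler for debug.lock.prof.stats.  Profiling is disabled and all
 * CPUs are quiesced while the per-CPU tables are summed and formatted; the
 * previous enable state is restored once the walk is complete.
 */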
static int
dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int error, cpu, t;
	int enabled;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = sbuf_new_for_sysctl(NULL, NULL, LPROF_SBUF_SIZE, req);
	sbuf_printf(sb, "\n%8s %9s %11s %11s %11s %6s %6s %2s %6s %s\n",
	    "max", "wait_max", "total", "wait_total", "count", "avg", "wait_avg", "cnt_hold", "cnt_lock", "name");
	enabled = lock_prof_enable;
	lock_prof_enable = 0;
	quiesce_all_cpus("profstat", 0);
	t = ticks;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (lp_cpu[cpu] == NULL)
			continue;
		lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[0], sb, 0, t);
		lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[1], sb, 1, t);
	}
	lock_prof_enable = enabled;

	error = sbuf_finish(sb);
	/* Output a trailing NUL. */
	if (error == 0)
		error = SYSCTL_OUT(req, "", 1);
	sbuf_delete(sb);
	return (error);
}

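/*
 * Sysctl handler for debug.lock.prof.enable.  Writing 1 resets the
 * statistics before enabling; any other non-zero value enables profiling
 * without a reset.
 */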
static int
enable_lock_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = lock_prof_enable;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == lock_prof_enable)
		return (0);
	if (v == 1)
		lock_prof_reset();
	lock_prof_enable = !!v;

	return (0);
}

static int
reset_lock_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	lock_prof_reset();

	return (0);
}

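/*
 * Find (or allocate) this CPU's lock_prof entry for the (name, file, line)
 * triple.  The hash and the match compare the name and file string
 * pointers, not their contents.  Returns NULL and bumps lock_prof_rejected
 * if the per-CPU free list is exhausted.
 */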
static struct lock_prof *
lock_profile_lookup(struct lock_object *lo, int spin, const char *file,
    int line)
{
	const char *unknown = "(unknown)";
	struct lock_prof_type *type;
	struct lock_prof *lp;
	struct lphead *head;
	const char *p;
	u_int hash;

	p = file;
	if (p == NULL || *p == '\0')
		p = unknown;
	hash = (uintptr_t)lo->lo_name * 31 + (uintptr_t)p * 31 + line;
	hash &= LPROF_HASH_MASK;
	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
	head = &type->lpt_hash[hash];
	SLIST_FOREACH(lp, head, link) {
		if (lp->line == line && lp->file == p &&
		    lp->name == lo->lo_name)
			return (lp);
	}
	lp = SLIST_FIRST(&type->lpt_lpalloc);
	if (lp == NULL) {
		lock_prof_rejected++;
		return (lp);
	}
	SLIST_REMOVE_HEAD(&type->lpt_lpalloc, link);
	lp->file = p;
	lp->line = line;
	lp->class = LOCK_CLASS(lo);
	lp->name = lo->lo_name;
	SLIST_INSERT_HEAD(&type->lpt_hash[hash], lp, link);
	return (lp);
}

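/*
 * Find (or allocate) the lock_profile_object tracking this acquisition on
 * the current thread's per-type list.  An existing entry is reused for a
 * recursive acquisition from the same file and line; otherwise one is taken
 * from the per-CPU free list, with NULL returned (and lock_prof_rejected
 * bumped) if that list is empty.
 */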
static struct lock_profile_object *
lock_profile_object_lookup(struct lock_object *lo, int spin, const char *file,
    int line)
{
	struct lock_profile_object *l;
	struct lock_prof_type *type;
	struct lpohead *head;

	head = &curthread->td_lprof[spin];
	LIST_FOREACH(l, head, lpo_link)
		if (l->lpo_obj == lo && l->lpo_file == file &&
		    l->lpo_line == line)
			return (l);
	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
	l = LIST_FIRST(&type->lpt_lpoalloc);
	if (l == NULL) {
		lock_prof_rejected++;
		return (NULL);
	}
	LIST_REMOVE(l, lpo_link);
	l->lpo_obj = lo;
	l->lpo_file = file;
	l->lpo_line = line;
	l->lpo_cnt = 0;
	LIST_INSERT_HEAD(head, l, lpo_link);

	return (l);
}

void
lock_profile_obtain_lock_success(struct lock_object *lo, int contested,
    uint64_t waittime, const char *file, int line)
{
	static int lock_prof_count;
	struct lock_profile_object *l;
	int spin;

	if (SCHEDULER_STOPPED())
		return;

	/* don't reset the timer when/if recursing */
	if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
		return;
	if (lock_prof_skipcount &&
	    (++lock_prof_count % lock_prof_skipcount) != 0)
		return;
	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
	if (spin && lock_prof_skipspin == 1)
		return;
	critical_enter();
	/* Recheck enabled now that we're in a critical section. */
	if (lock_prof_enable == 0)
		goto out;
	l = lock_profile_object_lookup(lo, spin, file, line);
	if (l == NULL)
		goto out;
	l->lpo_cnt++;
	if (++l->lpo_ref > 1)
		goto out;
	l->lpo_contest_locking = contested;
	l->lpo_acqtime = nanoseconds();
	if (waittime && (l->lpo_acqtime > waittime))
		l->lpo_waittime = l->lpo_acqtime - waittime;
	else
		l->lpo_waittime = 0;
out:
	critical_exit();
}

void
lock_profile_thread_exit(struct thread *td)
{
#ifdef INVARIANTS
	struct lock_profile_object *l;

	MPASS(curthread->td_critnest == 0);
#endif
	/*
	 * If lock profiling was disabled we have to wait for reset to
	 * clear our pointers before we can exit safely.
	 */
	lock_prof_reset_wait();
#ifdef INVARIANTS
	LIST_FOREACH(l, &td->td_lprof[0], lpo_link)
		printf("thread still holds lock acquired at %s:%d\n",
		    l->lpo_file, l->lpo_line);
	LIST_FOREACH(l, &td->td_lprof[1], lpo_link)
		printf("thread still holds lock acquired at %s:%d\n",
		    l->lpo_file, l->lpo_line);
#endif
	MPASS(LIST_FIRST(&td->td_lprof[0]) == NULL);
	MPASS(LIST_FIRST(&td->td_lprof[1]) == NULL);
}

void
lock_profile_release_lock(struct lock_object *lo)
{
	struct lock_profile_object *l;
	struct lock_prof_type *type;
	struct lock_prof *lp;
	uint64_t curtime, holdtime;
	struct lpohead *head;
	int spin;

	if (SCHEDULER_STOPPED())
		return;
	if (lo->lo_flags & LO_NOPROFILE)
		return;
	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
	head = &curthread->td_lprof[spin];
	if (LIST_FIRST(head) == NULL)
		return;
	critical_enter();
	/* Recheck enabled now that we're in a critical section. */
	if (lock_prof_enable == 0 && lock_prof_resetting == 1)
		goto out;
	/*
	 * If lock profiling is not enabled we still want to remove the
	 * lpo from our queue.
	 */
	LIST_FOREACH(l, head, lpo_link)
		if (l->lpo_obj == lo)
			break;
	if (l == NULL)
		goto out;
	if (--l->lpo_ref > 0)
		goto out;
	lp = lock_profile_lookup(lo, spin, l->lpo_file, l->lpo_line);
	if (lp == NULL)
		goto release;
	curtime = nanoseconds();
	if (curtime < l->lpo_acqtime)
		goto release;
	holdtime = curtime - l->lpo_acqtime;

	/*
	 * Record if the lock has been held longer now than ever
	 * before.
	 */
	if (holdtime > lp->cnt_max)
		lp->cnt_max = holdtime;
	if (l->lpo_waittime > lp->cnt_wait_max)
		lp->cnt_wait_max = l->lpo_waittime;
	lp->cnt_tot += holdtime;
	lp->cnt_wait += l->lpo_waittime;
	lp->cnt_contest_locking += l->lpo_contest_locking;
	lp->cnt_cur += l->lpo_cnt;
release:
	LIST_REMOVE(l, lpo_link);
	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
	LIST_INSERT_HEAD(&type->lpt_lpoalloc, l, lpo_link);
out:
	critical_exit();
}

static SYSCTL_NODE(_debug_lock, OID_AUTO, prof, CTLFLAG_RD, NULL,
    "lock profiling");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipspin, CTLFLAG_RW,
    &lock_prof_skipspin, 0, "Skip profiling on spinlocks.");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipcount, CTLFLAG_RW,
    &lock_prof_skipcount, 0, "Sample approximately every N lock acquisitions.");
SYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD,
    &lock_prof_rejected, 0, "Number of rejected profiling records");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_lock_prof_stats, "A", "Lock profiling statistics");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_lock_prof_stats, "I", "Reset lock profiling statistics");
SYSCTL_PROC(_debug_lock_prof, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_lock_prof, "I", "Enable lock profiling");

#endif