subr_lock.c: r173444 vs. r174629 (deleted lines keep the old file's numbering, added lines the new file's)
1/*-
2 * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 19 unchanged lines hidden ---

28 */
29
30/*
31 * This module holds the global variables and functions used to maintain
32 * lock_object structures.
33 */
34
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD: head/sys/kern/subr_lock.c 173444 2007-11-08 14:47:55Z ups $");
36__FBSDID("$FreeBSD: head/sys/kern/subr_lock.c 174629 2007-12-15 23:13:31Z jeff $");
37
38#include "opt_ddb.h"
39#include "opt_mprof.h"
40
41#include <sys/param.h>
42#include <sys/systm.h>
43#include <sys/kernel.h>
43#include <sys/ktr.h>
44#include <sys/linker_set.h>
45#include <sys/lock.h>
47#include <sys/lock_profile.h>
48#include <sys/malloc.h>
49#include <sys/pcpu.h>
50#include <sys/proc.h>
46#include <sys/sbuf.h>
52#include <sys/smp.h>
47#include <sys/sysctl.h>
48#include <sys/lock_profile.h>
49
50#ifdef DDB
51#include <ddb/ddb.h>
52#endif
53
59#include <machine/cpufunc.h>
60
54CTASSERT(LOCK_CLASS_MAX == 15);
55
56struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = {
57 &lock_class_mtx_spin,
58 &lock_class_mtx_sleep,
59 &lock_class_sx,
60 &lock_class_rm,
61 &lock_class_rw,
62 &lock_class_lockmgr,
63};
64
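/*
 * Illustrative note (assumptions drawn from sys/lock.h, not this file):
 * lock_init() records the index of the matching lock_classes[] entry in
 * lo_flags, and LOCK_CLASS() recovers the class from that index, as the
 * profiling code below does with
 *
 *	spin = LOCK_CLASS(lo) == &lock_class_mtx_spin;
 *
 * The CTASSERT ties LOCK_CLASS_MAX to the width of that lo_flags
 * bitfield, so it must be revisited if class bits are added.
 */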
65#ifdef LOCK_PROFILING
66#include <machine/cpufunc.h>
67
68SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD, NULL, "lock debugging");
69SYSCTL_NODE(_debug_lock, OID_AUTO, prof, CTLFLAG_RD, NULL, "lock profiling");
70int lock_prof_enable = 0;
71SYSCTL_INT(_debug_lock_prof, OID_AUTO, enable, CTLFLAG_RW,
72 &lock_prof_enable, 0, "Enable lock profiling");
73
74/*
75 * lprof_buf is a static pool of profiling records to avoid possible
76 * reentrance of the memory allocation functions.
77 *
78 * Note: NUM_LPROF_BUFFERS must be smaller than LPROF_HASH_SIZE.
79 */
80struct lock_prof lprof_buf[LPROF_HASH_SIZE];
81static int allocated_lprof_buf;
82struct mtx lprof_locks[LPROF_LOCK_SIZE];
83
84
85/* SWAG: sbuf size = avg stat. line size * number of locks */
86#define LPROF_SBUF_SIZE 256 * 400
87
88static int lock_prof_acquisitions;
89SYSCTL_INT(_debug_lock_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
90 &lock_prof_acquisitions, 0, "Number of lock acquisitions recorded");
91static int lock_prof_records;
92SYSCTL_INT(_debug_lock_prof, OID_AUTO, records, CTLFLAG_RD,
93 &lock_prof_records, 0, "Number of profiling records");
94static int lock_prof_maxrecords = LPROF_HASH_SIZE;
95SYSCTL_INT(_debug_lock_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
96 &lock_prof_maxrecords, 0, "Maximum number of profiling records");
97static int lock_prof_rejected;
98SYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD,
99 &lock_prof_rejected, 0, "Number of rejected profiling records");
100static int lock_prof_hashsize = LPROF_HASH_SIZE;
101SYSCTL_INT(_debug_lock_prof, OID_AUTO, hashsize, CTLFLAG_RD,
102 &lock_prof_hashsize, 0, "Hash size");
103static int lock_prof_collisions = 0;
104SYSCTL_INT(_debug_lock_prof, OID_AUTO, collisions, CTLFLAG_RD,
105 &lock_prof_collisions, 0, "Number of hash collisions");
106
107#ifndef USE_CPU_NANOSECONDS
108u_int64_t
109nanoseconds(void)
110{
111 struct timespec tv;
112
113 nanotime(&tv);
114 return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
115}
116#endif
117
118static int
119dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
120{
121 struct sbuf *sb;
122 int error, i;
123 static int multiplier = 1;
124 const char *p;
125
126 if (allocated_lprof_buf == 0)
127 return (SYSCTL_OUT(req, "No locking recorded",
128 sizeof("No locking recorded")));
129
130retry_sbufops:
131 sb = sbuf_new(NULL, NULL, LPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
132 sbuf_printf(sb, "\n%6s %12s %12s %11s %5s %5s %12s %12s %s\n",
133 "max", "total", "wait_total", "count", "avg", "wait_avg", "cnt_hold", "cnt_lock", "name");
134 for (i = 0; i < LPROF_HASH_SIZE; ++i) {
135 if (lprof_buf[i].name == NULL)
136 continue;
137 for (p = lprof_buf[i].file;
138 p != NULL && strncmp(p, "../", 3) == 0; p += 3)
139 /* nothing */ ;
140 sbuf_printf(sb, "%6ju %12ju %12ju %11ju %5ju %5ju %12ju %12ju %s:%d (%s:%s)\n",
141 lprof_buf[i].cnt_max / 1000,
142 lprof_buf[i].cnt_tot / 1000,
143 lprof_buf[i].cnt_wait / 1000,
144 lprof_buf[i].cnt_cur,
145 lprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
146 lprof_buf[i].cnt_tot / (lprof_buf[i].cnt_cur * 1000),
147 lprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
148 lprof_buf[i].cnt_wait / (lprof_buf[i].cnt_cur * 1000),
149 lprof_buf[i].cnt_contest_holding,
150 lprof_buf[i].cnt_contest_locking,
151 p, lprof_buf[i].line,
152 lprof_buf[i].type,
153 lprof_buf[i].name);
154 if (sbuf_overflowed(sb)) {
155 sbuf_delete(sb);
156 multiplier++;
157 goto retry_sbufops;
158 }
159 }
160
161 sbuf_finish(sb);
162 error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
163 sbuf_delete(sb);
164 return (error);
165}
166static int
167reset_lock_prof_stats(SYSCTL_HANDLER_ARGS)
168{
169 int error, v;
170
171 if (allocated_lprof_buf == 0)
172 return (0);
173
174 v = 0;
175 error = sysctl_handle_int(oidp, &v, 0, req);
176 if (error)
177 return (error);
178 if (req->newptr == NULL)
179 return (error);
180 if (v == 0)
181 return (0);
182
183 bzero(lprof_buf, LPROF_HASH_SIZE*sizeof(*lprof_buf));
184 allocated_lprof_buf = 0;
185 return (0);
186}
187
188SYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
189 NULL, 0, dump_lock_prof_stats, "A", "Lock profiling statistics");
190
191SYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
192 NULL, 0, reset_lock_prof_stats, "I", "Reset lock profiling statistics");
193#endif
194
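/*
 * Note on the rewrite (summarizing the r173444 code above and the
 * r174629 replacement further below): the old version kept one global,
 * statically sized, open-addressed table (lprof_buf) updated under
 * LPROF_LOCK() hash locks, with per-instance state embedded in each
 * lock (lo_profile_obj).  The new version replaces that with per-CPU
 * hash tables and free pools (struct lock_prof_cpu) plus per-thread
 * lists of currently held locks (td_lprof), so the hot path needs only
 * a critical section.  The cnt_contest_holding statistic is no longer
 * collected; the new output prints 0 in that column.
 */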
195void
196lock_init(struct lock_object *lock, struct lock_class *class, const char *name,
197 const char *type, int flags)
198{
199 int i;
200
201 /* Check for double-init and zero object. */
202 KASSERT(!lock_initalized(lock), ("lock \"%s\" %p already initialized",

--- 8 unchanged lines hidden ---

211 KASSERT(i < LOCK_CLASS_MAX, ("unknown lock class %p", class));
212
213 /* Initialize the lock object. */
214 lock->lo_name = name;
215 lock->lo_type = type != NULL ? type : name;
216 lock->lo_flags |= flags | LO_INITIALIZED;
217 LOCK_LOG_INIT(lock, 0);
218 WITNESS_INIT(lock);
219 lock_profile_object_init(lock, class, name);
220}
221
222void
223lock_destroy(struct lock_object *lock)
224{
225
226 KASSERT(lock_initalized(lock), ("lock %p is not initialized", lock));
227 lock_profile_object_destroy(lock);
228 WITNESS_DESTROY(lock);
229 LOCK_LOG_DESTROY(lock, 0);
230 lock->lo_flags &= ~LO_INITIALIZED;
231}
232
233#ifdef DDB
234DB_SHOW_COMMAND(lock, db_show_lock)
235{

--- 12 unchanged lines hidden ---

248 db_printf(" name: %s\n", lock->lo_name);
249 if (lock->lo_type && lock->lo_type != lock->lo_name)
250 db_printf(" type: %s\n", lock->lo_type);
251 class->lc_ddb_show(lock);
252}
253#endif
254
255#ifdef LOCK_PROFILING
256void _lock_profile_obtain_lock_success(struct lock_object *lo, int contested, uint64_t waittime, const char *file, int line)
257{
258	struct lock_profile_object *l = &lo->lo_profile_obj;
259
260	lo->lo_profile_obj.lpo_contest_holding = 0;
261
262	if (contested)
263		lo->lo_profile_obj.lpo_contest_locking++;
264
265	l->lpo_filename = file;
266	l->lpo_lineno = line;
267	l->lpo_acqtime = nanoseconds();
268	if (waittime && (l->lpo_acqtime > waittime))
269		l->lpo_waittime = l->lpo_acqtime - waittime;
270	else
271		l->lpo_waittime = 0;
272}
273
274void _lock_profile_release_lock(struct lock_object *lo)
275{
276	struct lock_profile_object *l = &lo->lo_profile_obj;
277
278	if (l->lpo_acqtime) {
279		const char *unknown = "(unknown)";
280		u_int64_t acqtime, now, waittime;
281		struct lock_prof *mpp;
282		u_int hash;
283		const char *p = l->lpo_filename;
284		int collision = 0;
285
286		now = nanoseconds();
287		acqtime = l->lpo_acqtime;
288		waittime = l->lpo_waittime;
289		if (now <= acqtime)
290			return;
291		if (p == NULL || *p == '\0')
292			p = unknown;
293		hash = (l->lpo_namehash * 31 * 31 + (uintptr_t)p * 31 + l->lpo_lineno) & LPROF_HASH_MASK;
294		mpp = &lprof_buf[hash];
295		while (mpp->name != NULL) {
296			if (mpp->line == l->lpo_lineno &&
297			    mpp->file == p &&
298			    mpp->namehash == l->lpo_namehash)
299				break;
300			/* If the lprof_hash entry is allocated to someone
301			 * else, try the next one
302			 */
303			collision = 1;
304			hash = (hash + 1) & LPROF_HASH_MASK;
305			mpp = &lprof_buf[hash];
306		}
307		if (mpp->name == NULL) {
308			int buf;
309
310			buf = atomic_fetchadd_int(&allocated_lprof_buf, 1);
311			/* Just exit if we cannot get a trace buffer */
312			if (buf >= LPROF_HASH_SIZE) {
313				++lock_prof_rejected;
314				return;
315			}
316			mpp->file = p;
317			mpp->line = l->lpo_lineno;
318			mpp->namehash = l->lpo_namehash;
319			mpp->type = l->lpo_type;
320			mpp->name = lo->lo_name;
321
322			if (collision)
323				++lock_prof_collisions;
324
325			/*
326			 * We might have raced someone else but who cares,
327			 * they'll try again next time
328			 */
329			++lock_prof_records;
330		}
331		LPROF_LOCK(hash);
332		/*
333		 * Record if the lock has been held longer now than ever
334		 * before.
335		 */
336		if (now - acqtime > mpp->cnt_max)
337			mpp->cnt_max = now - acqtime;
338		mpp->cnt_tot += now - acqtime;
339		mpp->cnt_wait += waittime;
340		mpp->cnt_cur++;
341		/*
342		 * There's a small race, really we should cmpxchg
343		 * 0 with the current value, but that would bill
344		 * the contention to the wrong lock instance if
345		 * it followed this also.
346		 */
347		mpp->cnt_contest_holding += l->lpo_contest_holding;
348		mpp->cnt_contest_locking += l->lpo_contest_locking;
349		LPROF_UNLOCK(hash);
350
351	}
352	l->lpo_acqtime = 0;
353	l->lpo_waittime = 0;
354	l->lpo_contest_locking = 0;
355	l->lpo_contest_holding = 0;
356}
357#endif
131
132/*
133 * One object per-thread for each lock the thread owns. Tracks individual
134 * lock instances.
135 */
136struct lock_profile_object {
137 LIST_ENTRY(lock_profile_object) lpo_link;
138 struct lock_object *lpo_obj;
139 const char *lpo_file;
140 int lpo_line;
141 uint16_t lpo_ref;
142 uint16_t lpo_cnt;
143 u_int64_t lpo_acqtime;
144 u_int64_t lpo_waittime;
145 u_int lpo_contest_locking;
146};
147
148/*
149 * One lock_prof for each (file, line, lock object) triple.
150 */
151struct lock_prof {
152 SLIST_ENTRY(lock_prof) link;
153 const char *file;
154 const char *name;
155 int line;
156 int ticks;
157 const char *type;
158 uintmax_t cnt_max;
159 uintmax_t cnt_tot;
160 uintmax_t cnt_wait;
161 uintmax_t cnt_cur;
162 uintmax_t cnt_contest_locking;
163};
164
165SLIST_HEAD(lphead, lock_prof);
166
167#define LPROF_HASH_SIZE 4096
168#define LPROF_HASH_MASK (LPROF_HASH_SIZE - 1)
169#define LPROF_CACHE_SIZE 4096
170
171/*
172 * Array of objects and profs for each type of object for each cpu. Spinlocks
173 * are handled separately because a thread may be preempted and acquire a
174 * spinlock while in the lock profiling code of a non-spinlock. In this way
175 * we only need a critical section to protect the per-cpu lists.
176 */
177struct lock_prof_type {
178 struct lphead lpt_lpalloc;
179 struct lpohead lpt_lpoalloc;
180 struct lphead lpt_hash[LPROF_HASH_SIZE];
181 struct lock_prof lpt_prof[LPROF_CACHE_SIZE];
182 struct lock_profile_object lpt_objs[LPROF_CACHE_SIZE];
183};
184
185struct lock_prof_cpu {
186 struct lock_prof_type lpc_types[2]; /* One for spin one for other. */
187};
188
189struct lock_prof_cpu *lp_cpu[MAXCPU];
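/*
 * Illustrative note: lpc_types[0] is used for sleepable locks and
 * lpc_types[1] for spin locks, selected by the callers below with
 *
 *	spin = LOCK_CLASS(lo) == &lock_class_mtx_spin;
 *	type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
 *
 * Segregating the pools means a spin lock taken from an interrupt that
 * preempts the profiling code never touches the list the preempted
 * thread was manipulating, so critical_enter() suffices as protection.
 */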
190
191int lock_prof_enable = 0;
192
193/* SWAG: sbuf size = avg stat. line size * number of locks */
194#define LPROF_SBUF_SIZE 256 * 400
195
196static int lock_prof_rejected;
197static int lock_prof_skipspin;
198static int lock_prof_skipcount;
199
200#ifndef USE_CPU_NANOSECONDS
201u_int64_t
202nanoseconds(void)
203{
204 struct bintime bt;
205 u_int64_t ns;
206
207 binuptime(&bt);
208 /* From bintime2timespec */
209 ns = bt.sec * (u_int64_t)1000000000;
210 ns += ((uint64_t)1000000000 * (uint32_t)(bt.frac >> 32)) >> 32;
211 return (ns);
212}
213#endif
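/*
 * Illustrative note: bt.frac is a 64-bit binary fraction of one second,
 * so the exact conversion is ns = frac * 10^9 / 2^64.  The expression
 * above approximates that without a 128-bit multiply:
 *
 *	ns += (10^9 * (frac >> 32)) >> 32;
 *
 * which discards only the low 32 fraction bits (about a nanosecond of
 * truncation error).  Note also the switch from the old nanotime()
 * (wall clock, which can step when the time is set) to the monotonic
 * binuptime(), which keeps measured hold times from going negative
 * across a clock adjustment.
 */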
214
215static void
216lock_prof_init_type(struct lock_prof_type *type)
217{
218 int i;
219
220 SLIST_INIT(&type->lpt_lpalloc);
221 LIST_INIT(&type->lpt_lpoalloc);
222 for (i = 0; i < LPROF_CACHE_SIZE; i++) {
223 SLIST_INSERT_HEAD(&type->lpt_lpalloc, &type->lpt_prof[i],
224 link);
225 LIST_INSERT_HEAD(&type->lpt_lpoalloc, &type->lpt_objs[i],
226 lpo_link);
227 }
228}
229
230static void
231lock_prof_init(void *arg)
232{
233 int cpu;
234
235 for (cpu = 0; cpu <= mp_maxid; cpu++) {
236 lp_cpu[cpu] = malloc(sizeof(*lp_cpu[cpu]), M_DEVBUF,
237 M_WAITOK | M_ZERO);
238 lock_prof_init_type(&lp_cpu[cpu]->lpc_types[0]);
239 lock_prof_init_type(&lp_cpu[cpu]->lpc_types[1]);
240 }
241}
242SYSINIT(lockprof, SI_SUB_SMP, SI_ORDER_ANY, lock_prof_init, NULL);
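/*
 * Illustrative note: running at SI_SUB_SMP means all CPUs are up and
 * mp_maxid is stable before the pools are allocated, and M_WAITOK makes
 * the allocations reliable.  The fast paths dereference lp_cpu[] without
 * a NULL check; they are safe only because lock_prof_enable stays 0
 * until a sysctl flips it, well after this SYSINIT has run.
 */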
243
244static void
245lock_prof_reset(void)
246{
247 struct lock_prof_cpu *lpc;
248 int enabled, i, cpu;
249
250 enabled = lock_prof_enable;
251 lock_prof_enable = 0;
252 for (cpu = 0; cpu <= mp_maxid; cpu++) {
253 lpc = lp_cpu[cpu];
254 for (i = 0; i < LPROF_CACHE_SIZE; i++) {
255 LIST_REMOVE(&lpc->lpc_types[0].lpt_objs[i], lpo_link);
256 LIST_REMOVE(&lpc->lpc_types[1].lpt_objs[i], lpo_link);
257 }
258 bzero(lpc, sizeof(*lpc));
259 lock_prof_init_type(&lpc->lpc_types[0]);
260 lock_prof_init_type(&lpc->lpc_types[1]);
261 }
262 lock_prof_enable = enabled;
263}
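/*
 * Illustrative note: the LIST_REMOVE() loop unhooks every object from
 * whatever list it is on (a free pool or some thread's td_lprof) before
 * the per-CPU state is bzero'ed and rebuilt, so no stale linkage
 * survives the reset.  Clearing lock_prof_enable only discourages
 * concurrent updates rather than excluding them; this is a best-effort
 * statistics reset.
 */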
264
265static void
266lock_prof_output(struct lock_prof *lp, struct sbuf *sb)
267{
268 const char *p;
269
270 for (p = lp->file; p != NULL && strncmp(p, "../", 3) == 0; p += 3);
271 sbuf_printf(sb,
272 "%6ju %12ju %12ju %11ju %5ju %5ju %12ju %12ju %s:%d (%s:%s)\n",
273 lp->cnt_max / 1000, lp->cnt_tot / 1000,
274 lp->cnt_wait / 1000, lp->cnt_cur,
275 lp->cnt_cur == 0 ? (uintmax_t)0 :
276 lp->cnt_tot / (lp->cnt_cur * 1000),
277 lp->cnt_cur == 0 ? (uintmax_t)0 :
278 lp->cnt_wait / (lp->cnt_cur * 1000),
279 (uintmax_t)0, lp->cnt_contest_locking,
280 p, lp->line, lp->type, lp->name);
281}
282
283static void
284lock_prof_sum(struct lock_prof *match, struct lock_prof *dst, int hash,
285 int spin, int t)
286{
287 struct lock_prof_type *type;
288 struct lock_prof *l;
289 int cpu;
290
291 dst->file = match->file;
292 dst->line = match->line;
293 dst->type = match->type;
294 dst->name = match->name;
295
296 for (cpu = 0; cpu <= mp_maxid; cpu++) {
297 if (lp_cpu[cpu] == NULL)
298 continue;
299 type = &lp_cpu[cpu]->lpc_types[spin];
300 SLIST_FOREACH(l, &type->lpt_hash[hash], link) {
301 if (l->ticks == t)
302 continue;
303 if (l->file != match->file || l->line != match->line ||
304 l->name != match->name || l->type != match->type)
305 continue;
306 l->ticks = t;
307 if (l->cnt_max > dst->cnt_max)
308 dst->cnt_max = l->cnt_max;
309 dst->cnt_tot += l->cnt_tot;
310 dst->cnt_wait += l->cnt_wait;
311 dst->cnt_cur += l->cnt_cur;
312 dst->cnt_contest_locking += l->cnt_contest_locking;
313 }
314 }
315
316}
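/*
 * Illustrative note: the same (file, line, name) triple has a separate
 * record on every CPU.  lock_prof_sum() folds all of them into one
 * output record and stamps each merged record with the caller's ticks
 * snapshot (t), which lock_prof_type_stats() uses to skip records that
 * were already accounted for in an earlier iteration of its hash walk.
 */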
317
318static void
319lock_prof_type_stats(struct lock_prof_type *type, struct sbuf *sb, int spin,
320 int t)
321{
322 struct lock_prof *l;
323 int i;
324
325 for (i = 0; i < LPROF_HASH_SIZE; ++i) {
326 SLIST_FOREACH(l, &type->lpt_hash[i], link) {
327 struct lock_prof lp = {};
328
329 if (l->ticks == t)
330 continue;
331 lock_prof_sum(l, &lp, i, spin, t);
332 lock_prof_output(&lp, sb);
333 if (sbuf_overflowed(sb))
334 return;
335 }
336 }
337}
338
339static int
340dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
341{
342 static int multiplier = 1;
343 struct sbuf *sb;
344 int error, cpu, t;
345
346retry_sbufops:
347 sb = sbuf_new(NULL, NULL, LPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
348 sbuf_printf(sb, "\n%6s %12s %12s %11s %5s %5s %12s %12s %s\n",
349 "max", "total", "wait_total", "count", "avg", "wait_avg", "cnt_hold", "cnt_lock", "name");
350 t = ticks;
351 for (cpu = 0; cpu <= mp_maxid; cpu++) {
352 if (lp_cpu[cpu] == NULL)
353 continue;
354 lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[0], sb, 0, t);
355 lock_prof_type_stats(&lp_cpu[cpu]->lpc_types[1], sb, 1, t);
356 if (sbuf_overflowed(sb)) {
357 sbuf_delete(sb);
358 multiplier++;
359 goto retry_sbufops;
360 }
361 }
362
363 sbuf_finish(sb);
364 error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
365 sbuf_delete(sb);
366 return (error);
367}
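/*
 * Illustrative note: the sbuf is fixed-length (SBUF_FIXEDLEN), so an
 * overflowing report is thrown away and regenerated with a larger
 * buffer; the static multiplier remembers the size that finally fit
 * for subsequent sysctl reads.
 */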
368
369static int
370enable_lock_prof(SYSCTL_HANDLER_ARGS)
371{
372 int error, v;
373
374 v = lock_prof_enable;
375 error = sysctl_handle_int(oidp, &v, v, req);
376 if (error)
377 return (error);
378 if (req->newptr == NULL)
379 return (error);
380 if (v == lock_prof_enable)
381 return (0);
382 if (v == 1)
383 lock_prof_reset();
384 lock_prof_enable = !!v;
385
386 return (0);
387}
388
389static int
390reset_lock_prof_stats(SYSCTL_HANDLER_ARGS)
391{
392 int error, v;
393
394 v = 0;
395 error = sysctl_handle_int(oidp, &v, 0, req);
396 if (error)
397 return (error);
398 if (req->newptr == NULL)
399 return (error);
400 if (v == 0)
401 return (0);
402 lock_prof_reset();
403
404 return (0);
405}
406
407static struct lock_prof *
408lock_profile_lookup(struct lock_object *lo, int spin, const char *file,
409 int line)
410{
411 const char *unknown = "(unknown)";
412 struct lock_prof_type *type;
413 struct lock_prof *lp;
414 struct lphead *head;
415 const char *p;
416 u_int hash;
417
418 p = file;
419 if (p == NULL || *p == '\0')
420 p = unknown;
421 hash = (uintptr_t)lo->lo_name * 31 + (uintptr_t)p * 31 + line;
422 hash &= LPROF_HASH_MASK;
423 type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
424 head = &type->lpt_hash[hash];
425 SLIST_FOREACH(lp, head, link) {
426 if (lp->line == line && lp->file == p &&
427 lp->name == lo->lo_name)
428 return (lp);
429
430 }
431 lp = SLIST_FIRST(&type->lpt_lpalloc);
432 if (lp == NULL) {
433 lock_prof_rejected++;
434 return (lp);
435 }
436 SLIST_REMOVE_HEAD(&type->lpt_lpalloc, link);
437 lp->file = p;
438 lp->line = line;
439 lp->type = lo->lo_type;
440 lp->name = lo->lo_name;
441 SLIST_INSERT_HEAD(&type->lpt_hash[hash], lp, link);
442 return (lp);
443}
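/*
 * A minimal standalone sketch of the hashing above (lp_hash is a
 * hypothetical name, not a kernel function):
 *
 *	#include <stdint.h>
 *
 *	#define LPROF_HASH_SIZE	4096
 *	#define LPROF_HASH_MASK	(LPROF_HASH_SIZE - 1)
 *
 *	static unsigned int
 *	lp_hash(const char *name, const char *file, int line)
 *	{
 *		return (((uintptr_t)name * 31 + (uintptr_t)file * 31 +
 *		    line) & LPROF_HASH_MASK);
 *	}
 *
 * It hashes the pointer values, not the characters: lo_name and
 * __FILE__ point at string literals, so equal strings normally share
 * one address and the pointer compares in the SLIST_FOREACH() above
 * are sufficient.  When the lpt_lpalloc pool is empty the sample is
 * dropped and counted in lock_prof_rejected rather than allocating
 * memory on a lock path.
 */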
444
445static struct lock_profile_object *
446lock_profile_object_lookup(struct lock_object *lo, int spin, const char *file,
447 int line)
448{
449 struct lock_profile_object *l;
450 struct lock_prof_type *type;
451 struct lpohead *head;
452
453 head = &curthread->td_lprof[spin];
454 LIST_FOREACH(l, head, lpo_link)
455 if (l->lpo_obj == lo && l->lpo_file == file &&
456 l->lpo_line == line)
457 return (l);
458 critical_enter();
459 type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
460 l = LIST_FIRST(&type->lpt_lpoalloc);
461 if (l == NULL) {
462 lock_prof_rejected++;
463 critical_exit();
464 return (NULL);
465 }
466 LIST_REMOVE(l, lpo_link);
467 critical_exit();
468 l->lpo_obj = lo;
469 l->lpo_file = file;
470 l->lpo_line = line;
471 l->lpo_cnt = 0;
472 LIST_INSERT_HEAD(head, l, lpo_link);
473
474 return (l);
475}
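/*
 * Illustrative note: td_lprof gives each thread a private list of the
 * locks it currently holds, so this lookup usually runs lock-free.
 * lpo_ref counts recursive acquisitions from the same file:line; the
 * callers time only the outermost acquire/release pair (see the
 * lpo_ref tests below and in lock_profile_release_lock()).  The
 * critical section is needed only while dipping into the per-CPU free
 * pool.
 */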
476
477void
478lock_profile_obtain_lock_success(struct lock_object *lo, int contested,
479 uint64_t waittime, const char *file, int line)
480{
481 static int lock_prof_count;
482 struct lock_profile_object *l;
483 int spin;
484
485 /* don't reset the timer when/if recursing */
486 if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
487 return;
488 if (lock_prof_skipcount &&
489	    (++lock_prof_count % lock_prof_skipcount) != 0)
490 return;
491 spin = LOCK_CLASS(lo) == &lock_class_mtx_spin;
492 if (spin && lock_prof_skipspin == 1)
493 return;
494 l = lock_profile_object_lookup(lo, spin, file, line);
495 if (l == NULL)
496 return;
497 l->lpo_cnt++;
498 if (++l->lpo_ref > 1)
499 return;
500 l->lpo_contest_locking = contested;
501 l->lpo_acqtime = nanoseconds();
502 if (waittime && (l->lpo_acqtime > waittime))
503 l->lpo_waittime = l->lpo_acqtime - waittime;
504 else
505 l->lpo_waittime = 0;
506}
507
508void
509lock_profile_release_lock(struct lock_object *lo)
510{
511 struct lock_profile_object *l;
512 struct lock_prof_type *type;
513 struct lock_prof *lp;
514 u_int64_t holdtime;
515 struct lpohead *head;
516 int spin;
517
518 if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
519 return;
520 spin = LOCK_CLASS(lo) == &lock_class_mtx_spin;
521 head = &curthread->td_lprof[spin];
522 critical_enter();
523 LIST_FOREACH(l, head, lpo_link)
524 if (l->lpo_obj == lo)
525 break;
526 if (l == NULL)
527 goto out;
528 if (--l->lpo_ref > 0)
529 goto out;
530 lp = lock_profile_lookup(lo, spin, l->lpo_file, l->lpo_line);
531 if (lp == NULL)
532 goto release;
533 holdtime = nanoseconds() - l->lpo_acqtime;
534	if ((int64_t)holdtime < 0)
535 goto release;
536 /*
537 * Record if the lock has been held longer now than ever
538 * before.
539 */
540 if (holdtime > lp->cnt_max)
541 lp->cnt_max = holdtime;
542 lp->cnt_tot += holdtime;
543 lp->cnt_wait += l->lpo_waittime;
544 lp->cnt_contest_locking += l->lpo_contest_locking;
545 lp->cnt_cur += l->lpo_cnt;
546release:
547 LIST_REMOVE(l, lpo_link);
548 type = &lp_cpu[PCPU_GET(cpuid)]->lpc_types[spin];
549 LIST_INSERT_HEAD(&type->lpt_lpoalloc, l, lpo_link);
550out:
551 critical_exit();
552}
553
554SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD, NULL, "lock debugging");
555SYSCTL_NODE(_debug_lock, OID_AUTO, prof, CTLFLAG_RD, NULL, "lock profiling");
556SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipspin, CTLFLAG_RW,
557 &lock_prof_skipspin, 0, "Skip profiling on spinlocks.");
558SYSCTL_INT(_debug_lock_prof, OID_AUTO, skipcount, CTLFLAG_RW,
559 &lock_prof_skipcount, 0, "Sample approximately every N lock acquisitions.");
560SYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD,
561 &lock_prof_rejected, 0, "Number of rejected profiling records");
562SYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
563 NULL, 0, dump_lock_prof_stats, "A", "Lock profiling statistics");
564SYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
565 NULL, 0, reset_lock_prof_stats, "I", "Reset lock profiling statistics");
566SYSCTL_PROC(_debug_lock_prof, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
567 NULL, 0, enable_lock_prof, "I", "Enable lock profiling");
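/*
 * Typical usage from userland, assuming a kernel built with
 * "options LOCK_PROFILING" (the sysctl names follow directly from the
 * declarations above):
 *
 *	sysctl debug.lock.prof.enable=1		# start sampling
 *	(run the workload of interest)
 *	sysctl debug.lock.prof.enable=0
 *	sysctl debug.lock.prof.stats		# dump the report
 *	sysctl debug.lock.prof.reset=1		# discard accumulated data
 */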
568
569#endif