--- sys/kern/subr_lock.c (r154941)
+++ sys/kern/subr_lock.c (r164159)
 /*-
  * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright

[... 19 unchanged lines hidden ...]

  */

 /*
  * This module holds the global variables and functions used to maintain
  * lock_object structures.
  */

 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/kern/subr_lock.c 154941 2006-01-27 23:13:26Z jhb $");
+__FBSDID("$FreeBSD: head/sys/kern/subr_lock.c 164159 2006-11-11 03:18:07Z kmacy $");

 #include "opt_ddb.h"
+#include "opt_mprof.h"

 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/ktr.h>
 #include <sys/linker_set.h>
 #include <sys/lock.h>
+#include <sys/sbuf.h>
+#include <sys/sysctl.h>
+#include <sys/lock_profile.h>

 #ifdef DDB
 #include <ddb/ddb.h>
 #endif

 CTASSERT(LOCK_CLASS_MAX == 15);

 struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = {
         &lock_class_mtx_spin,
         &lock_class_mtx_sleep,
         &lock_class_sx,
         &lock_class_rw,
 };

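The CTASSERT above pins LOCK_CLASS_MAX to 15 because the class index is stored in four bits of lo_flags and decoded by table lookup into lock_classes[]. A minimal sketch of that decoding, with mask and shift values assumed from the sys/lock.h of this era (the names are assumptions, not shown in this diff):

        /* Assumed: bits 24-27 of lo_flags carry the index into lock_classes[]. */
        #define LO_CLASSSHIFT   24
        #define LO_CLASSMASK    (0xf << LO_CLASSSHIFT) /* so LOCK_CLASS_MAX == 15 */

        #define LOCK_CLASS(lock) \
                (lock_classes[((lock)->lo_flags & LO_CLASSMASK) >> LO_CLASSSHIFT])

Only the first four slots are populated here; lock_init() (further down) records a lock's class index in lo_flags, and the remaining slots stay reserved for classes registered later.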
+#ifdef LOCK_PROFILING
+#include <machine/cpufunc.h>
+
+SYSCTL_NODE(_debug, OID_AUTO, lock, CTLFLAG_RD, NULL, "lock debugging");
+SYSCTL_NODE(_debug_lock, OID_AUTO, prof, CTLFLAG_RD, NULL, "lock profiling");
+int lock_prof_enable = 0;
+SYSCTL_INT(_debug_lock_prof, OID_AUTO, enable, CTLFLAG_RW,
+    &lock_prof_enable, 0, "Enable lock profiling");
+
+/*
+ * lprof_buf is a static pool of profiling records to avoid possible
+ * reentrance of the memory allocation functions.
+ *
+ * Note: NUM_LPROF_BUFFERS must be smaller than LPROF_HASH_SIZE.
+ */
+struct lock_prof lprof_buf[LPROF_HASH_SIZE];
+static int allocated_lprof_buf;
+struct mtx lprof_locks[LPROF_LOCK_SIZE];
+
+/* SWAG: sbuf size = avg stat. line size * number of locks */
+#define LPROF_SBUF_SIZE (256 * 400)
+
+static int lock_prof_acquisitions;
+SYSCTL_INT(_debug_lock_prof, OID_AUTO, acquisitions, CTLFLAG_RD,
+    &lock_prof_acquisitions, 0, "Number of mutex acquisitions recorded");
+static int lock_prof_records;
+SYSCTL_INT(_debug_lock_prof, OID_AUTO, records, CTLFLAG_RD,
+    &lock_prof_records, 0, "Number of profiling records");
+static int lock_prof_maxrecords = LPROF_HASH_SIZE;
+SYSCTL_INT(_debug_lock_prof, OID_AUTO, maxrecords, CTLFLAG_RD,
+    &lock_prof_maxrecords, 0, "Maximum number of profiling records");
+static int lock_prof_rejected;
+SYSCTL_INT(_debug_lock_prof, OID_AUTO, rejected, CTLFLAG_RD,
+    &lock_prof_rejected, 0, "Number of rejected profiling records");
+static int lock_prof_hashsize = LPROF_HASH_SIZE;
+SYSCTL_INT(_debug_lock_prof, OID_AUTO, hashsize, CTLFLAG_RD,
+    &lock_prof_hashsize, 0, "Hash size");
+static int lock_prof_collisions = 0;
+SYSCTL_INT(_debug_lock_prof, OID_AUTO, collisions, CTLFLAG_RD,
+    &lock_prof_collisions, 0, "Number of hash collisions");
+
+#ifndef USE_CPU_NANOSECONDS
+static u_int64_t
+nanoseconds(void)
+{
+        struct timespec tv;
+
+        nanotime(&tv);
+        return (tv.tv_sec * (u_int64_t)1000000000 + tv.tv_nsec);
+}
+#endif
+
+static int
+dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
+{
+        struct sbuf *sb;
+        int error, i;
+        static int multiplier = 1;
+        const char *p;
+
+        if (allocated_lprof_buf == 0)
+                return (SYSCTL_OUT(req, "No locking recorded",
+                    sizeof("No locking recorded")));
+
+retry_sbufops:
+        sb = sbuf_new(NULL, NULL, LPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
+        sbuf_printf(sb, "\n%6s %12s %12s %11s %5s %5s %12s %12s %s\n",
+            "max", "total", "wait_total", "count", "avg", "wait_avg",
+            "cnt_hold", "cnt_lock", "name");
+        for (i = 0; i < LPROF_HASH_SIZE; ++i) {
+                if (lprof_buf[i].name == NULL)
+                        continue;
+                for (p = lprof_buf[i].file;
+                    p != NULL && strncmp(p, "../", 3) == 0; p += 3)
+                        /* nothing */ ;
+                sbuf_printf(sb, "%6ju %12ju %12ju %11ju %5ju %5ju %12ju %12ju %s:%d (%s)\n",
+                    lprof_buf[i].cnt_max / 1000,
+                    lprof_buf[i].cnt_tot / 1000,
+                    lprof_buf[i].cnt_wait / 1000,
+                    lprof_buf[i].cnt_cur,
+                    lprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
+                        lprof_buf[i].cnt_tot / (lprof_buf[i].cnt_cur * 1000),
+                    lprof_buf[i].cnt_cur == 0 ? (uintmax_t)0 :
+                        lprof_buf[i].cnt_wait / (lprof_buf[i].cnt_cur * 1000),
+                    lprof_buf[i].cnt_contest_holding,
+                    lprof_buf[i].cnt_contest_locking,
+                    p, lprof_buf[i].line, lprof_buf[i].name);
+                if (sbuf_overflowed(sb)) {
+                        sbuf_delete(sb);
+                        multiplier++;
+                        goto retry_sbufops;
+                }
+        }
+
+        sbuf_finish(sb);
+        error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
+        sbuf_delete(sb);
+        return (error);
+}
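dump_lock_prof_stats() sizes a fixed-length sbuf from a guess and, whenever sbuf_overflowed() reports truncation, throws the buffer away, bumps the multiplier, and rebuilds the report from the top. The same grow-and-retry idea in a self-contained userland sketch, using vsnprintf in place of the kernel sbuf API (all names here are illustrative, not from the kernel):

        #include <stdarg.h>
        #include <stdio.h>
        #include <stdlib.h>

        /* Append formatted text to a growable buffer; double it until it fits. */
        static int
        buf_printf(char **buf, size_t *len, size_t *cap, const char *fmt, ...)
        {
                va_list ap;
                char *nbuf;
                int n;

                for (;;) {
                        va_start(ap, fmt);
                        n = vsnprintf(*buf + *len, *cap - *len, fmt, ap);
                        va_end(ap);
                        if (n < 0)
                                return (-1);
                        if ((size_t)n < *cap - *len) {  /* it fit */
                                *len += (size_t)n;
                                return (0);
                        }
                        nbuf = realloc(*buf, *cap * 2); /* overflowed: grow, retry */
                        if (nbuf == NULL)
                                return (-1);
                        *buf = nbuf;
                        *cap *= 2;
                }
        }

        int
        main(void)
        {
                size_t len = 0, cap = 16;
                char *buf = malloc(cap);

                if (buf == NULL || buf_printf(&buf, &len, &cap,
                    "%6s %12s %s\n", "max", "total", "name") != 0)
                        return (1);
                fputs(buf, stdout);
                free(buf);
                return (0);
        }

Unlike this sketch, an SBUF_FIXEDLEN sbuf can never grow in place, which is why the handler above restarts from scratch rather than appending.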
+
+static int
+reset_lock_prof_stats(SYSCTL_HANDLER_ARGS)
+{
+        int error, v;
+
+        if (allocated_lprof_buf == 0)
+                return (0);
+
+        v = 0;
+        error = sysctl_handle_int(oidp, &v, 0, req);
+        if (error)
+                return (error);
+        if (req->newptr == NULL)
+                return (error);
+        if (v == 0)
+                return (0);
+
+        bzero(lprof_buf, LPROF_HASH_SIZE * sizeof(*lprof_buf));
+        allocated_lprof_buf = 0;
+        return (0);
+}
+
+SYSCTL_PROC(_debug_lock_prof, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
+    NULL, 0, dump_lock_prof_stats, "A", "Mutex profiling statistics");
+
+SYSCTL_PROC(_debug_lock_prof, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
+    NULL, 0, reset_lock_prof_stats, "I", "Reset mutex profiling statistics");
+#endif
+
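With these nodes registered, the statistics are reachable from userland as debug.lock.prof.stats, and the reset knob as debug.lock.prof.reset, e.g. via sysctl(8). A minimal C reader, sketched with the standard sysctlbyname(3) size-then-fetch idiom (error handling trimmed):

        #include <sys/types.h>
        #include <sys/sysctl.h>
        #include <stdio.h>
        #include <stdlib.h>

        int
        main(void)
        {
                size_t len;
                char *buf;
                int one = 1;

                /* Ask for the size first, then fetch debug.lock.prof.stats. */
                if (sysctlbyname("debug.lock.prof.stats", NULL, &len, NULL, 0) == -1)
                        return (1);
                buf = malloc(len);
                if (buf == NULL ||
                    sysctlbyname("debug.lock.prof.stats", buf, &len, NULL, 0) == -1)
                        return (1);
                fputs(buf, stdout);
                free(buf);

                /* Writing a non-zero int to debug.lock.prof.reset clears the table. */
                if (sysctlbyname("debug.lock.prof.reset", NULL, NULL, &one,
                    sizeof(one)) == -1)
                        return (1);
                return (0);
        }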
 void
 lock_init(struct lock_object *lock, struct lock_class *class, const char *name,
     const char *type, int flags)
 {
         int i;

         /* Check for double-init and zero object. */
         KASSERT(!lock_initalized(lock), ("lock \"%s\" %p already initialized",

[... 41 unchanged lines hidden ...]

         class = LOCK_CLASS(lock);
         db_printf(" class: %s\n", class->lc_name);
         db_printf(" name: %s\n", lock->lo_name);
         if (lock->lo_type && lock->lo_type != lock->lo_name)
                 db_printf(" type: %s\n", lock->lo_type);
         class->lc_ddb_show(lock);
 }
 #endif
+
+#ifdef LOCK_PROFILING
+void
+_lock_profile_obtain_lock_success(struct lock_object *lo, uint64_t waittime,
+    const char *file, int line)
+{
+        struct lock_profile_object *l = &lo->lo_profile_obj;
+
+        /* Don't reset the timer when/if recursing. */
+        if (l->lpo_acqtime == 0) {
+                l->lpo_filename = file;
+                l->lpo_lineno = line;
+                l->lpo_acqtime = nanoseconds();
+                if (waittime && l->lpo_acqtime > waittime)
+                        l->lpo_waittime = l->lpo_acqtime - waittime;
+        }
+}
+
+void
+_lock_profile_update_wait(struct lock_object *lo, uint64_t waitstart)
+{
+        struct lock_profile_object *l = &lo->lo_profile_obj;
+
+        if (lock_prof_enable && waitstart) {
+                uint64_t now, waittime;
+                struct lock_prof *mpp;
+                u_int hash;
+                const char *p = l->lpo_filename;
+                int collision = 0;
+
+                now = nanoseconds();
+                if (now < waitstart)
+                        return;
+                waittime = now - waitstart;
+                hash = (l->lpo_namehash * 31 * 31 + (uintptr_t)p * 31 +
+                    l->lpo_lineno) & LPROF_HASH_MASK;
+
+                mpp = &lprof_buf[hash];
+                while (mpp->name != NULL) {
+                        if (mpp->line == l->lpo_lineno &&
+                            mpp->file == p &&
+                            mpp->namehash == l->lpo_namehash)
+                                break;
+                        /*
+                         * If the lprof_buf entry is allocated to someone
+                         * else, try the next one.
+                         */
+                        collision = 1;
+                        CTR4(KTR_SPARE1, "Hash collision, %s:%d %s(%x)",
+                            mpp->file, mpp->line, mpp->name, mpp->namehash);
+                        hash = (hash + 1) & LPROF_HASH_MASK;
+                        mpp = &lprof_buf[hash];
+                }
+                if (mpp->name == NULL) {
+                        int buf;
+
+                        buf = atomic_fetchadd_int(&allocated_lprof_buf, 1);
+                        /* Just exit if we cannot get a trace buffer. */
+                        if (buf >= LPROF_HASH_SIZE) {
+                                ++lock_prof_rejected;
+                                return;
+                        }
+                        mpp->file = p;
+                        mpp->line = l->lpo_lineno;
+                        mpp->name = lo->lo_name;
+                        mpp->namehash = l->lpo_namehash;
+                        if (collision)
+                                ++lock_prof_collisions;
+                        /*
+                         * We might have raced someone else, but who cares;
+                         * they'll try again next time.
+                         */
+                        ++lock_prof_records;
+                }
+                LPROF_LOCK(hash);
+                mpp->cnt_wait += waittime;
+                LPROF_UNLOCK(hash);
+        }
+}
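Both this function and the release path below locate a record the same way: a multiply-and-add hash of the lock's precomputed name hash, its file-name pointer, and its line number, then linear probing until a matching or free slot turns up. A self-contained sketch of just that lookup (the 31-based mixing and the power-of-two mask mirror the code above; the record layout and table size are simplified stand-ins):

        #include <stdint.h>
        #include <stddef.h>

        #define LPROF_HASH_SIZE 4096            /* must be a power of two */
        #define LPROF_HASH_MASK (LPROF_HASH_SIZE - 1)

        struct prof_rec {
                const char      *name;          /* NULL means the slot is free */
                const char      *file;
                int             line;
                unsigned int    namehash;
        };

        static struct prof_rec table[LPROF_HASH_SIZE];

        /*
         * Find the record for (namehash, file, line), or the free slot where
         * it would be inserted; linear probing resolves collisions.
         */
        static struct prof_rec *
        prof_lookup(unsigned int namehash, const char *file, int line)
        {
                unsigned int hash;
                struct prof_rec *r;

                hash = (namehash * 31 * 31 + (uintptr_t)file * 31 + line) &
                    LPROF_HASH_MASK;
                r = &table[hash];
                while (r->name != NULL) {
                        if (r->line == line && r->file == file &&
                            r->namehash == namehash)
                                return (r);     /* existing record */
                        hash = (hash + 1) & LPROF_HASH_MASK;
                        r = &table[hash];
                }
                return (r);                     /* first free slot */
        }

Note that files are compared by pointer rather than strcmp(): each __FILE__ literal has a stable address in the kernel, so identity comparison is sufficient and cheap.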
+
+void
+_lock_profile_release_lock(struct lock_object *lo)
+{
+        struct lock_profile_object *l = &lo->lo_profile_obj;
+
+        if (l->lpo_acqtime && !(lo->lo_flags & LO_NOPROFILE)) {
+                const char *unknown = "(unknown)";
+                u_int64_t acqtime, now, waittime;
+                struct lock_prof *mpp;
+                u_int hash;
+                const char *p = l->lpo_filename;
+                int collision = 0;
+
+                now = nanoseconds();
+                acqtime = l->lpo_acqtime;
+                waittime = l->lpo_waittime;
+                if (now <= acqtime)
+                        return;
+                if (p == NULL || *p == '\0')
+                        p = unknown;
+                hash = (l->lpo_namehash * 31 * 31 + (uintptr_t)p * 31 +
+                    l->lpo_lineno) & LPROF_HASH_MASK;
+                CTR5(KTR_SPARE1, "Hashing %s(%x) %s:%d to %d", l->lpo_name,
+                    l->lpo_namehash, p, l->lpo_lineno, hash);
+                mpp = &lprof_buf[hash];
+                while (mpp->name != NULL) {
+                        if (mpp->line == l->lpo_lineno &&
+                            mpp->file == p &&
+                            mpp->namehash == l->lpo_namehash)
+                                break;
+                        /*
+                         * If the lprof_buf entry is allocated to someone
+                         * else, try the next one.
+                         */
+                        collision = 1;
+                        CTR4(KTR_SPARE1, "Hash collision, %s:%d %s(%x)",
+                            mpp->file, mpp->line, mpp->name, mpp->namehash);
+                        hash = (hash + 1) & LPROF_HASH_MASK;
+                        mpp = &lprof_buf[hash];
+                }
+                if (mpp->name == NULL) {
+                        int buf;
+
+                        buf = atomic_fetchadd_int(&allocated_lprof_buf, 1);
+                        /* Just exit if we cannot get a trace buffer. */
+                        if (buf >= LPROF_HASH_SIZE) {
+                                ++lock_prof_rejected;
+                                return;
+                        }
+                        mpp->file = p;
+                        mpp->line = l->lpo_lineno;
+                        mpp->name = lo->lo_name;
+                        mpp->namehash = l->lpo_namehash;
+                        if (collision)
+                                ++lock_prof_collisions;
+                        /*
+                         * We might have raced someone else, but who cares;
+                         * they'll try again next time.
+                         */
+                        ++lock_prof_records;
+                }
+                LPROF_LOCK(hash);
+                /*
+                 * Record if the mutex has been held longer now than ever
+                 * before.
+                 */
+                if (now - acqtime > mpp->cnt_max)
+                        mpp->cnt_max = now - acqtime;
+                mpp->cnt_tot += now - acqtime;
+                mpp->cnt_wait += waittime;
+                mpp->cnt_cur++;
+                /*
+                 * There's a small race here: really we should cmpxchg
+                 * 0 with the current value, but that would bill
+                 * the contention to the wrong lock instance if
+                 * it followed this also.
+                 */
+                mpp->cnt_contest_holding += l->lpo_contest_holding;
+                mpp->cnt_contest_locking += l->lpo_contest_locking;
+                LPROF_UNLOCK(hash);
+        }
+        l->lpo_acqtime = 0;
+        l->lpo_waittime = 0;
+        l->lpo_contest_locking = 0;
+        l->lpo_contest_holding = 0;
+}
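The "small race" comment above concedes that the cnt_contest_* additions read the per-object counters non-atomically while other threads may still be bumping them. The compare-and-swap fix it alludes to would look roughly like the following, sketched with C11 atomics so the example is self-contained (the kernel would use atomic_cmpset_int; the counter here is a stand-in, not the real lpo_contest_holding field):

        #include <stdatomic.h>

        /* Stand-in for a per-lock contention counter that writers increment. */
        static _Atomic unsigned int contest_holding;

        /*
         * Atomically claim the accumulated count: swap the observed value
         * for zero so a concurrent increment is never lost or billed twice.
         */
        static unsigned int
        claim_contest(void)
        {
                unsigned int c = atomic_load(&contest_holding);

                while (c != 0 &&
                    !atomic_compare_exchange_weak(&contest_holding, &c, 0))
                        ;       /* c now holds the current value; retry */
                return (c);     /* caller adds this to the profiling record */
        }

As the original comment notes, even this only moves the problem: contention arriving after the swap would be billed to the next release of the lock instance.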
+#endif