// SPDX-License-Identifier: GPL-2.0
/*
 * kernel/lockdep_proc.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * Code for /proc/lockdep and /proc/lockdep_stats:
 *
 */
#include <linux/export.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/uaccess.h>
#include <asm/div64.h>

#include "lockdep_internals.h"

/*
 * Since iteration of lock_classes is done without holding the lockdep lock,
 * it is not safe to iterate the all_lock_classes list directly as the
 * iteration may branch off to free_lock_classes or the zapped list. Iteration
 * is done directly on the lock_classes array by checking the
 * lock_classes_in_use bitmap and max_lock_class_idx.
 */
#define iterate_lock_classes(idx, class)				\
	for (idx = 0, class = lock_classes; idx <= max_lock_class_idx;	\
	     idx++, class++)

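/*
 * seq_file iterators for /proc/lockdep: positions map directly onto
 * lock_classes[] indices; slots not set in lock_classes_in_use are
 * skipped in l_show().
 */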
static void *l_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct lock_class *class = v;

	++class;
	*pos = class - lock_classes;
	return (*pos > max_lock_class_idx) ? NULL : class;
}

static void *l_start(struct seq_file *m, loff_t *pos)
{
	unsigned long idx = *pos;

	if (idx > max_lock_class_idx)
		return NULL;
	return lock_classes + idx;
}

static void l_stop(struct seq_file *m, void *v)
{
}

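/*
 * Print the human-readable name of a lock class. Anonymous classes fall
 * back to the symbol name of their key; named classes get "#<version>"
 * and "/<subclass>" suffixes when applicable.
 */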
static void print_name(struct seq_file *m, struct lock_class *class)
{
	char str[KSYM_NAME_LEN];
	const char *name = class->name;

	if (!name) {
		name = __get_key_name(class->key, str);
		seq_printf(m, "%s", name);
	} else {
		seq_printf(m, "%s", name);
		if (class->name_version > 1)
			seq_printf(m, "#%d", class->name_version);
		if (class->subclass)
			seq_printf(m, "/%d", class->subclass);
	}
}

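/*
 * Show one lock class: its key, optional operation count, forward/backward
 * dependency counts, usage characters, name, and (with PROVE_LOCKING) its
 * immediate (distance == 1) forward dependencies.
 */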
static int l_show(struct seq_file *m, void *v)
{
	struct lock_class *class = v;
	struct lock_list *entry;
	char usage[LOCK_USAGE_CHARS];
	int idx = class - lock_classes;

	if (v == lock_classes)
		seq_printf(m, "all lock classes:\n");

	if (!test_bit(idx, lock_classes_in_use))
		return 0;

	seq_printf(m, "%p", class->key);
#ifdef CONFIG_DEBUG_LOCKDEP
	seq_printf(m, " OPS:%8ld", debug_class_ops_read(class));
#endif
	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class));
		seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));

		get_usage_chars(class, usage);
		seq_printf(m, " %s", usage);
	}

	seq_printf(m, ": ");
	print_name(m, class);
	seq_puts(m, "\n");

	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		list_for_each_entry(entry, &class->locks_after, entry) {
			if (entry->distance == 1) {
				seq_printf(m, " -> [%p] ", entry->class->key);
				print_name(m, entry->class);
				seq_puts(m, "\n");
			}
		}
		seq_puts(m, "\n");
	}

	return 0;
}

static const struct seq_operations lockdep_ops = {
	.start	= l_start,
	.next	= l_next,
	.stop	= l_stop,
	.show	= l_show,
};

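/*
 * /proc/lockdep_chains (PROVE_LOCKING only): iterate the lock_chains[]
 * table, using SEQ_START_TOKEN for the header line.
 */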
#ifdef CONFIG_PROVE_LOCKING
static void *lc_start(struct seq_file *m, loff_t *pos)
{
	if (*pos < 0)
		return NULL;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return lock_chains + (*pos - 1);
}

static void *lc_next(struct seq_file *m, void *v, loff_t *pos)
{
	*pos = lockdep_next_lockchain(*pos - 1) + 1;
	return lc_start(m, pos);
}

static void lc_stop(struct seq_file *m, void *v)
{
}

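/*
 * Show one lock chain: its irq context and the classes held at each depth,
 * skipping classes whose key has already gone away.
 */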
static int lc_show(struct seq_file *m, void *v)
{
	struct lock_chain *chain = v;
	struct lock_class *class;
	int i;
	static const char * const irq_strs[] = {
		[0]			     = "0",
		[LOCK_CHAIN_HARDIRQ_CONTEXT] = "hardirq",
		[LOCK_CHAIN_SOFTIRQ_CONTEXT] = "softirq",
		[LOCK_CHAIN_SOFTIRQ_CONTEXT|
		 LOCK_CHAIN_HARDIRQ_CONTEXT] = "hardirq|softirq",
	};

	if (v == SEQ_START_TOKEN) {
		if (!nr_free_chain_hlocks)
			seq_printf(m, "(buggered) ");
		seq_printf(m, "all lock chains:\n");
		return 0;
	}

	seq_printf(m, "irq_context: %s\n", irq_strs[chain->irq_context]);

	for (i = 0; i < chain->depth; i++) {
		class = lock_chain_get_class(chain, i);
		if (!class->key)
			continue;

		seq_printf(m, "[%p] ", class->key);
		print_name(m, class);
		seq_puts(m, "\n");
	}
	seq_puts(m, "\n");

	return 0;
}

static const struct seq_operations lockdep_chains_ops = {
	.start	= lc_start,
	.next	= lc_next,
	.stop	= lc_stop,
	.show	= lc_show,
};
#endif /* CONFIG_PROVE_LOCKING */

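/*
 * Dump the DEBUG_LOCKDEP event counters (chain lookups, graph walks,
 * hardirq/softirq state transitions) into /proc/lockdep_stats.
 */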
static void lockdep_stats_debug_show(struct seq_file *m)
{
#ifdef CONFIG_DEBUG_LOCKDEP
	unsigned long long hi1 = debug_atomic_read(hardirqs_on_events),
			   hi2 = debug_atomic_read(hardirqs_off_events),
			   hr1 = debug_atomic_read(redundant_hardirqs_on),
			   hr2 = debug_atomic_read(redundant_hardirqs_off),
			   si1 = debug_atomic_read(softirqs_on_events),
			   si2 = debug_atomic_read(softirqs_off_events),
			   sr1 = debug_atomic_read(redundant_softirqs_on),
			   sr2 = debug_atomic_read(redundant_softirqs_off);

	seq_printf(m, " chain lookup misses:           %11llu\n",
		debug_atomic_read(chain_lookup_misses));
	seq_printf(m, " chain lookup hits:             %11llu\n",
		debug_atomic_read(chain_lookup_hits));
	seq_printf(m, " cyclic checks:                 %11llu\n",
		debug_atomic_read(nr_cyclic_checks));
	seq_printf(m, " redundant checks:              %11llu\n",
		debug_atomic_read(nr_redundant_checks));
	seq_printf(m, " redundant links:               %11llu\n",
		debug_atomic_read(nr_redundant));
	seq_printf(m, " find-mask forwards checks:     %11llu\n",
		debug_atomic_read(nr_find_usage_forwards_checks));
	seq_printf(m, " find-mask backwards checks:    %11llu\n",
		debug_atomic_read(nr_find_usage_backwards_checks));

	seq_printf(m, " hardirq on events:             %11llu\n", hi1);
	seq_printf(m, " hardirq off events:            %11llu\n", hi2);
	seq_printf(m, " redundant hardirq ons:         %11llu\n", hr1);
	seq_printf(m, " redundant hardirq offs:        %11llu\n", hr2);
	seq_printf(m, " softirq on events:             %11llu\n", si1);
	seq_printf(m, " softirq off events:            %11llu\n", si2);
	seq_printf(m, " redundant softirq ons:         %11llu\n", sr1);
	seq_printf(m, " redundant softirq offs:        %11llu\n", sr2);
#endif
}

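/*
 * /proc/lockdep_stats: classify every in-use lock class by its usage mask
 * and print overall counters for classes, dependencies, chains and stack
 * traces, together with the configured limits.
 */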
static int lockdep_stats_show(struct seq_file *m, void *v)
{
	unsigned long nr_unused = 0, nr_uncategorized = 0,
		      nr_irq_safe = 0, nr_irq_unsafe = 0,
		      nr_softirq_safe = 0, nr_softirq_unsafe = 0,
		      nr_hardirq_safe = 0, nr_hardirq_unsafe = 0,
		      nr_irq_read_safe = 0, nr_irq_read_unsafe = 0,
		      nr_softirq_read_safe = 0, nr_softirq_read_unsafe = 0,
		      nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
		      sum_forward_deps = 0;

#ifdef CONFIG_PROVE_LOCKING
	struct lock_class *class;
	unsigned long idx;

	iterate_lock_classes(idx, class) {
		if (!test_bit(idx, lock_classes_in_use))
			continue;

		if (class->usage_mask == 0)
			nr_unused++;
		if (class->usage_mask == LOCKF_USED)
			nr_uncategorized++;
		if (class->usage_mask & LOCKF_USED_IN_IRQ)
			nr_irq_safe++;
		if (class->usage_mask & LOCKF_ENABLED_IRQ)
			nr_irq_unsafe++;
		if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
			nr_softirq_safe++;
		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ)
			nr_softirq_unsafe++;
		if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
			nr_hardirq_safe++;
		if (class->usage_mask & LOCKF_ENABLED_HARDIRQ)
			nr_hardirq_unsafe++;
		if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
			nr_irq_read_safe++;
		if (class->usage_mask & LOCKF_ENABLED_IRQ_READ)
			nr_irq_read_unsafe++;
		if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
			nr_softirq_read_safe++;
		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ_READ)
			nr_softirq_read_unsafe++;
		if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
			nr_hardirq_read_safe++;
		if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
			nr_hardirq_read_unsafe++;

		sum_forward_deps += lockdep_count_forward_deps(class);
	}

#ifdef CONFIG_DEBUG_LOCKDEP
	DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused);
#endif

#endif
	seq_printf(m, " lock-classes:                  %11lu [max: %lu]\n",
			nr_lock_classes, MAX_LOCKDEP_KEYS);
	seq_printf(m, " direct dependencies:           %11lu [max: %lu]\n",
			nr_list_entries, MAX_LOCKDEP_ENTRIES);
	seq_printf(m, " indirect dependencies:         %11lu\n",
			sum_forward_deps);

	/*
	 * Total number of dependencies:
	 *
	 * All irq-safe locks may nest inside irq-unsafe locks,
	 * plus all the other known dependencies:
	 */
	seq_printf(m, " all direct dependencies:       %11lu\n",
			nr_irq_unsafe * nr_irq_safe +
			nr_hardirq_unsafe * nr_hardirq_safe +
			nr_list_entries);

#ifdef CONFIG_PROVE_LOCKING
	seq_printf(m, " dependency chains:             %11lu [max: %lu]\n",
			lock_chain_count(), MAX_LOCKDEP_CHAINS);
	seq_printf(m, " dependency chain hlocks used:  %11lu [max: %lu]\n",
			MAX_LOCKDEP_CHAIN_HLOCKS -
			(nr_free_chain_hlocks + nr_lost_chain_hlocks),
			MAX_LOCKDEP_CHAIN_HLOCKS);
	seq_printf(m, " dependency chain hlocks lost:  %11u\n",
			nr_lost_chain_hlocks);
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	seq_printf(m, " in-hardirq chains:             %11u\n",
			nr_hardirq_chains);
	seq_printf(m, " in-softirq chains:             %11u\n",
			nr_softirq_chains);
#endif
	seq_printf(m, " in-process chains:             %11u\n",
			nr_process_chains);
	seq_printf(m, " stack-trace entries:           %11lu [max: %lu]\n",
			nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES);
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
	seq_printf(m, " number of stack traces:        %11llu\n",
		   lockdep_stack_trace_count());
	seq_printf(m, " number of stack hash chains:   %11llu\n",
		   lockdep_stack_hash_count());
#endif
	seq_printf(m, " combined max dependencies:     %11u\n",
			(nr_hardirq_chains + 1) *
			(nr_softirq_chains + 1) *
			(nr_process_chains + 1)
	);
	seq_printf(m, " hardirq-safe locks:            %11lu\n",
			nr_hardirq_safe);
	seq_printf(m, " hardirq-unsafe locks:          %11lu\n",
			nr_hardirq_unsafe);
	seq_printf(m, " softirq-safe locks:            %11lu\n",
			nr_softirq_safe);
	seq_printf(m, " softirq-unsafe locks:          %11lu\n",
			nr_softirq_unsafe);
	seq_printf(m, " irq-safe locks:                %11lu\n",
			nr_irq_safe);
	seq_printf(m, " irq-unsafe locks:              %11lu\n",
			nr_irq_unsafe);

	seq_printf(m, " hardirq-read-safe locks:       %11lu\n",
			nr_hardirq_read_safe);
	seq_printf(m, " hardirq-read-unsafe locks:     %11lu\n",
			nr_hardirq_read_unsafe);
	seq_printf(m, " softirq-read-safe locks:       %11lu\n",
			nr_softirq_read_safe);
	seq_printf(m, " softirq-read-unsafe locks:     %11lu\n",
			nr_softirq_read_unsafe);
	seq_printf(m, " irq-read-safe locks:           %11lu\n",
			nr_irq_read_safe);
	seq_printf(m, " irq-read-unsafe locks:         %11lu\n",
			nr_irq_read_unsafe);

	seq_printf(m, " uncategorized locks:           %11lu\n",
			nr_uncategorized);
	seq_printf(m, " unused locks:                  %11lu\n",
			nr_unused);
	seq_printf(m, " max locking depth:             %11u\n",
			max_lockdep_depth);
#ifdef CONFIG_PROVE_LOCKING
	seq_printf(m, " max bfs queue depth:           %11u\n",
			max_bfs_queue_depth);
#endif
	seq_printf(m, " max lock class index:          %11lu\n",
			max_lock_class_idx);
	lockdep_stats_debug_show(m);
	seq_printf(m, " debug_locks:                   %11u\n",
			debug_locks);

	/*
	 * Zapped classes and lockdep data buffers reuse statistics.
	 */
	seq_puts(m, "\n");
	seq_printf(m, " zapped classes:                %11lu\n",
			nr_zapped_classes);
#ifdef CONFIG_PROVE_LOCKING
	seq_printf(m, " zapped lock chains:            %11lu\n",
			nr_zapped_lock_chains);
	seq_printf(m, " large chain blocks:            %11u\n",
			nr_large_chain_blocks);
#endif
	return 0;
}

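/*
 * /proc/lock_stat (CONFIG_LOCK_STAT): a per-open snapshot of the statistics
 * of every in-use lock class, sorted by contention count.
 */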
#ifdef CONFIG_LOCK_STAT

struct lock_stat_data {
	struct lock_class *class;
	struct lock_class_stats stats;
};

struct lock_stat_seq {
	struct lock_stat_data *iter_end;
	struct lock_stat_data stats[MAX_LOCKDEP_KEYS];
};

/*
 * sort on absolute number of contentions
 */
static int lock_stat_cmp(const void *l, const void *r)
{
	const struct lock_stat_data *dl = l, *dr = r;
	unsigned long nl, nr;

	nl = dl->stats.read_waittime.nr + dl->stats.write_waittime.nr;
	nr = dr->stats.read_waittime.nr + dr->stats.write_waittime.nr;

	return nr - nl;
}

static void seq_line(struct seq_file *m, char c, int offset, int length)
{
	int i;

	for (i = 0; i < offset; i++)
		seq_puts(m, " ");
	for (i = 0; i < length; i++)
		seq_printf(m, "%c", c);
	seq_puts(m, "\n");
}

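/*
 * Format a raw time value as "<value/1000>.<two decimals>"; the +5 below
 * rounds the last printed digit.
 */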
static void snprint_time(char *buf, size_t bufsiz, s64 nr)
{
	s64 div;
	s32 rem;

	nr += 5; /* for display rounding */
	div = div_s64_rem(nr, 1000, &rem);
	snprintf(buf, bufsiz, "%lld.%02d", (long long)div, (int)rem/10);
}

static void seq_time(struct seq_file *m, s64 time)
{
	char num[22];

	snprint_time(num, sizeof(num), time);
	seq_printf(m, " %14s", num);
}

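/* Print one lock_time record: count, min, max, total and average time. */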
static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
{
	seq_printf(m, "%14lu", lt->nr);
	seq_time(m, lt->min);
	seq_time(m, lt->max);
	seq_time(m, lt->total);
	seq_time(m, lt->nr ? div64_u64(lt->total, lt->nr) : 0);
}

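/*
 * Print the statistics of one lock class: a write (-W) and optionally a
 * read (-R) line with bounce, wait-time and hold-time figures, followed by
 * the recorded contention and contending call sites.
 */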
static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
{
	const struct lockdep_subclass_key *ckey;
	struct lock_class_stats *stats;
	struct lock_class *class;
	const char *cname;
	int i, namelen;
	char name[39];

	class = data->class;
	stats = &data->stats;

	namelen = 38;
	if (class->name_version > 1)
		namelen -= 2; /* XXX truncates versions > 9 */
	if (class->subclass)
		namelen -= 2;

	rcu_read_lock_sched();
	cname = rcu_dereference_sched(class->name);
	ckey  = rcu_dereference_sched(class->key);

	if (!cname && !ckey) {
		rcu_read_unlock_sched();
		return;

	} else if (!cname) {
		char str[KSYM_NAME_LEN];
		const char *key_name;

		key_name = __get_key_name(ckey, str);
		snprintf(name, namelen, "%s", key_name);
	} else {
		snprintf(name, namelen, "%s", cname);
	}
	rcu_read_unlock_sched();

	namelen = strlen(name);
	if (class->name_version > 1) {
		snprintf(name+namelen, 3, "#%d", class->name_version);
		namelen += 2;
	}
	if (class->subclass) {
		snprintf(name+namelen, 3, "/%d", class->subclass);
		namelen += 2;
	}

	if (stats->write_holdtime.nr) {
		if (stats->read_holdtime.nr)
			seq_printf(m, "%38s-W:", name);
		else
			seq_printf(m, "%40s:", name);

		seq_printf(m, "%14lu ", stats->bounces[bounce_contended_write]);
		seq_lock_time(m, &stats->write_waittime);
		seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_write]);
		seq_lock_time(m, &stats->write_holdtime);
		seq_puts(m, "\n");
	}

	if (stats->read_holdtime.nr) {
		seq_printf(m, "%38s-R:", name);
		seq_printf(m, "%14lu ", stats->bounces[bounce_contended_read]);
		seq_lock_time(m, &stats->read_waittime);
		seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_read]);
		seq_lock_time(m, &stats->read_holdtime);
		seq_puts(m, "\n");
	}

	if (stats->read_waittime.nr + stats->write_waittime.nr == 0)
		return;

	if (stats->read_holdtime.nr)
		namelen += 2;

	for (i = 0; i < LOCKSTAT_POINTS; i++) {
		char ip[32];

		if (class->contention_point[i] == 0)
			break;

		if (!i)
			seq_line(m, '-', 40-namelen, namelen);

		snprintf(ip, sizeof(ip), "[<%p>]",
				(void *)class->contention_point[i]);
		seq_printf(m, "%40s %14lu %29s %pS\n",
			   name, stats->contention_point[i],
			   ip, (void *)class->contention_point[i]);
	}
	for (i = 0; i < LOCKSTAT_POINTS; i++) {
		char ip[32];

		if (class->contending_point[i] == 0)
			break;

		if (!i)
			seq_line(m, '-', 40-namelen, namelen);

		snprintf(ip, sizeof(ip), "[<%p>]",
				(void *)class->contending_point[i]);
		seq_printf(m, "%40s %14lu %29s %pS\n",
			   name, stats->contending_point[i],
			   ip, (void *)class->contending_point[i]);
	}
	if (i) {
		seq_puts(m, "\n");
		seq_line(m, '.', 0, 40 + 1 + 12 * (14 + 1));
		seq_puts(m, "\n");
	}
}

static void seq_header(struct seq_file *m)
{
	seq_puts(m, "lock_stat version 0.4\n");

	if (unlikely(!debug_locks))
		seq_printf(m, "*WARNING* lock debugging disabled!! - possibly due to a lockdep warning\n");

	seq_line(m, '-', 0, 40 + 1 + 12 * (14 + 1));
	seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s %14s %14s "
			"%14s %14s\n",
			"class name",
			"con-bounces",
			"contentions",
			"waittime-min",
			"waittime-max",
			"waittime-total",
			"waittime-avg",
			"acq-bounces",
			"acquisitions",
			"holdtime-min",
			"holdtime-max",
			"holdtime-total",
			"holdtime-avg");
	seq_line(m, '-', 0, 40 + 1 + 12 * (14 + 1));
	seq_printf(m, "\n");
}

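/*
 * seq_file iterators for /proc/lock_stat: walk the snapshot taken in
 * lock_stat_open(), emitting the header for position 0.
 */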
static void *ls_start(struct seq_file *m, loff_t *pos)
{
	struct lock_stat_seq *data = m->private;
	struct lock_stat_data *iter;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	iter = data->stats + (*pos - 1);
	if (iter >= data->iter_end)
		iter = NULL;

	return iter;
}

static void *ls_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return ls_start(m, pos);
}

static void ls_stop(struct seq_file *m, void *v)
{
}

static int ls_show(struct seq_file *m, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_header(m);
	else
		seq_stats(m, v);

	return 0;
}

static const struct seq_operations lockstat_ops = {
	.start	= ls_start,
	.next	= ls_next,
	.stop	= ls_stop,
	.show	= ls_show,
};

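/*
 * Take a snapshot of the statistics of every in-use lock class at open
 * time and sort it by contention count, so that reads see a consistent,
 * already-ordered view.
 */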
static int lock_stat_open(struct inode *inode, struct file *file)
{
	int res;
	struct lock_class *class;
	struct lock_stat_seq *data = vmalloc(sizeof(struct lock_stat_seq));

	if (!data)
		return -ENOMEM;

	res = seq_open(file, &lockstat_ops);
	if (!res) {
		struct lock_stat_data *iter = data->stats;
		struct seq_file *m = file->private_data;
		unsigned long idx;

		iterate_lock_classes(idx, class) {
			if (!test_bit(idx, lock_classes_in_use))
				continue;
			iter->class = class;
			iter->stats = lock_stats(class);
			iter++;
		}

		data->iter_end = iter;

		sort(data->stats, data->iter_end - data->stats,
				sizeof(struct lock_stat_data),
				lock_stat_cmp, NULL);

		m->private = data;
	} else
		vfree(data);

	return res;
}

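/*
 * Writing "0" to /proc/lock_stat clears the statistics of every in-use
 * lock class; any other input is accepted but ignored.
 */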
static ssize_t lock_stat_write(struct file *file, const char __user *buf,
			       size_t count, loff_t *ppos)
{
	struct lock_class *class;
	unsigned long idx;
	char c;

	if (count) {
		if (get_user(c, buf))
			return -EFAULT;

		if (c != '0')
			return count;

		iterate_lock_classes(idx, class) {
			if (!test_bit(idx, lock_classes_in_use))
				continue;
			clear_lock_stats(class);
		}
	}
	return count;
}

static int lock_stat_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;

	vfree(seq->private);
	return seq_release(inode, file);
}

static const struct proc_ops lock_stat_proc_ops = {
	.proc_open	= lock_stat_open,
	.proc_write	= lock_stat_write,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= lock_stat_release,
};
#endif /* CONFIG_LOCK_STAT */

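/* Create the /proc entries at boot. */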
static int __init lockdep_proc_init(void)
{
	proc_create_seq("lockdep", S_IRUSR, NULL, &lockdep_ops);
#ifdef CONFIG_PROVE_LOCKING
	proc_create_seq("lockdep_chains", S_IRUSR, NULL, &lockdep_chains_ops);
#endif
	proc_create_single("lockdep_stats", S_IRUSR, NULL, lockdep_stats_show);
#ifdef CONFIG_LOCK_STAT
	proc_create("lock_stat", S_IRUSR | S_IWUSR, NULL, &lock_stat_proc_ops);
#endif

	return 0;
}

__initcall(lockdep_proc_init);