// SPDX-License-Identifier: GPL-2.0
/*
 * Detect hard lockups on a system using perf
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/sched/debug.h>

#include <asm/irq_regs.h>
#include <linux/perf_event.h>

static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
static DEFINE_PER_CPU(struct perf_event *, dead_event);
static struct cpumask dead_events_mask;

static atomic_t watchdog_cpus = ATOMIC_INIT(0);

#ifdef CONFIG_HARDLOCKUP_CHECK_TIMESTAMP
static DEFINE_PER_CPU(ktime_t, last_timestamp);
static DEFINE_PER_CPU(unsigned int, nmi_rearmed);
static ktime_t watchdog_hrtimer_sample_threshold __read_mostly;

void watchdog_update_hrtimer_threshold(u64 period)
{
	/*
	 * The hrtimer runs with a period of (watchdog_threshold * 2) / 5
	 *
	 * So it runs effectively with 2.5 times the rate of the NMI
	 * watchdog. That means the hrtimer should fire 2-3 times before
	 * the NMI watchdog expires. The NMI watchdog on x86 is based on
	 * unhalted CPU cycles, so if Turbo-Mode is enabled the CPU cycles
	 * might run way faster than expected and the NMI fires in a
	 * smaller period than the one deduced from the nominal CPU
	 * frequency. Depending on the Turbo-Mode factor this might be fast
	 * enough to get the NMI period smaller than the hrtimer watchdog
	 * period and trigger false positives.
	 *
	 * The sample threshold is used to check in the NMI handler whether
	 * the minimum time between two NMI samples has elapsed. That
	 * prevents false positives.
	 *
	 * Set this to 4/5 of the actual watchdog threshold period so the
	 * hrtimer is guaranteed to fire at least once within the real
	 * watchdog threshold.
	 */
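	/*
	 * Worked example (illustrative numbers, not taken from this file):
	 * with the default watchdog_thresh of 10s, the hrtimer period is
	 * 10 * 2 / 5 = 4s, so the threshold below becomes 4 * 2 = 8s,
	 * i.e. 4/5 of the 10s hard lockup threshold. Two NMI samples that
	 * arrive less than 8s apart are then treated as suspect by
	 * watchdog_check_timestamp().
	 */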
	watchdog_hrtimer_sample_threshold = period * 2;
}

static bool watchdog_check_timestamp(void)
{
	/* ktime_get_mono_fast_ns() is NMI-safe, unlike plain ktime_get() */
	ktime_t delta, now = ktime_get_mono_fast_ns();

	delta = now - __this_cpu_read(last_timestamp);
	if (delta < watchdog_hrtimer_sample_threshold) {
		/*
		 * If ktime is jiffies based, a stalled timer would prevent
		 * jiffies from being incremented and the filter would look
		 * at a stale timestamp and never trigger. Let the check
		 * through after ten consecutive early samples so a genuine
		 * lockup is still reported in that case.
		 */
		if (__this_cpu_inc_return(nmi_rearmed) < 10)
			return false;
	}
	__this_cpu_write(nmi_rearmed, 0);
	__this_cpu_write(last_timestamp, now);
	return true;
}
#else
static inline bool watchdog_check_timestamp(void)
{
	return true;
}
#endif

static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};
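
/*
 * Note: .sample_period is deliberately left unset here; it is filled in
 * by hardlockup_detector_event_create() from
 * hw_nmi_get_sample_period(watchdog_thresh). On x86, for instance, that
 * works out to roughly cpu_khz * 1000 * watchdog_thresh unhalted cycles,
 * i.e. about one NMI per watchdog_thresh seconds at the nominal clock
 * rate.
 */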

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
				       struct perf_sample_data *data,
				       struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (!watchdog_check_timestamp())
		return;

	watchdog_hardlockup_check(smp_processor_id(), regs);
}
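
/*
 * The actual stall detection lives in watchdog_hardlockup_check() in
 * kernel/watchdog.c: broadly, it checks whether the per-CPU hrtimer
 * interrupt count has advanced since the previous NMI sample and
 * reports a hard lockup if it has not.
 */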

static int hardlockup_detector_event_create(void)
{
	unsigned int cpu;
	struct perf_event_attr *wd_attr;
	struct perf_event *evt;

	/*
	 * Preemption is not disabled because memory will be allocated.
	 * Ensure CPU-locality by calling this from a per-CPU kthread.
	 */
	WARN_ON(!is_percpu_thread());
	cpu = raw_smp_processor_id();
	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
					       watchdog_overflow_callback, NULL);
	if (IS_ERR(evt)) {
		pr_debug("Perf event create on CPU %d failed with %ld\n", cpu,
			 PTR_ERR(evt));
		return PTR_ERR(evt);
	}
	this_cpu_write(watchdog_ev, evt);
	return 0;
}
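
/*
 * Note that the event is created in the disabled state (.disabled = 1
 * in wd_hw_attr above); the counter only starts running once
 * watchdog_hardlockup_enable() calls perf_event_enable() on it.
 */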

/**
 * watchdog_hardlockup_enable - Enable the local event
 *
 * @cpu: The CPU to enable hard lockup detection on.
 */
void watchdog_hardlockup_enable(unsigned int cpu)
{
	WARN_ON_ONCE(cpu != smp_processor_id());

	if (hardlockup_detector_event_create())
		return;

	/* use original value for check */
	if (!atomic_fetch_inc(&watchdog_cpus))
		pr_info("Enabled. Permanently consumes one hw-PMU counter.\n");

	perf_event_enable(this_cpu_read(watchdog_ev));
}

/**
 * watchdog_hardlockup_disable - Disable the local event
 *
 * @cpu: The CPU to disable hard lockup detection on.
 */
void watchdog_hardlockup_disable(unsigned int cpu)
{
	struct perf_event *event = this_cpu_read(watchdog_ev);

	WARN_ON_ONCE(cpu != smp_processor_id());

	if (event) {
		perf_event_disable(event);
		this_cpu_write(watchdog_ev, NULL);
		this_cpu_write(dead_event, event);
		cpumask_set_cpu(smp_processor_id(), &dead_events_mask);
		atomic_dec(&watchdog_cpus);
	}
}

/**
 * hardlockup_detector_perf_cleanup - Cleanup disabled events and destroy them
 *
 * Called from lockup_detector_cleanup(). Serialized by the caller.
 */
void hardlockup_detector_perf_cleanup(void)
{
	int cpu;

	for_each_cpu(cpu, &dead_events_mask) {
		struct perf_event *event = per_cpu(dead_event, cpu);

		/*
		 * Required because for_each_cpu() unconditionally reports
		 * CPU0 as set on UP kernels. Sigh.
		 */
		if (event)
			perf_event_release_kernel(event);
		per_cpu(dead_event, cpu) = NULL;
	}
	cpumask_clear(&dead_events_mask);
}

/**
 * hardlockup_detector_perf_stop - Globally stop watchdog events
 *
 * Special interface for x86 to handle the perf HT bug.
 */
void __init hardlockup_detector_perf_stop(void)
{
	int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu) {
		struct perf_event *event = per_cpu(watchdog_ev, cpu);

		if (event)
			perf_event_disable(event);
	}
}

/**
 * hardlockup_detector_perf_restart - Globally restart watchdog events
 *
 * Special interface for x86 to handle the perf HT bug.
 */
void __init hardlockup_detector_perf_restart(void)
{
	int cpu;

	lockdep_assert_cpus_held();

	if (!(watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED))
		return;

	for_each_online_cpu(cpu) {
		struct perf_event *event = per_cpu(watchdog_ev, cpu);

		if (event)
			perf_event_enable(event);
	}
}
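
/*
 * Usage sketch for the pair above (hypothetical caller, mirroring the
 * constraint enforced by lockdep_assert_cpus_held()): both calls must
 * be made with the CPU hotplug lock held, e.g.:
 *
 *	cpus_read_lock();
 *	hardlockup_detector_perf_stop();
 *	... reconfigure the PMU ...
 *	hardlockup_detector_perf_restart();
 *	cpus_read_unlock();
 */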

bool __weak __init arch_perf_nmi_is_available(void)
{
	return true;
}
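
/*
 * Architectures where perf events are not always NMI-capable can
 * override the weak stub above. As an illustrative sketch (modelled on
 * the arm64 implementation, which only trusts the PMU interrupt when it
 * is delivered as a pseudo-NMI):
 *
 *	bool __init arch_perf_nmi_is_available(void)
 *	{
 *		return arm_pmu_irq_is_nmi();
 *	}
 */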

/**
 * watchdog_hardlockup_probe - Probe whether NMI event is available at all
 */
int __init watchdog_hardlockup_probe(void)
{
	int ret;

	if (!arch_perf_nmi_is_available())
		return -ENODEV;

	ret = hardlockup_detector_event_create();

	if (ret) {
		pr_info("Perf NMI watchdog permanently disabled\n");
	} else {
		/*
		 * The event was created only to probe for a working PMU;
		 * release it again. watchdog_hardlockup_enable() creates
		 * the per-CPU events once the detector is started.
		 */
		perf_event_release_kernel(this_cpu_read(watchdog_ev));
		this_cpu_write(watchdog_ev, NULL);
	}
	return ret;
}