/*-
 * Copyright (c) 2013-2019, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"

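/*
 * The firmware health counter is sampled every MLX5_HEALTH_POLL_INTERVAL
 * (plus jitter; see get_next_poll_jiffies()).  MAX_MISSES consecutive
 * samples without the counter advancing mark the device's health as
 * compromised in poll_health().
 */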
#define	MLX5_HEALTH_POLL_INTERVAL	(2 * HZ)
#define	MAX_MISSES			3

enum {
	MLX5_DROP_NEW_HEALTH_WORK,
	MLX5_DROP_NEW_RECOVERY_WORK,
	MLX5_DROP_NEW_WATCHDOG_WORK,
};

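/*
 * Fatal-error sensors; check_fatal_sensors() checks them in the order
 * listed here and returns the first condition that trips.
 */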
enum {
	MLX5_SENSOR_NO_ERR		= 0,
	MLX5_SENSOR_PCI_COMM_ERR	= 1,
	MLX5_SENSOR_PCI_ERR		= 2,
	MLX5_SENSOR_NIC_DISABLED	= 3,
	MLX5_SENSOR_NIC_SW_RESET	= 4,
	MLX5_SENSOR_FW_SYND_RFR		= 5,
};

static int mlx5_fw_reset_enable = 1;
SYSCTL_INT(_hw_mlx5, OID_AUTO, fw_reset_enable, CTLFLAG_RWTUN,
    &mlx5_fw_reset_enable, 0,
    "Enable firmware reset");

static unsigned int sw_reset_to = 1200;
SYSCTL_UINT(_hw_mlx5, OID_AUTO, sw_reset_timeout, CTLFLAG_RWTUN,
    &sw_reset_to, 0,
    "Minimum timeout in seconds between two firmware resets");
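/*
 * Both knobs are CTLFLAG_RWTUN, so they may be set as loader tunables or
 * changed at runtime, e.g.:
 *
 *	sysctl hw.mlx5.fw_reset_enable=0
 *	sysctl hw.mlx5.sw_reset_timeout=600
 */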

static int lock_sem_sw_reset(struct mlx5_core_dev *dev)
{
	int ret;

	/* Lock GW access */
	ret = -mlx5_vsc_lock(dev);
	if (ret) {
		mlx5_core_warn(dev, "Timed out locking gateway %d\n", ret);
		return ret;
	}

	ret = -mlx5_vsc_lock_addr_space(dev, MLX5_SEMAPHORE_SW_RESET);
	if (ret) {
		if (ret == -EBUSY)
			mlx5_core_dbg(dev,
			    "SW reset FW semaphore already locked, another function will handle the reset\n");
		else
			mlx5_core_warn(dev,
			    "SW reset semaphore lock returned %d\n", ret);
	}

	/* Unlock GW access */
	mlx5_vsc_unlock(dev);

	return ret;
}

static int unlock_sem_sw_reset(struct mlx5_core_dev *dev)
{
	int ret;

	/* Lock GW access */
	ret = -mlx5_vsc_lock(dev);
	if (ret) {
		mlx5_core_warn(dev, "Timed out locking gateway %d\n", ret);
		return ret;
	}

	ret = -mlx5_vsc_unlock_addr_space(dev, MLX5_SEMAPHORE_SW_RESET);

	/* Unlock GW access */
	mlx5_vsc_unlock(dev);

	return ret;
}

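/*
 * The NIC interface mode lives in the initialization segment's
 * cmdq_addr_l_sz dword: bits [31:12] hold the low command-queue address,
 * while the low 12 bits carry the command-queue size field along with the
 * 3-bit NIC interface mode at MLX5_NIC_IFC_OFFSET; hence the 0xFFFFF000
 * mask below when updating the mode.
 */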
u8 mlx5_get_nic_state(struct mlx5_core_dev *dev)
{
	return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 7;
}

void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state)
{
	u32 cur_cmdq_addr_l_sz;

	cur_cmdq_addr_l_sz = ioread32be(&dev->iseg->cmdq_addr_l_sz);
	iowrite32be((cur_cmdq_addr_l_sz & 0xFFFFF000) |
		    state << MLX5_NIC_IFC_OFFSET,
		    &dev->iseg->cmdq_addr_l_sz);
}

static bool sensor_fw_synd_rfr(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct mlx5_health_buffer __iomem *h = health->health;
	u32 rfr = ioread32be(&h->rfr) >> MLX5_RFR_OFFSET;
	u8 synd = ioread8(&h->synd);

	if (rfr && synd)
		mlx5_core_dbg(dev, "FW requests reset, synd: %d\n", synd);
	return rfr && synd;
}

static void mlx5_trigger_cmd_completions(struct work_struct *work)
{
	struct mlx5_core_dev *dev =
	    container_of(work, struct mlx5_core_dev, priv.health.work_cmd_completion);
	unsigned long flags;
	u64 vector;

	/* wait for pending handlers to complete */
	synchronize_irq(dev->priv.msix_arr[MLX5_EQ_VEC_CMD].vector);
	spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
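	/*
	 * cmd.bitmask has one bit set per free command slot, so its
	 * complement, masked to the 2^log_sz slots that exist, is the set
	 * of commands still in flight.  Those are completed by hand below,
	 * with MLX5_TRIGGERED_CMD_COMP marking them as forced completions.
	 */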
	vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
	if (!vector)
		goto no_trig;

	vector |= MLX5_TRIGGERED_CMD_COMP;
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);

	mlx5_core_dbg(dev, "vector 0x%jx\n", (uintmax_t)vector);
	mlx5_cmd_comp_handler(dev, vector, MLX5_CMD_MODE_EVENTS);
	return;

no_trig:
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
}

static bool sensor_pci_no_comm(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct mlx5_health_buffer __iomem *h = health->health;
	bool err = ioread32be(&h->fw_ver) == 0xffffffff;

	return err;
}

static bool sensor_nic_disabled(struct mlx5_core_dev *dev)
{
	return mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED;
}

static bool sensor_nic_sw_reset(struct mlx5_core_dev *dev)
{
	return mlx5_get_nic_state(dev) == MLX5_NIC_IFC_SW_RESET;
}

static u32 check_fatal_sensors(struct mlx5_core_dev *dev)
{
	if (sensor_pci_no_comm(dev))
		return MLX5_SENSOR_PCI_COMM_ERR;
	if (pci_channel_offline(dev->pdev))
		return MLX5_SENSOR_PCI_ERR;
	if (sensor_nic_disabled(dev))
		return MLX5_SENSOR_NIC_DISABLED;
	if (sensor_nic_sw_reset(dev))
		return MLX5_SENSOR_NIC_SW_RESET;
	if (sensor_fw_synd_rfr(dev))
		return MLX5_SENSOR_FW_SYND_RFR;

	return MLX5_SENSOR_NO_ERR;
}

static void reset_fw_if_needed(struct mlx5_core_dev *dev)
{
	bool supported;
	u32 cmdq_addr, fatal_error;

	if (!mlx5_fw_reset_enable)
		return;
	supported = (ioread32be(&dev->iseg->initializing) >>
	    MLX5_FW_RESET_SUPPORTED_OFFSET) & 1;
	if (!supported)
		return;

	/* The reset only needs to be issued by one PF. The health buffer is
	 * shared between all functions, and will be cleared during a reset.
	 * Check again to avoid a redundant second reset. If the fatal error
	 * was PCI related, a reset won't help.
	 */
	fatal_error = check_fatal_sensors(dev);
	if (fatal_error == MLX5_SENSOR_PCI_COMM_ERR ||
	    fatal_error == MLX5_SENSOR_NIC_DISABLED ||
	    fatal_error == MLX5_SENSOR_NIC_SW_RESET) {
		mlx5_core_warn(dev,
		    "Not issuing FW reset. Either it's already done or won't help.\n");
		return;
	}

	mlx5_core_info(dev, "Issuing FW Reset\n");
	/* Write the NIC interface field to initiate the reset; the command
	 * interface address also resides here, don't overwrite it.
	 */
	cmdq_addr = ioread32be(&dev->iseg->cmdq_addr_l_sz);
	iowrite32be((cmdq_addr & 0xFFFFF000) |
		    MLX5_NIC_IFC_SW_RESET << MLX5_NIC_IFC_OFFSET,
		    &dev->iseg->cmdq_addr_l_sz);
}

static bool
mlx5_health_allow_reset(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	unsigned int delta;
	bool ret;

	if (health->last_reset_req != 0) {
		delta = ticks - health->last_reset_req;
		delta /= hz;
		ret = delta >= sw_reset_to;
	} else {
		ret = true;
	}

	/*
	 * In principle, ticks may be 0.  Record -1 in that case so that
	 * the next request is not mistaken for the first one, which is
	 * always allowed to reset.
	 */
	health->last_reset_req = ticks ? : -1;
	if (!ret)
		mlx5_core_warn(dev,
		    "Firmware reset elided due to auto-reset frequency threshold.\n");
	return (ret);
}

#define MLX5_CRDUMP_WAIT_MS	60000
#define MLX5_FW_RESET_WAIT_MS	1000
#define MLX5_NIC_STATE_POLL_MS	5
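/*
 * Transition the device into the internal-error state: flush outstanding
 * commands as forced completions, optionally take the shared SW-reset
 * semaphore to collect a crdump and issue a firmware reset (PF only, rate
 * limited by mlx5_health_allow_reset()), wait for the NIC interface to read
 * back as disabled, and finally broadcast MLX5_DEV_EVENT_SYS_ERROR.
 */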
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
{
	int end, delay_ms = MLX5_CRDUMP_WAIT_MS;
	u32 fatal_error;
	int lock = -EBUSY;

	fatal_error = check_fatal_sensors(dev);

	if (fatal_error || force) {
		if (xchg(&dev->state, MLX5_DEVICE_STATE_INTERNAL_ERROR) ==
		    MLX5_DEVICE_STATE_INTERNAL_ERROR)
			return;
		if (!force)
			mlx5_core_err(dev, "internal state error detected\n");

		/*
		 * Queue the command completion handler on the command
		 * work queue to avoid racing with the real command
		 * completion handler and then wait for it to
		 * complete:
		 */
		queue_work(dev->priv.health.wq_cmd, &dev->priv.health.work_cmd_completion);
		flush_workqueue(dev->priv.health.wq_cmd);
	}

	mutex_lock(&dev->intf_state_mutex);

	if (force)
		goto err_state_done;

	if (fatal_error == MLX5_SENSOR_FW_SYND_RFR &&
	    mlx5_health_allow_reset(dev)) {
		/* Get cr-dump and reset FW semaphore */
		if (mlx5_core_is_pf(dev))
			lock = lock_sem_sw_reset(dev);

		/* Execute cr-dump and SW reset */
		if (lock != -EBUSY) {
			(void)mlx5_fwdump(dev);
			reset_fw_if_needed(dev);
			delay_ms = MLX5_FW_RESET_WAIT_MS;
		}
	}

	/* Recover from SW reset */
	end = jiffies + msecs_to_jiffies(delay_ms);
	do {
		if (sensor_nic_disabled(dev))
			break;

		msleep(MLX5_NIC_STATE_POLL_MS);
	} while (!time_after(jiffies, end));

	if (!sensor_nic_disabled(dev)) {
		mlx5_core_err(dev, "NIC IFC still %d after %ums.\n",
		    mlx5_get_nic_state(dev), delay_ms);
	}

	/* Release the FW semaphore if we are the lock owner */
	if (!lock)
		unlock_sem_sw_reset(dev);

	mlx5_core_info(dev, "System error event triggered\n");

err_state_done:
	mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 1);
	mutex_unlock(&dev->intf_state_mutex);
}

static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
{
	u8 nic_mode = mlx5_get_nic_state(dev);

	if (nic_mode == MLX5_NIC_IFC_SW_RESET) {
		/* The IFC mode field is 3 bits, so it will read 0x7 in two cases:
		 * 1. PCI has been disabled (i.e. PCI-AER, PF driver unloaded
		 *    and this is a VF); this is not recoverable by SW reset.
		 *    Logging of this is handled elsewhere.
		 * 2. FW reset has been issued by another function; the driver
		 *    can be reloaded to recover after the mode switches to
		 *    MLX5_NIC_IFC_DISABLED.
		 */
		if (dev->priv.health.fatal_error != MLX5_SENSOR_PCI_COMM_ERR)
			mlx5_core_warn(dev,
			    "NIC SW reset is already in progress\n");
		else
			mlx5_core_warn(dev,
			    "Communication with FW over the PCI link is down\n");
	} else {
		mlx5_core_warn(dev, "NIC mode %d\n", nic_mode);
	}

	mlx5_disable_device(dev);
}
static void health_recover(struct work_struct *work)
{
	unsigned long end = jiffies + msecs_to_jiffies(MLX5_FW_RESET_WAIT_MS);
	struct mlx5_core_health *health;
	struct delayed_work *dwork;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	bool recover = true;
	u8 nic_mode;

	dwork = container_of(work, struct delayed_work, work);
	health = container_of(dwork, struct mlx5_core_health, recover_work);
	priv = container_of(health, struct mlx5_priv, health);
	dev = container_of(priv, struct mlx5_core_dev, priv);

	mtx_lock(&Giant);	/* XXX newbus needs this */

	if (sensor_pci_no_comm(dev)) {
		mlx5_core_err(dev,
		    "health recovery flow aborted, PCI reads still not working\n");
		recover = false;
	}

	nic_mode = mlx5_get_nic_state(dev);
	while (nic_mode != MLX5_NIC_IFC_DISABLED &&
	       !time_after(jiffies, end)) {
		msleep(MLX5_NIC_STATE_POLL_MS);
		nic_mode = mlx5_get_nic_state(dev);
	}

	if (nic_mode != MLX5_NIC_IFC_DISABLED) {
		mlx5_core_err(dev,
		    "health recovery flow aborted, unexpected NIC IFC mode %d.\n",
		    nic_mode);
		recover = false;
	}

	if (recover) {
		mlx5_core_info(dev, "Starting health recovery flow\n");
		mlx5_recover_device(dev);
	}

	mtx_unlock(&Giant);
}

/* How long to wait before the health flow resets the driver (in msecs) */
#define MLX5_RECOVERY_DELAY_MSECS 60000
#define MLX5_RECOVERY_NO_DELAY 0
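/*
 * PCI-level failures get the long grace period, presumably to give PCI
 * error handling (e.g. AER) a chance to restore the link before the driver
 * is restarted; all other fatal errors recover immediately.
 */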
static unsigned long get_recovery_delay(struct mlx5_core_dev *dev)
{
	return dev->priv.health.fatal_error == MLX5_SENSOR_PCI_ERR ||
		dev->priv.health.fatal_error == MLX5_SENSOR_PCI_COMM_ERR ?
		MLX5_RECOVERY_DELAY_MSECS : MLX5_RECOVERY_NO_DELAY;
}

static void health_care(struct work_struct *work)
{
	struct mlx5_core_health *health;
	unsigned long recover_delay_ms;
	unsigned long recover_delay;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	unsigned long flags;

	health = container_of(work, struct mlx5_core_health, work);
	priv = container_of(health, struct mlx5_priv, health);
	dev = container_of(priv, struct mlx5_core_dev, priv);

	mlx5_core_warn(dev, "handling bad device here\n");
	mlx5_handle_bad_state(dev);
	recover_delay_ms = get_recovery_delay(dev);
	recover_delay = msecs_to_jiffies(recover_delay_ms);

	spin_lock_irqsave(&health->wq_lock, flags);
	if (!test_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags)) {
		mlx5_core_warn(dev,
		    "Scheduling recovery work with %lums delay\n",
		    recover_delay_ms);
		schedule_delayed_work(&health->recover_work, recover_delay);
	} else {
		mlx5_core_err(dev,
		    "new health works are not permitted at this stage\n");
	}
	spin_unlock_irqrestore(&health->wq_lock, flags);
}

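/*
 * The next poll time is MLX5_HEALTH_POLL_INTERVAL from now plus up to one
 * second of random jitter, presumably so that multiple devices do not all
 * read the health buffer in lockstep.
 */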
static unsigned long get_next_poll_jiffies(void)
{
	unsigned long next;

	get_random_bytes(&next, sizeof(next));
	next %= HZ;
	next += jiffies + MLX5_HEALTH_POLL_INTERVAL;

	return next;
}

void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	unsigned long flags;

	spin_lock_irqsave(&health->wq_lock, flags);
	if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
		queue_work(health->wq, &health->work);
	else
		mlx5_core_err(dev,
		    "new health works are not permitted at this stage\n");
	spin_unlock_irqrestore(&health->wq_lock, flags);
}

static const char *hsynd_str(u8 synd)
{
	switch (synd) {
	case MLX5_HEALTH_SYNDR_FW_ERR:
		return "firmware internal error";
	case MLX5_HEALTH_SYNDR_IRISC_ERR:
		return "irisc not responding";
	case MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR:
		return "unrecoverable hardware error";
	case MLX5_HEALTH_SYNDR_CRC_ERR:
		return "firmware CRC error";
	case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR:
		return "ICM fetch PCI error";
	case MLX5_HEALTH_SYNDR_HW_FTL_ERR:
		return "HW fatal error";
	case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR:
		return "async EQ buffer overrun";
	case MLX5_HEALTH_SYNDR_EQ_ERR:
		return "EQ error";
	case MLX5_HEALTH_SYNDR_EQ_INV:
		return "Invalid EQ referenced";
	case MLX5_HEALTH_SYNDR_FFSER_ERR:
		return "FFSER error";
	case MLX5_HEALTH_SYNDR_HIGH_TEMP:
		return "High temperature";
	default:
		return "unrecognized error";
	}
}

static u8
print_health_info(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	struct mlx5_health_buffer __iomem *h = health->health;
	u8 synd = ioread8(&h->synd);
	char fw_str[18];
	u32 fw;
	int i;

	/*
	 * A synd of 0x0 indicates that the FW is unable to respond to
	 * initialization segment reads, and the health buffer should not
	 * be read.
	 */
	if (synd == 0)
		return (0);

	for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
		mlx5_core_info(dev, "assert_var[%d] 0x%08x\n", i,
		    ioread32be(h->assert_var + i));

	mlx5_core_info(dev, "assert_exit_ptr 0x%08x\n",
	    ioread32be(&h->assert_exit_ptr));
	mlx5_core_info(dev, "assert_callra 0x%08x\n",
	    ioread32be(&h->assert_callra));
	snprintf(fw_str, sizeof(fw_str), "%d.%d.%d",
	    fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
	mlx5_core_info(dev, "fw_ver %s\n", fw_str);
	mlx5_core_info(dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
	mlx5_core_info(dev, "irisc_index %d\n", ioread8(&h->irisc_index));
	mlx5_core_info(dev, "synd 0x%x: %s\n",
	    ioread8(&h->synd), hsynd_str(ioread8(&h->synd)));
	mlx5_core_info(dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
	fw = ioread32be(&h->fw_ver);
	mlx5_core_info(dev, "raw fw_ver 0x%08x\n", fw);

	return synd;
}

static void health_watchdog(struct work_struct *work)
{
	struct mlx5_core_dev *dev;
	u16 power;
	u8 status;
	int err;

	dev = container_of(work, struct mlx5_core_dev, priv.health.work_watchdog);

	if (!MLX5_CAP_GEN(dev, mcam_reg) ||
	    !MLX5_CAP_MCAM_FEATURE(dev, pcie_status_and_power))
		return;

	err = mlx5_pci_read_power_status(dev, &power, &status);
	if (err < 0) {
		mlx5_core_warn(dev, "Failed reading power status: %d\n",
		    err);
		return;
	}

	dev->pwr_value = power;

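	/*
	 * Power status encoding, as reflected in the messages below:
	 * 0 = slot does not report power, 1 = advertised power is
	 * sufficient, 2 = advertised power is insufficient.
	 */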
	if (dev->pwr_status != status) {
		switch (status) {
		case 0:
			dev->pwr_status = status;
			mlx5_core_info(dev,
			    "PCI power is not published by the PCIe slot.\n");
			break;
		case 1:
			dev->pwr_status = status;
			mlx5_core_info(dev,
			    "PCIe slot advertised sufficient power (%uW).\n",
			    power);
			break;
		case 2:
			dev->pwr_status = status;
			mlx5_core_warn(dev,
			    "Detected insufficient power on the PCIe slot (%uW).\n",
			    power);
			break;
		default:
			dev->pwr_status = 0;
			mlx5_core_warn(dev,
			    "Unknown power state detected (%d).\n",
			    status);
			break;
		}
	}
}

void
mlx5_trigger_health_watchdog(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	unsigned long flags;

	spin_lock_irqsave(&health->wq_lock, flags);
	if (!test_bit(MLX5_DROP_NEW_WATCHDOG_WORK, &health->flags))
		queue_work(health->wq_watchdog, &health->work_watchdog);
	else
		mlx5_core_err(dev,
		    "scheduling watchdog is not permitted at this stage\n");
	spin_unlock_irqrestore(&health->wq_lock, flags);
}

static void poll_health(unsigned long data)
{
	struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
	struct mlx5_core_health *health = &dev->priv.health;
	u32 fatal_error;
	u32 count;

	if (dev->state != MLX5_DEVICE_STATE_UP)
		return;

	count = ioread32be(health->health_counter);
	if (count == health->prev)
		++health->miss_counter;
	else
		health->miss_counter = 0;

	health->prev = count;
	if (health->miss_counter == MAX_MISSES) {
		mlx5_core_err(dev, "device's health compromised - reached miss count\n");
		if (print_health_info(dev) == 0)
			mlx5_core_err(dev, "FW is unable to respond to initialization segment reads\n");
	}

	fatal_error = check_fatal_sensors(dev);

	if (fatal_error && !health->fatal_error) {
		mlx5_core_err(dev,
		    "Fatal error %u detected\n", fatal_error);
		dev->priv.health.fatal_error = fatal_error;
		print_health_info(dev);
		mlx5_trigger_health_work(dev);
	}

	mod_timer(&health->timer, get_next_poll_jiffies());
}

void mlx5_start_health_poll(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	init_timer(&health->timer);
	health->fatal_error = MLX5_SENSOR_NO_ERR;
	clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
	clear_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
	clear_bit(MLX5_DROP_NEW_WATCHDOG_WORK, &health->flags);
	health->health = &dev->iseg->health;
	health->health_counter = &dev->iseg->health_counter;

	setup_timer(&health->timer, poll_health, (unsigned long)dev);
	mod_timer(&health->timer,
		  round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL));

	/* do initial PCI power state readout */
	mlx5_trigger_health_watchdog(dev);
}

void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
{
	struct mlx5_core_health *health = &dev->priv.health;
	unsigned long flags;

	if (disable_health) {
		spin_lock_irqsave(&health->wq_lock, flags);
		set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
		set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
		set_bit(MLX5_DROP_NEW_WATCHDOG_WORK, &health->flags);
		spin_unlock_irqrestore(&health->wq_lock, flags);
	}

	del_timer_sync(&health->timer);
}

void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	unsigned long flags;

	spin_lock_irqsave(&health->wq_lock, flags);
	set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
	set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
	set_bit(MLX5_DROP_NEW_WATCHDOG_WORK, &health->flags);
	spin_unlock_irqrestore(&health->wq_lock, flags);
	cancel_delayed_work_sync(&health->recover_work);
	cancel_work_sync(&health->work);
	cancel_work_sync(&health->work_watchdog);
}

void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;
	unsigned long flags;

	spin_lock_irqsave(&health->wq_lock, flags);
	set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
	spin_unlock_irqrestore(&health->wq_lock, flags);
	cancel_delayed_work_sync(&health->recover_work);
}

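/*
 * Three dedicated single-threaded workqueues back the health flow: "-rec"
 * runs health_care(), "-wdg" runs the PCI power watchdog, and "-cmd" runs
 * the forced command-completion handler; the delayed health_recover() work
 * is scheduled on the system workqueue instead.
 */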
void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health = &dev->priv.health;

	destroy_workqueue(health->wq);
	destroy_workqueue(health->wq_watchdog);
	destroy_workqueue(health->wq_cmd);
}

int mlx5_health_init(struct mlx5_core_dev *dev)
{
	struct mlx5_core_health *health;
	char name[64];

	health = &dev->priv.health;

	snprintf(name, sizeof(name), "%s-rec", dev_name(&dev->pdev->dev));
	health->wq = create_singlethread_workqueue(name);
	if (!health->wq)
		goto err_recovery;

	snprintf(name, sizeof(name), "%s-wdg", dev_name(&dev->pdev->dev));
	health->wq_watchdog = create_singlethread_workqueue(name);
	if (!health->wq_watchdog)
		goto err_watchdog;

	snprintf(name, sizeof(name), "%s-cmd", dev_name(&dev->pdev->dev));
	health->wq_cmd = create_singlethread_workqueue(name);
	if (!health->wq_cmd)
		goto err_cmd;

	spin_lock_init(&health->wq_lock);
	INIT_WORK(&health->work, health_care);
	INIT_WORK(&health->work_watchdog, health_watchdog);
	INIT_WORK(&health->work_cmd_completion, mlx5_trigger_cmd_completions);
	INIT_DELAYED_WORK(&health->recover_work, health_recover);

	return 0;

err_cmd:
	destroy_workqueue(health->wq_watchdog);
err_watchdog:
	destroy_workqueue(health->wq);
err_recovery:
	return -ENOMEM;
}