// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Express Downstream Port Containment services driver
 * Author: Keith Busch <keith.busch@intel.com>
 *
 * Copyright (C) 2016 Intel Corp.
 */

#define dev_fmt(fmt) "DPC: " fmt

#include <linux/aer.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/pci.h>

#include "portdrv.h"
#include "../pci.h"

#define PCI_EXP_DPC_CTL_EN_MASK	(PCI_EXP_DPC_CTL_EN_FATAL | \
				 PCI_EXP_DPC_CTL_EN_NONFATAL)

static const char * const rp_pio_error_string[] = {
	"Configuration Request received UR Completion",	 /* Bit Position 0  */
	"Configuration Request received CA Completion",	 /* Bit Position 1  */
	"Configuration Request Completion Timeout",	 /* Bit Position 2  */
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	"I/O Request received UR Completion",		 /* Bit Position 8  */
	"I/O Request received CA Completion",		 /* Bit Position 9  */
	"I/O Request Completion Timeout",		 /* Bit Position 10 */
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	"Memory Request received UR Completion",	 /* Bit Position 16 */
	"Memory Request received CA Completion",	 /* Bit Position 17 */
	"Memory Request Completion Timeout",		 /* Bit Position 18 */
};

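/*
 * Save/restore the DPC Control register so the DPC configuration survives
 * device resets and power transitions; called from the generic PCI
 * save/restore state paths.
 */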
void pci_save_dpc_state(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_DPC);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pci_read_config_word(dev, dev->dpc_cap + PCI_EXP_DPC_CTL, cap);
}

void pci_restore_dpc_state(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_DPC);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pci_write_config_word(dev, dev->dpc_cap + PCI_EXP_DPC_CTL, *cap);
}

static DECLARE_WAIT_QUEUE_HEAD(dpc_completed_waitqueue);

#ifdef CONFIG_HOTPLUG_PCI_PCIE
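/*
 * Return true once the Port has left DPC: the DPC Trigger Status bit is
 * clear (or config space is inaccessible) and software recovery is no
 * longer in progress.
 */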
static bool dpc_completed(struct pci_dev *pdev)
{
	u16 status;

	pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_STATUS, &status);
	if ((!PCI_POSSIBLE_ERROR(status)) && (status & PCI_EXP_DPC_STATUS_TRIGGER))
		return false;

	if (test_bit(PCI_DPC_RECOVERING, &pdev->priv_flags))
		return false;

	return true;
}

/**
 * pci_dpc_recovered - whether DPC triggered and has recovered successfully
 * @pdev: PCI device
 *
 * Return true if DPC was triggered for @pdev and has recovered successfully.
 * Wait for recovery if it hasn't completed yet.  Called from the PCIe hotplug
 * driver to recognize and ignore Link Down/Up events caused by DPC.
 */
bool pci_dpc_recovered(struct pci_dev *pdev)
{
	struct pci_host_bridge *host;

	if (!pdev->dpc_cap)
		return false;

	/*
	 * Synchronization between hotplug and DPC is not supported
	 * if DPC is owned by firmware and EDR is not enabled.
	 */
	host = pci_find_host_bridge(pdev->bus);
	if (!host->native_dpc && !IS_ENABLED(CONFIG_PCIE_EDR))
		return false;

	/*
	 * Need a timeout in case DPC never completes due to failure of
	 * dpc_wait_rp_inactive().  The spec doesn't mandate a time limit,
	 * but reports indicate that DPC completes within 4 seconds.
	 */
	wait_event_timeout(dpc_completed_waitqueue, dpc_completed(pdev),
			   msecs_to_jiffies(4000));

	return test_and_clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
}
#endif /* CONFIG_HOTPLUG_PCI_PCIE */

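/*
 * Poll the DPC RP Busy status bit every 10 ms for up to one second.
 * Return 0 once the Root Port is idle, -EBUSY if it is still busy.
 */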
static int dpc_wait_rp_inactive(struct pci_dev *pdev)
{
	unsigned long timeout = jiffies + HZ;
	u16 cap = pdev->dpc_cap, status;

	pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
	while (status & PCI_EXP_DPC_RP_BUSY &&
					!time_after(jiffies, timeout)) {
		msleep(10);
		pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
	}
	if (status & PCI_EXP_DPC_RP_BUSY) {
		pci_warn(pdev, "root port still busy\n");
		return -EBUSY;
	}
	return 0;
}

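/*
 * reset_link callback for pcie_do_recovery(): wait for the Link to go down,
 * clear the DPC Trigger Status bit so the Port can leave DPC, then wait for
 * the secondary bus to become usable again.
 */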
pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
{
	pci_ers_result_t ret;
	u16 cap;

	set_bit(PCI_DPC_RECOVERING, &pdev->priv_flags);

	/*
	 * DPC disables the Link automatically in hardware, so it has
	 * already been reset by the time we get here.
	 */
	cap = pdev->dpc_cap;

	/*
	 * Wait until the Link is inactive, then clear DPC Trigger Status
	 * to allow the Port to leave DPC.
	 */
	if (!pcie_wait_for_link(pdev, false))
		pci_info(pdev, "Data Link Layer Link Active not cleared in 1000 msec\n");

	if (pdev->dpc_rp_extensions && dpc_wait_rp_inactive(pdev)) {
		clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
		ret = PCI_ERS_RESULT_DISCONNECT;
		goto out;
	}

	pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
			      PCI_EXP_DPC_STATUS_TRIGGER);

	if (pci_bridge_wait_for_secondary_bus(pdev, "DPC")) {
		clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
		ret = PCI_ERS_RESULT_DISCONNECT;
	} else {
		set_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
		ret = PCI_ERS_RESULT_RECOVERED;
	}
out:
	clear_bit(PCI_DPC_RECOVERING, &pdev->priv_flags);
	wake_up_all(&dpc_completed_waitqueue);
	return ret;
}

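/*
 * Dump the RP PIO Status, Mask, Severity, SysError and Exception registers,
 * decode the unmasked error bits (flagging the First Error Pointer), print
 * whatever TLP Header, ImpSpec and TLP Prefix log registers the Port
 * implements, and finally clear the RP PIO Status register.
 */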
static void dpc_process_rp_pio_error(struct pci_dev *pdev)
{
	u16 cap = pdev->dpc_cap, dpc_status, first_error;
	u32 status, mask, sev, syserr, exc, log, prefix;
	struct pcie_tlp_log tlp_log;
	int i;

	pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, &status);
	pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_MASK, &mask);
	pci_err(pdev, "rp_pio_status: %#010x, rp_pio_mask: %#010x\n",
		status, mask);

	pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SEVERITY, &sev);
	pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SYSERROR, &syserr);
	pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_EXCEPTION, &exc);
	pci_err(pdev, "RP PIO severity=%#010x, syserror=%#010x, exception=%#010x\n",
		sev, syserr, exc);

	/* Get First Error Pointer */
	pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &dpc_status);
	first_error = FIELD_GET(PCI_EXP_DPC_RP_PIO_FEP, dpc_status);

	for (i = 0; i < ARRAY_SIZE(rp_pio_error_string); i++) {
		if ((status & ~mask) & (1 << i))
			pci_err(pdev, "[%2d] %s%s\n", i, rp_pio_error_string[i],
				first_error == i ? " (First)" : "");
	}

	if (pdev->dpc_rp_log_size < 4)
		goto clear_status;
	pcie_read_tlp_log(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG, &tlp_log);
	pci_err(pdev, "TLP Header: %#010x %#010x %#010x %#010x\n",
		tlp_log.dw[0], tlp_log.dw[1], tlp_log.dw[2], tlp_log.dw[3]);

	if (pdev->dpc_rp_log_size < 5)
		goto clear_status;
	pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG, &log);
	pci_err(pdev, "RP PIO ImpSpec Log %#010x\n", log);

	for (i = 0; i < pdev->dpc_rp_log_size - 5; i++) {
		pci_read_config_dword(pdev,
			cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG + i * 4, &prefix);
		pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
	}
 clear_status:
	pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status);
}

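/*
 * Classify the containment event from the AER Uncorrectable Error registers:
 * return 1 and set info->severity to AER_FATAL or AER_NONFATAL if an
 * unmasked uncorrectable error is logged, 0 if none is pending.
 */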
static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev,
					  struct aer_err_info *info)
{
	int pos = dev->aer_cap;
	u32 status, mask, sev;

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
	status &= ~mask;
	if (!status)
		return 0;

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
	status &= sev;
	if (status)
		info->severity = AER_FATAL;
	else
		info->severity = AER_NONFATAL;

	return 1;
}

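/*
 * Log the DPC Status and Error Source ID, decode the trigger reason, and
 * report the error detail: RP PIO registers for RP PIO errors, AER status
 * (printed and cleared) for unmasked uncorrectable errors.
 */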
void dpc_process_error(struct pci_dev *pdev)
{
	u16 cap = pdev->dpc_cap, status, source, reason, ext_reason;
	struct aer_err_info info;

	pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
	pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source);

	pci_info(pdev, "containment event, status:%#06x source:%#06x\n",
		 status, source);

	reason = status & PCI_EXP_DPC_STATUS_TRIGGER_RSN;
	ext_reason = status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT;
	pci_warn(pdev, "%s detected\n",
		 (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR) ?
		 "unmasked uncorrectable error" :
		 (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_NFE) ?
		 "ERR_NONFATAL" :
		 (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_FE) ?
		 "ERR_FATAL" :
		 (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO) ?
		 "RP PIO error" :
		 (ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_SW_TRIGGER) ?
		 "software trigger" :
		 "reserved error");

	/* Show detailed RP PIO error information */
	if (pdev->dpc_rp_extensions &&
	    reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_IN_EXT &&
	    ext_reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_RP_PIO)
		dpc_process_rp_pio_error(pdev);
	else if (reason == PCI_EXP_DPC_STATUS_TRIGGER_RSN_UNCOR &&
		 dpc_get_aer_uncorrect_severity(pdev, &info) &&
		 aer_get_device_error_info(pdev, &info)) {
		aer_print_error(pdev, &info);
		pci_aer_clear_nonfatal_status(pdev);
		pci_aer_clear_fatal_status(pdev);
	}
}

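/*
 * Clear the status bits that a Surprise Down event is known to leave behind:
 * RP PIO Status, the legacy Status register and the Fatal Error Detected bit
 * in the Device Status register.
 */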
static void pci_clear_surpdn_errors(struct pci_dev *pdev)
{
	if (pdev->dpc_rp_extensions)
		pci_write_config_dword(pdev, pdev->dpc_cap +
				       PCI_EXP_DPC_RP_PIO_STATUS, ~0);

	/*
	 * In practice, Surprise Down errors have been observed to set error
	 * bits in the Status Register as well as the Fatal Error Detected
	 * bit in the Device Status Register.
	 */
	pci_write_config_word(pdev, PCI_STATUS, 0xffff);

	pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_FED);
}

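/*
 * Async removal: wait for the Link to go down, clear the stale AER and
 * Surprise Down status the removal left behind, then release containment by
 * clearing DPC Trigger Status.  No device recovery is attempted.
 */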
static void dpc_handle_surprise_removal(struct pci_dev *pdev)
{
	if (!pcie_wait_for_link(pdev, false)) {
		pci_info(pdev, "Data Link Layer Link Active not cleared in 1000 msec\n");
		goto out;
	}

	if (pdev->dpc_rp_extensions && dpc_wait_rp_inactive(pdev))
		goto out;

	pci_aer_raw_clear_status(pdev);
	pci_clear_surpdn_errors(pdev);

	pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_STATUS,
			      PCI_EXP_DPC_STATUS_TRIGGER);

out:
	clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
	wake_up_all(&dpc_completed_waitqueue);
}

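/*
 * A containment event on a hotplug bridge whose AER Uncorrectable Error
 * Status has Surprise Down Error set is treated as async removal rather
 * than as an error to recover from.
 */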
static bool dpc_is_surprise_removal(struct pci_dev *pdev)
{
	u16 status;

	if (!pdev->is_hotplug_bridge)
		return false;

	if (pci_read_config_word(pdev, pdev->aer_cap + PCI_ERR_UNCOR_STATUS,
				 &status))
		return false;

	return status & PCI_ERR_UNC_SURPDN;
}

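/*
 * Threaded interrupt handler: divert async removal to
 * dpc_handle_surprise_removal(), otherwise log the containment event and run
 * the fatal-error recovery path with dpc_reset_link() as the reset callback.
 */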
static irqreturn_t dpc_handler(int irq, void *context)
{
	struct pci_dev *pdev = context;

	/*
	 * According to PCIe r6.0 sec 6.7.6, errors are an expected side effect
	 * of async removal and should be ignored by software.
	 */
	if (dpc_is_surprise_removal(pdev)) {
		dpc_handle_surprise_removal(pdev);
		return IRQ_HANDLED;
	}

	dpc_process_error(pdev);

	/* We configure DPC so it only triggers on ERR_FATAL */
	pcie_do_recovery(pdev, pci_channel_io_frozen, dpc_reset_link);

	return IRQ_HANDLED;
}

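/*
 * Hard interrupt handler: acknowledge DPC Interrupt Status and wake the
 * handler thread only if DPC actually triggered.
 */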
static irqreturn_t dpc_irq(int irq, void *context)
{
	struct pci_dev *pdev = context;
	u16 cap = pdev->dpc_cap, status;

	pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);

	if (!(status & PCI_EXP_DPC_STATUS_INTERRUPT) || PCI_POSSIBLE_ERROR(status))
		return IRQ_NONE;

	pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
			      PCI_EXP_DPC_STATUS_INTERRUPT);
	if (status & PCI_EXP_DPC_STATUS_TRIGGER)
		return IRQ_WAKE_THREAD;
	return IRQ_HANDLED;
}

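/*
 * Cache the DPC capability offset and, for Ports implementing RP Extensions,
 * the RP PIO log size.  A quirk may have set the log size already; a value
 * outside the 4..9 range is rejected.
 */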
void pci_dpc_init(struct pci_dev *pdev)
{
	u16 cap;

	pdev->dpc_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DPC);
	if (!pdev->dpc_cap)
		return;

	pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CAP, &cap);
	if (!(cap & PCI_EXP_DPC_CAP_RP_EXT))
		return;

	pdev->dpc_rp_extensions = true;

	/* Quirks may set dpc_rp_log_size if the device or firmware is buggy */
	if (!pdev->dpc_rp_log_size) {
		pdev->dpc_rp_log_size =
				FIELD_GET(PCI_EXP_DPC_RP_PIO_LOG_SIZE, cap);
		if (pdev->dpc_rp_log_size < 4 || pdev->dpc_rp_log_size > 9) {
			pci_err(pdev, "RP PIO log size %u is invalid\n",
				pdev->dpc_rp_log_size);
			pdev->dpc_rp_log_size = 0;
		}
	}
}

#define FLAG(x, y) (((x) & (y)) ? '+' : '-')
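/*
 * Bind the DPC port service: request the DPC interrupt as a threaded IRQ,
 * enable DPC triggering on ERR_FATAL along with its interrupt, log the
 * capabilities and allocate save-state space for the DPC Control register.
 */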
static int dpc_probe(struct pcie_device *dev)
{
	struct pci_dev *pdev = dev->port;
	struct device *device = &dev->device;
	int status;
	u16 ctl, cap;

	if (!pcie_aer_is_native(pdev) && !pcie_ports_dpc_native)
		return -ENOTSUPP;

	status = devm_request_threaded_irq(device, dev->irq, dpc_irq,
					   dpc_handler, IRQF_SHARED,
					   "pcie-dpc", pdev);
	if (status) {
		pci_warn(pdev, "request IRQ%d failed: %d\n", dev->irq,
			 status);
		return status;
	}

	pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CAP, &cap);

	pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl);
	ctl &= ~PCI_EXP_DPC_CTL_EN_MASK;
	ctl |= PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN;
	pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl);

	pci_info(pdev, "enabled with IRQ %d\n", dev->irq);
	pci_info(pdev, "error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
		 cap & PCI_EXP_DPC_IRQ, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT),
		 FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP),
		 FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), pdev->dpc_rp_log_size,
		 FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE));

	pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_DPC, sizeof(u16));
	return status;
}

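/*
 * Disable DPC triggering and its interrupt when the port service is
 * unbound.
 */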
static void dpc_remove(struct pcie_device *dev)
{
	struct pci_dev *pdev = dev->port;
	u16 ctl;

	pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl);
	ctl &= ~(PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN);
	pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl);
}

static struct pcie_port_service_driver dpcdriver = {
	.name		= "dpc",
	.port_type	= PCIE_ANY_PORT,
	.service	= PCIE_PORT_SERVICE_DPC,
	.probe		= dpc_probe,
	.remove		= dpc_remove,
};

int __init pcie_dpc_init(void)
{
	return pcie_port_service_register(&dpcdriver);
}