// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_pci_func.c: Definition of PCI functions. */

#include <linux/interrupt.h>
#include <linux/module.h>

#include "aq_main.h"
#include "aq_nic.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "hw_atl/hw_atl_a0.h"
#include "hw_atl/hw_atl_b0.h"
#include "hw_atl2/hw_atl2.h"
#include "aq_filters.h"
#include "aq_drvinfo.h"
#include "aq_macsec.h"

static const struct pci_device_id aq_pci_tbl[] = {
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D100), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D107), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D108), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D109), },

	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112), },

	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112S), },

	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113DEV), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113CS), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC114CS), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113C), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC115C), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113CA), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC116C), },

	{}
};

static const struct aq_board_revision_s hw_atl_boards[] = {
	{ AQ_DEVICE_ID_0001,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
	{ AQ_DEVICE_ID_D100,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc100, },
	{ AQ_DEVICE_ID_D107,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
	{ AQ_DEVICE_ID_D108,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc108, },
	{ AQ_DEVICE_ID_D109,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc109, },

	{ AQ_DEVICE_ID_0001,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_D100,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc100, },
	{ AQ_DEVICE_ID_D107,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_D108,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, },
	{ AQ_DEVICE_ID_D109,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, },

	{ AQ_DEVICE_ID_AQC100,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc100, },
	{ AQ_DEVICE_ID_AQC107,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_AQC108,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, },
	{ AQ_DEVICE_ID_AQC109,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, },
	{ AQ_DEVICE_ID_AQC111,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc111, },
	{ AQ_DEVICE_ID_AQC112,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc112, },

	{ AQ_DEVICE_ID_AQC100S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc100s, },
	{ AQ_DEVICE_ID_AQC107S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc107s, },
	{ AQ_DEVICE_ID_AQC108S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc108s, },
	{ AQ_DEVICE_ID_AQC109S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc109s, },
	{ AQ_DEVICE_ID_AQC111S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc111s, },
	{ AQ_DEVICE_ID_AQC112S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc112s, },

	{ AQ_DEVICE_ID_AQC113DEV,	AQ_HWREV_ANY,	&hw_atl2_ops, &hw_atl2_caps_aqc113, },
	{ AQ_DEVICE_ID_AQC113,		AQ_HWREV_ANY,	&hw_atl2_ops, &hw_atl2_caps_aqc113, },
	{ AQ_DEVICE_ID_AQC113CS,	AQ_HWREV_ANY,	&hw_atl2_ops, &hw_atl2_caps_aqc113, },
	{ AQ_DEVICE_ID_AQC114CS,	AQ_HWREV_ANY,	&hw_atl2_ops, &hw_atl2_caps_aqc113, },
	{ AQ_DEVICE_ID_AQC113C,		AQ_HWREV_ANY,	&hw_atl2_ops, &hw_atl2_caps_aqc113, },
	{ AQ_DEVICE_ID_AQC115C,		AQ_HWREV_ANY,	&hw_atl2_ops, &hw_atl2_caps_aqc115c, },
	{ AQ_DEVICE_ID_AQC113CA,	AQ_HWREV_ANY,	&hw_atl2_ops, &hw_atl2_caps_aqc113, },
	{ AQ_DEVICE_ID_AQC116C,		AQ_HWREV_ANY,	&hw_atl2_ops, &hw_atl2_caps_aqc116c, },
};

MODULE_DEVICE_TABLE(pci, aq_pci_tbl);

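/* Look up the hw_ops/caps pair for this board in hw_atl_boards[],
 * matching on the PCI device ID and, where the table entry is not
 * AQ_HWREV_ANY, on the PCI revision as well.
 */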
static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev,
				     const struct aq_hw_ops **ops,
				     const struct aq_hw_caps_s **caps)
{
	int i;

	if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(hw_atl_boards); i++) {
		if (hw_atl_boards[i].devid == pdev->device &&
		    (hw_atl_boards[i].revision == AQ_HWREV_ANY ||
		     hw_atl_boards[i].revision == pdev->revision)) {
			*ops = hw_atl_boards[i].ops;
			*caps = hw_atl_boards[i].caps;
			break;
		}
	}

	if (i == ARRAY_SIZE(hw_atl_boards))
		return -EINVAL;

	return 0;
}

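/* Basic PCI bring-up: prefer a 64-bit DMA mask and fall back to
 * 32-bit, claim the MMIO regions and enable bus mastering.
 */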
static int aq_pci_func_init(struct pci_dev *pdev)
{
	int err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		err = -ENOSR;
		goto err_exit;
	}

	err = pci_request_regions(pdev, AQ_CFG_DRV_NAME "_mmio");
	if (err < 0)
		goto err_exit;

	pci_set_master(pdev);

	return 0;

err_exit:
	return err;
}

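/* Request the interrupt for vector @i. With MSI or MSI-X the
 * caller-supplied handler is used directly; with legacy INTx the
 * shared legacy ISR is installed instead. The vector is recorded in
 * msix_entry_mask and, for MSI-X, an affinity hint is applied.
 */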
int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
			  char *name, irq_handler_t irq_handler,
			  void *irq_arg, cpumask_t *affinity_mask)
{
	struct pci_dev *pdev = self->pdev;
	int err;

	if (pdev->msix_enabled || pdev->msi_enabled)
		err = request_irq(pci_irq_vector(pdev, i), irq_handler, 0,
				  name, irq_arg);
	else
		err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr_legacy,
				  IRQF_SHARED, name, irq_arg);

	if (err >= 0) {
		self->msix_entry_mask |= (1 << i);

		if (pdev->msix_enabled && affinity_mask)
			irq_set_affinity_hint(pci_irq_vector(pdev, i),
					      affinity_mask);
	}

	return err;
}

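/* Release every interrupt recorded in msix_entry_mask. The link
 * (service) vector was requested with the nic itself as its argument,
 * data vectors with the corresponding aq_vec.
 */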
void aq_pci_func_free_irqs(struct aq_nic_s *self)
{
	struct pci_dev *pdev = self->pdev;
	unsigned int i;
	void *irq_data;

	for (i = 32U; i--;) {
		if (!((1U << i) & self->msix_entry_mask))
			continue;
		if (self->aq_nic_cfg.link_irq_vec &&
		    i == self->aq_nic_cfg.link_irq_vec)
			irq_data = self;
		else if (i < AQ_CFG_VECS_MAX)
			irq_data = self->aq_vec[i];
		else
			continue;

		if (pdev->msix_enabled)
			irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
		free_irq(pci_irq_vector(pdev, i), irq_data);
		self->msix_entry_mask &= ~(1U << i);
	}
}

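/* Report which interrupt mode the PCI core actually granted. */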
unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self)
{
	if (self->pdev->msix_enabled)
		return AQ_HW_IRQ_MSIX;
	if (self->pdev->msi_enabled)
		return AQ_HW_IRQ_MSI;

	return AQ_HW_IRQ_LEGACY;
}

static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
{
	pci_free_irq_vectors(self->pdev);
}

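/* Probe: enable the device, map its registers, pick the hardware ops
 * for this board, allocate interrupt vectors and register the netdev.
 * The error labels at the end unwind these steps in reverse order.
 */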
static int aq_pci_probe(struct pci_dev *pdev,
			const struct pci_device_id *pci_id)
{
	struct net_device *ndev;
	resource_size_t mmio_pa;
	struct aq_nic_s *self;
	u32 numvecs;
	u32 bar;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = aq_pci_func_init(pdev);
	if (err)
		goto err_pci_func;

	ndev = aq_ndev_alloc();
	if (!ndev) {
		err = -ENOMEM;
		goto err_ndev;
	}

	self = netdev_priv(ndev);
	self->pdev = pdev;
	SET_NETDEV_DEV(ndev, &pdev->dev);
	pci_set_drvdata(pdev, self);

	mutex_init(&self->fwreq_mutex);

	err = aq_pci_probe_get_hw_by_id(pdev, &self->aq_hw_ops,
					&aq_nic_get_cfg(self)->aq_hw_caps);
	if (err)
		goto err_ioremap;

	self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL);
	if (!self->aq_hw) {
		err = -ENOMEM;
		goto err_ioremap;
	}
	self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self);
	if (self->aq_hw->aq_nic_cfg->aq_hw_caps->priv_data_len) {
		int len = self->aq_hw->aq_nic_cfg->aq_hw_caps->priv_data_len;

		self->aq_hw->priv = kzalloc(len, GFP_KERNEL);
		if (!self->aq_hw->priv) {
			err = -ENOMEM;
			goto err_free_aq_hw;
		}
	}

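	/* Map the first memory BAR that is large enough to hold the
	 * register file; anything of 24 bytes or less is rejected.
	 */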
	for (bar = 0; bar < 4; ++bar) {
		if (IORESOURCE_MEM & pci_resource_flags(pdev, bar)) {
			resource_size_t reg_sz;

			mmio_pa = pci_resource_start(pdev, bar);
			if (mmio_pa == 0U) {
				err = -EIO;
				goto err_free_aq_hw_priv;
			}

			reg_sz = pci_resource_len(pdev, bar);
			if (reg_sz <= 24 /*ATL_REGS_SIZE*/) {
				err = -EIO;
				goto err_free_aq_hw_priv;
			}

			self->aq_hw->mmio = ioremap(mmio_pa, reg_sz);
			if (!self->aq_hw->mmio) {
				err = -EIO;
				goto err_free_aq_hw_priv;
			}
			break;
		}
	}

	if (bar == 4) {
		err = -EIO;
		goto err_free_aq_hw_priv;
	}

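	/* Budget the interrupt vectors: data vectors are capped by both
	 * the hardware MSI-X limit and the number of online CPUs; extra
	 * vectors are then reserved below for PTP and the service (link)
	 * interrupt.
	 */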
	numvecs = min((u8)AQ_CFG_VECS_DEF,
		      aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs);
	numvecs = min(numvecs, num_online_cpus());
	/* Request IRQ vector for PTP */
	numvecs += 1;

	numvecs += AQ_HW_SERVICE_IRQS;
	/* enable interrupts */
#if !AQ_CFG_FORCE_LEGACY_INT
	err = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
				    PCI_IRQ_MSIX | PCI_IRQ_MSI |
				    PCI_IRQ_LEGACY);

	if (err < 0)
		goto err_hwinit;
	numvecs = err;
#endif
	self->irqvecs = numvecs;

	/* net device init */
	aq_nic_cfg_start(self);

	aq_nic_ndev_init(self);

	err = aq_nic_ndev_register(self);
	if (err < 0)
		goto err_register;

	aq_drvinfo_init(ndev);

	return 0;

err_register:
	aq_nic_free_vectors(self);
	aq_pci_free_irq_vectors(self);
err_hwinit:
	iounmap(self->aq_hw->mmio);
err_free_aq_hw_priv:
	kfree(self->aq_hw->priv);
err_free_aq_hw:
	kfree(self->aq_hw);
err_ioremap:
	free_netdev(ndev);
err_ndev:
	pci_release_regions(pdev);
err_pci_func:
	pci_disable_device(pdev);

	return err;
}

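/* Teardown mirrors probe: unregister the netdev, drop the rx flow
 * classification rules and MACsec state, then release the vectors,
 * the MMIO mapping, the private hw data and the PCI resources.
 */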
static void aq_pci_remove(struct pci_dev *pdev)
{
	struct aq_nic_s *self = pci_get_drvdata(pdev);

	if (self->ndev) {
		aq_clear_rxnfc_all_rules(self);
		if (self->ndev->reg_state == NETREG_REGISTERED)
			unregister_netdev(self->ndev);

#if IS_ENABLED(CONFIG_MACSEC)
		aq_macsec_free(self);
#endif
		aq_nic_free_vectors(self);
		aq_pci_free_irq_vectors(self);
		iounmap(self->aq_hw->mmio);
		kfree(self->aq_hw->priv);
		kfree(self->aq_hw);
		pci_release_regions(pdev);
		free_netdev(self->ndev);
	}

	pci_disable_device(pdev);
}

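/* Shutdown hook: quiesce the NIC and, when the system is powering
 * off, disable wake from D3 and leave the device in D3hot.
 */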
static void aq_pci_shutdown(struct pci_dev *pdev)
{
	struct aq_nic_s *self = pci_get_drvdata(pdev);

	aq_nic_shutdown(self);

	pci_disable_device(pdev);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_PM
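/* Common suspend path shared by the freeze/suspend/poweroff PM
 * callbacks: detach and stop the interface, then deinitialize the
 * hardware, taking the link down unless wake-on-LAN is enabled.
 */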
static int aq_suspend_common(struct device *dev)
{
	struct aq_nic_s *nic = pci_get_drvdata(to_pci_dev(dev));

	rtnl_lock();

	nic->power_state = AQ_HW_POWER_STATE_D3;
	netif_device_detach(nic->ndev);
	netif_tx_stop_all_queues(nic->ndev);

	if (netif_running(nic->ndev))
		aq_nic_stop(nic);

	aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
	aq_nic_set_power(nic);

	rtnl_unlock();

	return 0;
}

static int atl_resume_common(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct aq_nic_s *nic;
	int ret = 0;

	nic = pci_get_drvdata(pdev);

	rtnl_lock();

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	if (netif_running(nic->ndev)) {
		ret = aq_nic_init(nic);
		if (ret)
			goto err_exit;

		ret = aq_nic_start(nic);
		if (ret)
			goto err_exit;
	}

	netif_device_attach(nic->ndev);
	netif_tx_start_all_queues(nic->ndev);

err_exit:
	if (ret < 0)
		aq_nic_deinit(nic, true);

	rtnl_unlock();

	return ret;
}

static int aq_pm_freeze(struct device *dev)
{
	return aq_suspend_common(dev);
}

static int aq_pm_suspend_poweroff(struct device *dev)
{
	return aq_suspend_common(dev);
}

static int aq_pm_thaw(struct device *dev)
{
	return atl_resume_common(dev);
}

static int aq_pm_resume_restore(struct device *dev)
{
	return atl_resume_common(dev);
}

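/* Suspend, poweroff and freeze all funnel into aq_suspend_common();
 * resume, restore and thaw funnel into atl_resume_common().
 */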
static const struct dev_pm_ops aq_pm_ops = {
	.suspend = aq_pm_suspend_poweroff,
	.poweroff = aq_pm_suspend_poweroff,
	.freeze = aq_pm_freeze,
	.resume = aq_pm_resume_restore,
	.restore = aq_pm_resume_restore,
	.thaw = aq_pm_thaw,
};
#endif

static struct pci_driver aq_pci_ops = {
	.name = AQ_CFG_DRV_NAME,
	.id_table = aq_pci_tbl,
	.probe = aq_pci_probe,
	.remove = aq_pci_remove,
	.shutdown = aq_pci_shutdown,
#ifdef CONFIG_PM
	.driver.pm = &aq_pm_ops,
#endif
};

int aq_pci_func_register_driver(void)
{
	return pci_register_driver(&aq_pci_ops);
}

void aq_pci_func_unregister_driver(void)
{
	pci_unregister_driver(&aq_pci_ops);
}