1/*-
2 * Copyright (c) 2010 Isilon Systems, Inc.
3 * Copyright (c) 2010 iX Systems, Inc.
4 * Copyright (c) 2010 Panasas, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice unmodified, this list of conditions, and the following
12 *    disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29#ifndef	_LINUX_PCI_H_
30#define	_LINUX_PCI_H_
31
32#define	CONFIG_PCI_MSI
33
34#include <linux/types.h>
35
36#include <sys/param.h>
37#include <sys/bus.h>
38#include <sys/pciio.h>
39#include <sys/rman.h>
40#include <dev/pci/pcivar.h>
41#include <dev/pci/pcireg.h>
42#include <dev/pci/pci_private.h>
43
44#include <machine/resource.h>
45
46#include <linux/init.h>
47#include <linux/list.h>
48#include <linux/dmapool.h>
49#include <linux/dma-mapping.h>
50#include <linux/compiler.h>
51#include <linux/errno.h>
52#include <asm/atomic.h>
53#include <linux/device.h>
54
/*
 * Device matching entry, modeled on the Linux structure of the same
 * name.  A driver's id_table is terminated by an entry whose vendor
 * field is 0 (see linux_pci_find()).
 */
struct pci_device_id {
	uint32_t	vendor;		/* PCI vendor id or PCI_ANY_ID */
	uint32_t	device;		/* PCI device id or PCI_ANY_ID */
	uint32_t	subvendor;	/* subsystem vendor id or PCI_ANY_ID */
	uint32_t	subdevice;	/* subsystem device id or PCI_ANY_ID */
	uint32_t	class_mask;	/* mask applied when matching class */
	uintptr_t	driver_data;	/* opaque cookie passed to the driver */
};
63
/* Module device tables are metadata for Linux tooling; no-op here. */
#define	MODULE_DEVICE_TABLE(bus, table)
/* Wildcard matching any vendor/device/subsystem id. */
#define	PCI_ANY_ID		(-1)
#define	PCI_VENDOR_ID_MELLANOX			0x15b3
#define	PCI_VENDOR_ID_TOPSPIN			0x1867
#define	PCI_DEVICE_ID_MELLANOX_TAVOR		0x5a44
#define	PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE	0x5a46
#define	PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT	0x6278
#define	PCI_DEVICE_ID_MELLANOX_ARBEL		0x6282
#define	PCI_DEVICE_ID_MELLANOX_SINAI_OLD	0x5e8c
#define	PCI_DEVICE_ID_MELLANOX_SINAI		0x6274

/* Pack/unpack the 5-bit slot and 3-bit function of a devfn value. */
#define PCI_DEVFN(slot, func)   ((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn)         (((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)         ((devfn) & 0x07)

/* Initializers for pci_device_id entries; subsystem ids are wildcarded. */
#define PCI_VDEVICE(_vendor, _device)					\
	    .vendor = PCI_VENDOR_ID_##_vendor, .device = (_device),	\
	    .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
#define	PCI_DEVICE(_vendor, _device)					\
	    .vendor = (_vendor), .device = (_device),			\
	    .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID

/* Recover the enclosing pci_dev from its embedded struct device. */
#define	to_pci_dev(n)	container_of(n, struct pci_dev, dev)

/* Map Linux config-space register names onto FreeBSD PCIR_*/PCIER_*. */
#define	PCI_VENDOR_ID	PCIR_DEVVENDOR
#define	PCI_COMMAND	PCIR_COMMAND
#define	PCI_EXP_DEVCTL	PCIER_DEVICE_CTL
#define	PCI_EXP_LNKCTL	PCIER_LINK_CTL

/* Map Linux resource kinds onto FreeBSD SYS_RES_* types. */
#define	IORESOURCE_MEM	SYS_RES_MEMORY
#define	IORESOURCE_IO	SYS_RES_IOPORT
#define	IORESOURCE_IRQ	SYS_RES_IRQ

struct pci_dev;
98
99
/*
 * Linux-style PCI driver description.  It embeds a FreeBSD driver_t so
 * the driver can be registered with newbus (see pci_register_driver()).
 */
struct pci_driver {
	struct list_head		links;	/* entry on global pci_drivers */
	char				*name;
	struct pci_device_id		*id_table; /* terminated by vendor == 0 */
	int  (*probe)(struct pci_dev *dev, const struct pci_device_id *id);
	void (*remove)(struct pci_dev *dev);
        int  (*suspend) (struct pci_dev *dev, pm_message_t state);      /* Device suspended */
        int  (*resume) (struct pci_dev *dev);                   /* Device woken up */
	driver_t			driver;	/* backing newbus driver */
	devclass_t			bsdclass;
        struct pci_error_handlers       *err_handler;
};
112
/* Global registries of LinuxKPI PCI drivers/devices, guarded by pci_lock. */
extern struct list_head pci_drivers;
extern struct list_head pci_devices;
extern spinlock_t pci_lock;

/* __devexit_p() is the identity here; exit sections are not discarded. */
#define	__devexit_p(x)	x
118
/*
 * Minimal Linux pci_dev emulation; the underlying FreeBSD device_t is
 * reachable as dev.bsddev.
 */
struct pci_dev {
	struct device		dev;
	struct list_head	links;		/* entry on global pci_devices */
	struct pci_driver	*pdrv;
	uint64_t		dma_mask;	/* storage for dev.dma_mask */
	uint16_t		device;
	uint16_t		vendor;
	unsigned int		irq;		/* legacy INTx line, 0 if none */
        unsigned int            devfn;
        u8                      revision;
        struct pci_devinfo      *bus; /* bus this device is on, equivalent to linux struct pci_bus */
};
131
132static inline struct resource_list_entry *
133_pci_get_rle(struct pci_dev *pdev, int type, int rid)
134{
135	struct pci_devinfo *dinfo;
136	struct resource_list *rl;
137
138	dinfo = device_get_ivars(pdev->dev.bsddev);
139	rl = &dinfo->resources;
140	return resource_list_find(rl, type, rid);
141}
142
143static inline struct resource_list_entry *
144_pci_get_bar(struct pci_dev *pdev, int bar)
145{
146	struct resource_list_entry *rle;
147
148	bar = PCIR_BAR(bar);
149	if ((rle = _pci_get_rle(pdev, SYS_RES_MEMORY, bar)) == NULL)
150		rle = _pci_get_rle(pdev, SYS_RES_IOPORT, bar);
151	return (rle);
152}
153
154static inline struct device *
155_pci_find_irq_dev(unsigned int irq)
156{
157	struct pci_dev *pdev;
158
159	spin_lock(&pci_lock);
160	list_for_each_entry(pdev, &pci_devices, links) {
161		if (irq == pdev->dev.irq)
162			break;
163		if (irq >= pdev->dev.msix && irq < pdev->dev.msix_max)
164			break;
165	}
166	spin_unlock(&pci_lock);
167	if (pdev)
168		return &pdev->dev;
169	return (NULL);
170}
171
172static inline unsigned long
173pci_resource_start(struct pci_dev *pdev, int bar)
174{
175	struct resource_list_entry *rle;
176
177	if ((rle = _pci_get_bar(pdev, bar)) == NULL)
178		return (0);
179	return rle->start;
180}
181
182static inline unsigned long
183pci_resource_len(struct pci_dev *pdev, int bar)
184{
185	struct resource_list_entry *rle;
186
187	if ((rle = _pci_get_bar(pdev, bar)) == NULL)
188		return (0);
189	return rle->count;
190}
191
192/*
193 * All drivers just seem to want to inspect the type not flags.
194 */
195static inline int
196pci_resource_flags(struct pci_dev *pdev, int bar)
197{
198	struct resource_list_entry *rle;
199
200	if ((rle = _pci_get_bar(pdev, bar)) == NULL)
201		return (0);
202	return rle->type;
203}
204
205static inline const char *
206pci_name(struct pci_dev *d)
207{
208
209	return device_get_desc(d->dev.bsddev);
210}
211
212static inline void *
213pci_get_drvdata(struct pci_dev *pdev)
214{
215
216	return dev_get_drvdata(&pdev->dev);
217}
218
219static inline void
220pci_set_drvdata(struct pci_dev *pdev, void *data)
221{
222
223	dev_set_drvdata(&pdev->dev, data);
224}
225
226static inline int
227pci_enable_device(struct pci_dev *pdev)
228{
229
230	pci_enable_io(pdev->dev.bsddev, SYS_RES_IOPORT);
231	pci_enable_io(pdev->dev.bsddev, SYS_RES_MEMORY);
232	return (0);
233}
234
/* No-op: decoding enabled by pci_enable_device() is never torn down. */
static inline void
pci_disable_device(struct pci_dev *pdev)
{
}
239
240static inline int
241pci_set_master(struct pci_dev *pdev)
242{
243
244	pci_enable_busmaster(pdev->dev.bsddev);
245	return (0);
246}
247
248static inline int
249pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
250{
251	int rid;
252	int type;
253
254	type = pci_resource_flags(pdev, bar);
255	if (type == 0)
256		return (-ENODEV);
257	rid = PCIR_BAR(bar);
258	if (bus_alloc_resource_any(pdev->dev.bsddev, type, &rid,
259	    RF_ACTIVE) == NULL)
260		return (-EINVAL);
261	return (0);
262}
263
264static inline void
265pci_release_region(struct pci_dev *pdev, int bar)
266{
267	struct resource_list_entry *rle;
268
269	if ((rle = _pci_get_bar(pdev, bar)) == NULL)
270		return;
271	bus_release_resource(pdev->dev.bsddev, rle->type, rle->rid, rle->res);
272}
273
274static inline void
275pci_release_regions(struct pci_dev *pdev)
276{
277	int i;
278
279	for (i = 0; i <= PCIR_MAX_BAR_0; i++)
280		pci_release_region(pdev, i);
281}
282
283static inline int
284pci_request_regions(struct pci_dev *pdev, const char *res_name)
285{
286	int error;
287	int i;
288
289	for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
290		error = pci_request_region(pdev, i, res_name);
291		if (error && error != -ENODEV) {
292			pci_release_regions(pdev);
293			return (error);
294		}
295	}
296	return (0);
297}
298
299static inline void
300pci_disable_msix(struct pci_dev *pdev)
301{
302
303	pci_release_msi(pdev->dev.bsddev);
304}
305
/* Map Linux capability ids onto FreeBSD PCIY_* capability ids. */
#define	PCI_CAP_ID_EXP	PCIY_EXPRESS
#define	PCI_CAP_ID_PCIX	PCIY_PCIX
308
309
310static inline int
311pci_find_capability(struct pci_dev *pdev, int capid)
312{
313	int reg;
314
315	if (pci_find_cap(pdev->dev.bsddev, capid, &reg))
316		return (0);
317	return (reg);
318}
319
320
321
322
323/**
324 * pci_pcie_cap - get the saved PCIe capability offset
325 * @dev: PCI device
326 *
327 * PCIe capability offset is calculated at PCI device initialization
328 * time and saved in the data structure. This function returns saved
329 * PCIe capability offset. Using this instead of pci_find_capability()
330 * reduces unnecessary search in the PCI configuration space. If you
331 * need to calculate PCIe capability offset from raw device for some
332 * reasons, please use pci_find_capability() instead.
333 */
334static inline int pci_pcie_cap(struct pci_dev *dev)
335{
336        return pci_find_capability(dev, PCI_CAP_ID_EXP);
337}
338
339
340static inline int
341pci_read_config_byte(struct pci_dev *pdev, int where, u8 *val)
342{
343
344	*val = (u8)pci_read_config(pdev->dev.bsddev, where, 1);
345	return (0);
346}
347
348static inline int
349pci_read_config_word(struct pci_dev *pdev, int where, u16 *val)
350{
351
352	*val = (u16)pci_read_config(pdev->dev.bsddev, where, 2);
353	return (0);
354}
355
356static inline int
357pci_read_config_dword(struct pci_dev *pdev, int where, u32 *val)
358{
359
360	*val = (u32)pci_read_config(pdev->dev.bsddev, where, 4);
361	return (0);
362}
363
364static inline int
365pci_write_config_byte(struct pci_dev *pdev, int where, u8 val)
366{
367
368	pci_write_config(pdev->dev.bsddev, where, val, 1);
369	return (0);
370}
371
372static inline int
373pci_write_config_word(struct pci_dev *pdev, int where, u16 val)
374{
375
376	pci_write_config(pdev->dev.bsddev, where, val, 2);
377	return (0);
378}
379
380static inline int
381pci_write_config_dword(struct pci_dev *pdev, int where, u32 val)
382{
383
384	pci_write_config(pdev->dev.bsddev, where, val, 4);
385	return (0);
386}
387
388static struct pci_driver *
389linux_pci_find(device_t dev, struct pci_device_id **idp)
390{
391	struct pci_device_id *id;
392	struct pci_driver *pdrv;
393	uint16_t vendor;
394	uint16_t device;
395
396	vendor = pci_get_vendor(dev);
397	device = pci_get_device(dev);
398
399	spin_lock(&pci_lock);
400	list_for_each_entry(pdrv, &pci_drivers, links) {
401		for (id = pdrv->id_table; id->vendor != 0; id++) {
402			if (vendor == id->vendor && device == id->device) {
403				*idp = id;
404				spin_unlock(&pci_lock);
405				return (pdrv);
406			}
407		}
408	}
409	spin_unlock(&pci_lock);
410	return (NULL);
411}
412
413static inline int
414linux_pci_probe(device_t dev)
415{
416	struct pci_device_id *id;
417	struct pci_driver *pdrv;
418
419	if ((pdrv = linux_pci_find(dev, &id)) == NULL)
420		return (ENXIO);
421	if (device_get_driver(dev) != &pdrv->driver)
422		return (ENXIO);
423	device_set_desc(dev, pdrv->name);
424	return (0);
425}
426
/*
 * Newbus attach method: initialize the softc as a Linux pci_dev, link
 * it on the global device list and invoke the Linux driver's probe().
 */
static inline int
linux_pci_attach(device_t dev)
{
	struct resource_list_entry *rle;
	struct pci_dev *pdev;
	struct pci_driver *pdrv;
	struct pci_device_id *id;
	int error;

	/* linux_pci_probe() succeeded, so a matching driver/id exists. */
	pdrv = linux_pci_find(dev, &id);
	pdev = device_get_softc(dev);
	pdev->dev.parent = &linux_rootdev;
	pdev->dev.bsddev = dev;
	INIT_LIST_HEAD(&pdev->dev.irqents);
	pdev->device = id->device;
	pdev->vendor = id->vendor;
	pdev->dev.dma_mask = &pdev->dma_mask;
	pdev->pdrv = pdrv;
	kobject_init(&pdev->dev.kobj, &dev_ktype);
	kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
	kobject_add(&pdev->dev.kobj, &linux_rootdev.kobj,
	    kobject_name(&pdev->dev.kobj));
	/* Record the legacy INTx line, if any (rid 0 of SYS_RES_IRQ). */
	rle = _pci_get_rle(pdev, SYS_RES_IRQ, 0);
	if (rle)
		pdev->dev.irq = rle->start;
	else
		pdev->dev.irq = 0;
	pdev->irq = pdev->dev.irq;
	/* Drop Giant across the Linux driver callback; reacquired below. */
	mtx_unlock(&Giant);
	spin_lock(&pci_lock);
	list_add(&pdev->links, &pci_devices);
	spin_unlock(&pci_lock);
	error = pdrv->probe(pdev, id);
	mtx_lock(&Giant);
	if (error) {
		/* Probe failed: unlink and drop the device reference. */
		spin_lock(&pci_lock);
		list_del(&pdev->links);
		spin_unlock(&pci_lock);
		put_device(&pdev->dev);
		return (-error);
	}
	return (0);
}
470
/*
 * Newbus detach method: run the Linux driver's remove() callback, then
 * unlink the device and drop its reference.
 */
static inline int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	pdev = device_get_softc(dev);
	/* Giant is dropped around the callback, mirroring linux_pci_attach(). */
	mtx_unlock(&Giant);
	pdev->pdrv->remove(pdev);
	mtx_lock(&Giant);
	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);

	return (0);
}
487
/* Newbus method table shared by every LinuxKPI PCI driver. */
static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	{0, 0}
};
494
495static inline int
496pci_register_driver(struct pci_driver *pdrv)
497{
498	devclass_t bus;
499	int error;
500
501	spin_lock(&pci_lock);
502	list_add(&pdrv->links, &pci_drivers);
503	spin_unlock(&pci_lock);
504	bus = devclass_find("pci");
505	pdrv->driver.name = pdrv->name;
506	pdrv->driver.methods = pci_methods;
507	pdrv->driver.size = sizeof(struct pci_dev);
508	mtx_lock(&Giant);
509	error = devclass_add_driver(bus, &pdrv->driver, BUS_PASS_DEFAULT,
510	    &pdrv->bsdclass);
511	mtx_unlock(&Giant);
512	if (error)
513		return (-error);
514	return (0);
515}
516
517static inline void
518pci_unregister_driver(struct pci_driver *pdrv)
519{
520	devclass_t bus;
521
522	list_del(&pdrv->links);
523	bus = devclass_find("pci");
524	mtx_lock(&Giant);
525	devclass_delete_driver(bus, &pdrv->driver);
526	mtx_unlock(&Giant);
527}
528
/*
 * MSI-X allocation request/result pair; "vector" receives the assigned
 * IRQ number on success (see pci_enable_msix()).
 */
struct msix_entry {
	int entry;
	int vector;
};
533
534/*
535 * Enable msix, positive errors indicate actual number of available
536 * vectors.  Negative errors are failures.
537 */
538static inline int
539pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries, int nreq)
540{
541	struct resource_list_entry *rle;
542	int error;
543	int avail;
544	int i;
545
546	avail = pci_msix_count(pdev->dev.bsddev);
547	if (avail < nreq) {
548		if (avail == 0)
549			return -EINVAL;
550		return avail;
551	}
552	avail = nreq;
553	if ((error = -pci_alloc_msix(pdev->dev.bsddev, &avail)) != 0)
554		return error;
555	rle = _pci_get_rle(pdev, SYS_RES_IRQ, 1);
556	pdev->dev.msix = rle->start;
557	pdev->dev.msix_max = rle->start + avail;
558	for (i = 0; i < nreq; i++)
559		entries[i].vector = pdev->dev.msix + i;
560	return (0);
561}
562
/* Error recovery is not implemented; the channel is never offline. */
static inline int
pci_channel_offline(struct pci_dev *pdev)
{
	return (0);
}
567
/* SR-IOV is not supported by this compat layer. */
static inline int
pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{
	return (-ENODEV);
}
/* SR-IOV is not supported; nothing to disable. */
static inline void pci_disable_sriov(struct pci_dev *dev)
{
}
575
576/**
577 * DEFINE_PCI_DEVICE_TABLE - macro used to describe a pci device table
578 * @_table: device table name
579 *
580 * This macro is used to create a struct pci_device_id array (a device table)
581 * in a generic manner.
582 */
583#define DEFINE_PCI_DEVICE_TABLE(_table) \
584	const struct pci_device_id _table[] __devinitdata
585
586
587/* XXX This should not be necessary. */
588#define	pcix_set_mmrbc(d, v)	0
589#define	pcix_get_max_mmrbc(d)	0
590#define	pcie_set_readrq(d, v)	0
591
592#define	PCI_DMA_BIDIRECTIONAL	0
593#define	PCI_DMA_TODEVICE	1
594#define	PCI_DMA_FROMDEVICE	2
595#define	PCI_DMA_NONE		3
596
/*
 * The legacy pci_* DMA helpers are thin wrappers over the dma_* API;
 * a NULL pci_dev maps to a NULL struct device.
 */
#define	pci_pool		dma_pool
#define pci_pool_destroy	dma_pool_destroy
#define pci_pool_alloc		dma_pool_alloc
#define pci_pool_free		dma_pool_free
#define	pci_pool_create(_name, _pdev, _size, _align, _alloc)		\
	    dma_pool_create(_name, &(_pdev)->dev, _size, _align, _alloc)
#define	pci_free_consistent(_hwdev, _size, _vaddr, _dma_handle)		\
	    dma_free_coherent((_hwdev) == NULL ? NULL : &(_hwdev)->dev,	\
		_size, _vaddr, _dma_handle)
#define	pci_map_sg(_hwdev, _sg, _nents, _dir)				\
	    dma_map_sg((_hwdev) == NULL ? NULL : &(_hwdev->dev),	\
		_sg, _nents, (enum dma_data_direction)_dir)
#define	pci_map_single(_hwdev, _ptr, _size, _dir)			\
	    dma_map_single((_hwdev) == NULL ? NULL : &(_hwdev->dev),	\
		(_ptr), (_size), (enum dma_data_direction)_dir)
#define	pci_unmap_single(_hwdev, _addr, _size, _dir)			\
	    dma_unmap_single((_hwdev) == NULL ? NULL : &(_hwdev)->dev,	\
		_addr, _size, (enum dma_data_direction)_dir)
#define	pci_unmap_sg(_hwdev, _sg, _nents, _dir)				\
	    dma_unmap_sg((_hwdev) == NULL ? NULL : &(_hwdev)->dev,	\
		_sg, _nents, (enum dma_data_direction)_dir)
#define	pci_map_page(_hwdev, _page, _offset, _size, _dir)		\
	    dma_map_page((_hwdev) == NULL ? NULL : &(_hwdev)->dev, _page,\
		_offset, _size, (enum dma_data_direction)_dir)
#define	pci_unmap_page(_hwdev, _dma_address, _size, _dir)		\
	    dma_unmap_page((_hwdev) == NULL ? NULL : &(_hwdev)->dev,	\
		_dma_address, _size, (enum dma_data_direction)_dir)
#define	pci_set_dma_mask(_pdev, mask)	dma_set_mask(&(_pdev)->dev, (mask))
#define	pci_dma_mapping_error(_pdev, _dma_addr)				\
	    dma_mapping_error(&(_pdev)->dev, _dma_addr)
#define	pci_set_consistent_dma_mask(_pdev, _mask)			\
	    dma_set_coherent_mask(&(_pdev)->dev, (_mask))
#define	DECLARE_PCI_UNMAP_ADDR(x)	DEFINE_DMA_UNMAP_ADDR(x);
#define	DECLARE_PCI_UNMAP_LEN(x)	DEFINE_DMA_UNMAP_LEN(x);
#define	pci_unmap_addr		dma_unmap_addr
#define	pci_unmap_addr_set	dma_unmap_addr_set
#define	pci_unmap_len		dma_unmap_len
#define	pci_unmap_len_set	dma_unmap_len_set
635
/* Sparse-annotated opaque types; the annotations are no-ops here. */
typedef unsigned int __bitwise pci_channel_state_t;
typedef unsigned int __bitwise pci_ers_result_t;

/* Health of the I/O channel to a device, as reported to err_handler. */
enum pci_channel_state {
        /* I/O channel is in normal state */
        pci_channel_io_normal = (__force pci_channel_state_t) 1,

        /* I/O to channel is blocked */
        pci_channel_io_frozen = (__force pci_channel_state_t) 2,

        /* PCI card is dead */
        pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
};
649
/* Results a driver's error-recovery callbacks may report. */
enum pci_ers_result {
        /* no result/none/not supported in device driver */
        PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,

        /* Device driver can recover without slot reset */
        PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,

        /* Device driver wants slot to be reset. */
        PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,

        /* Device has completely failed, is unrecoverable */
        PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,

        /* Device driver is fully recovered and operational */
        PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,
};
666
667
/*
 * PCI bus error event callbacks (see struct pci_driver.err_handler).
 * NOTE(review): nothing in this header invokes these callbacks;
 * presumably they exist only so Linux drivers compile — confirm.
 */
struct pci_error_handlers {
        /* PCI bus error detected on this device */
        pci_ers_result_t (*error_detected)(struct pci_dev *dev,
                        enum pci_channel_state error);

        /* MMIO has been re-enabled, but not DMA */
        pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);

        /* PCI Express link has been reset */
        pci_ers_result_t (*link_reset)(struct pci_dev *dev);

        /* PCI slot has been reset */
        pci_ers_result_t (*slot_reset)(struct pci_dev *dev);

        /* Device driver may resume normal operations */
        void (*resume)(struct pci_dev *dev);
};
686
687
688
689#endif	/* _LINUX_PCI_H_ */
690