• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/drivers/pci/
1/*
2 * drivers/pci/iov.c
3 *
4 * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
5 *
6 * PCI Express I/O Virtualization (IOV) support.
7 *   Single Root IOV 1.0
8 *   Address Translation Service 1.0
9 */
10
11#include <linux/pci.h>
12#include <linux/slab.h>
13#include <linux/mutex.h>
14#include <linux/string.h>
15#include <linux/delay.h>
16#include "pci.h"
17
18#define VIRTFN_ID_LEN	16
19
20static inline u8 virtfn_bus(struct pci_dev *dev, int id)
21{
22	return dev->bus->number + ((dev->devfn + dev->sriov->offset +
23				    dev->sriov->stride * id) >> 8);
24}
25
26static inline u8 virtfn_devfn(struct pci_dev *dev, int id)
27{
28	return (dev->devfn + dev->sriov->offset +
29		dev->sriov->stride * id) & 0xff;
30}
31
/*
 * Find or create the bus that a VF with bus number @busnr will live on.
 * Returns the PF's own bus when the numbers match, an already-registered
 * bus when one exists in this domain, or a newly added child of @bus.
 * Returns NULL on allocation or registration failure.
 */
static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
{
	int rc;
	struct pci_bus *child;

	if (bus->number == busnr)
		return bus;

	child = pci_find_bus(pci_domain_nr(bus), busnr);
	if (child)
		return child;

	child = pci_add_new_bus(bus, NULL, busnr);
	if (!child)
		return NULL;

	/* The virtual bus spans exactly one bus number. */
	child->subordinate = busnr;
	child->dev.parent = bus->bridge;
	rc = pci_bus_add_child(child);
	if (rc) {
		pci_remove_bus(child);
		return NULL;
	}

	return child;
}
58
59static void virtfn_remove_bus(struct pci_bus *bus, int busnr)
60{
61	struct pci_bus *child;
62
63	if (bus->number == busnr)
64		return;
65
66	child = pci_find_bus(pci_domain_nr(bus), busnr);
67	BUG_ON(!child);
68
69	if (list_empty(&child->devices))
70		pci_remove_bus(child);
71}
72
/*
 * Create and register the pci_dev for Virtual Function @id of PF @dev.
 * @reset: perform a function-level reset on the new VF before use.
 *
 * Allocates the VF pci_dev, places it on the proper (possibly new) bus,
 * carves its BARs out of the PF's IOV resources, registers it with the
 * driver core and creates the "virtfnN"/"physfn" sysfs links.
 *
 * Returns 0 on success, or a negative errno.
 */
static int virtfn_add(struct pci_dev *dev, int id, int reset)
{
	int i;
	int rc;
	u64 size;
	char buf[VIRTFN_ID_LEN];
	struct pci_dev *virtfn;
	struct resource *res;
	struct pci_sriov *iov = dev->sriov;

	virtfn = alloc_pci_dev();
	if (!virtfn)
		return -ENOMEM;

	/* iov->dev is the PF whose sriov->lock serializes VF bus changes. */
	mutex_lock(&iov->dev->sriov->lock);
	virtfn->bus = virtfn_add_bus(dev->bus, virtfn_bus(dev, id));
	if (!virtfn->bus) {
		kfree(virtfn);
		mutex_unlock(&iov->dev->sriov->lock);
		return -ENOMEM;
	}
	virtfn->devfn = virtfn_devfn(dev, id);
	virtfn->vendor = dev->vendor;
	/* The VF device ID comes from the PF's SR-IOV capability. */
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device);
	pci_setup_device(virtfn);
	virtfn->dev.parent = dev->dev.parent;

	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = dev->resource + PCI_IOV_RESOURCES + i;
		if (!res->parent)
			continue;
		virtfn->resource[i].name = pci_name(virtfn);
		virtfn->resource[i].flags = res->flags;
		/* Each VF gets a 1/total slice of the PF's IOV BAR. */
		size = resource_size(res);
		do_div(size, iov->total);
		virtfn->resource[i].start = res->start + size * id;
		virtfn->resource[i].end = virtfn->resource[i].start + size - 1;
		rc = request_resource(res, &virtfn->resource[i]);
		/* The slice lies inside the parent by construction. */
		BUG_ON(rc);
	}

	if (reset)
		__pci_reset_function(virtfn);

	pci_device_add(virtfn, virtfn->bus);
	mutex_unlock(&iov->dev->sriov->lock);

	/* Hold a PF reference for the VF's lifetime; dropped on removal. */
	virtfn->physfn = pci_dev_get(dev);
	virtfn->is_virtfn = 1;

	rc = pci_bus_add_device(virtfn);
	if (rc)
		goto failed1;
	sprintf(buf, "virtfn%u", id);
	rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
	if (rc)
		goto failed1;
	rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
	if (rc)
		goto failed2;

	kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);

	return 0;

failed2:
	sysfs_remove_link(&dev->dev.kobj, buf);
failed1:
	pci_dev_put(dev);
	mutex_lock(&iov->dev->sriov->lock);
	pci_remove_bus_device(virtfn);
	virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
	mutex_unlock(&iov->dev->sriov->lock);

	return rc;
}
149
/*
 * Remove the pci_dev of Virtual Function @id of PF @dev.
 * @reset: detach the driver and reset the VF first (VF migration path).
 *
 * Silently returns if the VF's bus or slot cannot be found.
 */
static void virtfn_remove(struct pci_dev *dev, int id, int reset)
{
	char buf[VIRTFN_ID_LEN];
	struct pci_bus *bus;
	struct pci_dev *virtfn;
	struct pci_sriov *iov = dev->sriov;

	bus = pci_find_bus(pci_domain_nr(dev->bus), virtfn_bus(dev, id));
	if (!bus)
		return;

	virtfn = pci_get_slot(bus, virtfn_devfn(dev, id));
	if (!virtfn)
		return;

	/* Drop the reference taken by pci_get_slot() above. */
	pci_dev_put(virtfn);

	if (reset) {
		device_release_driver(&virtfn->dev);
		__pci_reset_function(virtfn);
	}

	sprintf(buf, "virtfn%u", id);
	sysfs_remove_link(&dev->dev.kobj, buf);
	sysfs_remove_link(&virtfn->dev.kobj, "physfn");

	/* iov->dev is the PF whose sriov->lock serializes VF bus changes. */
	mutex_lock(&iov->dev->sriov->lock);
	pci_remove_bus_device(virtfn);
	virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
	mutex_unlock(&iov->dev->sriov->lock);

	/* Release the PF reference taken by virtfn_add(). */
	pci_dev_put(dev);
}
183
184static int sriov_migration(struct pci_dev *dev)
185{
186	u16 status;
187	struct pci_sriov *iov = dev->sriov;
188
189	if (!iov->nr_virtfn)
190		return 0;
191
192	if (!(iov->cap & PCI_SRIOV_CAP_VFM))
193		return 0;
194
195	pci_read_config_word(dev, iov->pos + PCI_SRIOV_STATUS, &status);
196	if (!(status & PCI_SRIOV_STATUS_VFM))
197		return 0;
198
199	schedule_work(&iov->mtask);
200
201	return 1;
202}
203
/*
 * Work handler for VF Migration: walk the VF Migration State Array and
 * migrate each VF in or out according to its entry, then clear the
 * VF Migration Status bit in the SR-IOV status register.
 */
static void sriov_migration_task(struct work_struct *work)
{
	int i;
	u8 state;
	u16 status;
	struct pci_sriov *iov = container_of(work, struct pci_sriov, mtask);

	/* Only VFs beyond InitialVFs participate in migration. */
	for (i = iov->initial; i < iov->nr_virtfn; i++) {
		state = readb(iov->mstate + i);
		if (state == PCI_SRIOV_VFM_MI) {
			/* Migrate-In request: mark available, then add. */
			writeb(PCI_SRIOV_VFM_AV, iov->mstate + i);
			/* Re-read: the device may refuse the transition. */
			state = readb(iov->mstate + i);
			if (state == PCI_SRIOV_VFM_AV)
				virtfn_add(iov->self, i, 1);
		} else if (state == PCI_SRIOV_VFM_MO) {
			/* Migrate-Out request: remove, mark unavailable. */
			virtfn_remove(iov->self, i, 1);
			writeb(PCI_SRIOV_VFM_UA, iov->mstate + i);
			/* If it flipped back to available, re-add the VF. */
			state = readb(iov->mstate + i);
			if (state == PCI_SRIOV_VFM_AV)
				virtfn_add(iov->self, i, 0);
		}
	}

	pci_read_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, &status);
	status &= ~PCI_SRIOV_STATUS_VFM;
	pci_write_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, status);
}
231
/*
 * Set up VF Migration for @dev: map the VF Migration State Array and
 * enable VF Migration plus its interrupt in the SR-IOV control register.
 *
 * Returns 0 on success (including when @nr_virtfn does not exceed
 * InitialVFs, so no VFs are migratable), or a negative errno.
 */
static int sriov_enable_migration(struct pci_dev *dev, int nr_virtfn)
{
	int bir;
	u32 table;
	resource_size_t pa;
	struct pci_sriov *iov = dev->sriov;

	if (nr_virtfn <= iov->initial)
		return 0;

	pci_read_config_dword(dev, iov->pos + PCI_SRIOV_VFM, &table);
	bir = PCI_SRIOV_VFM_BIR(table);
	if (bir > PCI_STD_RESOURCE_END)
		return -EIO;

	table = PCI_SRIOV_VFM_OFFSET(table);
	/* One byte of migration state per VF; must fit inside the BAR. */
	if (table + nr_virtfn > pci_resource_len(dev, bir))
		return -EIO;

	pa = pci_resource_start(dev, bir) + table;
	iov->mstate = ioremap(pa, nr_virtfn);
	if (!iov->mstate)
		return -ENOMEM;

	INIT_WORK(&iov->mtask, sriov_migration_task);

	iov->ctrl |= PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR;
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);

	return 0;
}
263
/*
 * Tear down VF Migration on @dev: disable it in hardware first (so no new
 * work gets scheduled), then flush pending work and unmap the state array.
 */
static void sriov_disable_migration(struct pci_dev *dev)
{
	struct pci_sriov *iov = dev->sriov;

	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);

	cancel_work_sync(&iov->mtask);
	iounmap(iov->mstate);
}
274
275static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
276{
277	int rc;
278	int i, j;
279	int nres;
280	u16 offset, stride, initial;
281	struct resource *res;
282	struct pci_dev *pdev;
283	struct pci_sriov *iov = dev->sriov;
284
285	if (!nr_virtfn)
286		return 0;
287
288	if (iov->nr_virtfn)
289		return -EINVAL;
290
291	pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial);
292	if (initial > iov->total ||
293	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total)))
294		return -EIO;
295
296	if (nr_virtfn < 0 || nr_virtfn > iov->total ||
297	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
298		return -EINVAL;
299
300	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
301	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset);
302	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride);
303	if (!offset || (nr_virtfn > 1 && !stride))
304		return -EIO;
305
306	nres = 0;
307	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
308		res = dev->resource + PCI_IOV_RESOURCES + i;
309		if (res->parent)
310			nres++;
311	}
312	if (nres != iov->nres) {
313		dev_err(&dev->dev, "not enough MMIO resources for SR-IOV\n");
314		return -ENOMEM;
315	}
316
317	iov->offset = offset;
318	iov->stride = stride;
319
320	if (virtfn_bus(dev, nr_virtfn - 1) > dev->bus->subordinate) {
321		dev_err(&dev->dev, "SR-IOV: bus number out of range\n");
322		return -ENOMEM;
323	}
324
325	if (iov->link != dev->devfn) {
326		pdev = pci_get_slot(dev->bus, iov->link);
327		if (!pdev)
328			return -ENODEV;
329
330		pci_dev_put(pdev);
331
332		if (!pdev->is_physfn)
333			return -ENODEV;
334
335		rc = sysfs_create_link(&dev->dev.kobj,
336					&pdev->dev.kobj, "dep_link");
337		if (rc)
338			return rc;
339	}
340
341	iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
342	pci_block_user_cfg_access(dev);
343	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
344	msleep(100);
345	pci_unblock_user_cfg_access(dev);
346
347	iov->initial = initial;
348	if (nr_virtfn < initial)
349		initial = nr_virtfn;
350
351	for (i = 0; i < initial; i++) {
352		rc = virtfn_add(dev, i, 0);
353		if (rc)
354			goto failed;
355	}
356
357	if (iov->cap & PCI_SRIOV_CAP_VFM) {
358		rc = sriov_enable_migration(dev, nr_virtfn);
359		if (rc)
360			goto failed;
361	}
362
363	kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
364	iov->nr_virtfn = nr_virtfn;
365
366	return 0;
367
368failed:
369	for (j = 0; j < i; j++)
370		virtfn_remove(dev, j, 0);
371
372	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
373	pci_block_user_cfg_access(dev);
374	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
375	ssleep(1);
376	pci_unblock_user_cfg_access(dev);
377
378	if (iov->link != dev->devfn)
379		sysfs_remove_link(&dev->dev.kobj, "dep_link");
380
381	return rc;
382}
383
/*
 * Disable SR-IOV on @dev: tear down VF Migration (if active), remove all
 * VFs, clear VF Enable + VF Memory Space Enable and drop the "dep_link"
 * if one was created.  No-op when SR-IOV is not currently enabled.
 */
static void sriov_disable(struct pci_dev *dev)
{
	int i;
	struct pci_sriov *iov = dev->sriov;

	if (!iov->nr_virtfn)
		return;

	if (iov->cap & PCI_SRIOV_CAP_VFM)
		sriov_disable_migration(dev);

	for (i = 0; i < iov->nr_virtfn; i++)
		virtfn_remove(dev, i, 0);

	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
	pci_block_user_cfg_access(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	/* Let the device settle after clearing VF Enable (cf. sriov_init). */
	ssleep(1);
	pci_unblock_user_cfg_access(dev);

	if (iov->link != dev->devfn)
		sysfs_remove_link(&dev->dev.kobj, "dep_link");

	iov->nr_virtfn = 0;
}
409
/*
 * Probe the SR-IOV capability at config offset @pos of @dev.
 * Reads the capability registers, sizes the VF BARs (scaled by TotalVFs)
 * and allocates the pci_sriov bookkeeping hung off dev->sriov.
 *
 * Returns 0 on success (or when TotalVFs is 0), or a negative errno.
 */
static int sriov_init(struct pci_dev *dev, int pos)
{
	int i;
	int rc;
	int nres;
	u32 pgsz;
	u16 ctrl, total, offset, stride;
	struct pci_sriov *iov;
	struct resource *res;
	struct pci_dev *pdev;

	/* SR-IOV applies only to endpoints (incl. RC integrated endpoints). */
	if (dev->pcie_type != PCI_EXP_TYPE_RC_END &&
	    dev->pcie_type != PCI_EXP_TYPE_ENDPOINT)
		return -ENODEV;

	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
	if (ctrl & PCI_SRIOV_CTRL_VFE) {
		/* Left enabled by firmware or a previous kernel: reset it. */
		pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, 0);
		ssleep(1);
	}

	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
	if (!total)
		return 0;

	ctrl = 0;
	/* Request ARI only when this is the first PF found on the bus. */
	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev->is_physfn)
			goto found;

	pdev = NULL;
	if (pci_ari_enabled(dev->bus))
		ctrl |= PCI_SRIOV_CTRL_ARI;

found:
	pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
	/* Program NumVFs = TotalVFs so offset/stride reflect the maximum. */
	pci_write_config_word(dev, pos + PCI_SRIOV_NUM_VF, total);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
	if (!offset || (total > 1 && !stride))
		return -EIO;

	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
	/* Mask out supported page sizes smaller than the system page size. */
	i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
	pgsz &= ~((1 << i) - 1);
	if (!pgsz)
		return -EIO;

	/* Keep only the lowest remaining bit: the smallest usable size. */
	pgsz &= ~(pgsz - 1);
	pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);

	nres = 0;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = dev->resource + PCI_IOV_RESOURCES + i;
		/* __pci_read_base() returns 1 extra for a 64-bit BAR. */
		i += __pci_read_base(dev, pci_bar_unknown, res,
				     pos + PCI_SRIOV_BAR + i * 4);
		if (!res->flags)
			continue;
		if (resource_size(res) & (PAGE_SIZE - 1)) {
			rc = -EIO;
			goto failed;
		}
		/* Grow the PF resource to cover this BAR for every VF. */
		res->end = res->start + resource_size(res) * total - 1;
		nres++;
	}

	iov = kzalloc(sizeof(*iov), GFP_KERNEL);
	if (!iov) {
		rc = -ENOMEM;
		goto failed;
	}

	iov->pos = pos;
	iov->nres = nres;
	iov->ctrl = ctrl;
	iov->total = total;
	iov->offset = offset;
	iov->stride = stride;
	iov->pgsz = pgsz;
	iov->self = dev;
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
	if (dev->pcie_type == PCI_EXP_TYPE_RC_END)
		iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);

	/* Later PFs share the first PF's pci_dev via iov->dev. */
	if (pdev)
		iov->dev = pci_dev_get(pdev);
	else
		iov->dev = dev;

	mutex_init(&iov->lock);

	dev->sriov = iov;
	dev->is_physfn = 1;

	return 0;

failed:
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = dev->resource + PCI_IOV_RESOURCES + i;
		res->flags = 0;
	}

	return rc;
}
515
516static void sriov_release(struct pci_dev *dev)
517{
518	BUG_ON(dev->sriov->nr_virtfn);
519
520	if (dev != dev->sriov->dev)
521		pci_dev_put(dev->sriov->dev);
522
523	mutex_destroy(&dev->sriov->lock);
524
525	kfree(dev->sriov);
526	dev->sriov = NULL;
527}
528
/*
 * Re-program the SR-IOV registers of @dev from the values cached in
 * dev->sriov.  Skipped when VF Enable is already set in hardware.
 */
static void sriov_restore_state(struct pci_dev *dev)
{
	int i;
	u16 ctrl;
	struct pci_sriov *iov = dev->sriov;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &ctrl);
	if (ctrl & PCI_SRIOV_CTRL_VFE)
		return;

	/* Rewrite the IOV BARs before re-enabling VF memory space. */
	for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++)
		pci_update_resource(dev, i);

	pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, iov->nr_virtfn);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	/* Same settle delay as sriov_enable() after setting VF Enable. */
	if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
		msleep(100);
}
548
549/**
550 * pci_iov_init - initialize the IOV capability
551 * @dev: the PCI device
552 *
553 * Returns 0 on success, or negative on failure.
554 */
555int pci_iov_init(struct pci_dev *dev)
556{
557	int pos;
558
559	if (!pci_is_pcie(dev))
560		return -ENODEV;
561
562	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
563	if (pos)
564		return sriov_init(dev, pos);
565
566	return -ENODEV;
567}
568
569/**
570 * pci_iov_release - release resources used by the IOV capability
571 * @dev: the PCI device
572 */
573void pci_iov_release(struct pci_dev *dev)
574{
575	if (dev->is_physfn)
576		sriov_release(dev);
577}
578
579/**
580 * pci_iov_resource_bar - get position of the SR-IOV BAR
581 * @dev: the PCI device
582 * @resno: the resource number
583 * @type: the BAR type to be filled in
584 *
585 * Returns position of the BAR encapsulated in the SR-IOV capability.
586 */
587int pci_iov_resource_bar(struct pci_dev *dev, int resno,
588			 enum pci_bar_type *type)
589{
590	if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
591		return 0;
592
593	BUG_ON(!dev->is_physfn);
594
595	*type = pci_bar_unknown;
596
597	return dev->sriov->pos + PCI_SRIOV_BAR +
598		4 * (resno - PCI_IOV_RESOURCES);
599}
600
601/**
602 * pci_sriov_resource_alignment - get resource alignment for VF BAR
603 * @dev: the PCI device
604 * @resno: the resource number
605 *
606 * Returns the alignment of the VF BAR found in the SR-IOV capability.
607 * This is not the same as the resource size which is defined as
608 * the VF BAR size multiplied by the number of VFs.  The alignment
609 * is just the VF BAR size.
610 */
611resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
612{
613	struct resource tmp;
614	enum pci_bar_type type;
615	int reg = pci_iov_resource_bar(dev, resno, &type);
616
617	if (!reg)
618		return 0;
619
620	 __pci_read_base(dev, type, &tmp, reg);
621	return resource_alignment(&tmp);
622}
623
624/**
625 * pci_restore_iov_state - restore the state of the IOV capability
626 * @dev: the PCI device
627 */
628void pci_restore_iov_state(struct pci_dev *dev)
629{
630	if (dev->is_physfn)
631		sriov_restore_state(dev);
632}
633
634/**
635 * pci_iov_bus_range - find bus range used by Virtual Function
636 * @bus: the PCI bus
637 *
638 * Returns max number of buses (exclude current one) used by Virtual
639 * Functions.
640 */
641int pci_iov_bus_range(struct pci_bus *bus)
642{
643	int max = 0;
644	u8 busnr;
645	struct pci_dev *dev;
646
647	list_for_each_entry(dev, &bus->devices, bus_list) {
648		if (!dev->is_physfn)
649			continue;
650		busnr = virtfn_bus(dev, dev->sriov->total - 1);
651		if (busnr > max)
652			max = busnr;
653	}
654
655	return max ? max - bus->number : 0;
656}
657
658/**
659 * pci_enable_sriov - enable the SR-IOV capability
660 * @dev: the PCI device
661 * @nr_virtfn: number of virtual functions to enable
662 *
663 * Returns 0 on success, or negative on failure.
664 */
665int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
666{
667	might_sleep();
668
669	if (!dev->is_physfn)
670		return -ENODEV;
671
672	return sriov_enable(dev, nr_virtfn);
673}
674EXPORT_SYMBOL_GPL(pci_enable_sriov);
675
676/**
677 * pci_disable_sriov - disable the SR-IOV capability
678 * @dev: the PCI device
679 */
680void pci_disable_sriov(struct pci_dev *dev)
681{
682	might_sleep();
683
684	if (!dev->is_physfn)
685		return;
686
687	sriov_disable(dev);
688}
689EXPORT_SYMBOL_GPL(pci_disable_sriov);
690
691/**
692 * pci_sriov_migration - notify SR-IOV core of Virtual Function Migration
693 * @dev: the PCI device
694 *
695 * Returns IRQ_HANDLED if the IRQ is handled, or IRQ_NONE if not.
696 *
697 * Physical Function driver is responsible to register IRQ handler using
698 * VF Migration Interrupt Message Number, and call this function when the
699 * interrupt is generated by the hardware.
700 */
701irqreturn_t pci_sriov_migration(struct pci_dev *dev)
702{
703	if (!dev->is_physfn)
704		return IRQ_NONE;
705
706	return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE;
707}
708EXPORT_SYMBOL_GPL(pci_sriov_migration);
709
/**
 * pci_num_vf - return number of VFs associated with a PF
 * @dev: the PCI device
 *
 * Returns number of VFs, or 0 if SR-IOV is not enabled.
 */
int pci_num_vf(struct pci_dev *dev)
{
	if (!dev || !dev->is_physfn)
		return 0;
	else
		return dev->sriov->nr_virtfn;
}
EXPORT_SYMBOL_GPL(pci_num_vf);
724
725static int ats_alloc_one(struct pci_dev *dev, int ps)
726{
727	int pos;
728	u16 cap;
729	struct pci_ats *ats;
730
731	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
732	if (!pos)
733		return -ENODEV;
734
735	ats = kzalloc(sizeof(*ats), GFP_KERNEL);
736	if (!ats)
737		return -ENOMEM;
738
739	ats->pos = pos;
740	ats->stu = ps;
741	pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
742	ats->qdep = PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
743					    PCI_ATS_MAX_QDEP;
744	dev->ats = ats;
745
746	return 0;
747}
748
/* Free the pci_ats bookkeeping of @dev and clear the pointer. */
static void ats_free_one(struct pci_dev *dev)
{
	kfree(dev->ats);
	dev->ats = NULL;
}
754
/**
 * pci_enable_ats - enable the ATS capability
 * @dev: the PCI device
 * @ps: the IOMMU page shift
 *
 * Returns 0 on success, or negative on failure.
 */
int pci_enable_ats(struct pci_dev *dev, int ps)
{
	int rc;
	u16 ctrl;

	BUG_ON(dev->ats && dev->ats->is_enabled);

	if (ps < PCI_ATS_MIN_STU)
		return -EINVAL;

	if (dev->is_physfn || dev->is_virtfn) {
		/* The PF's pci_ats is reference-counted across its VFs. */
		struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;

		mutex_lock(&pdev->sriov->lock);
		/* If the PF entry already exists, the page shift must match. */
		if (pdev->ats)
			rc = pdev->ats->stu == ps ? 0 : -EINVAL;
		else
			rc = ats_alloc_one(pdev, ps);

		if (!rc)
			pdev->ats->ref_cnt++;
		mutex_unlock(&pdev->sriov->lock);
		if (rc)
			return rc;
	}

	/* Non-PF devices (regular functions and VFs) get their own entry. */
	if (!dev->is_physfn) {
		rc = ats_alloc_one(dev, ps);
		if (rc)
			return rc;
	}

	ctrl = PCI_ATS_CTRL_ENABLE;
	/* VFs do not program an STU of their own. */
	if (!dev->is_virtfn)
		ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU);
	pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);

	dev->ats->is_enabled = 1;

	return 0;
}
803
/**
 * pci_disable_ats - disable the ATS capability
 * @dev: the PCI device
 */
void pci_disable_ats(struct pci_dev *dev)
{
	u16 ctrl;

	BUG_ON(!dev->ats || !dev->ats->is_enabled);

	pci_read_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, &ctrl);
	ctrl &= ~PCI_ATS_CTRL_ENABLE;
	pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);

	dev->ats->is_enabled = 0;

	if (dev->is_physfn || dev->is_virtfn) {
		/* Drop the shared reference on the PF's pci_ats entry. */
		struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;

		mutex_lock(&pdev->sriov->lock);
		pdev->ats->ref_cnt--;
		if (!pdev->ats->ref_cnt)
			ats_free_one(pdev);
		mutex_unlock(&pdev->sriov->lock);
	}

	/* Non-PF devices also free their own entry. */
	if (!dev->is_physfn)
		ats_free_one(dev);
}
833
834/**
835 * pci_ats_queue_depth - query the ATS Invalidate Queue Depth
836 * @dev: the PCI device
837 *
838 * Returns the queue depth on success, or negative on failure.
839 *
840 * The ATS spec uses 0 in the Invalidate Queue Depth field to
841 * indicate that the function can accept 32 Invalidate Request.
842 * But here we use the `real' values (i.e. 1~32) for the Queue
843 * Depth; and 0 indicates the function shares the Queue with
844 * other functions (doesn't exclusively own a Queue).
845 */
846int pci_ats_queue_depth(struct pci_dev *dev)
847{
848	int pos;
849	u16 cap;
850
851	if (dev->is_virtfn)
852		return 0;
853
854	if (dev->ats)
855		return dev->ats->qdep;
856
857	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
858	if (!pos)
859		return -ENODEV;
860
861	pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
862
863	return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
864				       PCI_ATS_MAX_QDEP;
865}
866