Results limited to /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/pci/

Lines Matching defs:domain

186  * 8-23: domain id
302 * This domain is a static identity mapping domain.
303 * 1. This domain creates a static 1:1 mapping to all usable memory.
305 * 3. Each iommu maps to this domain if successful.
310 /* devices under the same p2p bridge are owned in one domain */
313 /* domain represents a virtual machine; more than one device
314 * across iommus may be owned in one domain, e.g. kvm guest.
322 int id; /* domain id */
324 unsigned long iommu_bmp; /* bitmap of iommus this domain uses */
327 struct iova_domain iovad; /* iova's that belong to this domain */
335 int flags; /* flags to find out type of domain */
340 spinlock_t iommu_lock; /* protect iommu set in domain */
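Taken together, the field comments matched above outline the shape of struct dmar_domain. Below is a minimal sketch assembled only from fields that appear somewhere in this listing; member order, exact types, and anything not shown here are assumptions rather than a copy of the file.

/* Sketch of struct dmar_domain, reconstructed from the matched comments.
 * Members not visible in this listing (and their order) are assumptions. */
struct dmar_domain {
	int			id;		/* domain id (cf. the "8-23: domain id" context-entry note at line 186) */
	int			nid;		/* NUMA node id */
	unsigned long		iommu_bmp;	/* bitmap of iommus this domain uses */

	struct list_head	devices;	/* devices attached to this domain */
	struct iova_domain	iovad;		/* iova's that belong to this domain */

	struct dma_pte		*pgd;		/* virtual address of the page directory */
	int			gaw;		/* guest address width */
	int			agaw;		/* adjusted guest address width */

	int			flags;		/* flags to find out type of domain */
	int			iommu_coherency; /* whether every iommu in the set is cache coherent */
	int			iommu_snooping;	/* whether every iommu supports snoop control */
	int			iommu_count;	/* reference count of iommus using this domain */
	u64			max_addr;	/* maximum mapped address */

	spinlock_t		iommu_lock;	/* protect iommu set in domain */
};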
344 /* PCI domain-device relationship */
346 struct list_head link; /* link to domain siblings */
348 int segment; /* PCI domain */
353 struct dmar_domain *domain; /* pointer to domain */
364 struct dmar_domain *domain[HIGH_WATER_MARK];
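The per-device bookkeeping referenced throughout the rest of the listing (info->link, info->segment, info->domain) can be sketched as below. The struct name and the bus/devfn members are assumptions; the three commented fields are the ones visible in the matched lines.

/* Ties one PCI function to the dmar_domain it is attached to. */
struct device_domain_info {
	struct list_head link;		/* link to domain siblings (domain->devices) */
	int segment;			/* PCI segment, i.e. the PCI "domain" number */
	u8 bus;				/* assumed: PCI bus number */
	u8 devfn;			/* assumed: PCI device/function number */
	struct dmar_domain *domain;	/* pointer to the owning domain */
};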
378 static void domain_remove_dev_info(struct dmar_domain *domain);
513 /* This function only returns a single iommu in a domain */
514 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
518 /* si_domain and vm domain should not get here. */
519 BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
520 BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
522 iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
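Only fragments of domain_get_iommu() are matched here; a sketch of what those fragments imply follows. The trailing g_iommus[] lookup and the bounds check are assumptions.

/* Return the single IOMMU backing a native DMA-API domain.  VM and
 * static-identity domains may span several IOMMUs and must not get here,
 * hence the two BUG_ONs matched above. */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	int iommu_id;

	BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
	BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);

	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
		return NULL;			/* assumed: no iommu bit set */

	return g_iommus[iommu_id];		/* assumed: global iommu array */
}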
529 static void domain_update_iommu_coherency(struct dmar_domain *domain)
533 domain->iommu_coherency = 1;
535 for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
537 domain->iommu_coherency = 0;
543 static void domain_update_iommu_snooping(struct dmar_domain *domain)
547 domain->iommu_snooping = 1;
549 for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
551 domain->iommu_snooping = 0;
558 static void domain_update_iommu_cap(struct dmar_domain *domain)
560 domain_update_iommu_coherency(domain);
561 domain_update_iommu_snooping(domain);
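Both capability updaters follow the same pattern visible in the matches: start optimistic, then walk every IOMMU whose bit is set in iommu_bmp and clear the flag as soon as one unit lacks the feature. A sketch for the coherency case; the ecap_coherent() helper name and the g_iommus[] array are assumptions.

static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
	int i;

	domain->iommu_coherency = 1;	/* assume coherent until disproved */

	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
		if (!ecap_coherent(g_iommus[i]->ecap)) {	/* assumed helper */
			domain->iommu_coherency = 0;
			break;
		}
	}
}

/* domain_update_iommu_snooping() has the same shape for iommu_snooping,
 * and domain_update_iommu_cap() (lines 558-561) simply calls both. */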
594 static void domain_flush_cache(struct dmar_domain *domain,
597 if (!domain->iommu_coherency)
690 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
693 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
695 int level = agaw_to_level(domain->agaw);
698 BUG_ON(!domain->pgd);
700 parent = domain->pgd;
713 tmp_page = alloc_pgtable_page(domain->nid);
718 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
725 domain_flush_cache(domain, pte, sizeof(*pte));
736 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
741 int total = agaw_to_level(domain->agaw);
744 parent = domain->pgd;
760 static void dma_pte_clear_range(struct dmar_domain *domain,
764 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
773 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
784 domain_flush_cache(domain, first_pte,
791 static void dma_pte_free_pagetable(struct dmar_domain *domain,
795 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
797 int total = agaw_to_level(domain->agaw);
815 first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
830 domain_flush_cache(domain, first_pte,
837 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
838 free_pgtable_page(domain->pgd);
839 domain->pgd = NULL;
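The page-table walkers above repeatedly derive addr_width as agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT and the level count as agaw_to_level(domain->agaw). A worked sketch of that arithmetic, assuming the usual VT-d 4 KiB pages and 9-bit-per-level stride; the helper bodies below are assumptions, not copied from the file.

#define VTD_PAGE_SHIFT	12	/* 4 KiB pages */
#define LEVEL_STRIDE	9	/* 512 entries per table level (assumed) */

/* Assumed encoding: an adjusted guest address width (agaw) of 1 means a
 * 39-bit, 3-level table; agaw 2 means 48-bit, 4-level; and so on. */
static inline int agaw_to_level(int agaw)
{
	return agaw + 2;
}

static inline int agaw_to_width(int agaw)
{
	return 30 + agaw * LEVEL_STRIDE;
}

/* Example: agaw = 2 gives width = 48, so addr_width = 48 - 12 = 36 bits
 * of page-frame number and a 4-level walk in pfn_to_dma_pte(). */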
984 struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
998 list_for_each_entry(info, &domain->devices, link)
1035 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1043 list_for_each_entry(info, &domain->devices, link) {
1063 * Fallback to domain selective flush if no PSI support or the size is
1152 printk(KERN_ERR "Allocating domain id array failed\n");
1158 printk(KERN_ERR "Allocating domain array failed\n");
1172 static void domain_exit(struct dmar_domain *domain);
1173 static void vm_domain_exit(struct dmar_domain *domain);
1177 struct dmar_domain *domain;
1183 domain = iommu->domains[i];
1186 spin_lock_irqsave(&domain->iommu_lock, flags);
1187 if (--domain->iommu_count == 0) {
1188 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
1189 vm_domain_exit(domain);
1191 domain_exit(domain);
1193 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1227 struct dmar_domain *domain;
1229 domain = alloc_domain_mem();
1230 if (!domain)
1233 domain->nid = -1;
1234 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
1235 domain->flags = 0;
1237 return domain;
1240 static int iommu_attach_domain(struct dmar_domain *domain,
1254 printk(KERN_ERR "IOMMU: no free domain ids\n");
1258 domain->id = num;
1260 set_bit(iommu->seq_id, &domain->iommu_bmp);
1261 iommu->domains[num] = domain;
1267 static void iommu_detach_domain(struct dmar_domain *domain,
1277 if (iommu->domains[num] == domain) {
1285 clear_bit(iommu->seq_id, &domain->iommu_bmp);
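A sketch of the attach path these matches come from: the hardware domain-id allocation out of iommu->domain_ids, the cap_ndoms() limit and the locking are assumptions, while the id assignment, the iommu_bmp update and the domains[] store are visible above.

static int iommu_attach_domain(struct dmar_domain *domain,
			       struct intel_iommu *iommu)
{
	unsigned long flags, ndomains;
	int num;

	ndomains = cap_ndoms(iommu->cap);	/* assumed capability helper */

	spin_lock_irqsave(&iommu->lock, flags);
	num = find_first_zero_bit(iommu->domain_ids, ndomains);
	if (num >= ndomains) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		printk(KERN_ERR "IOMMU: no free domain ids\n");
		return -ENOMEM;
	}

	set_bit(num, iommu->domain_ids);		/* claim a hardware domain id */
	domain->id = num;
	set_bit(iommu->seq_id, &domain->iommu_bmp);	/* remember which iommu we use */
	iommu->domains[num] = domain;
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

/* iommu_detach_domain() (lines 1267-1285) is the inverse: find the slot
 * where iommu->domains[num] == domain, clear it, and clear this iommu's
 * bit in domain->iommu_bmp. */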
1329 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1331 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1348 static int domain_init(struct dmar_domain *domain, int guest_width)
1354 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
1355 spin_lock_init(&domain->iommu_lock);
1357 domain_reserve_special_ranges(domain);
1360 iommu = domain_get_iommu(domain);
1363 domain->gaw = guest_width;
1374 domain->agaw = agaw;
1375 INIT_LIST_HEAD(&domain->devices);
1378 domain->iommu_coherency = 1;
1380 domain->iommu_coherency = 0;
1383 domain->iommu_snooping = 1;
1385 domain->iommu_snooping = 0;
1387 domain->iommu_count = 1;
1388 domain->nid = iommu->node;
1391 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1392 if (!domain->pgd)
1394 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1398 static void domain_exit(struct dmar_domain *domain)
1404 if (!domain)
1407 domain_remove_dev_info(domain);
1409 put_iova_domain(&domain->iovad);
1412 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1415 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
1418 if (test_bit(iommu->seq_id, &domain->iommu_bmp))
1419 iommu_detach_domain(domain, iommu);
1421 free_domain_mem(domain);
1424 static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
1440 BUG_ON(!domain->pgd);
1457 id = domain->id;
1458 pgd = domain->pgd;
1460 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
1461 domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
1464 /* find an available domain id for this device in iommu */
1467 if (iommu->domains[num] == domain) {
1478 printk(KERN_ERR "IOMMU: no free domain ids\n");
1483 iommu->domains[num] = domain;
1492 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1505 info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
1523 domain_flush_cache(domain, context, sizeof(*context));
1529 * domain #0, which we have to flush:
1536 iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH);
1543 spin_lock_irqsave(&domain->iommu_lock, flags);
1544 if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
1545 domain->iommu_count++;
1546 if (domain->iommu_count == 1)
1547 domain->nid = iommu->node;
1548 domain_update_iommu_cap(domain);
1550 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1555 domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1561 ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
1574 ret = domain_context_mapping_one(domain,
1583 return domain_context_mapping_one(domain,
1588 return domain_context_mapping_one(domain,
1638 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1644 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
1671 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
1691 domain_flush_cache(domain, first_pte,
1704 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1708 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
1711 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1715 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
1729 static void domain_remove_dev_info(struct dmar_domain *domain)
1736 while (!list_empty(&domain->devices)) {
1737 info = list_entry(domain->devices.next,
1764 /* No lock here, assumes no domain exit in normal case */
1767 return info->domain;
1771 /* domain is initialized */
1774 struct dmar_domain *domain, *found = NULL;
1784 domain = find_domain(pdev);
1785 if (domain)
1786 return domain;
1803 found = info->domain;
1808 /* pcie-pci bridge already has a domain, use it */
1810 domain = found;
1815 domain = alloc_domain();
1816 if (!domain)
1819 /* Allocate new domain for the device */
1828 ret = iommu_attach_domain(domain, iommu);
1830 domain_exit(domain);
1834 if (domain_init(domain, gaw)) {
1835 domain_exit(domain);
1843 domain_exit(domain);
1850 info->domain = domain;
1851 /* This domain is shared by devices under p2p bridge */
1852 domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
1854 /* pcie-to-pci bridge already has a domain, use it */
1860 found = tmp->domain;
1867 domain_exit(domain);
1868 domain = found;
1870 list_add(&info->link, &domain->devices);
1884 info->domain = domain;
1890 if (found != domain) {
1891 domain_exit(domain);
1892 domain = found;
1895 return domain;
1897 list_add(&info->link, &domain->devices);
1901 return domain;
1912 static int iommu_domain_identity_map(struct dmar_domain *domain,
1919 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
1925 pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
1926 start, end, domain->id);
1931 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
1933 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
1942 struct dmar_domain *domain;
1945 domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
1946 if (!domain)
1953 if (domain == si_domain && hw_pass_through) {
1973 if (end >> agaw_to_width(domain->agaw)) {
1976 agaw_to_width(domain->agaw),
1984 ret = iommu_domain_identity_map(domain, start, end);
1989 ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
1996 domain_exit(domain);
2034 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2058 pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
2087 static void domain_remove_one_dev_info(struct dmar_domain *domain,
2103 static int domain_add_dev_info(struct dmar_domain *domain,
2115 ret = domain_context_mapping(domain, pdev, translation);
2125 info->domain = domain;
2128 list_add(&info->link, &domain->devices);
2148 * We want to start off with all devices in the 1:1 domain, and
2161 * the 1:1 domain, just in _case_ one of their siblings turns out
2175 * take them out of the 1:1 domain later.
2351 * locate drhd for dev, alloc domain for dev
2352 * allocate free domain
2357 * init context with domain, translation etc
2424 struct dmar_domain *domain,
2431 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
2439 iova = alloc_iova(&domain->iovad, nrpages,
2444 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
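The two alloc_iova() calls matched above implement a prefer-32-bit-IOVAs-first policy: try below 4 GiB when the device mask allows more, then fall back to the full mask. A sketch under that reading; the dmar_forcedac knob and the error message are assumptions.

static struct iova *intel_alloc_iova(struct device *dev,
				     struct dmar_domain *domain,
				     unsigned long nrpages, uint64_t dma_mask)
{
	struct iova *iova = NULL;

	/* Restrict dma_mask to the width the domain can actually address */
	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);

	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {	/* assumed knob */
		/* First try to allocate an IOVA below 4 GiB ... */
		iova = alloc_iova(&domain->iovad, nrpages,
				  IOVA_PFN(DMA_BIT_MASK(32)), 1);
		if (iova)
			return iova;
	}
	/* ... and only then from the device's full addressable range */
	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
	if (unlikely(!iova)) {
		printk(KERN_ERR "Allocating %lu-page iova for %s failed\n",
		       nrpages, dev_name(dev));	/* assumed message */
		return NULL;
	}

	return iova;
}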
2456 struct dmar_domain *domain;
2459 domain = get_domain_for_dev(pdev,
2461 if (!domain) {
2463 "Allocating domain for %s failed", pci_name(pdev));
2469 ret = domain_context_mapping(domain, pdev,
2479 return domain;
2486 /* No lock here, assumes no domain exit in normal case */
2489 return info->domain;
2555 struct dmar_domain *domain;
2568 domain = get_valid_domain_for_dev(pdev);
2569 if (!domain)
2572 iommu = domain_get_iommu(domain);
2575 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
2595 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
2602 iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1);
2612 __free_iova(&domain->iovad, iova);
2649 struct dmar_domain *domain = deferred_flush[i].domain[j];
2653 iommu_flush_iotlb_psi(iommu, domain->id,
2657 iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
2660 __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
2691 deferred_flush[iommu_id].domain[next] = dom;
2708 struct dmar_domain *domain;
2716 domain = find_domain(pdev);
2717 BUG_ON(!domain);
2719 iommu = domain_get_iommu(domain);
2721 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
2733 dma_pte_clear_range(domain, start_pfn, last_pfn);
2736 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2739 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2742 __free_iova(&domain->iovad, iova);
2744 add_unmap(domain, iova);
2801 struct dmar_domain *domain;
2809 domain = find_domain(pdev);
2810 BUG_ON(!domain);
2812 iommu = domain_get_iommu(domain);
2814 iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
2823 dma_pte_clear_range(domain, start_pfn, last_pfn);
2826 dma_pte_free_pagetable(domain, start_pfn, last_pfn);
2829 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
2832 __free_iova(&domain->iovad, iova);
2834 add_unmap(domain, iova);
2861 struct dmar_domain *domain;
2874 domain = get_valid_domain_for_dev(pdev);
2875 if (!domain)
2878 iommu = domain_get_iommu(domain);
2883 iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
2902 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
2905 dma_pte_clear_range(domain, start_vpfn,
2908 dma_pte_free_pagetable(domain, start_vpfn,
2911 __free_iova(&domain->iovad, iova);
2917 iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1);
3238 * Added device is not attached to its DMAR domain here yet. That will happen
3246 struct dmar_domain *domain;
3251 domain = find_domain(pdev);
3252 if (!domain)
3256 domain_remove_one_dev_info(domain, pdev);
3351 static void domain_remove_one_dev_info(struct dmar_domain *domain,
3366 list_for_each_safe(entry, tmp, &domain->devices) {
3368 /* No need to compare PCI domain; it has to be the same */
3391 * owned by this domain, clear this iommu in iommu_bmp
3401 spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
3402 clear_bit(iommu->seq_id, &domain->iommu_bmp);
3403 domain->iommu_count--;
3404 domain_update_iommu_cap(domain);
3405 spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
3411 static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3418 while (!list_empty(&domain->devices)) {
3419 info = list_entry(domain->devices.next,
3436 spin_lock_irqsave(&domain->iommu_lock, flags2);
3438 &domain->iommu_bmp)) {
3439 domain->iommu_count--;
3440 domain_update_iommu_cap(domain);
3442 spin_unlock_irqrestore(&domain->iommu_lock, flags2);
3450 /* domain id for virtual machine, it won't be set in context */
3455 struct dmar_domain *domain;
3457 domain = alloc_domain_mem();
3458 if (!domain)
3461 domain->id = vm_domid++;
3462 domain->nid = -1;
3463 memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
3464 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
3466 return domain;
3469 static int md_domain_init(struct dmar_domain *domain, int guest_width)
3473 init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
3474 spin_lock_init(&domain->iommu_lock);
3476 domain_reserve_special_ranges(domain);
3479 domain->gaw = guest_width;
3481 domain->agaw = width_to_agaw(adjust_width);
3483 INIT_LIST_HEAD(&domain->devices);
3485 domain->iommu_count = 0;
3486 domain->iommu_coherency = 0;
3487 domain->iommu_snooping = 0;
3488 domain->max_addr = 0;
3489 domain->nid = -1;
3492 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
3493 if (!domain->pgd)
3495 domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
3499 static void iommu_free_vm_domain(struct dmar_domain *domain)
3514 if (iommu->domains[i] == domain) {
3525 static void vm_domain_exit(struct dmar_domain *domain)
3528 if (!domain)
3531 vm_domain_remove_all_dev_info(domain);
3533 put_iova_domain(&domain->iovad);
3536 dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3539 dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
3541 iommu_free_vm_domain(domain);
3542 free_domain_mem(domain);
3545 static int intel_iommu_domain_init(struct iommu_domain *domain)
3561 domain->priv = dmar_domain;
3566 static void intel_iommu_domain_destroy(struct iommu_domain *domain)
3568 struct dmar_domain *dmar_domain = domain->priv;
3570 domain->priv = NULL;
3574 static int intel_iommu_attach_device(struct iommu_domain *domain,
3577 struct dmar_domain *dmar_domain = domain->priv;
3632 static void intel_iommu_detach_device(struct iommu_domain *domain,
3635 struct dmar_domain *dmar_domain = domain->priv;
3641 static int intel_iommu_map(struct iommu_domain *domain,
3645 struct dmar_domain *dmar_domain = domain->priv;
3681 static int intel_iommu_unmap(struct iommu_domain *domain,
3684 struct dmar_domain *dmar_domain = domain->priv;
3696 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
3699 struct dmar_domain *dmar_domain = domain->priv;
3710 static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
3713 struct dmar_domain *dmar_domain = domain->priv;
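The intel_iommu_* callbacks in lines 3545-3713 bridge the generic struct iommu_domain (whose priv field carries the dmar_domain) to the VT-d internals. In this kernel generation they would typically be wired up through an iommu_ops table along the lines of the sketch below; the exact member names and the register_iommu() call are assumptions.

static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.domain_has_cap	= intel_iommu_domain_has_cap,
};

/* Registered once at init time so the generic IOMMU API dispatches here:
 *	register_iommu(&intel_iommu_ops);
 */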