Lines Matching defs:arena in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/arch/alpha/kernel/

62 	struct pci_iommu_arena *arena;
67 not addition, so the required arena alignment is based on
69 particular systems can over-align the arena. */
76 arena = alloc_bootmem_node(NODE_DATA(nid), sizeof(*arena));
77 if (!NODE_DATA(nid) || !arena) {
78 printk("%s: couldn't allocate arena from node %d\n"
81 arena = alloc_bootmem(sizeof(*arena));
84 arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0);
85 if (!NODE_DATA(nid) || !arena->ptes) {
86 printk("%s: couldn't allocate arena ptes from node %d\n"
89 arena->ptes = __alloc_bootmem(mem_size, align, 0);
94 arena = alloc_bootmem(sizeof(*arena));
95 arena->ptes = __alloc_bootmem(mem_size, align, 0);
99 spin_lock_init(&arena->lock);
100 arena->hose = hose;
101 arena->dma_base = base;
102 arena->size = window_size;
103 arena->next_entry = 0;
107 arena->align_entry = 1;
109 return arena;
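
The assignments at source lines 99-107 fill in the arena descriptor that the constructor above returns. For orientation while reading the rest of the matches, here is a sketch of that descriptor: the field set is inferred from the assignments shown (lines 84 and 99-107), and the exact layout is assumed to follow arch/alpha/kernel/pci_impl.h in this tree rather than being confirmed by the matches themselves.

    /* Sketch of the arena descriptor the matches above initialize; layout
       assumed from arch/alpha/kernel/pci_impl.h, fields inferred from the
       assignments at lines 84 and 99-107. */
    struct pci_iommu_arena
    {
            spinlock_t lock;                /* guards ptes[] and next_entry (line 99) */
            struct pci_controller *hose;    /* owning PCI host bridge (line 100) */
            unsigned long *ptes;            /* flat table, one PTE per window page (line 84) */
            dma_addr_t dma_base;            /* first bus address of the window (line 101) */
            unsigned int size;              /* window size in bytes (line 102) */
            unsigned int next_entry;        /* next-fit scan hint into ptes[] (line 103) */
            unsigned int align_entry;       /* minimum allocation alignment, in entries (line 107) */
    };
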
119 /* Must be called with the arena lock held */
121 iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
130 base = arena->dma_base >> PAGE_SHIFT;
139 ptes = arena->ptes;
140 nent = arena->size >> PAGE_SHIFT;
141 p = ALIGN(arena->next_entry, mask + 1);
163 alpha_mv.mv_pci_tbi(arena->hose, 0, -1);
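
Source lines 121-163 are the heart of iommu_arena_find_pages(): convert the window size to a page count, round the next_entry hint up to the requested alignment, scan for n consecutive free PTEs, and flush the scatter-gather TLB (alpha_mv.mv_pci_tbi(arena->hose, 0, -1), line 163) when the scan wraps back to the start of the window. A minimal stand-alone model of that next-fit scan, with hypothetical names and none of the kernel's DAC-window handling:

    /* Round p up to a multiple of (mask + 1); mask must be 2^k - 1.
       Mirrors the kernel's ALIGN(p, mask + 1) on line 141. */
    static long align_up(long p, long mask)
    {
            return (p + mask) & ~mask;
    }

    /* Next-fit search for n consecutive zero entries in ptes[0..nent),
       starting at the hint 'next' and wrapping around at most once.
       Returns the index of a suitable block, or -1 if the window is full.
       (Illustrative model only; the real function runs under the arena lock.) */
    static long arena_find_pages(unsigned long *ptes, long nent, long next,
                                 long n, long mask)
    {
            long p = align_up(next, mask);
            long i = 0;
            int passes = 0;

            while (i < n) {
                    if (p + i >= nent) {
                            if (passes++)
                                    return -1;      /* already wrapped once */
                            /* Wrapping: the real code flushes the SG TLB here
                               (line 163) before reusing old entries. */
                            p = 0;
                            i = 0;
                            continue;
                    }
                    if (ptes[p + i]) {
                            /* Occupied entry: restart just past it, realigned. */
                            p = align_up(p + i + 1, mask);
                            i = 0;
                    } else {
                            i++;
                    }
            }
            return p;
    }
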
179 iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
186 spin_lock_irqsave(&arena->lock, flags);
189 ptes = arena->ptes;
190 mask = max(align, arena->align_entry) - 1;
191 p = iommu_arena_find_pages(dev, arena, n, mask);
193 spin_unlock_irqrestore(&arena->lock, flags);
204 arena->next_entry = p + n;
205 spin_unlock_irqrestore(&arena->lock, flags);
211 iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
216 p = arena->ptes + ofs;
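
iommu_arena_alloc() (lines 179-205) takes the arena lock, combines the caller's alignment with the arena's own align_entry (line 190), runs the scan above, and on success bumps next_entry past the new block (line 204); iommu_arena_free() (lines 211-216) releases a block by clearing its PTEs. Continuing the model, and assuming (this is not visible in the matches) that freshly allocated entries are stamped with a reserved marker so they are not found free again before real translations are written:

    /* RESERVED_SENTINEL stands in for the kernel's reserved-PTE marker;
       the marker and its value are an assumption, not shown above. */
    #define RESERVED_SENTINEL 1UL

    static long arena_alloc(unsigned long *ptes, long nent, long *next_entry,
                            long n, long align_mask)
    {
            long i, p = arena_find_pages(ptes, nent, *next_entry, n, align_mask);

            if (p < 0)
                    return -1;

            /* Stamp the block so later scans treat it as busy until the
               caller installs real translations. */
            for (i = p; i < p + n; i++)
                    ptes[i] = RESERVED_SENTINEL;

            *next_entry = p + n;            /* next-fit hint, as on line 204 */
            return p;
    }

    static void arena_free(unsigned long *ptes, long ofs, long n)
    {
            long i;

            /* A zero PTE means "free"; compare line 216, which takes
               p = arena->ptes + ofs before clearing n entries. */
            for (i = 0; i < n; i++)
                    ptes[ofs + i] = 0;
    }
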
256 struct pci_iommu_arena *arena;
296 arena = hose->sg_pci;
297 if (!arena || arena->dma_base + arena->size - 1 > max_dma)
298 arena = hose->sg_isa;
305 dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
314 arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);
316 ret = arena->dma_base + dma_ofs * PAGE_SIZE;
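
The single-buffer mapping path (lines 256-316) picks an arena, allocates enough entries to cover the buffer, writes one PTE per page, and returns a bus address inside the window. Lines 296-298 show the window selection: prefer the PCI scatter-gather window, but fall back to the ISA window when the PCI one does not sit entirely below the device's max_dma; the same pattern recurs at lines 400-402, 687-689 and 751-753. The address arithmetic, sketched with illustrative names (the real PTE encoding comes from mk_iommu_pte() and is chip-specific, so a dummy "pfn plus valid bit" stands in):

    static unsigned long map_single_model(unsigned long *ptes, long dma_ofs,
                                          unsigned long dma_base,
                                          unsigned long paddr, long npages,
                                          unsigned long page_size)
    {
            unsigned long in_page = paddr & (page_size - 1);
            unsigned long page = paddr - in_page;
            long i;

            /* One PTE per IOMMU page covering the buffer (line 314). */
            for (i = 0; i < npages; i++, page += page_size)
                    ptes[dma_ofs + i] = ((page / page_size) << 1) | 1;  /* stand-in PTE */

            /* Bus address handed to the device (line 316), plus the buffer's
               offset within its first page. */
            return dma_base + dma_ofs * page_size + in_page;
    }
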
378 struct pci_iommu_arena *arena;
400 arena = hose->sg_pci;
401 if (!arena || dma_addr < arena->dma_base)
402 arena = hose->sg_isa;
404 dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
405 if (dma_ofs * PAGE_SIZE >= arena->size) {
408 dma_addr, arena->dma_base, arena->size);
415 spin_lock_irqsave(&arena->lock, flags);
417 iommu_arena_free(arena, dma_ofs, npages);
422 if (dma_ofs >= arena->next_entry)
425 spin_unlock_irqrestore(&arena->lock, flags);
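
The unmap path (lines 378-425) recovers the entry index from the bus address (line 404), sanity-checks it against the window size (line 405), frees the entries under the arena lock, and then decides whether to flush the scatter-gather TLB right away. The condition on line 422 carries the reasoning: entries below next_entry cannot be reallocated until the allocator wraps, and the wrap already flushes the TLB, so only entries at or above next_entry need an immediate flush. A one-line restatement (illustrative only; the real flush is alpha_mv.mv_pci_tbi() over the freed bus-address range):

    static int unmap_needs_immediate_flush(long dma_ofs, long next_entry)
    {
            /* Freed entries at or above the next-fit hint could be handed out
               again before the next wrap-around flush, so flush them now. */
            return dma_ofs >= next_entry;
    }
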
555 struct scatterlist *out, struct pci_iommu_arena *arena,
596 dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
605 return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
608 out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
616 ptes = &arena->ptes[dma_ofs];
659 struct pci_iommu_arena *arena;
687 arena = hose->sg_pci;
688 if (!arena || arena->dma_base + arena->size - 1 > max_dma)
689 arena = hose->sg_isa;
692 arena = NULL;
701 if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
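
sg_fill() (lines 555-616) does for a coalesced scatter-gather run what the single-buffer path does for one buffer: allocate a block of entries, point the output segment's dma_address into the window (line 608), and fill the PTEs starting at &arena->ptes[dma_ofs] (line 616); if the allocation fails it retries after splitting the run (line 605). pci_map_sg() (lines 659-701) selects the window with the same PCI-then-ISA fallback and may leave arena NULL (line 692) when every segment can be mapped directly. A compact restatement of the per-run address assignment, with hypothetical names:

    /* For a run placed at window offset dma_ofs, the run's bus address is the
       window base plus the offset of its first buffer within its page
       (compare line 608). */
    static unsigned long sg_bus_address(unsigned long dma_base, long dma_ofs,
                                        unsigned long page_size,
                                        unsigned long leader_offset)
    {
            return dma_base + dma_ofs * page_size + leader_offset;
    }
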
738 struct pci_iommu_arena *arena;
751 arena = hose->sg_pci;
752 if (!arena || arena->dma_base + arena->size - 1 > max_dma)
753 arena = hose->sg_isa;
757 spin_lock_irqsave(&arena->lock, flags);
789 ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
790 iommu_arena_free(arena, ofs, npages);
800 if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
803 spin_unlock_irqrestore(&arena->lock, flags);
815 struct pci_iommu_arena *arena;
825 /* Check that we have a scatter-gather arena that fits. */
827 arena = hose->sg_isa;
828 if (arena && arena->dma_base + arena->size - 1 <= mask)
830 arena = hose->sg_pci;
831 if (arena && arena->dma_base + arena->size - 1 <= mask)
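
The DMA-mask probe (lines 815-831) asks whether any scatter-gather arena is fully addressable by the device: the ISA window, which sits lowest, is tried first (line 827), then the PCI window (line 830). The per-window test, restated with hypothetical names:

    /* A window is usable by a device iff every bus address it can produce,
       dma_base .. dma_base + size - 1, fits under the device's DMA mask
       (the test on lines 828 and 830). */
    static int window_fits_mask(unsigned long dma_base, unsigned long size,
                                unsigned long mask)
    {
            return size != 0 && dma_base + size - 1 <= mask;
    }
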
847 iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
853 if (!arena) return -EINVAL;
855 spin_lock_irqsave(&arena->lock, flags);
858 ptes = arena->ptes;
859 p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
861 spin_unlock_irqrestore(&arena->lock, flags);
871 arena->next_entry = p + pg_count;
872 spin_unlock_irqrestore(&arena->lock, flags);
878 iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
883 if (!arena) return -EINVAL;
885 ptes = arena->ptes;
892 iommu_arena_free(arena, pg_start, pg_count);
897 iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
904 if (!arena) return -EINVAL;
906 spin_lock_irqsave(&arena->lock, flags);
908 ptes = arena->ptes;
912 spin_unlock_irqrestore(&arena->lock, flags);
920 spin_unlock_irqrestore(&arena->lock, flags);
926 iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
931 if (!arena) return -EINVAL;
933 p = arena->ptes + pg_start;
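
The remaining matches (lines 847-933) are the reserve/release/bind/unbind helpers, which let other code (such as an AGP GART driver) manage a block of window entries by hand: iommu_reserve() carves out a block and bumps next_entry (line 871), iommu_release() hands the block back via iommu_arena_free() (line 892), iommu_bind() installs real translations into a previously reserved range, and iommu_unbind() drops them again without returning the block. The matches show the locking and the PTE pointer arithmetic but not the loop bodies, so the following sketch reuses the assumptions of the earlier model (reserved-marker sentinel, dummy PTE encoding) rather than transcribing the kernel code:

    static int arena_bind_model(unsigned long *ptes, long pg_start, long pg_count,
                                const unsigned long *phys_pages)
    {
            long i;

            /* Install one translation per caller-supplied page; the real code
               derives the physical address from struct page and mk_iommu_pte(). */
            for (i = 0; i < pg_count; i++)
                    ptes[pg_start + i] = phys_pages[i] | 1UL;   /* dummy PTE */
            return 0;
    }

    static void arena_unbind_model(unsigned long *ptes, long pg_start, long pg_count)
    {
            long i;

            /* Drop the translations but keep the block marked, so the allocator
               will not hand it out again until it is explicitly released. */
            for (i = 0; i < pg_count; i++)
                    ptes[pg_start + i] = RESERVED_SENTINEL;
    }
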