Lines matching refs:tbl
(search limited to /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/arch/powerpc/kernel/)

85 static unsigned long iommu_range_alloc(struct iommu_table *tbl,
111 start = largealloc ? tbl->it_largehint : tbl->it_hint;
114 limit = largealloc ? tbl->it_size : tbl->it_halfpoint;
116 if (largealloc && start < tbl->it_halfpoint)
117 start = tbl->it_halfpoint;
124 start = largealloc ? tbl->it_largehint : tbl->it_hint;
128 if (limit + tbl->it_offset > mask) {
129 limit = mask - tbl->it_offset + 1;
140 n = find_next_zero_bit(tbl->it_map, limit, start);
152 start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
153 limit = pass ? tbl->it_size : limit;
163 if (test_bit(i, tbl->it_map)) {
169 __set_bit(i, tbl->it_map);
174 tbl->it_largehint = end;
177 tbl->it_hint = (end + tbl->it_blocksize - 1) &
178 ~(tbl->it_blocksize - 1);
188 static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
195 spin_lock_irqsave(&(tbl->it_lock), flags);
197 entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order);
200 spin_unlock_irqrestore(&(tbl->it_lock), flags);
204 entry += tbl->it_offset; /* Offset into real TCE table */
208 ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
214 ppc_md.tce_flush(tbl);
216 spin_unlock_irqrestore(&(tbl->it_lock), flags);
224 static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
231 free_entry = entry - tbl->it_offset;
233 if (((free_entry + npages) > tbl->it_size) ||
234 (entry < tbl->it_offset)) {
239 printk(KERN_INFO "\tTable = 0x%lx\n", (u64)tbl);
240 printk(KERN_INFO "\tbus# = 0x%lx\n", (u64)tbl->it_busno);
241 printk(KERN_INFO "\tsize = 0x%lx\n", (u64)tbl->it_size);
242 printk(KERN_INFO "\tstartOff = 0x%lx\n", (u64)tbl->it_offset);
243 printk(KERN_INFO "\tindex = 0x%lx\n", (u64)tbl->it_index);
249 ppc_md.tce_free(tbl, entry, npages);
252 __clear_bit(free_entry+i, tbl->it_map);
255 static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
260 spin_lock_irqsave(&(tbl->it_lock), flags);
262 __iommu_free(tbl, dma_addr, npages);
269 ppc_md.tce_flush(tbl);
271 spin_unlock_irqrestore(&(tbl->it_lock), flags);
274 int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
286 if ((nelems == 0) || !tbl)
299 spin_lock_irqsave(&(tbl->it_lock), flags);
313 entry = iommu_range_alloc(tbl, npages, &handle, mask >> IOMMU_PAGE_SHIFT, 0);
320 printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
321 " npages %lx\n", tbl, vaddr, npages);
326 entry += tbl->it_offset;
334 ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction);
368 ppc_md.tce_flush(tbl);
370 spin_unlock_irqrestore(&(tbl->it_lock), flags);
395 __iommu_free(tbl, vaddr, npages);
400 spin_unlock_irqrestore(&(tbl->it_lock), flags);
405 void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
412 if (!tbl)
415 spin_lock_irqsave(&(tbl->it_lock), flags);
424 __iommu_free(tbl, dma_handle, npages);
433 ppc_md.tce_flush(tbl);
435 spin_unlock_irqrestore(&(tbl->it_lock), flags);
442 struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
452 tbl->it_halfpoint = tbl->it_size * 3 / 4;
455 sz = (tbl->it_size + 7) >> 3;
460 tbl->it_map = page_address(page);
461 memset(tbl->it_map, 0, sz);
463 tbl->it_hint = 0;
464 tbl->it_largehint = tbl->it_halfpoint;
465 spin_lock_init(&tbl->it_lock);
475 for (index = 0; index < tbl->it_size; index++) {
476 tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
481 __set_bit(index, tbl->it_map);
485 if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
489 for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
490 index < tbl->it_size; index++)
491 __clear_bit(index, tbl->it_map);
496 ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
507 start_index = tbl->it_offset | (entries_per_4g - 1);
508 start_index -= tbl->it_offset;
510 end_index = tbl->it_size;
513 __set_bit(index, tbl->it_map);
522 return tbl;
528 struct iommu_table *tbl = pdn->iommu_table;
532 if (!tbl || !tbl->it_map) {
540 for (i = 0; i < (tbl->it_size/64); i++) {
541 if (tbl->it_map[i] != 0) {
549 bitmap_sz = (tbl->it_size + 7) / 8;
553 free_pages((unsigned long) tbl->it_map, order);
556 kfree(tbl);
565 dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
578 if (tbl) {
579 dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
584 "tbl %p vaddr %p npages %d\n",
585 tbl, vaddr, npages);
594 void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
601 if (tbl) {
603 iommu_free(tbl, dma_handle, npages);
611 void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
633 if (!tbl)
646 mapping = iommu_alloc(tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
656 void iommu_free_coherent(struct iommu_table *tbl, size_t size,
659 if (tbl) {
664 iommu_free(tbl, dma_handle, nio_pages);