/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"

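/* Streaming cache (STC) helper macros.  Each context owns one 64-bit
 * tag match register, hence the (CTX) << 3 byte offset.  The flush
 * flag is a word in memory that is cleared before a flush-sync is
 * issued and becomes non-zero once the hardware has completed it;
 * strbuf_flush() below polls it via STC_FLUSHFLAG_SET().
 */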
#define STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)

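/* The IOMMU control registers live in physical address space, so
 * these accessors use ldxa/stxa with ASI_PHYS_BYPASS_EC_E to bypass
 * the MMU and the external cache.
 */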
#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))

/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu *iommu)
{
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		tag = iommu->iommu_tags;
		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}

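/* IOPTE encodings: a consistent (non-streaming) mapping is valid and
 * cacheable, with the DMA context number placed in the IOPTE_CONTEXT
 * field at bit 47.  A streaming mapping additionally sets IOPTE_STBUF
 * so that transfers pass through the streaming cache.
 */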
#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}

/* Based almost entirely upon the ppc64 iommu allocator.  If you use the 'handle'
 * facility it must all be done in one pass while under the iommu lock.
 *
 * On sun4u platforms, we only flush the IOMMU once every time we've passed
 * over the entire page table doing allocations.  Therefore we only ever advance
 * the hint and cannot backtrack it.
 */
unsigned long iommu_range_alloc(struct device *dev,
				struct iommu *iommu,
				unsigned long npages,
				unsigned long *handle)
{
	unsigned long n, end, start, limit, boundary_size;
	struct iommu_arena *arena = &iommu->arena;
	int pass = 0;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = arena->hint;

	limit = arena->limit;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the beginning and flush.
	 */
	if (start >= limit) {
		start = 0;
		if (iommu->flush_all)
			iommu->flush_all(iommu);
	}

 again:

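	/* Honor the device's DMA segment boundary, defaulting to 4GB
	 * when no device is given.  iommu_area_alloc() then searches
	 * the arena bitmap in units of IO pages, rejecting candidates
	 * that would cross that boundary.
	 */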
	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IO_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);

	n = iommu_area_alloc(arena->map, limit, start, npages,
			     iommu->page_table_map_base >> IO_PAGE_SHIFT,
			     boundary_size >> IO_PAGE_SHIFT, 0);
	if (n == -1) {
		if (likely(pass < 1)) {
			/* First failure, rescan from the beginning.  */
			start = 0;
			if (iommu->flush_all)
				iommu->flush_all(iommu);
			pass++;
			goto again;
		} else {
			/* Second failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	arena->hint = end;

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}

void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long entry;

	entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;

	bitmap_clear(arena->map, entry, npages);
}

int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask,
		     int numa_node)
{
	unsigned long i, order, sz, num_tsb_entries;
	struct page *page;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map: one bit per TSB
	 * entry, with the byte size rounded up to a multiple of 8.
	 */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
	if (!iommu->arena.map) {
		printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	memset(iommu->arena.map, 0, sz);
	iommu->arena.limit = num_tsb_entries;

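	/* Direct register flushing only exists on sun4u hardware; on
	 * hypervisor (sun4v) platforms flush_all stays NULL.
	 */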
	if (tlb_type != hypervisor)
		iommu->flush_all = iommu_flushall;

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	iommu->dummy_page = (unsigned long) page_address(page);
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself.  */
	order = get_order(tsbsize);
	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)page_address(page);

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->arena.map);
	iommu->arena.map = NULL;

	return -ENOMEM;
}

static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
				    unsigned long npages)
{
	unsigned long entry;

	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	if (unlikely(entry == DMA_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
}

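/* DMA contexts: context zero means "no context" and is never handed
 * out.  The search starts at the lowest context known to be free and
 * wraps around once before giving up and falling back to context 0.
 */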
static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int sz = IOMMU_NUM_CTXS - lowest;
	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);

	if (unlikely(n == sz)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}

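/* Allocate a physically contiguous buffer and map it consistent
 * (non-streaming) and writable in the IOMMU page table.  The order
 * check caps a single allocation at 2^10 pages, matching the free
 * path below.
 */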
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	unsigned long flags, order, first_page;
	struct iommu *iommu;
	struct page *page;
	int npages, nid;
	iopte_t *iopte;
	void *ret;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}

static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma)
{
	struct iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	iopte = iommu->page_table +
		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

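/* Map one page for streaming DMA.  The mapping must cover every IO
 * page touched by [oaddr, oaddr + sz), so a buffer that straddles an
 * IO page boundary consumes an extra IOMMU entry.
 */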
static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  struct dma_attrs *attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(dev, iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;
}

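/* Flush the streaming cache for a DMA range.  When both the streaming
 * buffer and the IOMMU support context flushing, flush by context:
 * write the context into the flush register, then re-read the tag
 * match register until no tags for that context remain, falling back
 * to per-page flushing on timeout.  Unless the transfer was
 * DMA_TO_DEVICE, finish with a flush-sync and spin on the flush flag
 * until the hardware acknowledges completion.
 */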
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%llx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}

static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      struct dma_attrs *attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	iommu_range_free(iommu, bus_addr, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

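/* Map a scatterlist in a single pass under the IOMMU lock, using a
 * shared allocation handle so per-segment allocations stay cheap.
 * Adjacent entries are merged whenever the resulting DMA addresses
 * are contiguous and the merge respects the device limits checked
 * below; on failure every entry mapped so far is torn down and 0 is
 * returned.
 */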
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot, ctx;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct strbuf *strbuf;
	struct iommu *iommu;
	unsigned long base_shift;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;
	if (nelems == 0 || !iommu)
		return 0;

	spin_lock_irqsave(&iommu->lock, flags);

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	if (strbuf->strbuf_enabled)
		prot = IOPTE_STREAMING(ctx);
	else
		prot = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		prot |= IOPTE_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;
		iopte_t *base;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		base = iommu->page_table + entry;

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			iopte_val(*base) = prot | paddr;
			base++;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - the new dma_addr isn't contiguous with the
			 *   previous segment,
			 * - merging would overflow the device's max
			 *   segment size, or
			 * - the merged segment would cross a segment
			 *   boundary.
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages, entry, j;
			iopte_t *base;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_range_free(iommu, vaddr, npages);

			entry = (vaddr - iommu->page_table_map_base)
				>> IO_PAGE_SHIFT;
			base = iommu->page_table + entry;

			for (j = 0; j < npages; j++)
				iopte_make_dummy(iommu, base + j);

			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

/* If contexts are being used, they are the same in all of the mappings
 * we make for a particular SG.
 */
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
	unsigned long ctx = 0;

	if (iommu->iommu_ctxflush) {
		iopte_t *base;
		u32 bus_addr;

		bus_addr = sg->dma_address & IO_PAGE_MASK;
		base = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
	}
	return ctx;
}

static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	unsigned long flags, ctx;
	struct scatterlist *sg;
	struct strbuf *strbuf;
	struct iommu *iommu;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	ctx = fetch_sg_ctx(iommu, sglist);

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;
		iopte_t *base;
		int i;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base)
			 >> IO_PAGE_SHIFT);
		base = iommu->page_table + entry;

		dma_handle &= IO_PAGE_MASK;
		if (strbuf->strbuf_enabled)
			strbuf_flush(strbuf, iommu, dma_handle, ctx,
				     npages, direction);

		for (i = 0; i < npages; i++)
			iopte_make_dummy(iommu, base + i);

		sg = sg_next(sg);
	}

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_map_ops sun4u_dma_ops = {
	.alloc_coherent		= dma_4u_alloc_coherent,
	.free_coherent		= dma_4u_free_coherent,
	.map_page		= dma_4u_map_page,
	.unmap_page		= dma_4u_unmap_page,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
};

struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);

extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);

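/* Masks wider than 32 bits are rejected outright; below that, the
 * device is supported if its mask covers the IOMMU's DMA address
 * mask, with pci64_dma_supported() as a PCI-specific fallback.
 */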
int dma_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;
	u64 dma_addr_mask = iommu->dma_addr_mask;

	if (device_mask >= (1UL << 32UL))
		return 0;

	if ((device_mask & dma_addr_mask) == dma_addr_mask)
		return 1;

#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci64_dma_supported(to_pci_dev(dev), device_mask);
#endif

	return 0;
}
EXPORT_SYMBOL(dma_supported);