/* pci_iommu.c: UltraSparc PCI controller IOM/STC support.
 *
 * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/pci.h>

#include <asm/oplib.h>

#include "iommu_common.h"
#include "pci_impl.h"

#define PCI_STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))

/* Accessing IOMMU and Streaming Buffer registers.
 * REG parameter is a physical address.  All registers
 * are 64 bits in size.
 */
#define pci_iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define pci_iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))

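/* A minimal sketch (not built), showing how the accessors are meant to
 * be used: both take the *physical* address of a register and bypass
 * the MMU via ASI_PHYS_BYPASS_EC_E.  The enable bit below is an
 * assumption for illustration, not a documented bit.
 */
#if 0
static void example_iommu_rmw(struct iommu *iommu)
{
	u64 control;

	control = pci_iommu_read(iommu->iommu_control);	/* 64-bit physical read */
	control |= 1UL;		/* assumed "enable" bit, illustrative only */
	pci_iommu_write(iommu->iommu_control, control);	/* 64-bit physical write */
}
#endif
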
/* Must be invoked under the IOMMU lock. */
static void __iommu_flushall(struct iommu *iommu)
{
	if (iommu->iommu_flushinv) {
		pci_iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		/* The IOMMU TLB tag diagnostic registers sit at a fixed
		 * offset above the flush register on these controllers
		 * (0xa580 and 0x0210 are the respective register offsets);
		 * write zero to all 16 tag entries to invalidate them.
		 */
		tag = iommu->iommu_flush + (0xa580UL - 0x0210UL);
		for (entry = 0; entry < 16; entry++) {
			pci_iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) pci_iommu_read(iommu->write_complete_reg);
	}
}

#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid; instead they
 * are pointed at a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}

/* Based largely upon the ppc64 iommu allocator.  */
static long pci_arena_alloc(struct iommu *iommu, unsigned long npages)
{
	struct iommu_arena *arena = &iommu->arena;
	unsigned long n, i, start, end, limit;
	int pass;

	limit = arena->limit;
	start = arena->hint;
	pass = 0;

again:
	n = find_next_zero_bit(arena->map, limit, start);
	end = n + npages;
	if (unlikely(end >= limit)) {
		if (likely(pass < 1)) {
			limit = start;
			start = 0;
			__iommu_flushall(iommu);
			pass++;
			goto again;
		} else {
			/* Scanned the whole thing, give up. */
			return -1;
		}
	}

	for (i = n; i < end; i++) {
		if (test_bit(i, arena->map)) {
			start = i + 1;
			goto again;
		}
	}

	for (i = n; i < end; i++)
		__set_bit(i, arena->map);

	arena->hint = end;

	return n;
}

static void pci_arena_free(struct iommu_arena *arena, unsigned long base, unsigned long npages)
{
	unsigned long i;

	for (i = base; i < (base + npages); i++)
		__clear_bit(i, arena->map);
}

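/* A minimal sketch (not built) of a round trip through the arena
 * allocator.  pci_arena_alloc() returns a TSB entry index or -1, and
 * the caller must hold iommu->lock since a failed first pass calls
 * __iommu_flushall().
 */
#if 0
static long example_arena_round_trip(struct iommu *iommu)
{
	long entry = pci_arena_alloc(iommu, 4);	/* reserve 4 IO pages */

	if (entry >= 0)
		pci_arena_free(&iommu->arena, entry, 4);
	return entry;
}
#endif
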
void pci_iommu_table_init(struct iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask)
{
	unsigned long i, tsbbase, order, sz, num_tsb_entries;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map, one bit per
	 * TSB entry.
	 */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->arena.map) {
		prom_printf("PCI_IOMMU: Error, kzalloc(arena.map) failed.\n");
		prom_halt();
	}
	iommu->arena.limit = num_tsb_entries;

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
	if (!iommu->dummy_page) {
		prom_printf("PCI_IOMMU: Error, gfp(dummy_page) failed.\n");
		prom_halt();
	}
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself.  */
	order = get_order(tsbsize);
	tsbbase = __get_free_pages(GFP_KERNEL, order);
	if (!tsbbase) {
		prom_printf("PCI_IOMMU: Error, gfp(tsb) failed.\n");
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tsbbase;

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);
}

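/* A hypothetical controller probe calling the initializer above; the
 * TSB size and DVMA window base are illustrative values, real drivers
 * derive them from the probed hardware.
 */
#if 0
static void example_controller_probe(struct iommu *iommu)
{
	/* 128K TSB = 16K iopte_t entries, DVMA window at 0xc0000000,
	 * 32-bit DMA address mask.
	 */
	pci_iommu_table_init(iommu, 128 * 1024, 0xc0000000, 0xffffffff);
}
#endif
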
static inline iopte_t *alloc_npages(struct iommu *iommu, unsigned long npages)
{
	long entry;

	entry = pci_arena_alloc(iommu, npages);
	if (unlikely(entry < 0))
		return NULL;

	return iommu->page_table + entry;
}

static inline void free_npages(struct iommu *iommu, dma_addr_t base, unsigned long npages)
{
	pci_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
}

static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int sz = IOMMU_NUM_CTXS - lowest;
	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);

	if (unlikely(n == sz)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}

/* Allocate and map kernel buffer of size SIZE using consistent mode
 * DMA for PCI device PDEV.  Return non-NULL cpu-side address if
 * successful and set *DMA_ADDRP to the PCI side dma address.
 */
static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp)
{
	struct iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, first_page;
	void *ret;
	int npages;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	first_page = __get_free_pages(gfp, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = pdev->dev.archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}

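/* A minimal driver-side sketch (not built): consumers reach
 * pci_4u_alloc_consistent() through the generic pci_alloc_consistent()
 * wrapper, never directly.  Note the size is rounded up to
 * IO_PAGE_SIZE internally.  "my_pdev" is hypothetical.
 */
#if 0
static int example_alloc_ring(struct pci_dev *my_pdev)
{
	dma_addr_t ring_dma;
	void *ring = pci_alloc_consistent(my_pdev, 4096, &ring_dma);

	if (!ring)
		return -ENOMEM;
	/* ... hand ring_dma to the device, access "ring" from the CPU ... */
	pci_free_consistent(my_pdev, 4096, ring, ring_dma);
	return 0;
}
#endif
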
/* Free and unmap a consistent DMA translation. */
static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = pdev->dev.archdata.iommu;
	iopte = iommu->page_table +
		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	free_npages(iommu, dvma - iommu->page_table_map_base, npages);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

/* Map a single buffer at PTR of SZ bytes for PCI DMA
 * in streaming mode.
 */
static dma_addr_t pci_4u_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = pdev->dev.archdata.iommu;
	strbuf = pdev->dev.archdata.stc;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != PCI_DMA_TODEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return PCI_DMA_ERROR_CODE;
}

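/* A minimal driver-side sketch (not built) of the streaming map/unmap
 * pair via the pci_map_single() wrapper.  The returned handle must be
 * checked against PCI_DMA_ERROR_CODE, which this file returns on
 * allocation failure.
 */
#if 0
static void example_stream_tx(struct pci_dev *my_pdev, void *buf, size_t len)
{
	dma_addr_t handle = pci_map_single(my_pdev, buf, len,
					   PCI_DMA_TODEVICE);

	if (handle == PCI_DMA_ERROR_CODE)
		return;	/* no IOMMU pages (or bad direction) */
	/* ... point the device at "handle", wait for the transfer ... */
	pci_unmap_single(my_pdev, handle, len, PCI_DMA_TODEVICE);
}
#endif
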
static void pci_strbuf_flush(struct strbuf *strbuf, struct iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);

		pci_iommu_write(flushreg, ctx);
		val = pci_iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				pci_iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = pci_iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "pci_strbuf_flush: ctx flush "
			       "timeout matchreg[%lx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			pci_iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not possibly have put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == PCI_DMA_TODEVICE)
		return;

	PCI_STC_FLUSHFLAG_INIT(strbuf);
	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) pci_iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!PCI_STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "pci_strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}

/* Unmap a single streaming mode DMA translation. */
static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = pdev->dev.archdata.iommu;
	strbuf = pdev->dev.archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
#ifdef DEBUG_PCI_IOMMU
	if (IOPTE_IS_DUMMY(iommu, base))
		printk("pci_unmap_single called on non-mapped region %08x,%zx from %p\n",
		       bus_addr, sz, __builtin_return_address(0));
#endif
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx,
				 npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)

static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
			   int nused, int nelems, unsigned long iopte_protection)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	int i;

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = iopte_protection | (pteval & IOPTE_PAGE);
			while (len > 0) {
				*iopte++ = __iopte(pteval);
				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}
}

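/* Worked example for fill_sg(): suppose prepare_sg() coalesced two
 * physically contiguous, IO_PAGE_SIZE (8K) sized and aligned sg
 * entries into one DMA segment (nused == 1, dma_length == 16K, so
 * dma_npages == 2).  The first do-while pass emits the IOPTE covering
 * the first entry's page, the second pass the next page, leaving two
 * consecutive IOPTEs behind the single dma_sg.
 */
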
/* Map a set of buffers described by SGLIST with NELEMS array
 * elements in streaming mode for PCI DMA.
 * When making changes here, inspect the assembly output.  I was having
 * a hard time keeping this routine from using stack slots for holding
 * variables.
 */
static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, iopte_protection;
	iopte_t *base;
	u32 dma_base;
	struct scatterlist *sgtmp;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			pci_4u_map_single(pdev,
					  (page_address(sglist->page) + sglist->offset),
					  sglist->length, direction);
		if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
			return 0;
		sglist->dma_length = sglist->length;
		return 1;
	}

	iommu = pdev->dev.archdata.iommu;
	strbuf = pdev->dev.archdata.stc;

	if (unlikely(direction == PCI_DMA_NONE))
		goto bad_no_ctx;

	/* Step 1: Prepare scatter list. */

	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster and context, if necessary. */

	spin_lock_irqsave(&iommu->lock, flags);

	base = alloc_npages(iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (base == NULL)
		goto bad;

	dma_base = iommu->page_table_map_base +
		((base - iommu->page_table) << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Create the mappings. */
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != PCI_DMA_TODEVICE)
		iopte_protection |= IOPTE_WRITE;

	fill_sg(base, sglist, used, nelems, iopte_protection);

#ifdef VERIFY_SG
	verify_sglist(sglist, nelems, base, npages);
#endif

	return used;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return 0;
}

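/* A minimal driver-side sketch (not built): scatter-gather mapping
 * through the pci_map_sg() wrapper.  The returned count may be
 * smaller than nelems because entries get coalesced; only the first
 * "count" entries carry a valid dma_address/dma_length, while
 * pci_unmap_sg() still takes the original nelems.
 */
#if 0
static void example_map_sg(struct pci_dev *my_pdev, struct scatterlist *sg, int nelems)
{
	int i, count = pci_map_sg(my_pdev, sg, nelems, PCI_DMA_FROMDEVICE);

	for (i = 0; i < count; i++) {
		/* program one device descriptor per coalesced segment */
		dma_addr_t addr = sg_dma_address(&sg[i]);
		unsigned int len = sg_dma_len(&sg[i]);
		/* ... */
	}
	pci_unmap_sg(my_pdev, sg, nelems, PCI_DMA_FROMDEVICE);
}
#endif
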
/* Unmap a set of streaming mode DMA translations. */
static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, ctx, i, npages;
	u32 bus_addr;

	if (unlikely(direction == PCI_DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = pdev->dev.archdata.iommu;
	strbuf = pdev->dev.archdata.stc;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	/* Find the last entry with a non-zero dma_length; together
	 * with bus_addr it bounds the mapped region.
	 */
	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
		  bus_addr) >> IO_PAGE_SHIFT;

	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

#ifdef DEBUG_PCI_IOMMU
	if (IOPTE_IS_DUMMY(iommu, base))
		printk("pci_unmap_sg called on non-mapped region %08x,%d from %p\n",
		       sglist->dma_address, nelems, __builtin_return_address(0));
#endif

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	/* Step 2: Clear out the TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

/* Make physical memory consistent for a single
 * streaming mode DMA translation after a transfer.
 */
static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = pdev->dev.archdata.iommu;
	strbuf = pdev->dev.archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

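/* A minimal driver-side sketch (not built): after the device has
 * written into a streaming mapping, sync before the CPU reads so any
 * dirty streaming-buffer lines are flushed out to memory first.
 */
#if 0
static void example_rx_complete(struct pci_dev *my_pdev, dma_addr_t handle,
				void *buf, size_t len)
{
	pci_dma_sync_single_for_cpu(my_pdev, handle, len, PCI_DMA_FROMDEVICE);
	/* ... the CPU may now safely read "buf" ... */
}
#endif
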
/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 */
static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	u32 bus_addr;

	iommu = pdev->dev.archdata.iommu;
	strbuf = pdev->dev.archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	for (i = 1; i < nelems; i++)
		if (!sglist[i].dma_length)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

const struct pci_iommu_ops pci_sun4u_iommu_ops = {
	.alloc_consistent		= pci_4u_alloc_consistent,
	.free_consistent		= pci_4u_free_consistent,
	.map_single			= pci_4u_map_single,
	.unmap_single			= pci_4u_unmap_single,
	.map_sg				= pci_4u_map_sg,
	.unmap_sg			= pci_4u_unmap_sg,
	.dma_sync_single_for_cpu	= pci_4u_dma_sync_single_for_cpu,
	.dma_sync_sg_for_cpu		= pci_4u_dma_sync_sg_for_cpu,
};

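/* How this table is consumed (sketch, not built): the arch's
 * pci_map_single() and friends are thin inlines that dispatch through
 * a pci_iommu_ops pointer, which boot code points at either this
 * sun4u table or the sun4v one.  Roughly:
 */
#if 0
static inline dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr,
					size_t sz, int direction)
{
	return pci_iommu_ops->map_single(pdev, ptr, sz, direction);
}
#endif
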
static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
{
	struct pci_dev *ali_isa_bridge;
	u8 val;

	/* ALI sound chips generate 31 bits of DMA; a special register
	 * in the M1533 ISA bridge determines what bit 31 is emitted
	 * as.  The bridge is assumed to be present whenever an ALI
	 * audio device is.
	 */
	ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
					PCI_DEVICE_ID_AL_M1533,
					NULL);

	pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
	if (set_bit)
		val |= 0x01;
	else
		val &= ~0x01;
	pci_write_config_byte(ali_isa_bridge, 0x7e, val);
	pci_dev_put(ali_isa_bridge);
}

int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
{
	u64 dma_addr_mask;

	if (pdev == NULL) {
		dma_addr_mask = 0xffffffff;
	} else {
		struct iommu *iommu = pdev->dev.archdata.iommu;

		dma_addr_mask = iommu->dma_addr_mask;

		if (pdev->vendor == PCI_VENDOR_ID_AL &&
		    pdev->device == PCI_DEVICE_ID_AL_M5451 &&
		    device_mask == 0x7fffffff) {
			ali_sound_dma_hack(pdev,
					   (dma_addr_mask & 0x80000000) != 0);
			return 1;
		}
	}

	if (device_mask >= (1UL << 32UL))
		return 0;

	return (device_mask & dma_addr_mask) == dma_addr_mask;
}

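/* A minimal driver-side sketch (not built): drivers do not call
 * pci_dma_supported() themselves; pci_set_dma_mask() consults it and
 * only then stores the new mask.
 */
#if 0
static int example_set_mask(struct pci_dev *my_pdev)
{
	if (pci_set_dma_mask(my_pdev, 0xffffffffULL))
		return -EIO;	/* 32-bit DMA not supported here */
	return 0;
}
#endif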