1/* $Id: io-unit.c,v 1.1.1.1 2007/08/03 18:52:18 Exp $
2 * io-unit.c:  IO-UNIT specific routines for memory management.
3 *
4 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
5 */
6
7#include <linux/kernel.h>
8#include <linux/init.h>
9#include <linux/slab.h>
10#include <linux/spinlock.h>
11#include <linux/mm.h>
12#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
13#include <linux/bitops.h>
14
15#include <asm/scatterlist.h>
16#include <asm/pgalloc.h>
17#include <asm/pgtable.h>
18#include <asm/sbus.h>
19#include <asm/io.h>
20#include <asm/io-unit.h>
21#include <asm/mxcc.h>
22#include <asm/cacheflush.h>
23#include <asm/tlbflush.h>
24#include <asm/dma.h>
25#include <asm/oplib.h>
26
/* Define IOUNIT_DEBUG to get a printk trace of map/release operations. */
/* #define IOUNIT_DEBUG */
#ifdef IOUNIT_DEBUG
#define IOD(x) printk(x)
#else
#define IOD(x) do { } while (0)
#endif

/* Protection bits for a DMA-capable IOPTE: cacheable, writable, valid. */
#define IOPERM        (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
/* Build an IOPTE from a physical address; the hardware stores phys >> 4. */
#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
36
/* Boot-time setup for one IO-UNIT (SUN4D SBus interface).
 *
 * Allocates the per-bus iounit_struct, initialises the DVMA bitmap pool
 * boundaries and next-fit rotors, maps the External Page Table (XPT)
 * located via the SBI node's "reg" PROM property, and invalidates every
 * IOPTE in it.  Failure is fatal: DMA cannot work without the XPT.
 */
void __init
iounit_init(int sbi_node, int io_node, struct sbus_bus *sbus)
{
	iopte_t *xpt, *xptend;
	struct iounit_struct *iounit;
	struct linux_prom_registers iommu_promregs[PROMREG_MAX];
	struct resource r;

	/* GFP_ATOMIC: this runs early in boot where sleeping is unsafe. */
	iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
	if (!iounit) {
		prom_printf("SUN4D: Cannot alloc iounit, halting.\n");
		prom_halt();
	}

	/* Three allocation pools: [limit[0],limit[1]), [limit[1],limit[2])
	 * and [limit[2],limit[3]).  rotor[] is the next-fit scan start for
	 * each pool; rotor[0] is left at 0 from kzalloc (assumed to equal
	 * IOUNIT_BMAP1_START -- TODO confirm against io-unit.h). */
	iounit->limit[0] = IOUNIT_BMAP1_START;
	iounit->limit[1] = IOUNIT_BMAP2_START;
	iounit->limit[2] = IOUNIT_BMAPM_START;
	iounit->limit[3] = IOUNIT_BMAPM_END;
	iounit->rotor[1] = IOUNIT_BMAP2_START;
	iounit->rotor[2] = IOUNIT_BMAPM_START;

	xpt = NULL;
	if(prom_getproperty(sbi_node, "reg", (void *) iommu_promregs,
			    sizeof(iommu_promregs)) != -1) {
		prom_apply_generic_ranges(io_node, 0, iommu_promregs, 3);
		memset(&r, 0, sizeof(r));
		/* The third "reg" entry describes the XPT; map 16 pages. */
		r.flags = iommu_promregs[2].which_io;
		r.start = iommu_promregs[2].phys_addr;
		xpt = (iopte_t *) sbus_ioremap(&r, 0, PAGE_SIZE * 16, "XPT");
	}
	if(!xpt) panic("Cannot map External Page Table.");

	/* Generic code only knows iommu_struct; our state hides behind the
	 * same pointer and every user casts it back to iounit_struct. */
	sbus->iommu = (struct iommu_struct *)iounit;
	iounit->page_table = xpt;
	spin_lock_init(&iounit->lock);

	/* Invalidate every IOPTE in the freshly mapped XPT. */
	for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
	     xpt < xptend;)
	     	iopte_val(*xpt++) = 0;
}
77
/* Reserve enough IOPTE slots to cover @size bytes starting at kernel
 * address @vaddr, program them, and return the corresponding DVMA
 * address (sub-page offset included).  Panics if no slots are free.
 *
 * One has to hold iounit->lock to call this.
 */
static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	iopte_t iopte;

	/* Pages touched, counting partial first and last pages. */
        npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	/* A tiny bit of magic ingredience :)
	 * 'i' packs the pool numbers (1..3) to try, one per nibble, lowest
	 * nibble first: 1-page requests try pool 1, then 3, then 2;
	 * 2-page requests try 2,3,1; anything larger tries 3,1,2. */
	switch (npages) {
	case 1: i = 0x0231; break;
	case 2: i = 0x0132; break;
	default: i = 0x0213; break;
	}

	IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));

	/* Next-fit search: scan from the pool's rotor to its top
	 * (limit[j]), wrap once to the pool bottom (limit[j-1]), then
	 * fall back to the next pool encoded in 'i'. */
next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			/* Wrap: rescan [pool bottom, rotor). */
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		/* Pool exhausted; move on to the next nibble / pool. */
		i >>= 4;
		if (!(i & 15))
			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
		goto next;
	}
	/* First bit is free; make sure the following npages-1 are too. */
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
	/* +0x100 steps the IOPTE by one page: MKIOPTE stores phys >> 4,
	 * so with 4K pages PAGE_SIZE >> 4 == 0x100. */
	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
		set_bit(scan, iounit->bmap);
		iounit->page_table[scan] = iopte;
	}
	IOD(("%08lx\n", vaddr));
	return vaddr;
}
126
127static __u32 iounit_get_scsi_one(char *vaddr, unsigned long len, struct sbus_bus *sbus)
128{
129	unsigned long ret, flags;
130	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
131
132	spin_lock_irqsave(&iounit->lock, flags);
133	ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
134	spin_unlock_irqrestore(&iounit->lock, flags);
135	return ret;
136}
137
138static void iounit_get_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
139{
140	unsigned long flags;
141	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
142
143	spin_lock_irqsave(&iounit->lock, flags);
144	while (sz != 0) {
145		--sz;
146		sg[sz].dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg[sz].page) + sg[sz].offset, sg[sz].length);
147		sg[sz].dvma_length = sg[sz].length;
148	}
149	spin_unlock_irqrestore(&iounit->lock, flags);
150}
151
152static void iounit_release_scsi_one(__u32 vaddr, unsigned long len, struct sbus_bus *sbus)
153{
154	unsigned long flags;
155	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
156
157	spin_lock_irqsave(&iounit->lock, flags);
158	len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
159	vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
160	IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
161	for (len += vaddr; vaddr < len; vaddr++)
162		clear_bit(vaddr, iounit->bmap);
163	spin_unlock_irqrestore(&iounit->lock, flags);
164}
165
166static void iounit_release_scsi_sgl(struct scatterlist *sg, int sz, struct sbus_bus *sbus)
167{
168	unsigned long flags;
169	unsigned long vaddr, len;
170	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
171
172	spin_lock_irqsave(&iounit->lock, flags);
173	while (sz != 0) {
174		--sz;
175		len = ((sg[sz].dvma_address & ~PAGE_MASK) + sg[sz].length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
176		vaddr = (sg[sz].dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
177		IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
178		for (len += vaddr; vaddr < len; vaddr++)
179			clear_bit(vaddr, iounit->bmap);
180	}
181	spin_unlock_irqrestore(&iounit->lock, flags);
182}
183
184#ifdef CONFIG_SBUS
/* Establish a DVMA mapping for [addr, addr+len) backed by the kernel
 * virtual range starting at @va: for every page, install a privileged
 * cacheable PTE in init_mm and mirror the translation into the XPT of
 * every IO-UNIT on the system.  *pba receives the bus address
 * (identical to @addr).  Always returns 0.
 */
static int iounit_map_dma_area(dma_addr_t *pba, unsigned long va, __u32 addr, int len)
{
	unsigned long page, end;
	pgprot_t dvma_prot;
	iopte_t *iopte;
	struct sbus_bus *sbus;

	*pba = addr;

	dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
	end = PAGE_ALIGN((addr + len));
	while(addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;
			long i;

			/* Walk init_mm's page tables down to the PTE for
			 * this DVMA address. */
			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);
			/* NOTE(review): no matching pte_unmap() here; this is
			 * harmless only if sparc32 page tables are never in
			 * highmem -- worth confirming. */

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));

			/* XPT slot index for this DVMA page. */
			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);

			/* Mirror the IOPTE into every IO-UNIT's XPT. */
			for_each_sbus(sbus) {
				struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;

				iopte = (iopte_t *)(iounit->page_table + i);
				*iopte = MKIOPTE(__pa(page));
			}
		}
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* Make the new kernel PTEs visible to subsequent accesses. */
	flush_cache_all();
	flush_tlb_all();

	return 0;
}
227
/* Intentionally a no-op: DVMA mappings created by iounit_map_dma_area()
 * are simply left in place on the IO-UNIT. */
static void iounit_unmap_dma_area(unsigned long addr, int len)
{
}
231
232static struct page *iounit_translate_dvma(unsigned long addr)
233{
234	struct sbus_bus *sbus = sbus_root;	/* They are all the same */
235	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
236	int i;
237	iopte_t *iopte;
238
239	i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
240	iopte = (iopte_t *)(iounit->page_table + i);
241	return pfn_to_page(iopte_val(*iopte) >> (PAGE_SHIFT-4));
242}
243#endif
244
/* No locking is required on the IO-UNIT; hand the buffer straight back. */
static char *iounit_lockarea(char *vaddr, unsigned long len)
{
	return vaddr;
}
249
/* Counterpart of iounit_lockarea(); nothing was locked, so do nothing. */
static void iounit_unlockarea(char *vaddr, unsigned long len)
{
}
253
/* Install the IO-UNIT implementations of the mmu_* DMA operations via
 * the sparc32 boot-time code-patching (BTFIXUP) mechanism. */
void __init ld_mmu_iounit(void)
{
	/* lock/unlock are no-ops: patch lockarea to "return arg0" and
	 * unlockarea to nothing at all. */
	BTFIXUPSET_CALL(mmu_lockarea, iounit_lockarea, BTFIXUPCALL_RETO0);
	BTFIXUPSET_CALL(mmu_unlockarea, iounit_unlockarea, BTFIXUPCALL_NOP);

	BTFIXUPSET_CALL(mmu_get_scsi_one, iounit_get_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_get_scsi_sgl, iounit_get_scsi_sgl, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_one, iounit_release_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iounit_release_scsi_sgl, BTFIXUPCALL_NORM);

#ifdef CONFIG_SBUS
	BTFIXUPSET_CALL(mmu_map_dma_area, iounit_map_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unmap_dma_area, iounit_unmap_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_translate_dvma, iounit_translate_dvma, BTFIXUPCALL_NORM);
#endif
}
270
/* Reserve a DVMA range of @size bytes and return its DVMA base address.
 *
 * Same next-fit bitmap search as iounit_get_area(), but with a fixed
 * pool order (3, then 1, then 2) and only the bitmap is touched here:
 * the IOPTEs for the reserved slots are written later, one page at a
 * time, by iounit_map_dma_page().  Panics if no free range exists.
 */
__u32 iounit_map_dma_init(struct sbus_bus *sbus, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	unsigned long flags;
	__u32 ret;
	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;

        npages = (size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
	/* Pool numbers to try, one per nibble, lowest nibble first. */
	i = 0x0213;
	spin_lock_irqsave(&iounit->lock, flags);
next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			/* Wrap around: rescan [pool bottom, rotor). */
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		/* Pool exhausted; fall back to the next nibble / pool. */
		i >>= 4;
		if (!(i & 15))
			panic("iounit_map_dma_init: Couldn't find free iopte slots for %d bytes\n", size);
		goto next;
	}
	/* First bit is free; make sure the following npages-1 are too. */
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	ret = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT);
	/* Mark the range busy; IOPTEs are programmed by the caller later. */
	for (k = 0; k < npages; k++, scan++)
		set_bit(scan, iounit->bmap);
	spin_unlock_irqrestore(&iounit->lock, flags);
	return ret;
}
309
310__u32 iounit_map_dma_page(__u32 vaddr, void *addr, struct sbus_bus *sbus)
311{
312	int scan = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
313	struct iounit_struct *iounit = (struct iounit_struct *)sbus->iommu;
314
315	iounit->page_table[scan] = MKIOPTE(__pa(((unsigned long)addr) & PAGE_MASK));
316	return vaddr + (((unsigned long)addr) & ~PAGE_MASK);
317}
318