• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/arch/sparc/mm/
1/*
2 * io-unit.c:  IO-UNIT specific routines for memory management.
3 *
4 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
5 */
6
7#include <linux/kernel.h>
8#include <linux/init.h>
9#include <linux/slab.h>
10#include <linux/spinlock.h>
11#include <linux/mm.h>
12#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
13#include <linux/bitops.h>
14#include <linux/scatterlist.h>
15#include <linux/of.h>
16#include <linux/of_device.h>
17
18#include <asm/pgalloc.h>
19#include <asm/pgtable.h>
20#include <asm/io.h>
21#include <asm/io-unit.h>
22#include <asm/mxcc.h>
23#include <asm/cacheflush.h>
24#include <asm/tlbflush.h>
25#include <asm/dma.h>
26#include <asm/oplib.h>
27
/* Define IOUNIT_DEBUG to get IOD() debug printouts from the mapping paths. */
/* #define IOUNIT_DEBUG */
#ifdef IOUNIT_DEBUG
#define IOD(x) printk(x)
#else
#define IOD(x) do { } while (0)
#endif

/* Protection bits for every IOPTE we build: cacheable, writable, valid. */
#define IOPERM        (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
/* Build an IOPTE from a physical address; the page frame is stored >> 4. */
#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
37
38static void __init iounit_iommu_init(struct platform_device *op)
39{
40	struct iounit_struct *iounit;
41	iopte_t *xpt, *xptend;
42
43	iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
44	if (!iounit) {
45		prom_printf("SUN4D: Cannot alloc iounit, halting.\n");
46		prom_halt();
47	}
48
49	iounit->limit[0] = IOUNIT_BMAP1_START;
50	iounit->limit[1] = IOUNIT_BMAP2_START;
51	iounit->limit[2] = IOUNIT_BMAPM_START;
52	iounit->limit[3] = IOUNIT_BMAPM_END;
53	iounit->rotor[1] = IOUNIT_BMAP2_START;
54	iounit->rotor[2] = IOUNIT_BMAPM_START;
55
56	xpt = of_ioremap(&op->resource[2], 0, PAGE_SIZE * 16, "XPT");
57	if (!xpt) {
58		prom_printf("SUN4D: Cannot map External Page Table.");
59		prom_halt();
60	}
61
62	op->dev.archdata.iommu = iounit;
63	iounit->page_table = xpt;
64	spin_lock_init(&iounit->lock);
65
66	for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
67	     xpt < xptend;)
68	     	iopte_val(*xpt++) = 0;
69}
70
71static int __init iounit_init(void)
72{
73	extern void sun4d_init_sbi_irq(void);
74	struct device_node *dp;
75
76	for_each_node_by_name(dp, "sbi") {
77		struct platform_device *op = of_find_device_by_node(dp);
78
79		iounit_iommu_init(op);
80		of_propagate_archdata(op);
81	}
82
83	sun4d_init_sbi_irq();
84
85	return 0;
86}
87
88subsys_initcall(iounit_init);
89
/*
 * Allocate 'npages' consecutive IOPTE slots covering [vaddr, vaddr+size)
 * and install the mappings, returning the DVMA address corresponding to
 * vaddr.  Panics if no run of free slots can be found.
 * One has to hold iounit->lock to call this.
 */
static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	iopte_t iopte;

	npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	/*
	 * A tiny bit of magic ingredient :)  'i' packs the order in which
	 * the three bitmap regions are tried, one region index (1..3) per
	 * nibble, least significant nibble first: 0x0231 = try region 1,
	 * then 3, then 2.  Larger requests prefer the "M" region first.
	 */
	switch (npages) {
	case 1: i = 0x0231; break;
	case 2: i = 0x0132; break;
	default: i = 0x0213; break;
	}

	IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));

	/* Pick the region to scan from the low nibble of 'i'. */
next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];	/* next-fit resume point for region j */
	limit = iounit->limit[j];	/* region j spans limit[j-1]..limit[j] */
	scan = rotor;
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			/* Wrap once: rescan from the region start up to the rotor. */
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		/* Region exhausted: advance to the next nibble, or give up. */
		i >>= 4;
		if (!(i & 15))
			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
		goto next;
	}
	/* First slot is free; check the remaining npages-1 slots of the run. */
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	/* Remember where to resume next time, wrapping to the region start. */
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
	/*
	 * Claim the slots and write one IOPTE per page.  Adding 0x100 steps
	 * the physical page frame, since MKIOPTE stores phys >> 4.
	 */
	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
		set_bit(scan, iounit->bmap);
		iounit->page_table[scan] = iopte;
	}
	IOD(("%08lx\n", vaddr));
	return vaddr;
}
138
139static __u32 iounit_get_scsi_one(struct device *dev, char *vaddr, unsigned long len)
140{
141	struct iounit_struct *iounit = dev->archdata.iommu;
142	unsigned long ret, flags;
143
144	spin_lock_irqsave(&iounit->lock, flags);
145	ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
146	spin_unlock_irqrestore(&iounit->lock, flags);
147	return ret;
148}
149
150static void iounit_get_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
151{
152	struct iounit_struct *iounit = dev->archdata.iommu;
153	unsigned long flags;
154
155	spin_lock_irqsave(&iounit->lock, flags);
156	while (sz != 0) {
157		--sz;
158		sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
159		sg->dma_length = sg->length;
160		sg = sg_next(sg);
161	}
162	spin_unlock_irqrestore(&iounit->lock, flags);
163}
164
165static void iounit_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
166{
167	struct iounit_struct *iounit = dev->archdata.iommu;
168	unsigned long flags;
169
170	spin_lock_irqsave(&iounit->lock, flags);
171	len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
172	vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
173	IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
174	for (len += vaddr; vaddr < len; vaddr++)
175		clear_bit(vaddr, iounit->bmap);
176	spin_unlock_irqrestore(&iounit->lock, flags);
177}
178
179static void iounit_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
180{
181	struct iounit_struct *iounit = dev->archdata.iommu;
182	unsigned long flags;
183	unsigned long vaddr, len;
184
185	spin_lock_irqsave(&iounit->lock, flags);
186	while (sz != 0) {
187		--sz;
188		len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
189		vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
190		IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
191		for (len += vaddr; vaddr < len; vaddr++)
192			clear_bit(vaddr, iounit->bmap);
193		sg = sg_next(sg);
194	}
195	spin_unlock_irqrestore(&iounit->lock, flags);
196}
197
198#ifdef CONFIG_SBUS
/*
 * Establish a permanent DVMA mapping: make the kernel pages starting at
 * virtual address 'va' visible to the device at DVMA address 'addr', for
 * 'len' bytes.  Both the CPU page tables (init_mm, so the CPU can also
 * reach the range at 'addr') and the IO-UNIT's external page table are
 * updated page by page.  The caller's bus address is returned via *pba.
 * Always returns 0.
 *
 * NOTE(review): pte_offset_map() has no matching pte_unmap(); this seems
 * to rely on sparc32 never mapping page tables in highmem — confirm.
 */
static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, __u32 addr, int len)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long page, end;
	pgprot_t dvma_prot;
	iopte_t *iopte;

	*pba = addr;

	/* CPU-side protection for the DVMA window: cached, privileged PTE. */
	dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
	end = PAGE_ALIGN((addr + len));
	while(addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;
			long i;

			/* Walk init_mm down to the PTE for the DVMA address. */
			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			/* CPU mapping: DVMA address -> the backing kernel page. */
			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));

			/* Device mapping: same translation in the IO-UNIT XPT. */
			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);

			iopte = (iopte_t *)(iounit->page_table + i);
			*iopte = MKIOPTE(__pa(page));
		}
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* New translations were written behind the MMU's back; flush everything. */
	flush_cache_all();
	flush_tlb_all();

	return 0;
}
237
/*
 * Intentionally a no-op: permanent DVMA mappings made by
 * iounit_map_dma_area() are never torn down on this platform.
 */
static void iounit_unmap_dma_area(struct device *dev, unsigned long addr, int len)
{
}
241#endif
242
/*
 * The IO-UNIT needs no buffer pinning: hand the caller's pointer back
 * unchanged.  (Registered with BTFIXUPCALL_RETO0, i.e. "return arg 0".)
 */
static char *iounit_lockarea(char *vaddr, unsigned long len)
{
	(void) len;	/* unused: nothing to lock */
	return vaddr;
}
247
/*
 * Counterpart of iounit_lockarea(): nothing was locked, so there is
 * nothing to release.
 */
static void iounit_unlockarea(char *vaddr, unsigned long len)
{
	(void) vaddr;	/* unused */
	(void) len;	/* unused */
}
251
/*
 * Register the IO-UNIT implementations of the sparc32 mmu_* operations
 * via the BTFIXUP run-time patching mechanism.  mmu_lockarea is patched
 * to simply return its first argument (BTFIXUPCALL_RETO0) and
 * mmu_unlockarea to a no-op (BTFIXUPCALL_NOP); the rest become normal
 * calls into this file.
 */
void __init ld_mmu_iounit(void)
{
	BTFIXUPSET_CALL(mmu_lockarea, iounit_lockarea, BTFIXUPCALL_RETO0);
	BTFIXUPSET_CALL(mmu_unlockarea, iounit_unlockarea, BTFIXUPCALL_NOP);

	BTFIXUPSET_CALL(mmu_get_scsi_one, iounit_get_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_get_scsi_sgl, iounit_get_scsi_sgl, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_one, iounit_release_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iounit_release_scsi_sgl, BTFIXUPCALL_NORM);

#ifdef CONFIG_SBUS
	BTFIXUPSET_CALL(mmu_map_dma_area, iounit_map_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unmap_dma_area, iounit_unmap_dma_area, BTFIXUPCALL_NORM);
#endif
}
267