/*
 *  linux/arch/m68k/mm/kmap.c
 *
 *  Copyright (C) 1997 Roman Hodek
 *
 *  10/01/99 cleaned up the code and changed to the same interface
 *	     used by other architectures		/Roman Zippel
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/system.h>

#undef DEBUG

#define PTRTREESIZE	(256*1024)

/*
 * For 040/060 we can use the virtual memory area like other architectures,
 * but for 020/030 we want to use early termination page descriptors and we
 * can't mix this with normal page descriptors, so we have to copy that code
 * (mm/vmalloc.c) and return appropriately aligned addresses.
 */
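/*
 * On 020/030 one early termination page descriptor in a pointer table
 * slot maps a whole 256 KiB chunk at once, which is why PTRTREESIZE and
 * the 020/030 IO_SIZE below are both 256 KiB.
 */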

#ifdef CPU_M68040_OR_M68060_ONLY

#define IO_SIZE		PAGE_SIZE

static inline struct vm_struct *get_io_area(unsigned long size)
{
	return get_vm_area(size, VM_IOREMAP);
}

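/*
 * __ioremap() may return an address with a sub-page offset added, so
 * mask that off again before handing the area back to vfree().
 */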
static inline void free_io_area(void *addr)
{
	vfree((void *)(PAGE_MASK & (unsigned long)addr));
}

#else

#define IO_SIZE		(256*1024)

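/* Address-sorted list of io areas between KMAP_START and KMAP_END. */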
static struct vm_struct *iolist;

static struct vm_struct *get_io_area(unsigned long size)
{
	unsigned long addr;
	struct vm_struct **p, *tmp, *area;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;
	addr = KMAP_START;
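	/*
	 * First-fit search: walk the address-sorted list until a gap big
	 * enough for the request opens up before the next area.
	 */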
	for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
		if (size + addr < (unsigned long)tmp->addr)
			break;
		if (addr > KMAP_END-size) {
			kfree(area);
			return NULL;
		}
		addr = tmp->size + (unsigned long)tmp->addr;
	}
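	/* Link the new area in at *p so the list stays address-sorted. */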
	area->addr = (void *)addr;
	area->size = size + IO_SIZE;
	area->next = *p;
	*p = area;
	return area;
}

static inline void free_io_area(void *addr)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;
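	/* Mask off the sub-IO_SIZE offset that __ioremap() added. */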
	addr = (void *)((unsigned long)addr & -IO_SIZE);
	for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			__iounmap(tmp->addr, tmp->size);
			kfree(tmp);
			return;
		}
	}
}

#endif

/*
 * Map some physical address range into the kernel address space. The
 * code is copied and adapted from map_chunk().
 */
/* Rewritten by Andreas Schwab to remove all races. */

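/*
 * A typical call looks like the sketch below; the physical address, the
 * length and the variable name are made up for illustration only:
 *
 *	void __iomem *regs;
 *
 *	regs = __ioremap(0xf0000000, 2 * PAGE_SIZE, IOMAP_NOCACHE_SER);
 *	if (!regs)
 *		return -ENOMEM;
 *	...
 *	iounmap(regs);
 */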
void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
	struct vm_struct *area;
	unsigned long virtaddr, retaddr;
	long offset;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	/*
	 * Don't allow mappings that wrap..
	 */
	if (!size || size > physaddr + size)
		return NULL;

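	/*
	 * On the Amiga this physical window is assumed to be already
	 * mapped 1:1 with caching inhibited and serialized, so the
	 * physical address can be handed back directly.
	 */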
#ifdef CONFIG_AMIGA
	if (MACH_IS_AMIGA) {
		if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
		    && (cacheflag == IOMAP_NOCACHE_SER))
			return (void __iomem *)physaddr;
	}
#endif

#ifdef DEBUG
	printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
	/*
	 * Mappings have to be aligned
	 */
	offset = physaddr & (IO_SIZE - 1);
	physaddr &= -IO_SIZE;
	size = (size + offset + IO_SIZE - 1) & -IO_SIZE;

	/*
	 * Ok, go for it..
	 */
	area = get_io_area(size);
	if (!area)
		return NULL;

	virtaddr = (unsigned long)area->addr;
	retaddr = virtaddr + offset;
#ifdef DEBUG
	printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif

	/*
	 * add cache and table flags to physical address
	 */
	if (CPU_IS_040_OR_060) {
		physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
			     _PAGE_ACCESSED | _PAGE_DIRTY);
		switch (cacheflag) {
		case IOMAP_FULL_CACHING:
			physaddr |= _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			physaddr |= _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			physaddr |= _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			physaddr |= _PAGE_CACHE040W;
			break;
		}
	} else {
		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
		switch (cacheflag) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			physaddr |= _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			break;
		}
	}

	while ((long)size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
		if (!pmd_dir) {
			printk("ioremap: no mem for pmd_dir\n");
			return NULL;
		}

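		/*
		 * 020/030: write an early termination page descriptor
		 * straight into the pointer table, mapping PTRTREESIZE at
		 * a time; 040/060: set up normal page-sized ptes instead.
		 */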
		if (CPU_IS_020_OR_030) {
			pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
			physaddr += PTRTREESIZE;
			virtaddr += PTRTREESIZE;
			size -= PTRTREESIZE;
		} else {
			pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
			if (!pte_dir) {
				printk("ioremap: no mem for pte_dir\n");
				return NULL;
			}

			pte_val(*pte_dir) = physaddr;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
	flush_tlb_all();

	return (void __iomem *)retaddr;
}
EXPORT_SYMBOL(__ioremap);

/*
 * Unmap an ioremap()ed region again
 */
void iounmap(void __iomem *addr)
{
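	/*
	 * Addresses in the direct-mapped Amiga window were never entered
	 * into the io area list by __ioremap(), so they must not be freed.
	 */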
#ifdef CONFIG_AMIGA
	if ((!MACH_IS_AMIGA) ||
	    (((unsigned long)addr < 0x40000000) ||
	     ((unsigned long)addr >= 0x60000000)))
		free_io_area((__force void *)addr);
#else
	free_io_area((__force void *)addr);
#endif
}
EXPORT_SYMBOL(iounmap);

/*
 * __iounmap unmaps nearly everything, so be careful.
 * Currently it doesn't free pointer/page tables anymore, but that
 * wasn't used anyway and might be added later.
 */
void __iounmap(void *addr, unsigned long size)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

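	/*
	 * Tear the range down chunk by chunk: a whole PTRTREESIZE chunk
	 * where an early termination descriptor is found (020/030), one
	 * page at a time otherwise.
	 */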
	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		if (pgd_bad(*pgd_dir)) {
			printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
			pgd_clear(pgd_dir);
			return;
		}
		pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
			int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;

			if (pmd_type == _PAGE_PRESENT) {
				pmd_dir->pmd[pmd_off] = 0;
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			} else if (pmd_type == 0) {
				/* Nothing mapped here; skip the chunk
				 * rather than spinning forever on the
				 * same address. */
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			}
		}

		if (pmd_bad(*pmd_dir)) {
			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = 0;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}

/*
 * Set a new cache mode for some kernel address space.
 * The caller must push any data for that range itself, if such data may
 * already be in the cache.
 */
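/*
 * For example (a sketch; the frame buffer pointer and size are
 * hypothetical), a driver could switch a region to write-through:
 *
 *	kernel_set_cachemode(fb_base, fb_size, IOMAP_WRITETHROUGH);
 */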
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
	unsigned long virtaddr = (unsigned long)addr;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

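	/*
	 * Translate the IOMAP_* constant into the cache bits of a page
	 * descriptor; the bit layout differs between the 040/060 and the
	 * 020/030 MMUs.
	 */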
	if (CPU_IS_040_OR_060) {
		switch (cmode) {
		case IOMAP_FULL_CACHING:
			cmode = _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			cmode = _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			cmode = _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			cmode = _PAGE_CACHE040W;
			break;
		}
	} else {
		switch (cmode) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			cmode = _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			cmode = 0;
			break;
		}
	}

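	/*
	 * Rewrite the cache bits of every descriptor covering the range,
	 * again handling both descriptor formats.
	 */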
	while ((long)size > 0) {
		pgd_dir = pgd_offset_k(virtaddr);
		if (pgd_bad(*pgd_dir)) {
			printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
			pgd_clear(pgd_dir);
			return;
		}
		pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			int pmd_off = (virtaddr/PTRTREESIZE) & 15;

			if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
				pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
							 _CACHEMASK040) | cmode;
				virtaddr += PTRTREESIZE;
				size -= PTRTREESIZE;
				continue;
			}
		}

		if (pmd_bad(*pmd_dir)) {
			printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
			pmd_clear(pmd_dir);
			return;
		}
		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

		pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
		virtaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	flush_tlb_all();
}
EXPORT_SYMBOL(kernel_set_cachemode);