1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License.  See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1994, 1995 Waldorf GmbH
7 * Copyright (C) 1994 - 2000 Ralf Baechle
8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
9 */
10#ifndef _ASM_IO_H
11#define _ASM_IO_H
12
13#include <linux/config.h>
14#include <asm/addrspace.h>
15#include <asm/page.h>
16#include <asm/byteorder.h>
17
18#ifdef CONFIG_DECSTATION
19#include <asm/dec/io.h>
20#endif
21
22#ifdef CONFIG_MIPS_ATLAS
23#include <asm/mips-boards/io.h>
24#endif
25
26#ifdef CONFIG_MIPS_MALTA
27#include <asm/mips-boards/io.h>
28#endif
29
30#ifdef CONFIG_MIPS_SEAD
31#include <asm/mips-boards/io.h>
32#endif
33
34#ifdef CONFIG_SGI_IP22
35#include <asm/sgi/io.h>
36#endif
37
38#ifdef CONFIG_SGI_IP27
39#include <asm/sn/io.h>
40#endif
41
42#ifdef CONFIG_SGI_IP32
43#include <asm/ip32/io.h>
44#endif
45
46#ifdef CONFIG_SIBYTE_SB1250
47#include <asm/sibyte/io.h>
48#endif
49
#ifdef CONFIG_SGI_IP27
/* Per-bus address bias for IP27, indexed by PCI bus number. */
extern unsigned long bus_to_baddr[256];

/* Translate between a bus address and the CPU-visible bridge address. */
#define bus_to_baddr(bus, addr)	(bus_to_baddr[(bus)->number] + (addr))
#define baddr_to_bus(bus, addr)	((addr) - bus_to_baddr[(bus)->number])
/* 16-bit port accesses on IP27 need the address xor'd with 2. */
#define __swizzle_addr_w(port)	((port) ^ 2)
#else
/* Everything that isn't IP27 uses identity mappings. */
#define bus_to_baddr(bus, addr)	(addr)
#define baddr_to_bus(bus, addr)	(addr)
#define __swizzle_addr_w(port)	(port)
#endif
61
62/*
63 * Slowdown I/O port space accesses for antique hardware.
64 */
65#undef CONF_SLOWDOWN_IO
66
67/*
68 * Sane hardware offers swapping of I/O space accesses in hardware; less
69 * sane hardware forces software to fiddle with this.  Totally insane hardware
70 * introduces special cases like:
71 *
72 * IP22 seems braindead enough to swap 16-bits values in hardware, but not
73 * 32-bits.  Go figure... Can't tell without documentation.
74 *
75 * We only do the swapping to keep the kernel config bits of bi-endian
76 * machines a bit saner.
77 */
/* Swap 16-bit I/O values on big-endian kernels when the board asks for it;
 * otherwise pass the value through untouched. */
#if defined(CONFIG_SWAP_IO_SPACE_W) && defined(__MIPSEB__)
#define __ioswab16(x) swab16(x)
#else
#define __ioswab16(x) (x)
#endif
/* Same choice for 32-bit values, controlled separately. */
#if defined(CONFIG_SWAP_IO_SPACE_L) && defined(__MIPSEB__)
#define __ioswab32(x) swab32(x)
#else
#define __ioswab32(x) (x)
#endif
88
/*
 * Change "struct page" to physical address.
 * PAGE_TO_PA() is presumably provided by <asm/page.h> (included above) —
 * confirm when touching this.
 */
#define page_to_phys(page)	PAGE_TO_PA(page)
93
94/*
95 *     ioremap         -       map bus memory into CPU space
96 *     @offset:        bus address of the memory
97 *     @size:          size of the resource to map
98 *
99 *     ioremap performs a platform specific sequence of operations to
100 *     make bus memory CPU accessible via the readb/readw/readl/writeb/
101 *     writew/writel functions and the other mmio helpers. The returned
102 *     address is not guaranteed to be usable directly as a virtual
103 *     address.
104 */
105static inline void * ioremap(unsigned long offset, unsigned long size)
106{
107	return (void *) (IO_SPACE_BASE | offset);
108}
109
110/*
111 *     ioremap_nocache         -       map bus memory into CPU space
112 *     @offset:        bus address of the memory
113 *     @size:          size of the resource to map
114 *
115 *     ioremap_nocache performs a platform specific sequence of operations to
116 *     make bus memory CPU accessible via the readb/readw/readl/writeb/
117 *     writew/writel functions and the other mmio helpers. The returned
118 *     address is not guaranteed to be usable directly as a virtual
119 *     address.
120 *
121 *     This version of ioremap ensures that the memory is marked uncachable
122 *     on the CPU as well as honouring existing caching rules from things like
123 *     the PCI bus. Note that there are other caches and buffers on many
 *     busses. In particular driver authors should read up on PCI writes
125 *
126 *     It's useful if some control registers are in such an area and
127 *     write combining or read caching is not desirable:
128 */
129static inline void * ioremap_nocache (unsigned long offset, unsigned long size)
130{
131	return (void *) (IO_SPACE_BASE | offset);
132}
133
/*
 * iounmap - release an ioremap()ed region.  A no-op here because
 * ioremap() only computes an address inside the always-mapped I/O
 * segment and never creates a real mapping to tear down.
 */
static inline void iounmap(void *addr)
{
}
137
/*
 * MMIO accessors.  readw/readl and writew/writel run the value through
 * __ioswab16/__ioswab32 so byte-swapping configurations work; the
 * __raw_ variants perform the access with no swapping at all.
 */
#define readb(addr)		(*(volatile unsigned char *)(addr))
#define readw(addr)		__ioswab16((*(volatile unsigned short *)(addr)))
#define readl(addr)		__ioswab32((*(volatile unsigned int *)(addr)))

#define __raw_readb(addr)	(*(volatile unsigned char *)(addr))
#define __raw_readw(addr)	(*(volatile unsigned short *)(addr))
#define __raw_readl(addr)	(*(volatile unsigned int *)(addr))

#define writeb(b,addr) ((*(volatile unsigned char *)(addr)) = (b))
#define writew(b,addr) ((*(volatile unsigned short *)(addr)) = (__ioswab16(b)))
#define writel(b,addr) ((*(volatile unsigned int *)(addr)) = (__ioswab32(b)))

#define __raw_writeb(b,addr)	((*(volatile unsigned char *)(addr)) = (b))
#define __raw_writew(w,addr)	((*(volatile unsigned short *)(addr)) = (w))
#define __raw_writel(l,addr)	((*(volatile unsigned int *)(addr)) = (l))

/* Bulk MMIO helpers: plain memset/memcpy, so no access-size guarantees. */
#define memset_io(a,b,c)	memset((void *)(a),(b),(c))
#define memcpy_fromio(a,b,c)	memcpy((a),(void *)(b),(c))
#define memcpy_toio(a,b,c)	memcpy((void *)(a),(b),(c))
157
158/*
159 * isa_slot_offset is the address where E(ISA) busaddress 0 is mapped
160 * for the processor.  This implies the assumption that there is only
161 * one of these busses.
162 */
163extern unsigned long isa_slot_offset;
164
165/*
166 * ISA space is 'always mapped' on currently supported MIPS systems, no need
167 * to explicitly ioremap() it. The fact that the ISA IO space is mapped
168 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
169 * are physical addresses. The following constant pointer can be
170 * used as the IO-area pointer (it can be iounmapped as well, so the
171 * analogy with PCI is quite large):
172 */
/* Constant pointer to the base of the always-mapped ISA I/O window. */
#define __ISA_IO_base ((char *)(isa_slot_offset))

/* ISA variants of the MMIO accessors: identical operations, but the
 * argument is an offset from the ISA window base rather than an
 * absolute address. */
#define isa_readb(a) readb(__ISA_IO_base + (a))
#define isa_readw(a) readw(__ISA_IO_base + (a))
#define isa_readl(a) readl(__ISA_IO_base + (a))
#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
#define isa_memset_io(a,b,c)		memset_io(__ISA_IO_base + (a),(b),(c))
#define isa_memcpy_fromio(a,b,c)	memcpy_fromio((a),__ISA_IO_base + (b),(c))
#define isa_memcpy_toio(a,b,c)		memcpy_toio(__ISA_IO_base + (a),(b),(c))

/*
 * We don't have csum_partial_copy_fromio() yet, so we cheat here and
 * just copy it. The net code will then do the checksum later.
 */
#define eth_io_copy_and_sum(skb,src,len,unused) memcpy_fromio((skb)->data,(src),(len))
/* NOTE(review): unlike the other isa_* helpers above, this does not add
 * __ISA_IO_base to the source offset, and it expands to
 * eth_copy_and_sum() rather than eth_io_copy_and_sum() — looks
 * suspicious; confirm against callers before relying on it. */
#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(b),(c),(d))
191
192/*
193 *     check_signature         -       find BIOS signatures
194 *     @io_addr: mmio address to check
195 *     @signature:  signature block
196 *     @length: length of signature
197 *
198 *     Perform a signature comparison with the mmio address io_addr. This
199 *     address should have been obtained by ioremap.
200 *     Returns 1 on a match.
201 */
static inline int check_signature(unsigned long io_addr,
	const unsigned char *signature, int length)
{
	/* Pre-test the count: the old do/while read at least one byte
	   and, on a match with length == 0, decremented length past
	   zero and kept scanning.  A zero length now simply matches. */
	while (length--) {
		if (readb(io_addr) != *signature)
			return 0;
		io_addr++;
		signature++;
	}
	return 1;
}
217
218/*
219 *     isa_check_signature             -       find BIOS signatures
220 *     @io_addr: mmio address to check
221 *     @signature:  signature block
222 *     @length: length of signature
223 *
224 *     Perform a signature comparison with the ISA mmio address io_addr.
225 *     Returns 1 on a match.
226 *
227 *     This function is deprecated. New drivers should use ioremap and
228 *     check_signature.
229 */
230
static inline int isa_check_signature(unsigned long io_addr,
	const unsigned char *signature, int length)
{
	/* Pre-test the count: the old do/while read at least one byte
	   and, on a match with length == 0, decremented length past
	   zero and kept scanning.  A zero length now simply matches. */
	while (length--) {
		if (isa_readb(io_addr) != *signature)
			return 0;
		io_addr++;
		signature++;
	}
	return 1;
}
246
247/*
248 *     virt_to_phys    -       map virtual addresses to physical
249 *     @address: address to remap
250 *
251 *     The returned physical address is the physical (CPU) mapping for
252 *     the memory address given. It is only valid to use this function on
253 *     addresses directly mapped or allocated via kmalloc.
254 *
255 *     This function does not give bus mappings for DMA transfers. In
256 *     almost all conceivable cases a device driver should not be using
257 *     this function
258 */
259
260static inline unsigned long virt_to_phys(volatile void * address)
261{
262	return (unsigned long)address - PAGE_OFFSET;
263}
264
265/*
266 *     phys_to_virt    -       map physical address to virtual
267 *     @address: address to remap
268 *
269 *     The returned virtual address is a current CPU mapping for
270 *     the memory address given. It is only valid to use this function on
271 *     addresses that have a kernel mapping
272 *
273 *     This function does not handle bus mappings for DMA transfers. In
274 *     almost all conceivable cases a device driver should not be using
275 *     this function
276 */
277
278static inline void * phys_to_virt(unsigned long address)
279{
280	return (void *)(address + PAGE_OFFSET);
281}
282
283/*
284 * IO bus memory addresses are also 1:1 with the physical address
285 */
286static inline unsigned long virt_to_bus(volatile void * address)
287{
288	return (unsigned long)address - PAGE_OFFSET;
289}
290
291static inline void * bus_to_virt(unsigned long address)
292{
293	return (void *)(address + PAGE_OFFSET);
294}
295
/* This is too simpleminded for more sophisticated than dumb hardware ...  */
/* Bus addresses equal physical addresses here, so reuse page_to_phys. */
#define page_to_bus page_to_phys
298
299/*
300 * On MIPS I/O ports are memory mapped, so we access them using normal
301 * load/store instructions. mips_io_port_base is the virtual address to
302 * which all ports are being mapped.  For sake of efficiency some code
303 * assumes that this is an address that can be loaded with a single lui
 * instruction, so the lower 16 bits must be zero.  Should be true on
 * any sane architecture; generic code does not use this assumption.
306 */
/*
 * Declared const so it ends up in a read-only section and the compiler
 * can treat it as loop-invariant; set_io_port_base() casts the const
 * away to patch it during early setup.
 */
extern const unsigned long mips_io_port_base;

#define set_io_port_base(base) \
	do { * (unsigned long *) &mips_io_port_base = (base); } while (0)

/* Dummy byte store into I/O space, used purely to pace port accesses. */
#define __SLOW_DOWN_IO \
	__asm__ __volatile__( \
		"sb\t$0,0x80(%0)" \
		: : "r" (mips_io_port_base));

/* SLOW_DOWN_IO expands to nothing unless CONF_SLOWDOWN_IO is enabled
 * (it is #undef'd above); REALLY_SLOW_IO quadruples the delay. */
#ifdef CONF_SLOWDOWN_IO
#ifdef REALLY_SLOW_IO
#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; }
#else
#define SLOW_DOWN_IO __SLOW_DOWN_IO
#endif
#else
#define SLOW_DOWN_IO
#endif
326
/*
 * Port output.  Ports are memory mapped at mips_io_port_base; only the
 * 16-bit accessors route the port number through __swizzle_addr_w().
 * The _p variants append a SLOW_DOWN_IO pause for antique hardware.
 */
#define outb(val,port)							\
do {									\
	*(volatile u8 *)(mips_io_port_base + (port)) = (val);		\
} while(0)

#define outw(val,port)							\
do {									\
	*(volatile u16 *)(mips_io_port_base + __swizzle_addr_w(port)) =	\
		__ioswab16(val);					\
} while(0)

#define outl(val,port)							\
do {									\
	*(volatile u32 *)(mips_io_port_base + (port)) = __ioswab32(val);\
} while(0)

#define outb_p(val,port)						\
do {									\
	*(volatile u8 *)(mips_io_port_base + (port)) = (val);		\
	SLOW_DOWN_IO;							\
} while(0)

#define outw_p(val,port)						\
do {									\
	*(volatile u16 *)(mips_io_port_base + __swizzle_addr_w(port)) =	\
		__ioswab16(val);					\
	SLOW_DOWN_IO;							\
} while(0)

#define outl_p(val,port)						\
do {									\
	*(volatile u32 *)(mips_io_port_base + (port)) = __ioswab32(val);\
	SLOW_DOWN_IO;							\
} while(0)
361
362static inline unsigned char inb(unsigned long port)
363{
364	return *(volatile u8 *)(mips_io_port_base + port);
365}
366
367static inline unsigned short inw(unsigned long port)
368{
369	port = __swizzle_addr_w(port);
370
371	return __ioswab16(*(volatile u16 *)(mips_io_port_base + port));
372}
373
374static inline unsigned int inl(unsigned long port)
375{
376	return __ioswab32(*(volatile u32 *)(mips_io_port_base + port));
377}
378
379static inline unsigned char inb_p(unsigned long port)
380{
381	u8 __val;
382
383	__val = *(volatile u8 *)(mips_io_port_base + port);
384	SLOW_DOWN_IO;
385
386	return __val;
387}
388
389static inline unsigned short inw_p(unsigned long port)
390{
391	u16 __val;
392
393	port = __swizzle_addr_w(port);
394	__val = *(volatile u16 *)(mips_io_port_base + port);
395	SLOW_DOWN_IO;
396
397	return __ioswab16(__val);
398}
399
400static inline unsigned int inl_p(unsigned long port)
401{
402	u32 __val;
403
404	__val = *(volatile u32 *)(mips_io_port_base + port);
405	SLOW_DOWN_IO;
406	return __ioswab32(__val);
407}
408
409static inline void __outsb(unsigned long port, void *addr, unsigned int count)
410{
411	while (count--) {
412		outb(*(u8 *)addr, port);
413		addr++;
414	}
415}
416
417static inline void __insb(unsigned long port, void *addr, unsigned int count)
418{
419	while (count--) {
420		*(u8 *)addr = inb(port);
421		addr++;
422	}
423}
424
425static inline void __outsw(unsigned long port, void *addr, unsigned int count)
426{
427	while (count--) {
428		outw(*(u16 *)addr, port);
429		addr += 2;
430	}
431}
432
433static inline void __insw(unsigned long port, void *addr, unsigned int count)
434{
435	while (count--) {
436		*(u16 *)addr = inw(port);
437		addr += 2;
438	}
439}
440
441static inline void __outsl(unsigned long port, void *addr, unsigned int count)
442{
443	while (count--) {
444		outl(*(u32 *)addr, port);
445		addr += 4;
446	}
447}
448
449static inline void __insl(unsigned long port, void *addr, unsigned int count)
450{
451	while (count--) {
452		*(u32 *)addr = inl(port);
453		addr += 4;
454	}
455}
456
/* String I/O: move count consecutive items between memory and a port. */
#define outsb(port, addr, count) __outsb(port, addr, count)
#define insb(port, addr, count) __insb(port, addr, count)
#define outsw(port, addr, count) __outsw(port, addr, count)
#define insw(port, addr, count) __insw(port, addr, count)
#define outsl(port, addr, count) __outsl(port, addr, count)
#define insl(port, addr, count) __insl(port, addr, count)
463
464/*
465 * The caches on some architectures aren't dma-coherent and have need to
466 * handle this in software.  There are three types of operations that
467 * can be applied to dma buffers.
468 *
 *  - dma_cache_wback_inv(start, size) makes caches and memory coherent by
 *    writing the content of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_wback(start, size) writes the content of the caches back
 *    to memory, if necessary, before DMA transfers from memory to a
 *    device; unlike dma_cache_wback_inv it need not invalidate the lines.
477 *  - dma_cache_inv(start, size) invalidates the affected parts of the
478 *    caches.  Dirty lines of the caches may be written back or simply
479 *    be discarded.  This operation is necessary before dma operations
480 *    to the memory.
481 */
#ifdef CONFIG_NONCOHERENT_IO

/* Indirection through function pointers lets the cache variant chosen
 * at boot install its own implementations. */
extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#define dma_cache_wback_inv(start,size)	_dma_cache_wback_inv(start,size)
#define dma_cache_wback(start,size)	_dma_cache_wback(start,size)
#define dma_cache_inv(start,size)	_dma_cache_inv(start,size)

#else /* Sane hardware */

/* Coherent I/O: no-ops that still reference their arguments so unused
 * variable warnings don't fire at call sites. */
#define dma_cache_wback_inv(start,size)	\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start,size)	\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_inv(start,size)	\
	do { (void) (start); (void) (size); } while (0)

#endif /* CONFIG_NONCOHERENT_IO */
502
503#endif /* _ASM_IO_H */
504