1#ifndef __ASM_AVR32_IO_H
2#define __ASM_AVR32_IO_H
3
4#include <linux/kernel.h>
5#include <linux/string.h>
6#include <linux/types.h>
7
8#include <asm/addrspace.h>
9#include <asm/byteorder.h>
10
11#include <asm/arch/io.h>
12
/* virt_to_phys will only work when address is in P1 or P2 */
/*
 * virt_to_phys - translate a kernel segment virtual address to physical.
 * @address: virtual address in the P1 (cached) or P2 (uncached) segment.
 *
 * Pure address arithmetic via PHYSADDR(); no page-table walk is done,
 * so addresses outside P1/P2 yield a meaningless result.
 *
 * Fixed: use `inline` like every other accessor in this file instead of
 * the deprecated `__inline__` spelling.
 */
static inline unsigned long virt_to_phys(volatile void *address)
{
	return PHYSADDR(address);
}
18
/*
 * phys_to_virt - translate a physical address to a kernel virtual address.
 * @address: physical address.
 *
 * Returns the cached (P1 segment) alias of @address.  Inverse of
 * virt_to_phys() for P1 addresses only.
 *
 * Fixed: use `inline` like every other accessor in this file instead of
 * the deprecated `__inline__` spelling.
 */
static inline void *phys_to_virt(unsigned long address)
{
	return (void *)P1SEGADDR(address);
}
23
/*
 * Convert between physical addresses and the cached (P1) / uncached (P2)
 * kernel segments.  PHYSADDR/P1SEGADDR/P2SEGADDR come from
 * <asm/addrspace.h> — presumably simple segment-bit arithmetic; confirm
 * there.  cached_to_phys and uncached_to_phys are intentionally the same
 * operation: PHYSADDR strips either segment prefix.
 */
#define cached_to_phys(addr)	((unsigned long)PHYSADDR(addr))
#define uncached_to_phys(addr)	((unsigned long)PHYSADDR(addr))
#define phys_to_cached(addr)	((void *)P1SEGADDR(addr))
#define phys_to_uncached(addr)	((void *)P2SEGADDR(addr))
28
/*
 * Generic IO read/write.  These perform native-endian accesses.  Note
 * that some architectures will want to re-define __raw_{read,write}w.
 */
/*
 * Repeated ("string") MMIO accessors: transfer @bytelen/@wordlen/@longlen
 * elements between @data and the single I/O location @addr (the address
 * is not incremented).  Implemented out of line elsewhere in the arch.
 */
extern void __raw_writesb(void __iomem *addr, const void *data, int bytelen);
extern void __raw_writesw(void __iomem *addr, const void *data, int wordlen);
extern void __raw_writesl(void __iomem *addr, const void *data, int longlen);

extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen);
extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
40
41static inline void __raw_writeb(u8 v, volatile void __iomem *addr)
42{
43	*(volatile u8 __force *)addr = v;
44}
45static inline void __raw_writew(u16 v, volatile void __iomem *addr)
46{
47	*(volatile u16 __force *)addr = v;
48}
49static inline void __raw_writel(u32 v, volatile void __iomem *addr)
50{
51	*(volatile u32 __force *)addr = v;
52}
53
54static inline u8 __raw_readb(const volatile void __iomem *addr)
55{
56	return *(const volatile u8 __force *)addr;
57}
58static inline u16 __raw_readw(const volatile void __iomem *addr)
59{
60	return *(const volatile u16 __force *)addr;
61}
62static inline u32 __raw_readl(const volatile void __iomem *addr)
63{
64	return *(const volatile u32 __force *)addr;
65}
66
/* Convert I/O port address to virtual address */
#ifndef __io
/* Ports share the physical address space; access them uncached via P2. */
# define __io(p)	((void *)phys_to_uncached(p))
#endif

/*
 * Not really sure about the best way to slow down I/O on
 * AVR32. Defining it as a no-op until we have an actual test case.
 */
#define SLOW_DOWN_IO	do { } while (0)
77
/*
 * __BUILD_MEMORY_SINGLE(pfx, bwl, type) expands to the accessor pair
 * pfx##write##bwl() and pfx##read##bwl() (e.g. writeb()/readb()).  Each
 * one swizzles the address via the arch hook __swizzle_addr_##bwl() and
 * byte-swaps the value via pfx##ioswab##bwl() before/after a plain
 * volatile access.  The BUILD_BUG_ON rejects instantiation with a type
 * wider than a machine word.  Statement order (swizzle, swab, access)
 * is deliberate — do not reorder.
 */
#define __BUILD_MEMORY_SINGLE(pfx, bwl, type)				\
static inline void							\
pfx##write##bwl(type val, volatile void __iomem *addr)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwl((unsigned long)(addr));	\
	__val = pfx##ioswab##bwl(__addr, val);				\
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
}									\
									\
static inline type pfx##read##bwl(const volatile void __iomem *addr)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwl((unsigned long)(addr));	\
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	__val = *__addr;						\
	return pfx##ioswab##bwl(__addr, __val);				\
}
105
/*
 * __BUILD_IOPORT_SINGLE(pfx, bwl, type, p, slow) expands to the port
 * accessor pair pfx##out##bwl##p() and pfx##in##bwl##p() (e.g.
 * outb()/inb(), outb_p()/inb_p()).  The port number is converted to an
 * uncached virtual address with __io(); `slow' is an optional trailing
 * statement (SLOW_DOWN_IO) executed after the access for the *_p
 * variants, or empty for the plain ones.
 */
#define __BUILD_IOPORT_SINGLE(pfx, bwl, type, p, slow)			\
static inline void pfx##out##bwl##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = __io(__swizzle_addr_##bwl(port));			\
	__val = pfx##ioswab##bwl(__addr, val);				\
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
	slow;								\
}									\
									\
static inline type pfx##in##bwl##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = __io(__swizzle_addr_##bwl(port));			\
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	__val = *__addr;						\
	slow;								\
									\
	return pfx##ioswab##bwl(__addr, __val);				\
}
135
/*
 * Instantiate both the plain accessors (readb, writeb, inb, outb, ...)
 * and the __mem_-prefixed family used by the string helpers below.
 */
#define __BUILD_MEMORY_PFX(bus, bwl, type)				\
	__BUILD_MEMORY_SINGLE(bus, bwl, type)

#define BUILDIO_MEM(bwl, type)						\
	__BUILD_MEMORY_PFX(, bwl, type)					\
	__BUILD_MEMORY_PFX(__mem_, bwl, type)

/* Each port family comes in a plain and a _p (slowed-down) flavour. */
#define __BUILD_IOPORT_PFX(bus, bwl, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwl, type, ,)			\
	__BUILD_IOPORT_SINGLE(bus, bwl, type, _p, SLOW_DOWN_IO)

#define BUILDIO_IOPORT(bwl, type)					\
	__BUILD_IOPORT_PFX(, bwl, type)					\
	__BUILD_IOPORT_PFX(__mem_, bwl, type)
150
/* Generate byte/halfword/word MMIO and port accessors. */
BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)

/* The relaxed variants are plain aliases — no weaker ordering exists here. */
#define readb_relaxed			readb
#define readw_relaxed			readw
#define readl_relaxed			readl
162
/*
 * __BUILD_MEMORY_STRING(bwl, type) expands to writes##bwl()/reads##bwl():
 * transfer @count elements between the buffer @data and the single MMIO
 * location @addr (FIFO-style — the I/O address is NOT incremented).
 * Built on the __mem_ accessor family generated above.
 */
#define __BUILD_MEMORY_STRING(bwl, type)				\
static inline void writes##bwl(volatile void __iomem *addr,		\
			       const void *data, unsigned int count)	\
{									\
	const type *__data = data;					\
									\
	while (count--)							\
		__mem_write##bwl(*__data++, addr);			\
}									\
									\
static inline void reads##bwl(const volatile void __iomem *addr,	\
			      void *data, unsigned int count)		\
{									\
	type *__data = data;						\
									\
	while (count--)							\
		*__data++ = __mem_read##bwl(addr);			\
}
181
/*
 * __BUILD_IOPORT_STRING(bwl, type) expands to outs##bwl()/ins##bwl():
 * the port-space counterparts of writes/reads, again transferring
 * @count elements to/from the single port @port without incrementing it.
 */
#define __BUILD_IOPORT_STRING(bwl, type)				\
static inline void outs##bwl(unsigned long port, const void *data,	\
			     unsigned int count)			\
{									\
	const type *__data = data;					\
									\
	while (count--)							\
		__mem_out##bwl(*__data++, port);			\
}									\
									\
static inline void ins##bwl(unsigned long port, void *data,		\
			   unsigned int count)				\
{									\
	type *__data = data;						\
									\
	while (count--)							\
		*__data++ = __mem_in##bwl(port);			\
}
200
/* Generate the string accessors for byte, halfword and word sizes. */
#define BUILDSTRING(bwl, type)						\
	__BUILD_MEMORY_STRING(bwl, type)				\
	__BUILD_IOPORT_STRING(bwl, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
208
/*
 * io{read,write}{8,16,32} macros in both le (for PCI style consumers) and native be
 */
/*
 * The plain variants go through readw()/writew() etc. (which apply the
 * arch ioswab hooks); the *be variants use the __raw accessors, i.e.
 * native order, which per the comment above is big-endian here.
 */
#ifndef ioread8

#define ioread8(p)		((unsigned int)readb(p))

#define ioread16(p)		((unsigned int)readw(p))
#define ioread16be(p)		((unsigned int)__raw_readw(p))

#define ioread32(p)		((unsigned int)readl(p))
#define ioread32be(p)		((unsigned int)__raw_readl(p))

#define iowrite8(v,p)		writeb(v, p)

#define iowrite16(v,p)		writew(v, p)
#define iowrite16be(v,p)	__raw_writew(v, p)

#define iowrite32(v,p)		writel(v, p)
#define iowrite32be(v,p)	__raw_writel(v, p)

/* Repeated accessors map straight onto the string helpers above. */
#define ioread8_rep(p,d,c)	readsb(p,d,c)
#define ioread16_rep(p,d,c)	readsw(p,d,c)
#define ioread32_rep(p,d,c)	readsl(p,d,c)

#define iowrite8_rep(p,s,c)	writesb(p,s,c)
#define iowrite16_rep(p,s,c)	writesw(p,s,c)
#define iowrite32_rep(p,s,c)	writesl(p,s,c)

#endif
239
240static inline void memcpy_fromio(void * to, const volatile void __iomem *from,
241				 unsigned long count)
242{
243	memcpy(to, (const void __force *)from, count);
244}
245
246static inline void  memcpy_toio(volatile void __iomem *to, const void * from,
247				unsigned long count)
248{
249	memcpy((void __force *)to, from, count);
250}
251
252static inline void memset_io(volatile void __iomem *addr, unsigned char val,
253			     unsigned long count)
254{
255	memset((void __force *)addr, val, count);
256}
257
/* Full 32-bit port range — port I/O is just memory-mapped I/O here. */
#define IO_SPACE_LIMIT	0xffffffff

/* Out-of-line mapping primitives; @flags selects mapping attributes. */
extern void __iomem *__ioremap(unsigned long offset, size_t size,
			       unsigned long flags);
extern void __iounmap(void __iomem *addr);

/*
 * ioremap	-   map bus memory into CPU space
 * @offset	bus address of the memory
 * @size	size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to make
 * bus memory CPU accessible via the readb/.../writel functions and
 * the other mmio helpers. The returned address is not guaranteed to
 * be usable directly as a virtual address.
 */
#define ioremap(offset, size)			\
	__ioremap((offset), (size), 0)

/* Identical to ioremap(): flags 0 presumably already means uncached. */
#define ioremap_nocache(offset, size)		\
	__ioremap((offset), (size), 0)

#define iounmap(addr)				\
	__iounmap(addr)
282
/* Re-tag an address into the cached (P1) or uncached (P2) segment. */
#define cached(addr) P1SEGADDR(addr)
#define uncached(addr) P2SEGADDR(addr)

/* Bus addresses are physical addresses on this architecture. */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
#define page_to_bus page_to_phys
#define bus_to_page phys_to_page

/*
 * Create a virtual mapping cookie for an IO port range.  There exists
 * no such thing as port-based I/O on AVR32, so a regular ioremap()
 * should do what we need.
 */
#define ioport_map(port, nr)	ioremap(port, nr)
#define ioport_unmap(port)	iounmap(port)

/* DMA cache maintenance hooks, backed by the arch dcache primitives. */
#define dma_cache_wback_inv(_start, _size)	\
	flush_dcache_region(_start, _size)
#define dma_cache_inv(_start, _size)		\
	invalidate_dcache_region(_start, _size)
#define dma_cache_wback(_start, _size)		\
	clean_dcache_region(_start, _size)
305
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)    __va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
/* Identity here — /dev/kmem pointers need no translation on this arch. */
#define xlate_dev_kmem_ptr(p)   p
316
317#endif /* __ASM_AVR32_IO_H */
318