1/*
2 * I/O device access primitives. Based on early versions from the Linux kernel.
3 *
4 *  Copyright (C) 1996-2000 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef __ASM_ARM_IO_H
11#define __ASM_ARM_IO_H
12
13#include <linux/types.h>
14#include <linux/kernel.h>
15#include <asm/byteorder.h>
16#include <asm/memory.h>
17#include <asm/barriers.h>
18
/*
 * No-op I/O synchronisation hook. Some architectures drain write
 * buffers here; on ARM explicit barriers (dsb/dmb) are used instead,
 * so this is intentionally empty.
 */
static inline void sync(void)
{
}
22
/* Generic virtual read/write. */
/*
 * Width-specific raw MMIO accessors: each expands to a single volatile
 * load or store of the given width at address 'a'. No barriers and no
 * byte swapping - callers layer those on top as required.
 */
#define __arch_getb(a)			(*(volatile unsigned char *)(a))
#define __arch_getw(a)			(*(volatile unsigned short *)(a))
#define __arch_getl(a)			(*(volatile unsigned int *)(a))
#define __arch_getq(a)			(*(volatile unsigned long long *)(a))

#define __arch_putb(v,a)		(*(volatile unsigned char *)(a) = (v))
#define __arch_putw(v,a)		(*(volatile unsigned short *)(a) = (v))
#define __arch_putl(v,a)		(*(volatile unsigned int *)(a) = (v))
#define __arch_putq(v,a)		(*(volatile unsigned long long *)(a) = (v))
33
/*
 * Write 'bytelen' bytes from 'data' to the single I/O address 'addr'
 * (FIFO-style: the address does not advance).
 */
static inline void __raw_writesb(unsigned long addr, const void *data,
				 int bytelen)
{
	const uint8_t *src = data;
	int i;

	for (i = 0; i < bytelen; i++)
		__arch_putb(src[i], addr);
}
41
/*
 * Write 'wordlen' 16-bit values from 'data' to the single I/O address
 * 'addr' (FIFO-style: the address does not advance).
 */
static inline void __raw_writesw(unsigned long addr, const void *data,
				 int wordlen)
{
	const uint16_t *src = data;
	int i;

	for (i = 0; i < wordlen; i++)
		__arch_putw(src[i], addr);
}
49
/*
 * Write 'longlen' 32-bit values from 'data' to the single I/O address
 * 'addr' (FIFO-style: the address does not advance).
 */
static inline void __raw_writesl(unsigned long addr, const void *data,
				 int longlen)
{
	const uint32_t *src = data;
	int i;

	for (i = 0; i < longlen; i++)
		__arch_putl(src[i], addr);
}
57
/*
 * Read 'bytelen' bytes from the single I/O address 'addr' into 'data'
 * (FIFO-style: the address does not advance).
 */
static inline void __raw_readsb(unsigned long addr, void *data, int bytelen)
{
	uint8_t *dst = data;
	int i;

	for (i = 0; i < bytelen; i++)
		dst[i] = __arch_getb(addr);
}
64
/*
 * Read 'wordlen' 16-bit values from the single I/O address 'addr' into
 * 'data' (FIFO-style: the address does not advance).
 */
static inline void __raw_readsw(unsigned long addr, void *data, int wordlen)
{
	uint16_t *dst = data;
	int i;

	for (i = 0; i < wordlen; i++)
		dst[i] = __arch_getw(addr);
}
71
/*
 * Read 'longlen' 32-bit values from the single I/O address 'addr' into
 * 'data' (FIFO-style: the address does not advance).
 */
static inline void __raw_readsl(unsigned long addr, void *data, int longlen)
{
	uint32_t *dst = data;
	int i;

	for (i = 0; i < longlen; i++)
		dst[i] = __arch_getl(addr);
}
78
/*
 * __raw_* accessors: single volatile accesses in native CPU byte
 * order, with no implied barrier.
 */
#define __raw_writeb(v,a)	__arch_putb(v,a)
#define __raw_writew(v,a)	__arch_putw(v,a)
#define __raw_writel(v,a)	__arch_putl(v,a)
#define __raw_writeq(v,a)	__arch_putq(v,a)

#define __raw_readb(a)		__arch_getb(a)
#define __raw_readw(a)		__arch_getw(a)
#define __raw_readl(a)		__arch_getl(a)
#define __raw_readq(a)		__arch_getq(a)
88
/*
 * TODO: The kernel offers some more advanced versions of barriers, it might
 * have some advantages to use them instead of the simple one here.
 */
/* Full memory barriers: all map to a data synchronisation barrier */
#define mb()		dsb()
#define rmb()		dsb()
#define wmb()		dsb()
/* Lighter I/O ordering barriers used by the readX()/writeX() macros */
#define __iormb()	dmb()
#define __iowmb()	dmb()

/* No SMP support in this environment: always report CPU 0 */
#define smp_processor_id()	0
100
/*
 * Ordered MMIO accessors. Each write is preceded by __iowmb() so prior
 * stores are ordered before the device access; each read is followed by
 * __iormb() before dependent accesses. These perform native-endian
 * accesses (no byte swap). The writeX() macros evaluate to the value
 * written; the readX() macros evaluate to the value read.
 */
#define writeb(v,c)	({ u8  __v = v; __iowmb(); __arch_putb(__v,c); __v; })
#define writew(v,c)	({ u16 __v = v; __iowmb(); __arch_putw(__v,c); __v; })
#define writel(v,c)	({ u32 __v = v; __iowmb(); __arch_putl(__v,c); __v; })
#define writeq(v,c)	({ u64 __v = v; __iowmb(); __arch_putq(__v,c); __v; })

#define readb(c)	({ u8  __v = __arch_getb(c); __iormb(); __v; })
#define readw(c)	({ u16 __v = __arch_getw(c); __iormb(); __v; })
#define readl(c)	({ u32 __v = __arch_getl(c); __iormb(); __v; })
#define readq(c)	({ u64 __v = __arch_getq(c); __iormb(); __v; })
110
/*
 * Relaxed I/O memory access primitives. These follow the Device memory
 * ordering rules but do not guarantee any ordering relative to Normal memory
 * accesses.
 */
/*
 * Unlike the plain readX()/writeX() above, the multi-byte relaxed
 * variants convert to/from little-endian register layout; the byte
 * variants need no conversion.
 */
#define readb_relaxed(c)	({ u8  __r = __raw_readb(c); __r; })
#define readw_relaxed(c)	({ u16 __r = le16_to_cpu((__force __le16) \
						__raw_readw(c)); __r; })
#define readl_relaxed(c)	({ u32 __r = le32_to_cpu((__force __le32) \
						__raw_readl(c)); __r; })
#define readq_relaxed(c)	({ u64 __r = le64_to_cpu((__force __le64) \
						__raw_readq(c)); __r; })

#define writeb_relaxed(v, c)	((void)__raw_writeb((v), (c)))
#define writew_relaxed(v, c)	((void)__raw_writew((__force u16) \
						    cpu_to_le16(v), (c)))
#define writel_relaxed(v, c)	((void)__raw_writel((__force u32) \
						    cpu_to_le32(v), (c)))
#define writeq_relaxed(v, c)	((void)__raw_writeq((__force u64) \
						    cpu_to_le64(v), (c)))
131
/*
 * The compiler seems to be incapable of optimising constants
 * properly.  Spell it out to the compiler in some cases.
 * These are only valid for small values of "off" (< 1<<12)
 */
/*
 * NOTE(review): the __arch_base_* primitives are not defined in this
 * header - presumably supplied by a machine-specific header; these
 * aliases are only usable where that is the case.
 */
#define __raw_base_writeb(val,base,off)	__arch_base_putb(val,base,off)
#define __raw_base_writew(val,base,off)	__arch_base_putw(val,base,off)
#define __raw_base_writel(val,base,off)	__arch_base_putl(val,base,off)

#define __raw_base_readb(base,off)	__arch_base_getb(base,off)
#define __raw_base_readw(base,off)	__arch_base_getw(base,off)
#define __raw_base_readl(base,off)	__arch_base_getl(base,off)
144
145/*
146 * Clear and set bits in one shot. These macros can be used to clear and
147 * set multiple bits in a register using a single call. These macros can
148 * also be used to set a multiple-bit bit pattern using a mask, by
149 * specifying the mask in the 'clear' parameter and the new bit pattern
150 * in the 'set' parameter.
151 */
152
/*
 * Compose a raw access with an endianness conversion: 'type' selects
 * the access width (b/w/l/q), 'endian' the wire byte order.
 */
#define out_arch(type,endian,a,v)	__raw_write##type(cpu_to_##endian(v),a)
#define in_arch(type,endian,a)		endian##_to_cpu(__raw_read##type(a))

/* Little-endian register accessors */
#define out_le64(a,v)	out_arch(q,le64,a,v)
#define out_le32(a,v)	out_arch(l,le32,a,v)
#define out_le16(a,v)	out_arch(w,le16,a,v)

#define in_le64(a)	in_arch(q,le64,a)
#define in_le32(a)	in_arch(l,le32,a)
#define in_le16(a)	in_arch(w,le16,a)
163
/*
 * Big-endian register accessors.
 *
 * Fix: the 64-bit variants must use the 'q' (64-bit) raw access type.
 * The previous definitions used 'l', which performed only a 32-bit
 * access and silently truncated/garbled 64-bit big-endian values.
 */
#define out_be64(a,v)	out_arch(q,be64,a,v)
#define out_be32(a,v)	out_arch(l,be32,a,v)
#define out_be16(a,v)	out_arch(w,be16,a,v)

#define in_be64(a)	in_arch(q,be64,a)
#define in_be32(a)	in_arch(l,be32,a)
#define in_be16(a)	in_arch(w,be16,a)
171
/* Native-endian accessors: raw access, no byte swap, no barrier */
#define out_64(a,v)	__raw_writeq(v,a)
#define out_32(a,v)	__raw_writel(v,a)
#define out_16(a,v)	__raw_writew(v,a)
#define out_8(a,v)	__raw_writeb(v,a)

#define in_64(a)	__raw_readq(a)
#define in_32(a)	__raw_readl(a)
#define in_16(a)	__raw_readw(a)
#define in_8(a)		__raw_readb(a)
181
/*
 * Read-modify-write helpers built on the in_/out_ accessors above.
 * Note these are non-atomic: a concurrent writer (interrupt, other
 * master) between the read and the write is not protected against.
 */
#define clrbits(type, addr, clear) \
	out_##type((addr), in_##type(addr) & ~(clear))

#define setbits(type, addr, set) \
	out_##type((addr), in_##type(addr) | (set))

/* Clear 'clear' and set 'set' in a single read-modify-write */
#define clrsetbits(type, addr, clear, set) \
	out_##type((addr), (in_##type(addr) & ~(clear)) | (set))
190
/* Width/endian-specific convenience wrappers around clrbits/setbits/
 * clrsetbits. The be*/le* variants convert to the named byte order;
 * the plain numeric variants access in native endianness. */
#define clrbits_be32(addr, clear) clrbits(be32, addr, clear)
#define setbits_be32(addr, set) setbits(be32, addr, set)
#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set)

#define clrbits_le32(addr, clear) clrbits(le32, addr, clear)
#define setbits_le32(addr, set) setbits(le32, addr, set)
#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set)

#define clrbits_32(addr, clear) clrbits(32, addr, clear)
#define setbits_32(addr, set) setbits(32, addr, set)
#define clrsetbits_32(addr, clear, set) clrsetbits(32, addr, clear, set)

#define clrbits_be16(addr, clear) clrbits(be16, addr, clear)
#define setbits_be16(addr, set) setbits(be16, addr, set)
#define clrsetbits_be16(addr, clear, set) clrsetbits(be16, addr, clear, set)

#define clrbits_le16(addr, clear) clrbits(le16, addr, clear)
#define setbits_le16(addr, set) setbits(le16, addr, set)
#define clrsetbits_le16(addr, clear, set) clrsetbits(le16, addr, clear, set)

#define clrbits_16(addr, clear) clrbits(16, addr, clear)
#define setbits_16(addr, set) setbits(16, addr, set)
#define clrsetbits_16(addr, clear, set) clrsetbits(16, addr, clear, set)

#define clrbits_8(addr, clear) clrbits(8, addr, clear)
#define setbits_8(addr, set) setbits(8, addr, set)
#define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set)

#define clrbits_be64(addr, clear) clrbits(be64, addr, clear)
#define setbits_be64(addr, set) setbits(be64, addr, set)
#define clrsetbits_be64(addr, clear, set) clrsetbits(be64, addr, clear, set)

#define clrbits_le64(addr, clear) clrbits(le64, addr, clear)
#define setbits_le64(addr, set) setbits(le64, addr, set)
#define clrsetbits_le64(addr, clear, set) clrsetbits(le64, addr, clear, set)

#define clrbits_64(addr, clear) clrbits(64, addr, clear)
#define setbits_64(addr, set) setbits(64, addr, set)
#define clrsetbits_64(addr, clear, set) clrsetbits(64, addr, clear, set)
230
231/*
232 *  IO port access primitives
233 *  -------------------------
234 *
235 * The ARM doesn't have special IO access instructions; all IO is memory
236 * mapped.  Note that these are defined to perform little endian accesses
237 * only.  Their primary purpose is to access PCI and ISA peripherals.
238 *
239 * Note that for a big endian machine, this implies that the following
240 * big endian mode connectivity is in place, as described by numerous
241 * ARM documents:
242 *
243 *    PCI:  D0-D7   D8-D15 D16-D23 D24-D31
244 *    ARM: D24-D31 D16-D23  D8-D15  D0-D7
245 *
246 * The machine specific io.h include defines __io to translate an "IO"
247 * address to a memory address.
248 *
249 * Note that we prevent GCC re-ordering or caching values in expressions
250 * by introducing sequence points into the in*() definitions.  Note that
251 * __raw_* do not guarantee this behaviour.
252 *
253 * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space.
254 */
/*
 * x86-style port I/O, only available when the platform provides __io()
 * to translate a port number to a memory address. Multi-byte variants
 * are little-endian per the PCI/ISA convention described above.
 */
#ifdef __io
#define outb(v,p)			__raw_writeb(v,__io(p))
#define outw(v,p)			__raw_writew(cpu_to_le16(v),__io(p))
#define outl(v,p)			__raw_writel(cpu_to_le32(v),__io(p))

#define inb(p)	({ unsigned int __v = __raw_readb(__io(p)); __v; })
#define inw(p)	({ unsigned int __v = le16_to_cpu(__raw_readw(__io(p))); __v; })
#define inl(p)	({ unsigned int __v = le32_to_cpu(__raw_readl(__io(p))); __v; })

/* String variants: repeated access to one port, data buffer advances */
#define outsb(p,d,l)			__raw_writesb(__io(p),d,l)
#define outsw(p,d,l)			__raw_writesw(__io(p),d,l)
#define outsl(p,d,l)			__raw_writesl(__io(p),d,l)

#define insb(p,d,l)			__raw_readsb(__io(p),d,l)
#define insw(p,d,l)			__raw_readsw(__io(p),d,l)
#define insl(p,d,l)			__raw_readsl(__io(p),d,l)
#endif
272
/*
 * "Pausing" x86 port-I/O variants (*_p). On ARM there is no I/O delay
 * to emulate, so they simply alias the plain accessors.
 */
#define outb_p(val,port)		outb((val),(port))
#define outw_p(val,port)		outw((val),(port))
#define outl_p(val,port)		outl((val),(port))
#define inb_p(port)			inb((port))
#define inw_p(port)			inw((port))
#define inl_p(port)			inl((port))

#define outsb_p(port,from,len)		outsb(port,from,len)
#define outsw_p(port,from,len)		outsw(port,from,len)
#define outsl_p(port,from,len)		outsl(port,from,len)
#define insb_p(port,to,len)		insb(port,to,len)
#define insw_p(port,to,len)		insw(port,to,len)
#define insl_p(port,to,len)		insl(port,to,len)

/* Pointer-argument wrappers for the string accessors above */
#define writesl(a, d, s)	__raw_writesl((unsigned long)a, d, s)
#define readsl(a, d, s)		__raw_readsl((unsigned long)a, d, s)
#define writesw(a, d, s)	__raw_writesw((unsigned long)a, d, s)
#define readsw(a, d, s)		__raw_readsw((unsigned long)a, d, s)
#define writesb(a, d, s)	__raw_writesb((unsigned long)a, d, s)
#define readsb(a, d, s)		__raw_readsb((unsigned long)a, d, s)
293
/*
 * String version of IO memory access ops:
 */
/* Implemented out of line - presumably in an arch library file */
extern void _memcpy_fromio(void *, unsigned long, size_t);
extern void _memcpy_toio(unsigned long, const void *, size_t);
extern void _memset_io(unsigned long, int, size_t);
300
/* Optimized copy functions to read from/write to IO space */
302#ifdef CONFIG_ARM64
303#include <cpu_func.h>
304/*
305 * Copy data from IO memory space to "real" memory space.
306 */
307static inline
308void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
309{
310	while (count && !IS_ALIGNED((unsigned long)from, 8)) {
311		*(u8 *)to = __raw_readb(from);
312		from++;
313		to++;
314		count--;
315	}
316
317	if (mmu_status()) {
318		while (count >= 8) {
319			*(u64 *)to = __raw_readq(from);
320			from += 8;
321			to += 8;
322			count -= 8;
323		}
324	}
325
326	while (count) {
327		*(u8 *)to = __raw_readb(from);
328		from++;
329		to++;
330		count--;
331	}
332}
333
334/*
335 * Copy data from "real" memory space to IO memory space.
336 */
337static inline
338void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
339{
340	while (count && !IS_ALIGNED((unsigned long)to, 8)) {
341		__raw_writeb(*(u8 *)from, to);
342		from++;
343		to++;
344		count--;
345	}
346
347	if (mmu_status()) {
348		while (count >= 8) {
349			__raw_writeq(*(u64 *)from, to);
350			from += 8;
351			to += 8;
352			count -= 8;
353		}
354	}
355
356	while (count) {
357		__raw_writeb(*(u8 *)from, to);
358		from++;
359		to++;
360		count--;
361	}
362}
363
364/*
365 * "memset" on IO memory space.
366 */
367static inline
368void __memset_io(volatile void __iomem *dst, int c, size_t count)
369{
370	u64 qc = (u8)c;
371
372	qc |= qc << 8;
373	qc |= qc << 16;
374	qc |= qc << 32;
375
376	while (count && !IS_ALIGNED((unsigned long)dst, 8)) {
377		__raw_writeb(c, dst);
378		dst++;
379		count--;
380	}
381
382	while (count >= 8) {
383		__raw_writeq(qc, dst);
384		dst += 8;
385		count -= 8;
386	}
387
388	while (count) {
389		__raw_writeb(c, dst);
390		dst++;
391		count--;
392	}
393}
394#endif /* CONFIG_ARM64 */
395
/*
 * Public IO string ops. ARM64 uses the alignment-aware inline versions
 * above; other ARM variants fall back to plain memset/memcpy on the
 * (cast) addresses.
 */
#ifdef CONFIG_ARM64
#define memset_io(a, b, c)		__memset_io((a), (b), (c))
#define memcpy_fromio(a, b, c)		__memcpy_fromio((a), (b), (c))
#define memcpy_toio(a, b, c)		__memcpy_toio((a), (b), (c))
#else
#define memset_io(a, b, c)		memset((void *)(a), (b), (c))
#define memcpy_fromio(a, b, c)		memcpy((a), (void *)(b), (c))
#define memcpy_toio(a, b, c)		memcpy((void *)(a), (b), (c))
#endif
405
406#include <asm-generic/io.h>
407#include <iotrace.h>
408
409#endif	/* __ASM_ARM_IO_H */
410