/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef _ASM_ARC_IO_H
#define _ASM_ARC_IO_H

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <asm/unaligned.h>

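/*
 * Only ARCv2 configurations get real MMIO barriers: HS cores are weakly
 * ordered, so the ordered accessors below need rmb()/wmb(). On older ISA
 * builds the helpers compile away and readl()/writel() et al degenerate to
 * their _relaxed() forms.
 */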
#ifdef CONFIG_ISA_ARCV2
#include <asm/barrier.h>
#define __iormb()		rmb()
#define __iowmb()		wmb()
#else
#define __iormb()		do { } while (0)
#define __iowmb()		do { } while (0)
#endif

extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
#define ioremap ioremap
#define ioremap_prot ioremap_prot
#define iounmap iounmap
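/*
 * ARC has no separate port I/O address space: a port "number" is simply an
 * MMIO address, so mapping it is a cast and unmapping is a no-op.
 */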
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	return (void __iomem *)port;
}

static inline void ioport_unmap(void __iomem *addr)
{
}

/*
 * io{read,write}{16,32}be() macros
 */
#define ioread16be(p)		({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
#define ioread32be(p)		({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })

#define iowrite16be(v,p)	({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); })
#define iowrite32be(v,p)	({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); })
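
/*
 * Illustrative sketch only ("regs", "ctrl", ID_REG and CTRL_REG are made-up
 * names for a hypothetical big-endian register block):
 *
 *	u32 id = ioread32be(regs + ID_REG);
 *	iowrite16be(ctrl, regs + CTRL_REG);
 *
 * The swap keeps a fixed big-endian register layout regardless of kernel
 * endianness; the barriers give the same ordering as readl()/writel().
 */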

/* Change struct page to physical address */
#define page_to_phys(page)		(page_to_pfn(page) << PAGE_SHIFT)

#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	u8 b;

	__asm__ __volatile__(
	"	ldb%U1 %0, %1	\n"
	: "=r" (b)
	: "m" (*(volatile u8 __force *)addr)
	: "memory");

	return b;
}

#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	u16 s;

	__asm__ __volatile__(
	"	ldw%U1 %0, %1	\n"
	: "=r" (s)
	: "m" (*(volatile u16 __force *)addr)
	: "memory");

	return s;
}

#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	u32 w;

	__asm__ __volatile__(
	"	ld%U1 %0, %1	\n"
	: "=r" (w)
	: "m" (*(volatile u32 __force *)addr)
	: "memory");

	return w;
}

/*
 * {read,write}s{b,w,l}() repeatedly access the same IO address, transferring
 * @count 8-, 16- or 32-bit chunks {into,from} memory in native endianness
 * (see the usage sketch after the instantiations below).
 */
#define __raw_readsx(t,f) \
static inline void __raw_reads##f(const volatile void __iomem *addr,	\
				  void *ptr, unsigned int count)	\
{									\
	bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0;	\
	u##t *buf = ptr;						\
									\
	if (!count)							\
		return;							\
									\
	/* Some ARC CPUs don't support unaligned accesses */		\
	if (is_aligned) {						\
		do {							\
			u##t x = __raw_read##f(addr);			\
			*buf++ = x;					\
		} while (--count);					\
	} else {							\
		do {							\
			u##t x = __raw_read##f(addr);			\
			put_unaligned(x, buf++);			\
		} while (--count);					\
	}								\
}

#define __raw_readsb __raw_readsb
__raw_readsx(8, b)
#define __raw_readsw __raw_readsw
__raw_readsx(16, w)
#define __raw_readsl __raw_readsl
__raw_readsx(32, l)
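
/*
 * Usage sketch (illustrative only; "fifo", "buf" and the size are made up):
 *
 *	u16 buf[64];
 *	readsw(fifo, buf, ARRAY_SIZE(buf));	64 reads of the same register
 *
 * The memory buffer may be byte-aligned; the helpers above then fall back to
 * put_unaligned()/get_unaligned() instead of plain stores/loads.
 */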

#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
	__asm__ __volatile__(
	"	stb%U1 %0, %1	\n"
	:
	: "r" (b), "m" (*(volatile u8 __force *)addr)
	: "memory");
}

#define __raw_writew __raw_writew
static inline void __raw_writew(u16 s, volatile void __iomem *addr)
{
	__asm__ __volatile__(
	"	stw%U1 %0, %1	\n"
	:
	: "r" (s), "m" (*(volatile u16 __force *)addr)
	: "memory");
}

#define __raw_writel __raw_writel
static inline void __raw_writel(u32 w, volatile void __iomem *addr)
{
	__asm__ __volatile__(
	"	st%U1 %0, %1	\n"
	:
	: "r" (w), "m" (*(volatile u32 __force *)addr)
	: "memory");
}

#define __raw_writesx(t,f)						\
static inline void __raw_writes##f(volatile void __iomem *addr,	\
				   const void *ptr, unsigned int count)	\
{									\
	bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0;	\
	const u##t *buf = ptr;						\
									\
	if (!count)							\
		return;							\
									\
	/* Some ARC CPUs don't support unaligned accesses */		\
	if (is_aligned) {						\
		do {							\
			__raw_write##f(*buf++, addr);			\
		} while (--count);					\
	} else {							\
		do {							\
			__raw_write##f(get_unaligned(buf++), addr);	\
		} while (--count);					\
	}								\
}

#define __raw_writesb __raw_writesb
__raw_writesx(8, b)
#define __raw_writesw __raw_writesw
__raw_writesx(16, w)
#define __raw_writesl __raw_writesl
__raw_writesx(32, l)

/*
 * MMIO can also get buffered/reordered in the micro-architecture, so
 * barriers are needed. Modelled on the ARM scheme for the typical use cases
 * (a fuller sketch follows the accessor definitions below):
 *
 *	<ST [DMA buffer]>
 *	<writel MMIO "go" reg>
 *  or:
 *	<readl MMIO "status" reg>
 *	<LD [DMA buffer]>
 *
 * http://lkml.kernel.org/r/20150622133656.GG1583@arm.com
 */
#define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(); __v; })
#define readsb(p,d,l)		({ __raw_readsb(p,d,l); __iormb(); })
#define readsw(p,d,l)		({ __raw_readsw(p,d,l); __iormb(); })
#define readsl(p,d,l)		({ __raw_readsl(p,d,l); __iormb(); })

#define writeb(v,c)		({ __iowmb(); writeb_relaxed(v,c); })
#define writew(v,c)		({ __iowmb(); writew_relaxed(v,c); })
#define writel(v,c)		({ __iowmb(); writel_relaxed(v,c); })
#define writesb(p,d,l)		({ __iowmb(); __raw_writesb(p,d,l); })
#define writesw(p,d,l)		({ __iowmb(); __raw_writesw(p,d,l); })
#define writesl(p,d,l)		({ __iowmb(); __raw_writesl(p,d,l); })
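
/*
 * Sketch of the pattern above (illustrative only; "regs", "desc" and the
 * register/bit names are made up):
 *
 *	desc->addr = cpu_to_le32(dma_addr);	plain store to DMA memory
 *	writel(GO, regs + CTRL);		wmb(), then MMIO store
 *	...
 *	if (readl(regs + STATUS) & DONE)	MMIO load, then rmb()
 *		process(desc);			later loads see current data
 */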

/*
 * Relaxed API for drivers which can handle barrier ordering themselves
 *
 * These accessors are also defined to perform little-endian accesses:
 * to keep the typical fixed-endian device register semantics, the byte
 * order is swapped on Big Endian kernels.
 *
 * http://lkml.kernel.org/r/201603100845.30602.arnd@arndb.de
 */
#define readb_relaxed(c)	__raw_readb(c)
#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
					__raw_readw(c)); __r; })
#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
					__raw_readl(c)); __r; })

#define writeb_relaxed(v,c)	__raw_writeb(v,c)
#define writew_relaxed(v,c)	__raw_writew((__force u16) cpu_to_le16(v),c)
#define writel_relaxed(v,c)	__raw_writel((__force u32) cpu_to_le32(v),c)
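
/*
 * Batching sketch (illustrative only; "regs" and the DESC_* names are made
 * up): a driver that needs no per-access ordering can use the relaxed
 * writers and pay for a single barrier via the final writel():
 *
 *	writel_relaxed(lower_32_bits(dma), regs + DESC_LO);
 *	writel_relaxed(upper_32_bits(dma), regs + DESC_HI);
 *	writel(DESC_GO, regs + DESC_CTRL);
 */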

#include <asm-generic/io.h>

#endif /* _ASM_ARC_IO_H */