/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/compat/linuxkpi/common/include/linux/io.h 352330 2019-09-14 13:33:52Z hselasky $
 */
#ifndef	_LINUX_IO_H_
#define	_LINUX_IO_H_

#include <machine/vm.h>
#include <sys/endian.h>
#include <sys/types.h>

#include <linux/compiler.h>
#include <linux/types.h>

/*
 * XXX This is all x86 specific.  It should be bus space access.
 */

/* Access MMIO registers atomically without barriers and byte swapping. */

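/*
 * Note: each accessor below is also defined as a macro expanding to itself.
 * This follows the Linux convention of making the accessor visible to the
 * preprocessor, so generic headers and drivers can test for its presence
 * with #ifdef instead of supplying their own fallback.
 */
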
static inline uint8_t
__raw_readb(const volatile void *addr)
{
	return (*(const volatile uint8_t *)addr);
}
#define	__raw_readb(addr)	__raw_readb(addr)

static inline void
__raw_writeb(uint8_t v, volatile void *addr)
{
	*(volatile uint8_t *)addr = v;
}
#define	__raw_writeb(v, addr)	__raw_writeb(v, addr)

static inline uint16_t
__raw_readw(const volatile void *addr)
{
	return (*(const volatile uint16_t *)addr);
}
#define	__raw_readw(addr)	__raw_readw(addr)

static inline void
__raw_writew(uint16_t v, volatile void *addr)
{
	*(volatile uint16_t *)addr = v;
}
#define	__raw_writew(v, addr)	__raw_writew(v, addr)

static inline uint32_t
__raw_readl(const volatile void *addr)
{
	return (*(const volatile uint32_t *)addr);
}
#define	__raw_readl(addr)	__raw_readl(addr)

static inline void
__raw_writel(uint32_t v, volatile void *addr)
{
	*(volatile uint32_t *)addr = v;
}
#define	__raw_writel(v, addr)	__raw_writel(v, addr)

#ifdef __LP64__
static inline uint64_t
__raw_readq(const volatile void *addr)
{
	return (*(const volatile uint64_t *)addr);
}
#define	__raw_readq(addr)	__raw_readq(addr)

static inline void
__raw_writeq(uint64_t v, volatile void *addr)
{
	*(volatile uint64_t *)addr = v;
}
#define	__raw_writeq(v, addr)	__raw_writeq(v, addr)
#endif

#define	mmiowb()	barrier()

/* Access little-endian MMIO registers atomically with memory barriers. */

#undef readb
static inline uint8_t
readb(const volatile void *addr)
{
	uint8_t v;

	__compiler_membar();
	v = *(const volatile uint8_t *)addr;
	__compiler_membar();
	return (v);
}
#define	readb(addr)		readb(addr)

#undef writeb
static inline void
writeb(uint8_t v, volatile void *addr)
{
	__compiler_membar();
	*(volatile uint8_t *)addr = v;
	__compiler_membar();
}
#define	writeb(v, addr)		writeb(v, addr)

#undef readw
static inline uint16_t
readw(const volatile void *addr)
{
	uint16_t v;

	__compiler_membar();
	v = *(const volatile uint16_t *)addr;
	__compiler_membar();
	return (v);
}
#define	readw(addr)		readw(addr)

#undef writew
static inline void
writew(uint16_t v, volatile void *addr)
{
	__compiler_membar();
	*(volatile uint16_t *)addr = v;
	__compiler_membar();
}
#define	writew(v, addr)		writew(v, addr)

#undef readl
static inline uint32_t
readl(const volatile void *addr)
{
	uint32_t v;

	__compiler_membar();
	v = *(const volatile uint32_t *)addr;
	__compiler_membar();
	return (v);
}
#define	readl(addr)		readl(addr)

#undef writel
static inline void
writel(uint32_t v, volatile void *addr)
{
	__compiler_membar();
	*(volatile uint32_t *)addr = v;
	__compiler_membar();
}
#define	writel(v, addr)		writel(v, addr)

#undef readq
#undef writeq
#ifdef __LP64__
static inline uint64_t
readq(const volatile void *addr)
{
	uint64_t v;

	__compiler_membar();
	v = *(const volatile uint64_t *)addr;
	__compiler_membar();
	return (v);
}
#define	readq(addr)		readq(addr)

static inline void
writeq(uint64_t v, volatile void *addr)
{
	__compiler_membar();
	*(volatile uint64_t *)addr = v;
	__compiler_membar();
}
#define	writeq(v, addr)		writeq(v, addr)
#endif

/* Access little-endian MMIO registers atomically without memory barriers. */

#undef readb_relaxed
static inline uint8_t
readb_relaxed(const volatile void *addr)
{
	return (*(const volatile uint8_t *)addr);
}
#define	readb_relaxed(addr)	readb_relaxed(addr)

#undef writeb_relaxed
static inline void
writeb_relaxed(uint8_t v, volatile void *addr)
{
	*(volatile uint8_t *)addr = v;
}
#define	writeb_relaxed(v, addr)	writeb_relaxed(v, addr)

#undef readw_relaxed
static inline uint16_t
readw_relaxed(const volatile void *addr)
{
	return (*(const volatile uint16_t *)addr);
}
#define	readw_relaxed(addr)	readw_relaxed(addr)

#undef writew_relaxed
static inline void
writew_relaxed(uint16_t v, volatile void *addr)
{
	*(volatile uint16_t *)addr = v;
}
#define	writew_relaxed(v, addr)	writew_relaxed(v, addr)

#undef readl_relaxed
static inline uint32_t
readl_relaxed(const volatile void *addr)
{
	return (*(const volatile uint32_t *)addr);
}
#define	readl_relaxed(addr)	readl_relaxed(addr)

#undef writel_relaxed
static inline void
writel_relaxed(uint32_t v, volatile void *addr)
{
	*(volatile uint32_t *)addr = v;
}
#define	writel_relaxed(v, addr)	writel_relaxed(v, addr)

#undef readq_relaxed
#undef writeq_relaxed
#ifdef __LP64__
static inline uint64_t
readq_relaxed(const volatile void *addr)
{
	return (*(const volatile uint64_t *)addr);
}
#define	readq_relaxed(addr)	readq_relaxed(addr)

static inline void
writeq_relaxed(uint64_t v, volatile void *addr)
{
	*(volatile uint64_t *)addr = v;
}
#define	writeq_relaxed(v, addr)	writeq_relaxed(v, addr)
#endif

/* XXX On Linux ioread and iowrite handle both MMIO and port IO. */

#undef ioread8
static inline uint8_t
ioread8(const volatile void *addr)
{
	return (readb(addr));
}
#define	ioread8(addr)		ioread8(addr)

#undef ioread16
static inline uint16_t
ioread16(const volatile void *addr)
{
	return (readw(addr));
}
#define	ioread16(addr)		ioread16(addr)

#undef ioread16be
static inline uint16_t
ioread16be(const volatile void *addr)
{
	return (bswap16(readw(addr)));
}
#define	ioread16be(addr)	ioread16be(addr)

#undef ioread32
static inline uint32_t
ioread32(const volatile void *addr)
{
	return (readl(addr));
}
#define	ioread32(addr)		ioread32(addr)

#undef ioread32be
static inline uint32_t
ioread32be(const volatile void *addr)
{
	return (bswap32(readl(addr)));
}
#define	ioread32be(addr)	ioread32be(addr)

#undef iowrite8
static inline void
iowrite8(uint8_t v, volatile void *addr)
{
	writeb(v, addr);
}
#define	iowrite8(v, addr)	iowrite8(v, addr)

#undef iowrite16
static inline void
iowrite16(uint16_t v, volatile void *addr)
{
	writew(v, addr);
}
#define	iowrite16(v, addr)	iowrite16(v, addr)

#undef iowrite32
static inline void
iowrite32(uint32_t v, volatile void *addr)
{
	writel(v, addr);
}
#define	iowrite32(v, addr)	iowrite32(v, addr)

#undef iowrite32be
static inline void
iowrite32be(uint32_t v, volatile void *addr)
{
	writel(bswap32(v), addr);
}
#define	iowrite32be(v, addr)	iowrite32be(v, addr)

#if defined(__i386__) || defined(__amd64__)
static inline void
_outb(u_char data, u_int port)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}
#endif

#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
void *_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr);
#else
#define	_ioremap_attr(...) NULL
#endif

#ifdef VM_MEMATTR_DEVICE
#define	ioremap_nocache(addr, size)					\
    _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
#define	ioremap_wt(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
#define	ioremap(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
#else
#define	ioremap_nocache(addr, size)					\
    _ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
#define	ioremap_wt(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_THROUGH)
#define	ioremap(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
#endif
#define	ioremap_wc(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_COMBINING)
#define	ioremap_wb(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_BACK)
void iounmap(void *addr);
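
/*
 * Illustrative sketch only: a LinuxKPI driver would typically map a device
 * BAR with ioremap() and access its registers through the accessors above.
 * The register offset, bit mask and address variables here are hypothetical.
 *
 *	void *regs = ioremap(bar_phys_addr, bar_len);
 *	if (regs != NULL) {
 *		uint32_t v = readl((char *)regs + CTRL_REG_OFFSET);
 *		writel(v | CTRL_ENABLE_BIT, (char *)regs + CTRL_REG_OFFSET);
 *		iounmap(regs);
 *	}
 */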

#define	memset_io(a, b, c)	memset((a), (b), (c))
#define	memcpy_fromio(a, b, c)	memcpy((a), (b), (c))
#define	memcpy_toio(a, b, c)	memcpy((a), (b), (c))

/* Copy "count" 32-bit words from "from" to MMIO space at "to". */
static inline void
__iowrite32_copy(void *to, void *from, size_t count)
{
	uint32_t *src;
	uint32_t *dst;
	int i;

	for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
		__raw_writel(*src, dst);
}

/* Copy "count" 64-bit words from "from" to MMIO space at "to". */
static inline void
__iowrite64_copy(void *to, void *from, size_t count)
{
#ifdef __LP64__
	uint64_t *src;
	uint64_t *dst;
	int i;

	for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
		__raw_writeq(*src, dst);
#else
	/* Fall back to two 32-bit writes per 64-bit word. */
	__iowrite32_copy(to, from, count * 2);
#endif
}

enum {
	MEMREMAP_WB = 1 << 0,
	MEMREMAP_WT = 1 << 1,
	MEMREMAP_WC = 1 << 2,
};

static inline void *
memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	void *addr = NULL;

	if ((flags & MEMREMAP_WB) &&
	    (addr = ioremap_wb(offset, size)) != NULL)
		goto done;
	if ((flags & MEMREMAP_WT) &&
	    (addr = ioremap_wt(offset, size)) != NULL)
		goto done;
	if ((flags & MEMREMAP_WC) &&
	    (addr = ioremap_wc(offset, size)) != NULL)
		goto done;
done:
	return (addr);
}

static inline void
memunmap(void *addr)
{
	/* XXX May need to check if this is RAM */
	iounmap(addr);
}
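
/*
 * Illustrative sketch only: memremap() tries the requested cache attributes
 * in order (write-back, then write-through, then write-combining) and
 * returns NULL if none of them can be mapped.  The variables below are
 * hypothetical.
 *
 *	void *p = memremap(phys_addr, len, MEMREMAP_WB | MEMREMAP_WC);
 *	if (p != NULL) {
 *		memset_io(p, 0, len);
 *		memunmap(p);
 *	}
 */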

#endif	/* _LINUX_IO_H_ */