1/*-
2 * Copyright (c) 2010 Isilon Systems, Inc.
3 * Copyright (c) 2010 iX Systems, Inc.
4 * Copyright (c) 2010 Panasas, Inc.
5 * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice unmodified, this list of conditions, and the following
13 *    disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * $FreeBSD: stable/11/sys/compat/linuxkpi/common/include/linux/io.h 363149 2020-07-13 15:29:11Z hselasky $
30 */
31#ifndef	_LINUX_IO_H_
32#define	_LINUX_IO_H_
33
34#include <sys/endian.h>
35#include <sys/types.h>
36
37#include <machine/vm.h>
38
39#include <linux/compiler.h>
40#include <linux/types.h>
41
42/*
43 * XXX This is all x86 specific.  It should be bus space access.
44 */
45
46/* Access MMIO registers atomically without barriers and byte swapping. */
47
/* Raw 8-bit MMIO load: no barriers, no byte swapping. */
static inline uint8_t
__raw_readb(const volatile void *addr)
{
	const volatile uint8_t *p = addr;

	return (*p);
}
#define	__raw_readb(addr)	__raw_readb(addr)
54
/* Raw 8-bit MMIO store: no barriers, no byte swapping. */
static inline void
__raw_writeb(uint8_t v, volatile void *addr)
{
	volatile uint8_t *p = addr;

	*p = v;
}
#define	__raw_writeb(v, addr)	__raw_writeb(v, addr)
61
/* Raw 16-bit MMIO load: no barriers, no byte swapping. */
static inline uint16_t
__raw_readw(const volatile void *addr)
{
	const volatile uint16_t *p = addr;

	return (*p);
}
#define	__raw_readw(addr)	__raw_readw(addr)
68
/* Raw 16-bit MMIO store: no barriers, no byte swapping. */
static inline void
__raw_writew(uint16_t v, volatile void *addr)
{
	volatile uint16_t *p = addr;

	*p = v;
}
#define	__raw_writew(v, addr)	__raw_writew(v, addr)
75
/* Raw 32-bit MMIO load: no barriers, no byte swapping. */
static inline uint32_t
__raw_readl(const volatile void *addr)
{
	const volatile uint32_t *p = addr;

	return (*p);
}
#define	__raw_readl(addr)	__raw_readl(addr)
82
/* Raw 32-bit MMIO store: no barriers, no byte swapping. */
static inline void
__raw_writel(uint32_t v, volatile void *addr)
{
	volatile uint32_t *p = addr;

	*p = v;
}
#define	__raw_writel(v, addr)	__raw_writel(v, addr)
89
#ifdef __LP64__
/* Raw 64-bit MMIO load (64-bit platforms only). */
static inline uint64_t
__raw_readq(const volatile void *addr)
{
	const volatile uint64_t *p = addr;

	return (*p);
}
#define	__raw_readq(addr)	__raw_readq(addr)

/* Raw 64-bit MMIO store (64-bit platforms only). */
static inline void
__raw_writeq(uint64_t v, volatile void *addr)
{
	volatile uint64_t *p = addr;

	*p = v;
}
#define	__raw_writeq(v, addr)	__raw_writeq(v, addr)
#endif
105
/* Linux mmiowb() maps to a plain compiler barrier here. */
#define	mmiowb()	barrier()
107
108/* Access little-endian MMIO registers atomically with memory barriers. */
109
#undef readb
/* Ordered 8-bit MMIO load: compiler membars bracket the access. */
static inline uint8_t
readb(const volatile void *addr)
{
	const volatile uint8_t *p = addr;
	uint8_t v;

	__compiler_membar();
	v = *p;
	__compiler_membar();
	return (v);
}
#define	readb(addr)		readb(addr)
122
#undef writeb
/* Ordered 8-bit MMIO store: compiler membars bracket the access. */
static inline void
writeb(uint8_t v, volatile void *addr)
{
	volatile uint8_t *p = addr;

	__compiler_membar();
	*p = v;
	__compiler_membar();
}
#define	writeb(v, addr)		writeb(v, addr)
132
#undef readw
/* Ordered 16-bit MMIO load: compiler membars bracket the access. */
static inline uint16_t
readw(const volatile void *addr)
{
	const volatile uint16_t *p = addr;
	uint16_t v;

	__compiler_membar();
	v = *p;
	__compiler_membar();
	return (v);
}
#define	readw(addr)		readw(addr)
145
#undef writew
/* Ordered 16-bit MMIO store: compiler membars bracket the access. */
static inline void
writew(uint16_t v, volatile void *addr)
{
	volatile uint16_t *p = addr;

	__compiler_membar();
	*p = v;
	__compiler_membar();
}
#define	writew(v, addr)		writew(v, addr)
155
#undef readl
/* Ordered 32-bit MMIO load: compiler membars bracket the access. */
static inline uint32_t
readl(const volatile void *addr)
{
	const volatile uint32_t *p = addr;
	uint32_t v;

	__compiler_membar();
	v = *p;
	__compiler_membar();
	return (v);
}
#define	readl(addr)		readl(addr)
168
#undef writel
/* Ordered 32-bit MMIO store: compiler membars bracket the access. */
static inline void
writel(uint32_t v, volatile void *addr)
{
	volatile uint32_t *p = addr;

	__compiler_membar();
	*p = v;
	__compiler_membar();
}
#define	writel(v, addr)		writel(v, addr)
178
#undef readq
#undef writeq
#ifdef __LP64__
/* Ordered 64-bit MMIO load (64-bit platforms only). */
static inline uint64_t
readq(const volatile void *addr)
{
	const volatile uint64_t *p = addr;
	uint64_t v;

	__compiler_membar();
	v = *p;
	__compiler_membar();
	return (v);
}
#define	readq(addr)		readq(addr)

/* Ordered 64-bit MMIO store (64-bit platforms only). */
static inline void
writeq(uint64_t v, volatile void *addr)
{
	volatile uint64_t *p = addr;

	__compiler_membar();
	*p = v;
	__compiler_membar();
}
#define	writeq(v, addr)		writeq(v, addr)
#endif
203
204/* Access little-endian MMIO registers atomically without memory barriers. */
205
#undef readb_relaxed
/* Relaxed 8-bit MMIO load: no barriers. */
static inline uint8_t
readb_relaxed(const volatile void *addr)
{
	const volatile uint8_t *p = addr;

	return (*p);
}
#define	readb_relaxed(addr)	readb_relaxed(addr)
213
#undef writeb_relaxed
/* Relaxed 8-bit MMIO store: no barriers. */
static inline void
writeb_relaxed(uint8_t v, volatile void *addr)
{
	volatile uint8_t *p = addr;

	*p = v;
}
#define	writeb_relaxed(v, addr)	writeb_relaxed(v, addr)
221
#undef readw_relaxed
/* Relaxed 16-bit MMIO load: no barriers. */
static inline uint16_t
readw_relaxed(const volatile void *addr)
{
	const volatile uint16_t *p = addr;

	return (*p);
}
#define	readw_relaxed(addr)	readw_relaxed(addr)
229
#undef writew_relaxed
/* Relaxed 16-bit MMIO store: no barriers. */
static inline void
writew_relaxed(uint16_t v, volatile void *addr)
{
	volatile uint16_t *p = addr;

	*p = v;
}
#define	writew_relaxed(v, addr)	writew_relaxed(v, addr)
237
#undef readl_relaxed
/* Relaxed 32-bit MMIO load: no barriers. */
static inline uint32_t
readl_relaxed(const volatile void *addr)
{
	const volatile uint32_t *p = addr;

	return (*p);
}
#define	readl_relaxed(addr)	readl_relaxed(addr)
245
#undef writel_relaxed
/* Relaxed 32-bit MMIO store: no barriers. */
static inline void
writel_relaxed(uint32_t v, volatile void *addr)
{
	volatile uint32_t *p = addr;

	*p = v;
}
#define	writel_relaxed(v, addr)	writel_relaxed(v, addr)
253
#undef readq_relaxed
#undef writeq_relaxed
#ifdef __LP64__
/* Relaxed 64-bit MMIO load (64-bit platforms only). */
static inline uint64_t
readq_relaxed(const volatile void *addr)
{
	const volatile uint64_t *p = addr;

	return (*p);
}
#define	readq_relaxed(addr)	readq_relaxed(addr)

/* Relaxed 64-bit MMIO store (64-bit platforms only). */
static inline void
writeq_relaxed(uint64_t v, volatile void *addr)
{
	volatile uint64_t *p = addr;

	*p = v;
}
#define	writeq_relaxed(v, addr)	writeq_relaxed(v, addr)
#endif
271
272/* XXX On Linux ioread and iowrite handle both MMIO and port IO. */
273
#undef ioread8
/* Linux ioread8(): ordered 8-bit read (MMIO only here, no port I/O). */
static inline uint8_t
ioread8(const volatile void *addr)
{
	uint8_t v;

	v = readb(addr);
	return (v);
}
#define	ioread8(addr)		ioread8(addr)
281
#undef ioread16
/* Linux ioread16(): ordered 16-bit read (MMIO only here, no port I/O). */
static inline uint16_t
ioread16(const volatile void *addr)
{
	uint16_t v;

	v = readw(addr);
	return (v);
}
#define	ioread16(addr)		ioread16(addr)
289
#undef ioread16be
/* Byte-swapped 16-bit read: ordered load followed by bswap16(). */
static inline uint16_t
ioread16be(const volatile void *addr)
{
	uint16_t v;

	v = readw(addr);
	return (bswap16(v));
}
#define	ioread16be(addr)	ioread16be(addr)
297
#undef ioread32
/* Linux ioread32(): ordered 32-bit read (MMIO only here, no port I/O). */
static inline uint32_t
ioread32(const volatile void *addr)
{
	uint32_t v;

	v = readl(addr);
	return (v);
}
#define	ioread32(addr)		ioread32(addr)
305
#undef ioread32be
/* Byte-swapped 32-bit read: ordered load followed by bswap32(). */
static inline uint32_t
ioread32be(const volatile void *addr)
{
	uint32_t v;

	v = readl(addr);
	return (bswap32(v));
}
#define	ioread32be(addr)	ioread32be(addr)
313
#undef iowrite8
/* Linux iowrite8(): ordered 8-bit write (MMIO only here, no port I/O). */
static inline void
iowrite8(uint8_t v, volatile void *addr)
{
	writeb(v, addr);
}
#define	iowrite8(v, addr)	iowrite8(v, addr)
321
#undef iowrite16
/* Linux iowrite16(): ordered 16-bit write (MMIO only here, no port I/O). */
static inline void
iowrite16(uint16_t v, volatile void *addr)
{
	writew(v, addr);
}
/*
 * Self-referential define in function-like form, consistent with the
 * other ioread*()/iowrite*() wrappers (was a plain object-like define).
 */
#define	iowrite16(v, addr)	iowrite16(v, addr)
329
#undef iowrite32
/* Linux iowrite32(): ordered 32-bit write (MMIO only here, no port I/O). */
static inline void
iowrite32(uint32_t v, volatile void *addr)
{
	writel(v, addr);
}
#define	iowrite32(v, addr)	iowrite32(v, addr)
337
#undef iowrite32be
/* Byte-swapped 32-bit write: bswap32() the value, then ordered store. */
static inline void
iowrite32be(uint32_t v, volatile void *addr)
{
	uint32_t swapped = bswap32(v);

	writel(swapped, addr);
}
#define	iowrite32be(v, addr)	iowrite32be(v, addr)
345
#if defined(__i386__) || defined(__amd64__)
/*
 * Emit a single byte to an x86 I/O port via the "outb" instruction.
 * Only available on i386/amd64, where port I/O exists.
 */
static inline void
_outb(u_char data, u_int port)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}
#endif
353
/*
 * Backing routine for the ioremap*() macros below: map a physical range
 * with the requested VM memory attribute.  Only implemented on
 * i386/amd64/powerpc; elsewhere the mapping always "fails" (NULL).
 */
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
void *_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr);
#else
#define	_ioremap_attr(...) NULL
#endif
359
/*
 * ioremap() family: map a physical address range with a caching attribute.
 * When the platform defines VM_MEMATTR_DEVICE, the uncached/write-through
 * variants all use the device attribute; otherwise each picks the closest
 * native memory attribute.
 */
#ifdef VM_MEMATTR_DEVICE
#define	ioremap_nocache(addr, size)					\
    _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
#define	ioremap_wt(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
#define	ioremap(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
#else
#define	ioremap_nocache(addr, size)					\
    _ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
#define	ioremap_wt(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_THROUGH)
#define	ioremap(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
#endif
#define	ioremap_wc(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_COMBINING)
#define	ioremap_wb(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_BACK)
/* Release a mapping created by the ioremap*() macros above. */
void iounmap(void *addr);
380
/*
 * Linux memset_io()/memcpy_fromio()/memcpy_toio() operate on MMIO space;
 * mapped MMIO is directly addressable in this implementation, so the
 * plain memory routines are used.
 */
#define	memset_io(a, b, c)	memset((a), (b), (c))
#define	memcpy_fromio(a, b, c)	memcpy((a), (b), (c))
#define	memcpy_toio(a, b, c)	memcpy((a), (b), (c))
384
/*
 * Copy "count" 32-bit words from "from" to the MMIO region "to" using
 * raw (unordered, non-swapping) register writes.
 *
 * "count" is in units of 32-bit words, not bytes.
 */
static inline void
__iowrite32_copy(void *to, void *from, size_t count)
{
	uint32_t *src;
	uint32_t *dst;
	size_t i;	/* size_t: avoid signed/unsigned mismatch with count */

	for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
		__raw_writel(*src, dst);
}
395
/*
 * Copy "count" 64-bit words from "from" to the MMIO region "to" using
 * raw register writes.  On 32-bit platforms each 64-bit word is emitted
 * as two 32-bit writes via __iowrite32_copy().
 */
static inline void
__iowrite64_copy(void *to, void *from, size_t count)
{
#ifdef __LP64__
	uint64_t *src;
	uint64_t *dst;
	size_t i;	/* size_t: avoid signed/unsigned mismatch with count */

	for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
		__raw_writeq(*src, dst);
#else
	__iowrite32_copy(to, from, count * 2);
#endif
}
410
/* Cache-attribute request flags for memremap(); tried in this order. */
enum {
	MEMREMAP_WB = 1 << 0,	/* write-back */
	MEMREMAP_WT = 1 << 1,	/* write-through */
	MEMREMAP_WC = 1 << 2,	/* write-combining */
};
416
417static inline void *
418memremap(resource_size_t offset, size_t size, unsigned long flags)
419{
420	void *addr = NULL;
421
422	if ((flags & MEMREMAP_WB) &&
423	    (addr = ioremap_wb(offset, size)) != NULL)
424		goto done;
425	if ((flags & MEMREMAP_WT) &&
426	    (addr = ioremap_wt(offset, size)) != NULL)
427		goto done;
428	if ((flags & MEMREMAP_WC) &&
429	    (addr = ioremap_wc(offset, size)) != NULL)
430		goto done;
431done:
432	return (addr);
433}
434
/* Release a mapping created by memremap(). */
static inline void
memunmap(void *addr)
{
	/* XXX May need to check if this is RAM */
	iounmap(addr);
}
441
442#endif	/* _LINUX_IO_H_ */
443