/*	$OpenBSD: pio.h,v 1.2 1998/09/15 10:50:12 pefo Exp $	*/

/*-
 * Copyright (c) 2002-2004 Juli Mallett.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1995-1999 Per Fogelstrom.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Per Fogelstrom.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	JNPR: cpufunc.h,v 1.5 2007/08/09 11:23:32 katta
 * $FreeBSD: head/sys/mips/include/cpufunc.h 295138 2016-02-02 07:47:38Z adrian $
 */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

#include <sys/types.h>
#include <machine/cpuregs.h>

/*
 * These functions are required by user-land atomic ops.
 */

static __inline void
mips_barrier(void)
{
#if defined(CPU_CNMIPS) || defined(CPU_RMI) || defined(CPU_NLM)
	__compiler_membar();
#else
	__asm __volatile (".set noreorder\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  ".set reorder\n\t"
			  : : : "memory");
#endif
}

static __inline void
mips_cp0_sync(void)
{
	__asm __volatile (__XSTRING(COP0_SYNC));
}

static __inline void
mips_wbflush(void)
{
#if defined(CPU_CNMIPS)
	__asm __volatile (".set noreorder\n\t"
			"syncw\n\t"
			".set reorder\n"
			: : : "memory");
#else
	__asm __volatile ("sync" : : : "memory");
	mips_barrier();
#endif
}
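
/*
 * Example (illustrative only; the names below are made up): a driver that
 * fills in a DMA descriptor and then pokes a doorbell register would call
 * mips_wbflush() in between, so the descriptor stores are pushed out of the
 * write buffer before the device is told to look at them:
 *
 *	desc->addr = paddr;
 *	desc->len = len;
 *	mips_wbflush();
 *	writel(doorbell_reg, DOORBELL_GO);
 */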

#ifdef _KERNEL
/*
 * XXX
 * It would be nice to add variants that read/write register_t, to avoid some
 * ABI checks.
 */
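/*
 * One possible shape for such a variant (a sketch only; it is not implemented
 * here and the name is hypothetical).  The ABI check moves into the accessor
 * itself, mirroring how this header splits entryhi ($10) between the 64-bit
 * and 32-bit accessors below:
 *
 *	static __inline register_t
 *	mips_rd_entryhi_reg(void)
 *	{
 *		register_t v;
 *
 *	#if defined(__mips_n64)
 *		__asm __volatile ("dmfc0 %0, $10" : "=r"(v));
 *	#else
 *		__asm __volatile ("mfc0 %0, $10" : "=r"(v));
 *	#endif
 *		mips_barrier();
 *		return (v);
 *	}
 */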
#if defined(__mips_n32) || defined(__mips_n64)
#define	MIPS_RW64_COP0(n,r)					\
static __inline uint64_t					\
mips_rd_ ## n (void)						\
{								\
	uint64_t v0;						\
	__asm __volatile ("dmfc0 %[v0], $"__XSTRING(r)";"	\
			  : [v0] "=&r"(v0));			\
	mips_barrier();						\
	return (v0);						\
}								\
static __inline void						\
mips_wr_ ## n (uint64_t a0)					\
{								\
	__asm __volatile ("dmtc0 %[a0], $"__XSTRING(r)";"	\
			 __XSTRING(COP0_SYNC)";"		\
			 "nop;"					\
			 "nop;"					\
			 :					\
			 : [a0] "r"(a0));			\
	mips_barrier();						\
} struct __hack

#define	MIPS_RW64_COP0_SEL(n,r,s)				\
static __inline uint64_t					\
mips_rd_ ## n(void)						\
{								\
	uint64_t v0;						\
	__asm __volatile ("dmfc0 %[v0], $"__XSTRING(r)", "__XSTRING(s)";"	\
			  : [v0] "=&r"(v0));			\
	mips_barrier();						\
	return (v0);						\
}								\
static __inline void						\
mips_wr_ ## n(uint64_t a0)					\
{								\
	__asm __volatile ("dmtc0 %[a0], $"__XSTRING(r)", "__XSTRING(s)";"	\
			 __XSTRING(COP0_SYNC)";"		\
			 :					\
			 : [a0] "r"(a0));			\
	mips_barrier();						\
} struct __hack

#if defined(__mips_n64)
MIPS_RW64_COP0(excpc, MIPS_COP_0_EXC_PC);
MIPS_RW64_COP0(entryhi, MIPS_COP_0_TLB_HI);
MIPS_RW64_COP0(pagemask, MIPS_COP_0_TLB_PG_MASK);
#ifdef CPU_CNMIPS
MIPS_RW64_COP0_SEL(cvmcount, MIPS_COP_0_COUNT, 6);
MIPS_RW64_COP0_SEL(cvmctl, MIPS_COP_0_COUNT, 7);
MIPS_RW64_COP0_SEL(cvmmemctl, MIPS_COP_0_COMPARE, 7);
MIPS_RW64_COP0_SEL(icache_err, MIPS_COP_0_CACHE_ERR, 0);
MIPS_RW64_COP0_SEL(dcache_err, MIPS_COP_0_CACHE_ERR, 1);
#endif
#endif
#if defined(__mips_n64) || defined(__mips_n32) /* PHYSADDR_64_BIT */
MIPS_RW64_COP0(entrylo0, MIPS_COP_0_TLB_LO0);
MIPS_RW64_COP0(entrylo1, MIPS_COP_0_TLB_LO1);
#endif
MIPS_RW64_COP0(xcontext, MIPS_COP_0_TLB_XCONTEXT);

#undef	MIPS_RW64_COP0
#undef	MIPS_RW64_COP0_SEL
#endif

#define	MIPS_RW32_COP0(n,r)					\
static __inline uint32_t					\
mips_rd_ ## n (void)						\
{								\
	int v0;							\
	__asm __volatile ("mfc0 %[v0], $"__XSTRING(r)";"	\
			  : [v0] "=&r"(v0));			\
	mips_barrier();						\
	return (v0);						\
}								\
static __inline void						\
mips_wr_ ## n (uint32_t a0)					\
{								\
	__asm __volatile ("mtc0 %[a0], $"__XSTRING(r)";"	\
			 __XSTRING(COP0_SYNC)";"		\
			 "nop;"					\
			 "nop;"					\
			 :					\
			 : [a0] "r"(a0));			\
	mips_barrier();						\
} struct __hack

#define	MIPS_RW32_COP0_SEL(n,r,s)				\
static __inline uint32_t					\
mips_rd_ ## n(void)						\
{								\
	int v0;							\
	__asm __volatile ("mfc0 %[v0], $"__XSTRING(r)", "__XSTRING(s)";"	\
			  : [v0] "=&r"(v0));			\
	mips_barrier();						\
	return (v0);						\
}								\
static __inline void						\
mips_wr_ ## n(uint32_t a0)					\
{								\
	__asm __volatile ("mtc0 %[a0], $"__XSTRING(r)", "__XSTRING(s)";"	\
			 __XSTRING(COP0_SYNC)";"		\
			 "nop;"					\
			 "nop;"					\
			 :					\
			 : [a0] "r"(a0));			\
	mips_barrier();						\
} struct __hack
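
/*
 * For example, MIPS_RW32_COP0(status, MIPS_COP_0_STATUS) below expands to a
 * pair of accessors, mips_rd_status() and mips_wr_status(), that read and
 * write the CP0 Status register with the required hazard barriers.  The
 * trailing "struct __hack" only exists so that each macro invocation can be
 * terminated with a semicolon.
 */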

#ifdef CPU_CNMIPS
static __inline void mips_sync_icache (void)
{
	__asm __volatile (
		".set push\n"
		".set mips64\n"
		".word 0x041f0000\n"		/* synci 0($0): invalidate icache */
		"nop\n"
		".set pop\n"
		: : );
}
#endif

MIPS_RW32_COP0(compare, MIPS_COP_0_COMPARE);
MIPS_RW32_COP0(config, MIPS_COP_0_CONFIG);
MIPS_RW32_COP0_SEL(config1, MIPS_COP_0_CONFIG, 1);
MIPS_RW32_COP0_SEL(config2, MIPS_COP_0_CONFIG, 2);
MIPS_RW32_COP0_SEL(config3, MIPS_COP_0_CONFIG, 3);
#ifdef CPU_CNMIPS
MIPS_RW32_COP0_SEL(config4, MIPS_COP_0_CONFIG, 4);
#endif
#ifdef BERI_LARGE_TLB
MIPS_RW32_COP0_SEL(config5, MIPS_COP_0_CONFIG, 5);
#endif
#if defined(CPU_NLM) || defined(BERI_LARGE_TLB)
MIPS_RW32_COP0_SEL(config6, MIPS_COP_0_CONFIG, 6);
#endif
#if defined(CPU_NLM) || defined(CPU_MIPS1004K)
MIPS_RW32_COP0_SEL(config7, MIPS_COP_0_CONFIG, 7);
#endif
MIPS_RW32_COP0(count, MIPS_COP_0_COUNT);
MIPS_RW32_COP0(index, MIPS_COP_0_TLB_INDEX);
MIPS_RW32_COP0(wired, MIPS_COP_0_TLB_WIRED);
MIPS_RW32_COP0(cause, MIPS_COP_0_CAUSE);
#if !defined(__mips_n64)
MIPS_RW32_COP0(excpc, MIPS_COP_0_EXC_PC);
#endif
MIPS_RW32_COP0(status, MIPS_COP_0_STATUS);
MIPS_RW32_COP0_SEL(cmgcrbase, 15, 3);

/* XXX: Some of these registers are specific to MIPS32. */
#if !defined(__mips_n64)
MIPS_RW32_COP0(entryhi, MIPS_COP_0_TLB_HI);
MIPS_RW32_COP0(pagemask, MIPS_COP_0_TLB_PG_MASK);
#endif
#ifdef CPU_NLM
MIPS_RW32_COP0_SEL(pagegrain, MIPS_COP_0_TLB_PG_MASK, 1);
#endif
#if !defined(__mips_n64) && !defined(__mips_n32) /* !PHYSADDR_64_BIT */
MIPS_RW32_COP0(entrylo0, MIPS_COP_0_TLB_LO0);
MIPS_RW32_COP0(entrylo1, MIPS_COP_0_TLB_LO1);
#endif
MIPS_RW32_COP0(prid, MIPS_COP_0_PRID);
/* XXX 64-bit?  */
MIPS_RW32_COP0_SEL(ebase, MIPS_COP_0_PRID, 1);
MIPS_RW32_COP0(watchlo, MIPS_COP_0_WATCH_LO);
MIPS_RW32_COP0_SEL(watchlo1, MIPS_COP_0_WATCH_LO, 1);
MIPS_RW32_COP0_SEL(watchlo2, MIPS_COP_0_WATCH_LO, 2);
MIPS_RW32_COP0_SEL(watchlo3, MIPS_COP_0_WATCH_LO, 3);
MIPS_RW32_COP0(watchhi, MIPS_COP_0_WATCH_HI);
MIPS_RW32_COP0_SEL(watchhi1, MIPS_COP_0_WATCH_HI, 1);
MIPS_RW32_COP0_SEL(watchhi2, MIPS_COP_0_WATCH_HI, 2);
MIPS_RW32_COP0_SEL(watchhi3, MIPS_COP_0_WATCH_HI, 3);

MIPS_RW32_COP0_SEL(perfcnt0, MIPS_COP_0_PERFCNT, 0);
MIPS_RW32_COP0_SEL(perfcnt1, MIPS_COP_0_PERFCNT, 1);
MIPS_RW32_COP0_SEL(perfcnt2, MIPS_COP_0_PERFCNT, 2);
MIPS_RW32_COP0_SEL(perfcnt3, MIPS_COP_0_PERFCNT, 3);

#undef	MIPS_RW32_COP0
#undef	MIPS_RW32_COP0_SEL

static __inline register_t
intr_disable(void)
{
	register_t s;

	s = mips_rd_status();
	mips_wr_status(s & ~MIPS_SR_INT_IE);

	return (s & MIPS_SR_INT_IE);
}

static __inline register_t
intr_enable(void)
{
	register_t s;

	s = mips_rd_status();
	mips_wr_status(s | MIPS_SR_INT_IE);

	return (s);
}

static __inline void
intr_restore(register_t ie)
{
	if (ie == MIPS_SR_INT_IE) {
		intr_enable();
	}
}
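
/*
 * Typical use (illustrative only): bracket a short critical section with
 * intr_disable()/intr_restore(), so that interrupts are re-enabled on exit
 * only if they were enabled on entry:
 *
 *	register_t s;
 *
 *	s = intr_disable();
 *	... touch state that must not be interrupted ...
 *	intr_restore(s);
 */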

static __inline uint32_t
set_intr_mask(uint32_t mask)
{
	uint32_t ostatus;

	ostatus = mips_rd_status();
	mask = (ostatus & ~MIPS_SR_INT_MASK) | (mask & MIPS_SR_INT_MASK);
	mips_wr_status(mask);
	return (ostatus);
}

static __inline uint32_t
get_intr_mask(void)
{

	return (mips_rd_status() & MIPS_SR_INT_MASK);
}

static __inline void
breakpoint(void)
{
	__asm __volatile ("break");
}

#if defined(__GNUC__) && !defined(__mips_o32)
#define	mips3_ld(a)	(*(const volatile uint64_t *)(a))
#define	mips3_sd(a, v)	(*(volatile uint64_t *)(a) = (v))
#else
uint64_t mips3_ld(volatile uint64_t *va);
void mips3_sd(volatile uint64_t *, uint64_t);
#endif	/* __GNUC__ */

#endif /* _KERNEL */

#define	readb(va)	(*(volatile uint8_t *) (va))
#define	readw(va)	(*(volatile uint16_t *) (va))
#define	readl(va)	(*(volatile uint32_t *) (va))
#if defined(__GNUC__) && !defined(__mips_o32)
#define	readq(a)	(*(volatile uint64_t *)(a))
#endif

#define	writeb(va, d)	(*(volatile uint8_t *) (va) = (d))
#define	writew(va, d)	(*(volatile uint16_t *) (va) = (d))
#define	writel(va, d)	(*(volatile uint32_t *) (va) = (d))
#if defined(__GNUC__) && !defined(__mips_o32)
#define	writeq(va, d)	(*(volatile uint64_t *) (va) = (d))
#endif
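
/*
 * Example (illustrative only; the base address is made up): these accessors
 * take a virtual address, so device registers are normally reached through
 * an uncached mapping such as MIPS_PHYS_TO_KSEG1():
 *
 *	char *regs = (char *)MIPS_PHYS_TO_KSEG1(0x1f000000);
 *	uint32_t v = readl(regs + 0x10);
 *	writel(regs + 0x10, v | 0x1);
 */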

#endif /* !_MACHINE_CPUFUNC_H_ */