/*	$OpenBSD: pio.h,v 1.2 1998/09/15 10:50:12 pefo Exp $	*/

/*-
 * Copyright (c) 2002-2004 Juli Mallett.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1995-1999 Per Fogelstrom.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Per Fogelstrom.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	JNPR: cpufunc.h,v 1.5 2007/08/09 11:23:32 katta
 * $FreeBSD: head/sys/mips/include/cpufunc.h 217354 2011-01-13 15:17:29Z jchandra $
 */
58
59#ifndef _MACHINE_CPUFUNC_H_
60#define	_MACHINE_CPUFUNC_H_
61
62#include <sys/types.h>
63#include <machine/cpuregs.h>
64
65/*
66 * These functions are required by user-land atomi ops
67 */
68
/*
 * Heavyweight hazard barrier: eight nops issued with assembler
 * reordering disabled, plus a "memory" clobber so the compiler does
 * not cache memory values across the barrier.  The nop count
 * presumably covers the worst-case pipeline/CP0 hazard window of the
 * supported cores -- TODO confirm against the CPUs this file targets.
 */
static __inline void
mips_barrier(void)
{
	__asm __volatile (".set noreorder\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  "nop\n\t"
			  ".set reorder\n\t"
			  : : : "memory");
}
84
/*
 * Clear coprocessor 0 hazards.  COP0_SYNC expands to the
 * CPU-appropriate instruction sequence; presumably defined in
 * <machine/cpuregs.h>, included above -- confirm there.
 */
static __inline void
mips_cp0_sync(void)
{
	__asm __volatile (__XSTRING(COP0_SYNC));
}
90
/*
 * Drain the CPU write buffer: a "sync" instruction with a compiler
 * "memory" clobber, followed by the heavier nop barrier.
 */
static __inline void
mips_wbflush(void)
{
	__asm __volatile ("sync" : : : "memory");
	mips_barrier();
}
97
/*
 * Read-side memory barrier: deliberately a no-op on this platform
 * (the body is intentionally empty).
 */
static __inline void
mips_read_membar(void)
{
	/* Nil */
}
103
/*
 * Write-side memory barrier: implemented as a write-buffer flush
 * (see mips_wbflush above).
 */
static __inline void
mips_write_membar(void)
{
	mips_wbflush();
}
109
110#ifdef _KERNEL
111/*
112 * XXX
113 * It would be nice to add variants that read/write register_t, to avoid some
114 * ABI checks.
115 */
/*
 * Generate 64-bit (doubleword) accessors mips_rd_<n>()/mips_wr_<n>()
 * for coprocessor 0 register <r>.  Only available on ABIs with 64-bit
 * registers (n32/n64).
 *
 * The read temporary must be a 64-bit type: dmfc0 writes the full
 * doubleword, and funnelling it through an "int" (as the original code
 * did) sign-extends the low 32 bits and silently discards the upper
 * half of registers such as EntryHi or XContext.
 */
#if defined(__mips_n32) || defined(__mips_n64)
#define	MIPS_RW64_COP0(n,r)					\
static __inline uint64_t					\
mips_rd_ ## n (void)						\
{								\
	uint64_t v0;						\
	__asm __volatile ("dmfc0 %[v0], $"__XSTRING(r)";"	\
			  : [v0] "=&r"(v0));			\
	mips_barrier();						\
	return (v0);						\
}								\
static __inline void						\
mips_wr_ ## n (uint64_t a0)					\
{								\
	__asm __volatile ("dmtc0 %[a0], $"__XSTRING(r)";"	\
			 __XSTRING(COP0_SYNC)";"		\
			 "nop;"					\
			 "nop;"					\
			 :					\
			 : [a0] "r"(a0));			\
	mips_barrier();						\
} struct __hack

#if defined(__mips_n64)
MIPS_RW64_COP0(excpc, MIPS_COP_0_EXC_PC);
MIPS_RW64_COP0(entryhi, MIPS_COP_0_TLB_HI);
MIPS_RW64_COP0(pagemask, MIPS_COP_0_TLB_PG_MASK);
#endif
#if defined(__mips_n64) || defined(__mips_n32) /* PHYSADDR_64_BIT */
MIPS_RW64_COP0(entrylo0, MIPS_COP_0_TLB_LO0);
MIPS_RW64_COP0(entrylo1, MIPS_COP_0_TLB_LO1);
#endif
MIPS_RW64_COP0(xcontext, MIPS_COP_0_TLB_XCONTEXT);

#undef	MIPS_RW64_COP0
#endif
152
/*
 * Generate 32-bit accessors mips_rd_<n>()/mips_wr_<n>() for
 * coprocessor 0 register <r>, select 0.  Each write is followed by the
 * COP0 hazard sequence, two nops and the heavyweight barrier.
 */
#define	MIPS_RW32_COP0(n,r)					\
static __inline uint32_t					\
mips_rd_ ## n (void)						\
{								\
	int reg;						\
	__asm __volatile ("mfc0 %[reg], $"__XSTRING(r)";"	\
			  : [reg] "=&r"(reg));			\
	mips_barrier();						\
	return (reg);						\
}								\
static __inline void						\
mips_wr_ ## n (uint32_t val)					\
{								\
	__asm __volatile ("mtc0 %[val], $"__XSTRING(r)";"	\
			 __XSTRING(COP0_SYNC)";"		\
			 "nop;"					\
			 "nop;"					\
			 :					\
			 : [val] "r"(val));			\
	mips_barrier();						\
} struct __hack
174
/*
 * As MIPS_RW32_COP0, but for coprocessor 0 register <r> with a
 * non-zero select field <s> (mfc0/mtc0 with an explicit select
 * operand).
 */
#define	MIPS_RW32_COP0_SEL(n,r,s)				\
static __inline uint32_t					\
mips_rd_ ## n(void)						\
{								\
	int reg;						\
	__asm __volatile ("mfc0 %[reg], $"__XSTRING(r)", "__XSTRING(s)";"	\
			  : [reg] "=&r"(reg));			\
	mips_barrier();						\
	return (reg);						\
}								\
static __inline void						\
mips_wr_ ## n(uint32_t val)					\
{								\
	__asm __volatile ("mtc0 %[val], $"__XSTRING(r)", "__XSTRING(s)";"	\
			 __XSTRING(COP0_SYNC)";"		\
			 "nop;"					\
			 "nop;"					\
			 :					\
			 : [val] "r"(val));			\
	mips_barrier();						\
} struct __hack
196
197#ifdef CPU_CNMIPS
/*
 * Synchronize the instruction cache (Cavium Octeon only -- guarded by
 * CPU_CNMIPS).  The operation is emitted as a raw opcode via .word,
 * presumably because the assembler does not accept the vendor
 * mnemonic -- the original "xxx ICACHE" note marks it as such.
 */
static __inline void mips_sync_icache (void)
{
	__asm __volatile (
		".set push\n"
		".set mips64\n"
		".word 0x041f0000\n"		/* xxx ICACHE */
		"nop\n"
		".set pop\n"
		: : );
}
208#endif
209
/* Timer and configuration registers. */
MIPS_RW32_COP0(compare, MIPS_COP_0_COMPARE);
MIPS_RW32_COP0(config, MIPS_COP_0_CONFIG);
MIPS_RW32_COP0_SEL(config1, MIPS_COP_0_CONFIG, 1);
MIPS_RW32_COP0_SEL(config2, MIPS_COP_0_CONFIG, 2);
MIPS_RW32_COP0_SEL(config3, MIPS_COP_0_CONFIG, 3);
MIPS_RW32_COP0(count, MIPS_COP_0_COUNT);
/* TLB management registers. */
MIPS_RW32_COP0(index, MIPS_COP_0_TLB_INDEX);
MIPS_RW32_COP0(wired, MIPS_COP_0_TLB_WIRED);
/* Exception state. */
MIPS_RW32_COP0(cause, MIPS_COP_0_CAUSE);
#if !defined(__mips_n64)
/* On n64 the 64-bit variant from MIPS_RW64_COP0 is used instead. */
MIPS_RW32_COP0(excpc, MIPS_COP_0_EXC_PC);
#endif
MIPS_RW32_COP0(status, MIPS_COP_0_STATUS);

/* XXX: Some of these registers are specific to MIPS32. */
#if !defined(__mips_n64)
MIPS_RW32_COP0(entryhi, MIPS_COP_0_TLB_HI);
MIPS_RW32_COP0(pagemask, MIPS_COP_0_TLB_PG_MASK);
#endif
#if !defined(__mips_n64) && !defined(__mips_n32) /* !PHYSADDR_64_BIT */
MIPS_RW32_COP0(entrylo0, MIPS_COP_0_TLB_LO0);
MIPS_RW32_COP0(entrylo1, MIPS_COP_0_TLB_LO1);
#endif
MIPS_RW32_COP0(prid, MIPS_COP_0_PRID);
/* XXX 64-bit?  */
MIPS_RW32_COP0_SEL(ebase, MIPS_COP_0_PRID, 1);
/* Hardware watchpoint registers, four pairs. */
MIPS_RW32_COP0(watchlo, MIPS_COP_0_WATCH_LO);
MIPS_RW32_COP0_SEL(watchlo1, MIPS_COP_0_WATCH_LO, 1);
MIPS_RW32_COP0_SEL(watchlo2, MIPS_COP_0_WATCH_LO, 2);
MIPS_RW32_COP0_SEL(watchlo3, MIPS_COP_0_WATCH_LO, 3);
MIPS_RW32_COP0(watchhi, MIPS_COP_0_WATCH_HI);
MIPS_RW32_COP0_SEL(watchhi1, MIPS_COP_0_WATCH_HI, 1);
MIPS_RW32_COP0_SEL(watchhi2, MIPS_COP_0_WATCH_HI, 2);
MIPS_RW32_COP0_SEL(watchhi3, MIPS_COP_0_WATCH_HI, 3);

/* Performance counter registers, selects 0-3. */
MIPS_RW32_COP0_SEL(perfcnt0, MIPS_COP_0_PERFCNT, 0);
MIPS_RW32_COP0_SEL(perfcnt1, MIPS_COP_0_PERFCNT, 1);
MIPS_RW32_COP0_SEL(perfcnt2, MIPS_COP_0_PERFCNT, 2);
MIPS_RW32_COP0_SEL(perfcnt3, MIPS_COP_0_PERFCNT, 3);

#undef	MIPS_RW32_COP0
#undef	MIPS_RW32_COP0_SEL
252
253static __inline register_t
254intr_disable(void)
255{
256	register_t s;
257
258	s = mips_rd_status();
259	mips_wr_status(s & ~MIPS_SR_INT_IE);
260
261	return (s & MIPS_SR_INT_IE);
262}
263
264static __inline register_t
265intr_enable(void)
266{
267	register_t s;
268
269	s = mips_rd_status();
270	mips_wr_status(s | MIPS_SR_INT_IE);
271
272	return (s);
273}
274
275static __inline void
276intr_restore(register_t ie)
277{
278	if (ie == MIPS_SR_INT_IE) {
279		intr_enable();
280	}
281}
282
283static __inline uint32_t
284set_intr_mask(uint32_t mask)
285{
286	uint32_t ostatus;
287
288	ostatus = mips_rd_status();
289	mask = (ostatus & ~MIPS_SR_INT_MASK) | (mask & MIPS_SR_INT_MASK);
290	mips_wr_status(mask);
291	return (ostatus);
292}
293
294static __inline uint32_t
295get_intr_mask(void)
296{
297
298	return (mips_rd_status() & MIPS_SR_INT_MASK);
299}
300
/*
 * Raise a breakpoint exception via the MIPS "break" instruction
 * (used e.g. to drop into the debugger).
 */
static __inline void
breakpoint(void)
{
	__asm __volatile ("break");
}
306
307#if defined(__GNUC__) && !defined(__mips_o32)
/*
 * 64-bit load from *va.  On LP64 the compiler emits a native
 * doubleword load; on other (non-o32) ABIs an explicit "ld" is forced
 * with inline assembly.
 */
static inline uint64_t
mips3_ld(const volatile uint64_t *va)
{
#if defined(_LP64)
	return (*va);
#else
	uint64_t rv;

	__asm volatile("ld	%0,0(%1)" : "=d"(rv) : "r"(va));
	return (rv);
#endif
}
321
/*
 * 64-bit store of v to *va.  On LP64 a plain store suffices; on other
 * (non-o32) ABIs an explicit "sd" is forced with inline assembly.
 */
static inline void
mips3_sd(volatile uint64_t *va, uint64_t v)
{
#if defined(_LP64)
	*va = v;
#else
	__asm volatile("sd	%0,0(%1)" :: "r"(v), "r"(va));
#endif
}
331#else
332uint64_t mips3_ld(volatile uint64_t *va);
333void mips3_sd(volatile uint64_t *, uint64_t);
334#endif	/* __GNUC__ */
335
336#endif /* _KERNEL */
337
/*
 * Raw memory-mapped register accessors: a single volatile load or
 * store of the given width at virtual address va.
 */
#define	readb(va)	(*(volatile uint8_t *) (va))
#define	readw(va)	(*(volatile uint16_t *) (va))
#define	readl(va)	(*(volatile uint32_t *) (va))

#define	writeb(va, d)	(*(volatile uint8_t *) (va) = (d))
#define	writew(va, d)	(*(volatile uint16_t *) (va) = (d))
#define	writel(va, d)	(*(volatile uint32_t *) (va) = (d))
345
346/*
347 * I/O macros.
348 */
349
350#define	outb(a,v)	(*(volatile unsigned char*)(a) = (v))
351#define	out8(a,v)	(*(volatile unsigned char*)(a) = (v))
352#define	outw(a,v)	(*(volatile unsigned short*)(a) = (v))
353#define	out16(a,v)	outw(a,v)
354#define	outl(a,v)	(*(volatile unsigned int*)(a) = (v))
355#define	out32(a,v)	outl(a,v)
356#define	inb(a)		(*(volatile unsigned char*)(a))
357#define	in8(a)		(*(volatile unsigned char*)(a))
358#define	inw(a)		(*(volatile unsigned short*)(a))
359#define	in16(a)		inw(a)
360#define	inl(a)		(*(volatile unsigned int*)(a))
361#define	in32(a)		inl(a)
362
363#define	out8rb(a,v)	(*(volatile unsigned char*)(a) = (v))
364#define	out16rb(a,v)	(__out16rb((volatile uint16_t *)(a), v))
365#define	out32rb(a,v)	(__out32rb((volatile uint32_t *)(a), v))
366#define	in8rb(a)	(*(volatile unsigned char*)(a))
367#define	in16rb(a)	(__in16rb((volatile uint16_t *)(a)))
368#define	in32rb(a)	(__in32rb((volatile uint32_t *)(a)))
369
370#define	_swap_(x)	(((x) >> 24) | ((x) << 24) | \
371	    (((x) >> 8) & 0xff00) | (((x) & 0xff00) << 8))
372
373static __inline void __out32rb(volatile uint32_t *, uint32_t);
374static __inline void __out16rb(volatile uint16_t *, uint16_t);
375static __inline uint32_t __in32rb(volatile uint32_t *);
376static __inline uint16_t __in16rb(volatile uint16_t *);
377
/*
 * Byte-swapped 32-bit store: write v to *a with its bytes reversed.
 * (v is a by-value parameter, so _swap_'s multiple evaluation of its
 * argument is harmless here.)
 */
static __inline void
__out32rb(volatile uint32_t *a, uint32_t v)
{
	out32(a, _swap_(v));
}
386
/*
 * Byte-swapped 16-bit store: write v to *a with its two bytes
 * exchanged.
 */
static __inline void
__out16rb(volatile uint16_t *a, uint16_t v)
{
	uint16_t swapped;

	swapped = (uint16_t)((v << 8) | (v >> 8));
	out16(a, swapped);
}
395
/*
 * Byte-swapped 32-bit load: read *a once and return the value with
 * its bytes reversed.  The read is latched into a local first so
 * _swap_'s multiple evaluation does not repeat the volatile access.
 */
static __inline uint32_t
__in32rb(volatile uint32_t *a)
{
	uint32_t raw;

	raw = in32(a);
	return (_swap_(raw));
}
405
/*
 * Byte-swapped 16-bit load: read *a once and return the value with
 * its two bytes exchanged.
 */
static __inline uint16_t
__in16rb(volatile uint16_t *a)
{
	uint16_t raw;

	raw = in16(a);
	return ((uint16_t)((raw << 8) | ((raw >> 8) & 0xff)));
}
415
/*
 * Repeated ("string") I/O and atomic load-and-clear helpers.
 * Prototypes only -- the definitions live elsewhere in the kernel.
 */
void insb(uint8_t *, uint8_t *,int);
void insw(uint16_t *, uint16_t *,int);
void insl(uint32_t *, uint32_t *,int);
void outsb(uint8_t *, const uint8_t *,int);
void outsw(uint16_t *, const uint16_t *,int);
void outsl(uint32_t *, const uint32_t *,int);
u_int loadandclear(volatile u_int *addr);
423
424#endif /* !_MACHINE_CPUFUNC_H_ */
425