/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/powerpc/include/cpufunc.h 215182 2010-11-12 15:20:10Z nwhitehorn $
 */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

/*
 * Required for user-space atomic.h includes
 */
static __inline void
powerpc_mb(void)
{

	__asm __volatile("eieio; sync" : : : "memory");
}

#ifdef _KERNEL

#include <sys/types.h>

#include <machine/psl.h>
#include <machine/spr.h>

struct thread;

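/*
 * With KDB compiled in, breakpoint() traps into the kernel debugger;
 * otherwise it is an empty stub so callers need no #ifdefs of their own.
 */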
#ifdef KDB
void breakpoint(void);
#else
static __inline void
breakpoint(void)
{

	return;
}
#endif

/* CPU register mangling inlines */

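/*
 * MSR access.  The trailing isync makes the MSR update
 * context-synchronizing, so the new value is in effect before any
 * subsequent instructions execute.
 */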
static __inline void
mtmsr(register_t value)
{

	__asm __volatile ("mtmsr %0; isync" :: "r"(value));
}

#ifdef __powerpc64__
static __inline void
mtmsrd(register_t value)
{

	__asm __volatile ("mtmsrd %0; isync" :: "r"(value));
}
#endif

static __inline register_t
mfmsr(void)
{
	register_t value;

	__asm __volatile ("mfmsr %0" : "=r"(value));

	return (value);
}

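/*
 * Segment register access (32-bit only); 64-bit CPUs manage address
 * translation through the SLB instead.
 */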
#ifndef __powerpc64__
static __inline void
mtsrin(vm_offset_t va, register_t value)
{

	__asm __volatile ("mtsrin %0,%1" :: "r"(value), "r"(va));
}

static __inline register_t
mfsrin(vm_offset_t va)
{
	register_t value;

	__asm __volatile ("mfsrin %0,%1" : "=r"(value) : "r"(va));

	return (value);
}
#endif

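/* SPR 136 is the read form of the CTRL register. */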
static __inline register_t
mfctrl(void)
{
	register_t value;

	__asm __volatile ("mfspr %0,136" : "=r"(value));

	return (value);
}

static __inline void
mtdec(register_t value)
{

	__asm __volatile ("mtdec %0" :: "r"(value));
}

static __inline register_t
mfdec(void)
{
	register_t value;

	__asm __volatile ("mfdec %0" : "=r"(value));

	return (value);
}

static __inline register_t
mfpvr(void)
{
	register_t value;

	__asm __volatile ("mfpvr %0" : "=r"(value));

	return (value);
}

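/*
 * Read the 64-bit time base.  On 32-bit CPUs it must be read as two
 * halves; if TBU changed while TBL was being read, the low word may
 * have wrapped, so retry until two reads of TBU match.
 */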
static __inline u_quad_t
mftb(void)
{
	u_quad_t tb;
#ifdef __powerpc64__
	__asm __volatile ("mftb %0" : "=r"(tb));
#else
	uint32_t *tbup = (uint32_t *)&tb;
	uint32_t *tblp = tbup + 1;

	do {
		*tbup = mfspr(TBR_TBU);
		*tblp = mfspr(TBR_TBL);
	} while (*tbup != mfspr(TBR_TBU));
#endif

	return (tb);
}

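/*
 * Write the time base.  TBL is zeroed first so that a carry out of the
 * low word cannot increment TBU between the two subsequent writes.
 */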
static __inline void
mttb(u_quad_t time)
{

	mtspr(TBR_TBWL, 0);
	mtspr(TBR_TBWU, (uint32_t)(time >> 32));
	mtspr(TBR_TBWL, (uint32_t)(time & 0xffffffff));
}

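/* Memory and instruction barriers. */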
static __inline void
eieio(void)
{

	__asm __volatile ("eieio");
}

static __inline void
isync(void)
{

	__asm __volatile ("isync");
}

static __inline void
powerpc_sync(void)
{

	__asm __volatile ("sync");
}

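/*
 * Disable external interrupts by clearing PSL_EE in the MSR; the
 * previous MSR value is returned so intr_restore() can undo the change.
 */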
static __inline register_t
intr_disable(void)
{
	register_t msr;

	msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
	return (msr);
}

static __inline void
intr_restore(register_t msr)
{

	mtmsr(msr);
}

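/* The per-CPU data pointer is kept in SPRG0. */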
static __inline struct pcpu *
powerpc_get_pcpup(void)
{
	struct pcpu *ret;

	__asm __volatile("mfsprg %0, 0" : "=r"(ret));

	return (ret);
}

#endif /* _KERNEL */

#endif /* !_MACHINE_CPUFUNC_H_ */