/* cpufunc.h, revision 178628 */
/*-
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/powerpc/include/cpufunc.h 178628 2008-04-27 22:33:43Z marcel $
 */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

/*
 * Required for user-space atomic.h includes
 */
/*
 * Full memory barrier for the user-space atomic.h implementation.
 * Orders both cacheable ("sync") and non-cacheable ("eieio") accesses,
 * and tells the compiler not to reorder memory operations across it.
 */
static __inline void
powerpc_mb(void)
{
	__asm __volatile("eieio; sync" : : : "memory");
}

#ifdef _KERNEL

#include <sys/types.h>

#include <machine/psl.h>
#include <machine/spr.h>

struct thread;

#ifdef KDB
void breakpoint(void);
#endif

/* CPU register mangling inlines */

57static __inline void
58mtmsr(register_t value)
59{
60
61	__asm __volatile ("mtmsr %0; isync" :: "r"(value));
62}
63
64static __inline register_t
65mfmsr(void)
66{
67	register_t	value;
68
69	__asm __volatile ("mfmsr %0" : "=r"(value));
70
71	return (value);
72}
73
74static __inline void
75mtsrin(vm_offset_t va, register_t value)
76{
77
78	__asm __volatile ("mtsrin %0,%1" :: "r"(value), "r"(va));
79}
80
81static __inline register_t
82mfsrin(vm_offset_t va)
83{
84	register_t	value;
85
86	__asm __volatile ("mfsrin %0,%1" : "=r"(value) : "r"(va));
87
88	return (value);
89}
90
91static __inline void
92mtdec(register_t value)
93{
94
95	__asm __volatile ("mtdec %0" :: "r"(value));
96}
97
98static __inline register_t
99mfdec(void)
100{
101	register_t	value;
102
103	__asm __volatile ("mfdec %0" : "=r"(value));
104
105	return (value);
106}
107
108static __inline register_t
109mfpvr(void)
110{
111	register_t	value;
112
113	__asm __volatile ("mfpvr %0" : "=r"(value));
114
115	return (value);
116}
117
/*
 * Issue "eieio": enforce in-order execution of I/O, ordering accesses
 * to non-cacheable (device) memory.
 */
static __inline void
eieio(void)
{
	__asm __volatile ("eieio");
}

/*
 * Issue "isync": discard any prefetched instructions and wait for all
 * preceding instructions to complete (context synchronization).
 */
static __inline void
isync(void)
{
	__asm __volatile ("isync");
}

132static __inline register_t
133intr_disable(void)
134{
135	register_t	msr;
136
137	msr = mfmsr();
138	mtmsr(msr & ~PSL_EE);
139	return (msr);
140}
141
142static __inline void
143intr_restore(register_t msr)
144{
145
146	mtmsr(msr);
147}
148
/*
 * Older spelling of intr_restore(); kept for existing callers.
 * NOTE(review): takes unsigned int rather than register_t — presumably
 * a historical interface; confirm before changing callers.
 */
static __inline void
restore_intr(unsigned int msr)
{
	mtmsr(msr);
}

/*
 * Return the per-CPU data pointer, read from SPRG0.  The asm is
 * deliberately not volatile so the compiler may reuse a prior read.
 */
static __inline struct pcpu *
powerpc_get_pcpup(void)
{
	struct pcpu *pcpup;

	__asm ("mfsprg %0, 0" : "=r"(pcpup));
	return (pcpup);
}

#endif /* _KERNEL */

#endif /* !_MACHINE_CPUFUNC_H_ */