/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

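/*
 * Raise a Breakpoint Instruction exception, e.g. to drop into the
 * kernel debugger.
 */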
static __inline void
breakpoint(void)
{

	__asm("brk #0");
}

#ifdef _KERNEL

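/*
 * The HAVE_INLINE_* macros signal to <sys/libkern.h> that inline
 * versions of these bit-search routines exist here, so the generic
 * out-of-line implementations are not declared or used.
 */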
#define	HAVE_INLINE_FFS

static __inline __pure2 int
ffs(int mask)
{

	return (__builtin_ffs(mask));
}

#define	HAVE_INLINE_FFSL

static __inline __pure2 int
ffsl(long mask)
{

	return (__builtin_ffsl(mask));
}

#define	HAVE_INLINE_FFSLL

static __inline __pure2 int
ffsll(long long mask)
{

	return (__builtin_ffsll(mask));
}

#define	HAVE_INLINE_FLS

static __inline __pure2 int
fls(int mask)
{

	return (mask == 0 ? 0 :
	    8 * sizeof(mask) - __builtin_clz((u_int)mask));
}

#define	HAVE_INLINE_FLSL

static __inline __pure2 int
flsl(long mask)
{

	return (mask == 0 ? 0 :
	    8 * sizeof(mask) - __builtin_clzl((u_long)mask));
}

#define	HAVE_INLINE_FLSLL

static __inline __pure2 int
flsll(long long mask)
{

	return (mask == 0 ? 0 :
	    8 * sizeof(mask) - __builtin_clzll((unsigned long long)mask));
}
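
/*
 * Sketch of the semantics: ffs() returns the 1-based index of the
 * least significant set bit, fls() that of the most significant one,
 * and both return 0 for a zero argument.  For example, ffs(0x10) and
 * fls(0x10) both return 5, as only bit 4 is set.
 */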

#include <machine/armreg.h>

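/* Enable the Privileged Access Never (PAN) feature, when supported. */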
void pan_enable(void);

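/*
 * The DAIF special register holds the exception mask bits.  The
 * immediate for "msr daifset"/"msr daifclr" is a 4-bit mask:
 * D (debug) = 8, A (SError) = 4, I (IRQ) = 2, F (FIQ) = 1.
 * dbg_disable() and intr_disable() return the previous DAIF value,
 * which a caller can later hand back to intr_restore() to rewrite
 * the whole register.
 */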
static __inline register_t
dbg_disable(void)
{
	uint32_t ret;

	__asm __volatile(
	    "mrs %x0, daif   \n"
	    "msr daifset, #8 \n"
	    : "=&r" (ret));

	return (ret);
}

static __inline void
dbg_enable(void)
{

	__asm __volatile("msr daifclr, #8");
}

static __inline register_t
intr_disable(void)
{
	/* DAIF is a 32-bit register */
	uint32_t ret;

	__asm __volatile(
	    "mrs %x0, daif   \n"
	    "msr daifset, #2 \n"
	    : "=&r" (ret));

	return (ret);
}

static __inline void
intr_restore(register_t s)
{

	WRITE_SPECIALREG(daif, s);
}

static __inline void
intr_enable(void)
{

	__asm __volatile("msr daifclr, #2");
}
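
/*
 * Typical critical-section usage (sketch):
 *
 *	register_t s;
 *
 *	s = intr_disable();
 *	... code that must run with interrupts masked ...
 *	intr_restore(s);
 */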
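/*
 * MIDR_EL1 identifies the CPU implementer, part number and revision;
 * MPIDR_EL1 holds the core's affinity (topology) fields.
 */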
static __inline register_t
get_midr(void)
{
	uint64_t midr;

	midr = READ_SPECIALREG(midr_el1);

	return (midr);
}

static __inline register_t
get_mpidr(void)
{
	uint64_t mpidr;

	mpidr = READ_SPECIALREG(mpidr_el1);

	return (mpidr);
}

static __inline void
clrex(void)
{

	/*
	 * Include a compiler barrier, otherwise the compiler could
	 * move memory accesses across the instruction and the
	 * exclusive monitor would be cleared too late.
	 */
	__asm __volatile("clrex" : : : "memory");
}
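/*
 * Install a new low-address (user) translation table base.  The isb
 * ensures the write to ttbr0_el1 takes effect before any subsequent
 * instruction; callers remain responsible for any required TLB
 * invalidation.
 */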
static __inline void
set_ttbr0(uint64_t ttbr0)
{

	__asm __volatile(
	    "msr ttbr0_el1, %0 \n"
	    "isb               \n"
	    :
	    : "r" (ttbr0));
}
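/*
 * Invalidate all instruction caches to the Point of Unification,
 * Inner Shareable, i.e. on every core in this shareability domain.
 */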
static __inline void
invalidate_icache(void)
{

	__asm __volatile(
	    "ic ialluis        \n"
	    "dsb ish           \n"
	    "isb               \n");
}
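/*
 * As above, but for the local core only: "ic iallu" and "dsb nsh"
 * act on the Non-shareable domain.
 */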
static __inline void
invalidate_local_icache(void)
{

	__asm __volatile(
	    "ic iallu          \n"
	    "dsb nsh           \n"
	    "isb               \n");
}

extern bool icache_aliasing;
extern bool icache_vmid;

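/*
 * Cache geometry; expected to be filled in during CPU identification
 * at boot.
 */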
extern int64_t dcache_line_size;
extern int64_t icache_line_size;
extern int64_t idcache_line_size;
extern int64_t dczva_line_size;

#define	cpu_nullop()			arm64_nullop()
#define	cpufunc_nullop()		arm64_nullop()

#define	cpu_tlb_flushID()		arm64_tlb_flushID()

#define	cpu_dcache_wbinv_range(a, s)	arm64_dcache_wbinv_range((a), (s))
#define	cpu_dcache_inv_range(a, s)	arm64_dcache_inv_range((a), (s))
#define	cpu_dcache_wb_range(a, s)	arm64_dcache_wb_range((a), (s))

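/*
 * arm64_icache_sync_range is a function pointer so an implementation
 * matching the cache behaviour can be selected, e.g. the DIC/IDC
 * variant or the aliasing-safe variant declared below.
 */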
extern void (*arm64_icache_sync_range)(vm_offset_t, vm_size_t);

#define	cpu_icache_sync_range(a, s)	arm64_icache_sync_range((a), (s))
#define	cpu_icache_sync_range_checked(a, s)	\
	arm64_icache_sync_range_checked((a), (s))

void arm64_nullop(void);
void arm64_tlb_flushID(void);
void arm64_dic_idc_icache_sync_range(vm_offset_t, vm_size_t);
void arm64_aliasing_icache_sync_range(vm_offset_t, vm_size_t);
int arm64_icache_sync_range_checked(vm_offset_t, vm_size_t);
void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t);
void arm64_dcache_inv_range(vm_offset_t, vm_size_t);
void arm64_dcache_wb_range(vm_offset_t, vm_size_t);

#endif	/* _KERNEL */
#endif	/* _MACHINE_CPUFUNC_H_ */