/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/arm64/include/cpufunc.h 338514 2018-09-06 22:23:39Z jhb $
 */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

/*
 * Enter the debugger by raising a breakpoint exception (AArch64 BRK
 * instruction with immediate 0).  Usable from both kernel and userland,
 * hence placed outside the _KERNEL block below.
 */
static __inline void
breakpoint(void)
{

	__asm("brk #0");
}

#ifdef _KERNEL

#include <machine/armreg.h>

void pan_enable(void);

/*
 * Mask debug exceptions on the current CPU.
 *
 * Reads the current DAIF exception-mask state, then sets the D bit
 * (daifset, #8).  Returns the previous DAIF value so the caller can
 * restore the prior mask state later (e.g. via WRITE_SPECIALREG(daif, ...)).
 * The two instructions must stay in this order: the read captures the
 * state from before the mask is applied.
 */
static __inline register_t
dbg_disable(void)
{
	uint32_t ret;

	__asm __volatile(
	    "mrs %x0, daif   \n"
	    "msr daifset, #8 \n"
	    : "=&r" (ret));

	return (ret);
}

/*
 * Unmask debug exceptions on the current CPU by clearing the DAIF D bit
 * (daifclr, #8).
 */
static __inline void
dbg_enable(void)
{

	__asm __volatile("msr daifclr, #8");
}

/*
 * Disable interrupts (IRQs) on the current CPU.
 *
 * Reads the current DAIF exception-mask state, then sets the I bit
 * (daifset, #2) to mask IRQs.  Returns the previous DAIF value, which
 * the caller passes to intr_restore() to re-establish the prior state.
 * The read must precede the mask so the pre-disable state is captured.
 */
static __inline register_t
intr_disable(void)
{
	/* DAIF is a 32-bit register */
	uint32_t ret;

	__asm __volatile(
	    "mrs %x0, daif   \n"
	    "msr daifset, #2 \n"
	    : "=&r" (ret));

	return (ret);
}

/*
 * Restore the interrupt-mask state previously returned by intr_disable()
 * by writing the saved value back to the DAIF register.
 */
static __inline void
intr_restore(register_t s)
{

	WRITE_SPECIALREG(daif, s);
}

/*
 * Enable interrupts (IRQs) on the current CPU by clearing the DAIF I bit
 * (daifclr, #2), regardless of the previous state.
 */
static __inline void
intr_enable(void)
{

	__asm __volatile("msr daifclr, #2");
}

93static __inline register_t
94get_midr(void)
95{
96	uint64_t midr;
97
98	midr = READ_SPECIALREG(midr_el1);
99
100	return (midr);
101}
102
103static __inline register_t
104get_mpidr(void)
105{
106	uint64_t mpidr;
107
108	mpidr = READ_SPECIALREG(mpidr_el1);
109
110	return (mpidr);
111}
112
/*
 * Clear the local exclusive monitor, dropping any outstanding
 * load-exclusive reservation held by this CPU.
 */
static __inline void
clrex(void)
{

	/*
	 * The "memory" clobber is a compiler barrier: without it the
	 * compiler could move memory accesses across this point and the
	 * monitor clear might occur too late for us.
	 */
	__asm __volatile("clrex" : : : "memory");
}

/*
 * Cache geometry, defined elsewhere in the arm64 kernel; presumably
 * discovered at boot from the CPU's cache-type registers -- confirm
 * against the defining file.
 */
extern int64_t dcache_line_size;
extern int64_t icache_line_size;
extern int64_t idcache_line_size;
extern int64_t dczva_line_size;

/* MI cpu_* names mapped onto the arm64 implementations declared below. */
#define	cpu_nullop()			arm64_nullop()
#define	cpufunc_nullop()		arm64_nullop()
#define	cpu_setttb(a)			arm64_setttb(a)

#define	cpu_tlb_flushID()		arm64_tlb_flushID()
#define	cpu_tlb_flushID_SE(e)		arm64_tlb_flushID_SE(e)

/* Data-cache maintenance over the range (address, size). */
#define	cpu_dcache_wbinv_range(a, s)	arm64_dcache_wbinv_range((a), (s))
#define	cpu_dcache_inv_range(a, s)	arm64_dcache_inv_range((a), (s))
#define	cpu_dcache_wb_range(a, s)	arm64_dcache_wb_range((a), (s))

/* Combined/instruction-cache maintenance over the range (address, size). */
#define	cpu_idcache_wbinv_range(a, s)	arm64_idcache_wbinv_range((a), (s))
#define	cpu_icache_sync_range(a, s)	arm64_icache_sync_range((a), (s))

/* Implementations live in assembly/C elsewhere in the arm64 kernel. */
void arm64_nullop(void);
void arm64_setttb(vm_offset_t);
void arm64_tlb_flushID(void);
void arm64_tlb_flushID_SE(vm_offset_t);
void arm64_icache_sync_range(vm_offset_t, vm_size_t);
void arm64_idcache_wbinv_range(vm_offset_t, vm_size_t);
void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t);
void arm64_dcache_inv_range(vm_offset_t, vm_size_t);
void arm64_dcache_wb_range(vm_offset_t, vm_size_t);

#endif	/* _KERNEL */
#endif	/* _MACHINE_CPUFUNC_H_ */