/*-
 * Copyright 2016 Svatopluk Kraus <skra@FreeBSD.org>
 * Copyright 2016 Michal Meloun <mmel@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/arm/include/cpu-v4.h 300533 2016-05-23 20:07:17Z ian $
 */
29#ifndef MACHINE_CPU_V4_H
30#define MACHINE_CPU_V4_H
31
32/* There are no user serviceable parts here, they may change without notice */
33#ifndef _KERNEL
34#error Only include this file in the kernel
35#endif
36
37#include <machine/acle-compat.h>
38#include <machine/atomic.h>
39#include <machine/cpufunc.h>
40#include <machine/cpuinfo.h>
41#include <machine/sysreg.h>
42
43#if __ARM_ARCH >= 6
44#error Never include this file for ARMv6
45#else
46
47#define CPU_ASID_KERNEL 0
48
/*
 * Macros to generate CP15 (system control processor) read/write functions.
 */
#define _FX(s...) #s

/*
 * _RF0: emit a reader function "fname" that performs "mrc <aname>",
 * where the aname argument list supplies the full coprocessor operand
 * string with %0 as the destination register.  Returns the value read.
 */
#define _RF0(fname, aname...)						\
static __inline register_t						\
fname(void)								\
{									\
	register_t reg;							\
	__asm __volatile("mrc\t" _FX(aname): "=r" (reg));		\
	return(reg);							\
}

/*
 * _R64F0: like _RF0 but reads a 64-bit value via the two-register
 * "mrrc" transfer.  NOTE(review): aname presumably names both halves
 * of the output operand (e.g. %Q0/%R0); no invocation appears in this
 * file, so confirm at the call site.
 */
#define _R64F0(fname, aname)						\
static __inline uint64_t						\
fname(void)								\
{									\
	uint64_t reg;							\
	__asm __volatile("mrrc\t" _FX(aname): "=r" (reg));		\
	return(reg);							\
}

/*
 * _WF0: emit a writer function "fname" that performs "mcr <aname>"
 * with no operands — for write-only CP15 operations whose source
 * value is ignored or encoded in aname itself.
 */
#define _WF0(fname, aname...)						\
static __inline void							\
fname(void)								\
{									\
	__asm __volatile("mcr\t" _FX(aname));				\
}

/*
 * _WF1: emit a writer function "fname(reg)" that performs
 * "mcr <aname>" with %0 bound to the caller-supplied value.
 */
#define _WF1(fname, aname...)						\
static __inline void							\
fname(register_t reg)							\
{									\
	__asm __volatile("mcr\t" _FX(aname):: "r" (reg));		\
}


87/*
88 * Publicly accessible functions
89 */
90
91
92/* Various control registers */
93
94_RF0(cp15_cpacr_get, CP15_CPACR(%0))
95_WF1(cp15_cpacr_set, CP15_CPACR(%0))
96_RF0(cp15_dfsr_get, CP15_DFSR(%0))
97_RF0(cp15_ttbr_get, CP15_TTBR0(%0))
98_RF0(cp15_dfar_get, CP15_DFAR(%0))
99/* XScale */
100_RF0(cp15_actlr_get, CP15_ACTLR(%0))
101_WF1(cp15_actlr_set, CP15_ACTLR(%0))
102
103/*CPU id registers */
104_RF0(cp15_midr_get, CP15_MIDR(%0))
105_RF0(cp15_ctr_get, CP15_CTR(%0))
106_RF0(cp15_tcmtr_get, CP15_TCMTR(%0))
107_RF0(cp15_tlbtr_get, CP15_TLBTR(%0))
108_RF0(cp15_sctlr_get, CP15_SCTLR(%0))
109
110#undef	_FX
111#undef	_RF0
112#undef	_WF0
113#undef	_WF1
114

/*
 * armv4/5 compatibility shims.
 *
 * These functions provide armv4 cache maintenance using the new armv6 names.
 * Included here are just the functions actually used now in common code; it may
 * be necessary to add things here over time.
 *
 * The callers of the dcache functions expect these routines to handle address
 * and size values which are not aligned to cacheline boundaries; the armv4 and
 * armv5 asm code handles that.
 */

/*
 * Flush all TLB entries (instruction and data), then drain the write
 * buffer / wait for the coprocessor operation to complete so the flush
 * is visible before we return.
 */
static __inline void
tlb_flush_all(void)
{
	cpu_tlb_flushID();
	cpu_cpwait();
}

135static __inline void
136icache_sync(vm_offset_t va, vm_size_t size)
137{
138	cpu_icache_sync_range(va, size);
139}
140
141static __inline void
142dcache_inv_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
143{
144
145	cpu_dcache_inv_range(va, size);
146#ifdef ARM_L2_PIPT
147	cpu_l2cache_inv_range(pa, size);
148#else
149	cpu_l2cache_inv_range(va, size);
150#endif
151}
152
153static __inline void
154dcache_inv_poc_dma(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
155{
156
157	/* See armv6 code, above, for why we do L2 before L1 in this case. */
158#ifdef ARM_L2_PIPT
159	cpu_l2cache_inv_range(pa, size);
160#else
161	cpu_l2cache_inv_range(va, size);
162#endif
163	cpu_dcache_inv_range(va, size);
164}
165
166static __inline void
167dcache_wb_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
168{
169
170	cpu_dcache_wb_range(va, size);
171#ifdef ARM_L2_PIPT
172	cpu_l2cache_wb_range(pa, size);
173#else
174	cpu_l2cache_wb_range(va, size);
175#endif
176}
177
/*
 * Write back and invalidate the entire I/D cache hierarchy to the
 * point of coherency: L1 (both caches) first, then L2.
 */
static __inline void
dcache_wbinv_poc_all(void)
{
	cpu_idcache_wbinv_all();
	cpu_l2cache_wbinv_all();
}

185#endif /* _KERNEL */
186
187#endif /* MACHINE_CPU_V4_H */
188