/*-
 * Copyright 2016 Svatopluk Kraus <skra@FreeBSD.org>
 * Copyright 2016 Michal Meloun <mmel@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/arm/include/cpu-v4.h 328966 2018-02-07 06:27:29Z mmel $
 */
#ifndef MACHINE_CPU_V4_H
#define MACHINE_CPU_V4_H

/* There are no user serviceable parts here, they may change without notice */
#ifndef _KERNEL
#error Only include this file in the kernel
#endif

#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/cpuinfo.h>
#include <machine/sysreg.h>

#if __ARM_ARCH >= 6
#error Never include this file for ARMv6 or later
#else

#define CPU_ASID_KERNEL 0

/*
 * Macros to generate CP15 (system control processor) read/write functions.
 */
#define _FX(s...) #s

#define _RF0(fname, aname...)						\
static __inline uint32_t						\
fname(void)								\
{									\
	uint32_t reg;							\
	__asm __volatile("mrc\t" _FX(aname): "=r" (reg));		\
	return (reg);							\
}

#define _R64F0(fname, aname)						\
static __inline uint64_t						\
fname(void)								\
{									\
	uint64_t reg;							\
	__asm __volatile("mrrc\t" _FX(aname): "=r" (reg));		\
	return (reg);							\
}

#define _WF0(fname, aname...)						\
static __inline void							\
fname(void)								\
{									\
	__asm __volatile("mcr\t" _FX(aname));				\
}

#define _WF1(fname, aname...)						\
static __inline void							\
fname(uint32_t reg)							\
{									\
	__asm __volatile("mcr\t" _FX(aname):: "r" (reg));		\
}

/*
 * Publicly accessible functions
 */
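
/*
 * For illustration (this example is not part of the original file): given
 * the CP15_MIDR() encoding from <machine/sysreg.h>, an instantiation such
 * as _RF0(cp15_midr_get, CP15_MIDR(%0)) below expands to roughly
 *
 *	static __inline uint32_t
 *	cp15_midr_get(void)
 *	{
 *		uint32_t reg;
 *		__asm __volatile("mrc\tp15, 0, %0, c0, c0, 0" : "=r" (reg));
 *		return (reg);
 *	}
 *
 * i.e., a single MRC instruction that reads the Main ID Register into a
 * general-purpose register.
 */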

/* Various control registers */

_RF0(cp15_cpacr_get, CP15_CPACR(%0))
_WF1(cp15_cpacr_set, CP15_CPACR(%0))
_RF0(cp15_dfsr_get, CP15_DFSR(%0))
_RF0(cp15_ttbr_get, CP15_TTBR0(%0))
_RF0(cp15_dfar_get, CP15_DFAR(%0))
/* XScale */
_RF0(cp15_actlr_get, CP15_ACTLR(%0))
_WF1(cp15_actlr_set, CP15_ACTLR(%0))

/* CPU id registers */
_RF0(cp15_midr_get, CP15_MIDR(%0))
_RF0(cp15_ctr_get, CP15_CTR(%0))
_RF0(cp15_tcmtr_get, CP15_TCMTR(%0))
_RF0(cp15_tlbtr_get, CP15_TLBTR(%0))
_RF0(cp15_sctlr_get, CP15_SCTLR(%0))

#undef	_FX
#undef	_RF0
#undef	_R64F0
#undef	_WF0
#undef	_WF1

/*
 * armv4/5 compatibility shims.
 *
 * These functions provide armv4 cache maintenance using the new armv6 names.
 * Only the functions actually used by common code are included here; more
 * may need to be added over time.
 *
 * The dcache routines take both the virtual and the physical address of the
 * range because the L2 cache controller may be physically indexed
 * (ARM_L2_PIPT), in which case L2 maintenance must be done by physical
 * address; otherwise it is done by virtual address.
 *
 * The callers of the dcache functions expect these routines to handle
 * address and size values which are not aligned to cacheline boundaries;
 * the armv4 and armv5 asm code handles that.
 */

static __inline void
tlb_flush_all(void)
{
	cpu_tlb_flushID();
	cpu_cpwait();
}

static __inline void
icache_sync(vm_offset_t va, vm_size_t size)
{
	cpu_icache_sync_range(va, size);
}

static __inline void
dcache_inv_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{

	cpu_dcache_inv_range(va, size);
#ifdef ARM_L2_PIPT
	cpu_l2cache_inv_range(pa, size);
#else
	cpu_l2cache_inv_range(va, size);
#endif
}

static __inline void
dcache_inv_poc_dma(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{

	/*
	 * Invalidate L2 before L1 so that stale data cannot migrate from
	 * L2 into L1 after the L1 invalidation; see the armv6 version of
	 * this routine in cpu-v6.h for the full discussion.
	 */
#ifdef ARM_L2_PIPT
	cpu_l2cache_inv_range(pa, size);
#else
	cpu_l2cache_inv_range(va, size);
#endif
	cpu_dcache_inv_range(va, size);
}

static __inline void
dcache_wb_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{

	cpu_dcache_wb_range(va, size);
#ifdef ARM_L2_PIPT
	cpu_l2cache_wb_range(pa, size);
#else
	cpu_l2cache_wb_range(va, size);
#endif
}

static __inline void
dcache_wbinv_poc_all(void)
{
	cpu_idcache_wbinv_all();
	cpu_l2cache_wbinv_all();
}
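
/*
 * For illustration (hypothetical driver-style usage, not from this file,
 * loosely following the bus_dmamap_sync(9) PREWRITE/PREREAD/POSTREAD
 * pattern): a buffer written by the CPU and then refilled by a device
 * might be maintained as
 *
 *	dcache_wb_poc(va, pa, size);	    -- push CPU writes to memory
 *	dcache_inv_poc_dma(va, pa, size);   -- discard dirty lines before DMA
 *	... device performs DMA into the buffer ...
 *	dcache_inv_poc(va, pa, size);	    -- discard stale lines before reads
 *
 * where va is the kernel virtual address of the buffer, pa its physical
 * address, and size its length in bytes.
 */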

#endif /* __ARM_ARCH < 6 */

#endif /* MACHINE_CPU_V4_H */