cpufunc.c revision 282830
/*	$NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $	*/

/*-
 * arm9 support code Copyright (C) 2001 ARM Ltd
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufuncs.c
 *
 * C functions for supporting CPU / MMU / TLB specific operations.
 *
 * Created      : 30/01/97
 */
44#include <sys/cdefs.h>
45__FBSDID("$FreeBSD: head/sys/arm/arm/cpufunc.c 282830 2015-05-13 05:46:04Z ganbold $");
46
47#include <sys/param.h>
48#include <sys/systm.h>
49#include <sys/lock.h>
50#include <sys/mutex.h>
51#include <sys/bus.h>
52#include <machine/bus.h>
53#include <machine/cpu.h>
54#include <machine/disassem.h>
55
56#include <vm/vm.h>
57#include <vm/pmap.h>
58#include <vm/uma.h>
59
60#include <machine/cpuconf.h>
61#include <machine/cpufunc.h>
62
63#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
64#include <arm/xscale/i80321/i80321reg.h>
65#include <arm/xscale/i80321/i80321var.h>
66#endif
67
68/*
69 * Some definitions in i81342reg.h clash with i80321reg.h.
70 * This only happens for the LINT kernel. As it happens,
71 * we don't need anything from i81342reg.h that we already
72 * got from somewhere else during a LINT compile.
73 */
74#if defined(CPU_XSCALE_81342) && !defined(COMPILING_LINT)
75#include <arm/xscale/i8134x/i81342reg.h>
76#endif
77
78#ifdef CPU_XSCALE_IXP425
79#include <arm/xscale/ixp425/ixp425reg.h>
80#include <arm/xscale/ixp425/ixp425var.h>
81#endif
82
/* PRIMARY CACHE VARIABLES */

/* Level-1 instruction cache geometry, filled in by get_cachetype_cp15(). */
int	arm_picache_size;
int	arm_picache_line_size;
int	arm_picache_ways;

/* Level-1 data (or unified) cache geometry. */
int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;
int	arm_pdcache_ways;

/* Cache type field and whether the I and D caches are unified. */
int	arm_pcache_type;
int	arm_pcache_unified;

/*
 * Data-cache line size and its mask; the mask is handed to
 * uma_set_align() in set_cpufuncs().
 */
int	arm_dcache_align;
int	arm_dcache_align_mask;

/*
 * ARMv7 cache hierarchy information read from CP15 by
 * get_cachetype_cp15(): cache level register, per-level/per-side size
 * registers, and level of coherency.
 */
u_int	arm_cache_level;
u_int	arm_cache_type[14];
u_int	arm_cache_loc;

/* NOTE(review): 'ctrl' appears unused within this file — verify callers. */
int ctrl;
103
#ifdef CPU_ARM9
/*
 * Operations vector for ARM9 cores.  Installed as the global 'cpufuncs'
 * table by set_cpufuncs() when an ARM9-class CPU id is detected.
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	arm9_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm9_icache_sync_all,		/* icache_sync_all	*/
	arm9_icache_sync_range,		/* icache_sync_range	*/

	arm9_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	arm9_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	arm9_dcache_inv_range,		/* dcache_inv_range	*/
	arm9_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	arm9_idcache_wbinv_all,		/* idcache_wbinv_all	*/
	arm9_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	/* All L2 slots are no-ops on this core. */
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm9_context_switch,		/* context_switch	*/

	arm9_setup			/* cpu setup		*/

};
#endif /* CPU_ARM9 */
167
#if defined(CPU_ARM9E)
/*
 * Operations vector for ARMv5 'EC' cores (selected for CPU_ID_ARM926EJS
 * in set_cpufuncs()).
 */
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	armv5_ec_setttb,		/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_all,	/* icache_sync_all	*/
	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	armv5_ec_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv5_ec_dcache_inv_range,	/* dcache_inv_range	*/
	armv5_ec_dcache_wb_range,	/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv5_ec_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	/* All L2 slots are no-ops on this core. */
	cpufunc_nullop,			/* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,		/* l2cache_inv_range    */
	(void *)cpufunc_nullop,		/* l2cache_wb_range     */
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/

};
231
/*
 * Operations vector for Marvell Sheeva cores (88FR131/88FR571); these use
 * Sheeva-specific range operations and real L2 cache maintenance routines.
 */
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	sheeva_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_all,	/* icache_sync_all	*/
	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	sheeva_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	sheeva_dcache_inv_range,	/* dcache_inv_range	*/
	sheeva_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	sheeva_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	sheeva_l2cache_wbinv_all,	/* l2cache_wbinv_all    */
	sheeva_l2cache_wbinv_range,	/* l2cache_wbinv_range  */
	sheeva_l2cache_inv_range,	/* l2cache_inv_range    */
	sheeva_l2cache_wb_range,	/* l2cache_wb_range     */
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	sheeva_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/
};
#endif /* CPU_ARM9E */
294
#ifdef CPU_MV_PJ4B
/*
 * Operations vector for Marvell PJ4B (ARMv7) cores.  As on other ARMv7
 * cores, all TLB flushes go through the unified-TLB 'ID' routines.
 */
struct cpu_functions pj4bv7_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	armv7_drain_writebuf,		/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	armv7_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv7_tlb_flushID,		/* tlb_flushID		*/
	armv7_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv7_tlb_flushID,		/* tlb_flushI		*/
	armv7_tlb_flushID_SE,		/* tlb_flushI_SE	*/
	armv7_tlb_flushID,		/* tlb_flushD		*/
	armv7_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */
	armv7_idcache_wbinv_all,	/* icache_sync_all	*/
	armv7_icache_sync_range,	/* icache_sync_range	*/

	armv7_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	armv7_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv7_dcache_inv_range,		/* dcache_inv_range	*/
	armv7_dcache_wb_range,		/* dcache_wb_range	*/

	armv7_idcache_inv_all,		/* idcache_inv_all	*/
	armv7_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv7_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	(void *)cpufunc_nullop,		/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv7_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	armv7_context_switch,		/* context_switch	*/

	pj4bv7_setup			/* cpu setup		*/
};
#endif /* CPU_MV_PJ4B */
357
#if defined(CPU_XSCALE_80321) || \
  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
  defined(CPU_XSCALE_80219)

/*
 * Operations vector shared by the Intel XScale variants (i80321/80219,
 * PXA2x0, IXP425/435).
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscale_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscale_cache_syncI,		/* icache_sync_all	*/
	xscale_cache_syncI_rng,		/* icache_sync_range	*/

	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_flushID,		/* idcache_inv_all	*/
	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	/* All L2 slots are no-ops on these cores. */
	cpufunc_nullop,			/* l2cache_wbinv_all 	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscale_context_switch,		/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
#endif
/* CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
   CPU_XSCALE_80219 */
425
#ifdef CPU_XSCALE_81342
/*
 * Operations vector for the XScale core 3 (i81342); unlike the other
 * XScale variants, this one has real L2 cache maintenance routines.
 */
struct cpu_functions xscalec3_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscalec3_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscalec3_cache_syncI,		/* icache_sync_all	*/
	xscalec3_cache_syncI_rng,	/* icache_sync_range	*/

	xscalec3_cache_purgeD,		/* dcache_wbinv_all	*/
	xscalec3_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscalec3_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_flushID,		/* idcache_inv_all	*/
	xscalec3_cache_purgeID,		/* idcache_wbinv_all	*/
	xscalec3_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	xscalec3_l2cache_purge,		/* l2cache_wbinv_all	*/
	xscalec3_l2cache_purge_rng,	/* l2cache_wbinv_range	*/
	xscalec3_l2cache_flush_rng,	/* l2cache_inv_range	*/
	xscalec3_l2cache_clean_rng,	/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscalec3_context_switch,	/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
#endif /* CPU_XSCALE_81342 */
488
489
#if defined(CPU_FA526)
/*
 * Operations vector for Faraday FA526/FA626TE cores.
 */
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	fa526_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	fa526_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	fa526_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	fa526_icache_sync_all,		/* icache_sync_all	*/
	fa526_icache_sync_range,	/* icache_sync_range	*/

	fa526_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	fa526_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	fa526_dcache_inv_range,		/* dcache_inv_range	*/
	fa526_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	fa526_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	fa526_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	/* All L2 slots are no-ops on this core. */
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	fa526_flush_prefetchbuf,	/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	fa526_flush_brnchtgt_E,		/* flush_brnchtgt_E	*/

	fa526_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	fa526_context_switch,		/* context_switch	*/

	fa526_setup			/* cpu setup 		*/
};
#endif	/* CPU_FA526 */
552
#if defined(CPU_ARM1176)
/*
 * Operations vector for ARM1176JZ(F)-S cores (ARM11 / ARMv6).
 */
struct cpu_functions arm1176_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* Domain               */
	arm11x6_setttb,                 /* Setttb               */
	cpufunc_faultstatus,            /* Faultstatus          */
	cpufunc_faultaddress,           /* Faultaddress         */

	/* TLB functions */

	arm11_tlb_flushID,              /* tlb_flushID          */
	arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
	arm11_tlb_flushI,               /* tlb_flushI           */
	arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
	arm11_tlb_flushD,               /* tlb_flushD           */
	arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	arm11x6_icache_sync_all,        /* icache_sync_all      */
	arm11x6_icache_sync_range,      /* icache_sync_range    */

	arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
	armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
	armv6_dcache_inv_range,         /* dcache_inv_range     */
	armv6_dcache_wb_range,          /* dcache_wb_range      */

	armv6_idcache_inv_all,		/* idcache_inv_all	*/
	arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
	arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */

	/* All L2 slots are no-ops on this core. */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	arm11x6_flush_prefetchbuf,      /* flush_prefetchbuf    */
	arm11_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	arm11x6_sleep,                  /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	arm11_context_switch,           /* context_switch       */

	arm11x6_setup                   /* cpu setup            */
};
#endif /* CPU_ARM1176 */
616
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Operations vector for ARMv7 Cortex-A and Qualcomm Krait cores.
 */
struct cpu_functions cortexa_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* Domain               */
	armv7_setttb,                   /* Setttb               */
	cpufunc_faultstatus,            /* Faultstatus          */
	cpufunc_faultaddress,           /* Faultaddress         */

	/*
	 * TLB functions.  ARMv7 does all TLB ops based on a unified TLB model
	 * whether the hardware implements separate I+D or not, so we use the
	 * same 'ID' functions for all 3 variations.
	 */

	armv7_tlb_flushID,              /* tlb_flushID          */
	armv7_tlb_flushID_SE,           /* tlb_flushID_SE       */
	armv7_tlb_flushID,              /* tlb_flushI           */
	armv7_tlb_flushID_SE,           /* tlb_flushI_SE        */
	armv7_tlb_flushID,              /* tlb_flushD           */
	armv7_tlb_flushID_SE,           /* tlb_flushD_SE        */

	/* Cache operations */

	armv7_icache_sync_all, 	        /* icache_sync_all      */
	armv7_icache_sync_range,        /* icache_sync_range    */

	armv7_dcache_wbinv_all,         /* dcache_wbinv_all     */
	armv7_dcache_wbinv_range,       /* dcache_wbinv_range   */
	armv7_dcache_inv_range,         /* dcache_inv_range     */
	armv7_dcache_wb_range,          /* dcache_wb_range      */

	armv7_idcache_inv_all,		/* idcache_inv_all	*/
	armv7_idcache_wbinv_all,        /* idcache_wbinv_all    */
	armv7_idcache_wbinv_range,      /* idcache_wbinv_range  */

	/*
	 * Note: For CPUs using the PL310 the L2 ops are filled in when the
	 * L2 cache controller is actually enabled.
	 */
	cpufunc_nullop,                 /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,                 /* flush_prefetchbuf    */
	armv7_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	armv7_sleep,                    /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	armv7_context_switch,           /* context_switch       */

	cortexa_setup                   /* cpu setup            */
};
#endif /* CPU_CORTEXA || CPU_KRAIT */
688
/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;		/* active operations table, set by set_cpufuncs() */
u_int cputype;				/* CPU id masked with CPU_ID_CPU_MASK */
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */

#if defined(CPU_ARM9) ||	\
  defined (CPU_ARM9E) ||	\
  defined(CPU_ARM1176) || defined(CPU_XSCALE_80321) ||		\
  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
  defined(CPU_FA526) || defined(CPU_MV_PJ4B) ||			\
  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
  defined(CPU_CORTEXA) || defined(CPU_KRAIT)

/* Global cache line sizes, use 32 as default */
int	arm_dcache_min_line_size = 32;
int	arm_icache_min_line_size = 32;
int	arm_idcache_min_line_size = 32;

static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int	arm_dcache_l2_nsets;
static int	arm_dcache_l2_assoc;
static int	arm_dcache_l2_linesize;

718static void
719get_cachetype_cp15()
720{
721	u_int ctype, isize, dsize, cpuid;
722	u_int clevel, csize, i, sel;
723	u_int multiplier;
724	u_char type;
725
726	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
727		: "=r" (ctype));
728
729	cpuid = cpufunc_id();
730	/*
731	 * ...and thus spake the ARM ARM:
732	 *
733	 * If an <opcode2> value corresponding to an unimplemented or
734	 * reserved ID register is encountered, the System Control
735	 * processor returns the value of the main ID register.
736	 */
737	if (ctype == cpuid)
738		goto out;
739
740	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
741		/* Resolve minimal cache line sizes */
742		arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
743		arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
744		arm_idcache_min_line_size =
745		    min(arm_icache_min_line_size, arm_dcache_min_line_size);
746
747		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
748		    : "=r" (clevel));
749		arm_cache_level = clevel;
750		arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
751		i = 0;
752		while ((type = (clevel & 0x7)) && i < 7) {
753			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
754			    type == CACHE_SEP_CACHE) {
755				sel = i << 1;
756				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
757				    : : "r" (sel));
758				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
759				    : "=r" (csize));
760				arm_cache_type[sel] = csize;
761				arm_dcache_align = 1 <<
762				    (CPUV7_CT_xSIZE_LEN(csize) + 4);
763				arm_dcache_align_mask = arm_dcache_align - 1;
764			}
765			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
766				sel = (i << 1) | 1;
767				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
768				    : : "r" (sel));
769				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
770				    : "=r" (csize));
771				arm_cache_type[sel] = csize;
772			}
773			i++;
774			clevel >>= 3;
775		}
776	} else {
777		if ((ctype & CPU_CT_S) == 0)
778			arm_pcache_unified = 1;
779
780		/*
781		 * If you want to know how this code works, go read the ARM ARM.
782		 */
783
784		arm_pcache_type = CPU_CT_CTYPE(ctype);
785
786		if (arm_pcache_unified == 0) {
787			isize = CPU_CT_ISIZE(ctype);
788			multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
789			arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
790			if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
791				if (isize & CPU_CT_xSIZE_M)
792					arm_picache_line_size = 0; /* not present */
793				else
794					arm_picache_ways = 1;
795			} else {
796				arm_picache_ways = multiplier <<
797				    (CPU_CT_xSIZE_ASSOC(isize) - 1);
798			}
799			arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
800		}
801
802		dsize = CPU_CT_DSIZE(ctype);
803		multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
804		arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
805		if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
806			if (dsize & CPU_CT_xSIZE_M)
807				arm_pdcache_line_size = 0; /* not present */
808			else
809				arm_pdcache_ways = 1;
810		} else {
811			arm_pdcache_ways = multiplier <<
812			    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
813		}
814		arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
815
816		arm_dcache_align = arm_pdcache_line_size;
817
818		arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
819		arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
820		arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
821		    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
822
823	out:
824		arm_dcache_align_mask = arm_dcache_align - 1;
825	}
826}
827#endif /* ARM9 || XSCALE */
828
829/*
830 * Cannot panic here as we may not have a console yet ...
831 */
832
833int
834set_cpufuncs()
835{
836	cputype = cpufunc_id();
837	cputype &= CPU_ID_CPU_MASK;
838
839#ifdef CPU_ARM9
840	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
841	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
842	    (cputype & 0x0000f000) == 0x00009000) {
843		cpufuncs = arm9_cpufuncs;
844		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
845		get_cachetype_cp15();
846		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
847		arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
848		    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
849		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
850		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
851		pmap_pte_init_generic();
852		goto out;
853	}
854#endif /* CPU_ARM9 */
855#if defined(CPU_ARM9E)
856	if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
857	    cputype == CPU_ID_MV88FR571_41) {
858		uint32_t sheeva_ctrl;
859
860		sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
861		    MV_L2_ENABLE);
862		/*
863		 * Workaround for Marvell MV78100 CPU: Cache prefetch
864		 * mechanism may affect the cache coherency validity,
865		 * so it needs to be disabled.
866		 *
867		 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
868		 * L2 Prefetching Mechanism) for details.
869		 */
870		if (cputype == CPU_ID_MV88FR571_VD ||
871		    cputype == CPU_ID_MV88FR571_41)
872			sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;
873
874		sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);
875
876		cpufuncs = sheeva_cpufuncs;
877		get_cachetype_cp15();
878		pmap_pte_init_generic();
879		goto out;
880	} else if (cputype == CPU_ID_ARM926EJS) {
881		cpufuncs = armv5_ec_cpufuncs;
882		get_cachetype_cp15();
883		pmap_pte_init_generic();
884		goto out;
885	}
886#endif /* CPU_ARM9E */
887#if defined(CPU_ARM1176)
888	if (cputype == CPU_ID_ARM1176JZS) {
889		cpufuncs = arm1176_cpufuncs;
890		cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
891		get_cachetype_cp15();
892
893		pmap_pte_init_mmu_v6();
894
895		goto out;
896	}
897#endif /* CPU_ARM1176 */
898#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
899	if (cputype == CPU_ID_CORTEXA5 ||
900	    cputype == CPU_ID_CORTEXA7 ||
901	    cputype == CPU_ID_CORTEXA8R1 ||
902	    cputype == CPU_ID_CORTEXA8R2 ||
903	    cputype == CPU_ID_CORTEXA8R3 ||
904	    cputype == CPU_ID_CORTEXA9R1 ||
905	    cputype == CPU_ID_CORTEXA9R2 ||
906	    cputype == CPU_ID_CORTEXA9R3 ||
907	    cputype == CPU_ID_CORTEXA12R0 ||
908	    cputype == CPU_ID_CORTEXA15R0 ||
909	    cputype == CPU_ID_CORTEXA15R1 ||
910	    cputype == CPU_ID_CORTEXA15R2 ||
911	    cputype == CPU_ID_CORTEXA15R3 ||
912	    cputype == CPU_ID_KRAIT ) {
913		cpufuncs = cortexa_cpufuncs;
914		cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
915		get_cachetype_cp15();
916
917		pmap_pte_init_mmu_v6();
918		goto out;
919	}
920#endif /* CPU_CORTEXA */
921
922#if defined(CPU_MV_PJ4B)
923	if (cputype == CPU_ID_MV88SV581X_V7 ||
924	    cputype == CPU_ID_MV88SV584X_V7 ||
925	    cputype == CPU_ID_ARM_88SV581X_V7) {
926		cpufuncs = pj4bv7_cpufuncs;
927		get_cachetype_cp15();
928		pmap_pte_init_mmu_v6();
929		goto out;
930	}
931#endif /* CPU_MV_PJ4B */
932
933#if defined(CPU_FA526)
934	if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
935		cpufuncs = fa526_cpufuncs;
936		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
937		get_cachetype_cp15();
938		pmap_pte_init_generic();
939
940		goto out;
941	}
942#endif	/* CPU_FA526 */
943
944#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
945	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
946	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
947	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
948		cpufuncs = xscale_cpufuncs;
949		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
950		get_cachetype_cp15();
951		pmap_pte_init_xscale();
952		goto out;
953	}
954#endif /* CPU_XSCALE_80321 */
955
956#if defined(CPU_XSCALE_81342)
957	if (cputype == CPU_ID_81342) {
958		cpufuncs = xscalec3_cpufuncs;
959		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
960		get_cachetype_cp15();
961		pmap_pte_init_xscale();
962		goto out;
963	}
964#endif /* CPU_XSCALE_81342 */
965#ifdef CPU_XSCALE_PXA2X0
966	/* ignore core revision to test PXA2xx CPUs */
967	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
968	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
969	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
970
971		cpufuncs = xscale_cpufuncs;
972		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
973		get_cachetype_cp15();
974		pmap_pte_init_xscale();
975
976		goto out;
977	}
978#endif /* CPU_XSCALE_PXA2X0 */
979#ifdef CPU_XSCALE_IXP425
980	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
981            cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
982
983		cpufuncs = xscale_cpufuncs;
984		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
985		get_cachetype_cp15();
986		pmap_pte_init_xscale();
987
988		goto out;
989	}
990#endif /* CPU_XSCALE_IXP425 */
991	/*
992	 * Bzzzz. And the answer was ...
993	 */
994	panic("No support for this CPU type (%08x) in kernel", cputype);
995	return(ARCHITECTURE_NOT_PRESENT);
996out:
997	uma_set_align(arm_dcache_align_mask);
998	return (0);
999}
1000
1001/*
1002 * Fixup routines for data and prefetch aborts.
1003 *
1004 * Several compile time symbols are used
1005 *
1006 * DEBUG_FAULT_CORRECTION - Print debugging information during the
1007 * correction of registers after a fault.
1008 */
1009
1010
1011/*
1012 * Null abort fixup routine.
1013 * For use when no fixup is required.
1014 */
1015int
1016cpufunc_null_fixup(arg)
1017	void *arg;
1018{
1019	return(ABORT_FIXUP_OK);
1020}
1021
1022/*
1023 * CPU Setup code
1024 */
1025
1026#ifdef CPU_ARM9
1027void
1028arm9_setup(void)
1029{
1030	int cpuctrl, cpuctrlmask;
1031
1032	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1033	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1034	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1035	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
1036	    CPU_CONTROL_ROUNDROBIN;
1037	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1038		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1039		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1040		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1041		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1042		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
1043		 | CPU_CONTROL_ROUNDROBIN;
1044
1045#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1046	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1047#endif
1048
1049#ifdef __ARMEB__
1050	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1051#endif
1052	if (vector_page == ARM_VECTORS_HIGH)
1053		cpuctrl |= CPU_CONTROL_VECRELOC;
1054
1055	/* Clear out the cache */
1056	cpu_idcache_wbinv_all();
1057
1058	/* Set the control register */
1059	cpu_control(cpuctrlmask, cpuctrl);
1060	ctrl = cpuctrl;
1061
1062}
1063#endif	/* CPU_ARM9 */
1064
1065#if defined(CPU_ARM9E)
void
arm10_setup(void)
{
	int cpuctrl, cpuctrlmask;

	/* Features to enable: MMU, caches, write buffer, branch prediction. */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
	/*
	 * NOTE(review): cpuctrlmask is informational only here; the
	 * cpu_control() call below writes with an all-ones mask.
	 */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	/* Big-endian kernel: run the core big-endian as well. */
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	/* CP15 c7/c7/0: invalidate I+D caches (operand register ignored). */
	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Set the control register */
	ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
}
1105#endif	/* CPU_ARM9E || CPU_ARM10 */
1106
1107#if defined(CPU_ARM1176) \
1108 || defined(CPU_MV_PJ4B) \
1109 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Program the CP15 performance-monitor registers: optionally grant
 * userland access (compile-time _PMC_USER_READ_WRITE_), then enable
 * the counters and reset the cycle counter (CCNT).
 */
static __inline void
cpu_scc_setup_ccnt(void)
{
/* This is how you give userland access to the CCNT and PMCn
 * registers.
 * BEWARE! This gives write access also, which may not be what
 * you want!
 */
#ifdef _PMC_USER_READ_WRITE_
#if defined(CPU_ARM1176)
	/* Use the Secure User and Non-secure Access Validation Control Register
	 * to allow userland access
	 */
	__asm volatile ("mcr	p15, 0, %0, c15, c9, 0\n\t"
			:
			: "r"(0x00000001));
#else
	/* Set PMUSERENR[0] to allow userland access */
	__asm volatile ("mcr	p15, 0, %0, c9, c14, 0\n\t"
			:
			: "r"(0x00000001));
#endif
#endif
#if defined(CPU_ARM1176)
	/* Set PMCR[2,0] to enable counters and reset CCNT */
	__asm volatile ("mcr	p15, 0, %0, c15, c12, 0\n\t"
			:
			: "r"(0x00000005));
#else
	/* Set up the PMCCNTR register as a cyclecounter:
	 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
	 * Set PMCR[2,0] to enable counters and reset CCNT
	 * Set PMCNTENSET to 0x80000000 to enable CCNT */
	__asm volatile ("mcr	p15, 0, %0, c9, c14, 2\n\t"
			"mcr	p15, 0, %1, c9, c12, 0\n\t"
			"mcr	p15, 0, %2, c9, c12, 1\n\t"
			:
			: "r"(0xFFFFFFFF),
			  "r"(0x00000005),
			  "r"(0x80000000));
#endif
}
1152#endif
1153
1154#if defined(CPU_ARM1176)
/*
 * CPU setup for the ARM1176: program the system control register,
 * apply an errata workaround via the auxiliary control register,
 * and initialize the cycle counter.
 */
void
arm11x6_setup(void)
{
	int cpuctrl, cpuctrl_wax;
	uint32_t auxctrl, auxctrl_wax;
	uint32_t tmp, tmp2;
	uint32_t sbz=0;		/* zero source for "should be zero" operands */
	uint32_t cpuid;

	cpuid = cpufunc_id();

	/* Control-register bits we want asserted. */
	cpuctrl =
		CPU_CONTROL_MMU_ENABLE  |
		CPU_CONTROL_DC_ENABLE   |
		CPU_CONTROL_WBUF_ENABLE |
		CPU_CONTROL_32BP_ENABLE |
		CPU_CONTROL_32BD_ENABLE |
		CPU_CONTROL_LABT_ENABLE |
		CPU_CONTROL_SYST_ENABLE |
		CPU_CONTROL_IC_ENABLE   |
		CPU_CONTROL_UNAL_ENABLE;

	/*
	 * "write as existing" bits
	 * inverse of this is mask
	 */
	cpuctrl_wax =
		(3 << 30) | /* SBZ */
		(1 << 29) | /* FA */
		(1 << 28) | /* TR */
		(3 << 26) | /* SBZ */
		(3 << 19) | /* SBZ */
		(1 << 17);  /* SBZ */

	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;

#ifdef __ARMEB__
	/* Big-endian kernel: run the core big-endian as well. */
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Default: leave the auxiliary control register untouched. */
	auxctrl = 0;
	auxctrl_wax = ~0;

	/*
	 * Enable an errata workaround
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
		auxctrl = ARM1176_AUXCTL_PHD;
		auxctrl_wax = ~ARM1176_AUXCTL_PHD;
	}

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));

	/* Allow detection code to find the VFP if it's fitted.  */
	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));

	/* Set the control register */
	ctrl = cpuctrl;
	cpu_control(~cpuctrl_wax, cpuctrl);

	/*
	 * Read-modify-write the auxiliary control register: keep the
	 * auxctrl_wax bits as-is, OR in auxctrl, and write back only
	 * when the value actually changed (mcrne).
	 */
	__asm volatile ("mrc	p15, 0, %0, c1, c0, 1\n\t"
			"and	%1, %0, %2\n\t"
			"orr	%1, %1, %3\n\t"
			"teq	%0, %1\n\t"
			"mcrne	p15, 0, %1, c1, c0, 1\n\t"
			: "=r"(tmp), "=r"(tmp2) :
			  "r"(auxctrl_wax), "r"(auxctrl));

	/* And again. */
	cpu_idcache_wbinv_all();

	cpu_scc_setup_ccnt();
}
1236#endif  /* CPU_ARM1176 */
1237
1238#ifdef CPU_MV_PJ4B
1239void
1240pj4bv7_setup(void)
1241{
1242	int cpuctrl;
1243
1244	pj4b_config();
1245
1246	cpuctrl = CPU_CONTROL_MMU_ENABLE;
1247#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1248	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1249#endif
1250	cpuctrl |= CPU_CONTROL_DC_ENABLE;
1251	cpuctrl |= (0xf << 3);
1252	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
1253	cpuctrl |= CPU_CONTROL_IC_ENABLE;
1254	if (vector_page == ARM_VECTORS_HIGH)
1255		cpuctrl |= CPU_CONTROL_VECRELOC;
1256	cpuctrl |= (0x5 << 16) | (1 < 22);
1257	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
1258
1259	/* Clear out the cache */
1260	cpu_idcache_wbinv_all();
1261
1262	/* Set the control register */
1263	ctrl = cpuctrl;
1264	cpu_control(0xFFFFFFFF, cpuctrl);
1265
1266	/* And again. */
1267	cpu_idcache_wbinv_all();
1268
1269	cpu_scc_setup_ccnt();
1270}
1271#endif /* CPU_MV_PJ4B */
1272
1273#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
1274
1275void
1276cortexa_setup(void)
1277{
1278	int cpuctrl, cpuctrlmask;
1279
1280	cpuctrlmask = CPU_CONTROL_MMU_ENABLE |     /* MMU enable         [0] */
1281	    CPU_CONTROL_AFLT_ENABLE |    /* Alignment fault    [1] */
1282	    CPU_CONTROL_DC_ENABLE |      /* DCache enable      [2] */
1283	    CPU_CONTROL_BPRD_ENABLE |    /* Branch prediction [11] */
1284	    CPU_CONTROL_IC_ENABLE |      /* ICache enable     [12] */
1285	    CPU_CONTROL_VECRELOC;        /* Vector relocation [13] */
1286
1287	cpuctrl = CPU_CONTROL_MMU_ENABLE |
1288	    CPU_CONTROL_IC_ENABLE |
1289	    CPU_CONTROL_DC_ENABLE |
1290	    CPU_CONTROL_BPRD_ENABLE;
1291
1292#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1293	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1294#endif
1295
1296	/* Switch to big endian */
1297#ifdef __ARMEB__
1298	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1299#endif
1300
1301	/* Check if the vector page is at the high address (0xffff0000) */
1302	if (vector_page == ARM_VECTORS_HIGH)
1303		cpuctrl |= CPU_CONTROL_VECRELOC;
1304
1305	/* Clear out the cache */
1306	cpu_idcache_wbinv_all();
1307
1308	/* Set the control register */
1309	ctrl = cpuctrl;
1310	cpu_control(cpuctrlmask, cpuctrl);
1311
1312	/* And again. */
1313	cpu_idcache_wbinv_all();
1314#ifdef SMP
1315	armv7_auxctrl((1 << 6) | (1 << 0), (1 << 6) | (1 << 0)); /* Enable SMP + TLB broadcasting  */
1316#endif
1317
1318	cpu_scc_setup_ccnt();
1319}
1320#endif  /* CPU_CORTEXA */
1321
1322#if defined(CPU_FA526)
1323void
1324fa526_setup(void)
1325{
1326	int cpuctrl, cpuctrlmask;
1327
1328	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1329		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1330		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1331		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1332		| CPU_CONTROL_BPRD_ENABLE;
1333	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1334		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1335		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1336		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1337		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1338		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1339		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1340
1341#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1342	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1343#endif
1344
1345#ifdef __ARMEB__
1346	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1347#endif
1348
1349	if (vector_page == ARM_VECTORS_HIGH)
1350		cpuctrl |= CPU_CONTROL_VECRELOC;
1351
1352	/* Clear out the cache */
1353	cpu_idcache_wbinv_all();
1354
1355	/* Set the control register */
1356	ctrl = cpuctrl;
1357	cpu_control(0xffffffff, cpuctrl);
1358}
1359#endif	/* CPU_FA526 */
1360
1361#if defined(CPU_XSCALE_80321) || \
1362  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1363  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
/*
 * CPU setup for Intel XScale cores: program the control register and
 * the XScale auxiliary control register (write coalescing, and L2 /
 * memory attributes on core-3 parts).
 */
void
xscale_setup(void)
{
	uint32_t auxctl;
	int cpuctrl, cpuctrlmask;

	/*
	 * The XScale Write Buffer is always enabled.  Our option
	 * is to enable/disable coalescing.  Note that bits 6:3
	 * must always be enabled.
	 */

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE;
	/*
	 * NOTE(review): cpuctrlmask is informational only; the masked
	 * cpu_control() call below is commented out and an all-ones
	 * mask is used instead.
	 */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
		 CPU_CONTROL_L2_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	/* Big-endian kernel: run the core big-endian as well. */
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#ifdef CPU_XSCALE_CORE3
	cpuctrl |= CPU_CONTROL_L2_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register.  Note that bits 6:3 must always
	 * be set to 1.
	 */
	ctrl = cpuctrl;
/*	cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/* Make sure write coalescing is turned on */
	/* Read-modify-write the auxiliary control register (cp15 c1, op2=1). */
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
		: "=r" (auxctl));
#ifdef XSCALE_NO_COALESCE_WRITES
	auxctl |= XSCALE_AUXCTL_K;
#else
	auxctl &= ~XSCALE_AUXCTL_K;
#endif
#ifdef CPU_XSCALE_CORE3
	/* Core-3 parts: low-locality reference and memory-attribute bits. */
	auxctl |= XSCALE_AUXCTL_LLR;
	auxctl |= XSCALE_AUXCTL_MD_MASK;
#endif
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
		: : "r" (auxctl));
}
1430#endif	/* CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
1431	   CPU_XSCALE_80219 */
1432