/*	$NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $	*/

/*-
 * arm9 support code Copyright (C) 2001 ARM Ltd
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufunc.c
 *
 * C functions for supporting CPU / MMU / TLB specific operations.
 *
 * Created      : 30/01/97
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/cpufunc.c 295252 2016-02-04 12:11:18Z mmel $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/disassem.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/uma.h>

#include <machine/cpuconf.h>
#include <machine/cpufunc.h>

#if defined(CPU_XSCALE_81342)
#include <arm/xscale/i8134x/i81342reg.h>
#endif

#ifdef CPU_XSCALE_IXP425
#include <arm/xscale/ixp425/ixp425reg.h>
#include <arm/xscale/ixp425/ixp425var.h>
#endif

/* PRIMARY CACHE VARIABLES */
int	arm_picache_size;
int	arm_picache_line_size;
int	arm_picache_ways;

int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;
int	arm_pdcache_ways;

int	arm_pcache_type;
int	arm_pcache_unified;

int	arm_dcache_align;
int	arm_dcache_align_mask;

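/*
 * ARMv7 cache hierarchy information captured by get_cachetype_cp15():
 * arm_cache_level holds the raw CLIDR, arm_cache_loc the Level of
 * Coherence decoded from it, and arm_cache_type[] the per-cache CCSIDR
 * words indexed by (level << 1) | (1 for icache, 0 for dcache/unified),
 * matching the CSSELR selector encoding.
 */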
u_int	arm_cache_level;
u_int	arm_cache_type[14];
u_int	arm_cache_loc;

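/*
 * Each table below fills in struct cpu_functions for one CPU family.
 * The cpu_*() wrappers used throughout the kernel dispatch indirectly
 * through the global 'cpufuncs' copy that set_cpufuncs() installs at
 * boot, so only the table matching the running core is ever invoked.
 */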
#ifdef CPU_ARM9
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	arm9_setttb,			/* Setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm9_icache_sync_range,		/* icache_sync_range	*/

	arm9_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	arm9_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	arm9_dcache_inv_range,		/* dcache_inv_range	*/
	arm9_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	arm9_idcache_wbinv_all,		/* idcache_wbinv_all	*/
	arm9_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	arm9_context_switch,		/* context_switch	*/

	arm9_setup			/* cpu setup		*/

};
#endif /* CPU_ARM9 */

#if defined(CPU_ARM9E)
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	armv5_ec_setttb,		/* Setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	armv5_ec_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv5_ec_dcache_inv_range,	/* dcache_inv_range	*/
	armv5_ec_dcache_wb_range,	/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv5_ec_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	arm9_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/

};

struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	sheeva_setttb,			/* Setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	sheeva_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	sheeva_dcache_inv_range,	/* dcache_inv_range	*/
	sheeva_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	sheeva_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	sheeva_l2cache_wbinv_all,	/* l2cache_wbinv_all	*/
	sheeva_l2cache_wbinv_range,	/* l2cache_wbinv_range	*/
	sheeva_l2cache_inv_range,	/* l2cache_inv_range	*/
	sheeva_l2cache_wb_range,	/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	sheeva_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	arm9_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/
};
#endif /* CPU_ARM9E */

#ifdef CPU_MV_PJ4B
struct cpu_functions pj4bv7_cpufuncs = {
	/* CPU functions */

	armv7_drain_writebuf,		/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	armv7_setttb,			/* Setttb		*/

	/* TLB functions */

	armv7_tlb_flushID,		/* tlb_flushID		*/
	armv7_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv7_tlb_flushID,		/* tlb_flushD		*/
	armv7_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */
	armv7_icache_sync_range,	/* icache_sync_range	*/

	armv7_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	armv7_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv7_dcache_inv_range,		/* dcache_inv_range	*/
	armv7_dcache_wb_range,		/* dcache_wb_range	*/

	armv7_idcache_inv_all,		/* idcache_inv_all	*/
	armv7_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv7_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	(void *)cpufunc_nullop,		/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv7_drain_writebuf,		/* drain_writebuf	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */
	armv7_context_switch,		/* context_switch	*/

	pj4bv7_setup			/* cpu setup		*/
};
#endif /* CPU_MV_PJ4B */

#if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425)

struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	xscale_setttb,			/* setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscale_cache_syncI_rng,		/* icache_sync_range	*/

	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_flushID,		/* idcache_inv_all	*/
	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	xscale_context_switch,		/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
#endif /* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 */

#ifdef CPU_XSCALE_81342
struct cpu_functions xscalec3_cpufuncs = {
	/* CPU functions */

	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	xscalec3_setttb,		/* setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscalec3_cache_syncI_rng,	/* icache_sync_range	*/

	xscalec3_cache_purgeD,		/* dcache_wbinv_all	*/
	xscalec3_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscalec3_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_flushID,		/* idcache_inv_all	*/
	xscalec3_cache_purgeID,		/* idcache_wbinv_all	*/
	xscalec3_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	xscalec3_l2cache_purge,		/* l2cache_wbinv_all	*/
	xscalec3_l2cache_purge_rng,	/* l2cache_wbinv_range	*/
	xscalec3_l2cache_flush_rng,	/* l2cache_inv_range	*/
	xscalec3_l2cache_clean_rng,	/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	xscalec3_context_switch,	/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
#endif /* CPU_XSCALE_81342 */

#if defined(CPU_FA526)
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	fa526_setttb,			/* setttb		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	fa526_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	fa526_icache_sync_range,	/* icache_sync_range	*/

	fa526_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	fa526_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	fa526_dcache_inv_range,		/* dcache_inv_range	*/
	fa526_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	fa526_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	fa526_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	armv4_drain_writebuf,		/* drain_writebuf	*/

	fa526_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	fa526_context_switch,		/* context_switch	*/

	fa526_setup			/* cpu setup		*/
};
#endif	/* CPU_FA526 */

#if defined(CPU_ARM1176)
struct cpu_functions arm1176_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	arm11x6_setttb,                 /* Setttb               */

	/* TLB functions */

	arm11_tlb_flushID,              /* tlb_flushID          */
	arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
	arm11_tlb_flushD,               /* tlb_flushD           */
	arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	arm11x6_icache_sync_range,      /* icache_sync_range    */

	arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
	armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
	armv6_dcache_inv_range,         /* dcache_inv_range     */
	armv6_dcache_wb_range,          /* dcache_wb_range      */

	armv6_idcache_inv_all,          /* idcache_inv_all      */
	arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
	arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */

	(void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	arm11_drain_writebuf,           /* drain_writebuf       */

	arm11x6_sleep,                  /* sleep                */

	/* Soft functions */

	arm11_context_switch,           /* context_switch       */

	arm11x6_setup                   /* cpu setup            */
};
#endif /* CPU_ARM1176 */

#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
struct cpu_functions cortexa_cpufuncs = {
	/* CPU functions */

	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	armv7_setttb,                   /* Setttb               */

	/*
	 * TLB functions.  ARMv7 does all TLB ops based on a unified TLB model
	 * whether the hardware implements separate I+D or not, so we use the
	 * same 'ID' functions for all 3 variations.
	 */

	armv7_tlb_flushID,              /* tlb_flushID          */
	armv7_tlb_flushID_SE,           /* tlb_flushID_SE       */
	armv7_tlb_flushID,              /* tlb_flushD           */
	armv7_tlb_flushID_SE,           /* tlb_flushD_SE        */

	/* Cache operations */

	armv7_icache_sync_range,        /* icache_sync_range    */

	armv7_dcache_wbinv_all,         /* dcache_wbinv_all     */
	armv7_dcache_wbinv_range,       /* dcache_wbinv_range   */
	armv7_dcache_inv_range,         /* dcache_inv_range     */
	armv7_dcache_wb_range,          /* dcache_wb_range      */

	armv7_idcache_inv_all,          /* idcache_inv_all      */
	armv7_idcache_wbinv_all,        /* idcache_wbinv_all    */
	armv7_idcache_wbinv_range,      /* idcache_wbinv_range  */

	/*
	 * Note: For CPUs using the PL310 the L2 ops are filled in when the
	 * L2 cache controller is actually enabled.
	 */
	cpufunc_nullop,                 /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	armv7_drain_writebuf,           /* drain_writebuf       */

	armv7_cpu_sleep,                /* sleep                */

	/* Soft functions */

	armv7_context_switch,           /* context_switch       */

	cortexa_setup                   /* cpu setup            */
};
#endif /* CPU_CORTEXA || CPU_KRAIT */

/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;
u_int cputype;
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */

#if defined(CPU_ARM9) ||	\
  defined(CPU_ARM9E) ||	\
  defined(CPU_ARM1176) ||	\
  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
  defined(CPU_FA526) || defined(CPU_MV_PJ4B) ||			\
  defined(CPU_XSCALE_81342) || \
  defined(CPU_CORTEXA) || defined(CPU_KRAIT)

/* Global cache line sizes, use 32 as default */
int	arm_dcache_min_line_size = 32;
int	arm_icache_min_line_size = 32;
int	arm_idcache_min_line_size = 32;

static void get_cachetype_cp15(void);

/*
 * Additional cache information local to this file.  Log2 of some of the
 * above numbers.
 */
static int	arm_dcache_l2_nsets;
static int	arm_dcache_l2_assoc;
static int	arm_dcache_l2_linesize;

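/*
 * Probe the cache geometry from the CP15 ID registers.  On ARMv7 this
 * walks the CLIDR and reads one CCSIDR per implemented cache level; on
 * earlier cores it decodes the single pre-v7 cache type register.  The
 * results seed the arm_*cache_* globals above.
 */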
static void
get_cachetype_cp15(void)
{
	u_int ctype, isize, dsize, cpuid;
	u_int clevel, csize, i, sel;
	u_int multiplier;
	u_char type;

	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	cpuid = cpu_ident();
	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpuid)
		goto out;

	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
		/* Resolve minimal cache line sizes */
		arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
		arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
		arm_idcache_min_line_size =
		    min(arm_icache_min_line_size, arm_dcache_min_line_size);

		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
		    : "=r" (clevel));
		arm_cache_level = clevel;
		arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
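		/*
		 * Walk the CLIDR, three type bits per cache level.  For
		 * each data/unified (and separate instruction) cache,
		 * select it via CSSELR and read back its CCSIDR.  The
		 * CCSIDR LineSize field is log2(words per line) - 2, so
		 * e.g. a field value of 1 decodes to 1 << (1 + 4) = 32
		 * byte lines.
		 */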
		i = 0;
		while ((type = (clevel & 0x7)) && i < 7) {
			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
			    type == CACHE_SEP_CACHE) {
				sel = i << 1;
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
				arm_dcache_align = 1 <<
				    (CPUV7_CT_xSIZE_LEN(csize) + 4);
				arm_dcache_align_mask = arm_dcache_align - 1;
			}
			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
				sel = (i << 1) | 1;
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
			}
			i++;
			clevel >>= 3;
		}
	} else {
		if ((ctype & CPU_CT_S) == 0)
			arm_pcache_unified = 1;

		/*
		 * If you want to know how this code works, go read the ARM ARM.
		 */

		arm_pcache_type = CPU_CT_CTYPE(ctype);

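		/*
		 * Pre-v7 cache type register fields decode (per the ARM
		 * ARM) as size = m * 2^(SIZE + 8) bytes, ways = m *
		 * 2^(ASSOC - 1) and line = 2^(LEN + 3) bytes, where m is
		 * 3 when the M bit is set and 2 otherwise.  For example,
		 * SIZE = 5, ASSOC = 2, LEN = 1, M = 0 describes a 16KB,
		 * 4-way cache with 16-byte lines.
		 */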
		if (arm_pcache_unified == 0) {
			isize = CPU_CT_ISIZE(ctype);
			multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
			arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
			if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
				if (isize & CPU_CT_xSIZE_M)
					arm_picache_line_size = 0; /* not present */
				else
					arm_picache_ways = 1;
			} else {
				arm_picache_ways = multiplier <<
				    (CPU_CT_xSIZE_ASSOC(isize) - 1);
			}
			arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
		}

		dsize = CPU_CT_DSIZE(ctype);
		multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
		if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
			if (dsize & CPU_CT_xSIZE_M)
				arm_pdcache_line_size = 0; /* not present */
			else
				arm_pdcache_ways = 1;
		} else {
			arm_pdcache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
		}
		arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

		arm_dcache_align = arm_pdcache_line_size;

		arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
		arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
		arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
		    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

	out:
		arm_dcache_align_mask = arm_dcache_align - 1;
	}
}
#endif /* CPU_ARM9 || CPU_ARM9E || CPU_ARM1176 || XSCALE || CPU_FA526 ||
	  CPU_MV_PJ4B || CPU_CORTEXA || CPU_KRAIT */

/*
 * Cannot panic here as we may not have a console yet ...
 */

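/*
 * Match the masked main ID register against each compiled-in CPU family,
 * install the corresponding cpu_functions table and probe the cache
 * geometry.  Returns 0 on success; an unrecognized CPU panics below.
 */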
int
set_cpufuncs(void)
{
	cputype = cpu_ident();
	cputype &= CPU_ID_CPU_MASK;

#ifdef CPU_ARM9
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
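		/*
		 * Precompute the iteration parameters for the arm9
		 * set/way cache ops: the set index advances in
		 * line-sized steps in the low bits of the op register,
		 * while the way index occupies its top log2(ways) bits.
		 */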
		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
		    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E)
	if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
	    cputype == CPU_ID_MV88FR571_41) {
		uint32_t sheeva_ctrl;

		sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
		    MV_L2_ENABLE);
		/*
		 * Workaround for Marvell MV78100 CPU: Cache prefetch
		 * mechanism may affect the cache coherency validity,
		 * so it needs to be disabled.
		 *
		 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
		 * L2 Prefetching Mechanism) for details.
		 */
		if (cputype == CPU_ID_MV88FR571_VD ||
		    cputype == CPU_ID_MV88FR571_41)
			sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;

		sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);

		cpufuncs = sheeva_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	} else if (cputype == CPU_ID_ARM926EJS) {
		cpufuncs = armv5_ec_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM9E */
#if defined(CPU_ARM1176)
	if (cputype == CPU_ID_ARM1176JZS) {
		cpufuncs = arm1176_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		goto out;
	}
#endif /* CPU_ARM1176 */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	if (cputype == CPU_ID_CORTEXA5 ||
	    cputype == CPU_ID_CORTEXA7 ||
	    cputype == CPU_ID_CORTEXA8R1 ||
	    cputype == CPU_ID_CORTEXA8R2 ||
	    cputype == CPU_ID_CORTEXA8R3 ||
	    cputype == CPU_ID_CORTEXA9R1 ||
	    cputype == CPU_ID_CORTEXA9R2 ||
	    cputype == CPU_ID_CORTEXA9R3 ||
	    cputype == CPU_ID_CORTEXA9R4 ||
	    cputype == CPU_ID_CORTEXA12R0 ||
	    cputype == CPU_ID_CORTEXA15R0 ||
	    cputype == CPU_ID_CORTEXA15R1 ||
	    cputype == CPU_ID_CORTEXA15R2 ||
	    cputype == CPU_ID_CORTEXA15R3 ||
	    cputype == CPU_ID_KRAIT300R0 ||
	    cputype == CPU_ID_KRAIT300R1) {
		cpufuncs = cortexa_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		goto out;
	}
#endif /* CPU_CORTEXA || CPU_KRAIT */

#if defined(CPU_MV_PJ4B)
	if (cputype == CPU_ID_MV88SV581X_V7 ||
	    cputype == CPU_ID_MV88SV584X_V7 ||
	    cputype == CPU_ID_ARM_88SV581X_V7) {
		cpufuncs = pj4bv7_cpufuncs;
		get_cachetype_cp15();
		goto out;
	}
#endif /* CPU_MV_PJ4B */

#if defined(CPU_FA526)
	if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
		cpufuncs = fa526_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_cp15();
		pmap_pte_init_generic();

		goto out;
	}
#endif	/* CPU_FA526 */

#if defined(CPU_XSCALE_81342)
	if (cputype == CPU_ID_81342) {
		cpufuncs = xscalec3_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_81342 */
#ifdef CPU_XSCALE_PXA2X0
	/* ignore core revision to test PXA2xx CPUs */
	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {

		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		goto out;
	}
#endif /* CPU_XSCALE_PXA2X0 */
#ifdef CPU_XSCALE_IXP425
	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
	    cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {

		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		goto out;
	}
#endif /* CPU_XSCALE_IXP425 */
	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	return (ARCHITECTURE_NOT_PRESENT);
out:
	uma_set_align(arm_dcache_align_mask);
	return (0);
}

/*
 * CPU Setup code
 */

#ifdef CPU_ARM9
void
arm9_setup(void)
{
	int cpuctrl, cpuctrlmask;

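	/*
	 * cpuctrl holds the desired control register (SCTLR) bit values;
	 * cpuctrlmask selects which bits cpu_control() is allowed to
	 * change, leaving the rest as the bootloader set them.
	 */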
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
	    | CPU_CONTROL_ROUNDROBIN;
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
		 | CPU_CONTROL_ROUNDROBIN;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	cpu_control(cpuctrlmask, cpuctrl);
}
#endif	/* CPU_ARM9 */

#if defined(CPU_ARM9E)
void
arm10_setup(void)
{
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean (invalidate I+D caches). */
	__asm __volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r" (0));

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Set the control register */
	cpu_control(0xffffffff, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
}
#endif	/* CPU_ARM9E */

#if defined(CPU_ARM1176) \
 || defined(CPU_MV_PJ4B) \
 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
static __inline void
cpu_scc_setup_ccnt(void)
{
/* This is how you give userland access to the CCNT and PMCn
 * registers.
 * BEWARE! This gives write access also, which may not be what
 * you want!
 */
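/*
 * Throughout this helper: PMCR bit 0 (E) enables the counters and bit 2
 * (C) resets the cycle counter, hence the value 5; PMCNTENSET bit 31
 * enables CCNT itself, and writing ~0 to PMINTENCLR masks all counter
 * overflow interrupts.
 */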
#ifdef _PMC_USER_READ_WRITE_
	/* Set PMUSERENR[0] to allow userland access */
	cp15_pmuserenr_set(1);
#endif
#if defined(CPU_ARM1176)
	/* Set PMCR[2,0] to enable counters and reset CCNT */
	cp15_pmcr_set(5);
#else
	/* Set up the PMCCNTR register as a cyclecounter:
	 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
	 * Set PMCR[2,0] to enable counters and reset CCNT
	 * Set PMCNTENSET to 0x80000000 to enable CCNT */
	cp15_pminten_clr(0xFFFFFFFF);
	cp15_pmcr_set(5);
	cp15_pmcnten_set(0x80000000);
#endif
}
#endif

#if defined(CPU_ARM1176)
void
arm11x6_setup(void)
{
	int cpuctrl, cpuctrl_wax;
	uint32_t auxctrl, auxctrl_wax;
	uint32_t tmp, tmp2;
	uint32_t sbz = 0;
	uint32_t cpuid;

	cpuid = cpu_ident();

	cpuctrl =
		CPU_CONTROL_MMU_ENABLE  |
		CPU_CONTROL_DC_ENABLE   |
		CPU_CONTROL_WBUF_ENABLE |
		CPU_CONTROL_32BP_ENABLE |
		CPU_CONTROL_32BD_ENABLE |
		CPU_CONTROL_LABT_ENABLE |
		CPU_CONTROL_SYST_ENABLE |
		CPU_CONTROL_IC_ENABLE   |
		CPU_CONTROL_UNAL_ENABLE;

	/*
	 * "write as existing" bits: these control register bits are
	 * left at their current values rather than forced; the inverse
	 * of this word is the mask passed to cpu_control() below.
	 */
	cpuctrl_wax =
		(3 << 30) | /* SBZ */
		(1 << 29) | /* FA */
		(1 << 28) | /* TR */
		(3 << 26) | /* SBZ */
		(3 << 19) | /* SBZ */
		(1 << 17);  /* SBZ */

	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	auxctrl = 0;
	auxctrl_wax = ~0;

	/*
	 * Enable an erratum workaround on ARM1176JZ-S r0 parts.
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
		auxctrl = ARM1176_AUXCTL_PHD;
		auxctrl_wax = ~ARM1176_AUXCTL_PHD;
	}

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));

	/* Allow detection code to find the VFP if it's fitted.  */
	cp15_cpacr_set(0x0fffffff);

	/* Set the control register */
	cpu_control(~cpuctrl_wax, cpuctrl);

	tmp = cp15_actlr_get();
	tmp2 = tmp;
	tmp &= auxctrl_wax;
	tmp |= auxctrl;
	if (tmp != tmp2)
		cp15_actlr_set(tmp);

	/* And again. */
	cpu_idcache_wbinv_all();

	cpu_scc_setup_ccnt();
}
#endif  /* CPU_ARM1176 */

#ifdef CPU_MV_PJ4B
void
pj4bv7_setup(void)
{
	int cpuctrl;

	pj4b_config();

	cpuctrl = CPU_CONTROL_MMU_ENABLE;
#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif
	cpuctrl |= CPU_CONTROL_DC_ENABLE;
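	/*
	 * Bits 6:3, 16 and 18 below are should-be-one bits in the ARMv7
	 * SCTLR (legacy write buffer / 32-bit / late-abort controls);
	 * bit 22 is the U (unaligned access) bit.
	 */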
	cpuctrl |= (0xf << 3);
	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
	cpuctrl |= CPU_CONTROL_IC_ENABLE;
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
	cpuctrl |= (0x5 << 16) | (1 << 22);
	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	cpu_control(0xFFFFFFFF, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();

	cpu_scc_setup_ccnt();
}
#endif /* CPU_MV_PJ4B */

#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)

void
cortexa_setup(void)
{
	int cpuctrl, cpuctrlmask;

	cpuctrlmask = CPU_CONTROL_MMU_ENABLE |     /* MMU enable         [0] */
	    CPU_CONTROL_AFLT_ENABLE |    /* Alignment fault    [1] */
	    CPU_CONTROL_DC_ENABLE |      /* DCache enable      [2] */
	    CPU_CONTROL_BPRD_ENABLE |    /* Branch prediction [11] */
	    CPU_CONTROL_IC_ENABLE |      /* ICache enable     [12] */
	    CPU_CONTROL_VECRELOC;        /* Vector relocation [13] */

	cpuctrl = CPU_CONTROL_MMU_ENABLE |
	    CPU_CONTROL_IC_ENABLE |
	    CPU_CONTROL_DC_ENABLE |
	    CPU_CONTROL_BPRD_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	/* Switch to big endian */
#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Check if the vector page is at the high address (0xffff0000) */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	cpu_control(cpuctrlmask, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
#if defined(SMP) && !defined(ARM_NEW_PMAP)
	/* Enable SMP mode and cache/TLB maintenance broadcasting */
	armv7_auxctrl((1 << 6) | (1 << 0), (1 << 6) | (1 << 0));
#endif

	cpu_scc_setup_ccnt();
}
#endif	/* CPU_CORTEXA || CPU_KRAIT */

#if defined(CPU_FA526)
void
fa526_setup(void)
{
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE;
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	cpu_control(0xffffffff, cpuctrl);
}
#endif	/* CPU_FA526 */

#if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
  defined(CPU_XSCALE_81342)
void
xscale_setup(void)
{
	uint32_t auxctl;
	int cpuctrl, cpuctrlmask;

	/*
	 * The XScale Write Buffer is always enabled.  Our option
	 * is to enable/disable coalescing.  Note that bits 6:3
	 * must always be enabled.
	 */

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE;
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC
		 | CPU_CONTROL_L2_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#ifdef CPU_XSCALE_CORE3
	cpuctrl |= CPU_CONTROL_L2_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register.  Note that bits 6:3 must always
	 * be set to 1.
	 */
/*	cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/* Make sure write coalescing is turned on */
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
		: "=r" (auxctl));
#ifdef XSCALE_NO_COALESCE_WRITES
	auxctl |= XSCALE_AUXCTL_K;
#else
	auxctl &= ~XSCALE_AUXCTL_K;
#endif
#ifdef CPU_XSCALE_CORE3
	auxctl |= XSCALE_AUXCTL_LLR;
	auxctl |= XSCALE_AUXCTL_MD_MASK;
#endif
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
		: : "r" (auxctl));
}
#endif	/* CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 || CPU_XSCALE_81342 */
