/* cpufunc.c, revision 277156 */
/*	$NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $	*/

/*-
 * arm9 support code Copyright (C) 2001 ARM Ltd
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufuncs.c
 *
 * C functions for supporting CPU / MMU / TLB specific operations.
 *
 * Created      : 30/01/97
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/cpufunc.c 277156 2015-01-14 01:23:50Z ganbold $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/disassem.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/uma.h>

#include <machine/cpuconf.h>
#include <machine/cpufunc.h>
#include <machine/bootconfig.h>

#ifdef CPU_XSCALE_80200
#include <arm/xscale/i80200/i80200reg.h>
#include <arm/xscale/i80200/i80200var.h>
#endif

#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
#include <arm/xscale/i80321/i80321reg.h>
#include <arm/xscale/i80321/i80321var.h>
#endif

/*
 * Some definitions in i81342reg.h clash with i80321reg.h.
 * This only happens for the LINT kernel. As it happens,
 * we don't need anything from i81342reg.h that we already
 * got from somewhere else during a LINT compile.
 */
#if defined(CPU_XSCALE_81342) && !defined(COMPILING_LINT)
#include <arm/xscale/i8134x/i81342reg.h>
#endif

#ifdef CPU_XSCALE_IXP425
#include <arm/xscale/ixp425/ixp425reg.h>
#include <arm/xscale/ixp425/ixp425var.h>
#endif
89/* PRIMARY CACHE VARIABLES */
90int	arm_picache_size;
91int	arm_picache_line_size;
92int	arm_picache_ways;
93
94int	arm_pdcache_size;	/* and unified */
95int	arm_pdcache_line_size;
96int	arm_pdcache_ways;
97
98int	arm_pcache_type;
99int	arm_pcache_unified;
100
101int	arm_dcache_align;
102int	arm_dcache_align_mask;
103
104u_int	arm_cache_level;
105u_int	arm_cache_type[14];
106u_int	arm_cache_loc;
107
108/* 1 == use cpu_sleep(), 0 == don't */
109int cpu_do_powersave;
110int ctrl;
111
112#ifdef CPU_ARM9
113struct cpu_functions arm9_cpufuncs = {
114	/* CPU functions */
115
116	cpufunc_id,			/* id			*/
117	cpufunc_nullop,			/* cpwait		*/
118
119	/* MMU functions */
120
121	cpufunc_control,		/* control		*/
122	cpufunc_domains,		/* Domain		*/
123	arm9_setttb,			/* Setttb		*/
124	cpufunc_faultstatus,		/* Faultstatus		*/
125	cpufunc_faultaddress,		/* Faultaddress		*/
126
127	/* TLB functions */
128
129	armv4_tlb_flushID,		/* tlb_flushID		*/
130	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
131	armv4_tlb_flushI,		/* tlb_flushI		*/
132	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
133	armv4_tlb_flushD,		/* tlb_flushD		*/
134	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
135
136	/* Cache operations */
137
138	arm9_icache_sync_all,		/* icache_sync_all	*/
139	arm9_icache_sync_range,		/* icache_sync_range	*/
140
141	arm9_dcache_wbinv_all,		/* dcache_wbinv_all	*/
142	arm9_dcache_wbinv_range,	/* dcache_wbinv_range	*/
143	arm9_dcache_inv_range,		/* dcache_inv_range	*/
144	arm9_dcache_wb_range,		/* dcache_wb_range	*/
145
146	armv4_idcache_inv_all,		/* idcache_inv_all	*/
147	arm9_idcache_wbinv_all,		/* idcache_wbinv_all	*/
148	arm9_idcache_wbinv_range,	/* idcache_wbinv_range	*/
149	cpufunc_nullop,			/* l2cache_wbinv_all	*/
150	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
151	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
152	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
153	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
154
155	/* Other functions */
156
157	cpufunc_nullop,			/* flush_prefetchbuf	*/
158	armv4_drain_writebuf,		/* drain_writebuf	*/
159	cpufunc_nullop,			/* flush_brnchtgt_C	*/
160	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
161
162	(void *)cpufunc_nullop,		/* sleep		*/
163
164	/* Soft functions */
165
166	cpufunc_null_fixup,		/* dataabt_fixup	*/
167	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
168
169	arm9_context_switch,		/* context_switch	*/
170
171	arm9_setup			/* cpu setup		*/
172
173};
174#endif /* CPU_ARM9 */
175
176#if defined(CPU_ARM9E) || defined(CPU_ARM10)
177struct cpu_functions armv5_ec_cpufuncs = {
178	/* CPU functions */
179
180	cpufunc_id,			/* id			*/
181	cpufunc_nullop,			/* cpwait		*/
182
183	/* MMU functions */
184
185	cpufunc_control,		/* control		*/
186	cpufunc_domains,		/* Domain		*/
187	armv5_ec_setttb,		/* Setttb		*/
188	cpufunc_faultstatus,		/* Faultstatus		*/
189	cpufunc_faultaddress,		/* Faultaddress		*/
190
191	/* TLB functions */
192
193	armv4_tlb_flushID,		/* tlb_flushID		*/
194	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
195	armv4_tlb_flushI,		/* tlb_flushI		*/
196	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
197	armv4_tlb_flushD,		/* tlb_flushD		*/
198	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
199
200	/* Cache operations */
201
202	armv5_ec_icache_sync_all,	/* icache_sync_all	*/
203	armv5_ec_icache_sync_range,	/* icache_sync_range	*/
204
205	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
206	armv5_ec_dcache_wbinv_range,	/* dcache_wbinv_range	*/
207	armv5_ec_dcache_inv_range,	/* dcache_inv_range	*/
208	armv5_ec_dcache_wb_range,	/* dcache_wb_range	*/
209
210	armv4_idcache_inv_all,		/* idcache_inv_all	*/
211	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
212	armv5_ec_idcache_wbinv_range,	/* idcache_wbinv_range	*/
213
214	cpufunc_nullop,                 /* l2cache_wbinv_all    */
215	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
216      	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
217	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
218	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
219
220	/* Other functions */
221
222	cpufunc_nullop,			/* flush_prefetchbuf	*/
223	armv4_drain_writebuf,		/* drain_writebuf	*/
224	cpufunc_nullop,			/* flush_brnchtgt_C	*/
225	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
226
227	(void *)cpufunc_nullop,		/* sleep		*/
228
229	/* Soft functions */
230
231	cpufunc_null_fixup,		/* dataabt_fixup	*/
232	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
233
234	arm10_context_switch,		/* context_switch	*/
235
236	arm10_setup			/* cpu setup		*/
237
238};
239
240struct cpu_functions sheeva_cpufuncs = {
241	/* CPU functions */
242
243	cpufunc_id,			/* id			*/
244	cpufunc_nullop,			/* cpwait		*/
245
246	/* MMU functions */
247
248	cpufunc_control,		/* control		*/
249	cpufunc_domains,		/* Domain		*/
250	sheeva_setttb,			/* Setttb		*/
251	cpufunc_faultstatus,		/* Faultstatus		*/
252	cpufunc_faultaddress,		/* Faultaddress		*/
253
254	/* TLB functions */
255
256	armv4_tlb_flushID,		/* tlb_flushID		*/
257	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
258	armv4_tlb_flushI,		/* tlb_flushI		*/
259	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
260	armv4_tlb_flushD,		/* tlb_flushD		*/
261	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
262
263	/* Cache operations */
264
265	armv5_ec_icache_sync_all,	/* icache_sync_all	*/
266	armv5_ec_icache_sync_range,	/* icache_sync_range	*/
267
268	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
269	sheeva_dcache_wbinv_range,	/* dcache_wbinv_range	*/
270	sheeva_dcache_inv_range,	/* dcache_inv_range	*/
271	sheeva_dcache_wb_range,		/* dcache_wb_range	*/
272
273	armv4_idcache_inv_all,		/* idcache_inv_all	*/
274	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
275	sheeva_idcache_wbinv_range,	/* idcache_wbinv_all	*/
276
277	sheeva_l2cache_wbinv_all,	/* l2cache_wbinv_all    */
278	sheeva_l2cache_wbinv_range,	/* l2cache_wbinv_range  */
279	sheeva_l2cache_inv_range,	/* l2cache_inv_range    */
280	sheeva_l2cache_wb_range,	/* l2cache_wb_range     */
281	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
282
283	/* Other functions */
284
285	cpufunc_nullop,			/* flush_prefetchbuf	*/
286	armv4_drain_writebuf,		/* drain_writebuf	*/
287	cpufunc_nullop,			/* flush_brnchtgt_C	*/
288	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
289
290	sheeva_cpu_sleep,		/* sleep		*/
291
292	/* Soft functions */
293
294	cpufunc_null_fixup,		/* dataabt_fixup	*/
295	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
296
297	arm10_context_switch,		/* context_switch	*/
298
299	arm10_setup			/* cpu setup		*/
300};
301#endif /* CPU_ARM9E || CPU_ARM10 */
302
303#ifdef CPU_ARM10
304struct cpu_functions arm10_cpufuncs = {
305	/* CPU functions */
306
307	cpufunc_id,			/* id			*/
308	cpufunc_nullop,			/* cpwait		*/
309
310	/* MMU functions */
311
312	cpufunc_control,		/* control		*/
313	cpufunc_domains,		/* Domain		*/
314	arm10_setttb,			/* Setttb		*/
315	cpufunc_faultstatus,		/* Faultstatus		*/
316	cpufunc_faultaddress,		/* Faultaddress		*/
317
318	/* TLB functions */
319
320	armv4_tlb_flushID,		/* tlb_flushID		*/
321	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
322	armv4_tlb_flushI,		/* tlb_flushI		*/
323	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
324	armv4_tlb_flushD,		/* tlb_flushD		*/
325	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
326
327	/* Cache operations */
328
329	arm10_icache_sync_all,		/* icache_sync_all	*/
330	arm10_icache_sync_range,	/* icache_sync_range	*/
331
332	arm10_dcache_wbinv_all,		/* dcache_wbinv_all	*/
333	arm10_dcache_wbinv_range,	/* dcache_wbinv_range	*/
334	arm10_dcache_inv_range,		/* dcache_inv_range	*/
335	arm10_dcache_wb_range,		/* dcache_wb_range	*/
336
337	armv4_idcache_inv_all,		/* idcache_inv_all	*/
338	arm10_idcache_wbinv_all,	/* idcache_wbinv_all	*/
339	arm10_idcache_wbinv_range,	/* idcache_wbinv_range	*/
340	cpufunc_nullop,			/* l2cache_wbinv_all	*/
341	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
342	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
343	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
344	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
345
346	/* Other functions */
347
348	cpufunc_nullop,			/* flush_prefetchbuf	*/
349	armv4_drain_writebuf,		/* drain_writebuf	*/
350	cpufunc_nullop,			/* flush_brnchtgt_C	*/
351	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
352
353	(void *)cpufunc_nullop,		/* sleep		*/
354
355	/* Soft functions */
356
357	cpufunc_null_fixup,		/* dataabt_fixup	*/
358	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
359
360	arm10_context_switch,		/* context_switch	*/
361
362	arm10_setup			/* cpu setup		*/
363
364};
365#endif /* CPU_ARM10 */
366
367#ifdef CPU_MV_PJ4B
368struct cpu_functions pj4bv7_cpufuncs = {
369	/* CPU functions */
370
371	cpufunc_id,			/* id			*/
372	armv7_drain_writebuf,		/* cpwait		*/
373
374	/* MMU functions */
375
376	cpufunc_control,		/* control		*/
377	cpufunc_domains,		/* Domain		*/
378	armv7_setttb,			/* Setttb		*/
379	cpufunc_faultstatus,		/* Faultstatus		*/
380	cpufunc_faultaddress,		/* Faultaddress		*/
381
382	/* TLB functions */
383
384	armv7_tlb_flushID,		/* tlb_flushID		*/
385	armv7_tlb_flushID_SE,		/* tlb_flushID_SE	*/
386	armv7_tlb_flushID,		/* tlb_flushI		*/
387	armv7_tlb_flushID_SE,		/* tlb_flushI_SE	*/
388	armv7_tlb_flushID,		/* tlb_flushD		*/
389	armv7_tlb_flushID_SE,		/* tlb_flushD_SE	*/
390
391	/* Cache operations */
392	armv7_idcache_wbinv_all,	/* icache_sync_all	*/
393	armv7_icache_sync_range,	/* icache_sync_range	*/
394
395	armv7_dcache_wbinv_all,		/* dcache_wbinv_all	*/
396	armv7_dcache_wbinv_range,	/* dcache_wbinv_range	*/
397	armv7_dcache_inv_range,		/* dcache_inv_range	*/
398	armv7_dcache_wb_range,		/* dcache_wb_range	*/
399
400	armv7_idcache_inv_all,		/* idcache_inv_all	*/
401	armv7_idcache_wbinv_all,	/* idcache_wbinv_all	*/
402	armv7_idcache_wbinv_range,	/* idcache_wbinv_all	*/
403
404	(void *)cpufunc_nullop,		/* l2cache_wbinv_all	*/
405	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
406	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
407	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
408	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
409
410	/* Other functions */
411
412	cpufunc_nullop,			/* flush_prefetchbuf	*/
413	armv7_drain_writebuf,		/* drain_writebuf	*/
414	cpufunc_nullop,			/* flush_brnchtgt_C	*/
415	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
416
417	(void *)cpufunc_nullop,		/* sleep		*/
418
419	/* Soft functions */
420
421	cpufunc_null_fixup,		/* dataabt_fixup	*/
422	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
423
424	armv7_context_switch,		/* context_switch	*/
425
426	pj4bv7_setup			/* cpu setup		*/
427};
428#endif /* CPU_MV_PJ4B */
429
430#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
431  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
432  defined(CPU_XSCALE_80219)
433
434struct cpu_functions xscale_cpufuncs = {
435	/* CPU functions */
436
437	cpufunc_id,			/* id			*/
438	xscale_cpwait,			/* cpwait		*/
439
440	/* MMU functions */
441
442	xscale_control,			/* control		*/
443	cpufunc_domains,		/* domain		*/
444	xscale_setttb,			/* setttb		*/
445	cpufunc_faultstatus,		/* faultstatus		*/
446	cpufunc_faultaddress,		/* faultaddress		*/
447
448	/* TLB functions */
449
450	armv4_tlb_flushID,		/* tlb_flushID		*/
451	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
452	armv4_tlb_flushI,		/* tlb_flushI		*/
453	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
454	armv4_tlb_flushD,		/* tlb_flushD		*/
455	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
456
457	/* Cache operations */
458
459	xscale_cache_syncI,		/* icache_sync_all	*/
460	xscale_cache_syncI_rng,		/* icache_sync_range	*/
461
462	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
463	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
464	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
465	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/
466
467	xscale_cache_flushID,		/* idcache_inv_all	*/
468	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
469	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/
470	cpufunc_nullop,			/* l2cache_wbinv_all 	*/
471	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
472	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
473	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
474	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
475
476	/* Other functions */
477
478	cpufunc_nullop,			/* flush_prefetchbuf	*/
479	armv4_drain_writebuf,		/* drain_writebuf	*/
480	cpufunc_nullop,			/* flush_brnchtgt_C	*/
481	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
482
483	xscale_cpu_sleep,		/* sleep		*/
484
485	/* Soft functions */
486
487	cpufunc_null_fixup,		/* dataabt_fixup	*/
488	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
489
490	xscale_context_switch,		/* context_switch	*/
491
492	xscale_setup			/* cpu setup		*/
493};
494#endif
495/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
496   CPU_XSCALE_80219 */
497
498#ifdef CPU_XSCALE_81342
499struct cpu_functions xscalec3_cpufuncs = {
500	/* CPU functions */
501
502	cpufunc_id,			/* id			*/
503	xscale_cpwait,			/* cpwait		*/
504
505	/* MMU functions */
506
507	xscale_control,			/* control		*/
508	cpufunc_domains,		/* domain		*/
509	xscalec3_setttb,		/* setttb		*/
510	cpufunc_faultstatus,		/* faultstatus		*/
511	cpufunc_faultaddress,		/* faultaddress		*/
512
513	/* TLB functions */
514
515	armv4_tlb_flushID,		/* tlb_flushID		*/
516	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
517	armv4_tlb_flushI,		/* tlb_flushI		*/
518	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
519	armv4_tlb_flushD,		/* tlb_flushD		*/
520	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
521
522	/* Cache operations */
523
524	xscalec3_cache_syncI,		/* icache_sync_all	*/
525	xscalec3_cache_syncI_rng,	/* icache_sync_range	*/
526
527	xscalec3_cache_purgeD,		/* dcache_wbinv_all	*/
528	xscalec3_cache_purgeD_rng,	/* dcache_wbinv_range	*/
529	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
530	xscalec3_cache_cleanD_rng,	/* dcache_wb_range	*/
531
532	xscale_cache_flushID,		/* idcache_inv_all	*/
533	xscalec3_cache_purgeID,		/* idcache_wbinv_all	*/
534	xscalec3_cache_purgeID_rng,	/* idcache_wbinv_range	*/
535	xscalec3_l2cache_purge,		/* l2cache_wbinv_all	*/
536	xscalec3_l2cache_purge_rng,	/* l2cache_wbinv_range	*/
537	xscalec3_l2cache_flush_rng,	/* l2cache_inv_range	*/
538	xscalec3_l2cache_clean_rng,	/* l2cache_wb_range	*/
539	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
540
541	/* Other functions */
542
543	cpufunc_nullop,			/* flush_prefetchbuf	*/
544	armv4_drain_writebuf,		/* drain_writebuf	*/
545	cpufunc_nullop,			/* flush_brnchtgt_C	*/
546	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
547
548	xscale_cpu_sleep,		/* sleep		*/
549
550	/* Soft functions */
551
552	cpufunc_null_fixup,		/* dataabt_fixup	*/
553	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
554
555	xscalec3_context_switch,	/* context_switch	*/
556
557	xscale_setup			/* cpu setup		*/
558};
559#endif /* CPU_XSCALE_81342 */
560
561
562#if defined(CPU_FA526) || defined(CPU_FA626TE)
563struct cpu_functions fa526_cpufuncs = {
564	/* CPU functions */
565
566	cpufunc_id,			/* id			*/
567	cpufunc_nullop,			/* cpwait		*/
568
569	/* MMU functions */
570
571	cpufunc_control,		/* control		*/
572	cpufunc_domains,		/* domain		*/
573	fa526_setttb,			/* setttb		*/
574	cpufunc_faultstatus,		/* faultstatus		*/
575	cpufunc_faultaddress,		/* faultaddress		*/
576
577	/* TLB functions */
578
579	armv4_tlb_flushID,		/* tlb_flushID		*/
580	fa526_tlb_flushID_SE,		/* tlb_flushID_SE	*/
581	armv4_tlb_flushI,		/* tlb_flushI		*/
582	fa526_tlb_flushI_SE,		/* tlb_flushI_SE	*/
583	armv4_tlb_flushD,		/* tlb_flushD		*/
584	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
585
586	/* Cache operations */
587
588	fa526_icache_sync_all,		/* icache_sync_all	*/
589	fa526_icache_sync_range,	/* icache_sync_range	*/
590
591	fa526_dcache_wbinv_all,		/* dcache_wbinv_all	*/
592	fa526_dcache_wbinv_range,	/* dcache_wbinv_range	*/
593	fa526_dcache_inv_range,		/* dcache_inv_range	*/
594	fa526_dcache_wb_range,		/* dcache_wb_range	*/
595
596	armv4_idcache_inv_all,		/* idcache_inv_all	*/
597	fa526_idcache_wbinv_all,	/* idcache_wbinv_all	*/
598	fa526_idcache_wbinv_range,	/* idcache_wbinv_range	*/
599	cpufunc_nullop,			/* l2cache_wbinv_all	*/
600	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
601	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
602	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
603	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
604
605	/* Other functions */
606
607	fa526_flush_prefetchbuf,	/* flush_prefetchbuf	*/
608	armv4_drain_writebuf,		/* drain_writebuf	*/
609	cpufunc_nullop,			/* flush_brnchtgt_C	*/
610	fa526_flush_brnchtgt_E,		/* flush_brnchtgt_E	*/
611
612	fa526_cpu_sleep,		/* sleep		*/
613
614	/* Soft functions */
615
616	cpufunc_null_fixup,		/* dataabt_fixup	*/
617	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
618
619	fa526_context_switch,		/* context_switch	*/
620
621	fa526_setup			/* cpu setup 		*/
622};
623#endif	/* CPU_FA526 || CPU_FA626TE */
624
625#if defined(CPU_ARM1136)
626struct cpu_functions arm1136_cpufuncs = {
627	/* CPU functions */
628
629	cpufunc_id,                     /* id                   */
630	cpufunc_nullop,                 /* cpwait               */
631
632	/* MMU functions */
633
634	cpufunc_control,                /* control              */
635	cpufunc_domains,                /* Domain               */
636	arm11x6_setttb,                 /* Setttb               */
637	cpufunc_faultstatus,            /* Faultstatus          */
638	cpufunc_faultaddress,           /* Faultaddress         */
639
640	/* TLB functions */
641
642	arm11_tlb_flushID,              /* tlb_flushID          */
643	arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
644	arm11_tlb_flushI,               /* tlb_flushI           */
645	arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
646	arm11_tlb_flushD,               /* tlb_flushD           */
647	arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */
648
649	/* Cache operations */
650
651	arm11x6_icache_sync_all,        /* icache_sync_all      */
652	arm11x6_icache_sync_range,      /* icache_sync_range    */
653
654	arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
655	armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
656	armv6_dcache_inv_range,         /* dcache_inv_range     */
657	armv6_dcache_wb_range,          /* dcache_wb_range      */
658
659	armv6_idcache_inv_all,		/* idcache_inv_all	*/
660	arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
661	arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */
662
663	(void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
664	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
665	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
666	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
667	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
668
669	/* Other functions */
670
671	arm11x6_flush_prefetchbuf,      /* flush_prefetchbuf    */
672	arm11_drain_writebuf,           /* drain_writebuf       */
673	cpufunc_nullop,                 /* flush_brnchtgt_C     */
674	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
675
676	arm11_sleep,                  	/* sleep                */
677
678	/* Soft functions */
679
680	cpufunc_null_fixup,             /* dataabt_fixup        */
681	cpufunc_null_fixup,             /* prefetchabt_fixup    */
682
683	arm11_context_switch,           /* context_switch       */
684
685	arm11x6_setup                   /* cpu setup            */
686};
687#endif /* CPU_ARM1136 */
688#if defined(CPU_ARM1176)
689struct cpu_functions arm1176_cpufuncs = {
690	/* CPU functions */
691
692	cpufunc_id,                     /* id                   */
693	cpufunc_nullop,                 /* cpwait               */
694
695	/* MMU functions */
696
697	cpufunc_control,                /* control              */
698	cpufunc_domains,                /* Domain               */
699	arm11x6_setttb,                 /* Setttb               */
700	cpufunc_faultstatus,            /* Faultstatus          */
701	cpufunc_faultaddress,           /* Faultaddress         */
702
703	/* TLB functions */
704
705	arm11_tlb_flushID,              /* tlb_flushID          */
706	arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
707	arm11_tlb_flushI,               /* tlb_flushI           */
708	arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
709	arm11_tlb_flushD,               /* tlb_flushD           */
710	arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */
711
712	/* Cache operations */
713
714	arm11x6_icache_sync_all,        /* icache_sync_all      */
715	arm11x6_icache_sync_range,      /* icache_sync_range    */
716
717	arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
718	armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
719	armv6_dcache_inv_range,         /* dcache_inv_range     */
720	armv6_dcache_wb_range,          /* dcache_wb_range      */
721
722	armv6_idcache_inv_all,		/* idcache_inv_all	*/
723	arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
724	arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */
725
726	(void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
727	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
728	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
729	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
730	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
731
732	/* Other functions */
733
734	arm11x6_flush_prefetchbuf,      /* flush_prefetchbuf    */
735	arm11_drain_writebuf,           /* drain_writebuf       */
736	cpufunc_nullop,                 /* flush_brnchtgt_C     */
737	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
738
739	arm11x6_sleep,                  /* sleep                */
740
741	/* Soft functions */
742
743	cpufunc_null_fixup,             /* dataabt_fixup        */
744	cpufunc_null_fixup,             /* prefetchabt_fixup    */
745
746	arm11_context_switch,           /* context_switch       */
747
748	arm11x6_setup                   /* cpu setup            */
749};
750#endif /*CPU_ARM1176 */
751
752#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
753struct cpu_functions cortexa_cpufuncs = {
754	/* CPU functions */
755
756	cpufunc_id,                     /* id                   */
757	cpufunc_nullop,                 /* cpwait               */
758
759	/* MMU functions */
760
761	cpufunc_control,                /* control              */
762	cpufunc_domains,                /* Domain               */
763	armv7_setttb,                   /* Setttb               */
764	cpufunc_faultstatus,            /* Faultstatus          */
765	cpufunc_faultaddress,           /* Faultaddress         */
766
767	/*
768	 * TLB functions.  ARMv7 does all TLB ops based on a unified TLB model
769	 * whether the hardware implements separate I+D or not, so we use the
770	 * same 'ID' functions for all 3 variations.
771	 */
772
773	armv7_tlb_flushID,              /* tlb_flushID          */
774	armv7_tlb_flushID_SE,           /* tlb_flushID_SE       */
775	armv7_tlb_flushID,              /* tlb_flushI           */
776	armv7_tlb_flushID_SE,           /* tlb_flushI_SE        */
777	armv7_tlb_flushID,              /* tlb_flushD           */
778	armv7_tlb_flushID_SE,           /* tlb_flushD_SE        */
779
780	/* Cache operations */
781
782	armv7_icache_sync_all, 	        /* icache_sync_all      */
783	armv7_icache_sync_range,        /* icache_sync_range    */
784
785	armv7_dcache_wbinv_all,         /* dcache_wbinv_all     */
786	armv7_dcache_wbinv_range,       /* dcache_wbinv_range   */
787	armv7_dcache_inv_range,         /* dcache_inv_range     */
788	armv7_dcache_wb_range,          /* dcache_wb_range      */
789
790	armv7_idcache_inv_all,		/* idcache_inv_all	*/
791	armv7_idcache_wbinv_all,        /* idcache_wbinv_all    */
792	armv7_idcache_wbinv_range,      /* idcache_wbinv_range  */
793
794	/*
795	 * Note: For CPUs using the PL310 the L2 ops are filled in when the
796	 * L2 cache controller is actually enabled.
797	 */
798	cpufunc_nullop,                 /* l2cache_wbinv_all    */
799	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
800	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
801	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
802	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */
803
804	/* Other functions */
805
806	cpufunc_nullop,                 /* flush_prefetchbuf    */
807	armv7_drain_writebuf,           /* drain_writebuf       */
808	cpufunc_nullop,                 /* flush_brnchtgt_C     */
809	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
810
811	armv7_sleep,                    /* sleep                */
812
813	/* Soft functions */
814
815	cpufunc_null_fixup,             /* dataabt_fixup        */
816	cpufunc_null_fixup,             /* prefetchabt_fixup    */
817
818	armv7_context_switch,           /* context_switch       */
819
820	cortexa_setup                     /* cpu setup            */
821};
822#endif /* CPU_CORTEXA */
823
824/*
825 * Global constants also used by locore.s
826 */
827
828struct cpu_functions cpufuncs;
829u_int cputype;
830u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
831
832#if defined(CPU_ARM9) ||	\
833  defined (CPU_ARM9E) || defined (CPU_ARM10) || defined (CPU_ARM1136) ||	\
834  defined(CPU_ARM1176) || defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||		\
835  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
836  defined(CPU_FA526) || defined(CPU_FA626TE) || defined(CPU_MV_PJ4B) ||			\
837  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
838  defined(CPU_CORTEXA) || defined(CPU_KRAIT)
839
840static void get_cachetype_cp15(void);
841
842/* Additional cache information local to this file.  Log2 of some of the
843   above numbers.  */
844static int	arm_dcache_l2_nsets;
845static int	arm_dcache_l2_assoc;
846static int	arm_dcache_l2_linesize;
847
848static void
849get_cachetype_cp15()
850{
851	u_int ctype, isize, dsize, cpuid;
852	u_int clevel, csize, i, sel;
853	u_int multiplier;
854	u_char type;
855
856	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
857		: "=r" (ctype));
858
859	cpuid = cpufunc_id();
860	/*
861	 * ...and thus spake the ARM ARM:
862	 *
863	 * If an <opcode2> value corresponding to an unimplemented or
864	 * reserved ID register is encountered, the System Control
865	 * processor returns the value of the main ID register.
866	 */
867	if (ctype == cpuid)
868		goto out;
869
870	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
871		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
872		    : "=r" (clevel));
873		arm_cache_level = clevel;
874		arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
875		i = 0;
876		while ((type = (clevel & 0x7)) && i < 7) {
877			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
878			    type == CACHE_SEP_CACHE) {
879				sel = i << 1;
880				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
881				    : : "r" (sel));
882				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
883				    : "=r" (csize));
884				arm_cache_type[sel] = csize;
885				arm_dcache_align = 1 <<
886				    (CPUV7_CT_xSIZE_LEN(csize) + 4);
887				arm_dcache_align_mask = arm_dcache_align - 1;
888			}
889			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
890				sel = (i << 1) | 1;
891				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
892				    : : "r" (sel));
893				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
894				    : "=r" (csize));
895				arm_cache_type[sel] = csize;
896			}
897			i++;
898			clevel >>= 3;
899		}
900	} else {
901		if ((ctype & CPU_CT_S) == 0)
902			arm_pcache_unified = 1;
903
904		/*
905		 * If you want to know how this code works, go read the ARM ARM.
906		 */
907
908		arm_pcache_type = CPU_CT_CTYPE(ctype);
909
910		if (arm_pcache_unified == 0) {
911			isize = CPU_CT_ISIZE(ctype);
912			multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
913			arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
914			if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
915				if (isize & CPU_CT_xSIZE_M)
916					arm_picache_line_size = 0; /* not present */
917				else
918					arm_picache_ways = 1;
919			} else {
920				arm_picache_ways = multiplier <<
921				    (CPU_CT_xSIZE_ASSOC(isize) - 1);
922			}
923			arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
924		}
925
926		dsize = CPU_CT_DSIZE(ctype);
927		multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
928		arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
929		if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
930			if (dsize & CPU_CT_xSIZE_M)
931				arm_pdcache_line_size = 0; /* not present */
932			else
933				arm_pdcache_ways = 1;
934		} else {
935			arm_pdcache_ways = multiplier <<
936			    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
937		}
938		arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
939
940		arm_dcache_align = arm_pdcache_line_size;
941
942		arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
943		arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
944		arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
945		    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
946
947	out:
948		arm_dcache_align_mask = arm_dcache_align - 1;
949	}
950}
951#endif /* ARM9 || XSCALE */
952
953/*
954 * Cannot panic here as we may not have a console yet ...
955 */
956
/*
 * Identify the running CPU from its ID register and install the
 * matching cpufuncs vector, probe the cache geometry, and select the
 * appropriate pmap PTE initialization.  Returns 0 on success; panics
 * if the CPU type is not supported by this kernel configuration.
 */
int
set_cpufuncs()
{
	cputype = cpufunc_id();
	cputype &= CPU_ID_CPU_MASK;

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */

#ifdef CPU_ARM9
	/* Any ARM Ltd or TI part whose primary part number is 9xx. */
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		/*
		 * Precompute the set/index iteration bounds used by the
		 * arm9 dcache maintenance routines from the geometry
		 * that get_cachetype_cp15() just filled in.
		 */
		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
		    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
#ifdef ARM9_CACHE_WRITE_THROUGH
		pmap_pte_init_arm9();
#else
		pmap_pte_init_generic();
#endif
		goto out;
	}
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E) || defined(CPU_ARM10)
	if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
	    cputype == CPU_ID_MV88FR571_41) {
		uint32_t sheeva_ctrl;

		sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
		    MV_L2_ENABLE);
		/*
		 * Workaround for Marvell MV78100 CPU: Cache prefetch
		 * mechanism may affect the cache coherency validity,
		 * so it needs to be disabled.
		 *
		 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
		 * L2 Prefetching Mechanism) for details.
		 */
		if (cputype == CPU_ID_MV88FR571_VD ||
		    cputype == CPU_ID_MV88FR571_41)
			sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;

		sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);

		cpufuncs = sheeva_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	} else if (cputype == CPU_ID_ARM926EJS || cputype == CPU_ID_ARM1026EJS) {
		cpufuncs = armv5_ec_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM9E || CPU_ARM10 */
#ifdef CPU_ARM10
	if (/* cputype == CPU_ID_ARM1020T || */
	    cputype == CPU_ID_ARM1020E) {
		/*
		 * Select write-through cacheing (this isn't really an
		 * option on ARM1020T).
		 */
		cpufuncs = arm10_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		/* Same precomputation as the arm9 case above. */
		arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm10_dcache_sets_max =
		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
		    arm10_dcache_sets_inc;
		arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM10 */
#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
	if (cputype == CPU_ID_ARM1136JS
	    || cputype == CPU_ID_ARM1136JSR1
	    || cputype == CPU_ID_ARM1176JZS) {
#ifdef CPU_ARM1136
		if (cputype == CPU_ID_ARM1136JS
		    || cputype == CPU_ID_ARM1136JSR1)
			cpufuncs = arm1136_cpufuncs;
#endif
#ifdef CPU_ARM1176
		if (cputype == CPU_ID_ARM1176JZS)
			cpufuncs = arm1176_cpufuncs;
#endif
		cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
		get_cachetype_cp15();

		pmap_pte_init_mmu_v6();

		goto out;
	}
#endif /* CPU_ARM1136 || CPU_ARM1176 */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	if (cputype == CPU_ID_CORTEXA5 ||
	    cputype == CPU_ID_CORTEXA7 ||
	    cputype == CPU_ID_CORTEXA8R1 ||
	    cputype == CPU_ID_CORTEXA8R2 ||
	    cputype == CPU_ID_CORTEXA8R3 ||
	    cputype == CPU_ID_CORTEXA9R1 ||
	    cputype == CPU_ID_CORTEXA9R2 ||
	    cputype == CPU_ID_CORTEXA9R3 ||
	    cputype == CPU_ID_CORTEXA12R0 ||
	    cputype == CPU_ID_CORTEXA15R0 ||
	    cputype == CPU_ID_CORTEXA15R1 ||
	    cputype == CPU_ID_CORTEXA15R2 ||
	    cputype == CPU_ID_CORTEXA15R3 ||
	    cputype == CPU_ID_KRAIT ) {
		cpufuncs = cortexa_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
		get_cachetype_cp15();

		pmap_pte_init_mmu_v6();
		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;
		goto out;
	}
#endif /* CPU_CORTEXA */

#if defined(CPU_MV_PJ4B)
	if (cputype == CPU_ID_MV88SV581X_V7 ||
	    cputype == CPU_ID_MV88SV584X_V7 ||
	    cputype == CPU_ID_ARM_88SV581X_V7) {
		cpufuncs = pj4bv7_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_mmu_v6();
		goto out;
	}
#endif /* CPU_MV_PJ4B */

#if defined(CPU_FA526) || defined(CPU_FA626TE)
	if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
		cpufuncs = fa526_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_cp15();
		pmap_pte_init_generic();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif	/* CPU_FA526 || CPU_FA626TE */

#ifdef CPU_XSCALE_80200
	if (cputype == CPU_ID_80200) {
		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;

		i80200_icu_init();

#if defined(XSCALE_CCLKCFG)
		/*
		 * Crank CCLKCFG to maximum legal value.
		 */
		__asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
			:
			: "r" (XSCALE_CCLKCFG));
#endif

		/*
		 * XXX Disable ECC in the Bus Controller Unit; we
		 * don't really support it, yet.  Clear any pending
		 * error indications.
		 */
		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
			:
			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));

		cpufuncs = xscale_cpufuncs;
		/*
		 * i80200 errata: Step-A0 and A1 have a bug where
		 * D$ dirty bits are not cleared on "invalidate by
		 * address".
		 *
		 * Workaround: Clean cache line before invalidating.
		 */
		if (rev == 0 || rev == 1)
			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_80200 */
#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_80321 */

#if defined(CPU_XSCALE_81342)
	if (cputype == CPU_ID_81342) {
		cpufuncs = xscalec3_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_81342 */
#ifdef CPU_XSCALE_PXA2X0
	/* ignore core revision to test PXA2xx CPUs */
	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {

		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif /* CPU_XSCALE_PXA2X0 */
#ifdef CPU_XSCALE_IXP425
	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
            cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {

		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		goto out;
	}
#endif /* CPU_XSCALE_IXP425 */
	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	/* NOTREACHED -- panic() does not return. */
	return(ARCHITECTURE_NOT_PRESENT);
out:
	/* Align UMA allocations to the dcache line size probed above. */
	uma_set_align(arm_dcache_align_mask);
	return (0);
}
1212
1213/*
1214 * Fixup routines for data and prefetch aborts.
1215 *
1216 * Several compile time symbols are used
1217 *
1218 * DEBUG_FAULT_CORRECTION - Print debugging information during the
1219 * correction of registers after a fault.
1220 */
1221
1222
1223/*
1224 * Null abort fixup routine.
1225 * For use when no fixup is required.
1226 */
1227int
1228cpufunc_null_fixup(arg)
1229	void *arg;
1230{
1231	return(ABORT_FIXUP_OK);
1232}
1233
1234/*
1235 * CPU Setup code
1236 */
1237
1238#if defined (CPU_ARM9) || \
1239  defined(CPU_ARM9E) || \
1240  defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||		\
1241  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
1242  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
1243  defined(CPU_ARM10) ||  defined(CPU_ARM1136) || defined(CPU_ARM1176) ||\
1244  defined(CPU_FA526) || defined(CPU_FA626TE)
1245
1246#define IGN	0
1247#define OR	1
1248#define BIC	2
1249
/*
 * Boot-option descriptor: maps a boolean boot argument name to the
 * CPU control register bits it ORs in or BICs out (see
 * parse_cpu_options()).
 */
struct cpu_option {
	char	*co_name;	/* boot option name */
	int	co_falseop;	/* IGN/OR/BIC applied when option is false */
	int	co_trueop;	/* IGN/OR/BIC applied when option is true */
	int	co_value;	/* control-register bits affected */
};
1256
1257static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
1258
1259static u_int
1260parse_cpu_options(args, optlist, cpuctrl)
1261	char *args;
1262	struct cpu_option *optlist;
1263	u_int cpuctrl;
1264{
1265	int integer;
1266
1267	if (args == NULL)
1268		return(cpuctrl);
1269
1270	while (optlist->co_name) {
1271		if (get_bootconf_option(args, optlist->co_name,
1272		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
1273			if (integer) {
1274				if (optlist->co_trueop == OR)
1275					cpuctrl |= optlist->co_value;
1276				else if (optlist->co_trueop == BIC)
1277					cpuctrl &= ~optlist->co_value;
1278			} else {
1279				if (optlist->co_falseop == OR)
1280					cpuctrl |= optlist->co_value;
1281				else if (optlist->co_falseop == BIC)
1282					cpuctrl &= ~optlist->co_value;
1283			}
1284		}
1285		++optlist;
1286	}
1287	return(cpuctrl);
1288}
1289#endif /* CPU_ARM9 || XSCALE*/
1290
1291#ifdef CPU_ARM9
/* Boot options recognized on ARM9 cores (cache and write buffer control). */
struct cpu_option arm9_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1303
/*
 * ARM9 CPU setup: build the desired control-register value, apply any
 * boot-time option overrides, flush the caches, and write the control
 * register.  The saved value is kept in the global 'ctrl'.
 */
void
arm9_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;

	/* Default control word: MMU, caches, write buffer etc. enabled. */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
	    CPU_CONTROL_ROUNDROBIN;
	/* Bits we are willing to modify in the control register. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
		 | CPU_CONTROL_ROUNDROBIN;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	cpu_control(cpuctrlmask, cpuctrl);
	ctrl = cpuctrl;

}
1343#endif	/* CPU_ARM9 */
1344
1345#if defined(CPU_ARM9E) || defined(CPU_ARM10)
/* Boot options recognized on ARM9E/ARM10 cores (cache/write buffer). */
struct cpu_option arm10_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1357
/*
 * ARM9E/ARM10 CPU setup: build the control-register value, apply
 * boot-option overrides, flush caches, write the whole control
 * register (mask 0xffffffff), and flush again.
 */
void
arm10_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
	/* NOTE(review): cpuctrlmask is computed but not used below. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Set the control register */
	ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
}
1400#endif	/* CPU_ARM9E || CPU_ARM10 */
1401
1402#if defined(CPU_ARM1136) || defined(CPU_ARM1176) \
1403 || defined(CPU_MV_PJ4B) \
1404 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Configure the performance monitor cycle counter (CCNT) via cp15,
 * optionally granting userland read/write access when
 * _PMC_USER_READ_WRITE_ is defined.  Register encodings differ
 * between ARM11 (c15) and ARMv7 (c9) cores.
 */
static __inline void
cpu_scc_setup_ccnt(void)
{
/* This is how you give userland access to the CCNT and PMCn
 * registers.
 * BEWARE! This gives write access also, which may not be what
 * you want!
 */
#ifdef _PMC_USER_READ_WRITE_
#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
	/* Use the Secure User and Non-secure Access Validation Control Register
	 * to allow userland access
	 */
	__asm volatile ("mcr	p15, 0, %0, c15, c9, 0\n\t"
			:
			: "r"(0x00000001));
#else
	/* Set PMUSERENR[0] to allow userland access */
	__asm volatile ("mcr	p15, 0, %0, c9, c14, 0\n\t"
			:
			: "r"(0x00000001));
#endif
#endif
#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
	/* Set PMCR[2,0] to enable counters and reset CCNT */
	__asm volatile ("mcr	p15, 0, %0, c15, c12, 0\n\t"
			:
			: "r"(0x00000005));
#else
	/* Set up the PMCCNTR register as a cyclecounter:
	 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
	 * Set PMCR[2,0] to enable counters and reset CCNT
	 * Set PMCNTENSET to 0x80000000 to enable CCNT */
	__asm volatile ("mcr	p15, 0, %0, c9, c14, 2\n\t"
			"mcr	p15, 0, %1, c9, c12, 0\n\t"
			"mcr	p15, 0, %2, c9, c12, 1\n\t"
			:
			: "r"(0xFFFFFFFF),
			  "r"(0x00000005),
			  "r"(0x80000000));
#endif
}
1447#endif
1448
1449#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
/* Boot options recognized on ARM11 cores (cache control only). */
struct cpu_option arm11_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1458
/*
 * ARM1136/ARM1176 CPU setup: program the control register and,
 * depending on the exact core revision, apply errata workarounds via
 * the auxiliary control register.  Finishes by enabling the cycle
 * counter through cpu_scc_setup_ccnt().
 */
void
arm11x6_setup(char *args)
{
	int cpuctrl, cpuctrl_wax;
	uint32_t auxctrl, auxctrl_wax;
	uint32_t tmp, tmp2;
	uint32_t sbz=0;
	uint32_t cpuid;

	cpuid = cpufunc_id();

	cpuctrl =
		CPU_CONTROL_MMU_ENABLE  |
		CPU_CONTROL_DC_ENABLE   |
		CPU_CONTROL_WBUF_ENABLE |
		CPU_CONTROL_32BP_ENABLE |
		CPU_CONTROL_32BD_ENABLE |
		CPU_CONTROL_LABT_ENABLE |
		CPU_CONTROL_SYST_ENABLE |
		CPU_CONTROL_IC_ENABLE;

	/*
	 * "write as existing" bits
	 * inverse of this is mask
	 */
	cpuctrl_wax =
		(3 << 30) | /* SBZ */
		(1 << 29) | /* FA */
		(1 << 28) | /* TR */
		(3 << 26) | /* SBZ */
		(3 << 19) | /* SBZ */
		(1 << 17);  /* SBZ */

	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;

	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	auxctrl = 0;
	auxctrl_wax = ~0;
	/*
	 * This option enables the workaround for the 364296 ARM1136
	 * r0pX errata (possible cache data corruption with
	 * hit-under-miss enabled). It sets the undocumented bit 31 in
	 * the auxiliary control register and the FI bit in the control
	 * register, thus disabling hit-under-miss without putting the
	 * processor into full low interrupt latency mode. ARM11MPCore
	 * is not affected.
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
		cpuctrl |= CPU_CONTROL_FI_ENABLE;
		auxctrl = ARM1136_AUXCTL_PFI;
		auxctrl_wax = ~ARM1136_AUXCTL_PFI;
	}

	/*
	 * Enable an errata workaround
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
		auxctrl = ARM1176_AUXCTL_PHD;
		auxctrl_wax = ~ARM1176_AUXCTL_PHD;
	}

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));

	/* Allow detection code to find the VFP if it's fitted.  */
	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));

	/* Set the control register */
	ctrl = cpuctrl;
	cpu_control(~cpuctrl_wax, cpuctrl);

	/*
	 * Read-modify-write of the auxiliary control register:
	 * keep the "write as existing" bits, OR in the workaround
	 * bits, and write back only if the value actually changed
	 * (the teq/mcrne pair skips the write when equal).
	 */
	__asm volatile ("mrc	p15, 0, %0, c1, c0, 1\n\t"
			"and	%1, %0, %2\n\t"
			"orr	%1, %1, %3\n\t"
			"teq	%0, %1\n\t"
			"mcrne	p15, 0, %1, c1, c0, 1\n\t"
			: "=r"(tmp), "=r"(tmp2) :
			  "r"(auxctrl_wax), "r"(auxctrl));

	/* And again. */
	cpu_idcache_wbinv_all();

	cpu_scc_setup_ccnt();
}
1555#endif  /* CPU_ARM1136 || CPU_ARM1176 */
1556
1557#ifdef CPU_MV_PJ4B
1558void
1559pj4bv7_setup(args)
1560	char *args;
1561{
1562	int cpuctrl;
1563
1564	pj4b_config();
1565
1566	cpuctrl = CPU_CONTROL_MMU_ENABLE;
1567#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1568	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1569#endif
1570	cpuctrl |= CPU_CONTROL_DC_ENABLE;
1571	cpuctrl |= (0xf << 3);
1572	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
1573	cpuctrl |= CPU_CONTROL_IC_ENABLE;
1574	if (vector_page == ARM_VECTORS_HIGH)
1575		cpuctrl |= CPU_CONTROL_VECRELOC;
1576	cpuctrl |= (0x5 << 16) | (1 < 22);
1577	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
1578
1579	/* Clear out the cache */
1580	cpu_idcache_wbinv_all();
1581
1582	/* Set the control register */
1583	ctrl = cpuctrl;
1584	cpu_control(0xFFFFFFFF, cpuctrl);
1585
1586	/* And again. */
1587	cpu_idcache_wbinv_all();
1588
1589	cpu_scc_setup_ccnt();
1590}
1591#endif /* CPU_MV_PJ4B */
1592
1593#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
1594
/*
 * Cortex-A / Krait CPU setup: program MMU, caches, and branch
 * prediction via the control register, optionally enable SMP mode
 * and TLB-op broadcasting through the auxiliary control register,
 * and enable the cycle counter.  The 'args' boot string is not
 * consulted here.
 */
void
cortexa_setup(char *args)
{
	int cpuctrl, cpuctrlmask;

	cpuctrlmask = CPU_CONTROL_MMU_ENABLE |     /* MMU enable         [0] */
	    CPU_CONTROL_AFLT_ENABLE |    /* Alignment fault    [1] */
	    CPU_CONTROL_DC_ENABLE |      /* DCache enable      [2] */
	    CPU_CONTROL_BPRD_ENABLE |    /* Branch prediction [11] */
	    CPU_CONTROL_IC_ENABLE |      /* ICache enable     [12] */
	    CPU_CONTROL_VECRELOC;        /* Vector relocation [13] */

	cpuctrl = CPU_CONTROL_MMU_ENABLE |
	    CPU_CONTROL_IC_ENABLE |
	    CPU_CONTROL_DC_ENABLE |
	    CPU_CONTROL_BPRD_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	/* Switch to big endian */
#ifdef __ARMEB__
	/*
	 * NOTE(review): CPU_CONTROL_BEND_ENABLE is set in cpuctrl here
	 * but is absent from cpuctrlmask above, so cpu_control() below
	 * may not apply it -- confirm against cpu_control()'s masking
	 * semantics.
	 */
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Check if the vector page is at the high address (0xffff0000) */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	ctrl = cpuctrl;
	cpu_control(cpuctrlmask, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
#ifdef SMP
	armv7_auxctrl((1 << 6) | (1 << 0), (1 << 6) | (1 << 0)); /* Enable SMP + TLB broadcasting  */
#endif

	cpu_scc_setup_ccnt();
}
1640#endif  /* CPU_CORTEXA */
1641
1642#if defined(CPU_FA526) || defined(CPU_FA626TE)
/* Boot options recognized on Faraday FA526/FA626TE cores. */
struct cpu_option fa526_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE |
					   CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE |
					   CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE |
					   CPU_CONTROL_DC_ENABLE) },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1657
/*
 * Faraday FA526/FA626TE CPU setup: build the control word, apply
 * boot-option overrides, flush caches, then write the full control
 * register.
 */
void
fa526_setup(char *args)
{
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
		| CPU_CONTROL_BPRD_ENABLE;
	/* NOTE(review): cpuctrlmask is computed but not used below. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
}
1696#endif	/* CPU_FA526 || CPU_FA626TE */
1697
1698#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1699  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1700  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
/* Boot options recognized on XScale cores (cache and branch prediction). */
struct cpu_option xscale_options[] = {
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1715
/*
 * XScale CPU setup: build the control word, apply boot-option
 * overrides, flush caches, write the control register, and configure
 * write coalescing (and, on core3, extra auxiliary-control bits) via
 * the cp15 auxiliary control register.
 */
void
xscale_setup(args)
	char *args;
{
	uint32_t auxctl;
	int cpuctrl, cpuctrlmask;

	/*
	 * The XScale Write Buffer is always enabled.  Our option
	 * is to enable/disable coalescing.  Note that bits 6:3
	 * must always be enabled.
	 */

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE;
	/* NOTE(review): cpuctrlmask is unused; the commented-out
	 * cpu_control() call below was its consumer. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
		 CPU_CONTROL_L2_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#ifdef CPU_XSCALE_CORE3
	cpuctrl |= CPU_CONTROL_L2_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register.  Note that bits 6:3 must always
	 * be set to 1.
	 */
	ctrl = cpuctrl;
/*	cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/* Make sure write coalescing is turned on */
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
		: "=r" (auxctl));
#ifdef XSCALE_NO_COALESCE_WRITES
	auxctl |= XSCALE_AUXCTL_K;
#else
	auxctl &= ~XSCALE_AUXCTL_K;
#endif
#ifdef CPU_XSCALE_CORE3
	auxctl |= XSCALE_AUXCTL_LLR;
	auxctl |= XSCALE_AUXCTL_MD_MASK;
#endif
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
		: : "r" (auxctl));
}
1785#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
1786	   CPU_XSCALE_80219 */
1787