cpufunc.c revision 197733
1/*	$NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $	*/
2
3/*-
4 * arm7tdmi support code Copyright (c) 2001 John Fremlin
5 * arm8 support code Copyright (c) 1997 ARM Limited
6 * arm8 support code Copyright (c) 1997 Causality Limited
7 * arm9 support code Copyright (C) 2001 ARM Ltd
8 * Copyright (c) 1997 Mark Brinicombe.
9 * Copyright (c) 1997 Causality Limited
10 * All rights reserved.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 *    notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 *    notice, this list of conditions and the following disclaimer in the
19 *    documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 *    must display the following acknowledgement:
22 *	This product includes software developed by Causality Limited.
23 * 4. The name of Causality Limited may not be used to endorse or promote
24 *    products derived from this software without specific prior written
25 *    permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * SUCH DAMAGE.
38 *
39 * RiscBSD kernel project
40 *
41 * cpufuncs.c
42 *
43 * C functions for supporting CPU / MMU / TLB specific operations.
44 *
45 * Created      : 30/01/97
46 */
47#include <sys/cdefs.h>
48__FBSDID("$FreeBSD: head/sys/arm/arm/cpufunc.c 197733 2009-10-03 13:59:15Z rpaulo $");
49
50#include <sys/param.h>
51#include <sys/systm.h>
52#include <sys/lock.h>
53#include <sys/mutex.h>
54#include <sys/bus.h>
55#include <machine/bus.h>
56#include <machine/cpu.h>
57#include <machine/disassem.h>
58
59#include <vm/vm.h>
60#include <vm/pmap.h>
61#include <vm/uma.h>
62
63#include <machine/cpuconf.h>
64#include <machine/cpufunc.h>
65#include <machine/bootconfig.h>
66
67#ifdef CPU_XSCALE_80200
68#include <arm/xscale/i80200/i80200reg.h>
69#include <arm/xscale/i80200/i80200var.h>
70#endif
71
72#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
73#include <arm/xscale/i80321/i80321reg.h>
74#include <arm/xscale/i80321/i80321var.h>
75#endif
76
77#if defined(CPU_XSCALE_81342)
78#include <arm/xscale/i8134x/i81342reg.h>
79#endif
80
81#ifdef CPU_XSCALE_IXP425
82#include <arm/xscale/ixp425/ixp425reg.h>
83#include <arm/xscale/ixp425/ixp425var.h>
84#endif
85
86/* PRIMARY CACHE VARIABLES */
/* PRIMARY CACHE VARIABLES */
int	arm_picache_size;	/* primary I-cache size, in bytes */
int	arm_picache_line_size;	/* primary I-cache line size, in bytes */
int	arm_picache_ways;	/* primary I-cache associativity */

int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;	/* primary D-cache (or unified) line size */
int	arm_pdcache_ways;	/* primary D-cache (or unified) associativity */

int	arm_pcache_type;	/* cache type, CPU_CT_CTYPE_* */
int	arm_pcache_unified;	/* non-zero if I and D caches are unified */

int	arm_dcache_align;	/* D-cache line size used for alignment */
int	arm_dcache_align_mask;	/* arm_dcache_align - 1 */

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;
int ctrl;	/* NOTE(review): presumably caches the CP15 control register; set elsewhere — confirm */
104
105#ifdef CPU_ARM7TDMI
/*
 * Function table for the ARM7TDMI core.  Entries appear in struct
 * cpu_functions member order; the trailing comment on each line names
 * the member being initialized.  The (void *) casts adapt nullop/flushID
 * routines to member slots with different argument signatures.
 */
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm7tdmi_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm7tdmi_tlb_flushID,		/* tlb_flushID		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushID_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushI		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushI_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushD		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm7tdmi_cache_flushID,		/* dcache_wbinv_all	*/
	(void *)arm7tdmi_cache_flushID,	/* dcache_wbinv_range	*/
	(void *)arm7tdmi_cache_flushID,	/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	arm7tdmi_cache_flushID,		/* idcache_wbinv_all	*/
	(void *)arm7tdmi_cache_flushID,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	late_abort_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm7tdmi_context_switch,	/* context_switch	*/

	arm7tdmi_setup			/* cpu setup		*/

};
165#endif	/* CPU_ARM7TDMI */
166
167#ifdef CPU_ARM8
/*
 * Function table for the ARM8 core.  Entries appear in struct
 * cpu_functions member order; the trailing comment names the member.
 */
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm8_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm8_tlb_flushID,		/* tlb_flushID		*/
	arm8_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	arm8_tlb_flushID,		/* tlb_flushI		*/
	arm8_tlb_flushID_SE,		/* tlb_flushI_SE	*/
	arm8_tlb_flushID,		/* tlb_flushD		*/
	arm8_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm8_cache_purgeID,		/* dcache_wbinv_all	*/
	(void *)arm8_cache_purgeID,	/* dcache_wbinv_range	*/
/*XXX*/	(void *)arm8_cache_purgeID,	/* dcache_inv_range	*/
	(void *)arm8_cache_cleanID,	/* dcache_wb_range	*/

	arm8_cache_purgeID,		/* idcache_wbinv_all	*/
	(void *)arm8_cache_purgeID,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm8_context_switch,		/* context_switch	*/

	arm8_setup			/* cpu setup		*/
};
226#endif	/* CPU_ARM8 */
227
228#ifdef CPU_ARM9
/*
 * Function table for the ARM9 core (ARMv4 TLB ops, ARM9-specific
 * cache ops).  Entries appear in struct cpu_functions member order.
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	arm9_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm9_icache_sync_all,		/* icache_sync_all	*/
	arm9_icache_sync_range,		/* icache_sync_range	*/

	arm9_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	arm9_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	arm9_dcache_inv_range,		/* dcache_inv_range	*/
	arm9_dcache_wb_range,		/* dcache_wb_range	*/

	arm9_idcache_wbinv_all,		/* idcache_wbinv_all	*/
	arm9_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm9_context_switch,		/* context_switch	*/

	arm9_setup			/* cpu setup		*/

};
288#endif /* CPU_ARM9 */
289
290#if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * Function table for ARMv5 "E"-class cores (ARM926EJ-S, ARM1026EJ-S).
 * Entries appear in struct cpu_functions member order.
 */
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	armv5_ec_setttb,		/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_all,	/* icache_sync_all	*/
	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	armv5_ec_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv5_ec_dcache_inv_range,	/* dcache_inv_range	*/
	armv5_ec_dcache_wb_range,	/* dcache_wb_range	*/

	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv5_ec_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/

};
351
/*
 * Function table for Marvell Sheeva cores (MV88FR131/571): ARMv5-EC
 * core ops plus Sheeva-specific range and L2 cache operations.
 * Entries appear in struct cpu_functions member order.
 */
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	sheeva_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_all,	/* icache_sync_all	*/
	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	sheeva_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	sheeva_dcache_inv_range,	/* dcache_inv_range	*/
	sheeva_dcache_wb_range,		/* dcache_wb_range	*/

	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	sheeva_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	sheeva_l2cache_wbinv_all,	/* l2cache_wbinv_all	*/
	sheeva_l2cache_wbinv_range,	/* l2cache_wbinv_range	*/
	sheeva_l2cache_inv_range,	/* l2cache_inv_range	*/
	sheeva_l2cache_wb_range,	/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/
};
411#endif /* CPU_ARM9E || CPU_ARM10 */
412
413#ifdef CPU_ARM10
/*
 * Function table for the ARM10 core.  Entries appear in struct
 * cpu_functions member order.
 */
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	arm10_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm10_icache_sync_all,		/* icache_sync_all	*/
	arm10_icache_sync_range,	/* icache_sync_range	*/

	arm10_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	arm10_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	arm10_dcache_inv_range,		/* dcache_inv_range	*/
	arm10_dcache_wb_range,		/* dcache_wb_range	*/

	arm10_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	arm10_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/

};
473#endif /* CPU_ARM10 */
474
475#ifdef CPU_SA110
/*
 * Function table for the StrongARM SA-110 (SA-1 core cache ops,
 * ARMv4 TLB ops).  Entries appear in struct cpu_functions member order.
 */
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	sa110_context_switch,		/* context_switch	*/

	sa110_setup			/* cpu setup		*/
};
534#endif	/* CPU_SA110 */
535
536#if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * Function table for the StrongARM SA-1100/SA-1110; identical to
 * SA-110 except for the read-buffer drain, sleep, context-switch and
 * setup entries.  Entries appear in struct cpu_functions member order.
 */
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	sa11x0_drain_readbuf,		/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	sa11x0_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	sa11x0_context_switch,		/* context_switch	*/

	sa11x0_setup			/* cpu setup		*/
};
595#endif	/* CPU_SA1100 || CPU_SA1110 */
596
597#ifdef CPU_IXP12X0
/*
 * Function table for the Intel IXP1200 (StrongARM-derived core).
 * Entries appear in struct cpu_functions member order.
 */
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	ixp12x0_drain_readbuf,		/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	ixp12x0_context_switch,		/* context_switch	*/

	ixp12x0_setup			/* cpu setup		*/
};
656#endif	/* CPU_IXP12X0 */
657
658#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
659  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
660  defined(CPU_XSCALE_80219)
661
/*
 * Function table shared by XScale cores (80200, 80321, 80219, PXA2x0,
 * IXP425).  Note the non-null cpwait: XScale requires a coprocessor
 * drain after CP15 writes.  Entries appear in struct cpu_functions
 * member order.
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscale_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscale_cache_syncI,		/* icache_sync_all	*/
	xscale_cache_syncI_rng,		/* icache_sync_range	*/

	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all 	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscale_context_switch,		/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
720#endif
721/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
722   CPU_XSCALE_80219 */
723
724#ifdef CPU_XSCALE_81342
/*
 * Function table for the XScale core 3 (81342), which has a real L2
 * cache — the only table here with non-null l2cache entries.
 * Entries appear in struct cpu_functions member order.
 */
struct cpu_functions xscalec3_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscalec3_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscalec3_cache_syncI,		/* icache_sync_all	*/
	xscalec3_cache_syncI_rng,	/* icache_sync_range	*/

	xscalec3_cache_purgeD,		/* dcache_wbinv_all	*/
	xscalec3_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscalec3_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscalec3_cache_purgeID,		/* idcache_wbinv_all	*/
	xscalec3_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	xscalec3_l2cache_purge,		/* l2cache_wbinv_all	*/
	xscalec3_l2cache_purge_rng,	/* l2cache_wbinv_range	*/
	xscalec3_l2cache_flush_rng,	/* l2cache_inv_range	*/
	xscalec3_l2cache_clean_rng,	/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscalec3_context_switch,	/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
783#endif /* CPU_XSCALE_81342 */
784/*
785 * Global constants also used by locore.s
786 */
787
struct cpu_functions cpufuncs;		/* active table, selected by set_cpufuncs() */
u_int cputype;				/* CPU id, masked with CPU_ID_CPU_MASK */
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
791
792#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
793  defined (CPU_ARM9E) || defined (CPU_ARM10) ||			       \
794  defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||	       \
795  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||	       \
796  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
797
static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int	arm_dcache_l2_nsets;	/* log2 of number of D-cache sets */
static int	arm_dcache_l2_assoc;	/* log2 of D-cache associativity */
static int	arm_dcache_l2_linesize;	/* log2 of D-cache line size */
805
806static void
807get_cachetype_cp15()
808{
809	u_int ctype, isize, dsize;
810	u_int multiplier;
811
812	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
813		: "=r" (ctype));
814
815	/*
816	 * ...and thus spake the ARM ARM:
817	 *
818	 * If an <opcode2> value corresponding to an unimplemented or
819	 * reserved ID register is encountered, the System Control
820	 * processor returns the value of the main ID register.
821	 */
822	if (ctype == cpufunc_id())
823		goto out;
824
825	if ((ctype & CPU_CT_S) == 0)
826		arm_pcache_unified = 1;
827
828	/*
829	 * If you want to know how this code works, go read the ARM ARM.
830	 */
831
832	arm_pcache_type = CPU_CT_CTYPE(ctype);
833
834	if (arm_pcache_unified == 0) {
835		isize = CPU_CT_ISIZE(ctype);
836		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
837		arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
838		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
839			if (isize & CPU_CT_xSIZE_M)
840				arm_picache_line_size = 0; /* not present */
841			else
842				arm_picache_ways = 1;
843		} else {
844			arm_picache_ways = multiplier <<
845			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
846		}
847		arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
848	}
849
850	dsize = CPU_CT_DSIZE(ctype);
851	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
852	arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
853	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
854		if (dsize & CPU_CT_xSIZE_M)
855			arm_pdcache_line_size = 0; /* not present */
856		else
857			arm_pdcache_ways = 1;
858	} else {
859		arm_pdcache_ways = multiplier <<
860		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
861	}
862	arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
863
864	arm_dcache_align = arm_pdcache_line_size;
865
866	arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
867	arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
868	arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
869	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
870
871 out:
872	arm_dcache_align_mask = arm_dcache_align - 1;
873}
874#endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
875
876#if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
877    defined(CPU_IXP12X0)
878/* Cache information for CPUs without cache type registers. */
/* Cache information for CPUs without cache type registers. */
struct cachetab {
	u_int32_t ct_cpuid;		/* CPU id (after CPU_ID_CPU_MASK) */
	int	ct_pcache_type;		/* cache type, CPU_CT_CTYPE_* */
	int	ct_pcache_unified;	/* non-zero if I/D unified */
	int	ct_pdcache_size;	/* D-cache size, bytes */
	int	ct_pdcache_line_size;	/* D-cache line size, bytes */
	int	ct_pdcache_ways;	/* D-cache associativity */
	int	ct_picache_size;	/* I-cache size, bytes */
	int	ct_picache_line_size;	/* I-cache line size, bytes */
	int	ct_picache_ways;	/* I-cache associativity */
};
890
/* Table is scanned by get_cachetype_table(); a zero cpuid terminates it. */
struct cachetab cachetab[] = {
    /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0}
};
900
901static void get_cachetype_table(void);
902
903static void
904get_cachetype_table()
905{
906	int i;
907	u_int32_t cpuid = cpufunc_id();
908
909	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
910		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
911			arm_pcache_type = cachetab[i].ct_pcache_type;
912			arm_pcache_unified = cachetab[i].ct_pcache_unified;
913			arm_pdcache_size = cachetab[i].ct_pdcache_size;
914			arm_pdcache_line_size =
915			    cachetab[i].ct_pdcache_line_size;
916			arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
917			arm_picache_size = cachetab[i].ct_picache_size;
918			arm_picache_line_size =
919			    cachetab[i].ct_picache_line_size;
920			arm_picache_ways = cachetab[i].ct_picache_ways;
921		}
922	}
923	arm_dcache_align = arm_pdcache_line_size;
924
925	arm_dcache_align_mask = arm_dcache_align - 1;
926}
927
#endif /* SA110 || SA1100 || SA1110 || IXP12X0 */
929
930/*
931 * Cannot panic here as we may not have a console yet ...
932 */
933
934int
935set_cpufuncs()
936{
937	cputype = cpufunc_id();
938	cputype &= CPU_ID_CPU_MASK;
939
940	/*
941	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
942	 * CPU type where we want to use it by default, then we set it.
943	 */
944
945#ifdef CPU_ARM7TDMI
946	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
947	    CPU_ID_IS7(cputype) &&
948	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
949		cpufuncs = arm7tdmi_cpufuncs;
950		cpu_reset_needs_v4_MMU_disable = 0;
951		get_cachetype_cp15();
952		pmap_pte_init_generic();
953		goto out;
954	}
955#endif
956#ifdef CPU_ARM8
957	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
958	    (cputype & 0x0000f000) == 0x00008000) {
959		cpufuncs = arm8_cpufuncs;
960		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
961		get_cachetype_cp15();
962		pmap_pte_init_arm8();
963		goto out;
964	}
965#endif	/* CPU_ARM8 */
966#ifdef CPU_ARM9
967	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
968	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
969	    (cputype & 0x0000f000) == 0x00009000) {
970		cpufuncs = arm9_cpufuncs;
971		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
972		get_cachetype_cp15();
973		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
974		arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
975		    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
976		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
977		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
978#ifdef ARM9_CACHE_WRITE_THROUGH
979		pmap_pte_init_arm9();
980#else
981		pmap_pte_init_generic();
982#endif
983		goto out;
984	}
985#endif /* CPU_ARM9 */
986#if defined(CPU_ARM9E) || defined(CPU_ARM10)
987	if (cputype == CPU_ID_ARM926EJS || cputype == CPU_ID_ARM1026EJS ||
988	    cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
989	    cputype == CPU_ID_MV88FR571_41) {
990		if (cputype == CPU_ID_MV88FR131 ||
991		    cputype == CPU_ID_MV88FR571_VD ||
992		    cputype == CPU_ID_MV88FR571_41) {
993
994			cpufuncs = sheeva_cpufuncs;
995			/*
996			 * Workaround for Marvell MV78100 CPU: Cache prefetch
997			 * mechanism may affect the cache coherency validity,
998			 * so it needs to be disabled.
999			 *
1000			 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
1001			 * L2 Prefetching Mechanism) for details.
1002			 */
1003			if (cputype == CPU_ID_MV88FR571_VD ||
1004			    cputype == CPU_ID_MV88FR571_41) {
1005				sheeva_control_ext(0xffffffff,
1006				    FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN |
1007				    FC_BRANCH_TARG_BUF_DIS | FC_L2CACHE_EN |
1008				    FC_L2_PREF_DIS);
1009			} else {
1010				sheeva_control_ext(0xffffffff,
1011				    FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN |
1012				    FC_BRANCH_TARG_BUF_DIS | FC_L2CACHE_EN);
1013			}
1014		} else
1015			cpufuncs = armv5_ec_cpufuncs;
1016
1017		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1018		get_cachetype_cp15();
1019		pmap_pte_init_generic();
1020		goto out;
1021	}
1022#endif /* CPU_ARM9E || CPU_ARM10 */
1023#ifdef CPU_ARM10
1024	if (/* cputype == CPU_ID_ARM1020T || */
1025	    cputype == CPU_ID_ARM1020E) {
1026		/*
1027		 * Select write-through cacheing (this isn't really an
1028		 * option on ARM1020T).
1029		 */
1030		cpufuncs = arm10_cpufuncs;
1031		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1032		get_cachetype_cp15();
1033		arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
1034		arm10_dcache_sets_max =
1035		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
1036		    arm10_dcache_sets_inc;
1037		arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
1038		arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
1039		pmap_pte_init_generic();
1040		goto out;
1041	}
1042#endif /* CPU_ARM10 */
1043#ifdef CPU_SA110
1044	if (cputype == CPU_ID_SA110) {
1045		cpufuncs = sa110_cpufuncs;
1046		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
1047		get_cachetype_table();
1048		pmap_pte_init_sa1();
1049		goto out;
1050	}
1051#endif	/* CPU_SA110 */
1052#ifdef CPU_SA1100
1053	if (cputype == CPU_ID_SA1100) {
1054		cpufuncs = sa11x0_cpufuncs;
1055		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
1056		get_cachetype_table();
1057		pmap_pte_init_sa1();
1058		/* Use powersave on this CPU. */
1059		cpu_do_powersave = 1;
1060
1061		goto out;
1062	}
1063#endif	/* CPU_SA1100 */
1064#ifdef CPU_SA1110
1065	if (cputype == CPU_ID_SA1110) {
1066		cpufuncs = sa11x0_cpufuncs;
1067		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
1068		get_cachetype_table();
1069		pmap_pte_init_sa1();
1070		/* Use powersave on this CPU. */
1071		cpu_do_powersave = 1;
1072
1073		goto out;
1074	}
1075#endif	/* CPU_SA1110 */
1076#ifdef CPU_IXP12X0
1077        if (cputype == CPU_ID_IXP1200) {
1078                cpufuncs = ixp12x0_cpufuncs;
1079                cpu_reset_needs_v4_MMU_disable = 1;
1080                get_cachetype_table();
1081                pmap_pte_init_sa1();
1082		goto out;
1083        }
1084#endif  /* CPU_IXP12X0 */
1085#ifdef CPU_XSCALE_80200
1086	if (cputype == CPU_ID_80200) {
1087		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
1088
1089		i80200_icu_init();
1090
1091#if defined(XSCALE_CCLKCFG)
1092		/*
1093		 * Crank CCLKCFG to maximum legal value.
1094		 */
1095		__asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
1096			:
1097			: "r" (XSCALE_CCLKCFG));
1098#endif
1099
1100		/*
1101		 * XXX Disable ECC in the Bus Controller Unit; we
1102		 * don't really support it, yet.  Clear any pending
1103		 * error indications.
1104		 */
1105		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
1106			:
1107			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
1108
1109		cpufuncs = xscale_cpufuncs;
1110		/*
1111		 * i80200 errata: Step-A0 and A1 have a bug where
1112		 * D$ dirty bits are not cleared on "invalidate by
1113		 * address".
1114		 *
1115		 * Workaround: Clean cache line before invalidating.
1116		 */
1117		if (rev == 0 || rev == 1)
1118			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
1119
1120		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1121		get_cachetype_cp15();
1122		pmap_pte_init_xscale();
1123		goto out;
1124	}
1125#endif /* CPU_XSCALE_80200 */
1126#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
1127	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
1128	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
1129	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
1130		cpufuncs = xscale_cpufuncs;
1131		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1132		get_cachetype_cp15();
1133		pmap_pte_init_xscale();
1134		goto out;
1135	}
1136#endif /* CPU_XSCALE_80321 */
1137
1138#if defined(CPU_XSCALE_81342)
1139	if (cputype == CPU_ID_81342) {
1140		cpufuncs = xscalec3_cpufuncs;
1141		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1142		get_cachetype_cp15();
1143		pmap_pte_init_xscale();
1144		goto out;
1145	}
1146#endif /* CPU_XSCALE_81342 */
1147#ifdef CPU_XSCALE_PXA2X0
1148	/* ignore core revision to test PXA2xx CPUs */
1149	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
1150	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
1151	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
1152
1153		cpufuncs = xscale_cpufuncs;
1154		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1155		get_cachetype_cp15();
1156		pmap_pte_init_xscale();
1157
1158		/* Use powersave on this CPU. */
1159		cpu_do_powersave = 1;
1160
1161		goto out;
1162	}
1163#endif /* CPU_XSCALE_PXA2X0 */
1164#ifdef CPU_XSCALE_IXP425
1165	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
1166            cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
1167
1168		cpufuncs = xscale_cpufuncs;
1169		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1170		get_cachetype_cp15();
1171		pmap_pte_init_xscale();
1172
1173		goto out;
1174	}
1175#endif /* CPU_XSCALE_IXP425 */
1176	/*
1177	 * Bzzzz. And the answer was ...
1178	 */
1179	panic("No support for this CPU type (%08x) in kernel", cputype);
1180	return(ARCHITECTURE_NOT_PRESENT);
1181out:
1182	uma_set_align(arm_dcache_align_mask);
1183	return (0);
1184}
1185
1186/*
1187 * Fixup routines for data and prefetch aborts.
1188 *
1189 * Several compile time symbols are used
1190 *
1191 * DEBUG_FAULT_CORRECTION - Print debugging information during the
1192 * correction of registers after a fault.
1193 * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
1194 * when defined should use late aborts
1195 */
1196
1197
1198/*
1199 * Null abort fixup routine.
1200 * For use when no fixup is required.
1201 */
1202int
1203cpufunc_null_fixup(arg)
1204	void *arg;
1205{
1206	return(ABORT_FIXUP_OK);
1207}
1208
1209
1210#if defined(CPU_ARM7TDMI)
1211
1212#ifdef DEBUG_FAULT_CORRECTION
1213#define DFC_PRINTF(x)		printf x
1214#define DFC_DISASSEMBLE(x)	disassemble(x)
1215#else
1216#define DFC_PRINTF(x)		/* nothing */
1217#define DFC_DISASSEMBLE(x)	/* nothing */
1218#endif
1219
1220/*
1221 * "Early" data abort fixup.
1222 *
1223 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
1224 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
1225 *
1226 * In early aborts, we may have to fix up LDM, STM, LDC and STC.
1227 */
int
early_abort_fixup(arg)
	void *arg;
{
	trapframe_t *frame = arg;
	u_int fault_pc;
	u_int fault_instruction;
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	/* Bits 27-25 == 100: block data transfer (LDM/STM). */
	if ((fault_instruction & 0x0e000000) == 0x08000000) {
		int base;
		int loop;
		int count;
		int *registers = &frame->tf_r0;

		DFC_PRINTF(("LDM/STM\n"));
		DFC_DISASSEMBLE(fault_pc);
		/* Bit 21 (W) set: base-register writeback must be undone. */
		if (fault_instruction & (1 << 21)) {
			DFC_PRINTF(("This instruction must be corrected\n"));
			base = (fault_instruction >> 16) & 0x0f;
			/* Writeback to the PC cannot be fixed up. */
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			/* Count registers transferred */
			count = 0;
			for (loop = 0; loop < 16; ++loop) {
				if (fault_instruction & (1<<loop))
					++count;
			}
			DFC_PRINTF(("%d registers used\n", count));
			DFC_PRINTF(("Corrected r%d by %d bytes ",
				       base, count * 4));
			/*
			 * Undo the writeback: bit 23 (U) set means the
			 * instruction added 4 bytes per register, so
			 * subtract to restore the base, and vice versa.
			 */
			if (fault_instruction & (1 << 23)) {
				DFC_PRINTF(("down\n"));
				registers[base] -= count * 4;
			} else {
				DFC_PRINTF(("up\n"));
				registers[base] += count * 4;
			}
		}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
		/* Bits 27-25 == 110: coprocessor transfer (LDC/STC). */
		int base;
		int offset;
		int *registers = &frame->tf_r0;

		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */

		DFC_DISASSEMBLE(fault_pc);

		/* Only need to fix registers if write back is turned on */

		if ((fault_instruction & (1 << 21)) != 0) {
			base = (fault_instruction >> 16) & 0x0f;
			/*
			 * The trapframe has no SVC r13, so writeback to
			 * r13 while in SVC mode cannot be corrected.
			 */
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;

			/* 8-bit immediate, scaled to a word offset. */
			offset = (fault_instruction & 0xff) << 2;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
			/* U bit set: the instruction added, so subtract. */
			if ((fault_instruction & (1 << 23)) != 0)
				offset = -offset;
			registers[base] += offset;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
		/*
		 * NOTE(review): this condition is identical to the one in
		 * the previous else-if, so this branch is unreachable dead
		 * code; it presumably meant to match a different opcode
		 * class -- TODO confirm against the ARM ARM encodings.
		 */
		return ABORT_FIXUP_FAILED;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	return(ABORT_FIXUP_OK);
}
1357#endif	/* CPU_ARM2/250/3/6/7 */
1358
1359
1360#if defined(CPU_ARM7TDMI)
1361/*
1362 * "Late" (base updated) data abort fixup
1363 *
1364 * For ARM6 (in late-abort mode) and ARM7.
1365 *
1366 * In this model, all data-transfer instructions need fixing up.  We defer
1367 * LDM, STM, LDC and STC fixup to the early-abort handler.
1368 */
int
late_abort_fixup(arg)
	void *arg;
{
	trapframe_t *frame = arg;
	u_int fault_pc;
	u_int fault_instruction;
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	/* Was it a swap (SWP/SWPB) instruction?  No base writeback. */

	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
		DFC_DISASSEMBLE(fault_pc);
	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {

		/* Was it a ldr/str (single data transfer) instruction? */
		/* This is for late abort only */

		int base;
		int offset;
		int *registers = &frame->tf_r0;

		DFC_DISASSEMBLE(fault_pc);

		/* This is for late abort only */

		/*
		 * Base was updated if post-indexed (P bit 24 clear) or
		 * writeback was requested (W bit 21 set).
		 */
		if ((fault_instruction & (1 << 24)) == 0
		    || (fault_instruction & (1 << 21)) != 0) {
			/* postindexed ldr/str with no writeback */

			base = (fault_instruction >> 16) & 0x0f;
			/*
			 * The trapframe has no SVC r13, so writeback to
			 * r13 while in SVC mode cannot be corrected.
			 */
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			DFC_PRINTF(("late abt fix: r%d=%08x : ",
				       base, registers[base]));
			/* I bit 25 clear: 12-bit immediate offset. */
			if ((fault_instruction & (1 << 25)) == 0) {
				/* Immediate offset - easy */

				offset = fault_instruction & 0xfff;
				/* U bit set: instruction added; undo it. */
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				registers[base] += offset;
				DFC_PRINTF(("imm=%08x ", offset));
			} else {
				/* offset is a shifted register */
				int shift;

				offset = fault_instruction & 0x0f;
				/* Base also the offset register: ambiguous. */
				if (offset == base)
					return ABORT_FIXUP_FAILED;

				/*
				 * Register offset - hard we have to
				 * cope with shifts !
				 */
				offset = registers[offset];

				if ((fault_instruction & (1 << 4)) == 0)
					/* shift with amount */
					shift = (fault_instruction >> 7) & 0x1f;
				else {
					/* shift with register */
					if ((fault_instruction & (1 << 7)) != 0)
						/* undefined for now so bail out */
						return ABORT_FIXUP_FAILED;
					shift = ((fault_instruction >> 8) & 0xf);
					/* Base used as shift amount: ambiguous. */
					if (base == shift)
						return ABORT_FIXUP_FAILED;
					DFC_PRINTF(("shift reg=%d ", shift));
					shift = registers[shift];
				}
				DFC_PRINTF(("shift=%08x ", shift));
				/* Bits 6-5 select the shift type. */
				switch (((fault_instruction >> 5) & 0x3)) {
				case 0 : /* Logical left */
					offset = (int)(((u_int)offset) << shift);
					break;
				case 1 : /* Logical Right */
					if (shift == 0) shift = 32;
					offset = (int)(((u_int)offset) >> shift);
					break;
				case 2 : /* Arithmetic Right */
					if (shift == 0) shift = 32;
					offset = (int)(((int)offset) >> shift);
					break;
				case 3 : /* Rotate right (rol or rxx) */
					/* Not handled; give up on the fixup. */
					return ABORT_FIXUP_FAILED;
					break;
				}

				DFC_PRINTF(("abt: fixed LDR/STR with "
					       "register offset\n"));
				/* U bit set: instruction added; undo it. */
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				DFC_PRINTF(("offset=%08x ", offset));
				registers[base] += offset;
			}
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	}

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/*
	 * Now let the early-abort fixup routine have a go, in case it
	 * was an LDM, STM, LDC or STC that faulted.
	 */

	return early_abort_fixup(arg);
}
1538#endif	/* CPU_ARM7TDMI */
1539
1540/*
1541 * CPU Setup code
1542 */
1543
1544#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined (CPU_ARM9) || \
1545  defined(CPU_ARM9E) || \
1546  defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) ||	\
1547  defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||		\
1548  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
1549  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
1550  defined(CPU_ARM10) ||  defined(CPU_ARM11)
1551
#define IGN	0	/* take no action for this boolean value */
#define OR	1	/* OR co_value into the control register */
#define BIC	2	/* bit-clear co_value from the control register */

/*
 * Maps a boot-argument name to the CPU control register bit(s) it
 * affects.  co_falseop/co_trueop select the action (IGN/OR/BIC)
 * applied when the option is given as false or true respectively.
 */
struct cpu_option {
	char	*co_name;	/* boot option name, e.g. "cpu.cache" */
	int	co_falseop;	/* action when option value is false */
	int	co_trueop;	/* action when option value is true */
	int	co_value;	/* CPU_CONTROL_* bit(s) affected */
};
1562
1563static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
1564
1565static u_int
1566parse_cpu_options(args, optlist, cpuctrl)
1567	char *args;
1568	struct cpu_option *optlist;
1569	u_int cpuctrl;
1570{
1571	int integer;
1572
1573	if (args == NULL)
1574		return(cpuctrl);
1575
1576	while (optlist->co_name) {
1577		if (get_bootconf_option(args, optlist->co_name,
1578		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
1579			if (integer) {
1580				if (optlist->co_trueop == OR)
1581					cpuctrl |= optlist->co_value;
1582				else if (optlist->co_trueop == BIC)
1583					cpuctrl &= ~optlist->co_value;
1584			} else {
1585				if (optlist->co_falseop == OR)
1586					cpuctrl |= optlist->co_value;
1587				else if (optlist->co_falseop == BIC)
1588					cpuctrl &= ~optlist->co_value;
1589			}
1590		}
1591		++optlist;
1592	}
1593	return(cpuctrl);
1594}
1595#endif /* CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 || XSCALE*/
1596
1597#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8)
/* Boot options shared by the ARM6/7/8 family (unified I/D cache). */
struct cpu_option arm678_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1609
1610#endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
1611
1612#ifdef CPU_ARM7TDMI
/* ARM7TDMI-specific boot options (cache, write buffer, FPA clock). */
struct cpu_option arm7tdmi_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }
};
1624
1625void
1626arm7tdmi_setup(args)
1627	char *args;
1628{
1629	int cpuctrl;
1630
1631	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1632		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1633		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1634
1635	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1636	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
1637
1638#ifdef __ARMEB__
1639	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1640#endif
1641
1642	/* Clear out the cache */
1643	cpu_idcache_wbinv_all();
1644
1645	/* Set the control register */
1646	ctrl = cpuctrl;
1647	cpu_control(0xffffffff, cpuctrl);
1648}
1649#endif	/* CPU_ARM7TDMI */
1650
1651#ifdef CPU_ARM8
/* ARM8-specific boot options (cache, write buffer, branch prediction). */
struct cpu_option arm8_options[] = {
	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1664
/*
 * arm8_setup:
 *
 *	Program the ARM8 CPU control register from a default plus boot
 *	options, and optionally reprogram the ARM8 clock/test register
 *	when any "arm8.clock.*"/"arm8.test" boot option is present.
 */
void
arm8_setup(args)
	char *args;
{
	int integer;
	int cpuctrl, cpuctrlmask;
	int clocktest;
	int setclock = 0;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Get clock configuration */
	clocktest = arm8_clock_config(0, 0) & 0x0f;

	/* Special ARM8 clock and test configuration */
	/*
	 * NOTE(review): the boolean value parsed into 'integer' is ignored
	 * here; merely specifying "arm8.clock.reset" resets the clock
	 * configuration -- presumably intentional, but worth confirming.
	 */
	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
		clocktest = 0;
		setclock = 1;
	}
	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
		if (integer)
			clocktest |= 0x01;
		else
			clocktest &= ~(0x01);
		setclock = 1;
	}
	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
		if (integer)
			clocktest |= 0x02;
		else
			clocktest &= ~(0x02);
		setclock = 1;
	}
	/*
	 * NOTE(review): the mask ~0xc0 clears bits 6-7, but the new value
	 * ((integer & 3) << 2) lands in bits 2-3; one of the two looks
	 * wrong -- TODO verify against the ARM810 clock/test register
	 * layout before changing.
	 */
	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
		clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
		setclock = 1;
	}
	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
		clocktest |= (integer & 7) << 5;
		setclock = 1;
	}

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);

	/* Set the clock/test register */
	if (setclock)
		arm8_clock_config(0x7f, clocktest);
}
1736#endif	/* CPU_ARM8 */
1737
1738#ifdef CPU_ARM9
/* ARM9-specific boot options (separate I/D caches, write buffer). */
struct cpu_option arm9_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1750
1751void
1752arm9_setup(args)
1753	char *args;
1754{
1755	int cpuctrl, cpuctrlmask;
1756
1757	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1758	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1759	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1760	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
1761	    CPU_CONTROL_ROUNDROBIN;
1762	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1763		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1764		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1765		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1766		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1767		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
1768		 | CPU_CONTROL_ROUNDROBIN;
1769
1770#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1771	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1772#endif
1773
1774	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
1775
1776#ifdef __ARMEB__
1777	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1778#endif
1779	if (vector_page == ARM_VECTORS_HIGH)
1780		cpuctrl |= CPU_CONTROL_VECRELOC;
1781
1782	/* Clear out the cache */
1783	cpu_idcache_wbinv_all();
1784
1785	/* Set the control register */
1786	cpu_control(cpuctrlmask, cpuctrl);
1787	ctrl = cpuctrl;
1788
1789}
1790#endif	/* CPU_ARM9 */
1791
1792#if defined(CPU_ARM9E) || defined(CPU_ARM10)
/* ARM9E/ARM10 boot options (separate I/D caches, write buffer). */
struct cpu_option arm10_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1804
1805void
1806arm10_setup(args)
1807	char *args;
1808{
1809	int cpuctrl, cpuctrlmask;
1810
1811	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1812	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1813	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
1814	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1815	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1816	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1817	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1818	    | CPU_CONTROL_BPRD_ENABLE
1819	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1820
1821#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1822	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1823#endif
1824
1825	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
1826
1827#ifdef __ARMEB__
1828	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1829#endif
1830
1831	/* Clear out the cache */
1832	cpu_idcache_wbinv_all();
1833
1834	/* Now really make sure they are clean.  */
1835	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1836
1837	if (vector_page == ARM_VECTORS_HIGH)
1838		cpuctrl |= CPU_CONTROL_VECRELOC;
1839
1840	/* Set the control register */
1841	ctrl = cpuctrl;
1842	cpu_control(0xffffffff, cpuctrl);
1843
1844	/* And again. */
1845	cpu_idcache_wbinv_all();
1846}
1847#endif	/* CPU_ARM9E || CPU_ARM10 */
1848
1849#ifdef CPU_ARM11
/* ARM11 boot options (separate I/D caches). */
struct cpu_option arm11_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1858
1859void
1860arm11_setup(args)
1861	char *args;
1862{
1863	int cpuctrl, cpuctrlmask;
1864
1865	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1866	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1867	    /* | CPU_CONTROL_BPRD_ENABLE */;
1868	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1869	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1870	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
1871	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1872	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1873
1874#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1875	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1876#endif
1877
1878	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
1879
1880#ifdef __ARMEB__
1881	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1882#endif
1883
1884	/* Clear out the cache */
1885	cpu_idcache_wbinv_all();
1886
1887	/* Now really make sure they are clean.  */
1888	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1889
1890	/* Set the control register */
1891	curcpu()->ci_ctrl = cpuctrl;
1892	cpu_control(0xffffffff, cpuctrl);
1893
1894	/* And again. */
1895	cpu_idcache_wbinv_all();
1896}
1897#endif	/* CPU_ARM11 */
1898
1899#ifdef CPU_SA110
/* SA-110 (StrongARM) boot options (separate I/D caches, write buffer). */
struct cpu_option sa110_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1915
1916void
1917sa110_setup(args)
1918	char *args;
1919{
1920	int cpuctrl, cpuctrlmask;
1921
1922	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1923		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1924		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1925		 | CPU_CONTROL_WBUF_ENABLE;
1926	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1927		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1928		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1929		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1930		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1931		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1932		 | CPU_CONTROL_CPCLK;
1933
1934#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1935	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1936#endif
1937
1938	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
1939
1940#ifdef __ARMEB__
1941	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1942#endif
1943
1944	/* Clear out the cache */
1945	cpu_idcache_wbinv_all();
1946
1947	/* Set the control register */
1948	ctrl = cpuctrl;
1949/*	cpu_control(cpuctrlmask, cpuctrl);*/
1950	cpu_control(0xffffffff, cpuctrl);
1951
1952	/*
1953	 * enable clockswitching, note that this doesn't read or write to r0,
1954	 * r0 is just to make it valid asm
1955	 */
1956	__asm ("mcr 15, 0, r0, c15, c1, 2");
1957}
1958#endif	/* CPU_SA110 */
1959
1960#if defined(CPU_SA1100) || defined(CPU_SA1110)
1961struct cpu_option sa11x0_options[] = {
1962#ifdef COMPAT_12
1963	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1964	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1965#endif	/* COMPAT_12 */
1966	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1967	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1968	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1969	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
1970	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
1971	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1972	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1973	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1974	{ NULL,			IGN, IGN, 0 }
1975};
1976
1977void
1978sa11x0_setup(args)
1979	char *args;
1980{
1981	int cpuctrl, cpuctrlmask;
1982
1983	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1984		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1985		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1986		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
1987	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1988		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1989		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1990		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1991		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1992		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1993		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1994
1995#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1996	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1997#endif
1998
1999
2000	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
2001
2002#ifdef __ARMEB__
2003	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2004#endif
2005
2006	if (vector_page == ARM_VECTORS_HIGH)
2007		cpuctrl |= CPU_CONTROL_VECRELOC;
2008	/* Clear out the cache */
2009	cpu_idcache_wbinv_all();
2010	/* Set the control register */
2011	ctrl = cpuctrl;
2012	cpu_control(0xffffffff, cpuctrl);
2013}
2014#endif	/* CPU_SA1100 || CPU_SA1110 */
2015
2016#if defined(CPU_IXP12X0)
/*
 * Boot-argument option table for the IXP12x0, consumed by
 * parse_cpu_options().  Column layout matches the other tables in
 * this file: option name, two operations (OR sets, BIC clears,
 * IGN ignores — presumably selected by the option's boolean sense;
 * confirm in parse_cpu_options()), and the control-register bits
 * involved.  NULL name terminates the table.
 */
struct cpu_option ixp12x0_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
2028
2029void
2030ixp12x0_setup(args)
2031	char *args;
2032{
2033	int cpuctrl, cpuctrlmask;
2034
2035
2036	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
2037		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
2038		 | CPU_CONTROL_IC_ENABLE;
2039
2040	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
2041		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2042		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
2043		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
2044		 | CPU_CONTROL_VECRELOC;
2045
2046#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2047	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2048#endif
2049
2050	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
2051
2052#ifdef __ARMEB__
2053	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2054#endif
2055
2056	if (vector_page == ARM_VECTORS_HIGH)
2057		cpuctrl |= CPU_CONTROL_VECRELOC;
2058
2059	/* Clear out the cache */
2060	cpu_idcache_wbinv_all();
2061
2062	/* Set the control register */
2063	ctrl = cpuctrl;
2064	/* cpu_control(0xffffffff, cpuctrl); */
2065	cpu_control(cpuctrlmask, cpuctrl);
2066}
2067#endif /* CPU_IXP12X0 */
2068
2069#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2070  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
2071  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
/*
 * Boot-argument option table for XScale CPUs, consumed by
 * parse_cpu_options().  Column layout matches the other tables in
 * this file: option name, two operations (OR sets, BIC clears,
 * IGN ignores — presumably selected by the option's boolean sense;
 * confirm in parse_cpu_options()), and the control-register bits
 * involved.  NULL name terminates the table.
 */
struct cpu_option xscale_options[] = {
#ifdef COMPAT_12
	/* Old-style (pre-1.2 compatibility) option names. */
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* terminator */
};
2086
2087void
2088xscale_setup(args)
2089	char *args;
2090{
2091	uint32_t auxctl;
2092	int cpuctrl, cpuctrlmask;
2093
2094	/*
2095	 * The XScale Write Buffer is always enabled.  Our option
2096	 * is to enable/disable coalescing.  Note that bits 6:3
2097	 * must always be enabled.
2098	 */
2099
2100	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2101		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2102		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2103		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
2104		 | CPU_CONTROL_BPRD_ENABLE;
2105	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2106		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2107		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2108		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2109		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2110		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2111		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
2112		 CPU_CONTROL_L2_ENABLE;
2113
2114#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2115	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2116#endif
2117
2118	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
2119
2120#ifdef __ARMEB__
2121	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2122#endif
2123
2124	if (vector_page == ARM_VECTORS_HIGH)
2125		cpuctrl |= CPU_CONTROL_VECRELOC;
2126#ifdef CPU_XSCALE_CORE3
2127	cpuctrl |= CPU_CONTROL_L2_ENABLE;
2128#endif
2129
2130	/* Clear out the cache */
2131	cpu_idcache_wbinv_all();
2132
2133	/*
2134	 * Set the control register.  Note that bits 6:3 must always
2135	 * be set to 1.
2136	 */
2137	ctrl = cpuctrl;
2138/*	cpu_control(cpuctrlmask, cpuctrl);*/
2139	cpu_control(0xffffffff, cpuctrl);
2140
2141	/* Make sure write coalescing is turned on */
2142	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
2143		: "=r" (auxctl));
2144#ifdef XSCALE_NO_COALESCE_WRITES
2145	auxctl |= XSCALE_AUXCTL_K;
2146#else
2147	auxctl &= ~XSCALE_AUXCTL_K;
2148#endif
2149#ifdef CPU_XSCALE_CORE3
2150	auxctl |= XSCALE_AUXCTL_LLR;
2151	auxctl |= XSCALE_AUXCTL_MD_MASK;
2152#endif
2153	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
2154		: : "r" (auxctl));
2155}
#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 ||
	   CPU_XSCALE_IXP425 || CPU_XSCALE_80219 || CPU_XSCALE_81342 */
2158