cpufunc.c revision 266203
1/*	$NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $	*/
2
3/*-
4 * arm7tdmi support code Copyright (c) 2001 John Fremlin
5 * arm8 support code Copyright (c) 1997 ARM Limited
6 * arm8 support code Copyright (c) 1997 Causality Limited
7 * arm9 support code Copyright (C) 2001 ARM Ltd
8 * Copyright (c) 1997 Mark Brinicombe.
9 * Copyright (c) 1997 Causality Limited
10 * All rights reserved.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 *    notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 *    notice, this list of conditions and the following disclaimer in the
19 *    documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 *    must display the following acknowledgement:
22 *	This product includes software developed by Causality Limited.
23 * 4. The name of Causality Limited may not be used to endorse or promote
24 *    products derived from this software without specific prior written
25 *    permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * SUCH DAMAGE.
38 *
39 * RiscBSD kernel project
40 *
41 * cpufuncs.c
42 *
43 * C functions for supporting CPU / MMU / TLB specific operations.
44 *
45 * Created      : 30/01/97
46 */
47#include <sys/cdefs.h>
48__FBSDID("$FreeBSD: stable/10/sys/arm/arm/cpufunc.c 266203 2014-05-16 00:14:50Z ian $");
49
50#include <sys/param.h>
51#include <sys/systm.h>
52#include <sys/lock.h>
53#include <sys/mutex.h>
54#include <sys/bus.h>
55#include <machine/bus.h>
56#include <machine/cpu.h>
57#include <machine/disassem.h>
58
59#include <vm/vm.h>
60#include <vm/pmap.h>
61#include <vm/uma.h>
62
63#include <machine/cpuconf.h>
64#include <machine/cpufunc.h>
65#include <machine/bootconfig.h>
66
67#ifdef CPU_XSCALE_80200
68#include <arm/xscale/i80200/i80200reg.h>
69#include <arm/xscale/i80200/i80200var.h>
70#endif
71
72#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
73#include <arm/xscale/i80321/i80321reg.h>
74#include <arm/xscale/i80321/i80321var.h>
75#endif
76
77/*
78 * Some definitions in i81342reg.h clash with i80321reg.h.
79 * This only happens for the LINT kernel. As it happens,
80 * we don't need anything from i81342reg.h that we already
81 * got from somewhere else during a LINT compile.
82 */
83#if defined(CPU_XSCALE_81342) && !defined(COMPILING_LINT)
84#include <arm/xscale/i8134x/i81342reg.h>
85#endif
86
87#ifdef CPU_XSCALE_IXP425
88#include <arm/xscale/ixp425/ixp425reg.h>
89#include <arm/xscale/ixp425/ixp425var.h>
90#endif
91
/* PRIMARY CACHE VARIABLES */
/*
 * Geometry of the primary (L1) instruction and data caches.  These are
 * presumably filled in by the CPU-identification/setup code; the code
 * that initializes them is not visible in this chunk — confirm against
 * the CPU probe routines.
 */
int	arm_picache_size;
int	arm_picache_line_size;
int	arm_picache_ways;

int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;
int	arm_pdcache_ways;

int	arm_pcache_type;
int	arm_pcache_unified;

int	arm_dcache_align;
int	arm_dcache_align_mask;

u_int	arm_cache_level;
u_int	arm_cache_type[14];
u_int	arm_cache_loc;

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;
/*
 * NOTE(review): appears to hold the CPU control-register value chosen
 * at setup time; the writer is not visible in this chunk — confirm
 * against the *_setup() routines.
 */
int ctrl;
114
115#ifdef CPU_ARM7TDMI
/*
 * cpu_functions dispatch table for ARM7TDMI cores.  Entries are
 * positional and must match the field order of struct cpu_functions;
 * the trailing comment on each line names the slot being filled.
 *
 * All TLB slots alias the two arm7tdmi_tlb_flushID* routines and all
 * D/ID cache slots alias arm7tdmi_cache_flushID: no finer-grained
 * invalidate/clean primitives are provided, so the range-style slots
 * take the whole-flush routines via (void *) casts.
 */
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm7tdmi_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm7tdmi_tlb_flushID,		/* tlb_flushID		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushID_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushI		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushI_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushD		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm7tdmi_cache_flushID,		/* dcache_wbinv_all	*/
	(void *)arm7tdmi_cache_flushID,	/* dcache_wbinv_range	*/
	(void *)arm7tdmi_cache_flushID,	/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	cpufunc_nullop,			/* idcache_inv_all	*/
	arm7tdmi_cache_flushID,		/* idcache_wbinv_all	*/
	(void *)arm7tdmi_cache_flushID,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	/*
	 * NOTE(review): unlike every other table in this file, the
	 * dataabt_fixup slot uses late_abort_fixup rather than
	 * cpufunc_null_fixup.
	 */
	late_abort_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm7tdmi_context_switch,	/* context_switch	*/

	arm7tdmi_setup			/* cpu setup		*/

};
176#endif	/* CPU_ARM7TDMI */
177
178#ifdef CPU_ARM8
/*
 * cpu_functions dispatch table for ARM8 cores.  Entries are positional
 * and must match the field order of struct cpu_functions; the trailing
 * comment on each line names the slot being filled.
 *
 * The dcache_inv_range slot (flagged /\*XXX*\/ below) is serviced by a
 * full purge (write-back + invalidate) rather than a pure invalidate.
 * L2 slots are no-ops.
 */
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm8_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm8_tlb_flushID,		/* tlb_flushID		*/
	arm8_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	arm8_tlb_flushID,		/* tlb_flushI		*/
	arm8_tlb_flushID_SE,		/* tlb_flushI_SE	*/
	arm8_tlb_flushID,		/* tlb_flushD		*/
	arm8_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm8_cache_purgeID,		/* dcache_wbinv_all	*/
	(void *)arm8_cache_purgeID,	/* dcache_wbinv_range	*/
/*XXX*/	(void *)arm8_cache_purgeID,	/* dcache_inv_range	*/
	(void *)arm8_cache_cleanID,	/* dcache_wb_range	*/

	cpufunc_nullop,			/* idcache_inv_all	*/
	arm8_cache_purgeID,		/* idcache_wbinv_all	*/
	(void *)arm8_cache_purgeID,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm8_context_switch,		/* context_switch	*/

	arm8_setup			/* cpu setup		*/
};
238#endif	/* CPU_ARM8 */
239
240#ifdef CPU_ARM9
/*
 * cpu_functions dispatch table for ARM9 cores.  Entries are positional
 * and must match the field order of struct cpu_functions.
 *
 * TLB maintenance mixes generic armv4_* routines with ARM9-specific
 * single-entry flushes; tlb_flushI_SE has no single-entry variant and
 * falls back to flushing the whole I-TLB via a (void *) cast.
 * L2 slots are no-ops.
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	arm9_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm9_icache_sync_all,		/* icache_sync_all	*/
	arm9_icache_sync_range,		/* icache_sync_range	*/

	arm9_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	arm9_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	arm9_dcache_inv_range,		/* dcache_inv_range	*/
	arm9_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	arm9_idcache_wbinv_all,		/* idcache_wbinv_all	*/
	arm9_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm9_context_switch,		/* context_switch	*/

	arm9_setup			/* cpu setup		*/

};
301#endif /* CPU_ARM9 */
302
303#if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * cpu_functions dispatch table shared by ARM9E and ARM10 configurations
 * (see the enclosing #if CPU_ARM9E || CPU_ARM10).  Entries are
 * positional and must match the field order of struct cpu_functions.
 *
 * Cache maintenance uses the ARMv5 "EC" routines; TLB and write-buffer
 * maintenance reuse the armv4_*/arm10_* helpers.  L2 slots are no-ops.
 */
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	armv5_ec_setttb,		/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_all,	/* icache_sync_all	*/
	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	armv5_ec_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv5_ec_dcache_inv_range,	/* dcache_inv_range	*/
	armv5_ec_dcache_wb_range,	/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv5_ec_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	cpufunc_nullop,                 /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/

};
365
/*
 * cpu_functions dispatch table for Marvell Sheeva cores.  Entries are
 * positional and must match the field order of struct cpu_functions.
 *
 * Like armv5_ec_cpufuncs but with Sheeva-specific D-cache range
 * routines, real L2 cache maintenance (sheeva_l2cache_*), and a real
 * sleep hook.  The idcache_wbinv_range slot's original comment wrongly
 * read "idcache_wbinv_all"; corrected below.
 */
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	sheeva_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_all,	/* icache_sync_all	*/
	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	sheeva_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	sheeva_dcache_inv_range,	/* dcache_inv_range	*/
	sheeva_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	sheeva_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	sheeva_l2cache_wbinv_all,	/* l2cache_wbinv_all    */
	sheeva_l2cache_wbinv_range,	/* l2cache_wbinv_range  */
	sheeva_l2cache_inv_range,	/* l2cache_inv_range    */
	sheeva_l2cache_wb_range,	/* l2cache_wb_range     */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	sheeva_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/
};
426#endif /* CPU_ARM9E || CPU_ARM10 */
427
428#ifdef CPU_ARM10
/*
 * cpu_functions dispatch table for ARM10 cores.  Entries are positional
 * and must match the field order of struct cpu_functions.  Uses the
 * arm10-specific cache/TLB routines; L2 slots are no-ops.
 */
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	arm10_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm10_icache_sync_all,		/* icache_sync_all	*/
	arm10_icache_sync_range,	/* icache_sync_range	*/

	arm10_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	arm10_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	arm10_dcache_inv_range,		/* dcache_inv_range	*/
	arm10_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	arm10_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	arm10_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/

};
489#endif /* CPU_ARM10 */
490
491#ifdef CPU_MV_PJ4B
/*
 * cpu_functions dispatch table for Marvell PJ4B (ARMv7) cores.  Entries
 * are positional and must match the field order of struct cpu_functions.
 *
 * All TLB slots alias the two armv7_tlb_flushID* routines; the
 * icache_sync_all slot is serviced by a full I+D write-back/invalidate
 * (armv7_idcache_wbinv_all).  The idcache_wbinv_range slot's original
 * comment wrongly read "idcache_wbinv_all"; corrected below.
 */
struct cpu_functions pj4bv7_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	arm11_drain_writebuf,		/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	pj4b_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv7_tlb_flushID,		/* tlb_flushID		*/
	armv7_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv7_tlb_flushID,		/* tlb_flushI		*/
	armv7_tlb_flushID_SE,		/* tlb_flushI_SE	*/
	armv7_tlb_flushID,		/* tlb_flushD		*/
	armv7_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */
	armv7_idcache_wbinv_all,	/* icache_sync_all	*/
	armv7_icache_sync_range,	/* icache_sync_range	*/

	armv7_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	armv7_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv7_dcache_inv_range,		/* dcache_inv_range	*/
	armv7_dcache_wb_range,		/* dcache_wb_range	*/

	armv7_idcache_inv_all,		/* idcache_inv_all	*/
	armv7_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv7_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	(void *)cpufunc_nullop,		/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	pj4b_drain_readbuf,		/* flush_prefetchbuf	*/
	arm11_drain_writebuf,		/* drain_writebuf	*/
	pj4b_flush_brnchtgt_all,	/* flush_brnchtgt_C	*/
	pj4b_flush_brnchtgt_va,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm11_context_switch,		/* context_switch	*/

	pj4bv7_setup			/* cpu setup		*/
};
551#endif /* CPU_MV_PJ4B */
552
553#ifdef CPU_SA110
/*
 * cpu_functions dispatch table for the StrongARM SA-110.  Entries are
 * positional and must match the field order of struct cpu_functions.
 *
 * The dcache_inv_range slot (flagged /\*XXX*\/ below) is serviced by a
 * full purge rather than a pure invalidate; tlb_flushI_SE falls back
 * to a whole-I-TLB flush via a (void *) cast.  L2 slots are no-ops.
 */
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_flushID,		/* idcache_inv_all	*/
	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	sa110_context_switch,		/* context_switch	*/

	sa110_setup			/* cpu setup		*/
};
613#endif	/* CPU_SA110 */
614
615#if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * cpu_functions dispatch table shared by StrongARM SA-1100 and SA-1110
 * (see the enclosing #if CPU_SA1100 || CPU_SA1110).  Entries are
 * positional and must match the field order of struct cpu_functions.
 *
 * Same SA-1 cache/TLB routines as sa110_cpufuncs, but with a real
 * read-buffer drain (flush_prefetchbuf slot) and a real sleep hook.
 * The dcache_inv_range slot (flagged /\*XXX*\/) uses a full purge.
 */
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_flushID,		/* idcache_inv_all	*/
	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	sa11x0_drain_readbuf,		/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	sa11x0_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	sa11x0_context_switch,		/* context_switch	*/

	sa11x0_setup			/* cpu setup		*/
};
675#endif	/* CPU_SA1100 || CPU_SA1110 */
676
677#ifdef CPU_IXP12X0
/*
 * cpu_functions dispatch table for the Intel IXP12x0.  Entries are
 * positional and must match the field order of struct cpu_functions.
 *
 * Reuses the SA-1 cache/TLB routines (same caveats as sa110_cpufuncs:
 * dcache_inv_range purges, tlb_flushI_SE flushes the whole I-TLB) with
 * IXP12x0-specific read-buffer drain, context switch and setup.
 */
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_flushID,		/* idcache_inv_all	*/
	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	ixp12x0_drain_readbuf,		/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	ixp12x0_context_switch,		/* context_switch	*/

	ixp12x0_setup			/* cpu setup		*/
};
737#endif	/* CPU_IXP12X0 */
738
739#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
740  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
741  defined(CPU_XSCALE_80219)
742
/*
 * cpu_functions dispatch table shared by the XScale 80200, 80321,
 * PXA2x0, IXP425 and 80219 configurations (see the enclosing #if).
 * Entries are positional and must match the field order of struct
 * cpu_functions.
 *
 * Uses a real coprocessor-wait (xscale_cpwait) and XScale-specific
 * control/setttb/cache routines; L2 slots are no-ops (the 81342 L2
 * variant has its own table below).
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscale_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscale_cache_syncI,		/* icache_sync_all	*/
	xscale_cache_syncI_rng,		/* icache_sync_range	*/

	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_flushID,		/* idcache_inv_all	*/
	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all 	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscale_context_switch,		/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
802#endif
803/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
804   CPU_XSCALE_80219 */
805
806#ifdef CPU_XSCALE_81342
/*
 * cpu_functions dispatch table for the XScale 81342 (third-generation
 * core).  Entries are positional and must match the field order of
 * struct cpu_functions.
 *
 * Differs from xscale_cpufuncs in using the xscalec3_* cache/setttb/
 * context-switch routines and in providing real L2 cache maintenance
 * (xscalec3_l2cache_*) instead of no-ops.
 */
struct cpu_functions xscalec3_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscalec3_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscalec3_cache_syncI,		/* icache_sync_all	*/
	xscalec3_cache_syncI_rng,	/* icache_sync_range	*/

	xscalec3_cache_purgeD,		/* dcache_wbinv_all	*/
	xscalec3_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscalec3_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_flushID,		/* idcache_inv_all	*/
	xscalec3_cache_purgeID,		/* idcache_wbinv_all	*/
	xscalec3_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	xscalec3_l2cache_purge,		/* l2cache_wbinv_all	*/
	xscalec3_l2cache_purge_rng,	/* l2cache_wbinv_range	*/
	xscalec3_l2cache_flush_rng,	/* l2cache_inv_range	*/
	xscalec3_l2cache_clean_rng,	/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscalec3_context_switch,	/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
866#endif /* CPU_XSCALE_81342 */
867
868
869#if defined(CPU_FA526) || defined(CPU_FA626TE)
/*
 * cpu_functions dispatch table shared by Faraday FA526 and FA626TE
 * configurations (see the enclosing #if).  Entries are positional and
 * must match the field order of struct cpu_functions.
 *
 * Provides real prefetch-buffer flush, branch-target flush (E variant)
 * and sleep hooks, unlike most of the older tables.  L2 slots are
 * no-ops.
 */
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	fa526_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	fa526_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	fa526_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	fa526_icache_sync_all,		/* icache_sync_all	*/
	fa526_icache_sync_range,	/* icache_sync_range	*/

	fa526_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	fa526_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	fa526_dcache_inv_range,		/* dcache_inv_range	*/
	fa526_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	fa526_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	fa526_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	fa526_flush_prefetchbuf,	/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	fa526_flush_brnchtgt_E,		/* flush_brnchtgt_E	*/

	fa526_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	fa526_context_switch,		/* context_switch	*/

	fa526_setup			/* cpu setup 		*/
};
929#endif	/* CPU_FA526 || CPU_FA626TE */
930
931#if defined(CPU_ARM1136)
/*
 * cpu_functions dispatch table for ARM1136 cores.  Entries are
 * positional and must match the field order of struct cpu_functions.
 *
 * Uses the shared ARM11x6 routines (common with the ARM1176 table
 * below) for setttb, I/ID-cache maintenance, prefetch-buffer flush and
 * setup, and the generic arm11_*/armv6_* helpers elsewhere.  L2 slots
 * are no-ops; a real sleep hook (arm11_sleep) is installed.
 */
struct cpu_functions arm1136_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* Domain               */
	arm11x6_setttb,                 /* Setttb               */
	cpufunc_faultstatus,            /* Faultstatus          */
	cpufunc_faultaddress,           /* Faultaddress         */

	/* TLB functions */

	arm11_tlb_flushID,              /* tlb_flushID          */
	arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
	arm11_tlb_flushI,               /* tlb_flushI           */
	arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
	arm11_tlb_flushD,               /* tlb_flushD           */
	arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	arm11x6_icache_sync_all,        /* icache_sync_all      */
	arm11x6_icache_sync_range,      /* icache_sync_range    */

	arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
	armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
	armv6_dcache_inv_range,         /* dcache_inv_range     */
	armv6_dcache_wb_range,          /* dcache_wb_range      */

	armv6_idcache_inv_all,		/* idcache_inv_all	*/
	arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
	arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */

	(void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */

	/* Other functions */

	arm11x6_flush_prefetchbuf,      /* flush_prefetchbuf    */
	arm11_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	arm11_sleep,                  	/* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	arm11_context_switch,           /* context_switch       */

	arm11x6_setup                   /* cpu setup            */
};
992#endif /* CPU_ARM1136 */
#if defined(CPU_ARM1176)
/*
 * Function dispatch table for the ARM1176 core.  The initializer is
 * positional; the struct cpu_functions slot each entry fills is named
 * in its trailing comment.
 */
struct cpu_functions arm1176_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* Domain               */
	arm11x6_setttb,                 /* Setttb               */
	cpufunc_faultstatus,            /* Faultstatus          */
	cpufunc_faultaddress,           /* Faultaddress         */

	/* TLB functions */

	arm11_tlb_flushID,              /* tlb_flushID          */
	arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
	arm11_tlb_flushI,               /* tlb_flushI           */
	arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
	arm11_tlb_flushD,               /* tlb_flushD           */
	arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	arm11x6_icache_sync_all,        /* icache_sync_all      */
	arm11x6_icache_sync_range,      /* icache_sync_range    */

	arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
	armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
	armv6_dcache_inv_range,         /* dcache_inv_range     */
	armv6_dcache_wb_range,          /* dcache_wb_range      */

	armv6_idcache_inv_all,		/* idcache_inv_all	*/
	arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
	arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */

	/* No L2 cache ops here; nullops fill the slots. */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */

	/* Other functions */

	arm11x6_flush_prefetchbuf,      /* flush_prefetchbuf    */
	arm11_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	arm11x6_sleep,                  /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	arm11_context_switch,           /* context_switch       */

	arm11x6_setup                   /* cpu setup            */
};
#endif /*CPU_ARM1176 */
1055
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Function dispatch table for Cortex-A and Krait cores.  The initializer
 * is positional; the struct cpu_functions slot each entry fills is named
 * in its trailing comment.
 */
struct cpu_functions cortexa_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* Domain               */
	armv7_setttb,                   /* Setttb               */
	cpufunc_faultstatus,            /* Faultstatus          */
	cpufunc_faultaddress,           /* Faultaddress         */

	/* TLB functions */

	armv7_tlb_flushID,              /* tlb_flushID          */
	armv7_tlb_flushID_SE,           /* tlb_flushID_SE       */
	arm11_tlb_flushI,               /* tlb_flushI           */
	arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
	arm11_tlb_flushD,               /* tlb_flushD           */
	arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	/*
	 * NOTE(review): the icache_sync_all slot is filled with the
	 * combined I/D write-back-invalidate routine rather than an
	 * I-cache-only routine — presumably intentional on ARMv7;
	 * confirm before changing.
	 */
	armv7_idcache_wbinv_all,         /* icache_sync_all      */
	armv7_icache_sync_range,        /* icache_sync_range    */

	armv7_dcache_wbinv_all,         /* dcache_wbinv_all     */
	armv7_dcache_wbinv_range,       /* dcache_wbinv_range   */
	armv7_dcache_inv_range,         /* dcache_inv_range     */
	armv7_dcache_wb_range,          /* dcache_wb_range      */

	armv7_idcache_inv_all,		/* idcache_inv_all	*/
	armv7_idcache_wbinv_all,        /* idcache_wbinv_all    */
	armv7_idcache_wbinv_range,      /* idcache_wbinv_range  */

	/*
	 * Note: For CPUs using the PL310 the L2 ops are filled in when the
	 * L2 cache controller is actually enabled.
	 */
	cpufunc_nullop,                 /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */

	/* Other functions */

	cpufunc_nullop,                 /* flush_prefetchbuf    */
	armv7_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	arm11_sleep,                    /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	armv7_context_switch,           /* context_switch       */

	cortexa_setup                     /* cpu setup            */
};
#endif /* CPU_CORTEXA */
1122
/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;		/* active dispatch table, installed by set_cpufuncs() */
u_int cputype;				/* CPU id masked with CPU_ID_CPU_MASK (set in set_cpufuncs()) */
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */

#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) ||	\
  defined (CPU_ARM9E) || defined (CPU_ARM10) || defined (CPU_ARM1136) ||	\
  defined(CPU_ARM1176) || defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||		\
  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
  defined(CPU_FA526) || defined(CPU_FA626TE) || defined(CPU_MV_PJ4B) ||			\
  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
  defined(CPU_CORTEXA) || defined(CPU_KRAIT)

static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  Computed in get_cachetype_cp15() from the cp15 cache
   type register and consumed by the ARM9/ARM10 setup in set_cpufuncs(). */
static int	arm_dcache_l2_nsets;		/* log2 of D-cache set count */
static int	arm_dcache_l2_assoc;		/* log2 of D-cache associativity */
static int	arm_dcache_l2_linesize;		/* log2 of D-cache line size */
1146
/*
 * Read the cache geometry out of cp15 and record it in the global arm_*
 * cache variables.  Handles both the ARMv7 CLIDR/CCSIDR scheme and the
 * older single cache-type-register format.
 */
static void
get_cachetype_cp15()
{
	u_int ctype, isize, dsize, cpuid;
	u_int clevel, csize, i, sel;
	u_int multiplier;
	u_char type;

	/* Read the cp15 cache type register. */
	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	cpuid = cpufunc_id();
	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpuid)
		goto out;

	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
		/* ARMv7: walk the Cache Level ID register (CLIDR). */
		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
		    : "=r" (clevel));
		arm_cache_level = clevel;
		arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
		i = 0;
		/* Each level occupies a 3-bit cache-type field in CLIDR. */
		while ((type = (clevel & 0x7)) && i < 7) {
			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
			    type == CACHE_SEP_CACHE) {
				sel = i << 1;
				/* Select the D/unified cache at this level... */
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				/* ...then read its size register (CCSIDR). */
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
				arm_dcache_align = 1 <<
				    (CPUV7_CT_xSIZE_LEN(csize) + 4);
				arm_dcache_align_mask = arm_dcache_align - 1;
			}
			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
				sel = (i << 1) | 1;
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
			}
			i++;
			clevel >>= 3;
		}
	} else {
		if ((ctype & CPU_CT_S) == 0)
			arm_pcache_unified = 1;

		/*
		 * If you want to know how this code works, go read the ARM ARM.
		 */

		arm_pcache_type = CPU_CT_CTYPE(ctype);

		if (arm_pcache_unified == 0) {
			isize = CPU_CT_ISIZE(ctype);
			multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
			arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
			if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
				if (isize & CPU_CT_xSIZE_M)
					arm_picache_line_size = 0; /* not present */
				else
					arm_picache_ways = 1;
			} else {
				arm_picache_ways = multiplier <<
				    (CPU_CT_xSIZE_ASSOC(isize) - 1);
			}
			arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
		}

		dsize = CPU_CT_DSIZE(ctype);
		multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
		if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
			if (dsize & CPU_CT_xSIZE_M)
				arm_pdcache_line_size = 0; /* not present */
			else
				arm_pdcache_ways = 1;
		} else {
			arm_pdcache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
		}
		arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

		arm_dcache_align = arm_pdcache_line_size;

		/* Log2 versions, consumed by the ARM9/ARM10 cache setup. */
		arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
		arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
		arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
		    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

	out:
		/*
		 * NOTE: also reached via the "goto out" above, even though
		 * the label sits inside this else arm (jumping into a block
		 * is legal C); in that case only the mask is recomputed from
		 * whatever arm_dcache_align already holds.
		 */
		arm_dcache_align_mask = arm_dcache_align - 1;
	}
}
#endif /* CPUs with a cp15 cache type register (see matching #if above) */
1251
#if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
    defined(CPU_IXP12X0)
/* Cache information for CPUs without cache type registers. */
struct cachetab {
	u_int32_t ct_cpuid;		/* CPU id (CPU_ID_CPU_MASK bits) */
	int	ct_pcache_type;		/* primary cache type (CPU_CT_CTYPE_*) */
	int	ct_pcache_unified;	/* non-zero: unified I/D cache */
	int	ct_pdcache_size;	/* D-cache size */
	int	ct_pdcache_line_size;	/* D-cache line size */
	int	ct_pdcache_ways;	/* D-cache associativity */
	int	ct_picache_size;	/* I-cache size */
	int	ct_picache_line_size;	/* I-cache line size */
	int	ct_picache_ways;	/* I-cache associativity */
};

/* Table is scanned by get_cachetype_table(); a zero cpuid terminates it. */
struct cachetab cachetab[] = {
    /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0}
};

static void get_cachetype_table(void);
1278
1279static void
1280get_cachetype_table()
1281{
1282	int i;
1283	u_int32_t cpuid = cpufunc_id();
1284
1285	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
1286		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
1287			arm_pcache_type = cachetab[i].ct_pcache_type;
1288			arm_pcache_unified = cachetab[i].ct_pcache_unified;
1289			arm_pdcache_size = cachetab[i].ct_pdcache_size;
1290			arm_pdcache_line_size =
1291			    cachetab[i].ct_pdcache_line_size;
1292			arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
1293			arm_picache_size = cachetab[i].ct_picache_size;
1294			arm_picache_line_size =
1295			    cachetab[i].ct_picache_line_size;
1296			arm_picache_ways = cachetab[i].ct_picache_ways;
1297		}
1298	}
1299	arm_dcache_align = arm_pdcache_line_size;
1300
1301	arm_dcache_align_mask = arm_dcache_align - 1;
1302}
1303
1304#endif /* SA110 || SA1100 || SA1111 || IXP12X0 */
1305
/*
 * Cannot panic here as we may not have a console yet ...
 * (XXX: note that set_cpufuncs() below nevertheless ends in panic()
 * when no supported CPU is matched; reconcile.)
 */
1309
/*
 * Identify the running CPU from its cp15 id register, install the matching
 * cpu_functions dispatch table, read the cache geometry, and select the
 * pmap PTE initialization for that core.  Returns 0 on success; the
 * no-match path ends in panic().
 */
int
set_cpufuncs()
{
	cputype = cpufunc_id();
	cputype &= CPU_ID_CPU_MASK;

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */

#ifdef CPU_ARM7TDMI
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
		cpufuncs = arm7tdmi_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	}
#endif
#ifdef CPU_ARM8
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x0000f000) == 0x00008000) {
		cpufuncs = arm8_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
		get_cachetype_cp15();
		pmap_pte_init_arm8();
		goto out;
	}
#endif	/* CPU_ARM8 */
#ifdef CPU_ARM9
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		/*
		 * Precompute set/index iteration constants for the ARM9
		 * set/way cache routines from the log2 geometry recorded
		 * by get_cachetype_cp15().
		 */
		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
		    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
#ifdef ARM9_CACHE_WRITE_THROUGH
		pmap_pte_init_arm9();
#else
		pmap_pte_init_generic();
#endif
		goto out;
	}
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E) || defined(CPU_ARM10)
	if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
	    cputype == CPU_ID_MV88FR571_41) {
		uint32_t sheeva_ctrl;

		sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
		    MV_L2_ENABLE);
		/*
		 * Workaround for Marvell MV78100 CPU: Cache prefetch
		 * mechanism may affect the cache coherency validity,
		 * so it needs to be disabled.
		 *
		 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
		 * L2 Prefetching Mechanism) for details.
		 */
		if (cputype == CPU_ID_MV88FR571_VD ||
		    cputype == CPU_ID_MV88FR571_41)
			sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;

		sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);

		cpufuncs = sheeva_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	} else if (cputype == CPU_ID_ARM926EJS || cputype == CPU_ID_ARM1026EJS) {
		cpufuncs = armv5_ec_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM9E || CPU_ARM10 */
#ifdef CPU_ARM10
	if (/* cputype == CPU_ID_ARM1020T || */
	    cputype == CPU_ID_ARM1020E) {
		/*
		 * Select write-through cacheing (this isn't really an
		 * option on ARM1020T).
		 */
		cpufuncs = arm10_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm10_dcache_sets_max =
		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
		    arm10_dcache_sets_inc;
		arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM10 */
#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
	if (cputype == CPU_ID_ARM1136JS
	    || cputype == CPU_ID_ARM1136JSR1
	    || cputype == CPU_ID_ARM1176JZS) {
#ifdef CPU_ARM1136
		if (cputype == CPU_ID_ARM1136JS
		    || cputype == CPU_ID_ARM1136JSR1)
			cpufuncs = arm1136_cpufuncs;
#endif
#ifdef CPU_ARM1176
		if (cputype == CPU_ID_ARM1176JZS)
			cpufuncs = arm1176_cpufuncs;
#endif
		cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
		get_cachetype_cp15();

		pmap_pte_init_mmu_v6();

		goto out;
	}
#endif /* CPU_ARM1136 || CPU_ARM1176 */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	if (cputype == CPU_ID_CORTEXA7 ||
	    cputype == CPU_ID_CORTEXA8R1 ||
	    cputype == CPU_ID_CORTEXA8R2 ||
	    cputype == CPU_ID_CORTEXA8R3 ||
	    cputype == CPU_ID_CORTEXA9R1 ||
	    cputype == CPU_ID_CORTEXA9R2 ||
	    cputype == CPU_ID_CORTEXA9R3 ||
	    cputype == CPU_ID_CORTEXA15 ||
	    cputype == CPU_ID_KRAIT ) {
		cpufuncs = cortexa_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
		get_cachetype_cp15();

		pmap_pte_init_mmu_v6();
		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;
		goto out;
	}
#endif /* CPU_CORTEXA */

#if defined(CPU_MV_PJ4B)
	if (cputype == CPU_ID_MV88SV581X_V7 ||
	    cputype == CPU_ID_MV88SV584X_V7 ||
	    cputype == CPU_ID_ARM_88SV581X_V7) {
		cpufuncs = pj4bv7_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_mmu_v6();
		goto out;
	}
#endif /* CPU_MV_PJ4B */
#ifdef CPU_SA110
	if (cputype == CPU_ID_SA110) {
		cpufuncs = sa110_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
		get_cachetype_table();
		pmap_pte_init_sa1();
		goto out;
	}
#endif	/* CPU_SA110 */
#ifdef CPU_SA1100
	if (cputype == CPU_ID_SA1100) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_table();
		pmap_pte_init_sa1();
		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif	/* CPU_SA1100 */
#ifdef CPU_SA1110
	if (cputype == CPU_ID_SA1110) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_table();
		pmap_pte_init_sa1();
		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif	/* CPU_SA1110 */
#if defined(CPU_FA526) || defined(CPU_FA626TE)
	if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
		cpufuncs = fa526_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_cp15();
		pmap_pte_init_generic();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif	/* CPU_FA526 || CPU_FA626TE */
#ifdef CPU_IXP12X0
        if (cputype == CPU_ID_IXP1200) {
                cpufuncs = ixp12x0_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;
                get_cachetype_table();
                pmap_pte_init_sa1();
		goto out;
        }
#endif  /* CPU_IXP12X0 */
#ifdef CPU_XSCALE_80200
	if (cputype == CPU_ID_80200) {
		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;

		i80200_icu_init();

#if defined(XSCALE_CCLKCFG)
		/*
		 * Crank CCLKCFG to maximum legal value.
		 */
		__asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
			:
			: "r" (XSCALE_CCLKCFG));
#endif

		/*
		 * XXX Disable ECC in the Bus Controller Unit; we
		 * don't really support it, yet.  Clear any pending
		 * error indications.
		 */
		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
			:
			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));

		cpufuncs = xscale_cpufuncs;
		/*
		 * i80200 errata: Step-A0 and A1 have a bug where
		 * D$ dirty bits are not cleared on "invalidate by
		 * address".
		 *
		 * Workaround: Clean cache line before invalidating.
		 */
		if (rev == 0 || rev == 1)
			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_80200 */
#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_80321 */

#if defined(CPU_XSCALE_81342)
	if (cputype == CPU_ID_81342) {
		cpufuncs = xscalec3_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_81342 */
#ifdef CPU_XSCALE_PXA2X0
	/* ignore core revision to test PXA2xx CPUs */
	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {

		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif /* CPU_XSCALE_PXA2X0 */
#ifdef CPU_XSCALE_IXP425
	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
            cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {

		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		goto out;
	}
#endif /* CPU_XSCALE_IXP425 */
	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	/* NOTREACHED: panic() does not return, so this is dead code. */
	return(ARCHITECTURE_NOT_PRESENT);
out:
	uma_set_align(arm_dcache_align_mask);
	return (0);
}
1621
1622/*
1623 * Fixup routines for data and prefetch aborts.
1624 *
1625 * Several compile time symbols are used
1626 *
1627 * DEBUG_FAULT_CORRECTION - Print debugging information during the
1628 * correction of registers after a fault.
1629 * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
1630 * when defined should use late aborts
1631 */
1632
1633
1634/*
1635 * Null abort fixup routine.
1636 * For use when no fixup is required.
1637 */
1638int
1639cpufunc_null_fixup(arg)
1640	void *arg;
1641{
1642	return(ABORT_FIXUP_OK);
1643}
1644
1645
#if defined(CPU_ARM7TDMI)

/* Debug helpers: compiled to nothing unless DEBUG_FAULT_CORRECTION is set. */
#ifdef DEBUG_FAULT_CORRECTION
#define DFC_PRINTF(x)		printf x
#define DFC_DISASSEMBLE(x)	disassemble(x)
#else
#define DFC_PRINTF(x)		/* nothing */
#define DFC_DISASSEMBLE(x)	/* nothing */
#endif

/*
 * "Early" data abort fixup.
 *
 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
 *
 * In early aborts, we may have to fix up LDM, STM, LDC and STC.
 */
int
early_abort_fixup(arg)
	void *arg;
{
	struct trapframe *frame = arg;
	u_int fault_pc;
	u_int fault_instruction;
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	if ((fault_instruction & 0x0e000000) == 0x08000000) {
		/* Block transfer (LDM/STM) encoding. */
		int base;
		int loop;
		int count;
		int *registers = &frame->tf_r0;

		DFC_PRINTF(("LDM/STM\n"));
		DFC_DISASSEMBLE(fault_pc);
		if (fault_instruction & (1 << 21)) {
			/* Writeback (W) bit set: undo the base update. */
			DFC_PRINTF(("This instruction must be corrected\n"));
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			/* Count registers transferred */
			count = 0;
			for (loop = 0; loop < 16; ++loop) {
				if (fault_instruction & (1<<loop))
					++count;
			}
			DFC_PRINTF(("%d registers used\n", count));
			DFC_PRINTF(("Corrected r%d by %d bytes ",
				       base, count * 4));
			if (fault_instruction & (1 << 23)) {
				DFC_PRINTF(("down\n"));
				registers[base] -= count * 4;
			} else {
				DFC_PRINTF(("up\n"));
				registers[base] += count * 4;
			}
		}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
		/* Coprocessor transfer (LDC/STC) encoding. */
		int base;
		int offset;
		int *registers = &frame->tf_r0;

		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */

		DFC_DISASSEMBLE(fault_pc);

		/* Only need to fix registers if write back is turned on */

		if ((fault_instruction & (1 << 21)) != 0) {
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;

			offset = (fault_instruction & 0xff) << 2;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
			if ((fault_instruction & (1 << 23)) != 0)
				offset = -offset;
			registers[base] += offset;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
		/*
		 * XXX(review): this condition is identical to the branch
		 * above, so this arm is unreachable dead code; the mask or
		 * value was presumably meant to differ — confirm intent
		 * before removing or changing it.
		 */
		return ABORT_FIXUP_FAILED;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	return(ABORT_FIXUP_OK);
}
#endif	/* CPU_ARM7TDMI */
1794
1795
#if defined(CPU_ARM7TDMI)
/*
 * "Late" (base updated) data abort fixup
 *
 * For ARM6 (in late-abort mode) and ARM7.
 *
 * In this model, all data-transfer instructions need fixing up.  We defer
 * LDM, STM, LDC and STC fixup to the early-abort handler.
 */
int
late_abort_fixup(arg)
	void *arg;
{
	struct trapframe *frame = arg;
	u_int fault_pc;
	u_int fault_instruction;
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	/* Was is a swap instruction ? */

	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
		/* SWP/SWPB has no base writeback, so nothing to undo. */
		DFC_DISASSEMBLE(fault_pc);
	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {

		/* Was is a ldr/str instruction */
		/* This is for late abort only */

		int base;
		int offset;
		int *registers = &frame->tf_r0;

		DFC_DISASSEMBLE(fault_pc);

		/* This is for late abort only */

		/*
		 * Post-indexed (P bit clear) or writeback (W bit set):
		 * the base register was updated and must be restored.
		 */
		if ((fault_instruction & (1 << 24)) == 0
		    || (fault_instruction & (1 << 21)) != 0) {
			/* postindexed ldr/str with no writeback */

			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			DFC_PRINTF(("late abt fix: r%d=%08x : ",
				       base, registers[base]));
			if ((fault_instruction & (1 << 25)) == 0) {
				/* Immediate offset - easy */

				offset = fault_instruction & 0xfff;
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				registers[base] += offset;
				DFC_PRINTF(("imm=%08x ", offset));
			} else {
				/* offset is a shifted register */
				int shift;

				offset = fault_instruction & 0x0f;
				if (offset == base)
					return ABORT_FIXUP_FAILED;

				/*
				 * Register offset - hard we have to
				 * cope with shifts !
				 */
				offset = registers[offset];

				if ((fault_instruction & (1 << 4)) == 0)
					/* shift with amount */
					shift = (fault_instruction >> 7) & 0x1f;
				else {
					/* shift with register */
					if ((fault_instruction & (1 << 7)) != 0)
						/* undefined for now so bail out */
						return ABORT_FIXUP_FAILED;
					shift = ((fault_instruction >> 8) & 0xf);
					if (base == shift)
						return ABORT_FIXUP_FAILED;
					DFC_PRINTF(("shift reg=%d ", shift));
					shift = registers[shift];
				}
				DFC_PRINTF(("shift=%08x ", shift));
				switch (((fault_instruction >> 5) & 0x3)) {
				case 0 : /* Logical left */
					offset = (int)(((u_int)offset) << shift);
					break;
				case 1 : /* Logical Right */
					if (shift == 0) shift = 32;
					offset = (int)(((u_int)offset) >> shift);
					break;
				case 2 : /* Arithmetic Right */
					if (shift == 0) shift = 32;
					offset = (int)(((int)offset) >> shift);
					break;
				case 3 : /* Rotate right (rol or rxx) */
					return ABORT_FIXUP_FAILED;
					break;
				}

				DFC_PRINTF(("abt: fixed LDR/STR with "
					       "register offset\n"));
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				DFC_PRINTF(("offset=%08x ", offset));
				registers[base] += offset;
			}
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	}

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/*
	 * Now let the early-abort fixup routine have a go, in case it
	 * was an LDM, STM, LDC or STC that faulted.
	 */

	return early_abort_fixup(arg);
}
#endif	/* CPU_ARM7TDMI */
1975
1976/*
1977 * CPU Setup code
1978 */
1979
1980#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined (CPU_ARM9) || \
1981  defined(CPU_ARM9E) || \
1982  defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) ||	\
1983  defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||		\
1984  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
1985  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
1986  defined(CPU_ARM10) ||  defined(CPU_ARM1136) || defined(CPU_ARM1176) ||\
1987  defined(CPU_FA526) || defined(CPU_FA626TE)
1988
/* Operation codes used by struct cpu_option / parse_cpu_options(). */
#define IGN	0	/* ignore: leave the control word unchanged */
#define OR	1	/* OR co_value's bits into the control word */
#define BIC	2	/* bit-clear co_value's bits from the control word */

/*
 * One boot-argument option and its effect on the CPU control register.
 * co_falseop is applied when the boolean option is present and false,
 * co_trueop when it is present and true.  A NULL co_name ends a table.
 */
struct cpu_option {
	char	*co_name;	/* boot option name */
	int	co_falseop;	/* IGN/OR/BIC when option is false */
	int	co_trueop;	/* IGN/OR/BIC when option is true */
	int	co_value;	/* control-register bit(s) to apply */
};

static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
2001
2002static u_int
2003parse_cpu_options(args, optlist, cpuctrl)
2004	char *args;
2005	struct cpu_option *optlist;
2006	u_int cpuctrl;
2007{
2008	int integer;
2009
2010	if (args == NULL)
2011		return(cpuctrl);
2012
2013	while (optlist->co_name) {
2014		if (get_bootconf_option(args, optlist->co_name,
2015		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
2016			if (integer) {
2017				if (optlist->co_trueop == OR)
2018					cpuctrl |= optlist->co_value;
2019				else if (optlist->co_trueop == BIC)
2020					cpuctrl &= ~optlist->co_value;
2021			} else {
2022				if (optlist->co_falseop == OR)
2023					cpuctrl |= optlist->co_value;
2024				else if (optlist->co_falseop == BIC)
2025					cpuctrl &= ~optlist->co_value;
2026			}
2027		}
2028		++optlist;
2029	}
2030	return(cpuctrl);
2031}
2032#endif /* CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 || XSCALE*/
2033
2034#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8)
/*
 * Boot options shared by ARM6/7/8-class CPUs (unified I/D cache and
 * write buffer control).
 */
struct cpu_option arm678_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2046
2047#endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
2048
2049#ifdef CPU_ARM7TDMI
/* ARM7TDMI-specific boot options (cache, write buffer, CP clock bit). */
struct cpu_option arm7tdmi_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }
};
2061
2062void
2063arm7tdmi_setup(args)
2064	char *args;
2065{
2066	int cpuctrl;
2067
2068	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2069		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2070		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2071
2072	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2073	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
2074
2075#ifdef __ARMEB__
2076	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2077#endif
2078
2079	/* Clear out the cache */
2080	cpu_idcache_wbinv_all();
2081
2082	/* Set the control register */
2083	ctrl = cpuctrl;
2084	cpu_control(0xffffffff, cpuctrl);
2085}
2086#endif	/* CPU_ARM7TDMI */
2087
2088#ifdef CPU_ARM8
/* ARM8-specific boot options (cache, write buffer, branch prediction). */
struct cpu_option arm8_options[] = {
	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2101
2102void
2103arm8_setup(args)
2104	char *args;
2105{
2106	int integer;
2107	int cpuctrl, cpuctrlmask;
2108	int clocktest;
2109	int setclock = 0;
2110
2111	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2112		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2113		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2114	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2115		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2116		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2117		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
2118		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
2119
2120#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2121	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2122#endif
2123
2124	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2125	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
2126
2127#ifdef __ARMEB__
2128	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2129#endif
2130
2131	/* Get clock configuration */
2132	clocktest = arm8_clock_config(0, 0) & 0x0f;
2133
2134	/* Special ARM8 clock and test configuration */
2135	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2136		clocktest = 0;
2137		setclock = 1;
2138	}
2139	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2140		if (integer)
2141			clocktest |= 0x01;
2142		else
2143			clocktest &= ~(0x01);
2144		setclock = 1;
2145	}
2146	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2147		if (integer)
2148			clocktest |= 0x02;
2149		else
2150			clocktest &= ~(0x02);
2151		setclock = 1;
2152	}
2153	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
2154		clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
2155		setclock = 1;
2156	}
2157	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
2158		clocktest |= (integer & 7) << 5;
2159		setclock = 1;
2160	}
2161
2162	/* Clear out the cache */
2163	cpu_idcache_wbinv_all();
2164
2165	/* Set the control register */
2166	ctrl = cpuctrl;
2167	cpu_control(0xffffffff, cpuctrl);
2168
2169	/* Set the clock/test register */
2170	if (setclock)
2171		arm8_clock_config(0x7f, clocktest);
2172}
2173#endif	/* CPU_ARM8 */
2174
2175#ifdef CPU_ARM9
/* ARM9 boot options (separate I/D caches, write buffer). */
struct cpu_option arm9_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2187
2188void
2189arm9_setup(args)
2190	char *args;
2191{
2192	int cpuctrl, cpuctrlmask;
2193
2194	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2195	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2196	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2197	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
2198	    CPU_CONTROL_ROUNDROBIN;
2199	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2200		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2201		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2202		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2203		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2204		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
2205		 | CPU_CONTROL_ROUNDROBIN;
2206
2207#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2208	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2209#endif
2210
2211	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
2212
2213#ifdef __ARMEB__
2214	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2215#endif
2216	if (vector_page == ARM_VECTORS_HIGH)
2217		cpuctrl |= CPU_CONTROL_VECRELOC;
2218
2219	/* Clear out the cache */
2220	cpu_idcache_wbinv_all();
2221
2222	/* Set the control register */
2223	cpu_control(cpuctrlmask, cpuctrl);
2224	ctrl = cpuctrl;
2225
2226}
2227#endif	/* CPU_ARM9 */
2228
2229#if defined(CPU_ARM9E) || defined(CPU_ARM10)
/* ARM9E/ARM10 boot options (separate I/D caches, write buffer). */
struct cpu_option arm10_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2241
2242void
2243arm10_setup(args)
2244	char *args;
2245{
2246	int cpuctrl, cpuctrlmask;
2247
2248	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2249	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2250	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
2251	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2252	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2253	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2254	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2255	    | CPU_CONTROL_BPRD_ENABLE
2256	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2257
2258#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2259	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2260#endif
2261
2262	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
2263
2264#ifdef __ARMEB__
2265	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2266#endif
2267
2268	/* Clear out the cache */
2269	cpu_idcache_wbinv_all();
2270
2271	/* Now really make sure they are clean.  */
2272	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2273
2274	if (vector_page == ARM_VECTORS_HIGH)
2275		cpuctrl |= CPU_CONTROL_VECRELOC;
2276
2277	/* Set the control register */
2278	ctrl = cpuctrl;
2279	cpu_control(0xffffffff, cpuctrl);
2280
2281	/* And again. */
2282	cpu_idcache_wbinv_all();
2283}
2284#endif	/* CPU_ARM9E || CPU_ARM10 */
2285
2286#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
/* ARM11 boot options (separate I/D caches only). */
struct cpu_option arm11_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2295
/*
 * arm11x6_setup:
 *
 *	Configure the control and auxiliary control registers of an
 *	ARM1136/ARM1176 core, applying errata workarounds for the
 *	affected revisions.
 */
void
arm11x6_setup(char *args)
{
	int cpuctrl, cpuctrl_wax;
	uint32_t auxctrl, auxctrl_wax;
	uint32_t tmp, tmp2;
	uint32_t sbz=0;		/* "should be zero" operand for the MCRs below */
	uint32_t cpuid;

	cpuid = cpufunc_id();

	/* Default control word: MMU, caches, write buffer, late aborts. */
	cpuctrl =
		CPU_CONTROL_MMU_ENABLE  |
		CPU_CONTROL_DC_ENABLE   |
		CPU_CONTROL_WBUF_ENABLE |
		CPU_CONTROL_32BP_ENABLE |
		CPU_CONTROL_32BD_ENABLE |
		CPU_CONTROL_LABT_ENABLE |
		CPU_CONTROL_SYST_ENABLE |
		CPU_CONTROL_IC_ENABLE;

	/*
	 * "write as existing" bits: these positions are preserved as-is
	 * when the control register is written (the inverse of this word
	 * is the write mask passed to cpu_control() below).
	 */
	cpuctrl_wax =
		(3 << 30) | /* SBZ */
		(1 << 29) | /* FA */
		(1 << 28) | /* TR */
		(3 << 26) | /* SBZ */
		(3 << 19) | /* SBZ */
		(1 << 17);  /* SBZ */

	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;

	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Relocate the exception vectors if they live at the high address. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	auxctrl = 0;
	auxctrl_wax = ~0;
	/*
	 * This option enables the workaround for the 364296 ARM1136
	 * r0pX errata (possible cache data corruption with
	 * hit-under-miss enabled). It sets the undocumented bit 31 in
	 * the auxiliary control register and the FI bit in the control
	 * register, thus disabling hit-under-miss without putting the
	 * processor into full low interrupt latency mode. ARM11MPCore
	 * is not affected.
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
		cpuctrl |= CPU_CONTROL_FI_ENABLE;
		auxctrl = ARM1136_AUXCTL_PFI;
		auxctrl_wax = ~ARM1136_AUXCTL_PFI;
	}

	/*
	 * Enable an errata workaround for ARM1176 r0 parts.
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
		auxctrl = ARM1176_AUXCTL_PHD;
		auxctrl_wax = ~ARM1176_AUXCTL_PHD;
	}

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean (CP15 c7/c7/0 invalidate). */
	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));

	/*
	 * Open up coprocessor access (CP15 c1/c0/2) so the VFP detection
	 * code can find the VFP if it's fitted.
	 */
	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));

	/* Record and program the new control register value. */
	ctrl = cpuctrl;
	cpu_control(~cpuctrl_wax, cpuctrl);

	/*
	 * Read-modify-write the auxiliary control register: keep the
	 * auxctrl_wax bits, merge in auxctrl, and write back only if
	 * the value actually changed.
	 */
	__asm volatile ("mrc	p15, 0, %0, c1, c0, 1\n\t"
			"and	%1, %0, %2\n\t"
			"orr	%1, %1, %3\n\t"
			"teq	%0, %1\n\t"
			"mcrne	p15, 0, %1, c1, c0, 1\n\t"
			: "=r"(tmp), "=r"(tmp2) :
			  "r"(auxctrl_wax), "r"(auxctrl));

	/* And again. */
	cpu_idcache_wbinv_all();
}
2390#endif  /* CPU_ARM1136 || CPU_ARM1176 */
2391
2392#ifdef CPU_MV_PJ4B
2393void
2394pj4bv7_setup(args)
2395	char *args;
2396{
2397	int cpuctrl;
2398
2399	pj4b_config();
2400
2401	cpuctrl = CPU_CONTROL_MMU_ENABLE;
2402#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2403	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2404#endif
2405	cpuctrl |= CPU_CONTROL_DC_ENABLE;
2406	cpuctrl |= (0xf << 3);
2407	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
2408	cpuctrl |= CPU_CONTROL_IC_ENABLE;
2409	if (vector_page == ARM_VECTORS_HIGH)
2410		cpuctrl |= CPU_CONTROL_VECRELOC;
2411	cpuctrl |= (0x5 << 16) | (1 < 22);
2412	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
2413
2414	/* Clear out the cache */
2415	cpu_idcache_wbinv_all();
2416
2417	/* Set the control register */
2418	ctrl = cpuctrl;
2419	cpu_control(0xFFFFFFFF, cpuctrl);
2420
2421	/* And again. */
2422	cpu_idcache_wbinv_all();
2423}
2424#endif /* CPU_MV_PJ4B */
2425
2426#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
2427
2428void
2429cortexa_setup(char *args)
2430{
2431	int cpuctrl, cpuctrlmask;
2432
2433	cpuctrlmask = CPU_CONTROL_MMU_ENABLE |     /* MMU enable         [0] */
2434	    CPU_CONTROL_AFLT_ENABLE |    /* Alignment fault    [1] */
2435	    CPU_CONTROL_DC_ENABLE |      /* DCache enable      [2] */
2436	    CPU_CONTROL_BPRD_ENABLE |    /* Branch prediction [11] */
2437	    CPU_CONTROL_IC_ENABLE |      /* ICache enable     [12] */
2438	    CPU_CONTROL_VECRELOC;        /* Vector relocation [13] */
2439
2440	cpuctrl = CPU_CONTROL_MMU_ENABLE |
2441	    CPU_CONTROL_IC_ENABLE |
2442	    CPU_CONTROL_DC_ENABLE |
2443	    CPU_CONTROL_BPRD_ENABLE;
2444
2445#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2446	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2447#endif
2448
2449	/* Switch to big endian */
2450#ifdef __ARMEB__
2451	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2452#endif
2453
2454	/* Check if the vector page is at the high address (0xffff0000) */
2455	if (vector_page == ARM_VECTORS_HIGH)
2456		cpuctrl |= CPU_CONTROL_VECRELOC;
2457
2458	/* Clear out the cache */
2459	cpu_idcache_wbinv_all();
2460
2461	/* Set the control register */
2462	ctrl = cpuctrl;
2463	cpu_control(cpuctrlmask, cpuctrl);
2464
2465	/* And again. */
2466	cpu_idcache_wbinv_all();
2467#ifdef SMP
2468	armv7_auxctrl((1 << 6) | (1 << 0), (1 << 6) | (1 << 0)); /* Enable SMP + TLB broadcasting  */
2469#endif
2470}
2471#endif  /* CPU_CORTEXA */
2472
2473
2474#ifdef CPU_SA110
/* SA-110 boot options (separate I/D caches, write buffer). */
struct cpu_option sa110_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2490
2491void
2492sa110_setup(args)
2493	char *args;
2494{
2495	int cpuctrl, cpuctrlmask;
2496
2497	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2498		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2499		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2500		 | CPU_CONTROL_WBUF_ENABLE;
2501	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2502		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2503		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2504		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2505		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2506		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2507		 | CPU_CONTROL_CPCLK;
2508
2509#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2510	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2511#endif
2512
2513	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
2514
2515#ifdef __ARMEB__
2516	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2517#endif
2518
2519	/* Clear out the cache */
2520	cpu_idcache_wbinv_all();
2521
2522	/* Set the control register */
2523	ctrl = cpuctrl;
2524/*	cpu_control(cpuctrlmask, cpuctrl);*/
2525	cpu_control(0xffffffff, cpuctrl);
2526
2527	/*
2528	 * enable clockswitching, note that this doesn't read or write to r0,
2529	 * r0 is just to make it valid asm
2530	 */
2531	__asm ("mcr 15, 0, r0, c15, c1, 2");
2532}
2533#endif	/* CPU_SA110 */
2534
2535#if defined(CPU_SA1100) || defined(CPU_SA1110)
/* SA-1100/SA-1110 boot options (separate I/D caches, write buffer). */
struct cpu_option sa11x0_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2551
2552void
2553sa11x0_setup(args)
2554	char *args;
2555{
2556	int cpuctrl, cpuctrlmask;
2557
2558	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2559		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2560		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2561		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
2562	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2563		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2564		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2565		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2566		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2567		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2568		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2569
2570#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2571	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2572#endif
2573
2574
2575	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
2576
2577#ifdef __ARMEB__
2578	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2579#endif
2580
2581	if (vector_page == ARM_VECTORS_HIGH)
2582		cpuctrl |= CPU_CONTROL_VECRELOC;
2583	/* Clear out the cache */
2584	cpu_idcache_wbinv_all();
2585	/* Set the control register */
2586	ctrl = cpuctrl;
2587	cpu_control(0xffffffff, cpuctrl);
2588}
2589#endif	/* CPU_SA1100 || CPU_SA1110 */
2590
2591#if defined(CPU_FA526) || defined(CPU_FA626TE)
/* FA526/FA626TE boot options (separate I/D caches, write buffer). */
struct cpu_option fa526_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE |
					   CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE |
					   CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE |
					   CPU_CONTROL_DC_ENABLE) },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2606
2607void
2608fa526_setup(char *args)
2609{
2610	int cpuctrl, cpuctrlmask;
2611
2612	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2613		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2614		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2615		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
2616		| CPU_CONTROL_BPRD_ENABLE;
2617	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2618		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2619		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2620		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2621		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2622		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2623		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2624
2625#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2626	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2627#endif
2628
2629	cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
2630
2631#ifdef __ARMEB__
2632	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2633#endif
2634
2635	if (vector_page == ARM_VECTORS_HIGH)
2636		cpuctrl |= CPU_CONTROL_VECRELOC;
2637
2638	/* Clear out the cache */
2639	cpu_idcache_wbinv_all();
2640
2641	/* Set the control register */
2642	ctrl = cpuctrl;
2643	cpu_control(0xffffffff, cpuctrl);
2644}
2645#endif	/* CPU_FA526 || CPU_FA626TE */
2646
2647
2648#if defined(CPU_IXP12X0)
/* IXP12x0 boot options (separate I/D caches, write buffer). */
struct cpu_option ixp12x0_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2660
2661void
2662ixp12x0_setup(args)
2663	char *args;
2664{
2665	int cpuctrl, cpuctrlmask;
2666
2667
2668	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
2669		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
2670		 | CPU_CONTROL_IC_ENABLE;
2671
2672	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
2673		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2674		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
2675		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
2676		 | CPU_CONTROL_VECRELOC;
2677
2678#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2679	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2680#endif
2681
2682	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
2683
2684#ifdef __ARMEB__
2685	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2686#endif
2687
2688	if (vector_page == ARM_VECTORS_HIGH)
2689		cpuctrl |= CPU_CONTROL_VECRELOC;
2690
2691	/* Clear out the cache */
2692	cpu_idcache_wbinv_all();
2693
2694	/* Set the control register */
2695	ctrl = cpuctrl;
2696	/* cpu_control(0xffffffff, cpuctrl); */
2697	cpu_control(cpuctrlmask, cpuctrl);
2698}
2699#endif /* CPU_IXP12X0 */
2700
2701#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2702  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
2703  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
/* XScale boot options (separate I/D caches, branch prediction). */
struct cpu_option xscale_options[] = {
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2718
2719void
2720xscale_setup(args)
2721	char *args;
2722{
2723	uint32_t auxctl;
2724	int cpuctrl, cpuctrlmask;
2725
2726	/*
2727	 * The XScale Write Buffer is always enabled.  Our option
2728	 * is to enable/disable coalescing.  Note that bits 6:3
2729	 * must always be enabled.
2730	 */
2731
2732	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2733		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2734		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2735		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
2736		 | CPU_CONTROL_BPRD_ENABLE;
2737	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2738		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2739		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2740		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2741		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2742		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2743		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
2744		 CPU_CONTROL_L2_ENABLE;
2745
2746#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2747	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2748#endif
2749
2750	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
2751
2752#ifdef __ARMEB__
2753	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2754#endif
2755
2756	if (vector_page == ARM_VECTORS_HIGH)
2757		cpuctrl |= CPU_CONTROL_VECRELOC;
2758#ifdef CPU_XSCALE_CORE3
2759	cpuctrl |= CPU_CONTROL_L2_ENABLE;
2760#endif
2761
2762	/* Clear out the cache */
2763	cpu_idcache_wbinv_all();
2764
2765	/*
2766	 * Set the control register.  Note that bits 6:3 must always
2767	 * be set to 1.
2768	 */
2769	ctrl = cpuctrl;
2770/*	cpu_control(cpuctrlmask, cpuctrl);*/
2771	cpu_control(0xffffffff, cpuctrl);
2772
2773	/* Make sure write coalescing is turned on */
2774	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
2775		: "=r" (auxctl));
2776#ifdef XSCALE_NO_COALESCE_WRITES
2777	auxctl |= XSCALE_AUXCTL_K;
2778#else
2779	auxctl &= ~XSCALE_AUXCTL_K;
2780#endif
2781#ifdef CPU_XSCALE_CORE3
2782	auxctl |= XSCALE_AUXCTL_LLR;
2783	auxctl |= XSCALE_AUXCTL_MD_MASK;
2784#endif
2785	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
2786		: : "r" (auxctl));
2787}
2788#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
2789	   CPU_XSCALE_80219 */
2790