cpufunc.c revision 262420
1/*	$NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $	*/
2
3/*-
4 * arm7tdmi support code Copyright (c) 2001 John Fremlin
5 * arm8 support code Copyright (c) 1997 ARM Limited
6 * arm8 support code Copyright (c) 1997 Causality Limited
7 * arm9 support code Copyright (C) 2001 ARM Ltd
8 * Copyright (c) 1997 Mark Brinicombe.
9 * Copyright (c) 1997 Causality Limited
10 * All rights reserved.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 *    notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 *    notice, this list of conditions and the following disclaimer in the
19 *    documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 *    must display the following acknowledgement:
22 *	This product includes software developed by Causality Limited.
23 * 4. The name of Causality Limited may not be used to endorse or promote
24 *    products derived from this software without specific prior written
25 *    permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * SUCH DAMAGE.
38 *
39 * RiscBSD kernel project
40 *
41 * cpufuncs.c
42 *
43 * C functions for supporting CPU / MMU / TLB specific operations.
44 *
45 * Created      : 30/01/97
46 */
47#include <sys/cdefs.h>
48__FBSDID("$FreeBSD: head/sys/arm/arm/cpufunc.c 262420 2014-02-24 01:41:58Z ian $");
49
50#include <sys/param.h>
51#include <sys/systm.h>
52#include <sys/lock.h>
53#include <sys/mutex.h>
54#include <sys/bus.h>
55#include <machine/bus.h>
56#include <machine/cpu.h>
57#include <machine/disassem.h>
58
59#include <vm/vm.h>
60#include <vm/pmap.h>
61#include <vm/uma.h>
62
63#include <machine/cpuconf.h>
64#include <machine/cpufunc.h>
65#include <machine/bootconfig.h>
66
67#ifdef CPU_XSCALE_80200
68#include <arm/xscale/i80200/i80200reg.h>
69#include <arm/xscale/i80200/i80200var.h>
70#endif
71
72#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
73#include <arm/xscale/i80321/i80321reg.h>
74#include <arm/xscale/i80321/i80321var.h>
75#endif
76
77/*
78 * Some definitions in i81342reg.h clash with i80321reg.h.
79 * This only happens for the LINT kernel. As it happens,
80 * we don't need anything from i81342reg.h that we already
81 * got from somewhere else during a LINT compile.
82 */
83#if defined(CPU_XSCALE_81342) && !defined(COMPILING_LINT)
84#include <arm/xscale/i8134x/i81342reg.h>
85#endif
86
87#ifdef CPU_XSCALE_IXP425
88#include <arm/xscale/ixp425/ixp425reg.h>
89#include <arm/xscale/ixp425/ixp425var.h>
90#endif
91
/* PRIMARY CACHE VARIABLES */
/*
 * L1 ("primary") instruction-cache geometry.  Filled in by CPU
 * identification/setup code elsewhere in this file (not in this chunk).
 */
int	arm_picache_size;
int	arm_picache_line_size;
int	arm_picache_ways;

/* L1 data-cache geometry; also describes a unified L1 cache. */
int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;
int	arm_pdcache_ways;

/* Cache type and unified-vs-split flag, as probed from the CPU. */
int	arm_pcache_type;
int	arm_pcache_unified;

/*
 * Data-cache line size and companion mask used for aligning addresses
 * to cache-line boundaries (presumably arm_dcache_align - 1; set by
 * the CPU setup code — confirm there).
 */
int	arm_dcache_align;
int	arm_dcache_align_mask;

/*
 * Per-level cache description words (one entry per cache level/type
 * slot) plus overall level/location-of-coherency info.  Populated by
 * identification code not visible in this chunk.
 */
u_int	arm_cache_level;
u_int	arm_cache_type[14];
u_int	arm_cache_loc;

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;
/* NOTE(review): looks like a cached copy of the CPU control-register
 * value maintained by the setup routines — confirm against users. */
int ctrl;
114
115#ifdef CPU_ARM7TDMI
/*
 * CPU/MMU/TLB/cache primitive dispatch table for ARM7TDMI cores.
 * Initializers are positional, so entry order must match struct
 * cpu_functions exactly; the trailing comment on each line names the
 * slot.  (void *) casts adapt routines whose prototypes don't match
 * the slot (e.g. the no-argument nullop in range-operation slots, which
 * simply ignores the range arguments).  A single flushID routine backs
 * the I, D and ID TLB and cache slots.
 */
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm7tdmi_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm7tdmi_tlb_flushID,		/* tlb_flushID		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushID_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushI		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushI_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushD		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm7tdmi_cache_flushID,		/* dcache_wbinv_all	*/
	(void *)arm7tdmi_cache_flushID,	/* dcache_wbinv_range	*/
	(void *)arm7tdmi_cache_flushID,	/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	cpufunc_nullop,			/* idcache_inv_all	*/
	arm7tdmi_cache_flushID,		/* idcache_wbinv_all	*/
	(void *)arm7tdmi_cache_flushID,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	/* Unlike the other tables here, ARM7TDMI uses late_abort_fixup. */
	late_abort_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm7tdmi_context_switch,	/* context_switch	*/

	arm7tdmi_setup			/* cpu setup		*/

};
176#endif	/* CPU_ARM7TDMI */
177
178#ifdef CPU_ARM8
/*
 * CPU/MMU/TLB/cache primitive dispatch table for ARM8 cores.
 * Positional initializer; slot names are in the trailing comments.
 * (void *) casts adapt prototype mismatches (range arguments to the
 * whole-cache routines are ignored).  The /*XXX*/ entry below notes
 * that dcache_inv_range falls back to a full purge (write-back +
 * invalidate) rather than a pure invalidate.
 */
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm8_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm8_tlb_flushID,		/* tlb_flushID		*/
	arm8_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	arm8_tlb_flushID,		/* tlb_flushI		*/
	arm8_tlb_flushID_SE,		/* tlb_flushI_SE	*/
	arm8_tlb_flushID,		/* tlb_flushD		*/
	arm8_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm8_cache_purgeID,		/* dcache_wbinv_all	*/
	(void *)arm8_cache_purgeID,	/* dcache_wbinv_range	*/
/*XXX*/	(void *)arm8_cache_purgeID,	/* dcache_inv_range	*/
	(void *)arm8_cache_cleanID,	/* dcache_wb_range	*/

	cpufunc_nullop,			/* idcache_inv_all	*/
	arm8_cache_purgeID,		/* idcache_wbinv_all	*/
	(void *)arm8_cache_purgeID,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm8_context_switch,		/* context_switch	*/

	arm8_setup			/* cpu setup		*/
};
238#endif	/* CPU_ARM8 */
239
240#ifdef CPU_ARM9
/*
 * CPU/MMU/TLB/cache primitive dispatch table for ARM9 cores.
 * Mixes ARM9-specific cache routines with generic armv4 TLB/writebuf
 * helpers.  Positional initializer; slot names are in the trailing
 * comments.  tlb_flushI_SE falls back to a full I-TLB flush (the cast
 * adapts the no-argument armv4_tlb_flushI to the single-entry slot).
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	arm9_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm9_icache_sync_all,		/* icache_sync_all	*/
	arm9_icache_sync_range,		/* icache_sync_range	*/

	arm9_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	arm9_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	arm9_dcache_inv_range,		/* dcache_inv_range	*/
	arm9_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	arm9_idcache_wbinv_all,		/* idcache_wbinv_all	*/
	arm9_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm9_context_switch,		/* context_switch	*/

	arm9_setup			/* cpu setup		*/

};
301#endif /* CPU_ARM9 */
302
303#if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * CPU/MMU/TLB/cache primitive dispatch table for ARMv5 cores with the
 * "extended cache" (EC) operations (ARM9E/ARM10 family).  Uses the
 * armv5_ec_* cache routines with generic armv4 TLB/writebuf helpers
 * and arm10 single-entry TLB flushes.  Positional initializer; slot
 * names are in the trailing comments.  No L2 cache is driven here —
 * all l2cache slots are no-ops.
 */
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	armv5_ec_setttb,		/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_all,	/* icache_sync_all	*/
	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	armv5_ec_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv5_ec_dcache_inv_range,	/* dcache_inv_range	*/
	armv5_ec_dcache_wb_range,	/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv5_ec_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	cpufunc_nullop,                 /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/

};
365
/*
 * CPU/MMU/TLB/cache primitive dispatch table for Marvell Sheeva cores.
 * Based on the armv5_ec table above but with Sheeva-specific range
 * operations, a real L2 cache driver (sheeva_l2cache_*), and a real
 * sleep routine.  Positional initializer; slot names are in the
 * trailing comments.
 */
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	sheeva_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_all,	/* icache_sync_all	*/
	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	sheeva_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	sheeva_dcache_inv_range,	/* dcache_inv_range	*/
	sheeva_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	sheeva_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	sheeva_l2cache_wbinv_all,	/* l2cache_wbinv_all    */
	sheeva_l2cache_wbinv_range,	/* l2cache_wbinv_range  */
	sheeva_l2cache_inv_range,	/* l2cache_inv_range    */
	sheeva_l2cache_wb_range,	/* l2cache_wb_range     */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	sheeva_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/
};
426#endif /* CPU_ARM9E || CPU_ARM10 */
427
428#ifdef CPU_ARM10
/*
 * CPU/MMU/TLB/cache primitive dispatch table for ARM10 cores.
 * ARM10-specific cache routines layered over generic armv4 TLB and
 * write-buffer helpers.  Positional initializer; slot names are in
 * the trailing comments.  No L2 cache is driven — all l2cache slots
 * are no-ops.
 */
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	arm10_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm10_icache_sync_all,		/* icache_sync_all	*/
	arm10_icache_sync_range,	/* icache_sync_range	*/

	arm10_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	arm10_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	arm10_dcache_inv_range,		/* dcache_inv_range	*/
	arm10_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	arm10_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	arm10_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/

};
489#endif /* CPU_ARM10 */
490
491#ifdef CPU_MV_PJ4B
/*
 * CPU/MMU/TLB/cache primitive dispatch table for Marvell PJ4B (ARMv7)
 * cores.  Uses the generic armv7 TLB/cache routines plus PJ4B-specific
 * prefetch/branch-target helpers; cpwait is implemented as a
 * write-buffer drain.  Positional initializer; slot names are in the
 * trailing comments.
 */
struct cpu_functions pj4bv7_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	arm11_drain_writebuf,		/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	pj4b_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv7_tlb_flushID,		/* tlb_flushID		*/
	armv7_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv7_tlb_flushID,		/* tlb_flushI		*/
	armv7_tlb_flushID_SE,		/* tlb_flushI_SE	*/
	armv7_tlb_flushID,		/* tlb_flushD		*/
	armv7_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */
	/*
	 * NOTE(review): the icache_sync_all slot points at
	 * armv7_idcache_wbinv_all (a full I+D write-back/invalidate)
	 * rather than a pure icache op — presumably intentional, but
	 * confirm against the armv7 cache routines.
	 */
	armv7_idcache_wbinv_all,	/* icache_sync_all	*/
	armv7_icache_sync_range,	/* icache_sync_range	*/

	armv7_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	armv7_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv7_dcache_inv_range,		/* dcache_inv_range	*/
	armv7_dcache_wb_range,		/* dcache_wb_range	*/

	armv7_idcache_inv_all,		/* idcache_inv_all	*/
	armv7_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv7_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	(void *)cpufunc_nullop,		/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	pj4b_drain_readbuf,		/* flush_prefetchbuf	*/
	arm11_drain_writebuf,		/* drain_writebuf	*/
	pj4b_flush_brnchtgt_all,	/* flush_brnchtgt_C	*/
	pj4b_flush_brnchtgt_va,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm11_context_switch,		/* context_switch	*/

	pj4bv7_setup			/* cpu setup		*/
};
551#endif /* CPU_MV_PJ4B */
552
553#ifdef CPU_SA110
/*
 * CPU/MMU/TLB/cache primitive dispatch table for StrongARM SA-110.
 * Shared sa1_* cache/TLB routines with generic armv4 helpers.
 * Positional initializer; slot names are in the trailing comments.
 * The /*XXX*/ entry marks dcache_inv_range falling back to a full
 * purge (write-back + invalidate) instead of a pure invalidate.
 */
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_flushID,		/* idcache_inv_all	*/
	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	sa110_context_switch,		/* context_switch	*/

	sa110_setup			/* cpu setup		*/
};
613#endif	/* CPU_SA110 */
614
615#if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * CPU/MMU/TLB/cache primitive dispatch table for StrongARM SA-1100 /
 * SA-1110.  Identical to the SA-110 table except for the read-buffer
 * drain used as flush_prefetchbuf, a real sleep routine, and the
 * sa11x0-specific context switch and setup.  Positional initializer;
 * slot names are in the trailing comments.
 */
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_flushID,		/* idcache_inv_all	*/
	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	sa11x0_drain_readbuf,		/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	sa11x0_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	sa11x0_context_switch,		/* context_switch	*/

	sa11x0_setup			/* cpu setup		*/
};
675#endif	/* CPU_SA1100 || CPU_SA1110 */
676
677#ifdef CPU_IXP12X0
/*
 * CPU/MMU/TLB/cache primitive dispatch table for Intel IXP12x0.
 * Reuses the StrongARM sa1_* cache/TLB routines; differs from the
 * SA tables only in its read-buffer drain, context switch and setup.
 * Positional initializer; slot names are in the trailing comments.
 */
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_flushID,		/* idcache_inv_all	*/
	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	ixp12x0_drain_readbuf,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	ixp12x0_context_switch,		/* context_switch	*/

	ixp12x0_setup			/* cpu setup		*/
};
737#endif	/* CPU_IXP12X0 */
738
739#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
740  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
741  defined(CPU_XSCALE_80219)
742
/*
 * CPU/MMU/TLB/cache primitive dispatch table for XScale cores
 * (80200/80321/80219/PXA2x0/IXP425).  Uses the XScale-specific
 * coprocessor-wait (cpwait), control and cache routines over generic
 * armv4 TLB/writebuf helpers.  Positional initializer; slot names are
 * in the trailing comments.
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscale_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscale_cache_syncI,		/* icache_sync_all	*/
	xscale_cache_syncI_rng,		/* icache_sync_range	*/

	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_flushID,		/* idcache_inv_all	*/
	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all 	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscale_context_switch,		/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
802#endif
803/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
804   CPU_XSCALE_80219 */
805
806#ifdef CPU_XSCALE_81342
/*
 * CPU/MMU/TLB/cache primitive dispatch table for XScale core 3
 * (i81342).  Like the generic XScale table but with the xscalec3_*
 * cache routines and, unlike most tables in this file, a real L2
 * cache driver (xscalec3_l2cache_*).  Positional initializer; slot
 * names are in the trailing comments.
 */
struct cpu_functions xscalec3_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscalec3_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscalec3_cache_syncI,		/* icache_sync_all	*/
	xscalec3_cache_syncI_rng,	/* icache_sync_range	*/

	xscalec3_cache_purgeD,		/* dcache_wbinv_all	*/
	xscalec3_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscalec3_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_flushID,		/* idcache_inv_all	*/
	xscalec3_cache_purgeID,		/* idcache_wbinv_all	*/
	xscalec3_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	xscalec3_l2cache_purge,		/* l2cache_wbinv_all	*/
	xscalec3_l2cache_purge_rng,	/* l2cache_wbinv_range	*/
	xscalec3_l2cache_flush_rng,	/* l2cache_inv_range	*/
	xscalec3_l2cache_clean_rng,	/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscalec3_context_switch,	/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
866#endif /* CPU_XSCALE_81342 */
867
868
869#if defined(CPU_FA526) || defined(CPU_FA626TE)
/*
 * CPU/MMU/TLB/cache primitive dispatch table for Faraday FA526 /
 * FA626TE cores.  FA526-specific cache, prefetch-buffer, branch-target
 * and sleep routines over generic armv4 TLB/writebuf helpers.
 * Positional initializer; slot names are in the trailing comments.
 */
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	fa526_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	fa526_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	fa526_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	fa526_icache_sync_all,		/* icache_sync_all	*/
	fa526_icache_sync_range,	/* icache_sync_range	*/

	fa526_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	fa526_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	fa526_dcache_inv_range,		/* dcache_inv_range	*/
	fa526_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	fa526_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	fa526_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	fa526_flush_prefetchbuf,	/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	fa526_flush_brnchtgt_E,		/* flush_brnchtgt_E	*/

	fa526_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	fa526_context_switch,		/* context_switch	*/

	fa526_setup			/* cpu setup 		*/
};
929#endif	/* CPU_FA526 || CPU_FA626TE */
930
931#if defined(CPU_ARM1136)
/*
 * CPU/MMU/TLB/cache primitive dispatch table for ARM1136 cores.
 * Shares the arm11x6_* cache/setup routines and arm11_* TLB/writebuf
 * helpers with the ARM1176 table below, including a real sleep
 * routine (arm11_sleep).  Positional initializer; slot names are in
 * the trailing comments.  No L2 cache is driven here.
 */
struct cpu_functions arm1136_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* Domain               */
	arm11x6_setttb,                 /* Setttb               */
	cpufunc_faultstatus,            /* Faultstatus          */
	cpufunc_faultaddress,           /* Faultaddress         */

	/* TLB functions */

	arm11_tlb_flushID,              /* tlb_flushID          */
	arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
	arm11_tlb_flushI,               /* tlb_flushI           */
	arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
	arm11_tlb_flushD,               /* tlb_flushD           */
	arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	arm11x6_icache_sync_all,        /* icache_sync_all      */
	arm11x6_icache_sync_range,      /* icache_sync_range    */

	arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
	armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
	armv6_dcache_inv_range,         /* dcache_inv_range     */
	armv6_dcache_wb_range,          /* dcache_wb_range      */

	armv6_idcache_inv_all,		/* idcache_inv_all	*/
	arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
	arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */

	(void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */

	/* Other functions */

	arm11x6_flush_prefetchbuf,      /* flush_prefetchbuf    */
	arm11_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	arm11_sleep,                  	/* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	arm11_context_switch,           /* context_switch       */

	arm11x6_setup                   /* cpu setup            */
};
992#endif /* CPU_ARM1136 */
993#if defined(CPU_ARM1176)
/*
 * Dispatch table for the ARM1176 core (ARM11 "x6" family).
 * These are positional initializers for struct cpu_functions (declared
 * elsewhere); the slot order must match that declaration exactly, so
 * do not reorder entries.
 */
struct cpu_functions arm1176_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* Domain               */
	arm11x6_setttb,                 /* Setttb               */
	cpufunc_faultstatus,            /* Faultstatus          */
	cpufunc_faultaddress,           /* Faultaddress         */

	/* TLB functions */

	arm11_tlb_flushID,              /* tlb_flushID          */
	arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
	arm11_tlb_flushI,               /* tlb_flushI           */
	arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
	arm11_tlb_flushD,               /* tlb_flushD           */
	arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	arm11x6_icache_sync_all,        /* icache_sync_all      */
	arm11x6_icache_sync_range,      /* icache_sync_range    */

	arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
	armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
	armv6_dcache_inv_range,         /* dcache_inv_range     */
	armv6_dcache_wb_range,          /* dcache_wb_range      */

	armv6_idcache_inv_all,		/* idcache_inv_all	*/
	arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
	arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */

	/* No L2 cache controller on this core: all L2 ops are no-ops. */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */

	/* Other functions */

	arm11x6_flush_prefetchbuf,      /* flush_prefetchbuf    */
	arm11_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	arm11x6_sleep,                  /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	arm11_context_switch,           /* context_switch       */

	arm11x6_setup                   /* cpu setup            */
};
#endif /* CPU_ARM1176 */
1055
1056#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Dispatch table for Cortex-A (and Qualcomm Krait) cores.
 * Positional initializers for struct cpu_functions (declared elsewhere);
 * slot order must match that declaration exactly, so do not reorder.
 */
struct cpu_functions cortexa_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* Domain               */
	armv7_setttb,                   /* Setttb               */
	cpufunc_faultstatus,            /* Faultstatus          */
	cpufunc_faultaddress,           /* Faultaddress         */

	/* TLB functions */

	armv7_tlb_flushID,              /* tlb_flushID          */
	armv7_tlb_flushID_SE,           /* tlb_flushID_SE       */
	arm11_tlb_flushI,               /* tlb_flushI           */
	arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
	arm11_tlb_flushD,               /* tlb_flushD           */
	arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	/*
	 * NOTE(review): the icache_sync_all slot is wired to the combined
	 * I+D wbinv-all routine rather than an icache-only sync; presumably
	 * intentional (I$ sync requires D$ write-back first) — confirm.
	 */
	armv7_idcache_wbinv_all,         /* icache_sync_all      */
	armv7_icache_sync_range,        /* icache_sync_range    */

	armv7_dcache_wbinv_all,         /* dcache_wbinv_all     */
	armv7_dcache_wbinv_range,       /* dcache_wbinv_range   */
	armv7_dcache_inv_range,         /* dcache_inv_range     */
	armv7_dcache_wb_range,          /* dcache_wb_range      */

	armv7_idcache_inv_all,		/* idcache_inv_all	*/
	armv7_idcache_wbinv_all,        /* idcache_wbinv_all    */
	armv7_idcache_wbinv_range,      /* idcache_wbinv_range  */

	/*
	 * Note: For CPUs using the PL310 the L2 ops are filled in when the
	 * L2 cache controller is actually enabled.
	 */
	cpufunc_nullop,                 /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */

	/* Other functions */

	cpufunc_nullop,                 /* flush_prefetchbuf    */
	armv7_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	/* NOTE(review): ARM11 sleep handler used on an ARMv7 core — confirm. */
	arm11_sleep,                    /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	armv7_context_switch,           /* context_switch       */

	cortexa_setup                     /* cpu setup            */
};
#endif /* CPU_CORTEXA || CPU_KRAIT */
1122
1123/*
1124 * Global constants also used by locore.s
1125 */
1126
struct cpu_functions cpufuncs;		/* ops table selected by set_cpufuncs() */
u_int cputype;				/* cpufunc_id() & CPU_ID_CPU_MASK, set in set_cpufuncs() */
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
1130
1131#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) ||	\
1132  defined (CPU_ARM9E) || defined (CPU_ARM10) || defined (CPU_ARM1136) ||	\
1133  defined(CPU_ARM1176) || defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||		\
1134  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
1135  defined(CPU_FA526) || defined(CPU_FA626TE) || defined(CPU_MV_PJ4B) ||			\
1136  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
1137  defined(CPU_CORTEXA) || defined(CPU_KRAIT)
1138
static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int	arm_dcache_l2_nsets;	/* log2(number of D-cache sets) */
static int	arm_dcache_l2_assoc;	/* log2(D-cache associativity) */
static int	arm_dcache_l2_linesize;	/* log2(D-cache line size, bytes) */
1146
1147static void
1148get_cachetype_cp15()
1149{
1150	u_int ctype, isize, dsize, cpuid;
1151	u_int clevel, csize, i, sel;
1152	u_int multiplier;
1153	u_char type;
1154
1155	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
1156		: "=r" (ctype));
1157
1158	cpuid = cpufunc_id();
1159	/*
1160	 * ...and thus spake the ARM ARM:
1161	 *
1162	 * If an <opcode2> value corresponding to an unimplemented or
1163	 * reserved ID register is encountered, the System Control
1164	 * processor returns the value of the main ID register.
1165	 */
1166	if (ctype == cpuid)
1167		goto out;
1168
1169	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
1170		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
1171		    : "=r" (clevel));
1172		arm_cache_level = clevel;
1173		arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
1174		i = 0;
1175		while ((type = (clevel & 0x7)) && i < 7) {
1176			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
1177			    type == CACHE_SEP_CACHE) {
1178				sel = i << 1;
1179				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
1180				    : : "r" (sel));
1181				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
1182				    : "=r" (csize));
1183				arm_cache_type[sel] = csize;
1184				arm_dcache_align = 1 <<
1185				    (CPUV7_CT_xSIZE_LEN(csize) + 4);
1186				arm_dcache_align_mask = arm_dcache_align - 1;
1187			}
1188			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
1189				sel = (i << 1) | 1;
1190				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
1191				    : : "r" (sel));
1192				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
1193				    : "=r" (csize));
1194				arm_cache_type[sel] = csize;
1195			}
1196			i++;
1197			clevel >>= 3;
1198		}
1199	} else {
1200		if ((ctype & CPU_CT_S) == 0)
1201			arm_pcache_unified = 1;
1202
1203		/*
1204		 * If you want to know how this code works, go read the ARM ARM.
1205		 */
1206
1207		arm_pcache_type = CPU_CT_CTYPE(ctype);
1208
1209		if (arm_pcache_unified == 0) {
1210			isize = CPU_CT_ISIZE(ctype);
1211			multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
1212			arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
1213			if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
1214				if (isize & CPU_CT_xSIZE_M)
1215					arm_picache_line_size = 0; /* not present */
1216				else
1217					arm_picache_ways = 1;
1218			} else {
1219				arm_picache_ways = multiplier <<
1220				    (CPU_CT_xSIZE_ASSOC(isize) - 1);
1221			}
1222			arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
1223		}
1224
1225		dsize = CPU_CT_DSIZE(ctype);
1226		multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
1227		arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
1228		if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
1229			if (dsize & CPU_CT_xSIZE_M)
1230				arm_pdcache_line_size = 0; /* not present */
1231			else
1232				arm_pdcache_ways = 1;
1233		} else {
1234			arm_pdcache_ways = multiplier <<
1235			    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
1236		}
1237		arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
1238
1239		arm_dcache_align = arm_pdcache_line_size;
1240
1241		arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
1242		arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
1243		arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
1244		    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
1245
1246	out:
1247		arm_dcache_align_mask = arm_dcache_align - 1;
1248	}
1249}
#endif /* ARM7TDMI || ARM8 || ARM9 || ARM9E || ARM10 || ARM1136 || ARM1176 ||
	  XSCALE || FA526 || FA626TE || MV_PJ4B || CORTEXA || KRAIT */
1251
1252#if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
1253    defined(CPU_IXP12X0)
/* Cache information for CPUs without cache type registers. */
struct cachetab {
	u_int32_t ct_cpuid;		/* CPU_ID_CPU_MASK'ed CPU id */
	int	ct_pcache_type;		/* primary cache type */
	int	ct_pcache_unified;	/* nonzero if I/D unified */
	int	ct_pdcache_size;	/* D-cache size (bytes) */
	int	ct_pdcache_line_size;	/* D-cache line size (bytes) */
	int	ct_pdcache_ways;	/* D-cache associativity */
	int	ct_picache_size;	/* I-cache size (bytes) */
	int	ct_picache_line_size;	/* I-cache line size (bytes) */
	int	ct_picache_ways;	/* I-cache associativity */
};

/* Table is scanned by get_cachetype_table(); terminated by a zero cpuid. */
struct cachetab cachetab[] = {
    /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0}
};
1276
1277static void get_cachetype_table(void);
1278
1279static void
1280get_cachetype_table()
1281{
1282	int i;
1283	u_int32_t cpuid = cpufunc_id();
1284
1285	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
1286		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
1287			arm_pcache_type = cachetab[i].ct_pcache_type;
1288			arm_pcache_unified = cachetab[i].ct_pcache_unified;
1289			arm_pdcache_size = cachetab[i].ct_pdcache_size;
1290			arm_pdcache_line_size =
1291			    cachetab[i].ct_pdcache_line_size;
1292			arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
1293			arm_picache_size = cachetab[i].ct_picache_size;
1294			arm_picache_line_size =
1295			    cachetab[i].ct_picache_line_size;
1296			arm_picache_ways = cachetab[i].ct_picache_ways;
1297		}
1298	}
1299	arm_dcache_align = arm_pdcache_line_size;
1300
1301	arm_dcache_align_mask = arm_dcache_align - 1;
1302}
1303
#endif /* SA110 || SA1100 || SA1110 || IXP12X0 */
1305
1306/*
1307 * Cannot panic here as we may not have a console yet ...
1308 */
1309
/*
 * Identify the CPU from its ID register and install the matching
 * cpu_functions dispatch table, probe the cache geometry, and select
 * the appropriate pmap PTE initializer.  Returns 0 on success; panics
 * if the running CPU is not supported by this kernel configuration.
 */
int
set_cpufuncs()
{
	cputype = cpufunc_id();
	cputype &= CPU_ID_CPU_MASK;

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */

#ifdef CPU_ARM7TDMI
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
		cpufuncs = arm7tdmi_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	}
#endif
#ifdef CPU_ARM8
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x0000f000) == 0x00008000) {
		cpufuncs = arm8_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
		get_cachetype_cp15();
		pmap_pte_init_arm8();
		goto out;
	}
#endif	/* CPU_ARM8 */
#ifdef CPU_ARM9
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		/* Precompute set/index iterators for the arm9 cache loops. */
		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
		    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
#ifdef ARM9_CACHE_WRITE_THROUGH
		pmap_pte_init_arm9();
#else
		pmap_pte_init_generic();
#endif
		goto out;
	}
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E) || defined(CPU_ARM10)
	if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
	    cputype == CPU_ID_MV88FR571_41) {
		uint32_t sheeva_ctrl;

		sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
		    MV_L2_ENABLE);
		/*
		 * Workaround for Marvell MV78100 CPU: Cache prefetch
		 * mechanism may affect the cache coherency validity,
		 * so it needs to be disabled.
		 *
		 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
		 * L2 Prefetching Mechanism) for details.
		 */
		if (cputype == CPU_ID_MV88FR571_VD ||
		    cputype == CPU_ID_MV88FR571_41)
			sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;

		sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);

		cpufuncs = sheeva_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	} else if (cputype == CPU_ID_ARM926EJS || cputype == CPU_ID_ARM1026EJS) {
		cpufuncs = armv5_ec_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM9E || CPU_ARM10 */
#ifdef CPU_ARM10
	if (/* cputype == CPU_ID_ARM1020T || */
	    cputype == CPU_ID_ARM1020E) {
		/*
		 * Select write-through cacheing (this isn't really an
		 * option on ARM1020T).
		 */
		cpufuncs = arm10_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		/* Precompute set/index iterators for the arm10 cache loops. */
		arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm10_dcache_sets_max =
		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
		    arm10_dcache_sets_inc;
		arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM10 */
#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
	if (cputype == CPU_ID_ARM1136JS
	    || cputype == CPU_ID_ARM1136JSR1
	    || cputype == CPU_ID_ARM1176JZS) {
#ifdef CPU_ARM1136
		if (cputype == CPU_ID_ARM1136JS
		    || cputype == CPU_ID_ARM1136JSR1)
			cpufuncs = arm1136_cpufuncs;
#endif
#ifdef CPU_ARM1176
		if (cputype == CPU_ID_ARM1176JZS)
			cpufuncs = arm1176_cpufuncs;
#endif
		cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
		get_cachetype_cp15();

		pmap_pte_init_mmu_v6();

		goto out;
	}
#endif /* CPU_ARM1136 || CPU_ARM1176 */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	if (cputype == CPU_ID_CORTEXA5 ||
	    cputype == CPU_ID_CORTEXA7 ||
	    cputype == CPU_ID_CORTEXA8R1 ||
	    cputype == CPU_ID_CORTEXA8R2 ||
	    cputype == CPU_ID_CORTEXA8R3 ||
	    cputype == CPU_ID_CORTEXA9R1 ||
	    cputype == CPU_ID_CORTEXA9R2 ||
	    cputype == CPU_ID_CORTEXA9R3 ||
	    cputype == CPU_ID_CORTEXA15 ||
	    cputype == CPU_ID_KRAIT ) {
		cpufuncs = cortexa_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
		get_cachetype_cp15();

		pmap_pte_init_mmu_v6();
		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;
		goto out;
	}
#endif /* CPU_CORTEXA || CPU_KRAIT */

#if defined(CPU_MV_PJ4B)
	if (cputype == CPU_ID_MV88SV581X_V7 ||
	    cputype == CPU_ID_MV88SV584X_V7 ||
	    cputype == CPU_ID_ARM_88SV581X_V7) {
		cpufuncs = pj4bv7_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_mmu_v6();
		goto out;
	}
#endif /* CPU_MV_PJ4B */
#ifdef CPU_SA110
	if (cputype == CPU_ID_SA110) {
		cpufuncs = sa110_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
		get_cachetype_table();
		pmap_pte_init_sa1();
		goto out;
	}
#endif	/* CPU_SA110 */
#ifdef CPU_SA1100
	if (cputype == CPU_ID_SA1100) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_table();
		pmap_pte_init_sa1();
		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif	/* CPU_SA1100 */
#ifdef CPU_SA1110
	if (cputype == CPU_ID_SA1110) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_table();
		pmap_pte_init_sa1();
		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif	/* CPU_SA1110 */
#if defined(CPU_FA526) || defined(CPU_FA626TE)
	if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
		cpufuncs = fa526_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_cp15();
		pmap_pte_init_generic();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif	/* CPU_FA526 || CPU_FA626TE */
#ifdef CPU_IXP12X0
        if (cputype == CPU_ID_IXP1200) {
                cpufuncs = ixp12x0_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;
                get_cachetype_table();
                pmap_pte_init_sa1();
		goto out;
        }
#endif  /* CPU_IXP12X0 */
#ifdef CPU_XSCALE_80200
	if (cputype == CPU_ID_80200) {
		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;

		i80200_icu_init();

#if defined(XSCALE_CCLKCFG)
		/*
		 * Crank CCLKCFG to maximum legal value.
		 */
		__asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
			:
			: "r" (XSCALE_CCLKCFG));
#endif

		/*
		 * XXX Disable ECC in the Bus Controller Unit; we
		 * don't really support it, yet.  Clear any pending
		 * error indications.
		 */
		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
			:
			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));

		cpufuncs = xscale_cpufuncs;
		/*
		 * i80200 errata: Step-A0 and A1 have a bug where
		 * D$ dirty bits are not cleared on "invalidate by
		 * address".
		 *
		 * Workaround: Clean cache line before invalidating.
		 */
		if (rev == 0 || rev == 1)
			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_80200 */
#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_80321 || CPU_XSCALE_80219 */

#if defined(CPU_XSCALE_81342)
	if (cputype == CPU_ID_81342) {
		cpufuncs = xscalec3_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_81342 */
#ifdef CPU_XSCALE_PXA2X0
	/* ignore core revision to test PXA2xx CPUs */
	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {

		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif /* CPU_XSCALE_PXA2X0 */
#ifdef CPU_XSCALE_IXP425
	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
            cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {

		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		goto out;
	}
#endif /* CPU_XSCALE_IXP425 */
	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	return(ARCHITECTURE_NOT_PRESENT);	/* NOTREACHED */
out:
	uma_set_align(arm_dcache_align_mask);
	return (0);
}
1622
1623/*
1624 * Fixup routines for data and prefetch aborts.
1625 *
1626 * Several compile time symbols are used
1627 *
1628 * DEBUG_FAULT_CORRECTION - Print debugging information during the
1629 * correction of registers after a fault.
1630 * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
1631 * when defined should use late aborts
1632 */
1633
1634
1635/*
1636 * Null abort fixup routine.
1637 * For use when no fixup is required.
1638 */
1639int
1640cpufunc_null_fixup(arg)
1641	void *arg;
1642{
1643	return(ABORT_FIXUP_OK);
1644}
1645
1646
1647#if defined(CPU_ARM7TDMI)
1648
#ifdef DEBUG_FAULT_CORRECTION
/* Debug build: trace register corrections and disassemble the faulting insn. */
#define DFC_PRINTF(x)		printf x
#define DFC_DISASSEMBLE(x)	disassemble(x)
#else
/* Normal build: both trace macros compile to nothing. */
#define DFC_PRINTF(x)		/* nothing */
#define DFC_DISASSEMBLE(x)	/* nothing */
#endif
1656
1657/*
1658 * "Early" data abort fixup.
1659 *
1660 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
1661 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
1662 *
1663 * In early aborts, we may have to fix up LDM, STM, LDC and STC.
1664 */
/*
 * arg points at the trapframe of the faulting context.  Returns
 * ABORT_FIXUP_OK on success or ABORT_FIXUP_FAILED if the base-register
 * writeback cannot be undone safely.
 */
int
early_abort_fixup(arg)
	void *arg;
{
	struct trapframe *frame = arg;
	u_int fault_pc;
	u_int fault_instruction;
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	/* Block transfer (LDM/STM): bits 27..25 == 100. */
	if ((fault_instruction & 0x0e000000) == 0x08000000) {
		int base;
		int loop;
		int count;
		int *registers = &frame->tf_r0;

		DFC_PRINTF(("LDM/STM\n"));
		DFC_DISASSEMBLE(fault_pc);
		/* Bit 21 is the writeback (W) bit; only then fix the base. */
		if (fault_instruction & (1 << 21)) {
			DFC_PRINTF(("This instruction must be corrected\n"));
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			/* Count registers transferred */
			count = 0;
			for (loop = 0; loop < 16; ++loop) {
				if (fault_instruction & (1<<loop))
					++count;
			}
			DFC_PRINTF(("%d registers used\n", count));
			DFC_PRINTF(("Corrected r%d by %d bytes ",
				       base, count * 4));
			/* Bit 23 is the up/down (U) bit. */
			if (fault_instruction & (1 << 23)) {
				DFC_PRINTF(("down\n"));
				registers[base] -= count * 4;
			} else {
				DFC_PRINTF(("up\n"));
				registers[base] += count * 4;
			}
		}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
		/* Coprocessor transfer (LDC/STC): bits 27..25 == 110. */
		int base;
		int offset;
		int *registers = &frame->tf_r0;

		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */

		DFC_DISASSEMBLE(fault_pc);

		/* Only need to fix registers if write back is turned on */

		if ((fault_instruction & (1 << 21)) != 0) {
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;

			offset = (fault_instruction & 0xff) << 2;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
			if ((fault_instruction & (1 << 23)) != 0)
				offset = -offset;
			registers[base] += offset;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
		/*
		 * NOTE(review): this condition is identical to the branch
		 * above, so this arm is unreachable dead code.  Possibly a
		 * different mask/value was intended here — confirm against
		 * the NetBSD cpufunc.c history before changing.
		 */
		return ABORT_FIXUP_FAILED;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	return(ABORT_FIXUP_OK);
}
1794#endif	/* CPU_ARM2/250/3/6/7 */
1795
1796
1797#if defined(CPU_ARM7TDMI)
1798/*
1799 * "Late" (base updated) data abort fixup
1800 *
1801 * For ARM6 (in late-abort mode) and ARM7.
1802 *
1803 * In this model, all data-transfer instructions need fixing up.  We defer
1804 * LDM, STM, LDC and STC fixup to the early-abort handler.
1805 */
1806int
1807late_abort_fixup(arg)
1808	void *arg;
1809{
1810	struct trapframe *frame = arg;
1811	u_int fault_pc;
1812	u_int fault_instruction;
1813	int saved_lr = 0;
1814
1815	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1816
1817		/* Ok an abort in SVC mode */
1818
1819		/*
1820		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1821		 * as the fault happened in svc mode but we need it in the
1822		 * usr slot so we can treat the registers as an array of ints
1823		 * during fixing.
1824		 * NOTE: This PC is in the position but writeback is not
1825		 * allowed on r15.
1826		 * Doing it like this is more efficient than trapping this
1827		 * case in all possible locations in the following fixup code.
1828		 */
1829
1830		saved_lr = frame->tf_usr_lr;
1831		frame->tf_usr_lr = frame->tf_svc_lr;
1832
1833		/*
1834		 * Note the trapframe does not have the SVC r13 so a fault
1835		 * from an instruction with writeback to r13 in SVC mode is
1836		 * not allowed. This should not happen as the kstack is
1837		 * always valid.
1838		 */
1839	}
1840
1841	/* Get fault address and status from the CPU */
1842
1843	fault_pc = frame->tf_pc;
1844	fault_instruction = *((volatile unsigned int *)fault_pc);
1845
1846	/* Decode the fault instruction and fix the registers as needed */
1847
1848	/* Was is a swap instruction ? */
1849
1850	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
1851		DFC_DISASSEMBLE(fault_pc);
1852	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {
1853
1854		/* Was is a ldr/str instruction */
1855		/* This is for late abort only */
1856
1857		int base;
1858		int offset;
1859		int *registers = &frame->tf_r0;
1860
1861		DFC_DISASSEMBLE(fault_pc);
1862
1863		/* This is for late abort only */
1864
1865		if ((fault_instruction & (1 << 24)) == 0
1866		    || (fault_instruction & (1 << 21)) != 0) {
1867			/* postindexed ldr/str with no writeback */
1868
1869			base = (fault_instruction >> 16) & 0x0f;
1870			if (base == 13 &&
1871			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1872				return ABORT_FIXUP_FAILED;
1873			if (base == 15)
1874				return ABORT_FIXUP_FAILED;
1875			DFC_PRINTF(("late abt fix: r%d=%08x : ",
1876				       base, registers[base]));
1877			if ((fault_instruction & (1 << 25)) == 0) {
1878				/* Immediate offset - easy */
1879
1880				offset = fault_instruction & 0xfff;
1881				if ((fault_instruction & (1 << 23)))
1882					offset = -offset;
1883				registers[base] += offset;
1884				DFC_PRINTF(("imm=%08x ", offset));
1885			} else {
1886				/* offset is a shifted register */
1887				int shift;
1888
1889				offset = fault_instruction & 0x0f;
1890				if (offset == base)
1891					return ABORT_FIXUP_FAILED;
1892
1893				/*
1894				 * Register offset - hard we have to
1895				 * cope with shifts !
1896				 */
1897				offset = registers[offset];
1898
1899				if ((fault_instruction & (1 << 4)) == 0)
1900					/* shift with amount */
1901					shift = (fault_instruction >> 7) & 0x1f;
1902				else {
1903					/* shift with register */
1904					if ((fault_instruction & (1 << 7)) != 0)
1905						/* undefined for now so bail out */
1906						return ABORT_FIXUP_FAILED;
1907					shift = ((fault_instruction >> 8) & 0xf);
1908					if (base == shift)
1909						return ABORT_FIXUP_FAILED;
1910					DFC_PRINTF(("shift reg=%d ", shift));
1911					shift = registers[shift];
1912				}
1913				DFC_PRINTF(("shift=%08x ", shift));
1914				switch (((fault_instruction >> 5) & 0x3)) {
1915				case 0 : /* Logical left */
1916					offset = (int)(((u_int)offset) << shift);
1917					break;
1918				case 1 : /* Logical Right */
1919					if (shift == 0) shift = 32;
1920					offset = (int)(((u_int)offset) >> shift);
1921					break;
1922				case 2 : /* Arithmetic Right */
1923					if (shift == 0) shift = 32;
1924					offset = (int)(((int)offset) >> shift);
1925					break;
1926				case 3 : /* Rotate right (rol or rxx) */
1927					return ABORT_FIXUP_FAILED;
1928					break;
1929				}
1930
1931				DFC_PRINTF(("abt: fixed LDR/STR with "
1932					       "register offset\n"));
1933				if ((fault_instruction & (1 << 23)))
1934					offset = -offset;
1935				DFC_PRINTF(("offset=%08x ", offset));
1936				registers[base] += offset;
1937			}
1938			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1939		}
1940	}
1941
1942	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1943
1944		/* Ok an abort in SVC mode */
1945
1946		/*
1947		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1948		 * as the fault happened in svc mode but we need it in the
1949		 * usr slot so we can treat the registers as an array of ints
1950		 * during fixing.
1951		 * NOTE: This PC is in the position but writeback is not
1952		 * allowed on r15.
1953		 * Doing it like this is more efficient than trapping this
1954		 * case in all possible locations in the prior fixup code.
1955		 */
1956
1957		frame->tf_svc_lr = frame->tf_usr_lr;
1958		frame->tf_usr_lr = saved_lr;
1959
1960		/*
1961		 * Note the trapframe does not have the SVC r13 so a fault
1962		 * from an instruction with writeback to r13 in SVC mode is
1963		 * not allowed. This should not happen as the kstack is
1964		 * always valid.
1965		 */
1966	}
1967
1968	/*
1969	 * Now let the early-abort fixup routine have a go, in case it
1970	 * was an LDM, STM, LDC or STC that faulted.
1971	 */
1972
1973	return early_abort_fixup(arg);
1974}
1975#endif	/* CPU_ARM7TDMI */
1976
1977/*
1978 * CPU Setup code
1979 */
1980
1981#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined (CPU_ARM9) || \
1982  defined(CPU_ARM9E) || \
1983  defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) ||	\
1984  defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||		\
1985  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
1986  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
1987  defined(CPU_ARM10) ||  defined(CPU_ARM1136) || defined(CPU_ARM1176) ||\
1988  defined(CPU_FA526) || defined(CPU_FA626TE)
1989
/*
 * Action codes for struct cpu_option: what parse_cpu_options() does to
 * the control-register value when a boot option parses false/true.
 */
#define IGN	0	/* ignore: leave cpuctrl unchanged */
#define OR	1	/* OR co_value into cpuctrl */
#define BIC	2	/* bit-clear (AND-NOT) co_value from cpuctrl */

/*
 * One boot-option table entry: ties an option name to the control
 * register bit(s) it affects.  Tables end with a NULL co_name.
 */
struct cpu_option {
	char	*co_name;	/* boot option name, e.g. "cpu.cache" */
	int	co_falseop;	/* action (IGN/OR/BIC) if option is false */
	int	co_trueop;	/* action (IGN/OR/BIC) if option is true */
	int	co_value;	/* CPU_CONTROL_* bit(s) to apply */
};
2000
2001static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
2002
2003static u_int
2004parse_cpu_options(args, optlist, cpuctrl)
2005	char *args;
2006	struct cpu_option *optlist;
2007	u_int cpuctrl;
2008{
2009	int integer;
2010
2011	if (args == NULL)
2012		return(cpuctrl);
2013
2014	while (optlist->co_name) {
2015		if (get_bootconf_option(args, optlist->co_name,
2016		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
2017			if (integer) {
2018				if (optlist->co_trueop == OR)
2019					cpuctrl |= optlist->co_value;
2020				else if (optlist->co_trueop == BIC)
2021					cpuctrl &= ~optlist->co_value;
2022			} else {
2023				if (optlist->co_falseop == OR)
2024					cpuctrl |= optlist->co_value;
2025				else if (optlist->co_falseop == BIC)
2026					cpuctrl &= ~optlist->co_value;
2027			}
2028		}
2029		++optlist;
2030	}
2031	return(cpuctrl);
2032}
2033#endif /* CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 || XSCALE*/
2034
2035#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8)
/*
 * Boot options shared by the ARM7TDMI and ARM8 setup routines:
 * unified ID-cache and write-buffer enable/disable knobs.
 */
struct cpu_option arm678_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2047
2048#endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
2049
2050#ifdef CPU_ARM7TDMI
/*
 * ARM7TDMI-specific boot options: cache, write buffer and FPA
 * coprocessor clock control.  Applied by arm7tdmi_setup().
 */
struct cpu_option arm7tdmi_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }
};
2062
2063void
2064arm7tdmi_setup(args)
2065	char *args;
2066{
2067	int cpuctrl;
2068
2069	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2070		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2071		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2072
2073	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2074	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
2075
2076#ifdef __ARMEB__
2077	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2078#endif
2079
2080	/* Clear out the cache */
2081	cpu_idcache_wbinv_all();
2082
2083	/* Set the control register */
2084	ctrl = cpuctrl;
2085	cpu_control(0xffffffff, cpuctrl);
2086}
2087#endif	/* CPU_ARM7TDMI */
2088
2089#ifdef CPU_ARM8
/*
 * ARM8-specific boot options: cache, write buffer and branch
 * prediction.  Applied by arm8_setup().
 */
struct cpu_option arm8_options[] = {
	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2102
2103void
2104arm8_setup(args)
2105	char *args;
2106{
2107	int integer;
2108	int cpuctrl, cpuctrlmask;
2109	int clocktest;
2110	int setclock = 0;
2111
2112	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2113		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2114		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2115	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2116		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2117		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2118		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
2119		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
2120
2121#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2122	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2123#endif
2124
2125	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2126	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
2127
2128#ifdef __ARMEB__
2129	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2130#endif
2131
2132	/* Get clock configuration */
2133	clocktest = arm8_clock_config(0, 0) & 0x0f;
2134
2135	/* Special ARM8 clock and test configuration */
2136	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2137		clocktest = 0;
2138		setclock = 1;
2139	}
2140	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2141		if (integer)
2142			clocktest |= 0x01;
2143		else
2144			clocktest &= ~(0x01);
2145		setclock = 1;
2146	}
2147	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2148		if (integer)
2149			clocktest |= 0x02;
2150		else
2151			clocktest &= ~(0x02);
2152		setclock = 1;
2153	}
2154	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
2155		clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
2156		setclock = 1;
2157	}
2158	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
2159		clocktest |= (integer & 7) << 5;
2160		setclock = 1;
2161	}
2162
2163	/* Clear out the cache */
2164	cpu_idcache_wbinv_all();
2165
2166	/* Set the control register */
2167	ctrl = cpuctrl;
2168	cpu_control(0xffffffff, cpuctrl);
2169
2170	/* Set the clock/test register */
2171	if (setclock)
2172		arm8_clock_config(0x7f, clocktest);
2173}
2174#endif	/* CPU_ARM8 */
2175
2176#ifdef CPU_ARM9
/*
 * ARM9-specific boot options: separate I/D caches and write buffer.
 * Applied by arm9_setup().
 */
struct cpu_option arm9_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2188
2189void
2190arm9_setup(args)
2191	char *args;
2192{
2193	int cpuctrl, cpuctrlmask;
2194
2195	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2196	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2197	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2198	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
2199	    CPU_CONTROL_ROUNDROBIN;
2200	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2201		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2202		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2203		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2204		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2205		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
2206		 | CPU_CONTROL_ROUNDROBIN;
2207
2208#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2209	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2210#endif
2211
2212	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
2213
2214#ifdef __ARMEB__
2215	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2216#endif
2217	if (vector_page == ARM_VECTORS_HIGH)
2218		cpuctrl |= CPU_CONTROL_VECRELOC;
2219
2220	/* Clear out the cache */
2221	cpu_idcache_wbinv_all();
2222
2223	/* Set the control register */
2224	cpu_control(cpuctrlmask, cpuctrl);
2225	ctrl = cpuctrl;
2226
2227}
2228#endif	/* CPU_ARM9 */
2229
2230#if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * ARM9E/ARM10 boot options: separate I/D caches and write buffer.
 * Applied by arm10_setup().
 */
struct cpu_option arm10_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2242
2243void
2244arm10_setup(args)
2245	char *args;
2246{
2247	int cpuctrl, cpuctrlmask;
2248
2249	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2250	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2251	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
2252	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2253	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2254	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2255	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2256	    | CPU_CONTROL_BPRD_ENABLE
2257	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2258
2259#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2260	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2261#endif
2262
2263	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
2264
2265#ifdef __ARMEB__
2266	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2267#endif
2268
2269	/* Clear out the cache */
2270	cpu_idcache_wbinv_all();
2271
2272	/* Now really make sure they are clean.  */
2273	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2274
2275	if (vector_page == ARM_VECTORS_HIGH)
2276		cpuctrl |= CPU_CONTROL_VECRELOC;
2277
2278	/* Set the control register */
2279	ctrl = cpuctrl;
2280	cpu_control(0xffffffff, cpuctrl);
2281
2282	/* And again. */
2283	cpu_idcache_wbinv_all();
2284}
2285#endif	/* CPU_ARM9E || CPU_ARM10 */
2286
2287#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
/*
 * ARM11 boot options: separate I/D cache enable/disable knobs.
 * Applied by arm11x6_setup().
 */
struct cpu_option arm11_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2296
/*
 * arm11x6_setup:
 *	Configure an ARM1136/ARM1176 core: build the control-register
 *	value, apply boot-argument overrides from arm11_options, apply
 *	CPU-revision-specific errata workarounds via the auxiliary
 *	control register, flush the caches, enable VFP access, and write
 *	the control and auxiliary control registers.
 */
void
arm11x6_setup(char *args)
{
	int cpuctrl, cpuctrl_wax;	/* control value / write-as-existing mask */
	uint32_t auxctrl, auxctrl_wax;	/* aux control value / w-a-e mask */
	uint32_t tmp, tmp2;		/* scratch for read-modify-write asm */
	uint32_t sbz=0;			/* "should be zero" operand for mcr */
	uint32_t cpuid;

	cpuid = cpufunc_id();

	cpuctrl =
		CPU_CONTROL_MMU_ENABLE  |
		CPU_CONTROL_DC_ENABLE   |
		CPU_CONTROL_WBUF_ENABLE |
		CPU_CONTROL_32BP_ENABLE |
		CPU_CONTROL_32BD_ENABLE |
		CPU_CONTROL_LABT_ENABLE |
		CPU_CONTROL_SYST_ENABLE |
		CPU_CONTROL_IC_ENABLE;

	/*
	 * "write as existing" bits
	 * inverse of this is mask
	 */
	cpuctrl_wax =
		(3 << 30) | /* SBZ */
		(1 << 29) | /* FA */
		(1 << 28) | /* TR */
		(3 << 26) | /* SBZ */
		(3 << 19) | /* SBZ */
		(1 << 17);  /* SBZ */

	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;

	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);

#ifdef __ARMEB__
	/* Run big-endian. */
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Relocated vectors at 0xffff0000? */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	auxctrl = 0;
	auxctrl_wax = ~0;
	/*
	 * This options enables the workaround for the 364296 ARM1136
	 * r0pX errata (possible cache data corruption with
	 * hit-under-miss enabled). It sets the undocumented bit 31 in
	 * the auxiliary control register and the FI bit in the control
	 * register, thus disabling hit-under-miss without putting the
	 * processor into full low interrupt latency mode. ARM11MPCore
	 * is not affected.
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
		cpuctrl |= CPU_CONTROL_FI_ENABLE;
		auxctrl = ARM1136_AUXCTL_PFI;
		auxctrl_wax = ~ARM1136_AUXCTL_PFI;
	}

	/*
	 * Enable an errata workaround
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
		auxctrl = ARM1176_AUXCTL_PHD;
		auxctrl_wax = ~ARM1176_AUXCTL_PHD;
	}

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));

	/* Allow detection code to find the VFP if it's fitted.  */
	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));

	/* Set the control register */
	ctrl = cpuctrl;
	cpu_control(~cpuctrl_wax, cpuctrl);

	/*
	 * Read-modify-write the auxiliary control register: keep the
	 * "write as existing" bits, OR in the errata bits, and only
	 * write back if the value actually changed.
	 */
	__asm volatile ("mrc	p15, 0, %0, c1, c0, 1\n\t"
			"and	%1, %0, %2\n\t"
			"orr	%1, %1, %3\n\t"
			"teq	%0, %1\n\t"
			"mcrne	p15, 0, %1, c1, c0, 1\n\t"
			: "=r"(tmp), "=r"(tmp2) :
			  "r"(auxctrl_wax), "r"(auxctrl));

	/* And again. */
	cpu_idcache_wbinv_all();
}
2391#endif  /* CPU_ARM1136 || CPU_ARM1176 */
2392
2393#ifdef CPU_MV_PJ4B
2394void
2395pj4bv7_setup(args)
2396	char *args;
2397{
2398	int cpuctrl;
2399
2400	pj4b_config();
2401
2402	cpuctrl = CPU_CONTROL_MMU_ENABLE;
2403#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2404	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2405#endif
2406	cpuctrl |= CPU_CONTROL_DC_ENABLE;
2407	cpuctrl |= (0xf << 3);
2408	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
2409	cpuctrl |= CPU_CONTROL_IC_ENABLE;
2410	if (vector_page == ARM_VECTORS_HIGH)
2411		cpuctrl |= CPU_CONTROL_VECRELOC;
2412	cpuctrl |= (0x5 << 16) | (1 < 22);
2413	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
2414
2415	/* Clear out the cache */
2416	cpu_idcache_wbinv_all();
2417
2418	/* Set the control register */
2419	ctrl = cpuctrl;
2420	cpu_control(0xFFFFFFFF, cpuctrl);
2421
2422	/* And again. */
2423	cpu_idcache_wbinv_all();
2424}
2425#endif /* CPU_MV_PJ4B */
2426
2427#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
2428
2429void
2430cortexa_setup(char *args)
2431{
2432	int cpuctrl, cpuctrlmask;
2433
2434	cpuctrlmask = CPU_CONTROL_MMU_ENABLE |     /* MMU enable         [0] */
2435	    CPU_CONTROL_AFLT_ENABLE |    /* Alignment fault    [1] */
2436	    CPU_CONTROL_DC_ENABLE |      /* DCache enable      [2] */
2437	    CPU_CONTROL_BPRD_ENABLE |    /* Branch prediction [11] */
2438	    CPU_CONTROL_IC_ENABLE |      /* ICache enable     [12] */
2439	    CPU_CONTROL_VECRELOC;        /* Vector relocation [13] */
2440
2441	cpuctrl = CPU_CONTROL_MMU_ENABLE |
2442	    CPU_CONTROL_IC_ENABLE |
2443	    CPU_CONTROL_DC_ENABLE |
2444	    CPU_CONTROL_BPRD_ENABLE;
2445
2446#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2447	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2448#endif
2449
2450	/* Switch to big endian */
2451#ifdef __ARMEB__
2452	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2453#endif
2454
2455	/* Check if the vector page is at the high address (0xffff0000) */
2456	if (vector_page == ARM_VECTORS_HIGH)
2457		cpuctrl |= CPU_CONTROL_VECRELOC;
2458
2459	/* Clear out the cache */
2460	cpu_idcache_wbinv_all();
2461
2462	/* Set the control register */
2463	ctrl = cpuctrl;
2464	cpu_control(cpuctrlmask, cpuctrl);
2465
2466	/* And again. */
2467	cpu_idcache_wbinv_all();
2468#ifdef SMP
2469	armv7_auxctrl((1 << 6) | (1 << 0), (1 << 6) | (1 << 0)); /* Enable SMP + TLB broadcasting  */
2470#endif
2471}
2472#endif  /* CPU_CORTEXA */
2473
2474
2475#ifdef CPU_SA110
/*
 * StrongARM SA-110 boot options: separate I/D caches and write buffer.
 * Applied by sa110_setup().
 */
struct cpu_option sa110_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2491
2492void
2493sa110_setup(args)
2494	char *args;
2495{
2496	int cpuctrl, cpuctrlmask;
2497
2498	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2499		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2500		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2501		 | CPU_CONTROL_WBUF_ENABLE;
2502	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2503		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2504		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2505		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2506		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2507		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2508		 | CPU_CONTROL_CPCLK;
2509
2510#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2511	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2512#endif
2513
2514	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
2515
2516#ifdef __ARMEB__
2517	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2518#endif
2519
2520	/* Clear out the cache */
2521	cpu_idcache_wbinv_all();
2522
2523	/* Set the control register */
2524	ctrl = cpuctrl;
2525/*	cpu_control(cpuctrlmask, cpuctrl);*/
2526	cpu_control(0xffffffff, cpuctrl);
2527
2528	/*
2529	 * enable clockswitching, note that this doesn't read or write to r0,
2530	 * r0 is just to make it valid asm
2531	 */
2532	__asm ("mcr 15, 0, r0, c15, c1, 2");
2533}
2534#endif	/* CPU_SA110 */
2535
2536#if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * StrongARM SA-1100/SA-1110 boot options: separate I/D caches and
 * write buffer.  Applied by sa11x0_setup().
 */
struct cpu_option sa11x0_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2552
2553void
2554sa11x0_setup(args)
2555	char *args;
2556{
2557	int cpuctrl, cpuctrlmask;
2558
2559	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2560		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2561		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2562		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
2563	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2564		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2565		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2566		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2567		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2568		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2569		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2570
2571#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2572	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2573#endif
2574
2575
2576	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
2577
2578#ifdef __ARMEB__
2579	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2580#endif
2581
2582	if (vector_page == ARM_VECTORS_HIGH)
2583		cpuctrl |= CPU_CONTROL_VECRELOC;
2584	/* Clear out the cache */
2585	cpu_idcache_wbinv_all();
2586	/* Set the control register */
2587	ctrl = cpuctrl;
2588	cpu_control(0xffffffff, cpuctrl);
2589}
2590#endif	/* CPU_SA1100 || CPU_SA1110 */
2591
2592#if defined(CPU_FA526) || defined(CPU_FA626TE)
/*
 * Faraday FA526/FA626TE boot options: caches and write buffer.
 * Applied by fa526_setup().
 */
struct cpu_option fa526_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE |
					   CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE |
					   CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE |
					   CPU_CONTROL_DC_ENABLE) },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2607
2608void
2609fa526_setup(char *args)
2610{
2611	int cpuctrl, cpuctrlmask;
2612
2613	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2614		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2615		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2616		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
2617		| CPU_CONTROL_BPRD_ENABLE;
2618	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2619		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2620		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2621		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2622		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2623		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2624		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2625
2626#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2627	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2628#endif
2629
2630	cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
2631
2632#ifdef __ARMEB__
2633	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2634#endif
2635
2636	if (vector_page == ARM_VECTORS_HIGH)
2637		cpuctrl |= CPU_CONTROL_VECRELOC;
2638
2639	/* Clear out the cache */
2640	cpu_idcache_wbinv_all();
2641
2642	/* Set the control register */
2643	ctrl = cpuctrl;
2644	cpu_control(0xffffffff, cpuctrl);
2645}
2646#endif	/* CPU_FA526 || CPU_FA626TE */
2647
2648
2649#if defined(CPU_IXP12X0)
/*
 * Intel IXP12x0 boot options: separate I/D caches and write buffer.
 * Applied by ixp12x0_setup().
 */
struct cpu_option ixp12x0_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2661
2662void
2663ixp12x0_setup(args)
2664	char *args;
2665{
2666	int cpuctrl, cpuctrlmask;
2667
2668
2669	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
2670		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
2671		 | CPU_CONTROL_IC_ENABLE;
2672
2673	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
2674		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2675		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
2676		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
2677		 | CPU_CONTROL_VECRELOC;
2678
2679#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2680	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2681#endif
2682
2683	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
2684
2685#ifdef __ARMEB__
2686	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2687#endif
2688
2689	if (vector_page == ARM_VECTORS_HIGH)
2690		cpuctrl |= CPU_CONTROL_VECRELOC;
2691
2692	/* Clear out the cache */
2693	cpu_idcache_wbinv_all();
2694
2695	/* Set the control register */
2696	ctrl = cpuctrl;
2697	/* cpu_control(0xffffffff, cpuctrl); */
2698	cpu_control(cpuctrlmask, cpuctrl);
2699}
2700#endif /* CPU_IXP12X0 */
2701
2702#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2703  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
2704  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
/*
 * Intel XScale boot options: separate I/D caches and branch
 * prediction.  Applied by xscale_setup().
 */
struct cpu_option xscale_options[] = {
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2719
2720void
2721xscale_setup(args)
2722	char *args;
2723{
2724	uint32_t auxctl;
2725	int cpuctrl, cpuctrlmask;
2726
2727	/*
2728	 * The XScale Write Buffer is always enabled.  Our option
2729	 * is to enable/disable coalescing.  Note that bits 6:3
2730	 * must always be enabled.
2731	 */
2732
2733	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2734		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2735		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2736		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
2737		 | CPU_CONTROL_BPRD_ENABLE;
2738	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2739		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2740		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2741		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2742		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2743		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2744		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
2745		 CPU_CONTROL_L2_ENABLE;
2746
2747#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2748	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2749#endif
2750
2751	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
2752
2753#ifdef __ARMEB__
2754	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2755#endif
2756
2757	if (vector_page == ARM_VECTORS_HIGH)
2758		cpuctrl |= CPU_CONTROL_VECRELOC;
2759#ifdef CPU_XSCALE_CORE3
2760	cpuctrl |= CPU_CONTROL_L2_ENABLE;
2761#endif
2762
2763	/* Clear out the cache */
2764	cpu_idcache_wbinv_all();
2765
2766	/*
2767	 * Set the control register.  Note that bits 6:3 must always
2768	 * be set to 1.
2769	 */
2770	ctrl = cpuctrl;
2771/*	cpu_control(cpuctrlmask, cpuctrl);*/
2772	cpu_control(0xffffffff, cpuctrl);
2773
2774	/* Make sure write coalescing is turned on */
2775	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
2776		: "=r" (auxctl));
2777#ifdef XSCALE_NO_COALESCE_WRITES
2778	auxctl |= XSCALE_AUXCTL_K;
2779#else
2780	auxctl &= ~XSCALE_AUXCTL_K;
2781#endif
2782#ifdef CPU_XSCALE_CORE3
2783	auxctl |= XSCALE_AUXCTL_LLR;
2784	auxctl |= XSCALE_AUXCTL_MD_MASK;
2785#endif
2786	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
2787		: : "r" (auxctl));
2788}
2789#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
2790	   CPU_XSCALE_80219 */
2791