cpufunc.c revision 240486
1/*	$NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $	*/
2
3/*-
4 * arm7tdmi support code Copyright (c) 2001 John Fremlin
5 * arm8 support code Copyright (c) 1997 ARM Limited
6 * arm8 support code Copyright (c) 1997 Causality Limited
7 * arm9 support code Copyright (C) 2001 ARM Ltd
8 * Copyright (c) 1997 Mark Brinicombe.
9 * Copyright (c) 1997 Causality Limited
10 * All rights reserved.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 *    notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 *    notice, this list of conditions and the following disclaimer in the
19 *    documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 *    must display the following acknowledgement:
22 *	This product includes software developed by Causality Limited.
23 * 4. The name of Causality Limited may not be used to endorse or promote
24 *    products derived from this software without specific prior written
25 *    permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * SUCH DAMAGE.
38 *
39 * RiscBSD kernel project
40 *
41 * cpufuncs.c
42 *
43 * C functions for supporting CPU / MMU / TLB specific operations.
44 *
45 * Created      : 30/01/97
46 */
47#include <sys/cdefs.h>
48__FBSDID("$FreeBSD: head/sys/arm/arm/cpufunc.c 240486 2012-09-14 09:38:54Z gber $");
49
50#include <sys/param.h>
51#include <sys/systm.h>
52#include <sys/lock.h>
53#include <sys/mutex.h>
54#include <sys/bus.h>
55#include <machine/bus.h>
56#include <machine/cpu.h>
57#include <machine/disassem.h>
58
59#include <vm/vm.h>
60#include <vm/pmap.h>
61#include <vm/uma.h>
62
63#include <machine/cpuconf.h>
64#include <machine/cpufunc.h>
65#include <machine/bootconfig.h>
66
67#ifdef CPU_XSCALE_80200
68#include <arm/xscale/i80200/i80200reg.h>
69#include <arm/xscale/i80200/i80200var.h>
70#endif
71
72#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
73#include <arm/xscale/i80321/i80321reg.h>
74#include <arm/xscale/i80321/i80321var.h>
75#endif
76
77#if defined(CPU_XSCALE_81342)
78#include <arm/xscale/i8134x/i81342reg.h>
79#endif
80
81#ifdef CPU_XSCALE_IXP425
82#include <arm/xscale/ixp425/ixp425reg.h>
83#include <arm/xscale/ixp425/ixp425var.h>
84#endif
85
/* PRIMARY CACHE VARIABLES */
/*
 * Geometry of the primary (L1) instruction and data caches.
 * NOTE(review): presumably populated by the per-CPU setup/identify
 * routines referenced in the tables below — confirm against callers.
 */
int	arm_picache_size;	/* primary I-cache size */
int	arm_picache_line_size;	/* primary I-cache line size */
int	arm_picache_ways;	/* primary I-cache associativity */

int	arm_pdcache_size;	/* primary D-cache size (and unified) */
int	arm_pdcache_line_size;	/* primary D-cache line size */
int	arm_pdcache_ways;	/* primary D-cache associativity */

int	arm_pcache_type;	/* primary cache type */
int	arm_pcache_unified;	/* non-zero if I and D caches are unified */

int	arm_dcache_align;	/* D-cache line alignment */
int	arm_dcache_align_mask;	/* arm_dcache_align - 1, for masking */

u_int	arm_cache_level;	/* cache level ID register contents */
u_int	arm_cache_type[14];	/* per-level cache type registers */
u_int	arm_cache_loc;		/* level of coherency */

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;
int ctrl;			/* saved CPU control register value */
108
109#ifdef CPU_ARM7TDMI
/*
 * Operations table for ARM7TDMI cores.  Slots with no hardware
 * equivalent are wired to cpufunc_nullop; the single flushID routine
 * backs all cache/TLB variants.
 */
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm7tdmi_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm7tdmi_tlb_flushID,		/* tlb_flushID		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushID_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushI		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushI_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushD		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm7tdmi_cache_flushID,		/* dcache_wbinv_all	*/
	(void *)arm7tdmi_cache_flushID,	/* dcache_wbinv_range	*/
	(void *)arm7tdmi_cache_flushID,	/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	arm7tdmi_cache_flushID,		/* idcache_wbinv_all	*/
	(void *)arm7tdmi_cache_flushID,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	late_abort_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm7tdmi_context_switch,	/* context_switch	*/

	arm7tdmi_setup			/* cpu setup		*/

};
169#endif	/* CPU_ARM7TDMI */
170
171#ifdef CPU_ARM8
/*
 * Operations table for ARM8 cores.  Range cache operations fall back
 * to the whole-cache purge/clean routines.
 */
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm8_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm8_tlb_flushID,		/* tlb_flushID		*/
	arm8_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	arm8_tlb_flushID,		/* tlb_flushI		*/
	arm8_tlb_flushID_SE,		/* tlb_flushI_SE	*/
	arm8_tlb_flushID,		/* tlb_flushD		*/
	arm8_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm8_cache_purgeID,		/* dcache_wbinv_all	*/
	(void *)arm8_cache_purgeID,	/* dcache_wbinv_range	*/
/*XXX*/	(void *)arm8_cache_purgeID,	/* dcache_inv_range	*/
	(void *)arm8_cache_cleanID,	/* dcache_wb_range	*/

	arm8_cache_purgeID,		/* idcache_wbinv_all	*/
	(void *)arm8_cache_purgeID,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm8_context_switch,		/* context_switch	*/

	arm8_setup			/* cpu setup		*/
};
230#endif	/* CPU_ARM8 */
231
232#ifdef CPU_ARM9
/*
 * Operations table for ARM9 cores; TLB maintenance uses the generic
 * armv4 routines with ARM9-specific cache handlers.
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	arm9_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm9_icache_sync_all,		/* icache_sync_all	*/
	arm9_icache_sync_range,		/* icache_sync_range	*/

	arm9_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	arm9_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	arm9_dcache_inv_range,		/* dcache_inv_range	*/
	arm9_dcache_wb_range,		/* dcache_wb_range	*/

	arm9_idcache_wbinv_all,		/* idcache_wbinv_all	*/
	arm9_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm9_context_switch,		/* context_switch	*/

	arm9_setup			/* cpu setup		*/

};
292#endif /* CPU_ARM9 */
293
294#if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * Operations table for ARMv5 "E" cores (ARM9E/ARM10) using the
 * common armv5_ec cache routines.
 */
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	armv5_ec_setttb,		/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_all,	/* icache_sync_all	*/
	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	armv5_ec_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv5_ec_dcache_inv_range,	/* dcache_inv_range	*/
	armv5_ec_dcache_wb_range,	/* dcache_wb_range	*/

	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv5_ec_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/

};
355
/*
 * Operations table for Marvell Sheeva cores: armv5_ec whole-cache
 * routines plus Sheeva-specific range and L2 cache handlers.
 */
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	sheeva_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_all,	/* icache_sync_all	*/
	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	sheeva_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	sheeva_dcache_inv_range,	/* dcache_inv_range	*/
	sheeva_dcache_wb_range,		/* dcache_wb_range	*/

	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	sheeva_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	sheeva_l2cache_wbinv_all,	/* l2cache_wbinv_all    */
	sheeva_l2cache_wbinv_range,	/* l2cache_wbinv_range  */
	sheeva_l2cache_inv_range,	/* l2cache_inv_range    */
	sheeva_l2cache_wb_range,	/* l2cache_wb_range     */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	sheeva_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/
};
415#endif /* CPU_ARM9E || CPU_ARM10 */
416
417#ifdef CPU_ARM10
/*
 * Operations table for ARM10 cores.
 */
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	arm10_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm10_icache_sync_all,		/* icache_sync_all	*/
	arm10_icache_sync_range,	/* icache_sync_range	*/

	arm10_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	arm10_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	arm10_dcache_inv_range,		/* dcache_inv_range	*/
	arm10_dcache_wb_range,		/* dcache_wb_range	*/

	arm10_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	arm10_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/

};
477#endif /* CPU_ARM10 */
478
479#ifdef CPU_MV_PJ4B
/*
 * Operations table for Marvell PJ4B cores running in ARMv7 mode:
 * armv7 TLB/cache routines with PJ4B-specific MMU and buffer helpers.
 */
struct cpu_functions pj4bv7_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	arm11_drain_writebuf,		/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	pj4b_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv7_tlb_flushID,		/* tlb_flushID		*/
	armv7_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv7_tlb_flushID,		/* tlb_flushI		*/
	armv7_tlb_flushID_SE,		/* tlb_flushI_SE	*/
	armv7_tlb_flushID,		/* tlb_flushD		*/
	armv7_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */
	armv7_idcache_wbinv_all,	/* icache_sync_all	*/
	armv7_icache_sync_range,	/* icache_sync_range	*/

	armv7_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	armv7_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv7_dcache_inv_range,		/* dcache_inv_range	*/
	armv7_dcache_wb_range,		/* dcache_wb_range	*/

	armv7_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv7_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	(void *)cpufunc_nullop,		/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	pj4b_drain_readbuf,		/* flush_prefetchbuf	*/
	arm11_drain_writebuf,		/* drain_writebuf	*/
	pj4b_flush_brnchtgt_all,	/* flush_brnchtgt_C	*/
	pj4b_flush_brnchtgt_va,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm11_context_switch,		/* context_switch	*/

	pj4bv7_setup			/* cpu setup		*/
};
538
/*
 * Operations table for Marvell PJ4B cores running in ARMv6 mode:
 * arm11 TLB routines with armv6/pj4b cache handlers.
 */
struct cpu_functions pj4bv6_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	arm11_drain_writebuf,		/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	pj4b_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	arm11_tlb_flushID,		/* tlb_flushID		*/
	arm11_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	arm11_tlb_flushI,		/* tlb_flushI		*/
	arm11_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	arm11_tlb_flushD,		/* tlb_flushD		*/
	arm11_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */
	armv6_icache_sync_all,		/* icache_sync_all	*/
	pj4b_icache_sync_range,		/* icache_sync_range	*/

	armv6_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	pj4b_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	pj4b_dcache_inv_range,		/* dcache_inv_range	*/
	pj4b_dcache_wb_range,		/* dcache_wb_range	*/

	armv6_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	pj4b_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	(void *)cpufunc_nullop,		/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	pj4b_drain_readbuf,		/* flush_prefetchbuf	*/
	arm11_drain_writebuf,		/* drain_writebuf	*/
	pj4b_flush_brnchtgt_all,	/* flush_brnchtgt_C	*/
	pj4b_flush_brnchtgt_va,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm11_context_switch,		/* context_switch	*/

	pj4bv6_setup			/* cpu setup		*/
};
597#endif /* CPU_MV_PJ4B */
598
599#ifdef CPU_SA110
/*
 * Operations table for StrongARM SA-110 cores.
 */
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	sa110_context_switch,		/* context_switch	*/

	sa110_setup			/* cpu setup		*/
};
658#endif	/* CPU_SA110 */
659
660#if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * Operations table for StrongARM SA-1100/SA-1110 cores; shares the
 * sa1 cache/TLB routines with SA-110 but adds read-buffer drain and
 * a real sleep handler.
 */
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	sa11x0_drain_readbuf,		/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	sa11x0_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	sa11x0_context_switch,		/* context_switch	*/

	sa11x0_setup			/* cpu setup		*/
};
719#endif	/* CPU_SA1100 || CPU_SA1110 */
720
721#ifdef CPU_IXP12X0
/*
 * Operations table for Intel IXP12x0 cores (StrongARM-derived);
 * reuses the sa1 cache/TLB routines.
 */
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	ixp12x0_drain_readbuf,		/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	ixp12x0_context_switch,		/* context_switch	*/

	ixp12x0_setup			/* cpu setup		*/
};
780#endif	/* CPU_IXP12X0 */
781
782#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
783  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
784  defined(CPU_XSCALE_80219)
785
/*
 * Operations table for first/second generation XScale cores
 * (80200, 80321, PXA2x0, IXP425, 80219).
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscale_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscale_cache_syncI,		/* icache_sync_all	*/
	xscale_cache_syncI_rng,		/* icache_sync_range	*/

	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all 	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscale_context_switch,		/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
844#endif
845/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
846   CPU_XSCALE_80219 */
847
848#ifdef CPU_XSCALE_81342
/*
 * Operations table for third-generation XScale (81342); the only
 * table in this file with real L2 cache maintenance handlers.
 */
struct cpu_functions xscalec3_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscalec3_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscalec3_cache_syncI,		/* icache_sync_all	*/
	xscalec3_cache_syncI_rng,	/* icache_sync_range	*/

	xscalec3_cache_purgeD,		/* dcache_wbinv_all	*/
	xscalec3_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscalec3_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscalec3_cache_purgeID,		/* idcache_wbinv_all	*/
	xscalec3_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	xscalec3_l2cache_purge,		/* l2cache_wbinv_all	*/
	xscalec3_l2cache_purge_rng,	/* l2cache_wbinv_range	*/
	xscalec3_l2cache_flush_rng,	/* l2cache_inv_range	*/
	xscalec3_l2cache_clean_rng,	/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscalec3_context_switch,	/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
907#endif /* CPU_XSCALE_81342 */
908
909
910#if defined(CPU_FA526) || defined(CPU_FA626TE)
/*
 * Operations table for Faraday FA526/FA626TE cores.
 */
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	fa526_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	fa526_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	fa526_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	fa526_icache_sync_all,		/* icache_sync_all	*/
	fa526_icache_sync_range,	/* icache_sync_range	*/

	fa526_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	fa526_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	fa526_dcache_inv_range,		/* dcache_inv_range	*/
	fa526_dcache_wb_range,		/* dcache_wb_range	*/

	fa526_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	fa526_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	fa526_flush_prefetchbuf,	/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	fa526_flush_brnchtgt_E,		/* flush_brnchtgt_E	*/

	fa526_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	fa526_context_switch,		/* context_switch	*/

	fa526_setup			/* cpu setup 		*/
};
969#endif	/* CPU_FA526 || CPU_FA626TE */
970
971#if defined(CPU_ARM11)
/*
 * Operations table for ARM11 cores using the armv6 cache routines.
 */
struct cpu_functions arm11_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	arm11_drain_writebuf,           /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* Domain               */
	arm11_setttb,                   /* Setttb               */
	cpufunc_faultstatus,            /* Faultstatus          */
	cpufunc_faultaddress,           /* Faultaddress         */

	/* TLB functions */

	arm11_tlb_flushID,              /* tlb_flushID          */
	arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
	arm11_tlb_flushI,               /* tlb_flushI           */
	arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
	arm11_tlb_flushD,               /* tlb_flushD           */
	arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	armv6_icache_sync_all,          /* icache_sync_all      */
	armv6_icache_sync_range,        /* icache_sync_range    */

	armv6_dcache_wbinv_all,         /* dcache_wbinv_all     */
	armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
	armv6_dcache_inv_range,         /* dcache_inv_range     */
	armv6_dcache_wb_range,          /* dcache_wb_range      */

	armv6_idcache_wbinv_all,        /* idcache_wbinv_all    */
	armv6_idcache_wbinv_range,      /* idcache_wbinv_range  */

	(void*)cpufunc_nullop,          /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */

	/* Other functions */

	cpufunc_nullop,                 /* flush_prefetchbuf    */
	arm11_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	arm11_sleep,                    /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	arm11_context_switch,           /* context_switch       */

	arm11_setup                     /* cpu setup            */
};
1031#endif /* CPU_ARM11 */
1032
#if defined(CPU_CORTEXA)
/*
 * CPU primitive dispatch table for Cortex-A8/A9 cores.  Positional
 * initializers, one per struct cpu_functions slot (see trailing
 * comments).  TLB maintenance reuses the ARM11 entry points; cache
 * maintenance uses the ARMv7 set/way and MVA routines.
 */
struct cpu_functions cortexa_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* Domain               */
	armv7_setttb,                   /* Setttb               */
	cpufunc_faultstatus,            /* Faultstatus          */
	cpufunc_faultaddress,           /* Faultaddress         */

	/* TLB functions */

	arm11_tlb_flushID,              /* tlb_flushID          */
	armv7_tlb_flushID_SE,           /* tlb_flushID_SE       */
	arm11_tlb_flushI,               /* tlb_flushI           */
	arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
	arm11_tlb_flushD,               /* tlb_flushD           */
	arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	/*
	 * NOTE(review): the icache_sync_all slot is wired to the full
	 * I+D wbinv routine rather than an armv7_icache_sync_all --
	 * looks deliberate (stronger operation is always safe) but
	 * confirm against cpufunc.h before relying on it.
	 */
	armv7_idcache_wbinv_all,         /* icache_sync_all      */
	armv7_icache_sync_range,        /* icache_sync_range    */

	armv7_dcache_wbinv_all,         /* dcache_wbinv_all     */
	armv7_dcache_wbinv_range,       /* dcache_wbinv_range   */
	armv7_dcache_inv_range,         /* dcache_inv_range     */
	armv7_dcache_wb_range,          /* dcache_wb_range      */

	armv7_idcache_wbinv_all,        /* idcache_wbinv_all    */
	armv7_idcache_wbinv_range,      /* idcache_wbinv_range  */

	/* Note: From OMAP4 the L2 ops are filled in when the
	 * L2 cache controller is actually enabled.
	 */
	cpufunc_nullop,                 /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */

	/* Other functions */

	cpufunc_nullop,                 /* flush_prefetchbuf    */
	arm11_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	arm11_sleep,                    /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	arm11_context_switch,           /* context_switch       */

	cortexa_setup                     /* cpu setup            */
};
#endif /* CPU_CORTEXA */
1097
1098/*
1099 * Global constants also used by locore.s
1100 */
1101
struct cpu_functions cpufuncs;		/* active dispatch table, installed by set_cpufuncs() */
u_int cputype;				/* CPU id, masked with CPU_ID_CPU_MASK in set_cpufuncs() */
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
1105
#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) ||	\
  defined (CPU_ARM9E) || defined (CPU_ARM10) || defined (CPU_ARM11) ||	\
  defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||		\
  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
  defined(CPU_FA526) || defined(CPU_FA626TE) || defined(CPU_MV_PJ4B) ||			\
  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
  defined(CPU_CORTEXA)

static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int	arm_dcache_l2_nsets;
static int	arm_dcache_l2_assoc;
static int	arm_dcache_l2_linesize;

/*
 * Probe cache geometry through the CP15 cache type register and fill
 * in the global arm_*cache_* description variables.  Handles both the
 * ARMv7 CLIDR/CCSIDR scheme and the older (pre-v7) packed cache type
 * register format.
 */
static void
get_cachetype_cp15()
{
	u_int ctype, isize, dsize, cpuid;
	u_int clevel, csize, i, sel;
	u_int multiplier;
	u_char type;

	/* Read the CP15 cache type register (c0, c0, 1). */
	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	cpuid = cpufunc_id();
	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpuid)
		goto out;

	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
		/*
		 * ARMv7: read CLIDR, then walk up to 7 cache levels,
		 * selecting each level/side via the cache size
		 * selection register and reading its CCSIDR.
		 */
		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
		    : "=r" (clevel));
		arm_cache_level = clevel;
		arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
		i = 0;
		while ((type = (clevel & 0x7)) && i < 7) {
			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
			    type == CACHE_SEP_CACHE) {
				/* Data or unified side: even selector. */
				sel = i << 1;
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
				/* Overwritten each level; the last data-bearing level wins. */
				arm_dcache_align = 1 <<
				    (CPUV7_CT_xSIZE_LEN(csize) + 4);
				arm_dcache_align_mask = arm_dcache_align - 1;
			}
			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
				/* Instruction side: odd selector. */
				sel = (i << 1) | 1;
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
			}
			i++;
			/* Next 3-bit level type field. */
			clevel >>= 3;
		}
	} else {
		/* Pre-v7 packed format: S bit clear means unified cache. */
		if ((ctype & CPU_CT_S) == 0)
			arm_pcache_unified = 1;

		/*
		 * If you want to know how this code works, go read the ARM ARM.
		 */

		arm_pcache_type = CPU_CT_CTYPE(ctype);

		if (arm_pcache_unified == 0) {
			/* Separate I-cache: decode its size/assoc/line fields. */
			isize = CPU_CT_ISIZE(ctype);
			multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
			arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
			if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
				if (isize & CPU_CT_xSIZE_M)
					arm_picache_line_size = 0; /* not present */
				else
					arm_picache_ways = 1;
			} else {
				arm_picache_ways = multiplier <<
				    (CPU_CT_xSIZE_ASSOC(isize) - 1);
			}
			arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
		}

		/* D-cache (or the unified cache) fields. */
		dsize = CPU_CT_DSIZE(ctype);
		multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
		if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
			if (dsize & CPU_CT_xSIZE_M)
				arm_pdcache_line_size = 0; /* not present */
			else
				arm_pdcache_ways = 1;
		} else {
			arm_pdcache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
		}
		arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

		arm_dcache_align = arm_pdcache_line_size;

		/* Log2 forms consumed by the arm9/arm10 set/index loops. */
		arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
		arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
		arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
		    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

	/* NB: label sits inside the else branch; the early goto above
	   jumps into this block, which is legal C. */
	out:
		arm_dcache_align_mask = arm_dcache_align - 1;
	}
}
#endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
1226
#if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
    defined(CPU_IXP12X0)
/* Cache information for CPUs without cache type registers. */
struct cachetab {
	u_int32_t ct_cpuid;		/* CPU id to match (after CPU_ID_CPU_MASK) */
	int	ct_pcache_type;		/* primary cache type (write-back variant) */
	int	ct_pcache_unified;	/* non-zero if I/D caches are unified */
	int	ct_pdcache_size;	/* D-cache size, bytes */
	int	ct_pdcache_line_size;	/* D-cache line size, bytes */
	int	ct_pdcache_ways;	/* D-cache ways */
	int	ct_picache_size;	/* I-cache size, bytes */
	int	ct_picache_line_size;	/* I-cache line size, bytes */
	int	ct_picache_ways;	/* I-cache ways */
};

struct cachetab cachetab[] = {
    /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0}	/* sentinel: zero cpuid terminates the scan */
};
1251
1252static void get_cachetype_table(void);
1253
1254static void
1255get_cachetype_table()
1256{
1257	int i;
1258	u_int32_t cpuid = cpufunc_id();
1259
1260	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
1261		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
1262			arm_pcache_type = cachetab[i].ct_pcache_type;
1263			arm_pcache_unified = cachetab[i].ct_pcache_unified;
1264			arm_pdcache_size = cachetab[i].ct_pdcache_size;
1265			arm_pdcache_line_size =
1266			    cachetab[i].ct_pdcache_line_size;
1267			arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
1268			arm_picache_size = cachetab[i].ct_picache_size;
1269			arm_picache_line_size =
1270			    cachetab[i].ct_picache_line_size;
1271			arm_picache_ways = cachetab[i].ct_picache_ways;
1272		}
1273	}
1274	arm_dcache_align = arm_pdcache_line_size;
1275
1276	arm_dcache_align_mask = arm_dcache_align - 1;
1277}
1278
1279#endif /* SA110 || SA1100 || SA1111 || IXP12X0 */
1280
1281/*
1282 * Cannot panic here as we may not have a console yet ...
1283 */
1284
/*
 * Identify the CPU from its ID register, install the matching
 * cpu_functions dispatch table into the global `cpufuncs', probe the
 * cache geometry and select the pmap PTE initialisation routine.
 * Returns 0 on success; panics if the CPU type is unsupported.
 */
int
set_cpufuncs()
{
	cputype = cpufunc_id();
	cputype &= CPU_ID_CPU_MASK;

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */

#ifdef CPU_ARM7TDMI
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
		cpufuncs = arm7tdmi_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	}
#endif
#ifdef CPU_ARM8
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x0000f000) == 0x00008000) {
		cpufuncs = arm8_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
		get_cachetype_cp15();
		pmap_pte_init_arm8();
		goto out;
	}
#endif	/* CPU_ARM8 */
#ifdef CPU_ARM9
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		/* Precompute set/index iterators for the arm9 dcache loops. */
		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
		    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
#ifdef ARM9_CACHE_WRITE_THROUGH
		pmap_pte_init_arm9();
#else
		pmap_pte_init_generic();
#endif
		goto out;
	}
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E) || defined(CPU_ARM10)
	/* Marvell Sheeva (Feroceon) cores: set up the extended control register. */
	if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
	    cputype == CPU_ID_MV88FR571_41) {
		uint32_t sheeva_ctrl;

		sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
		    MV_L2_ENABLE);
		/*
		 * Workaround for Marvell MV78100 CPU: Cache prefetch
		 * mechanism may affect the cache coherency validity,
		 * so it needs to be disabled.
		 *
		 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
		 * L2 Prefetching Mechanism) for details.
		 */
		if (cputype == CPU_ID_MV88FR571_VD ||
		    cputype == CPU_ID_MV88FR571_41)
			sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;

		sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);

		cpufuncs = sheeva_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	} else if (cputype == CPU_ID_ARM926EJS || cputype == CPU_ID_ARM1026EJS) {
		cpufuncs = armv5_ec_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM9E || CPU_ARM10 */
#ifdef CPU_ARM10
	if (/* cputype == CPU_ID_ARM1020T || */
	    cputype == CPU_ID_ARM1020E) {
		/*
		 * Select write-through cacheing (this isn't really an
		 * option on ARM1020T).
		 */
		cpufuncs = arm10_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		/* Precompute set/index iterators for the arm10 dcache loops. */
		arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm10_dcache_sets_max =
		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
		    arm10_dcache_sets_inc;
		arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM10 */
#ifdef CPU_ARM11
	/* Unconditional when built for ARM11: no id check performed. */
	cpufuncs = arm11_cpufuncs;
	cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
	get_cachetype_cp15();

	pmap_pte_init_mmu_v6();

	goto out;
#endif /* CPU_ARM11 */
#ifdef CPU_CORTEXA
	if (cputype == CPU_ID_CORTEXA8R1 ||
	    cputype == CPU_ID_CORTEXA8R2 ||
	    cputype == CPU_ID_CORTEXA8R3 ||
	    cputype == CPU_ID_CORTEXA9R1 ||
	    cputype == CPU_ID_CORTEXA9R2) {
		cpufuncs = cortexa_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
		get_cachetype_cp15();

		pmap_pte_init_mmu_v6();
		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;
		goto out;
	}
#endif /* CPU_CORTEXA */

#if defined(CPU_MV_PJ4B)
	if (cputype == CPU_ID_MV88SV581X_V6 ||
	    cputype == CPU_ID_MV88SV581X_V7 ||
	    cputype == CPU_ID_MV88SV584X_V7 ||
	    cputype == CPU_ID_ARM_88SV581X_V6 ||
	    cputype == CPU_ID_ARM_88SV581X_V7) {
		/* ThumbEE support in PFR0 distinguishes the v7 variant. */
		if (cpu_pfr(0) & ARM_PFR0_THUMBEE_MASK)
			cpufuncs = pj4bv7_cpufuncs;
		else
			cpufuncs = pj4bv6_cpufuncs;

		get_cachetype_cp15();
		pmap_pte_init_mmu_v6();
		goto out;
	} else if (cputype == CPU_ID_ARM_88SV584X_V6 ||
	    cputype == CPU_ID_MV88SV584X_V6) {
		cpufuncs = pj4bv6_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_mmu_v6();
		goto out;
	}

#endif /* CPU_MV_PJ4B */
#ifdef CPU_SA110
	if (cputype == CPU_ID_SA110) {
		cpufuncs = sa110_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
		get_cachetype_table();
		pmap_pte_init_sa1();
		goto out;
	}
#endif	/* CPU_SA110 */
#ifdef CPU_SA1100
	if (cputype == CPU_ID_SA1100) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_table();
		pmap_pte_init_sa1();
		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif	/* CPU_SA1100 */
#ifdef CPU_SA1110
	if (cputype == CPU_ID_SA1110) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_table();
		pmap_pte_init_sa1();
		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif	/* CPU_SA1110 */
#if defined(CPU_FA526) || defined(CPU_FA626TE)
	if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
		cpufuncs = fa526_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_cp15();
		pmap_pte_init_generic();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif	/* CPU_FA526 || CPU_FA626TE */
#ifdef CPU_IXP12X0
        if (cputype == CPU_ID_IXP1200) {
                cpufuncs = ixp12x0_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;
                get_cachetype_table();
                pmap_pte_init_sa1();
		goto out;
        }
#endif  /* CPU_IXP12X0 */
#ifdef CPU_XSCALE_80200
	if (cputype == CPU_ID_80200) {
		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;

		i80200_icu_init();

#if defined(XSCALE_CCLKCFG)
		/*
		 * Crank CCLKCFG to maximum legal value.
		 */
		__asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
			:
			: "r" (XSCALE_CCLKCFG));
#endif

		/*
		 * XXX Disable ECC in the Bus Controller Unit; we
		 * don't really support it, yet.  Clear any pending
		 * error indications.
		 */
		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
			:
			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));

		cpufuncs = xscale_cpufuncs;
		/*
		 * i80200 errata: Step-A0 and A1 have a bug where
		 * D$ dirty bits are not cleared on "invalidate by
		 * address".
		 *
		 * Workaround: Clean cache line before invalidating.
		 */
		if (rev == 0 || rev == 1)
			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_80200 */
#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_80321 */

#if defined(CPU_XSCALE_81342)
	if (cputype == CPU_ID_81342) {
		cpufuncs = xscalec3_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_81342 */
#ifdef CPU_XSCALE_PXA2X0
	/* ignore core revision to test PXA2xx CPUs */
	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {

		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif /* CPU_XSCALE_PXA2X0 */
#ifdef CPU_XSCALE_IXP425
	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
            cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {

		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		goto out;
	}
#endif /* CPU_XSCALE_IXP425 */
	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	/* NOTREACHED: return only to satisfy the compiler. */
	return(ARCHITECTURE_NOT_PRESENT);
out:
	uma_set_align(arm_dcache_align_mask);
	return (0);
}
1593
1594/*
1595 * Fixup routines for data and prefetch aborts.
1596 *
1597 * Several compile time symbols are used
1598 *
1599 * DEBUG_FAULT_CORRECTION - Print debugging information during the
1600 * correction of registers after a fault.
1601 * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
1602 * when defined should use late aborts
1603 */
1604
1605
1606/*
1607 * Null abort fixup routine.
1608 * For use when no fixup is required.
1609 */
1610int
1611cpufunc_null_fixup(arg)
1612	void *arg;
1613{
1614	return(ABORT_FIXUP_OK);
1615}
1616
1617
#if defined(CPU_ARM7TDMI)

/* Debug helpers: compiled out unless DEBUG_FAULT_CORRECTION is set. */
#ifdef DEBUG_FAULT_CORRECTION
#define DFC_PRINTF(x)		printf x
#define DFC_DISASSEMBLE(x)	disassemble(x)
#else
#define DFC_PRINTF(x)		/* nothing */
#define DFC_DISASSEMBLE(x)	/* nothing */
#endif

/*
 * "Early" data abort fixup.
 *
 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
 *
 * In early aborts, we may have to fix up LDM, STM, LDC and STC.
 */
int
early_abort_fixup(arg)
	void *arg;
{
	trapframe_t *frame = arg;
	u_int fault_pc;			/* address of the faulting instruction */
	u_int fault_instruction;	/* the instruction word itself */
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	/* Bits 27-25 == 100: LDM/STM.  Undo base writeback if enabled. */
	if ((fault_instruction & 0x0e000000) == 0x08000000) {
		int base;
		int loop;
		int count;
		int *registers = &frame->tf_r0;

		DFC_PRINTF(("LDM/STM\n"));
		DFC_DISASSEMBLE(fault_pc);
		if (fault_instruction & (1 << 21)) {	/* W: writeback bit */
			DFC_PRINTF(("This instruction must be corrected\n"));
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			/* Count registers transferred */
			count = 0;
			for (loop = 0; loop < 16; ++loop) {
				if (fault_instruction & (1<<loop))
					++count;
			}
			DFC_PRINTF(("%d registers used\n", count));
			DFC_PRINTF(("Corrected r%d by %d bytes ",
				       base, count * 4));
			if (fault_instruction & (1 << 23)) {	/* U: direction bit */
				DFC_PRINTF(("down\n"));
				registers[base] -= count * 4;
			} else {
				DFC_PRINTF(("up\n"));
				registers[base] += count * 4;
			}
		}
	/* Bits 27-25 == 110: LDC/STC.  Undo base writeback if enabled. */
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
		int base;
		int offset;
		int *registers = &frame->tf_r0;

		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */

		DFC_DISASSEMBLE(fault_pc);

		/* Only need to fix registers if write back is turned on */

		if ((fault_instruction & (1 << 21)) != 0) {
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;

			offset = (fault_instruction & 0xff) << 2;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
			if ((fault_instruction & (1 << 23)) != 0)
				offset = -offset;
			registers[base] += offset;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) /* NOTE(review): identical to the test two branches up -- this branch is unreachable */
		return ABORT_FIXUP_FAILED;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	return(ABORT_FIXUP_OK);
}
#endif	/* CPU_ARM2/250/3/6/7 */
1766
1767
#if defined(CPU_ARM7TDMI)
/*
 * "Late" (base updated) data abort fixup
 *
 * For ARM6 (in late-abort mode) and ARM7.
 *
 * In this model, all data-transfer instructions need fixing up.  We defer
 * LDM, STM, LDC and STC fixup to the early-abort handler.
 */
int
late_abort_fixup(arg)
	void *arg;
{
	trapframe_t *frame = arg;
	u_int fault_pc;			/* address of the faulting instruction */
	u_int fault_instruction;	/* the instruction word itself */
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	/* Was it a swap instruction ? */

	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
		/* SWP/SWPB: no base writeback, nothing to undo. */
		DFC_DISASSEMBLE(fault_pc);
	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {

		/* Was it a ldr/str instruction */
		/* This is for late abort only */

		int base;
		int offset;
		int *registers = &frame->tf_r0;

		DFC_DISASSEMBLE(fault_pc);

		/* This is for late abort only */

		if ((fault_instruction & (1 << 24)) == 0
		    || (fault_instruction & (1 << 21)) != 0) {
			/* post-indexed, or pre-indexed with writeback:
			   the base register was updated and must be undone */

			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			DFC_PRINTF(("late abt fix: r%d=%08x : ",
				       base, registers[base]));
			if ((fault_instruction & (1 << 25)) == 0) {
				/* Immediate offset - easy */

				offset = fault_instruction & 0xfff;
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				registers[base] += offset;
				DFC_PRINTF(("imm=%08x ", offset));
			} else {
				/* offset is a shifted register */
				int shift;

				offset = fault_instruction & 0x0f;
				if (offset == base)
					return ABORT_FIXUP_FAILED;

				/*
				 * Register offset - hard we have to
				 * cope with shifts !
				 */
				offset = registers[offset];

				if ((fault_instruction & (1 << 4)) == 0)
					/* shift with amount */
					shift = (fault_instruction >> 7) & 0x1f;
				else {
					/* shift with register */
					if ((fault_instruction & (1 << 7)) != 0)
						/* undefined for now so bail out */
						return ABORT_FIXUP_FAILED;
					shift = ((fault_instruction >> 8) & 0xf);
					if (base == shift)
						return ABORT_FIXUP_FAILED;
					DFC_PRINTF(("shift reg=%d ", shift));
					shift = registers[shift];
				}
				DFC_PRINTF(("shift=%08x ", shift));
				switch (((fault_instruction >> 5) & 0x3)) {
				case 0 : /* Logical left */
					offset = (int)(((u_int)offset) << shift);
					break;
				case 1 : /* Logical Right */
					if (shift == 0) shift = 32;
					offset = (int)(((u_int)offset) >> shift);
					break;
				case 2 : /* Arithmetic Right */
					if (shift == 0) shift = 32;
					offset = (int)(((int)offset) >> shift);
					break;
				case 3 : /* Rotate right (rol or rxx) */
					/* not handled; give up on the fixup */
					return ABORT_FIXUP_FAILED;
					break;
				}

				DFC_PRINTF(("abt: fixed LDR/STR with "
					       "register offset\n"));
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				DFC_PRINTF(("offset=%08x ", offset));
				registers[base] += offset;
			}
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	}

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/*
	 * Now let the early-abort fixup routine have a go, in case it
	 * was an LDM, STM, LDC or STC that faulted.
	 */

	return early_abort_fixup(arg);
}
#endif	/* CPU_ARM7TDMI */
1947
1948/*
1949 * CPU Setup code
1950 */
1951
1952#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined (CPU_ARM9) || \
1953  defined(CPU_ARM9E) || \
1954  defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) ||	\
1955  defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||		\
1956  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
1957  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
1958  defined(CPU_ARM10) ||  defined(CPU_ARM11) || \
1959  defined(CPU_FA526) || defined(CPU_FA626TE)
1960
/* Actions for parse_cpu_options(): how an option value maps onto the
   CPU control word. */
#define IGN	0		/* leave the bit(s) alone */
#define OR	1		/* set the bit(s) */
#define BIC	2		/* clear the bit(s) */

/* One boot-time tunable: option name, the action taken when the option
   is false/true, and the control-register bit(s) it affects. */
struct cpu_option {
	char	*co_name;
	int	co_falseop;
	int	co_trueop;
	int	co_value;
};
1971
1972static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
1973
1974static u_int
1975parse_cpu_options(args, optlist, cpuctrl)
1976	char *args;
1977	struct cpu_option *optlist;
1978	u_int cpuctrl;
1979{
1980	int integer;
1981
1982	if (args == NULL)
1983		return(cpuctrl);
1984
1985	while (optlist->co_name) {
1986		if (get_bootconf_option(args, optlist->co_name,
1987		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
1988			if (integer) {
1989				if (optlist->co_trueop == OR)
1990					cpuctrl |= optlist->co_value;
1991				else if (optlist->co_trueop == BIC)
1992					cpuctrl &= ~optlist->co_value;
1993			} else {
1994				if (optlist->co_falseop == OR)
1995					cpuctrl |= optlist->co_value;
1996				else if (optlist->co_falseop == BIC)
1997					cpuctrl &= ~optlist->co_value;
1998			}
1999		}
2000		++optlist;
2001	}
2002	return(cpuctrl);
2003}
2004#endif /* CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 || XSCALE*/
2005
2006#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8)
/*
 * Boot options shared by the ARM7TDMI and ARM8 setup routines:
 * enable/disable the unified (ID) cache and the write buffer.  The
 * bare "nocache"/"nowritebuf" spellings are kept for 1.2 compat only.
 */
struct cpu_option arm678_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2018
2019#endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
2020
2021#ifdef CPU_ARM7TDMI
/*
 * ARM7TDMI-specific boot options: unified cache, write buffer and the
 * FPA coprocessor clock divider ("fpaclk2" is the 1.2-compat spelling).
 */
struct cpu_option arm7tdmi_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }
};
2033
2034void
2035arm7tdmi_setup(args)
2036	char *args;
2037{
2038	int cpuctrl;
2039
2040	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2041		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2042		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2043
2044	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2045	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
2046
2047#ifdef __ARMEB__
2048	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2049#endif
2050
2051	/* Clear out the cache */
2052	cpu_idcache_wbinv_all();
2053
2054	/* Set the control register */
2055	ctrl = cpuctrl;
2056	cpu_control(0xffffffff, cpuctrl);
2057}
2058#endif	/* CPU_ARM7TDMI */
2059
2060#ifdef CPU_ARM8
/*
 * ARM8-specific boot options: unified cache, write buffer and branch
 * prediction ("branchpredict" is the 1.2-compat spelling).
 */
struct cpu_option arm8_options[] = {
	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2073
2074void
2075arm8_setup(args)
2076	char *args;
2077{
2078	int integer;
2079	int cpuctrl, cpuctrlmask;
2080	int clocktest;
2081	int setclock = 0;
2082
2083	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2084		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2085		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
2086	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2087		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2088		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2089		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
2090		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
2091
2092#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2093	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2094#endif
2095
2096	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
2097	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
2098
2099#ifdef __ARMEB__
2100	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2101#endif
2102
2103	/* Get clock configuration */
2104	clocktest = arm8_clock_config(0, 0) & 0x0f;
2105
2106	/* Special ARM8 clock and test configuration */
2107	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2108		clocktest = 0;
2109		setclock = 1;
2110	}
2111	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2112		if (integer)
2113			clocktest |= 0x01;
2114		else
2115			clocktest &= ~(0x01);
2116		setclock = 1;
2117	}
2118	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
2119		if (integer)
2120			clocktest |= 0x02;
2121		else
2122			clocktest &= ~(0x02);
2123		setclock = 1;
2124	}
2125	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
2126		clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
2127		setclock = 1;
2128	}
2129	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
2130		clocktest |= (integer & 7) << 5;
2131		setclock = 1;
2132	}
2133
2134	/* Clear out the cache */
2135	cpu_idcache_wbinv_all();
2136
2137	/* Set the control register */
2138	ctrl = cpuctrl;
2139	cpu_control(0xffffffff, cpuctrl);
2140
2141	/* Set the clock/test register */
2142	if (setclock)
2143		arm8_clock_config(0x7f, clocktest);
2144}
2145#endif	/* CPU_ARM8 */
2146
2147#ifdef CPU_ARM9
/*
 * ARM9 boot options: separate instruction/data caches and the write
 * buffer, individually or together.
 */
struct cpu_option arm9_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2159
2160void
2161arm9_setup(args)
2162	char *args;
2163{
2164	int cpuctrl, cpuctrlmask;
2165
2166	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2167	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2168	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2169	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
2170	    CPU_CONTROL_ROUNDROBIN;
2171	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2172		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2173		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2174		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2175		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2176		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
2177		 | CPU_CONTROL_ROUNDROBIN;
2178
2179#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2180	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2181#endif
2182
2183	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
2184
2185#ifdef __ARMEB__
2186	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2187#endif
2188	if (vector_page == ARM_VECTORS_HIGH)
2189		cpuctrl |= CPU_CONTROL_VECRELOC;
2190
2191	/* Clear out the cache */
2192	cpu_idcache_wbinv_all();
2193
2194	/* Set the control register */
2195	cpu_control(cpuctrlmask, cpuctrl);
2196	ctrl = cpuctrl;
2197
2198}
2199#endif	/* CPU_ARM9 */
2200
2201#if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * ARM9E/ARM10 boot options: separate instruction/data caches and the
 * write buffer, individually or together.
 */
struct cpu_option arm10_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2213
2214void
2215arm10_setup(args)
2216	char *args;
2217{
2218	int cpuctrl, cpuctrlmask;
2219
2220	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2221	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2222	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
2223	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
2224	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2225	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2226	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2227	    | CPU_CONTROL_BPRD_ENABLE
2228	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
2229
2230#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2231	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2232#endif
2233
2234	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
2235
2236#ifdef __ARMEB__
2237	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2238#endif
2239
2240	/* Clear out the cache */
2241	cpu_idcache_wbinv_all();
2242
2243	/* Now really make sure they are clean.  */
2244	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
2245
2246	if (vector_page == ARM_VECTORS_HIGH)
2247		cpuctrl |= CPU_CONTROL_VECRELOC;
2248
2249	/* Set the control register */
2250	ctrl = cpuctrl;
2251	cpu_control(0xffffffff, cpuctrl);
2252
2253	/* And again. */
2254	cpu_idcache_wbinv_all();
2255}
2256#endif	/* CPU_ARM9E || CPU_ARM10 */
2257
2258#ifdef CPU_ARM11
/*
 * ARM11 boot options: separate instruction/data caches, individually
 * or together.
 */
struct cpu_option arm11_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2267
2268void
2269arm11_setup(args)
2270	char *args;
2271{
2272	int cpuctrl;
2273
2274	cpuctrl = CPU_CONTROL_MMU_ENABLE;
2275#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2276	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2277#endif
2278	cpuctrl |= CPU_CONTROL_DC_ENABLE;
2279	cpuctrl |= (0xf << 3);
2280	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
2281#ifdef __ARMEB__
2282	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2283#endif
2284	cpuctrl |= CPU_CONTROL_SYST_ENABLE;
2285	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
2286	cpuctrl |= CPU_CONTROL_IC_ENABLE;
2287	if (vector_page == ARM_VECTORS_HIGH)
2288		cpuctrl |= CPU_CONTROL_VECRELOC;
2289	cpuctrl |= (0x5 << 16);
2290	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
2291
2292	/* Make sure caches are clean.  */
2293	cpu_idcache_wbinv_all();
2294	cpu_l2cache_wbinv_all();
2295
2296	/* Set the control register */
2297	ctrl = cpuctrl;
2298	cpu_control(0xffffffff, cpuctrl);
2299
2300	cpu_idcache_wbinv_all();
2301	cpu_l2cache_wbinv_all();
2302}
2303#endif	/* CPU_ARM11 */
2304
2305#ifdef CPU_MV_PJ4B
2306void
2307pj4bv6_setup(char *args)
2308{
2309	int cpuctrl;
2310
2311	pj4b_config();
2312
2313	cpuctrl = CPU_CONTROL_MMU_ENABLE;
2314#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2315	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2316#endif
2317	cpuctrl |= CPU_CONTROL_DC_ENABLE;
2318	cpuctrl |= (0xf << 3);
2319#ifdef __ARMEB__
2320	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2321#endif
2322	cpuctrl |= CPU_CONTROL_SYST_ENABLE;
2323	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
2324	cpuctrl |= CPU_CONTROL_IC_ENABLE;
2325	if (vector_page == ARM_VECTORS_HIGH)
2326		cpuctrl |= CPU_CONTROL_VECRELOC;
2327	cpuctrl |= (0x5 << 16);
2328	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
2329	/* XXX not yet */
2330	/* cpuctrl |= CPU_CONTROL_L2_ENABLE; */
2331
2332	/* Make sure caches are clean.  */
2333	cpu_idcache_wbinv_all();
2334	cpu_l2cache_wbinv_all();
2335
2336	/* Set the control register */
2337	ctrl = cpuctrl;
2338	cpu_control(0xffffffff, cpuctrl);
2339
2340	cpu_idcache_wbinv_all();
2341	cpu_l2cache_wbinv_all();
2342}
2343
2344void
2345pj4bv7_setup(args)
2346	char *args;
2347{
2348	int cpuctrl;
2349
2350	pj4b_config();
2351
2352	cpuctrl = CPU_CONTROL_MMU_ENABLE;
2353#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2354	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2355#endif
2356	cpuctrl |= CPU_CONTROL_DC_ENABLE;
2357	cpuctrl |= (0xf << 3);
2358	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
2359	cpuctrl |= CPU_CONTROL_IC_ENABLE;
2360	if (vector_page == ARM_VECTORS_HIGH)
2361		cpuctrl |= CPU_CONTROL_VECRELOC;
2362	cpuctrl |= (0x5 << 16) | (1 < 22);
2363	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
2364
2365	/* Clear out the cache */
2366	cpu_idcache_wbinv_all();
2367
2368	/* Set the control register */
2369	ctrl = cpuctrl;
2370	cpu_control(0xFFFFFFFF, cpuctrl);
2371
2372	/* And again. */
2373	cpu_idcache_wbinv_all();
2374}
2375#endif /* CPU_MV_PJ4B */
2376
2377#ifdef CPU_CORTEXA
2378
2379void
2380cortexa_setup(char *args)
2381{
2382	int cpuctrl, cpuctrlmask;
2383
2384	cpuctrlmask = CPU_CONTROL_MMU_ENABLE |     /* MMU enable         [0] */
2385	    CPU_CONTROL_AFLT_ENABLE |    /* Alignment fault    [1] */
2386	    CPU_CONTROL_DC_ENABLE |      /* DCache enable      [2] */
2387	    CPU_CONTROL_BPRD_ENABLE |    /* Branch prediction [11] */
2388	    CPU_CONTROL_IC_ENABLE |      /* ICache enable     [12] */
2389	    CPU_CONTROL_VECRELOC;        /* Vector relocation [13] */
2390
2391	cpuctrl = CPU_CONTROL_MMU_ENABLE |
2392	    CPU_CONTROL_IC_ENABLE |
2393	    CPU_CONTROL_DC_ENABLE |
2394	    CPU_CONTROL_BPRD_ENABLE;
2395
2396#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2397	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2398#endif
2399
2400	/* Switch to big endian */
2401#ifdef __ARMEB__
2402	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2403#endif
2404
2405	/* Check if the vector page is at the high address (0xffff0000) */
2406	if (vector_page == ARM_VECTORS_HIGH)
2407		cpuctrl |= CPU_CONTROL_VECRELOC;
2408
2409	/* Clear out the cache */
2410	cpu_idcache_wbinv_all();
2411
2412	/* Set the control register */
2413	ctrl = cpuctrl;
2414	cpu_control(cpuctrlmask, cpuctrl);
2415
2416	/* And again. */
2417	cpu_idcache_wbinv_all();
2418#ifdef SMP
2419	armv7_auxctrl((1 << 6) | (1 << 0), (1 << 6) | (1 << 0)); /* Enable SMP + TLB broadcasting  */
2420#endif
2421}
2422#endif  /* CPU_CORTEXA */
2423
2424
2425#ifdef CPU_SA110
/*
 * SA-110 boot options: separate instruction/data caches and the write
 * buffer, individually or together.  "nocache"/"nowritebuf" are the
 * 1.2-compat spellings.
 */
struct cpu_option sa110_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2441
2442void
2443sa110_setup(args)
2444	char *args;
2445{
2446	int cpuctrl, cpuctrlmask;
2447
2448	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2449		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2450		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2451		 | CPU_CONTROL_WBUF_ENABLE;
2452	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2453		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2454		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2455		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2456		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2457		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2458		 | CPU_CONTROL_CPCLK;
2459
2460#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2461	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2462#endif
2463
2464	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
2465
2466#ifdef __ARMEB__
2467	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2468#endif
2469
2470	/* Clear out the cache */
2471	cpu_idcache_wbinv_all();
2472
2473	/* Set the control register */
2474	ctrl = cpuctrl;
2475/*	cpu_control(cpuctrlmask, cpuctrl);*/
2476	cpu_control(0xffffffff, cpuctrl);
2477
2478	/*
2479	 * enable clockswitching, note that this doesn't read or write to r0,
2480	 * r0 is just to make it valid asm
2481	 */
2482	__asm ("mcr 15, 0, r0, c15, c1, 2");
2483}
2484#endif	/* CPU_SA110 */
2485
2486#if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * SA-1100/SA-1110 boot options: separate instruction/data caches and
 * the write buffer, individually or together.  "nocache"/"nowritebuf"
 * are the 1.2-compat spellings.
 */
struct cpu_option sa11x0_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2502
2503void
2504sa11x0_setup(args)
2505	char *args;
2506{
2507	int cpuctrl, cpuctrlmask;
2508
2509	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2510		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2511		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2512		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
2513	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2514		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2515		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2516		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2517		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2518		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2519		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2520
2521#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2522	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2523#endif
2524
2525
2526	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
2527
2528#ifdef __ARMEB__
2529	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2530#endif
2531
2532	if (vector_page == ARM_VECTORS_HIGH)
2533		cpuctrl |= CPU_CONTROL_VECRELOC;
2534	/* Clear out the cache */
2535	cpu_idcache_wbinv_all();
2536	/* Set the control register */
2537	ctrl = cpuctrl;
2538	cpu_control(0xffffffff, cpuctrl);
2539}
2540#endif	/* CPU_SA1100 || CPU_SA1110 */
2541
2542#if defined(CPU_FA526) || defined(CPU_FA626TE)
/*
 * FA526/FA626TE boot options: caches (jointly only) and the write
 * buffer.  "nocache"/"nowritebuf" are the 1.2-compat spellings.
 */
struct cpu_option fa526_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE |
					   CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE |
					   CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE |
					   CPU_CONTROL_DC_ENABLE) },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2557
2558void
2559fa526_setup(char *args)
2560{
2561	int cpuctrl, cpuctrlmask;
2562
2563	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2564		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2565		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2566		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
2567		| CPU_CONTROL_BPRD_ENABLE;
2568	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2569		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2570		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2571		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2572		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2573		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2574		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
2575
2576#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2577	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2578#endif
2579
2580	cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
2581
2582#ifdef __ARMEB__
2583	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2584#endif
2585
2586	if (vector_page == ARM_VECTORS_HIGH)
2587		cpuctrl |= CPU_CONTROL_VECRELOC;
2588
2589	/* Clear out the cache */
2590	cpu_idcache_wbinv_all();
2591
2592	/* Set the control register */
2593	ctrl = cpuctrl;
2594	cpu_control(0xffffffff, cpuctrl);
2595}
2596#endif	/* CPU_FA526 || CPU_FA626TE */
2597
2598
2599#if defined(CPU_IXP12X0)
/*
 * IXP12x0 boot options: separate instruction/data caches and the
 * write buffer, individually or together.
 */
struct cpu_option ixp12x0_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2611
2612void
2613ixp12x0_setup(args)
2614	char *args;
2615{
2616	int cpuctrl, cpuctrlmask;
2617
2618
2619	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
2620		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
2621		 | CPU_CONTROL_IC_ENABLE;
2622
2623	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
2624		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
2625		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
2626		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
2627		 | CPU_CONTROL_VECRELOC;
2628
2629#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2630	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2631#endif
2632
2633	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
2634
2635#ifdef __ARMEB__
2636	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2637#endif
2638
2639	if (vector_page == ARM_VECTORS_HIGH)
2640		cpuctrl |= CPU_CONTROL_VECRELOC;
2641
2642	/* Clear out the cache */
2643	cpu_idcache_wbinv_all();
2644
2645	/* Set the control register */
2646	ctrl = cpuctrl;
2647	/* cpu_control(0xffffffff, cpuctrl); */
2648	cpu_control(cpuctrlmask, cpuctrl);
2649}
2650#endif /* CPU_IXP12X0 */
2651
2652#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2653  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
2654  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
/*
 * XScale boot options: branch prediction and the separate
 * instruction/data caches, individually or together.
 * "branchpredict"/"nocache" are the 1.2-compat spellings.
 */
struct cpu_option xscale_options[] = {
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
2669
2670void
2671xscale_setup(args)
2672	char *args;
2673{
2674	uint32_t auxctl;
2675	int cpuctrl, cpuctrlmask;
2676
2677	/*
2678	 * The XScale Write Buffer is always enabled.  Our option
2679	 * is to enable/disable coalescing.  Note that bits 6:3
2680	 * must always be enabled.
2681	 */
2682
2683	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2684		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2685		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2686		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
2687		 | CPU_CONTROL_BPRD_ENABLE;
2688	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2689		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2690		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2691		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2692		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2693		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2694		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
2695		 CPU_CONTROL_L2_ENABLE;
2696
2697#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2698	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2699#endif
2700
2701	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
2702
2703#ifdef __ARMEB__
2704	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2705#endif
2706
2707	if (vector_page == ARM_VECTORS_HIGH)
2708		cpuctrl |= CPU_CONTROL_VECRELOC;
2709#ifdef CPU_XSCALE_CORE3
2710	cpuctrl |= CPU_CONTROL_L2_ENABLE;
2711#endif
2712
2713	/* Clear out the cache */
2714	cpu_idcache_wbinv_all();
2715
2716	/*
2717	 * Set the control register.  Note that bits 6:3 must always
2718	 * be set to 1.
2719	 */
2720	ctrl = cpuctrl;
2721/*	cpu_control(cpuctrlmask, cpuctrl);*/
2722	cpu_control(0xffffffff, cpuctrl);
2723
2724	/* Make sure write coalescing is turned on */
2725	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
2726		: "=r" (auxctl));
2727#ifdef XSCALE_NO_COALESCE_WRITES
2728	auxctl |= XSCALE_AUXCTL_K;
2729#else
2730	auxctl &= ~XSCALE_AUXCTL_K;
2731#endif
2732#ifdef CPU_XSCALE_CORE3
2733	auxctl |= XSCALE_AUXCTL_LLR;
2734	auxctl |= XSCALE_AUXCTL_MD_MASK;
2735#endif
2736	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
2737		: : "r" (auxctl));
2738}
2739#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
2740	   CPU_XSCALE_80219 */
2741