cpufunc.c revision 171618
1/*	$NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $	*/
2
3/*-
4 * arm7tdmi support code Copyright (c) 2001 John Fremlin
5 * arm8 support code Copyright (c) 1997 ARM Limited
6 * arm8 support code Copyright (c) 1997 Causality Limited
7 * arm9 support code Copyright (C) 2001 ARM Ltd
8 * Copyright (c) 1997 Mark Brinicombe.
9 * Copyright (c) 1997 Causality Limited
10 * All rights reserved.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 *    notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 *    notice, this list of conditions and the following disclaimer in the
19 *    documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 *    must display the following acknowledgement:
22 *	This product includes software developed by Causality Limited.
23 * 4. The name of Causality Limited may not be used to endorse or promote
24 *    products derived from this software without specific prior written
25 *    permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * SUCH DAMAGE.
38 *
39 * RiscBSD kernel project
40 *
41 * cpufuncs.c
42 *
43 * C functions for supporting CPU / MMU / TLB specific operations.
44 *
45 * Created      : 30/01/97
46 */
47#include <sys/cdefs.h>
48__FBSDID("$FreeBSD: head/sys/arm/arm/cpufunc.c 171618 2007-07-27 14:39:41Z cognet $");
49
50#include <sys/param.h>
51#include <sys/systm.h>
52#include <sys/lock.h>
53#include <sys/mutex.h>
54#include <sys/bus.h>
55#include <machine/bus.h>
56#include <machine/cpu.h>
57#include <machine/disassem.h>
58
59#include <vm/vm.h>
60#include <vm/pmap.h>
61#include <vm/uma.h>
62
63#include <machine/cpuconf.h>
64#include <machine/cpufunc.h>
65#include <machine/bootconfig.h>
66
67#ifdef CPU_XSCALE_80200
68#include <arm/xscale/i80200/i80200reg.h>
69#include <arm/xscale/i80200/i80200var.h>
70#endif
71
72#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
73#include <arm/xscale/i80321/i80321reg.h>
74#include <arm/xscale/i80321/i80321var.h>
75#endif
76
77#if defined(CPU_XSCALE_81342)
78#include <arm/xscale/i8134x/i81342reg.h>
79#endif
80
81#ifdef CPU_XSCALE_IXP425
82#include <arm/xscale/ixp425/ixp425reg.h>
83#include <arm/xscale/ixp425/ixp425var.h>
84#endif
85
86#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
87    defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
88#include <arm/xscale/xscalereg.h>
89#endif
90
#if defined(PERFCTRS)
/* Performance-monitor hooks; set up by xscale_pmu_init() when PERFCTRS. */
struct arm_pmc_funcs *arm_pmc;
#endif

/* PRIMARY CACHE VARIABLES */
/*
 * Filled in by get_cachetype_cp15() (CPUs with a cp15 cache-type
 * register) or get_cachetype_table() (SA-1/IXP12x0 class CPUs).
 */
int	arm_picache_size;
int	arm_picache_line_size;
int	arm_picache_ways;

int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;
int	arm_pdcache_ways;

int	arm_pcache_type;
int	arm_pcache_unified;

int	arm_dcache_align;
int	arm_dcache_align_mask;	/* arm_dcache_align - 1 */

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;
/* NOTE(review): 'ctrl' is not referenced in this chunk; presumably a
   saved copy of the CP15 control register set elsewhere — confirm. */
int ctrl;
113
114#ifdef CPU_ARM7TDMI
/*
 * Operations vector for ARM7TDMI; copied into the global 'cpufuncs'
 * by set_cpufuncs().  Initializers are positional and must match the
 * slot order in struct cpu_functions.  (void *) casts reuse helpers
 * whose prototypes differ from the slot's function type.
 */
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm7tdmi_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	/* Combined I+D flush routines serve all six TLB slots. */
	arm7tdmi_tlb_flushID,		/* tlb_flushID		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushID_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushI		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushI_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushD		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm7tdmi_cache_flushID,		/* dcache_wbinv_all	*/
	(void *)arm7tdmi_cache_flushID,	/* dcache_wbinv_range	*/
	(void *)arm7tdmi_cache_flushID,	/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	arm7tdmi_cache_flushID,		/* idcache_wbinv_all	*/
	(void *)arm7tdmi_cache_flushID,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	cpufunc_nullop,			/* l2cache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_inv_range	*/
	cpufunc_nullop,			/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	late_abort_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm7tdmi_context_switch,	/* context_switch	*/

	arm7tdmi_setup			/* cpu setup		*/

};
174#endif	/* CPU_ARM7TDMI */
175
176#ifdef CPU_ARM8
/*
 * Operations vector for ARM8; installed by set_cpufuncs().  Slot order
 * is positional and must match struct cpu_functions.
 */
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm8_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm8_tlb_flushID,		/* tlb_flushID		*/
	arm8_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	arm8_tlb_flushID,		/* tlb_flushI		*/
	arm8_tlb_flushID_SE,		/* tlb_flushI_SE	*/
	arm8_tlb_flushID,		/* tlb_flushD		*/
	arm8_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm8_cache_purgeID,		/* dcache_wbinv_all	*/
	(void *)arm8_cache_purgeID,	/* dcache_wbinv_range	*/
/*XXX*/	(void *)arm8_cache_purgeID,	/* dcache_inv_range	*/
	(void *)arm8_cache_cleanID,	/* dcache_wb_range	*/

	arm8_cache_purgeID,		/* idcache_wbinv_all	*/
	(void *)arm8_cache_purgeID,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	cpufunc_nullop,			/* l2cache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_inv_range	*/
	cpufunc_nullop,			/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm8_context_switch,		/* context_switch	*/

	arm8_setup			/* cpu setup		*/
};
235#endif	/* CPU_ARM8 */
236
237#ifdef CPU_ARM9
/*
 * Operations vector for ARM9; installed by set_cpufuncs(), which also
 * derives the arm9_dcache_* parameters from the cp15 cache geometry.
 * Note the /*XXX*/ slot: dcache_inv_range falls back to write-back +
 * invalidate rather than a pure invalidate.
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	arm9_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm9_icache_sync_all,		/* icache_sync_all	*/
	arm9_icache_sync_range,		/* icache_sync_range	*/

	arm9_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	arm9_dcache_wbinv_range,	/* dcache_wbinv_range	*/
/*XXX*/	arm9_dcache_wbinv_range,	/* dcache_inv_range	*/
	arm9_dcache_wb_range,		/* dcache_wb_range	*/

	arm9_idcache_wbinv_all,		/* idcache_wbinv_all	*/
	arm9_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	cpufunc_nullop,			/* l2cache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_inv_range	*/
	cpufunc_nullop,			/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm9_context_switch,		/* context_switch	*/

	arm9_setup			/* cpu setup		*/

};
297#endif /* CPU_ARM9 */
298
299#ifdef CPU_ARM10
/*
 * Operations vector for ARM10 (ARM1020E); installed by set_cpufuncs(),
 * which also derives the arm10_dcache_* parameters from cp15 geometry.
 * Unlike ARM9, this core has a real dcache_inv_range implementation.
 */
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	arm10_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm10_icache_sync_all,		/* icache_sync_all	*/
	arm10_icache_sync_range,	/* icache_sync_range	*/

	arm10_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	arm10_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	arm10_dcache_inv_range,		/* dcache_inv_range	*/
	arm10_dcache_wb_range,		/* dcache_wb_range	*/

	arm10_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	arm10_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	cpufunc_nullop,			/* l2cache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_inv_range	*/
	cpufunc_nullop,			/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/

};
359#endif /* CPU_ARM10 */
360
361#ifdef CPU_SA110
/*
 * Operations vector for StrongARM SA-110; installed by set_cpufuncs().
 * Shares the sa1_* cache/TTB helpers with SA-11x0 and IXP12x0 below.
 */
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	cpufunc_nullop,			/* l2cache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_inv_range	*/
	cpufunc_nullop,			/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	sa110_context_switch,		/* context_switch	*/

	sa110_setup			/* cpu setup		*/
};
420#endif	/* CPU_SA110 */
421
422#if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * Operations vector for StrongARM SA-1100/SA-1110; installed by
 * set_cpufuncs().  Differs from SA-110 in having a real read-buffer
 * drain, a CPU sleep routine, and its own setup/context-switch code.
 */
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	cpufunc_nullop,			/* l2cache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_inv_range	*/
	cpufunc_nullop,			/* l2cache_wb_range	*/

	/* Other functions */

	sa11x0_drain_readbuf,		/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	sa11x0_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	sa11x0_context_switch,		/* context_switch	*/

	sa11x0_setup			/* cpu setup		*/
};
481#endif	/* CPU_SA1100 || CPU_SA1110 */
482
483#ifdef CPU_IXP12X0
/*
 * Operations vector for Intel IXP12x0; installed by set_cpufuncs().
 * StrongARM-derived: reuses the sa1_* cache/TTB helpers with its own
 * read-buffer drain, context switch, and setup routines.
 */
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	cpufunc_nullop,			/* l2cache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_inv_range	*/
	cpufunc_nullop,			/* l2cache_wb_range	*/

	/* Other functions */

	ixp12x0_drain_readbuf,		/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	ixp12x0_context_switch,		/* context_switch	*/

	ixp12x0_setup			/* cpu setup		*/
};
542#endif	/* CPU_IXP12X0 */
543
544#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
545  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
546  defined(CPU_XSCALE_80219)
547
/*
 * Operations vector shared by the non-81342 XScale variants (i80200,
 * i80321, PXA2x0, IXP425, 80219); installed by set_cpufuncs().  Note
 * the real cpwait: XScale requires a drain after cp15 writes.  The
 * dcache_inv_range slot may be overridden for i80200 step A0/A1 (see
 * the errata workaround in set_cpufuncs()).
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscale_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscale_cache_syncI,		/* icache_sync_all	*/
	xscale_cache_syncI_rng,		/* icache_sync_range	*/

	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all 	*/
	cpufunc_nullop,			/* l2cache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_inv_range	*/
	cpufunc_nullop,			/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscale_context_switch,		/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
606#endif
607/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
608   CPU_XSCALE_80219 */
609
610#ifdef CPU_XSCALE_81342
/*
 * Operations vector for the XScale core 3 (i81342); installed by
 * set_cpufuncs().  The only table in this file with real L2 cache
 * operations (xscalec3_l2cache_*).
 */
struct cpu_functions xscalec3_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscalec3_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscalec3_cache_syncI,		/* icache_sync_all	*/
	xscalec3_cache_syncI_rng,	/* icache_sync_range	*/

	xscalec3_cache_purgeD,		/* dcache_wbinv_all	*/
	xscalec3_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscalec3_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscalec3_cache_purgeID,		/* idcache_wbinv_all	*/
	xscalec3_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	xscalec3_l2cache_purge,		/* l2cache_wbinv_all	*/
	xscalec3_l2cache_purge_rng,	/* l2cache_wbinv_range	*/
	xscalec3_l2cache_flush_rng,	/* l2cache_inv_range	*/
	xscalec3_l2cache_clean_rng,	/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscalec3_context_switch,	/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
669#endif /* CPU_XSCALE_81342 */
/*
 * Global variables also used by locore.s; filled in by set_cpufuncs()
 * at early boot.
 */

struct cpu_functions cpufuncs;	/* active operations vector */
u_int cputype;			/* cpufunc_id() & CPU_ID_CPU_MASK */
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
677
678#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
679  defined (CPU_ARM10) ||					       \
680  defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||	       \
681  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||	       \
682  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
683
static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  Consumed by set_cpufuncs() to derive the ARM9/ARM10
   dcache set/index stepping parameters. */
static int	arm_dcache_l2_nsets;
static int	arm_dcache_l2_assoc;
static int	arm_dcache_l2_linesize;
691
692static void
693get_cachetype_cp15()
694{
695	u_int ctype, isize, dsize;
696	u_int multiplier;
697
698	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
699		: "=r" (ctype));
700
701	/*
702	 * ...and thus spake the ARM ARM:
703	 *
704	 * If an <opcode2> value corresponding to an unimplemented or
705	 * reserved ID register is encountered, the System Control
706	 * processor returns the value of the main ID register.
707	 */
708	if (ctype == cpufunc_id())
709		goto out;
710
711	if ((ctype & CPU_CT_S) == 0)
712		arm_pcache_unified = 1;
713
714	/*
715	 * If you want to know how this code works, go read the ARM ARM.
716	 */
717
718	arm_pcache_type = CPU_CT_CTYPE(ctype);
719
720	if (arm_pcache_unified == 0) {
721		isize = CPU_CT_ISIZE(ctype);
722		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
723		arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
724		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
725			if (isize & CPU_CT_xSIZE_M)
726				arm_picache_line_size = 0; /* not present */
727			else
728				arm_picache_ways = 1;
729		} else {
730			arm_picache_ways = multiplier <<
731			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
732		}
733		arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
734	}
735
736	dsize = CPU_CT_DSIZE(ctype);
737	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
738	arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
739	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
740		if (dsize & CPU_CT_xSIZE_M)
741			arm_pdcache_line_size = 0; /* not present */
742		else
743			arm_pdcache_ways = 1;
744	} else {
745		arm_pdcache_ways = multiplier <<
746		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
747	}
748	arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
749
750	arm_dcache_align = arm_pdcache_line_size;
751
752	arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
753	arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
754	arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
755	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
756
757 out:
758	arm_dcache_align_mask = arm_dcache_align - 1;
759}
760#endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
761
762#if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
763    defined(CPU_IXP12X0)
764/* Cache information for CPUs without cache type registers. */
/* Cache information for CPUs without cache type registers. */
/* Each entry describes one CPU ID; copied into the arm_p*cache_*
   globals by get_cachetype_table() below.  Zero ct_cpuid terminates. */
struct cachetab {
	u_int32_t ct_cpuid;
	int	ct_pcache_type;
	int	ct_pcache_unified;
	int	ct_pdcache_size;
	int	ct_pdcache_line_size;
	int	ct_pdcache_ways;
	int	ct_picache_size;
	int	ct_picache_line_size;
	int	ct_picache_ways;
};

struct cachetab cachetab[] = {
    /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0}
};
786
787static void get_cachetype_table(void);
788
789static void
790get_cachetype_table()
791{
792	int i;
793	u_int32_t cpuid = cpufunc_id();
794
795	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
796		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
797			arm_pcache_type = cachetab[i].ct_pcache_type;
798			arm_pcache_unified = cachetab[i].ct_pcache_unified;
799			arm_pdcache_size = cachetab[i].ct_pdcache_size;
800			arm_pdcache_line_size =
801			    cachetab[i].ct_pdcache_line_size;
802			arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
803			arm_picache_size = cachetab[i].ct_picache_size;
804			arm_picache_line_size =
805			    cachetab[i].ct_picache_line_size;
806			arm_picache_ways = cachetab[i].ct_picache_ways;
807		}
808	}
809	arm_dcache_align = arm_pdcache_line_size;
810
811	arm_dcache_align_mask = arm_dcache_align - 1;
812}
813
814#endif /* SA110 || SA1100 || SA1111 || IXP12X0 */
815
/*
 * Identify the running CPU from its cp15 main ID register, install the
 * matching operations vector into the global 'cpufuncs', probe the
 * cache geometry, and select the pmap PTE initialization for the core.
 * Returns 0 on success; an unsupported CPU type ends in panic().
 *
 * Cannot panic here as we may not have a console yet ...
 */

int
set_cpufuncs()
{
	cputype = cpufunc_id();
	cputype &= CPU_ID_CPU_MASK;

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */

#ifdef CPU_ARM7TDMI
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
		cpufuncs = arm7tdmi_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	}
#endif
#ifdef CPU_ARM8
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x0000f000) == 0x00008000) {
		cpufuncs = arm8_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
		get_cachetype_cp15();
		pmap_pte_init_arm8();
		goto out;
	}
#endif	/* CPU_ARM8 */
#ifdef CPU_ARM9
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		/*
		 * Derive the set/index stepping values for the ARM9
		 * set/way cache loops from the log2 geometry recorded
		 * by get_cachetype_cp15().
		 */
		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
		    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
#ifdef ARM9_CACHE_WRITE_THROUGH
		pmap_pte_init_arm9();
#else
		pmap_pte_init_generic();
#endif
		goto out;
	}
#endif /* CPU_ARM9 */
#ifdef CPU_ARM10
	if (/* cputype == CPU_ID_ARM1020T || */
	    cputype == CPU_ID_ARM1020E) {
		/*
		 * Select write-through cacheing (this isn't really an
		 * option on ARM1020T).
		 */
		cpufuncs = arm10_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm10_dcache_sets_max =
		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
		    arm10_dcache_sets_inc;
		arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM10 */
#ifdef CPU_SA110
	if (cputype == CPU_ID_SA110) {
		cpufuncs = sa110_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
		get_cachetype_table();
		pmap_pte_init_sa1();
		goto out;
	}
#endif	/* CPU_SA110 */
#ifdef CPU_SA1100
	if (cputype == CPU_ID_SA1100) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_table();
		pmap_pte_init_sa1();
		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif	/* CPU_SA1100 */
#ifdef CPU_SA1110
	if (cputype == CPU_ID_SA1110) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_table();
		pmap_pte_init_sa1();
		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif	/* CPU_SA1110 */
#ifdef CPU_IXP12X0
        if (cputype == CPU_ID_IXP1200) {
                cpufuncs = ixp12x0_cpufuncs;
                cpu_reset_needs_v4_MMU_disable = 1;
                get_cachetype_table();
                pmap_pte_init_sa1();
		goto out;
        }
#endif  /* CPU_IXP12X0 */
#ifdef CPU_XSCALE_80200
	if (cputype == CPU_ID_80200) {
		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;

		i80200_icu_init();

		/*
		 * Reset the Performance Monitoring Unit to a
		 * pristine state:
		 *	- CCNT, PMN0, PMN1 reset to 0
		 *	- overflow indications cleared
		 *	- all counters disabled
		 */
		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
			:
			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
			       PMNC_CC_IF));

#if defined(XSCALE_CCLKCFG)
		/*
		 * Crank CCLKCFG to maximum legal value.
		 */
		__asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
			:
			: "r" (XSCALE_CCLKCFG));
#endif

		/*
		 * XXX Disable ECC in the Bus Controller Unit; we
		 * don't really support it, yet.  Clear any pending
		 * error indications.
		 */
		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
			:
			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		/*
		 * i80200 errata: Step-A0 and A1 have a bug where
		 * D$ dirty bits are not cleared on "invalidate by
		 * address".
		 *
		 * Workaround: Clean cache line before invalidating.
		 */
		if (rev == 0 || rev == 1)
			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_80200 */
#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
		/*
		 * Reset the Performance Monitoring Unit to a
		 * pristine state:
		 *	- CCNT, PMN0, PMN1 reset to 0
		 *	- overflow indications cleared
		 *	- all counters disabled
		 */
		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
			:
			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
			       PMNC_CC_IF));

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_80321 */

#if defined(CPU_XSCALE_81342)
	if (cputype == CPU_ID_81342) {
		cpufuncs = xscalec3_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_81342 */
#ifdef CPU_XSCALE_PXA2X0
	/* ignore core revision to test PXA2xx CPUs */
	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif /* CPU_XSCALE_PXA2X0 */
#ifdef CPU_XSCALE_IXP425
	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
            cputype == CPU_ID_IXP425_266) {

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		goto out;
	}
#endif /* CPU_XSCALE_IXP425 */
	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	return(ARCHITECTURE_NOT_PRESENT);	/* NOTREACHED */
out:
	/* Teach UMA the cache-line alignment discovered above. */
	uma_set_align(arm_dcache_align_mask);
	return (0);
}
1077
1078/*
1079 * Fixup routines for data and prefetch aborts.
1080 *
1081 * Several compile time symbols are used
1082 *
1083 * DEBUG_FAULT_CORRECTION - Print debugging information during the
1084 * correction of registers after a fault.
1085 * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
1086 * when defined should use late aborts
1087 */
1088
1089
1090/*
1091 * Null abort fixup routine.
1092 * For use when no fixup is required.
1093 */
1094int
1095cpufunc_null_fixup(arg)
1096	void *arg;
1097{
1098	return(ABORT_FIXUP_OK);
1099}
1100
1101
1102#if defined(CPU_ARM7TDMI)
1103
/*
 * Debug helpers used by the abort-fixup routines below.  They expand
 * to nothing unless DEBUG_FAULT_CORRECTION is defined at build time.
 */
#ifdef DEBUG_FAULT_CORRECTION
#define DFC_PRINTF(x)		printf x
#define DFC_DISASSEMBLE(x)	disassemble(x)
#else
#define DFC_PRINTF(x)		/* nothing */
#define DFC_DISASSEMBLE(x)	/* nothing */
#endif
1111
1112/*
1113 * "Early" data abort fixup.
1114 *
1115 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
1116 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
1117 *
1118 * In early aborts, we may have to fix up LDM, STM, LDC and STC.
1119 */
1120int
1121early_abort_fixup(arg)
1122	void *arg;
1123{
1124	trapframe_t *frame = arg;
1125	u_int fault_pc;
1126	u_int fault_instruction;
1127	int saved_lr = 0;
1128
1129	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1130
1131		/* Ok an abort in SVC mode */
1132
1133		/*
1134		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1135		 * as the fault happened in svc mode but we need it in the
1136		 * usr slot so we can treat the registers as an array of ints
1137		 * during fixing.
1138		 * NOTE: This PC is in the position but writeback is not
1139		 * allowed on r15.
1140		 * Doing it like this is more efficient than trapping this
1141		 * case in all possible locations in the following fixup code.
1142		 */
1143
1144		saved_lr = frame->tf_usr_lr;
1145		frame->tf_usr_lr = frame->tf_svc_lr;
1146
1147		/*
1148		 * Note the trapframe does not have the SVC r13 so a fault
1149		 * from an instruction with writeback to r13 in SVC mode is
1150		 * not allowed. This should not happen as the kstack is
1151		 * always valid.
1152		 */
1153	}
1154
1155	/* Get fault address and status from the CPU */
1156
1157	fault_pc = frame->tf_pc;
1158	fault_instruction = *((volatile unsigned int *)fault_pc);
1159
1160	/* Decode the fault instruction and fix the registers as needed */
1161
1162	if ((fault_instruction & 0x0e000000) == 0x08000000) {
1163		int base;
1164		int loop;
1165		int count;
1166		int *registers = &frame->tf_r0;
1167
1168		DFC_PRINTF(("LDM/STM\n"));
1169		DFC_DISASSEMBLE(fault_pc);
1170		if (fault_instruction & (1 << 21)) {
1171			DFC_PRINTF(("This instruction must be corrected\n"));
1172			base = (fault_instruction >> 16) & 0x0f;
1173			if (base == 15)
1174				return ABORT_FIXUP_FAILED;
1175			/* Count registers transferred */
1176			count = 0;
1177			for (loop = 0; loop < 16; ++loop) {
1178				if (fault_instruction & (1<<loop))
1179					++count;
1180			}
1181			DFC_PRINTF(("%d registers used\n", count));
1182			DFC_PRINTF(("Corrected r%d by %d bytes ",
1183				       base, count * 4));
1184			if (fault_instruction & (1 << 23)) {
1185				DFC_PRINTF(("down\n"));
1186				registers[base] -= count * 4;
1187			} else {
1188				DFC_PRINTF(("up\n"));
1189				registers[base] += count * 4;
1190			}
1191		}
1192	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
1193		int base;
1194		int offset;
1195		int *registers = &frame->tf_r0;
1196
1197		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */
1198
1199		DFC_DISASSEMBLE(fault_pc);
1200
1201		/* Only need to fix registers if write back is turned on */
1202
1203		if ((fault_instruction & (1 << 21)) != 0) {
1204			base = (fault_instruction >> 16) & 0x0f;
1205			if (base == 13 &&
1206			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
1207				return ABORT_FIXUP_FAILED;
1208			if (base == 15)
1209				return ABORT_FIXUP_FAILED;
1210
1211			offset = (fault_instruction & 0xff) << 2;
1212			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1213			if ((fault_instruction & (1 << 23)) != 0)
1214				offset = -offset;
1215			registers[base] += offset;
1216			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
1217		}
1218	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
1219		return ABORT_FIXUP_FAILED;
1220
1221	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
1222
1223		/* Ok an abort in SVC mode */
1224
1225		/*
1226		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
1227		 * as the fault happened in svc mode but we need it in the
1228		 * usr slot so we can treat the registers as an array of ints
1229		 * during fixing.
1230		 * NOTE: This PC is in the position but writeback is not
1231		 * allowed on r15.
1232		 * Doing it like this is more efficient than trapping this
1233		 * case in all possible locations in the prior fixup code.
1234		 */
1235
1236		frame->tf_svc_lr = frame->tf_usr_lr;
1237		frame->tf_usr_lr = saved_lr;
1238
1239		/*
1240		 * Note the trapframe does not have the SVC r13 so a fault
1241		 * from an instruction with writeback to r13 in SVC mode is
1242		 * not allowed. This should not happen as the kstack is
1243		 * always valid.
1244		 */
1245	}
1246
1247	return(ABORT_FIXUP_OK);
1248}
1249#endif	/* CPU_ARM2/250/3/6/7 */
1250
1251
1252#if defined(CPU_ARM7TDMI)
1253/*
1254 * "Late" (base updated) data abort fixup
1255 *
1256 * For ARM6 (in late-abort mode) and ARM7.
1257 *
1258 * In this model, all data-transfer instructions need fixing up.  We defer
1259 * LDM, STM, LDC and STC fixup to the early-abort handler.
1260 */
int
late_abort_fixup(arg)
	void *arg;
{
	trapframe_t *frame = arg;
	u_int fault_pc;
	u_int fault_instruction;
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: r15 (the PC) is in this position too, but writeback
		 * to r15 is not allowed, so it never needs fixing.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	/* Was it a swap (SWP/SWPB) instruction?  No base writeback,
	 * so nothing needs fixing. */

	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
		DFC_DISASSEMBLE(fault_pc);
	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {

		/* Single-register LDR/STR (bits 27:26 == 01) */
		/* This is for late abort only */

		int base;
		int offset;
		int *registers = &frame->tf_r0;

		DFC_DISASSEMBLE(fault_pc);

		/* This is for late abort only */

		if ((fault_instruction & (1 << 24)) == 0
		    || (fault_instruction & (1 << 21)) != 0) {
			/* post-indexed (P=0) or writeback (W=1): the base
			 * register was updated and must be restored */

			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			DFC_PRINTF(("late abt fix: r%d=%08x : ",
				       base, registers[base]));
			if ((fault_instruction & (1 << 25)) == 0) {
				/* Immediate offset - easy */

				offset = fault_instruction & 0xfff;
				/* U=1: base was incremented; negate the
				 * offset so the add below undoes it. */
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				registers[base] += offset;
				DFC_PRINTF(("imm=%08x ", offset));
			} else {
				/* offset is a shifted register */
				int shift;

				offset = fault_instruction & 0x0f;
				/* If the offset register is the base, the
				 * original value is already lost. */
				if (offset == base)
					return ABORT_FIXUP_FAILED;

				/*
				 * Register offset - hard we have to
				 * cope with shifts !
				 */
				offset = registers[offset];

				if ((fault_instruction & (1 << 4)) == 0)
					/* shift with amount */
					shift = (fault_instruction >> 7) & 0x1f;
				else {
					/* shift with register */
					if ((fault_instruction & (1 << 7)) != 0)
						/* undefined for now so bail out */
						return ABORT_FIXUP_FAILED;
					shift = ((fault_instruction >> 8) & 0xf);
					/* Shift register == base: original
					 * base value is unrecoverable. */
					if (base == shift)
						return ABORT_FIXUP_FAILED;
					DFC_PRINTF(("shift reg=%d ", shift));
					shift = registers[shift];
				}
				DFC_PRINTF(("shift=%08x ", shift));
				/* Bits 6:5 select the shift type. */
				switch (((fault_instruction >> 5) & 0x3)) {
				case 0 : /* Logical left */
					offset = (int)(((u_int)offset) << shift);
					break;
				case 1 : /* Logical Right */
					if (shift == 0) shift = 32;
					offset = (int)(((u_int)offset) >> shift);
					break;
				case 2 : /* Arithmetic Right */
					if (shift == 0) shift = 32;
					offset = (int)(((int)offset) >> shift);
					break;
				case 3 : /* Rotate right (rol or rxx) */
					/* Not handled; give up. */
					return ABORT_FIXUP_FAILED;
					break;
				}

				DFC_PRINTF(("abt: fixed LDR/STR with "
					       "register offset\n"));
				/* U=1: base was incremented; undo it. */
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				DFC_PRINTF(("offset=%08x ", offset));
				registers[base] += offset;
			}
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	}

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Undo the SVC/usr r14 shuffle performed on entry: put the
		 * (possibly fixed-up) usr slot back into the SVC r14 and
		 * restore the saved usr r14.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/*
	 * Now let the early-abort fixup routine have a go, in case it
	 * was an LDM, STM, LDC or STC that faulted.
	 */

	return early_abort_fixup(arg);
}
1430#endif	/* CPU_ARM7TDMI */
1431
1432/*
1433 * CPU Setup code
1434 */
1435
1436#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined (CPU_ARM9) || \
1437  defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) ||	\
1438  defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||		\
1439  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
1440  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
1441
/* Actions applied by parse_cpu_options(): leave, set, or clear bits. */
#define IGN	0
#define OR	1
#define BIC	2

/*
 * One boot-argument toggle: when co_name parses as boolean-true,
 * co_trueop is applied to co_value; otherwise co_falseop is applied.
 */
struct cpu_option {
	char	*co_name;	/* boot argument name */
	int	co_falseop;	/* action when the option is false */
	int	co_trueop;	/* action when the option is true */
	int	co_value;	/* CPU_CONTROL_* bit(s) affected */
};
1452
1453static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
1454
1455static u_int
1456parse_cpu_options(args, optlist, cpuctrl)
1457	char *args;
1458	struct cpu_option *optlist;
1459	u_int cpuctrl;
1460{
1461	int integer;
1462
1463	if (args == NULL)
1464		return(cpuctrl);
1465
1466	while (optlist->co_name) {
1467		if (get_bootconf_option(args, optlist->co_name,
1468		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
1469			if (integer) {
1470				if (optlist->co_trueop == OR)
1471					cpuctrl |= optlist->co_value;
1472				else if (optlist->co_trueop == BIC)
1473					cpuctrl &= ~optlist->co_value;
1474			} else {
1475				if (optlist->co_falseop == OR)
1476					cpuctrl |= optlist->co_value;
1477				else if (optlist->co_falseop == BIC)
1478					cpuctrl &= ~optlist->co_value;
1479			}
1480		}
1481		++optlist;
1482	}
1483	return(cpuctrl);
1484}
1485#endif /* CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 || XSCALE*/
1486
1487#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8)
/* Cache/write-buffer boot options shared by ARM6/7/8 (unified I/D cache). */
struct cpu_option arm678_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1499
1500#endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
1501
1502#ifdef CPU_ARM7TDMI
/* ARM7TDMI-specific boot options (cache, write buffer, FPA clock). */
struct cpu_option arm7tdmi_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }
};
1514
1515void
1516arm7tdmi_setup(args)
1517	char *args;
1518{
1519	int cpuctrl;
1520
1521	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1522		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1523		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1524
1525	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1526	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
1527
1528#ifdef __ARMEB__
1529	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1530#endif
1531
1532	/* Clear out the cache */
1533	cpu_idcache_wbinv_all();
1534
1535	/* Set the control register */
1536	ctrl = cpuctrl;
1537	cpu_control(0xffffffff, cpuctrl);
1538}
1539#endif	/* CPU_ARM7TDMI */
1540
1541#ifdef CPU_ARM8
/* ARM8-specific boot options (cache, write buffer, branch prediction). */
struct cpu_option arm8_options[] = {
	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1554
1555void
1556arm8_setup(args)
1557	char *args;
1558{
1559	int integer;
1560	int cpuctrl, cpuctrlmask;
1561	int clocktest;
1562	int setclock = 0;
1563
1564	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1565		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1566		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1567	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1568		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1569		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1570		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
1571		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
1572
1573#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1574	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1575#endif
1576
1577	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1578	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
1579
1580#ifdef __ARMEB__
1581	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1582#endif
1583
1584	/* Get clock configuration */
1585	clocktest = arm8_clock_config(0, 0) & 0x0f;
1586
1587	/* Special ARM8 clock and test configuration */
1588	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1589		clocktest = 0;
1590		setclock = 1;
1591	}
1592	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1593		if (integer)
1594			clocktest |= 0x01;
1595		else
1596			clocktest &= ~(0x01);
1597		setclock = 1;
1598	}
1599	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1600		if (integer)
1601			clocktest |= 0x02;
1602		else
1603			clocktest &= ~(0x02);
1604		setclock = 1;
1605	}
1606	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
1607		clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
1608		setclock = 1;
1609	}
1610	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
1611		clocktest |= (integer & 7) << 5;
1612		setclock = 1;
1613	}
1614
1615	/* Clear out the cache */
1616	cpu_idcache_wbinv_all();
1617
1618	/* Set the control register */
1619	ctrl = cpuctrl;
1620	cpu_control(0xffffffff, cpuctrl);
1621
1622	/* Set the clock/test register */
1623	if (setclock)
1624		arm8_clock_config(0x7f, clocktest);
1625}
1626#endif	/* CPU_ARM8 */
1627
1628#ifdef CPU_ARM9
/* ARM9 boot options (separate I and D caches, write buffer). */
struct cpu_option arm9_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1640
/*
 * Configure the ARM9 cp15 control register from the boot arguments,
 * then flush the caches and program the hardware.
 */
void
arm9_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;

	/* Defaults: MMU, I/D caches, write buffer, late aborts and
	 * round-robin cache replacement enabled. */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
	    CPU_CONTROL_ROUNDROBIN;
	/* Bits this routine is allowed to change (passed to cpu_control). */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
		 | CPU_CONTROL_ROUNDROBIN;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif
	/* Relocate the exception vectors if they live at the high address. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register (masked write, unlike most setups). */
	cpu_control(cpuctrlmask, cpuctrl);
	ctrl = cpuctrl;

}
1680#endif	/* CPU_ARM9 */
1681
1682#ifdef CPU_ARM10
/* ARM10 boot options (separate I and D caches, write buffer). */
struct cpu_option arm10_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1694
/*
 * Configure the ARM10 cp15 control register from the boot arguments,
 * flushing and invalidating the caches around the change.
 */
void
arm10_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;

	/* Defaults: MMU, I/D caches, write buffer and branch
	 * prediction enabled. */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
	/* Bits this routine would be allowed to change (currently the
	 * control write below uses an all-ones mask instead). */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	/* (cp15 c7, c7, 0 invalidates both the I and D caches.) */
	asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	/* Set the control register */
	ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
}
1734#endif	/* CPU_ARM10 */
1735
1736#ifdef CPU_SA110
/* StrongARM SA-110 boot options (separate I and D caches, write buffer). */
struct cpu_option sa110_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1752
/*
 * Configure the SA-110 cp15 control register from the boot arguments,
 * then enable processor clock switching.
 */
void
sa110_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;

	/* Defaults: MMU, 32-bit spaces, system protection, I/D caches
	 * and write buffer enabled. */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE;
	/* Bits this routine would be allowed to change (the masked
	 * control write is commented out below). */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	ctrl = cpuctrl;
/*	cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/*
	 * enable clockswitching, note that this doesn't read or write to r0,
	 * r0 is just to make it valid asm
	 */
	__asm ("mcr 15, 0, r0, c15, c1, 2");
}
1795#endif	/* CPU_SA110 */
1796
1797#if defined(CPU_SA1100) || defined(CPU_SA1110)
/* StrongARM SA-1100/SA-1110 boot options. */
struct cpu_option sa11x0_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1813
/*
 * Configure the SA-1100/SA-1110 cp15 control register from the boot
 * arguments.
 */
void
sa11x0_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;

	/* Defaults: MMU, 32-bit spaces, system protection, I/D caches,
	 * write buffer and late aborts enabled. */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
	/* Bits this routine would be allowed to change (the control
	 * write below uses an all-ones mask instead). */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif


	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Relocate the exception vectors if they live at the high address. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
	/* Clear out the cache */
	cpu_idcache_wbinv_all();
	/* Set the control register */
	ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
}
1851#endif	/* CPU_SA1100 || CPU_SA1110 */
1852
1853#if defined(CPU_IXP12X0)
/* Intel IXP12x0 boot options. */
struct cpu_option ixp12x0_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1865
/*
 * Configure the IXP12x0 cp15 control register from the boot arguments.
 */
void
ixp12x0_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;


	/* Defaults: MMU, D-cache, write buffer, system protection and
	 * I-cache enabled. */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE;

	/* Bits this routine is allowed to change (used in the masked
	 * control write below). */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
		 | CPU_CONTROL_VECRELOC;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Relocate the exception vectors if they live at the high address. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	ctrl = cpuctrl;
	/* cpu_control(0xffffffff, cpuctrl); */
	cpu_control(cpuctrlmask, cpuctrl);
}
1904#endif /* CPU_IXP12X0 */
1905
1906#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1907  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1908  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
/* XScale boot options (caches, branch prediction). */
struct cpu_option xscale_options[] = {
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1923
/*
 * Configure the XScale cp15 control register and auxiliary control
 * register from the boot arguments.
 */
void
xscale_setup(args)
	char *args;
{
	uint32_t auxctl;
	int cpuctrl, cpuctrlmask;

	/*
	 * The XScale Write Buffer is always enabled.  Our option
	 * is to enable/disable coalescing.  Note that bits 6:3
	 * must always be enabled.
	 */

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE;
	/* Bits this routine would be allowed to change (the masked
	 * control write is commented out below). */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
		 CPU_CONTROL_L2_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Relocate the exception vectors if they live at the high address. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#ifdef CPU_XSCALE_CORE3
	/* 3rd-generation XScale: also enable the L2 cache. */
	cpuctrl |= CPU_CONTROL_L2_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register.  Note that bits 6:3 must always
	 * be set to 1.
	 */
	ctrl = cpuctrl;
/*	cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/* Make sure write coalescing is turned on */
	/* Read the auxiliary control register (cp15 c1, c0, 1). */
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
		: "=r" (auxctl));
#ifdef XSCALE_NO_COALESCE_WRITES
	auxctl |= XSCALE_AUXCTL_K;
#else
	auxctl &= ~XSCALE_AUXCTL_K;
#endif
#ifdef CPU_XSCALE_CORE3
	/* NOTE(review): LLR and MD_MASK appear to select the core-3
	 * low-locality-reference / mini-data cache behaviour — confirm
	 * against the 3rd Generation XScale developer's manual. */
	auxctl |= XSCALE_AUXCTL_LLR;
	auxctl |= XSCALE_AUXCTL_MD_MASK;
#endif
	/* Write the auxiliary control register back. */
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
		: : "r" (auxctl));
}
1993#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
1994	   CPU_XSCALE_80219 */
1995