cpufunc.c revision 173215
1/*	$NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $	*/
2
3/*-
4 * arm7tdmi support code Copyright (c) 2001 John Fremlin
5 * arm8 support code Copyright (c) 1997 ARM Limited
6 * arm8 support code Copyright (c) 1997 Causality Limited
7 * arm9 support code Copyright (C) 2001 ARM Ltd
8 * Copyright (c) 1997 Mark Brinicombe.
9 * Copyright (c) 1997 Causality Limited
10 * All rights reserved.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 *    notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 *    notice, this list of conditions and the following disclaimer in the
19 *    documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 *    must display the following acknowledgement:
22 *	This product includes software developed by Causality Limited.
23 * 4. The name of Causality Limited may not be used to endorse or promote
24 *    products derived from this software without specific prior written
25 *    permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
28 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
29 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
31 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
32 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * SUCH DAMAGE.
38 *
39 * RiscBSD kernel project
40 *
41 * cpufuncs.c
42 *
43 * C functions for supporting CPU / MMU / TLB specific operations.
44 *
45 * Created      : 30/01/97
46 */
47#include <sys/cdefs.h>
48__FBSDID("$FreeBSD: head/sys/arm/arm/cpufunc.c 173215 2007-10-31 07:27:31Z kevlo $");
49
50#include <sys/param.h>
51#include <sys/systm.h>
52#include <sys/lock.h>
53#include <sys/mutex.h>
54#include <sys/bus.h>
55#include <machine/bus.h>
56#include <machine/cpu.h>
57#include <machine/disassem.h>
58
59#include <vm/vm.h>
60#include <vm/pmap.h>
61#include <vm/uma.h>
62
63#include <machine/cpuconf.h>
64#include <machine/cpufunc.h>
65#include <machine/bootconfig.h>
66
67#ifdef CPU_XSCALE_80200
68#include <arm/xscale/i80200/i80200reg.h>
69#include <arm/xscale/i80200/i80200var.h>
70#endif
71
72#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
73#include <arm/xscale/i80321/i80321reg.h>
74#include <arm/xscale/i80321/i80321var.h>
75#endif
76
77#if defined(CPU_XSCALE_81342)
78#include <arm/xscale/i8134x/i81342reg.h>
79#endif
80
81#ifdef CPU_XSCALE_IXP425
82#include <arm/xscale/ixp425/ixp425reg.h>
83#include <arm/xscale/ixp425/ixp425var.h>
84#endif
85
86#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
87    defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
88#include <arm/xscale/xscalereg.h>
89#endif
90
#if defined(PERFCTRS)
/* Performance-monitor callbacks; installed by CPU-specific PMU init code. */
struct arm_pmc_funcs *arm_pmc;
#endif

/* PRIMARY CACHE VARIABLES */
/* L1 instruction-cache geometry (meaningful only for split caches). */
int	arm_picache_size;
int	arm_picache_line_size;
int	arm_picache_ways;

/* L1 data-cache geometry; also describes a unified cache. */
int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;
int	arm_pdcache_ways;

int	arm_pcache_type;	/* CPU_CT_CTYPE() field of the cache type reg */
int	arm_pcache_unified;	/* non-zero when I and D caches are unified */

int	arm_dcache_align;	/* D-cache line size, for alignment decisions */
int	arm_dcache_align_mask;	/* arm_dcache_align - 1 */

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;
/* NOTE(review): appears to hold a cached CP15 control-register value;
   written elsewhere in this file — confirm against set_cpufuncs()/setup. */
int ctrl;
113
#ifdef CPU_ARM7TDMI
/*
 * Dispatch table for ARM7TDMI cores.  struct cpu_functions is initialized
 * positionally, so entries must stay in member-declaration order; the
 * (void *) casts paper over signature differences where one handler fills
 * several slots.  All cache maintenance funnels through a full flushID.
 */
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm7tdmi_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm7tdmi_tlb_flushID,		/* tlb_flushID		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushID_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushI		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushI_SE	*/
	arm7tdmi_tlb_flushID,		/* tlb_flushD		*/
	arm7tdmi_tlb_flushID_SE,	/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm7tdmi_cache_flushID,		/* dcache_wbinv_all	*/
	(void *)arm7tdmi_cache_flushID,	/* dcache_wbinv_range	*/
	(void *)arm7tdmi_cache_flushID,	/* dcache_inv_range	*/
	(void *)cpufunc_nullop,		/* dcache_wb_range	*/

	arm7tdmi_cache_flushID,		/* idcache_wbinv_all	*/
	(void *)arm7tdmi_cache_flushID,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	late_abort_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm7tdmi_context_switch,	/* context_switch	*/

	arm7tdmi_setup			/* cpu setup		*/

};
#endif	/* CPU_ARM7TDMI */
175
#ifdef CPU_ARM8
/*
 * Dispatch table for ARM8 cores.  Positional initializer: entries must
 * stay in struct cpu_functions member order.
 */
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	arm8_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	arm8_tlb_flushID,		/* tlb_flushID		*/
	arm8_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	arm8_tlb_flushID,		/* tlb_flushI		*/
	arm8_tlb_flushID_SE,		/* tlb_flushI_SE	*/
	arm8_tlb_flushID,		/* tlb_flushD		*/
	arm8_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	cpufunc_nullop,			/* icache_sync_all	*/
	(void *)cpufunc_nullop,		/* icache_sync_range	*/

	arm8_cache_purgeID,		/* dcache_wbinv_all	*/
	(void *)arm8_cache_purgeID,	/* dcache_wbinv_range	*/
/*XXX*/	(void *)arm8_cache_purgeID,	/* dcache_inv_range	*/
	(void *)arm8_cache_cleanID,	/* dcache_wb_range	*/

	arm8_cache_purgeID,		/* idcache_wbinv_all	*/
	(void *)arm8_cache_purgeID,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	cpufunc_nullop,			/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm8_context_switch,		/* context_switch	*/

	arm8_setup			/* cpu setup		*/
};
#endif	/* CPU_ARM8 */
236
#ifdef CPU_ARM9
/*
 * Dispatch table for ARM9TDMI-family cores.  Positional initializer:
 * entries must stay in struct cpu_functions member order.
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	arm9_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm9_icache_sync_all,		/* icache_sync_all	*/
	arm9_icache_sync_range,		/* icache_sync_range	*/

	arm9_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	arm9_dcache_wbinv_range,	/* dcache_wbinv_range	*/
/*XXX*/	arm9_dcache_wbinv_range,	/* dcache_inv_range	*/
	arm9_dcache_wb_range,		/* dcache_wb_range	*/

	arm9_idcache_wbinv_all,		/* idcache_wbinv_all	*/
	arm9_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm9_context_switch,		/* context_switch	*/

	arm9_setup			/* cpu setup		*/

};
#endif /* CPU_ARM9 */
298
#if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * Dispatch table for ARMv5 cores with the "extended cache" interface
 * (ARM926EJ-S, ARM1026EJ-S).  struct cpu_functions is initialized
 * positionally, so every member up to cf_setup must be present: the
 * previous revision of this table omitted the four l2cache entries,
 * which shifted every subsequent handler into the wrong slot (e.g. the
 * dataabt fixup landed in cf_drain_writebuf) and left the last members
 * zero-filled.  The no-op L2 entries below restore correct alignment,
 * matching every other table in this file.
 */
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	armv5_ec_setttb,		/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_all,	/* icache_sync_all	*/
	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	armv5_ec_dcache_wbinv_range,	/* dcache_wbinv_range	*/
/*XXX*/	armv5_ec_dcache_wbinv_range,	/* dcache_inv_range	*/
	armv5_ec_dcache_wb_range,	/* dcache_wb_range	*/

	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv5_ec_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/

};
#endif /* CPU_ARM9E || CPU_ARM10 */
356
#ifdef CPU_ARM10
/*
 * Dispatch table for ARM10 cores (ARM1020E).  Positional initializer:
 * entries must stay in struct cpu_functions member order.
 */
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	arm10_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm10_icache_sync_all,		/* icache_sync_all	*/
	arm10_icache_sync_range,	/* icache_sync_range	*/

	arm10_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	arm10_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	arm10_dcache_inv_range,		/* dcache_inv_range	*/
	arm10_dcache_wb_range,		/* dcache_wb_range	*/

	arm10_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	arm10_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/

};
#endif /* CPU_ARM10 */
418
#ifdef CPU_SA110
/*
 * Dispatch table for the Intel/DEC StrongARM SA-110.  Positional
 * initializer: entries must stay in struct cpu_functions member order.
 */
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	sa110_context_switch,		/* context_switch	*/

	sa110_setup			/* cpu setup		*/
};
#endif	/* CPU_SA110 */
479
#if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * Dispatch table for StrongARM SA-1100/SA-1110.  Positional initializer:
 * entries must stay in struct cpu_functions member order.  Unlike SA-110,
 * these parts provide a read-buffer drain and a real sleep handler.
 */
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	sa11x0_drain_readbuf,		/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	sa11x0_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	sa11x0_context_switch,		/* context_switch	*/

	sa11x0_setup			/* cpu setup		*/
};
#endif	/* CPU_SA1100 || CPU_SA1110 */
540
#ifdef CPU_IXP12X0
/*
 * Dispatch table for the Intel IXP12x0 (StrongARM-based network
 * processor).  Positional initializer: entries must stay in struct
 * cpu_functions member order.
 */
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	sa1_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	sa1_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	sa1_cache_syncI,		/* icache_sync_all	*/
	sa1_cache_syncI_rng,		/* icache_sync_range	*/

	sa1_cache_purgeD,		/* dcache_wbinv_all	*/
	sa1_cache_purgeD_rng,		/* dcache_wbinv_range	*/
/*XXX*/	sa1_cache_purgeD_rng,		/* dcache_inv_range	*/
	sa1_cache_cleanD_rng,		/* dcache_wb_range	*/

	sa1_cache_purgeID,		/* idcache_wbinv_all	*/
	sa1_cache_purgeID_rng,		/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	ixp12x0_drain_readbuf,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	ixp12x0_context_switch,		/* context_switch	*/

	ixp12x0_setup			/* cpu setup		*/
};
#endif	/* CPU_IXP12X0 */
601
#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
  defined(CPU_XSCALE_80219)

/*
 * Shared dispatch table for first/second-generation XScale cores.
 * Positional initializer: entries must stay in struct cpu_functions
 * member order.  Note the non-null cpwait: XScale requires an explicit
 * coprocessor-wait after CP15 writes.
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscale_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscale_cache_syncI,		/* icache_sync_all	*/
	xscale_cache_syncI_rng,		/* icache_sync_range	*/

	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all 	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscale_context_switch,		/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
#endif
/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 ||
   CPU_XSCALE_IXP425 || CPU_XSCALE_80219 */
667
#ifdef CPU_XSCALE_81342
/*
 * Dispatch table for third-generation XScale (81342).  Positional
 * initializer: entries must stay in struct cpu_functions member order.
 * This is the only table in the file with real L2 cache handlers.
 */
struct cpu_functions xscalec3_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscalec3_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscalec3_cache_syncI,		/* icache_sync_all	*/
	xscalec3_cache_syncI_rng,	/* icache_sync_range	*/

	xscalec3_cache_purgeD,		/* dcache_wbinv_all	*/
	xscalec3_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscalec3_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscalec3_cache_purgeID,		/* idcache_wbinv_all	*/
	xscalec3_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	xscalec3_l2cache_purge,		/* l2cache_wbinv_all	*/
	xscalec3_l2cache_purge_rng,	/* l2cache_wbinv_range	*/
	xscalec3_l2cache_flush_rng,	/* l2cache_inv_range	*/
	xscalec3_l2cache_clean_rng,	/* l2cache_wb_range	*/

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscalec3_context_switch,	/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
#endif /* CPU_XSCALE_81342 */
/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;	/* active dispatch table; set_cpufuncs() copies
				   one of the per-CPU tables above into it */
u_int cputype;			/* cpufunc_id() masked with CPU_ID_CPU_MASK */
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
735
#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
  defined (CPU_ARM9E) || defined (CPU_ARM10) ||			       \
  defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||	       \
  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||	       \
  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)

static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers; filled in by get_cachetype_cp15() and consumed by the
   arm9/arm10 set-up paths in set_cpufuncs().  */
static int	arm_dcache_l2_nsets;
static int	arm_dcache_l2_assoc;
static int	arm_dcache_l2_linesize;
749
750static void
751get_cachetype_cp15()
752{
753	u_int ctype, isize, dsize;
754	u_int multiplier;
755
756	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
757		: "=r" (ctype));
758
759	/*
760	 * ...and thus spake the ARM ARM:
761	 *
762	 * If an <opcode2> value corresponding to an unimplemented or
763	 * reserved ID register is encountered, the System Control
764	 * processor returns the value of the main ID register.
765	 */
766	if (ctype == cpufunc_id())
767		goto out;
768
769	if ((ctype & CPU_CT_S) == 0)
770		arm_pcache_unified = 1;
771
772	/*
773	 * If you want to know how this code works, go read the ARM ARM.
774	 */
775
776	arm_pcache_type = CPU_CT_CTYPE(ctype);
777
778	if (arm_pcache_unified == 0) {
779		isize = CPU_CT_ISIZE(ctype);
780		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
781		arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
782		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
783			if (isize & CPU_CT_xSIZE_M)
784				arm_picache_line_size = 0; /* not present */
785			else
786				arm_picache_ways = 1;
787		} else {
788			arm_picache_ways = multiplier <<
789			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
790		}
791		arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
792	}
793
794	dsize = CPU_CT_DSIZE(ctype);
795	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
796	arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
797	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
798		if (dsize & CPU_CT_xSIZE_M)
799			arm_pdcache_line_size = 0; /* not present */
800		else
801			arm_pdcache_ways = 1;
802	} else {
803		arm_pdcache_ways = multiplier <<
804		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
805	}
806	arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
807
808	arm_dcache_align = arm_pdcache_line_size;
809
810	arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
811	arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
812	arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
813	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
814
815 out:
816	arm_dcache_align_mask = arm_dcache_align - 1;
817}
818#endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */
819
#if defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
    defined(CPU_IXP12X0)
/* Cache information for CPUs without cache type registers. */
struct cachetab {
	u_int32_t ct_cpuid;		/* CPU id, masked with CPU_ID_CPU_MASK */
	int	ct_pcache_type;
	int	ct_pcache_unified;
	int	ct_pdcache_size;
	int	ct_pdcache_line_size;
	int	ct_pdcache_ways;
	int	ct_picache_size;
	int	ct_picache_line_size;
	int	ct_picache_ways;
};

/* Table is terminated by the all-zero sentinel entry (ct_cpuid == 0). */
struct cachetab cachetab[] = {
    /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0}
};
844
845static void get_cachetype_table(void);
846
847static void
848get_cachetype_table()
849{
850	int i;
851	u_int32_t cpuid = cpufunc_id();
852
853	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
854		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
855			arm_pcache_type = cachetab[i].ct_pcache_type;
856			arm_pcache_unified = cachetab[i].ct_pcache_unified;
857			arm_pdcache_size = cachetab[i].ct_pdcache_size;
858			arm_pdcache_line_size =
859			    cachetab[i].ct_pdcache_line_size;
860			arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
861			arm_picache_size = cachetab[i].ct_picache_size;
862			arm_picache_line_size =
863			    cachetab[i].ct_picache_line_size;
864			arm_picache_ways = cachetab[i].ct_picache_ways;
865		}
866	}
867	arm_dcache_align = arm_pdcache_line_size;
868
869	arm_dcache_align_mask = arm_dcache_align - 1;
870}
871
872#endif /* SA110 || SA1100 || SA1111 || IXP12X0 */
873
874/*
875 * Cannot panic here as we may not have a console yet ...
876 */
877
878int
879set_cpufuncs()
880{
881	cputype = cpufunc_id();
882	cputype &= CPU_ID_CPU_MASK;
883
884	/*
885	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
886	 * CPU type where we want to use it by default, then we set it.
887	 */
888
889#ifdef CPU_ARM7TDMI
890	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
891	    CPU_ID_IS7(cputype) &&
892	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
893		cpufuncs = arm7tdmi_cpufuncs;
894		cpu_reset_needs_v4_MMU_disable = 0;
895		get_cachetype_cp15();
896		pmap_pte_init_generic();
897		goto out;
898	}
899#endif
900#ifdef CPU_ARM8
901	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
902	    (cputype & 0x0000f000) == 0x00008000) {
903		cpufuncs = arm8_cpufuncs;
904		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
905		get_cachetype_cp15();
906		pmap_pte_init_arm8();
907		goto out;
908	}
909#endif	/* CPU_ARM8 */
910#ifdef CPU_ARM9
911	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
912	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
913	    (cputype & 0x0000f000) == 0x00009000) {
914		cpufuncs = arm9_cpufuncs;
915		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
916		get_cachetype_cp15();
917		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
918		arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
919		    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
920		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
921		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
922#ifdef ARM9_CACHE_WRITE_THROUGH
923		pmap_pte_init_arm9();
924#else
925		pmap_pte_init_generic();
926#endif
927		goto out;
928	}
929#endif /* CPU_ARM9 */
930#if defined(CPU_ARM9E) || defined(CPU_ARM10)
931	if (cputype == CPU_ID_ARM926EJS ||
932	    cputype == CPU_ID_ARM1026EJS) {
933		cpufuncs = armv5_ec_cpufuncs;
934		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
935		get_cachetype_cp15();
936		pmap_pte_init_generic();
937		return 0;
938	}
939#endif /* CPU_ARM9E || CPU_ARM10 */
940#ifdef CPU_ARM10
941	if (/* cputype == CPU_ID_ARM1020T || */
942	    cputype == CPU_ID_ARM1020E) {
943		/*
944		 * Select write-through cacheing (this isn't really an
945		 * option on ARM1020T).
946		 */
947		cpufuncs = arm10_cpufuncs;
948		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
949		get_cachetype_cp15();
950		arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
951		arm10_dcache_sets_max =
952		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
953		    arm10_dcache_sets_inc;
954		arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
955		arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
956		pmap_pte_init_generic();
957		goto out;
958	}
959#endif /* CPU_ARM10 */
960#ifdef CPU_SA110
961	if (cputype == CPU_ID_SA110) {
962		cpufuncs = sa110_cpufuncs;
963		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
964		get_cachetype_table();
965		pmap_pte_init_sa1();
966		goto out;
967	}
968#endif	/* CPU_SA110 */
969#ifdef CPU_SA1100
970	if (cputype == CPU_ID_SA1100) {
971		cpufuncs = sa11x0_cpufuncs;
972		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
973		get_cachetype_table();
974		pmap_pte_init_sa1();
975		/* Use powersave on this CPU. */
976		cpu_do_powersave = 1;
977
978		goto out;
979	}
980#endif	/* CPU_SA1100 */
981#ifdef CPU_SA1110
982	if (cputype == CPU_ID_SA1110) {
983		cpufuncs = sa11x0_cpufuncs;
984		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
985		get_cachetype_table();
986		pmap_pte_init_sa1();
987		/* Use powersave on this CPU. */
988		cpu_do_powersave = 1;
989
990		goto out;
991	}
992#endif	/* CPU_SA1110 */
993#ifdef CPU_IXP12X0
994        if (cputype == CPU_ID_IXP1200) {
995                cpufuncs = ixp12x0_cpufuncs;
996                cpu_reset_needs_v4_MMU_disable = 1;
997                get_cachetype_table();
998                pmap_pte_init_sa1();
999		goto out;
1000        }
1001#endif  /* CPU_IXP12X0 */
1002#ifdef CPU_XSCALE_80200
1003	if (cputype == CPU_ID_80200) {
1004		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
1005
1006		i80200_icu_init();
1007
1008		/*
1009		 * Reset the Performance Monitoring Unit to a
1010		 * pristine state:
1011		 *	- CCNT, PMN0, PMN1 reset to 0
1012		 *	- overflow indications cleared
1013		 *	- all counters disabled
1014		 */
1015		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
1016			:
1017			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1018			       PMNC_CC_IF));
1019
1020#if defined(XSCALE_CCLKCFG)
1021		/*
1022		 * Crank CCLKCFG to maximum legal value.
1023		 */
1024		__asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
1025			:
1026			: "r" (XSCALE_CCLKCFG));
1027#endif
1028
1029		/*
1030		 * XXX Disable ECC in the Bus Controller Unit; we
1031		 * don't really support it, yet.  Clear any pending
1032		 * error indications.
1033		 */
1034		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
1035			:
1036			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
1037
1038		cpufuncs = xscale_cpufuncs;
1039#if defined(PERFCTRS)
1040		xscale_pmu_init();
1041#endif
1042
1043		/*
1044		 * i80200 errata: Step-A0 and A1 have a bug where
1045		 * D$ dirty bits are not cleared on "invalidate by
1046		 * address".
1047		 *
1048		 * Workaround: Clean cache line before invalidating.
1049		 */
1050		if (rev == 0 || rev == 1)
1051			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
1052
1053		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1054		get_cachetype_cp15();
1055		pmap_pte_init_xscale();
1056		goto out;
1057	}
1058#endif /* CPU_XSCALE_80200 */
1059#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
1060	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
1061	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
1062	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
1063		/*
1064		 * Reset the Performance Monitoring Unit to a
1065		 * pristine state:
1066		 *	- CCNT, PMN0, PMN1 reset to 0
1067		 *	- overflow indications cleared
1068		 *	- all counters disabled
1069		 */
1070		__asm __volatile("mcr p14, 0, %0, c0, c0, 0"
1071			:
1072			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
1073			       PMNC_CC_IF));
1074
1075		cpufuncs = xscale_cpufuncs;
1076#if defined(PERFCTRS)
1077		xscale_pmu_init();
1078#endif
1079
1080		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1081		get_cachetype_cp15();
1082		pmap_pte_init_xscale();
1083		goto out;
1084	}
1085#endif /* CPU_XSCALE_80321 */
1086
1087#if defined(CPU_XSCALE_81342)
1088	if (cputype == CPU_ID_81342) {
1089		cpufuncs = xscalec3_cpufuncs;
1090#if defined(PERFCTRS)
1091		xscale_pmu_init();
1092#endif
1093
1094		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1095		get_cachetype_cp15();
1096		pmap_pte_init_xscale();
1097		goto out;
1098	}
1099#endif /* CPU_XSCALE_81342 */
1100#ifdef CPU_XSCALE_PXA2X0
1101	/* ignore core revision to test PXA2xx CPUs */
1102	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
1103	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
1104
1105		cpufuncs = xscale_cpufuncs;
1106#if defined(PERFCTRS)
1107		xscale_pmu_init();
1108#endif
1109
1110		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1111		get_cachetype_cp15();
1112		pmap_pte_init_xscale();
1113
1114		/* Use powersave on this CPU. */
1115		cpu_do_powersave = 1;
1116
1117		goto out;
1118	}
1119#endif /* CPU_XSCALE_PXA2X0 */
1120#ifdef CPU_XSCALE_IXP425
1121	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
1122            cputype == CPU_ID_IXP425_266) {
1123
1124		cpufuncs = xscale_cpufuncs;
1125#if defined(PERFCTRS)
1126		xscale_pmu_init();
1127#endif
1128
1129		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1130		get_cachetype_cp15();
1131		pmap_pte_init_xscale();
1132
1133		goto out;
1134	}
1135#endif /* CPU_XSCALE_IXP425 */
1136	/*
1137	 * Bzzzz. And the answer was ...
1138	 */
1139	panic("No support for this CPU type (%08x) in kernel", cputype);
1140	return(ARCHITECTURE_NOT_PRESENT);
1141out:
1142	uma_set_align(arm_dcache_align_mask);
1143	return (0);
1144}
1145
1146/*
1147 * Fixup routines for data and prefetch aborts.
1148 *
1149 * Several compile time symbols are used
1150 *
1151 * DEBUG_FAULT_CORRECTION - Print debugging information during the
1152 * correction of registers after a fault.
1153 * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
1154 * when defined should use late aborts
1155 */
1156
1157
1158/*
1159 * Null abort fixup routine.
1160 * For use when no fixup is required.
1161 */
1162int
1163cpufunc_null_fixup(arg)
1164	void *arg;
1165{
1166	return(ABORT_FIXUP_OK);
1167}
1168
1169
1170#if defined(CPU_ARM7TDMI)
1171
/*
 * Debug hooks for the abort-fixup code below.  With
 * DEBUG_FAULT_CORRECTION defined they print diagnostics and
 * disassemble the faulting instruction; otherwise they expand
 * to nothing.
 */
#ifdef DEBUG_FAULT_CORRECTION
#define DFC_PRINTF(x)		printf x
#define DFC_DISASSEMBLE(x)	disassemble(x)
#else
#define DFC_PRINTF(x)		/* nothing */
#define DFC_DISASSEMBLE(x)	/* nothing */
#endif
1179
1180/*
1181 * "Early" data abort fixup.
1182 *
1183 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
1184 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
1185 *
1186 * In early aborts, we may have to fix up LDM, STM, LDC and STC.
1187 */
/*
 * Undo the base-register writeback performed by a faulted LDM/STM or
 * LDC/STC so the instruction can be restarted.  'arg' is the trapframe;
 * returns ABORT_FIXUP_OK on success or ABORT_FIXUP_FAILED when the
 * registers cannot be repaired (e.g. writeback to the PC).
 */
int
early_abort_fixup(arg)
	void *arg;
{
	trapframe_t *frame = arg;
	u_int fault_pc;
	u_int fault_instruction;
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	/* Opcode bits 27:25 == 100 selects LDM/STM (block transfer). */
	if ((fault_instruction & 0x0e000000) == 0x08000000) {
		int base;
		int loop;
		int count;
		int *registers = &frame->tf_r0;
        
		DFC_PRINTF(("LDM/STM\n"));
		DFC_DISASSEMBLE(fault_pc);
		/* Bit 21 (W) set: base writeback occurred and must be undone. */
		if (fault_instruction & (1 << 21)) {
			DFC_PRINTF(("This instruction must be corrected\n"));
			base = (fault_instruction >> 16) & 0x0f;
			/* Writeback to the PC cannot be repaired. */
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			/* Count registers transferred */
			count = 0;
			for (loop = 0; loop < 16; ++loop) {
				if (fault_instruction & (1<<loop))
					++count;
			}
			DFC_PRINTF(("%d registers used\n", count));
			DFC_PRINTF(("Corrected r%d by %d bytes ",
				       base, count * 4));
			/* Bit 23 (U): CPU incremented the base, step it back. */
			if (fault_instruction & (1 << 23)) {
				DFC_PRINTF(("down\n"));
				registers[base] -= count * 4;
			} else {
				DFC_PRINTF(("up\n"));
				registers[base] += count * 4;
			}
		}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
		/* Opcode bits 27:25 == 110 selects LDC/STC (coprocessor). */
		int base;
		int offset;
		int *registers = &frame->tf_r0;
	
		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */

		DFC_DISASSEMBLE(fault_pc);

		/* Only need to fix registers if write back is turned on */

		if ((fault_instruction & (1 << 21)) != 0) {
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;

			/* 8-bit word offset, scaled by 4 as the CPU does. */
			offset = (fault_instruction & 0xff) << 2;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
			/* Bit 23 (U) set means the base was incremented;
			 * negate the offset so the addition below undoes it. */
			if ((fault_instruction & (1 << 23)) != 0)
				offset = -offset;
			registers[base] += offset;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
		/*
		 * NOTE(review): this condition duplicates the branch above
		 * and is unreachable; presumably a different opcode mask
		 * was intended -- confirm against the ARM ARM.
		 */
		return ABORT_FIXUP_FAILED;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		/* Move the (possibly corrected) lr back to the SVC slot and
		 * restore the user lr saved on entry. */
		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	return(ABORT_FIXUP_OK);
}
1317#endif	/* CPU_ARM2/250/3/6/7 */
1318
1319
1320#if defined(CPU_ARM7TDMI)
1321/*
1322 * "Late" (base updated) data abort fixup
1323 *
1324 * For ARM6 (in late-abort mode) and ARM7.
1325 *
1326 * In this model, all data-transfer instructions need fixing up.  We defer
1327 * LDM, STM, LDC and STC fixup to the early-abort handler.
1328 */
/*
 * Late-abort fixup: on CPUs using the late (base updated) abort model
 * every faulted data transfer leaves the base register modified, so
 * single LDR/STR instructions must be backed out here; LDM/STM and
 * LDC/STC are then handed to early_abort_fixup().  'arg' is the
 * trapframe; returns ABORT_FIXUP_OK or ABORT_FIXUP_FAILED.
 */
int
late_abort_fixup(arg)
	void *arg;
{
	trapframe_t *frame = arg;
	u_int fault_pc;
	u_int fault_instruction;
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	/* Was it a swap (SWP/SWPB) instruction?  No base writeback. */

	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
		DFC_DISASSEMBLE(fault_pc);
	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {

		/* Was it a ldr/str instruction */
		/* This is for late abort only */

		int base;
		int offset;
		int *registers = &frame->tf_r0;

		DFC_DISASSEMBLE(fault_pc);
		
		/* This is for late abort only */

		/* Post-indexed (P=0, bit 24 clear) or writeback (W=1,
		 * bit 21 set): the base register was modified and must
		 * be restored. */
		if ((fault_instruction & (1 << 24)) == 0
		    || (fault_instruction & (1 << 21)) != 0) {	
			/* postindexed ldr/str, or writeback requested */

			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			DFC_PRINTF(("late abt fix: r%d=%08x : ",
				       base, registers[base]));
			/* Bit 25 (I) clear: 12-bit immediate offset. */
			if ((fault_instruction & (1 << 25)) == 0) {
				/* Immediate offset - easy */

				offset = fault_instruction & 0xfff;
				/* Bit 23 (U): base was incremented; undo. */
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				registers[base] += offset;
				DFC_PRINTF(("imm=%08x ", offset));
			} else {
				/* offset is a shifted register */
				int shift;

				offset = fault_instruction & 0x0f;
				/* Offset register equals base: the original
				 * base value is unrecoverable. */
				if (offset == base)
					return ABORT_FIXUP_FAILED;
                
				/*
				 * Register offset - hard we have to
				 * cope with shifts !
				 */
				offset = registers[offset];

				if ((fault_instruction & (1 << 4)) == 0)
					/* shift with amount */
					shift = (fault_instruction >> 7) & 0x1f;
				else {
					/* shift with register */
					if ((fault_instruction & (1 << 7)) != 0)
						/* undefined for now so bail out */
						return ABORT_FIXUP_FAILED;
					shift = ((fault_instruction >> 8) & 0xf);
					if (base == shift)
						return ABORT_FIXUP_FAILED;
					DFC_PRINTF(("shift reg=%d ", shift));
					shift = registers[shift];
				}
				DFC_PRINTF(("shift=%08x ", shift));
				/* Bits 6:5 select the shift type. */
				switch (((fault_instruction >> 5) & 0x3)) {
				case 0 : /* Logical left */
					offset = (int)(((u_int)offset) << shift);
					break;
				case 1 : /* Logical Right */
					if (shift == 0) shift = 32;
					offset = (int)(((u_int)offset) >> shift);
					break;
				case 2 : /* Arithmetic Right */
					if (shift == 0) shift = 32;
					offset = (int)(((int)offset) >> shift);
					break;
				case 3 : /* Rotate right (rol or rxx) */
					return ABORT_FIXUP_FAILED;
					break;
				}

				DFC_PRINTF(("abt: fixed LDR/STR with "
					       "register offset\n"));
				/* Bit 23 (U): base was incremented; undo. */
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				DFC_PRINTF(("offset=%08x ", offset));
				registers[base] += offset;
			}
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	}

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		/* Move the (possibly corrected) lr back to the SVC slot and
		 * restore the user lr saved on entry. */
		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/*
	 * Now let the early-abort fixup routine have a go, in case it
	 * was an LDM, STM, LDC or STC that faulted.
	 */

	return early_abort_fixup(arg);
}
1498#endif	/* CPU_ARM7TDMI */
1499
1500/*
1501 * CPU Setup code
1502 */
1503
1504#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined (CPU_ARM9) || \
1505  defined(CPU_ARM9E) || \
1506  defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) ||	\
1507  defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||		\
1508  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
1509  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
1510  defined(CPU_ARM10) ||  defined(CPU_ARM11)
1511
#define IGN	0	/* leave the control-register bits untouched */
#define OR	1	/* set the bits */
#define BIC	2	/* clear the bits */

/*
 * One boot-time CPU option: when the named boolean boot argument is
 * present, apply co_trueop (true) or co_falseop (false) to the
 * co_value bits of the proposed control-register setting.
 */
struct cpu_option {
	char	*co_name;	/* boot argument name */
	int	co_falseop;	/* IGN/OR/BIC when option is false */
	int	co_trueop;	/* IGN/OR/BIC when option is true */
	int	co_value;	/* control-register bits affected */
};
1522
1523static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
1524
1525static u_int
1526parse_cpu_options(args, optlist, cpuctrl)
1527	char *args;
1528	struct cpu_option *optlist;
1529	u_int cpuctrl;
1530{
1531	int integer;
1532
1533	if (args == NULL)
1534		return(cpuctrl);
1535
1536	while (optlist->co_name) {
1537		if (get_bootconf_option(args, optlist->co_name,
1538		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
1539			if (integer) {
1540				if (optlist->co_trueop == OR)
1541					cpuctrl |= optlist->co_value;
1542				else if (optlist->co_trueop == BIC)
1543					cpuctrl &= ~optlist->co_value;
1544			} else {
1545				if (optlist->co_falseop == OR)
1546					cpuctrl |= optlist->co_value;
1547				else if (optlist->co_falseop == BIC)
1548					cpuctrl &= ~optlist->co_value;
1549			}
1550		}
1551		++optlist;
1552	}
1553	return(cpuctrl);
1554}
1555#endif /* CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 || XSCALE*/
1556
1557#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8)
/*
 * Cache/write-buffer boot options shared by the ARM6/7/8 family,
 * consumed by parse_cpu_options() from the ARM7TDMI and ARM8 setup
 * routines.
 */
struct cpu_option arm678_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1569
1570#endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */
1571
1572#ifdef CPU_ARM7TDMI
/* ARM7TDMI-specific boot options (cache, write buffer, FPA clock). */
struct cpu_option arm7tdmi_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }
};
1584
1585void
1586arm7tdmi_setup(args)
1587	char *args;
1588{
1589	int cpuctrl;
1590
1591	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1592		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1593		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1594
1595	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1596	cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl);
1597
1598#ifdef __ARMEB__
1599	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1600#endif
1601
1602	/* Clear out the cache */
1603	cpu_idcache_wbinv_all();
1604
1605	/* Set the control register */
1606	ctrl = cpuctrl;
1607	cpu_control(0xffffffff, cpuctrl);
1608}
1609#endif	/* CPU_ARM7TDMI */
1610
1611#ifdef CPU_ARM8
/* ARM8-specific boot options (cache, write buffer, branch prediction). */
struct cpu_option arm8_options[] = {
	{ "arm8.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm8.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm8.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm8.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "arm8.branchpredict",	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1624
1625void
1626arm8_setup(args)
1627	char *args;
1628{
1629	int integer;
1630	int cpuctrl, cpuctrlmask;
1631	int clocktest;
1632	int setclock = 0;
1633
1634	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1635		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1636		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
1637	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1638		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1639		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1640		 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE
1641		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE;
1642
1643#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1644	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1645#endif
1646
1647	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
1648	cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl);
1649
1650#ifdef __ARMEB__
1651	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1652#endif
1653
1654	/* Get clock configuration */
1655	clocktest = arm8_clock_config(0, 0) & 0x0f;
1656
1657	/* Special ARM8 clock and test configuration */
1658	if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1659		clocktest = 0;
1660		setclock = 1;
1661	}
1662	if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1663		if (integer)
1664			clocktest |= 0x01;
1665		else
1666			clocktest &= ~(0x01);
1667		setclock = 1;
1668	}
1669	if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) {
1670		if (integer)
1671			clocktest |= 0x02;
1672		else
1673			clocktest &= ~(0x02);
1674		setclock = 1;
1675	}
1676	if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) {
1677		clocktest = (clocktest & ~0xc0) | (integer & 3) << 2;
1678		setclock = 1;
1679	}
1680	if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) {
1681		clocktest |= (integer & 7) << 5;
1682		setclock = 1;
1683	}
1684
1685	/* Clear out the cache */
1686	cpu_idcache_wbinv_all();
1687
1688	/* Set the control register */
1689	ctrl = cpuctrl;
1690	cpu_control(0xffffffff, cpuctrl);
1691
1692	/* Set the clock/test register */
1693	if (setclock)
1694		arm8_clock_config(0x7f, clocktest);
1695}
1696#endif	/* CPU_ARM8 */
1697
1698#ifdef CPU_ARM9
/* ARM9 boot options: separate I/D caches plus write buffer control. */
struct cpu_option arm9_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1710
1711void
1712arm9_setup(args)
1713	char *args;
1714{
1715	int cpuctrl, cpuctrlmask;
1716
1717	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1718	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1719	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1720	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
1721	    CPU_CONTROL_ROUNDROBIN;
1722	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1723		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1724		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1725		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1726		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1727		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
1728		 | CPU_CONTROL_ROUNDROBIN;
1729
1730#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1731	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1732#endif
1733
1734	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
1735
1736#ifdef __ARMEB__
1737	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1738#endif
1739	if (vector_page == ARM_VECTORS_HIGH)
1740		cpuctrl |= CPU_CONTROL_VECRELOC;
1741
1742	/* Clear out the cache */
1743	cpu_idcache_wbinv_all();
1744
1745	/* Set the control register */
1746	cpu_control(cpuctrlmask, cpuctrl);
1747	ctrl = cpuctrl;
1748
1749}
1750#endif	/* CPU_ARM9 */
1751
1752#if defined(CPU_ARM9E) || defined(CPU_ARM10)
/* ARM9E/ARM10 boot options: separate I/D caches plus write buffer. */
struct cpu_option arm10_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1764
1765void
1766arm10_setup(args)
1767	char *args;
1768{
1769	int cpuctrl, cpuctrlmask;
1770
1771	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1772	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1773	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
1774	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1775	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1776	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1777	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1778	    | CPU_CONTROL_BPRD_ENABLE
1779	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1780
1781#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1782	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1783#endif
1784
1785	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
1786
1787#ifdef __ARMEB__
1788	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1789#endif
1790
1791	/* Clear out the cache */
1792	cpu_idcache_wbinv_all();
1793
1794	/* Now really make sure they are clean.  */
1795	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1796
1797	/* Set the control register */
1798	ctrl = cpuctrl;
1799	cpu_control(0xffffffff, cpuctrl);
1800
1801	/* And again. */
1802	cpu_idcache_wbinv_all();
1803}
1804#endif	/* CPU_ARM9E || CPU_ARM10 */
1805
1806#ifdef CPU_ARM11
/* ARM11 boot options: separate I/D cache control only. */
struct cpu_option arm11_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1815
1816void
1817arm11_setup(args)
1818	char *args;
1819{
1820	int cpuctrl, cpuctrlmask;
1821
1822	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1823	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1824	    /* | CPU_CONTROL_BPRD_ENABLE */;
1825	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1826	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1827	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
1828	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1829	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1830
1831#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1832	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1833#endif
1834
1835	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
1836
1837#ifdef __ARMEB__
1838	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1839#endif
1840
1841	/* Clear out the cache */
1842	cpu_idcache_wbinv_all();
1843
1844	/* Now really make sure they are clean.  */
1845	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1846
1847	/* Set the control register */
1848	curcpu()->ci_ctrl = cpuctrl;
1849	cpu_control(0xffffffff, cpuctrl);
1850
1851	/* And again. */
1852	cpu_idcache_wbinv_all();
1853}
1854#endif	/* CPU_ARM11 */
1855
1856#ifdef CPU_SA110
/* StrongARM SA-110 boot options: I/D caches and write buffer. */
struct cpu_option sa110_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1872
1873void
1874sa110_setup(args)
1875	char *args;
1876{
1877	int cpuctrl, cpuctrlmask;
1878
1879	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1880		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1881		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1882		 | CPU_CONTROL_WBUF_ENABLE;
1883	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1884		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1885		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1886		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1887		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1888		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1889		 | CPU_CONTROL_CPCLK;
1890
1891#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1892	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1893#endif
1894
1895	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);
1896
1897#ifdef __ARMEB__
1898	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1899#endif
1900
1901	/* Clear out the cache */
1902	cpu_idcache_wbinv_all();
1903
1904	/* Set the control register */
1905	ctrl = cpuctrl;
1906/*	cpu_control(cpuctrlmask, cpuctrl);*/
1907	cpu_control(0xffffffff, cpuctrl);
1908
1909	/*
1910	 * enable clockswitching, note that this doesn't read or write to r0,
1911	 * r0 is just to make it valid asm
1912	 */
1913	__asm ("mcr 15, 0, r0, c15, c1, 2");
1914}
1915#endif	/* CPU_SA110 */
1916
1917#if defined(CPU_SA1100) || defined(CPU_SA1110)
/* StrongARM SA-1100/SA-1110 boot options: I/D caches and write buffer. */
struct cpu_option sa11x0_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1933
1934void
1935sa11x0_setup(args)
1936	char *args;
1937{
1938	int cpuctrl, cpuctrlmask;
1939
1940	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1941		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1942		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1943		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
1944	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1945		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1946		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1947		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1948		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1949		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1950		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1951
1952#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1953	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1954#endif
1955
1956
1957	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);
1958
1959#ifdef __ARMEB__
1960	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1961#endif
1962
1963	if (vector_page == ARM_VECTORS_HIGH)
1964		cpuctrl |= CPU_CONTROL_VECRELOC;
1965	/* Clear out the cache */
1966	cpu_idcache_wbinv_all();
1967	/* Set the control register */
1968	ctrl = cpuctrl;
1969	cpu_control(0xffffffff, cpuctrl);
1970}
1971#endif	/* CPU_SA1100 || CPU_SA1110 */
1972
1973#if defined(CPU_IXP12X0)
/* Intel IXP12x0 boot options: I/D caches and write buffer. */
struct cpu_option ixp12x0_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};
1985
1986void
1987ixp12x0_setup(args)
1988	char *args;
1989{
1990	int cpuctrl, cpuctrlmask;
1991
1992
1993	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
1994		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
1995		 | CPU_CONTROL_IC_ENABLE;
1996
1997	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
1998		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
1999		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
2000		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
2001		 | CPU_CONTROL_VECRELOC;
2002
2003#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2004	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2005#endif
2006
2007	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);
2008
2009#ifdef __ARMEB__
2010	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2011#endif
2012
2013	if (vector_page == ARM_VECTORS_HIGH)
2014		cpuctrl |= CPU_CONTROL_VECRELOC;
2015
2016	/* Clear out the cache */
2017	cpu_idcache_wbinv_all();
2018
2019	/* Set the control register */
2020	ctrl = cpuctrl;
2021	/* cpu_control(0xffffffff, cpuctrl); */
2022	cpu_control(cpuctrlmask, cpuctrl);
2023}
2024#endif /* CPU_IXP12X0 */
2025
2026#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
2027  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
2028  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
/*
 * Boot-argument option table for the XScale CPUs, consumed by
 * parse_cpu_options().  Each entry names an option and gives the
 * CPU control register bits it affects; presumably BIC clears and
 * OR sets the listed bits depending on the option's value — confirm
 * against parse_cpu_options().
 */
struct cpu_option xscale_options[] = {
#ifdef COMPAT_12
	/* Old (pre-"cpu."/"xscale." prefix) option names. */
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }	/* table terminator */
};
2043
2044void
2045xscale_setup(args)
2046	char *args;
2047{
2048	uint32_t auxctl;
2049	int cpuctrl, cpuctrlmask;
2050
2051	/*
2052	 * The XScale Write Buffer is always enabled.  Our option
2053	 * is to enable/disable coalescing.  Note that bits 6:3
2054	 * must always be enabled.
2055	 */
2056
2057	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2058		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2059		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2060		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
2061		 | CPU_CONTROL_BPRD_ENABLE;
2062	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
2063		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
2064		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
2065		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
2066		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
2067		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
2068		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
2069		 CPU_CONTROL_L2_ENABLE;
2070
2071#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
2072	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
2073#endif
2074
2075	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
2076
2077#ifdef __ARMEB__
2078	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
2079#endif
2080
2081	if (vector_page == ARM_VECTORS_HIGH)
2082		cpuctrl |= CPU_CONTROL_VECRELOC;
2083#ifdef CPU_XSCALE_CORE3
2084	cpuctrl |= CPU_CONTROL_L2_ENABLE;
2085#endif
2086
2087	/* Clear out the cache */
2088	cpu_idcache_wbinv_all();
2089
2090	/*
2091	 * Set the control register.  Note that bits 6:3 must always
2092	 * be set to 1.
2093	 */
2094	ctrl = cpuctrl;
2095/*	cpu_control(cpuctrlmask, cpuctrl);*/
2096	cpu_control(0xffffffff, cpuctrl);
2097
2098	/* Make sure write coalescing is turned on */
2099	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
2100		: "=r" (auxctl));
2101#ifdef XSCALE_NO_COALESCE_WRITES
2102	auxctl |= XSCALE_AUXCTL_K;
2103#else
2104	auxctl &= ~XSCALE_AUXCTL_K;
2105#endif
2106#ifdef CPU_XSCALE_CORE3
2107	auxctl |= XSCALE_AUXCTL_LLR;
2108	auxctl |= XSCALE_AUXCTL_MD_MASK;
2109#endif
2110	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
2111		: : "r" (auxctl));
2112}
#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 ||
	   CPU_XSCALE_IXP425 || CPU_XSCALE_80219 || CPU_XSCALE_81342 */
2115