cpufunc.c revision 280823
1/*	$NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $	*/
2
3/*-
4 * arm9 support code Copyright (C) 2001 ARM Ltd
5 * Copyright (c) 1997 Mark Brinicombe.
6 * Copyright (c) 1997 Causality Limited
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed by Causality Limited.
20 * 4. The name of Causality Limited may not be used to endorse or promote
21 *    products derived from this software without specific prior written
22 *    permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
25 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
28 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * RiscBSD kernel project
37 *
38 * cpufuncs.c
39 *
40 * C functions for supporting CPU / MMU / TLB specific operations.
41 *
42 * Created      : 30/01/97
43 */
44#include <sys/cdefs.h>
45__FBSDID("$FreeBSD: head/sys/arm/arm/cpufunc.c 280823 2015-03-29 20:37:28Z andrew $");
46
47#include <sys/param.h>
48#include <sys/systm.h>
49#include <sys/lock.h>
50#include <sys/mutex.h>
51#include <sys/bus.h>
52#include <machine/bus.h>
53#include <machine/cpu.h>
54#include <machine/disassem.h>
55
56#include <vm/vm.h>
57#include <vm/pmap.h>
58#include <vm/uma.h>
59
60#include <machine/cpuconf.h>
61#include <machine/cpufunc.h>
62
63#ifdef CPU_XSCALE_80200
64#include <arm/xscale/i80200/i80200reg.h>
65#include <arm/xscale/i80200/i80200var.h>
66#endif
67
68#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
69#include <arm/xscale/i80321/i80321reg.h>
70#include <arm/xscale/i80321/i80321var.h>
71#endif
72
73/*
74 * Some definitions in i81342reg.h clash with i80321reg.h.
75 * This only happens for the LINT kernel. As it happens,
76 * we don't need anything from i81342reg.h that we already
77 * got from somewhere else during a LINT compile.
78 */
79#if defined(CPU_XSCALE_81342) && !defined(COMPILING_LINT)
80#include <arm/xscale/i8134x/i81342reg.h>
81#endif
82
83#ifdef CPU_XSCALE_IXP425
84#include <arm/xscale/ixp425/ixp425reg.h>
85#include <arm/xscale/ixp425/ixp425var.h>
86#endif
87
/* PRIMARY CACHE VARIABLES */

/* L1 instruction cache geometry: total size, line size, associativity. */
int	arm_picache_size;
int	arm_picache_line_size;
int	arm_picache_ways;

/* L1 data cache geometry; also used when the L1 cache is unified. */
int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;
int	arm_pdcache_ways;

/* Cache type field and unified (I+D) flag, from the CP15 Cache Type Register. */
int	arm_pcache_type;
int	arm_pcache_unified;

/* Data cache line size used for alignment, and its mask (align - 1). */
int	arm_dcache_align;
int	arm_dcache_align_mask;

/* ARMv7 only: raw CLIDR value, per-level CCSIDR values indexed by the
 * CSSELR selector, and the Level of Coherence extracted from CLIDR. */
u_int	arm_cache_level;
u_int	arm_cache_type[14];
u_int	arm_cache_loc;

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;
/* NOTE(review): `ctrl` is not read or written in this file chunk;
 * presumably it caches the CP15 control register value -- confirm. */
int ctrl;
110
111#ifdef CPU_ARM9
/*
 * CPU function table for ARM9 cores (ARMv4 TLB ops, arm9 cache ops).
 * No L2 cache: all l2cache slots are no-ops.
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	arm9_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm9_icache_sync_all,		/* icache_sync_all	*/
	arm9_icache_sync_range,		/* icache_sync_range	*/

	arm9_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	arm9_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	arm9_dcache_inv_range,		/* dcache_inv_range	*/
	arm9_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	arm9_idcache_wbinv_all,		/* idcache_wbinv_all	*/
	arm9_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm9_context_switch,		/* context_switch	*/

	arm9_setup			/* cpu setup		*/

};
173#endif /* CPU_ARM9 */
174
175#if defined(CPU_ARM9E)
/*
 * CPU function table for ARMv5 "EC" cores (used for the ARM926EJ-S
 * in set_cpufuncs()).  ARMv4 TLB ops with arm10 single-entry variants;
 * no L2 cache.
 */
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	armv5_ec_setttb,		/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_all,	/* icache_sync_all	*/
	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	armv5_ec_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv5_ec_dcache_inv_range,	/* dcache_inv_range	*/
	armv5_ec_dcache_wb_range,	/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv5_ec_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	cpufunc_nullop,			/* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,		/* l2cache_inv_range    */
	(void *)cpufunc_nullop,		/* l2cache_wb_range     */
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/

};
238
/*
 * CPU function table for Marvell Sheeva cores (MV88FR131/MV88FR571,
 * selected in set_cpufuncs()).  Shares the armv5_ec I/D-cache "all"
 * ops but uses sheeva_* range ops and real L2 cache maintenance.
 */
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	sheeva_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_all,	/* icache_sync_all	*/
	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	sheeva_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	sheeva_dcache_inv_range,	/* dcache_inv_range	*/
	sheeva_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	sheeva_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	sheeva_l2cache_wbinv_all,	/* l2cache_wbinv_all    */
	sheeva_l2cache_wbinv_range,	/* l2cache_wbinv_range  */
	sheeva_l2cache_inv_range,	/* l2cache_inv_range    */
	sheeva_l2cache_wb_range,	/* l2cache_wb_range     */
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	sheeva_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/
};
300#endif /* CPU_ARM9E */
301
302#ifdef CPU_MV_PJ4B
/*
 * CPU function table for Marvell PJ4B cores, using the common ARMv7
 * MMU/TLB/cache routines.  All TLB slots use the unified-TLB "ID"
 * variants, as ARMv7 TLB maintenance is unified.
 */
struct cpu_functions pj4bv7_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	armv7_drain_writebuf,		/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	armv7_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv7_tlb_flushID,		/* tlb_flushID		*/
	armv7_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv7_tlb_flushID,		/* tlb_flushI		*/
	armv7_tlb_flushID_SE,		/* tlb_flushI_SE	*/
	armv7_tlb_flushID,		/* tlb_flushD		*/
	armv7_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */
	armv7_idcache_wbinv_all,	/* icache_sync_all	*/
	armv7_icache_sync_range,	/* icache_sync_range	*/

	armv7_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	armv7_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv7_dcache_inv_range,		/* dcache_inv_range	*/
	armv7_dcache_wb_range,		/* dcache_wb_range	*/

	armv7_idcache_inv_all,		/* idcache_inv_all	*/
	armv7_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv7_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	(void *)cpufunc_nullop,		/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv7_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	armv7_context_switch,		/* context_switch	*/

	pj4bv7_setup			/* cpu setup		*/
};
363#endif /* CPU_MV_PJ4B */
364
365#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
366  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
367  defined(CPU_XSCALE_80219)
368
/*
 * CPU function table for Intel XScale cores
 * (80200 / 80321 / PXA2x0 / IXP425 / 80219).  No L2 cache ops.
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscale_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscale_cache_syncI,		/* icache_sync_all	*/
	xscale_cache_syncI_rng,		/* icache_sync_range	*/

	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_flushID,		/* idcache_inv_all	*/
	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all 	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscale_context_switch,		/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
429#endif
430/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
431   CPU_XSCALE_80219 */
432
433#ifdef CPU_XSCALE_81342
/*
 * CPU function table for the XScale core 3 (81342).  Unlike the plain
 * XScale table above, this one provides real L2 cache maintenance
 * (xscalec3_l2cache_*).
 */
struct cpu_functions xscalec3_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscalec3_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscalec3_cache_syncI,		/* icache_sync_all	*/
	xscalec3_cache_syncI_rng,	/* icache_sync_range	*/

	xscalec3_cache_purgeD,		/* dcache_wbinv_all	*/
	xscalec3_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscalec3_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_flushID,		/* idcache_inv_all	*/
	xscalec3_cache_purgeID,		/* idcache_wbinv_all	*/
	xscalec3_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	xscalec3_l2cache_purge,		/* l2cache_wbinv_all	*/
	xscalec3_l2cache_purge_rng,	/* l2cache_wbinv_range	*/
	xscalec3_l2cache_flush_rng,	/* l2cache_inv_range	*/
	xscalec3_l2cache_clean_rng,	/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscalec3_context_switch,	/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
494#endif /* CPU_XSCALE_81342 */
495
496
497#if defined(CPU_FA526) || defined(CPU_FA626TE)
/*
 * CPU function table for Faraday FA526/FA626TE cores.  Note this is
 * one of the few tables with a real flush_brnchtgt_E implementation.
 */
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	fa526_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	fa526_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	fa526_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	fa526_icache_sync_all,		/* icache_sync_all	*/
	fa526_icache_sync_range,	/* icache_sync_range	*/

	fa526_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	fa526_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	fa526_dcache_inv_range,		/* dcache_inv_range	*/
	fa526_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	fa526_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	fa526_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	fa526_flush_prefetchbuf,	/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	fa526_flush_brnchtgt_E,		/* flush_brnchtgt_E	*/

	fa526_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	fa526_context_switch,		/* context_switch	*/

	fa526_setup			/* cpu setup 		*/
};
558#endif	/* CPU_FA526 || CPU_FA626TE */
559
560#if defined(CPU_ARM1136)
/*
 * CPU function table for ARM1136 cores, built from the shared arm11 /
 * arm11x6 / armv6 routines.  Differs from the ARM1176 table below only
 * in the sleep entry (arm11_sleep vs. arm11x6_sleep).
 */
struct cpu_functions arm1136_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* Domain               */
	arm11x6_setttb,                 /* Setttb               */
	cpufunc_faultstatus,            /* Faultstatus          */
	cpufunc_faultaddress,           /* Faultaddress         */

	/* TLB functions */

	arm11_tlb_flushID,              /* tlb_flushID          */
	arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
	arm11_tlb_flushI,               /* tlb_flushI           */
	arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
	arm11_tlb_flushD,               /* tlb_flushD           */
	arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	arm11x6_icache_sync_all,        /* icache_sync_all      */
	arm11x6_icache_sync_range,      /* icache_sync_range    */

	arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
	armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
	armv6_dcache_inv_range,         /* dcache_inv_range     */
	armv6_dcache_wb_range,          /* dcache_wb_range      */

	armv6_idcache_inv_all,		/* idcache_inv_all	*/
	arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
	arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */

	(void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	arm11x6_flush_prefetchbuf,      /* flush_prefetchbuf    */
	arm11_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	arm11_sleep,                  	/* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	arm11_context_switch,           /* context_switch       */

	arm11x6_setup                   /* cpu setup            */
};
622#endif /* CPU_ARM1136 */
623#if defined(CPU_ARM1176)
/*
 * CPU function table for ARM1176 cores.  Identical to the ARM1136
 * table except for the sleep entry (arm11x6_sleep).
 */
struct cpu_functions arm1176_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* Domain               */
	arm11x6_setttb,                 /* Setttb               */
	cpufunc_faultstatus,            /* Faultstatus          */
	cpufunc_faultaddress,           /* Faultaddress         */

	/* TLB functions */

	arm11_tlb_flushID,              /* tlb_flushID          */
	arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
	arm11_tlb_flushI,               /* tlb_flushI           */
	arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
	arm11_tlb_flushD,               /* tlb_flushD           */
	arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */

	/* Cache operations */

	arm11x6_icache_sync_all,        /* icache_sync_all      */
	arm11x6_icache_sync_range,      /* icache_sync_range    */

	arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
	armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
	armv6_dcache_inv_range,         /* dcache_inv_range     */
	armv6_dcache_wb_range,          /* dcache_wb_range      */

	armv6_idcache_inv_all,		/* idcache_inv_all	*/
	arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
	arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */

	(void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	arm11x6_flush_prefetchbuf,      /* flush_prefetchbuf    */
	arm11_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	arm11x6_sleep,                  /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	arm11_context_switch,           /* context_switch       */

	arm11x6_setup                   /* cpu setup            */
};
685#endif /*CPU_ARM1176 */
686
687#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * CPU function table for Cortex-A and Krait (ARMv7) cores, using the
 * common armv7 MMU/TLB/cache routines.
 */
struct cpu_functions cortexa_cpufuncs = {
	/* CPU functions */

	cpufunc_id,                     /* id                   */
	cpufunc_nullop,                 /* cpwait               */

	/* MMU functions */

	cpufunc_control,                /* control              */
	cpufunc_domains,                /* Domain               */
	armv7_setttb,                   /* Setttb               */
	cpufunc_faultstatus,            /* Faultstatus          */
	cpufunc_faultaddress,           /* Faultaddress         */

	/*
	 * TLB functions.  ARMv7 does all TLB ops based on a unified TLB model
	 * whether the hardware implements separate I+D or not, so we use the
	 * same 'ID' functions for all 3 variations.
	 */

	armv7_tlb_flushID,              /* tlb_flushID          */
	armv7_tlb_flushID_SE,           /* tlb_flushID_SE       */
	armv7_tlb_flushID,              /* tlb_flushI           */
	armv7_tlb_flushID_SE,           /* tlb_flushI_SE        */
	armv7_tlb_flushID,              /* tlb_flushD           */
	armv7_tlb_flushID_SE,           /* tlb_flushD_SE        */

	/* Cache operations */

	armv7_icache_sync_all, 	        /* icache_sync_all      */
	armv7_icache_sync_range,        /* icache_sync_range    */

	armv7_dcache_wbinv_all,         /* dcache_wbinv_all     */
	armv7_dcache_wbinv_range,       /* dcache_wbinv_range   */
	armv7_dcache_inv_range,         /* dcache_inv_range     */
	armv7_dcache_wb_range,          /* dcache_wb_range      */

	armv7_idcache_inv_all,		/* idcache_inv_all	*/
	armv7_idcache_wbinv_all,        /* idcache_wbinv_all    */
	armv7_idcache_wbinv_range,      /* idcache_wbinv_range  */

	/*
	 * Note: For CPUs using the PL310 the L2 ops are filled in when the
	 * L2 cache controller is actually enabled.
	 */
	cpufunc_nullop,                 /* l2cache_wbinv_all    */
	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
	(void *)cpufunc_nullop,         /* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,                 /* flush_prefetchbuf    */
	armv7_drain_writebuf,           /* drain_writebuf       */
	cpufunc_nullop,                 /* flush_brnchtgt_C     */
	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */

	armv7_sleep,                    /* sleep                */

	/* Soft functions */

	cpufunc_null_fixup,             /* dataabt_fixup        */
	cpufunc_null_fixup,             /* prefetchabt_fixup    */

	armv7_context_switch,           /* context_switch       */

	cortexa_setup                     /* cpu setup            */
};
#endif /* CPU_CORTEXA || CPU_KRAIT */
758
/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;		/* active table, chosen by set_cpufuncs() */
u_int cputype;				/* CPU id, masked with CPU_ID_CPU_MASK */
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
766
767#if defined(CPU_ARM9) ||	\
768  defined (CPU_ARM9E) || defined (CPU_ARM1136) ||	\
769  defined(CPU_ARM1176) || defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||		\
770  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
771  defined(CPU_FA526) || defined(CPU_FA626TE) || defined(CPU_MV_PJ4B) ||			\
772  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
773  defined(CPU_CORTEXA) || defined(CPU_KRAIT)
774
/* Global cache line sizes, use 32 as default */
int	arm_dcache_min_line_size = 32;
int	arm_icache_min_line_size = 32;
int	arm_idcache_min_line_size = 32;	/* min of the I and D line sizes */

static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  Consumed by set_cpufuncs() for the arm9 set/index
   constants.  */
static int	arm_dcache_l2_nsets;
static int	arm_dcache_l2_assoc;
static int	arm_dcache_l2_linesize;
787
788static void
789get_cachetype_cp15()
790{
791	u_int ctype, isize, dsize, cpuid;
792	u_int clevel, csize, i, sel;
793	u_int multiplier;
794	u_char type;
795
796	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
797		: "=r" (ctype));
798
799	cpuid = cpufunc_id();
800	/*
801	 * ...and thus spake the ARM ARM:
802	 *
803	 * If an <opcode2> value corresponding to an unimplemented or
804	 * reserved ID register is encountered, the System Control
805	 * processor returns the value of the main ID register.
806	 */
807	if (ctype == cpuid)
808		goto out;
809
810	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
811		/* Resolve minimal cache line sizes */
812		arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
813		arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
814		arm_idcache_min_line_size =
815		    min(arm_icache_min_line_size, arm_dcache_min_line_size);
816
817		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
818		    : "=r" (clevel));
819		arm_cache_level = clevel;
820		arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
821		i = 0;
822		while ((type = (clevel & 0x7)) && i < 7) {
823			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
824			    type == CACHE_SEP_CACHE) {
825				sel = i << 1;
826				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
827				    : : "r" (sel));
828				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
829				    : "=r" (csize));
830				arm_cache_type[sel] = csize;
831				arm_dcache_align = 1 <<
832				    (CPUV7_CT_xSIZE_LEN(csize) + 4);
833				arm_dcache_align_mask = arm_dcache_align - 1;
834			}
835			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
836				sel = (i << 1) | 1;
837				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
838				    : : "r" (sel));
839				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
840				    : "=r" (csize));
841				arm_cache_type[sel] = csize;
842			}
843			i++;
844			clevel >>= 3;
845		}
846	} else {
847		if ((ctype & CPU_CT_S) == 0)
848			arm_pcache_unified = 1;
849
850		/*
851		 * If you want to know how this code works, go read the ARM ARM.
852		 */
853
854		arm_pcache_type = CPU_CT_CTYPE(ctype);
855
856		if (arm_pcache_unified == 0) {
857			isize = CPU_CT_ISIZE(ctype);
858			multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
859			arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
860			if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
861				if (isize & CPU_CT_xSIZE_M)
862					arm_picache_line_size = 0; /* not present */
863				else
864					arm_picache_ways = 1;
865			} else {
866				arm_picache_ways = multiplier <<
867				    (CPU_CT_xSIZE_ASSOC(isize) - 1);
868			}
869			arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
870		}
871
872		dsize = CPU_CT_DSIZE(ctype);
873		multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
874		arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
875		if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
876			if (dsize & CPU_CT_xSIZE_M)
877				arm_pdcache_line_size = 0; /* not present */
878			else
879				arm_pdcache_ways = 1;
880		} else {
881			arm_pdcache_ways = multiplier <<
882			    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
883		}
884		arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
885
886		arm_dcache_align = arm_pdcache_line_size;
887
888		arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
889		arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
890		arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
891		    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
892
893	out:
894		arm_dcache_align_mask = arm_dcache_align - 1;
895	}
896}
897#endif /* ARM9 || XSCALE */
898
899/*
900 * Cannot panic here as we may not have a console yet ...
901 */
902
/*
 * Probe the CPU identification register and install the cpufuncs vector,
 * cache parameters and pmap PTE initialization matching the CPU we are
 * running on.  Only CPU families compiled into this kernel (CPU_* options)
 * are considered.  Returns 0 on success; an unrecognized CPU panics, so
 * the ARCHITECTURE_NOT_PRESENT return is unreachable in practice.
 */
int
set_cpufuncs()
{
	cputype = cpufunc_id();
	cputype &= CPU_ID_CPU_MASK;

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */

#ifdef CPU_ARM9
	/* ARM9: implementor ARM Ltd or TI, primary part number 0x9xx. */
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		/*
		 * Precompute the set/index iteration constants used by
		 * the arm9 set/way dcache operations, derived from the
		 * geometry that get_cachetype_cp15() just filled in.
		 */
		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
		    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E)
	/* Marvell Sheeva (Feroceon) cores. */
	if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
	    cputype == CPU_ID_MV88FR571_41) {
		uint32_t sheeva_ctrl;

		sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
		    MV_L2_ENABLE);
		/*
		 * Workaround for Marvell MV78100 CPU: Cache prefetch
		 * mechanism may affect the cache coherency validity,
		 * so it needs to be disabled.
		 *
		 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
		 * L2 Prefetching Mechanism) for details.
		 */
		if (cputype == CPU_ID_MV88FR571_VD ||
		    cputype == CPU_ID_MV88FR571_41)
			sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;

		sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);

		cpufuncs = sheeva_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	} else if (cputype == CPU_ID_ARM926EJS) {
		cpufuncs = armv5_ec_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		goto out;
	}
#endif /* CPU_ARM9E */
#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
	if (cputype == CPU_ID_ARM1136JS
	    || cputype == CPU_ID_ARM1136JSR1
	    || cputype == CPU_ID_ARM1176JZS) {
#ifdef CPU_ARM1136
		if (cputype == CPU_ID_ARM1136JS
		    || cputype == CPU_ID_ARM1136JSR1)
			cpufuncs = arm1136_cpufuncs;
#endif
#ifdef CPU_ARM1176
		if (cputype == CPU_ID_ARM1176JZS)
			cpufuncs = arm1176_cpufuncs;
#endif
		cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
		get_cachetype_cp15();

		pmap_pte_init_mmu_v6();

		goto out;
	}
#endif /* CPU_ARM1136 || CPU_ARM1176 */
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
	/* ARMv7 Cortex-A family and Qualcomm Krait. */
	if (cputype == CPU_ID_CORTEXA5 ||
	    cputype == CPU_ID_CORTEXA7 ||
	    cputype == CPU_ID_CORTEXA8R1 ||
	    cputype == CPU_ID_CORTEXA8R2 ||
	    cputype == CPU_ID_CORTEXA8R3 ||
	    cputype == CPU_ID_CORTEXA9R1 ||
	    cputype == CPU_ID_CORTEXA9R2 ||
	    cputype == CPU_ID_CORTEXA9R3 ||
	    cputype == CPU_ID_CORTEXA12R0 ||
	    cputype == CPU_ID_CORTEXA15R0 ||
	    cputype == CPU_ID_CORTEXA15R1 ||
	    cputype == CPU_ID_CORTEXA15R2 ||
	    cputype == CPU_ID_CORTEXA15R3 ||
	    cputype == CPU_ID_KRAIT ) {
		cpufuncs = cortexa_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
		get_cachetype_cp15();

		pmap_pte_init_mmu_v6();
		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;
		goto out;
	}
#endif /* CPU_CORTEXA */

#if defined(CPU_MV_PJ4B)
	/* Marvell PJ4B (ARMv7) cores. */
	if (cputype == CPU_ID_MV88SV581X_V7 ||
	    cputype == CPU_ID_MV88SV584X_V7 ||
	    cputype == CPU_ID_ARM_88SV581X_V7) {
		cpufuncs = pj4bv7_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_mmu_v6();
		goto out;
	}
#endif /* CPU_MV_PJ4B */

#if defined(CPU_FA526) || defined(CPU_FA626TE)
	/* Faraday FA526/FA626TE cores. */
	if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
		cpufuncs = fa526_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
		get_cachetype_cp15();
		pmap_pte_init_generic();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif	/* CPU_FA526 || CPU_FA626TE */

#ifdef CPU_XSCALE_80200
	if (cputype == CPU_ID_80200) {
		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;

		i80200_icu_init();

#if defined(XSCALE_CCLKCFG)
		/*
		 * Crank CCLKCFG to maximum legal value.
		 */
		__asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
			:
			: "r" (XSCALE_CCLKCFG));
#endif

		/*
		 * XXX Disable ECC in the Bus Controller Unit; we
		 * don't really support it, yet.  Clear any pending
		 * error indications.
		 */
		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
			:
			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));

		cpufuncs = xscale_cpufuncs;
		/*
		 * i80200 errata: Step-A0 and A1 have a bug where
		 * D$ dirty bits are not cleared on "invalidate by
		 * address".
		 *
		 * Workaround: Clean cache line before invalidating.
		 */
		if (rev == 0 || rev == 1)
			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_80200 */
#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_80321 */

#if defined(CPU_XSCALE_81342)
	if (cputype == CPU_ID_81342) {
		cpufuncs = xscalec3_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		goto out;
	}
#endif /* CPU_XSCALE_81342 */
#ifdef CPU_XSCALE_PXA2X0
	/* ignore core revision to test PXA2xx CPUs */
	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {

		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		goto out;
	}
#endif /* CPU_XSCALE_PXA2X0 */
#ifdef CPU_XSCALE_IXP425
	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
            cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {

		cpufuncs = xscale_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		goto out;
	}
#endif /* CPU_XSCALE_IXP425 */
	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	/* NOTREACHED: panic() does not return. */
	return(ARCHITECTURE_NOT_PRESENT);
out:
	/* Teach UMA the cache-line alignment discovered above. */
	uma_set_align(arm_dcache_align_mask);
	return (0);
}
1134
1135/*
1136 * Fixup routines for data and prefetch aborts.
1137 *
1138 * Several compile time symbols are used
1139 *
1140 * DEBUG_FAULT_CORRECTION - Print debugging information during the
1141 * correction of registers after a fault.
1142 */
1143
1144
1145/*
1146 * Null abort fixup routine.
1147 * For use when no fixup is required.
1148 */
1149int
1150cpufunc_null_fixup(arg)
1151	void *arg;
1152{
1153	return(ABORT_FIXUP_OK);
1154}
1155
1156/*
1157 * CPU Setup code
1158 */
1159
1160#ifdef CPU_ARM9
1161void
1162arm9_setup(void)
1163{
1164	int cpuctrl, cpuctrlmask;
1165
1166	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1167	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1168	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1169	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
1170	    CPU_CONTROL_ROUNDROBIN;
1171	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1172		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1173		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1174		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1175		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1176		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
1177		 | CPU_CONTROL_ROUNDROBIN;
1178
1179#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1180	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1181#endif
1182
1183#ifdef __ARMEB__
1184	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1185#endif
1186	if (vector_page == ARM_VECTORS_HIGH)
1187		cpuctrl |= CPU_CONTROL_VECRELOC;
1188
1189	/* Clear out the cache */
1190	cpu_idcache_wbinv_all();
1191
1192	/* Set the control register */
1193	cpu_control(cpuctrlmask, cpuctrl);
1194	ctrl = cpuctrl;
1195
1196}
1197#endif	/* CPU_ARM9 */
1198
1199#if defined(CPU_ARM9E)
1200void
1201arm10_setup(void)
1202{
1203	int cpuctrl, cpuctrlmask;
1204
1205	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1206	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1207	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
1208	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1209	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1210	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1211	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1212	    | CPU_CONTROL_BPRD_ENABLE
1213	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1214
1215#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1216	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1217#endif
1218
1219#ifdef __ARMEB__
1220	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1221#endif
1222
1223	/* Clear out the cache */
1224	cpu_idcache_wbinv_all();
1225
1226	/* Now really make sure they are clean.  */
1227	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1228
1229	if (vector_page == ARM_VECTORS_HIGH)
1230		cpuctrl |= CPU_CONTROL_VECRELOC;
1231
1232	/* Set the control register */
1233	ctrl = cpuctrl;
1234	cpu_control(0xffffffff, cpuctrl);
1235
1236	/* And again. */
1237	cpu_idcache_wbinv_all();
1238}
1239#endif	/* CPU_ARM9E || CPU_ARM10 */
1240
1241#if defined(CPU_ARM1136) || defined(CPU_ARM1176) \
1242 || defined(CPU_MV_PJ4B) \
1243 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Start the performance-monitor cycle counter (CCNT) free-running.
 * ARM1136/ARM1176 use the CP15 c15 interface; other (ARMv7-class)
 * cores use the architectural c9 PMU registers.
 */
static __inline void
cpu_scc_setup_ccnt(void)
{
/* This is how you give userland access to the CCNT and PMCn
 * registers.
 * BEWARE! This gives write access also, which may not be what
 * you want!
 */
#ifdef _PMC_USER_READ_WRITE_
#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
	/* Use the Secure User and Non-secure Access Validation Control Register
	 * to allow userland access
	 */
	__asm volatile ("mcr	p15, 0, %0, c15, c9, 0\n\t"
			:
			: "r"(0x00000001));
#else
	/* Set PMUSERENR[0] to allow userland access */
	__asm volatile ("mcr	p15, 0, %0, c9, c14, 0\n\t"
			:
			: "r"(0x00000001));
#endif
#endif
#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
	/* Set PMCR[2,0] to enable counters and reset CCNT */
	__asm volatile ("mcr	p15, 0, %0, c15, c12, 0\n\t"
			:
			: "r"(0x00000005));
#else
	/* Set up the PMCCNTR register as a cyclecounter:
	 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
	 * Set PMCR[2,0] to enable counters and reset CCNT
	 * Set PMCNTENSET to 0x80000000 to enable CCNT */
	__asm volatile ("mcr	p15, 0, %0, c9, c14, 2\n\t"
			"mcr	p15, 0, %1, c9, c12, 0\n\t"
			"mcr	p15, 0, %2, c9, c12, 1\n\t"
			:
			: "r"(0xFFFFFFFF),
			  "r"(0x00000005),
			  "r"(0x80000000));
#endif
}
1286#endif
1287
1288#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
/*
 * ARM1136/ARM1176 CPU setup: program the CP15 control and auxiliary
 * control registers, applying revision-specific errata workarounds,
 * then enable the cycle counter.
 */
void
arm11x6_setup(void)
{
	int cpuctrl, cpuctrl_wax;
	uint32_t auxctrl, auxctrl_wax;
	uint32_t tmp, tmp2;
	uint32_t sbz=0;
	uint32_t cpuid;

	cpuid = cpufunc_id();

	/* Desired control register bits. */
	cpuctrl =
		CPU_CONTROL_MMU_ENABLE  |
		CPU_CONTROL_DC_ENABLE   |
		CPU_CONTROL_WBUF_ENABLE |
		CPU_CONTROL_32BP_ENABLE |
		CPU_CONTROL_32BD_ENABLE |
		CPU_CONTROL_LABT_ENABLE |
		CPU_CONTROL_SYST_ENABLE |
		CPU_CONTROL_IC_ENABLE;

	/*
	 * "write as existing" bits
	 * inverse of this is mask
	 */
	cpuctrl_wax =
		(3 << 30) | /* SBZ */
		(1 << 29) | /* FA */
		(1 << 28) | /* TR */
		(3 << 26) | /* SBZ */
		(3 << 19) | /* SBZ */
		(1 << 17);  /* SBZ */

	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;

#ifdef __ARMEB__
	/* Run big-endian. */
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	auxctrl = 0;
	auxctrl_wax = ~0;
	/*
	 * This options enables the workaround for the 364296 ARM1136
	 * r0pX errata (possible cache data corruption with
	 * hit-under-miss enabled). It sets the undocumented bit 31 in
	 * the auxiliary control register and the FI bit in the control
	 * register, thus disabling hit-under-miss without putting the
	 * processor into full low interrupt latency mode. ARM11MPCore
	 * is not affected.
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
		cpuctrl |= CPU_CONTROL_FI_ENABLE;
		auxctrl = ARM1136_AUXCTL_PFI;
		auxctrl_wax = ~ARM1136_AUXCTL_PFI;
	}

	/*
	 * Enable an errata workaround
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
		auxctrl = ARM1176_AUXCTL_PHD;
		auxctrl_wax = ~ARM1176_AUXCTL_PHD;
	}

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));

	/* Allow detection code to find the VFP if it's fitted.  */
	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));

	/* Set the control register */
	ctrl = cpuctrl;
	cpu_control(~cpuctrl_wax, cpuctrl);

	/*
	 * Read-modify-write the auxiliary control register: keep only
	 * the "write as existing" bits, OR in the desired workaround
	 * bits, and write back only if the value actually changed
	 * (the mcrne is predicated on the teq result).
	 */
	__asm volatile ("mrc	p15, 0, %0, c1, c0, 1\n\t"
			"and	%1, %0, %2\n\t"
			"orr	%1, %1, %3\n\t"
			"teq	%0, %1\n\t"
			"mcrne	p15, 0, %1, c1, c0, 1\n\t"
			: "=r"(tmp), "=r"(tmp2) :
			  "r"(auxctrl_wax), "r"(auxctrl));

	/* And again. */
	cpu_idcache_wbinv_all();

	cpu_scc_setup_ccnt();
}
1383#endif  /* CPU_ARM1136 || CPU_ARM1176 */
1384
1385#ifdef CPU_MV_PJ4B
1386void
1387pj4bv7_setup(void)
1388{
1389	int cpuctrl;
1390
1391	pj4b_config();
1392
1393	cpuctrl = CPU_CONTROL_MMU_ENABLE;
1394#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1395	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1396#endif
1397	cpuctrl |= CPU_CONTROL_DC_ENABLE;
1398	cpuctrl |= (0xf << 3);
1399	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
1400	cpuctrl |= CPU_CONTROL_IC_ENABLE;
1401	if (vector_page == ARM_VECTORS_HIGH)
1402		cpuctrl |= CPU_CONTROL_VECRELOC;
1403	cpuctrl |= (0x5 << 16) | (1 < 22);
1404	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
1405
1406	/* Clear out the cache */
1407	cpu_idcache_wbinv_all();
1408
1409	/* Set the control register */
1410	ctrl = cpuctrl;
1411	cpu_control(0xFFFFFFFF, cpuctrl);
1412
1413	/* And again. */
1414	cpu_idcache_wbinv_all();
1415
1416	cpu_scc_setup_ccnt();
1417}
1418#endif /* CPU_MV_PJ4B */
1419
1420#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
1421
1422void
1423cortexa_setup(void)
1424{
1425	int cpuctrl, cpuctrlmask;
1426
1427	cpuctrlmask = CPU_CONTROL_MMU_ENABLE |     /* MMU enable         [0] */
1428	    CPU_CONTROL_AFLT_ENABLE |    /* Alignment fault    [1] */
1429	    CPU_CONTROL_DC_ENABLE |      /* DCache enable      [2] */
1430	    CPU_CONTROL_BPRD_ENABLE |    /* Branch prediction [11] */
1431	    CPU_CONTROL_IC_ENABLE |      /* ICache enable     [12] */
1432	    CPU_CONTROL_VECRELOC;        /* Vector relocation [13] */
1433
1434	cpuctrl = CPU_CONTROL_MMU_ENABLE |
1435	    CPU_CONTROL_IC_ENABLE |
1436	    CPU_CONTROL_DC_ENABLE |
1437	    CPU_CONTROL_BPRD_ENABLE;
1438
1439#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1440	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1441#endif
1442
1443	/* Switch to big endian */
1444#ifdef __ARMEB__
1445	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1446#endif
1447
1448	/* Check if the vector page is at the high address (0xffff0000) */
1449	if (vector_page == ARM_VECTORS_HIGH)
1450		cpuctrl |= CPU_CONTROL_VECRELOC;
1451
1452	/* Clear out the cache */
1453	cpu_idcache_wbinv_all();
1454
1455	/* Set the control register */
1456	ctrl = cpuctrl;
1457	cpu_control(cpuctrlmask, cpuctrl);
1458
1459	/* And again. */
1460	cpu_idcache_wbinv_all();
1461#ifdef SMP
1462	armv7_auxctrl((1 << 6) | (1 << 0), (1 << 6) | (1 << 0)); /* Enable SMP + TLB broadcasting  */
1463#endif
1464
1465	cpu_scc_setup_ccnt();
1466}
1467#endif  /* CPU_CORTEXA */
1468
1469#if defined(CPU_FA526) || defined(CPU_FA626TE)
1470void
1471fa526_setup(void)
1472{
1473	int cpuctrl, cpuctrlmask;
1474
1475	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1476		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1477		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1478		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1479		| CPU_CONTROL_BPRD_ENABLE;
1480	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1481		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1482		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1483		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1484		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1485		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1486		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1487
1488#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1489	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1490#endif
1491
1492#ifdef __ARMEB__
1493	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1494#endif
1495
1496	if (vector_page == ARM_VECTORS_HIGH)
1497		cpuctrl |= CPU_CONTROL_VECRELOC;
1498
1499	/* Clear out the cache */
1500	cpu_idcache_wbinv_all();
1501
1502	/* Set the control register */
1503	ctrl = cpuctrl;
1504	cpu_control(0xffffffff, cpuctrl);
1505}
1506#endif	/* CPU_FA526 || CPU_FA626TE */
1507
1508#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1509  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1510  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
/*
 * XScale CPU setup: program the CP15 control register and the XScale
 * auxiliary control register (write-buffer coalescing, plus extra bits
 * on CORE3 parts).
 */
void
xscale_setup(void)
{
	uint32_t auxctl;
	int cpuctrl, cpuctrlmask;

	/*
	 * The XScale Write Buffer is always enabled.  Our option
	 * is to enable/disable coalescing.  Note that bits 6:3
	 * must always be enabled.
	 */

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE;
	/*
	 * NOTE(review): cpuctrlmask is computed but currently unused --
	 * the masked cpu_control() call below is commented out in
	 * favor of an all-ones mask.
	 */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
		 CPU_CONTROL_L2_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	/* Run big-endian. */
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#ifdef CPU_XSCALE_CORE3
	cpuctrl |= CPU_CONTROL_L2_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register.  Note that bits 6:3 must always
	 * be set to 1.
	 */
	ctrl = cpuctrl;
/*	cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/* Make sure write coalescing is turned on */
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
		: "=r" (auxctl));
#ifdef XSCALE_NO_COALESCE_WRITES
	auxctl |= XSCALE_AUXCTL_K;
#else
	auxctl &= ~XSCALE_AUXCTL_K;
#endif
#ifdef CPU_XSCALE_CORE3
	/* CORE3: also set the LLR bit and MD field (per the macro
	 * names, cache attribute / mini-dcache controls -- see the
	 * 3rd-gen XScale documentation). */
	auxctl |= XSCALE_AUXCTL_LLR;
	auxctl |= XSCALE_AUXCTL_MD_MASK;
#endif
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
		: : "r" (auxctl));
}
1577#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
1578	   CPU_XSCALE_80219 */
1579