/* cpufunc.c revision 280824 */
1/*	$NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $	*/
2
3/*-
4 * arm9 support code Copyright (C) 2001 ARM Ltd
5 * Copyright (c) 1997 Mark Brinicombe.
6 * Copyright (c) 1997 Causality Limited
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed by Causality Limited.
20 * 4. The name of Causality Limited may not be used to endorse or promote
21 *    products derived from this software without specific prior written
22 *    permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
25 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
28 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * RiscBSD kernel project
37 *
38 * cpufuncs.c
39 *
40 * C functions for supporting CPU / MMU / TLB specific operations.
41 *
42 * Created      : 30/01/97
43 */
44#include <sys/cdefs.h>
45__FBSDID("$FreeBSD: head/sys/arm/arm/cpufunc.c 280824 2015-03-29 21:12:59Z andrew $");
46
47#include <sys/param.h>
48#include <sys/systm.h>
49#include <sys/lock.h>
50#include <sys/mutex.h>
51#include <sys/bus.h>
52#include <machine/bus.h>
53#include <machine/cpu.h>
54#include <machine/disassem.h>
55
56#include <vm/vm.h>
57#include <vm/pmap.h>
58#include <vm/uma.h>
59
60#include <machine/cpuconf.h>
61#include <machine/cpufunc.h>
62
63#ifdef CPU_XSCALE_80200
64#include <arm/xscale/i80200/i80200reg.h>
65#include <arm/xscale/i80200/i80200var.h>
66#endif
67
68#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
69#include <arm/xscale/i80321/i80321reg.h>
70#include <arm/xscale/i80321/i80321var.h>
71#endif
72
73/*
74 * Some definitions in i81342reg.h clash with i80321reg.h.
75 * This only happens for the LINT kernel. As it happens,
76 * we don't need anything from i81342reg.h that we already
77 * got from somewhere else during a LINT compile.
78 */
79#if defined(CPU_XSCALE_81342) && !defined(COMPILING_LINT)
80#include <arm/xscale/i8134x/i81342reg.h>
81#endif
82
83#ifdef CPU_XSCALE_IXP425
84#include <arm/xscale/ixp425/ixp425reg.h>
85#include <arm/xscale/ixp425/ixp425var.h>
86#endif
87
/* PRIMARY CACHE VARIABLES */

/*
 * Geometry of the primary (L1) instruction cache on cores with split
 * I/D caches.  All of these are filled in by get_cachetype_cp15().
 */
int	arm_picache_size;
int	arm_picache_line_size;
int	arm_picache_ways;

/* Geometry of the primary data (or unified) cache. */
int	arm_pdcache_size;	/* and unified */
int	arm_pdcache_line_size;
int	arm_pdcache_ways;

int	arm_pcache_type;	/* cache type field from the pre-v7 CP15 CTR */
int	arm_pcache_unified;	/* non-zero when I and D caches are unified */

int	arm_dcache_align;	/* data cache line size, in bytes */
int	arm_dcache_align_mask;	/* arm_dcache_align - 1 */

u_int	arm_cache_level;	/* raw CLIDR value (ARMv7 probe path) */
u_int	arm_cache_type[14];	/* raw CCSIDR per level: index (level << 1) | is_icache */
u_int	arm_cache_loc;		/* Level of Coherency extracted from CLIDR */

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;
int ctrl;			/* NOTE(review): not referenced in this file -- confirm external users */
110
#ifdef CPU_ARM9
/*
 * Dispatch table for ARM9TDMI-family cores.  set_cpufuncs() copies this
 * into the global 'cpufuncs'.  Initializers are positional; the trailing
 * comment on each line names the struct cpu_functions member being set.
 * '(void *)' casts adapt a routine with a different prototype into slots
 * this core does not need specialized behavior for.
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	arm9_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm9_icache_sync_all,		/* icache_sync_all	*/
	arm9_icache_sync_range,		/* icache_sync_range	*/

	arm9_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	arm9_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	arm9_dcache_inv_range,		/* dcache_inv_range	*/
	arm9_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	arm9_idcache_wbinv_all,		/* idcache_wbinv_all	*/
	arm9_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	/* No L2 cache on these cores: all L2 slots are no-ops. */
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm9_context_switch,		/* context_switch	*/

	arm9_setup			/* cpu setup		*/

};
#endif /* CPU_ARM9 */
174
#if defined(CPU_ARM9E)
/*
 * Dispatch table for ARMv5E cores (e.g. ARM926EJ-S); selected by
 * set_cpufuncs() for CPU_ID_ARM926EJS.  Positional initializers; the
 * trailing comments name the struct cpu_functions members.
 */
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	armv5_ec_setttb,		/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_all,	/* icache_sync_all	*/
	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	armv5_ec_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv5_ec_dcache_inv_range,	/* dcache_inv_range	*/
	armv5_ec_dcache_wb_range,	/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv5_ec_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	/* No outer L2 cache handled here: all L2 slots are no-ops. */
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/

};
238
/*
 * Dispatch table for Marvell Sheeva (Feroceon 88FR131/88FR571) cores;
 * selected by set_cpufuncs() for the MV88FR* CPU IDs.  Unlike the other
 * ARM9E tables this one wires up real L2 cache maintenance routines.
 */
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	sheeva_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv5_ec_icache_sync_all,	/* icache_sync_all	*/
	armv5_ec_icache_sync_range,	/* icache_sync_range	*/

	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	sheeva_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	sheeva_dcache_inv_range,	/* dcache_inv_range	*/
	sheeva_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	sheeva_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	sheeva_l2cache_wbinv_all,	/* l2cache_wbinv_all	*/
	sheeva_l2cache_wbinv_range,	/* l2cache_wbinv_range	*/
	sheeva_l2cache_inv_range,	/* l2cache_inv_range	*/
	sheeva_l2cache_wb_range,	/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	sheeva_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm10_context_switch,		/* context_switch	*/

	arm10_setup			/* cpu setup		*/
};
#endif /* CPU_ARM9E */
301
#ifdef CPU_MV_PJ4B
/*
 * Dispatch table for Marvell PJ4B (ARMv7) cores; selected by
 * set_cpufuncs() for the MV88SV58xx CPU IDs.  Uses the generic armv7
 * maintenance routines throughout.
 */
struct cpu_functions pj4bv7_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	armv7_drain_writebuf,		/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	armv7_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/*
	 * TLB functions: the unified armv7 'ID' flush routines serve the
	 * I-only and D-only slots as well.
	 */

	armv7_tlb_flushID,		/* tlb_flushID		*/
	armv7_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv7_tlb_flushID,		/* tlb_flushI		*/
	armv7_tlb_flushID_SE,		/* tlb_flushI_SE	*/
	armv7_tlb_flushID,		/* tlb_flushD		*/
	armv7_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */
	/* NOTE: full I+D write-back/invalidate stands in for icache_sync_all. */
	armv7_idcache_wbinv_all,	/* icache_sync_all	*/
	armv7_icache_sync_range,	/* icache_sync_range	*/

	armv7_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	armv7_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv7_dcache_inv_range,		/* dcache_inv_range	*/
	armv7_dcache_wb_range,		/* dcache_wb_range	*/

	armv7_idcache_inv_all,		/* idcache_inv_all	*/
	armv7_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv7_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	(void *)cpufunc_nullop,		/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv7_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	(void *)cpufunc_nullop,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	armv7_context_switch,		/* context_switch	*/

	pj4bv7_setup			/* cpu setup		*/
};
#endif /* CPU_MV_PJ4B */
364
#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
  defined(CPU_XSCALE_80219)

/*
 * Dispatch table shared by the first-generation XScale cores
 * (i80200, i80321, i80219, PXA2x0, IXP425).
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscale_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscale_cache_syncI,		/* icache_sync_all	*/
	xscale_cache_syncI_rng,		/* icache_sync_range	*/

	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_flushID,		/* idcache_inv_all	*/
	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	cpufunc_nullop,			/* l2cache_wbinv_all 	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscale_context_switch,		/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
#endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 ||
	  CPU_XSCALE_IXP425 || CPU_XSCALE_80219 */
432
#ifdef CPU_XSCALE_81342
/*
 * Dispatch table for the third-generation XScale core (i81342), which
 * adds a real L2 cache and its own setttb/purge primitives.
 */
struct cpu_functions xscalec3_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	xscale_cpwait,			/* cpwait		*/

	/* MMU functions */

	xscale_control,			/* control		*/
	cpufunc_domains,		/* domain		*/
	xscalec3_setttb,		/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	xscalec3_cache_syncI,		/* icache_sync_all	*/
	xscalec3_cache_syncI_rng,	/* icache_sync_range	*/

	xscalec3_cache_purgeD,		/* dcache_wbinv_all	*/
	xscalec3_cache_purgeD_rng,	/* dcache_wbinv_range	*/
	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
	xscalec3_cache_cleanD_rng,	/* dcache_wb_range	*/

	xscale_cache_flushID,		/* idcache_inv_all	*/
	xscalec3_cache_purgeID,		/* idcache_wbinv_all	*/
	xscalec3_cache_purgeID_rng,	/* idcache_wbinv_range	*/
	xscalec3_l2cache_purge,		/* l2cache_wbinv_all	*/
	xscalec3_l2cache_purge_rng,	/* l2cache_wbinv_range	*/
	xscalec3_l2cache_flush_rng,	/* l2cache_inv_range	*/
	xscalec3_l2cache_clean_rng,	/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	xscale_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	xscalec3_context_switch,	/* context_switch	*/

	xscale_setup			/* cpu setup		*/
};
#endif /* CPU_XSCALE_81342 */
495
496
#if defined(CPU_FA526) || defined(CPU_FA626TE)
/*
 * Dispatch table for Faraday FA526/FA626TE cores; selected by
 * set_cpufuncs() for CPU_ID_FA526/CPU_ID_FA626TE.
 */
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* domain		*/
	fa526_setttb,			/* setttb		*/
	cpufunc_faultstatus,		/* faultstatus		*/
	cpufunc_faultaddress,		/* faultaddress		*/

	/* TLB functions */

	armv4_tlb_flushID,		/* tlb_flushID		*/
	fa526_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv4_tlb_flushI,		/* tlb_flushI		*/
	fa526_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	armv4_tlb_flushD,		/* tlb_flushD		*/
	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	fa526_icache_sync_all,		/* icache_sync_all	*/
	fa526_icache_sync_range,	/* icache_sync_range	*/

	fa526_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	fa526_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	fa526_dcache_inv_range,		/* dcache_inv_range	*/
	fa526_dcache_wb_range,		/* dcache_wb_range	*/

	armv4_idcache_inv_all,		/* idcache_inv_all	*/
	fa526_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	fa526_idcache_wbinv_range,	/* idcache_wbinv_range	*/
	/* No L2 cache on these cores: all L2 slots are no-ops. */
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	fa526_flush_prefetchbuf,	/* flush_prefetchbuf	*/
	armv4_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	fa526_flush_brnchtgt_E,		/* flush_brnchtgt_E	*/

	fa526_cpu_sleep,		/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	fa526_context_switch,		/* context_switch	*/

	fa526_setup			/* cpu setup 		*/
};
#endif	/* CPU_FA526 || CPU_FA626TE */
559
#if defined(CPU_ARM1176)
/*
 * Dispatch table for ARM1176 (ARMv6) cores; selected by set_cpufuncs()
 * for CPU_ID_ARM1176JZS.
 */
struct cpu_functions arm1176_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	arm11x6_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/* TLB functions */

	arm11_tlb_flushID,		/* tlb_flushID		*/
	arm11_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	arm11_tlb_flushI,		/* tlb_flushI		*/
	arm11_tlb_flushI_SE,		/* tlb_flushI_SE	*/
	arm11_tlb_flushD,		/* tlb_flushD		*/
	arm11_tlb_flushD_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	arm11x6_icache_sync_all,	/* icache_sync_all	*/
	arm11x6_icache_sync_range,	/* icache_sync_range	*/

	arm11x6_dcache_wbinv_all,	/* dcache_wbinv_all	*/
	armv6_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv6_dcache_inv_range,		/* dcache_inv_range	*/
	armv6_dcache_wb_range,		/* dcache_wb_range	*/

	armv6_idcache_inv_all,		/* idcache_inv_all	*/
	arm11x6_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	arm11x6_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	/* No L2 cache handled here: all L2 slots are no-ops. */
	(void *)cpufunc_nullop,		/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	arm11x6_flush_prefetchbuf,	/* flush_prefetchbuf	*/
	arm11_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	arm11x6_sleep,			/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	arm11_context_switch,		/* context_switch	*/

	arm11x6_setup			/* cpu setup		*/
};
#endif /* CPU_ARM1176 */
623
#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Dispatch table for Cortex-A family and Qualcomm Krait (ARMv7) cores;
 * selected by set_cpufuncs() for the CPU_ID_CORTEXA*/CPU_ID_KRAIT IDs.
 */
struct cpu_functions cortexa_cpufuncs = {
	/* CPU functions */

	cpufunc_id,			/* id			*/
	cpufunc_nullop,			/* cpwait		*/

	/* MMU functions */

	cpufunc_control,		/* control		*/
	cpufunc_domains,		/* Domain		*/
	armv7_setttb,			/* Setttb		*/
	cpufunc_faultstatus,		/* Faultstatus		*/
	cpufunc_faultaddress,		/* Faultaddress		*/

	/*
	 * TLB functions.  ARMv7 does all TLB ops based on a unified TLB model
	 * whether the hardware implements separate I+D or not, so we use the
	 * same 'ID' functions for all 3 variations.
	 */

	armv7_tlb_flushID,		/* tlb_flushID		*/
	armv7_tlb_flushID_SE,		/* tlb_flushID_SE	*/
	armv7_tlb_flushID,		/* tlb_flushI		*/
	armv7_tlb_flushID_SE,		/* tlb_flushI_SE	*/
	armv7_tlb_flushID,		/* tlb_flushD		*/
	armv7_tlb_flushID_SE,		/* tlb_flushD_SE	*/

	/* Cache operations */

	armv7_icache_sync_all,		/* icache_sync_all	*/
	armv7_icache_sync_range,	/* icache_sync_range	*/

	armv7_dcache_wbinv_all,		/* dcache_wbinv_all	*/
	armv7_dcache_wbinv_range,	/* dcache_wbinv_range	*/
	armv7_dcache_inv_range,		/* dcache_inv_range	*/
	armv7_dcache_wb_range,		/* dcache_wb_range	*/

	armv7_idcache_inv_all,		/* idcache_inv_all	*/
	armv7_idcache_wbinv_all,	/* idcache_wbinv_all	*/
	armv7_idcache_wbinv_range,	/* idcache_wbinv_range	*/

	/*
	 * Note: For CPUs using the PL310 the L2 ops are filled in when the
	 * L2 cache controller is actually enabled.
	 */
	cpufunc_nullop,			/* l2cache_wbinv_all	*/
	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
	(void *)cpufunc_nullop,		/* l2cache_drain_writebuf */

	/* Other functions */

	cpufunc_nullop,			/* flush_prefetchbuf	*/
	armv7_drain_writebuf,		/* drain_writebuf	*/
	cpufunc_nullop,			/* flush_brnchtgt_C	*/
	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/

	armv7_sleep,			/* sleep		*/

	/* Soft functions */

	cpufunc_null_fixup,		/* dataabt_fixup	*/
	cpufunc_null_fixup,		/* prefetchabt_fixup	*/

	armv7_context_switch,		/* context_switch	*/

	cortexa_setup			/* cpu setup		*/
};
#endif /* CPU_CORTEXA || CPU_KRAIT */
695
/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;		/* active table; set_cpufuncs() copies one of the per-CPU tables here */
u_int cputype;				/* cpufunc_id() masked with CPU_ID_CPU_MASK */
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
703
#if defined(CPU_ARM9) ||	\
  defined (CPU_ARM9E) ||	\
  defined(CPU_ARM1176) || defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||		\
  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
  defined(CPU_FA526) || defined(CPU_FA626TE) || defined(CPU_MV_PJ4B) ||			\
  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
  defined(CPU_CORTEXA) || defined(CPU_KRAIT)

/*
 * Global cache line sizes, use 32 as default.  On ARMv7 parts these are
 * overwritten from the Cache Type Register by get_cachetype_cp15().
 */
int	arm_dcache_min_line_size = 32;
int	arm_icache_min_line_size = 32;
int	arm_idcache_min_line_size = 32;	/* min(icache, dcache) line size */

static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers.  */
static int	arm_dcache_l2_nsets;
static int	arm_dcache_l2_assoc;
static int	arm_dcache_l2_linesize;
724
/*
 * Probe the CP15 cache identification registers and fill in the global
 * cache-geometry variables declared at the top of this file
 * (arm_picache_*, arm_pdcache_*, arm_dcache_*, arm_cache_*).  Handles
 * both the ARMv7 CLIDR/CCSIDR scheme and the older pre-v7 Cache Type
 * Register field layout.
 */
static void
get_cachetype_cp15()
{
	u_int ctype, isize, dsize, cpuid;
	u_int clevel, csize, i, sel;
	u_int multiplier;
	u_char type;

	/* Read the Cache Type Register (CP15 c0, c0, opc2 1). */
	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	cpuid = cpufunc_id();
	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 *
	 * I.e. if CTR reads back as the main ID, there is no cache info
	 * to be had; bail out having only (re)computed the align mask.
	 */
	if (ctype == cpuid)
		goto out;

	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
		/* Resolve minimal cache line sizes */
		arm_dcache_min_line_size = 1 << (CPU_CT_DMINLINE(ctype) + 2);
		arm_icache_min_line_size = 1 << (CPU_CT_IMINLINE(ctype) + 2);
		arm_idcache_min_line_size =
		    min(arm_icache_min_line_size, arm_dcache_min_line_size);

		/* Read CLIDR to learn which cache levels are implemented. */
		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
		    : "=r" (clevel));
		arm_cache_level = clevel;
		arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
		i = 0;
		/* Walk the levels: CLIDR holds 3 type bits per level, up to 7. */
		while ((type = (clevel & 0x7)) && i < 7) {
			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
			    type == CACHE_SEP_CACHE) {
				sel = i << 1;
				/* Select the D/unified cache at this level
				 * (CSSELR), then read its size info (CCSIDR). */
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
				/* Overwritten each iteration: ends up
				 * describing the last D/unified level seen. */
				arm_dcache_align = 1 <<
				    (CPUV7_CT_xSIZE_LEN(csize) + 4);
				arm_dcache_align_mask = arm_dcache_align - 1;
			}
			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
				sel = (i << 1) | 1;
				/* Same CSSELR/CCSIDR dance for the I-cache. */
				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
				    : : "r" (sel));
				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
				    : "=r" (csize));
				arm_cache_type[sel] = csize;
			}
			i++;
			clevel >>= 3;
		}
	} else {
		/* Pre-v7 CTR layout. */
		if ((ctype & CPU_CT_S) == 0)
			arm_pcache_unified = 1;

		/*
		 * If you want to know how this code works, go read the ARM ARM.
		 */

		arm_pcache_type = CPU_CT_CTYPE(ctype);

		if (arm_pcache_unified == 0) {
			isize = CPU_CT_ISIZE(ctype);
			multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
			arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
			if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
				if (isize & CPU_CT_xSIZE_M)
					arm_picache_line_size = 0; /* not present */
				else
					arm_picache_ways = 1;
			} else {
				arm_picache_ways = multiplier <<
				    (CPU_CT_xSIZE_ASSOC(isize) - 1);
			}
			arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
		}

		dsize = CPU_CT_DSIZE(ctype);
		multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
		if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
			if (dsize & CPU_CT_xSIZE_M)
				arm_pdcache_line_size = 0; /* not present */
			else
				arm_pdcache_ways = 1;
		} else {
			arm_pdcache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
		}
		arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

		arm_dcache_align = arm_pdcache_line_size;

		arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
		arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
		arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
		    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

	/*
	 * NOTE: this label sits inside the pre-v7 branch; the early
	 * 'goto out' above jumps into this block (legal in C) and only
	 * recomputes the alignment mask.
	 */
	out:
		arm_dcache_align_mask = arm_dcache_align - 1;
	}
}
#endif /* ARM9 || XSCALE */
835
836/*
837 * Cannot panic here as we may not have a console yet ...
838 */
839
840int
841set_cpufuncs()
842{
843	cputype = cpufunc_id();
844	cputype &= CPU_ID_CPU_MASK;
845
846	/*
847	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
848	 * CPU type where we want to use it by default, then we set it.
849	 */
850
851#ifdef CPU_ARM9
852	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
853	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
854	    (cputype & 0x0000f000) == 0x00009000) {
855		cpufuncs = arm9_cpufuncs;
856		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
857		get_cachetype_cp15();
858		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
859		arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
860		    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
861		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
862		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
863		pmap_pte_init_generic();
864		goto out;
865	}
866#endif /* CPU_ARM9 */
867#if defined(CPU_ARM9E)
868	if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
869	    cputype == CPU_ID_MV88FR571_41) {
870		uint32_t sheeva_ctrl;
871
872		sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
873		    MV_L2_ENABLE);
874		/*
875		 * Workaround for Marvell MV78100 CPU: Cache prefetch
876		 * mechanism may affect the cache coherency validity,
877		 * so it needs to be disabled.
878		 *
879		 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
880		 * L2 Prefetching Mechanism) for details.
881		 */
882		if (cputype == CPU_ID_MV88FR571_VD ||
883		    cputype == CPU_ID_MV88FR571_41)
884			sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;
885
886		sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);
887
888		cpufuncs = sheeva_cpufuncs;
889		get_cachetype_cp15();
890		pmap_pte_init_generic();
891		goto out;
892	} else if (cputype == CPU_ID_ARM926EJS) {
893		cpufuncs = armv5_ec_cpufuncs;
894		get_cachetype_cp15();
895		pmap_pte_init_generic();
896		goto out;
897	}
898#endif /* CPU_ARM9E */
899#if defined(CPU_ARM1176)
900	if (cputype == CPU_ID_ARM1176JZS) {
901		cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
902		get_cachetype_cp15();
903
904		pmap_pte_init_mmu_v6();
905
906		goto out;
907	}
908#endif /* CPU_ARM1176 */
909#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
910	if (cputype == CPU_ID_CORTEXA5 ||
911	    cputype == CPU_ID_CORTEXA7 ||
912	    cputype == CPU_ID_CORTEXA8R1 ||
913	    cputype == CPU_ID_CORTEXA8R2 ||
914	    cputype == CPU_ID_CORTEXA8R3 ||
915	    cputype == CPU_ID_CORTEXA9R1 ||
916	    cputype == CPU_ID_CORTEXA9R2 ||
917	    cputype == CPU_ID_CORTEXA9R3 ||
918	    cputype == CPU_ID_CORTEXA12R0 ||
919	    cputype == CPU_ID_CORTEXA15R0 ||
920	    cputype == CPU_ID_CORTEXA15R1 ||
921	    cputype == CPU_ID_CORTEXA15R2 ||
922	    cputype == CPU_ID_CORTEXA15R3 ||
923	    cputype == CPU_ID_KRAIT ) {
924		cpufuncs = cortexa_cpufuncs;
925		cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
926		get_cachetype_cp15();
927
928		pmap_pte_init_mmu_v6();
929		/* Use powersave on this CPU. */
930		cpu_do_powersave = 1;
931		goto out;
932	}
933#endif /* CPU_CORTEXA */
934
935#if defined(CPU_MV_PJ4B)
936	if (cputype == CPU_ID_MV88SV581X_V7 ||
937	    cputype == CPU_ID_MV88SV584X_V7 ||
938	    cputype == CPU_ID_ARM_88SV581X_V7) {
939		cpufuncs = pj4bv7_cpufuncs;
940		get_cachetype_cp15();
941		pmap_pte_init_mmu_v6();
942		goto out;
943	}
944#endif /* CPU_MV_PJ4B */
945
946#if defined(CPU_FA526) || defined(CPU_FA626TE)
947	if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
948		cpufuncs = fa526_cpufuncs;
949		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
950		get_cachetype_cp15();
951		pmap_pte_init_generic();
952
953		/* Use powersave on this CPU. */
954		cpu_do_powersave = 1;
955
956		goto out;
957	}
958#endif	/* CPU_FA526 || CPU_FA626TE */
959
960#ifdef CPU_XSCALE_80200
961	if (cputype == CPU_ID_80200) {
962		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
963
964		i80200_icu_init();
965
966#if defined(XSCALE_CCLKCFG)
967		/*
968		 * Crank CCLKCFG to maximum legal value.
969		 */
970		__asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
971			:
972			: "r" (XSCALE_CCLKCFG));
973#endif
974
975		/*
976		 * XXX Disable ECC in the Bus Controller Unit; we
977		 * don't really support it, yet.  Clear any pending
978		 * error indications.
979		 */
980		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
981			:
982			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
983
984		cpufuncs = xscale_cpufuncs;
985		/*
986		 * i80200 errata: Step-A0 and A1 have a bug where
987		 * D$ dirty bits are not cleared on "invalidate by
988		 * address".
989		 *
990		 * Workaround: Clean cache line before invalidating.
991		 */
992		if (rev == 0 || rev == 1)
993			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
994
995		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
996		get_cachetype_cp15();
997		pmap_pte_init_xscale();
998		goto out;
999	}
1000#endif /* CPU_XSCALE_80200 */
1001#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
1002	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
1003	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
1004	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
1005		cpufuncs = xscale_cpufuncs;
1006		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1007		get_cachetype_cp15();
1008		pmap_pte_init_xscale();
1009		goto out;
1010	}
1011#endif /* CPU_XSCALE_80321 */
1012
1013#if defined(CPU_XSCALE_81342)
1014	if (cputype == CPU_ID_81342) {
1015		cpufuncs = xscalec3_cpufuncs;
1016		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1017		get_cachetype_cp15();
1018		pmap_pte_init_xscale();
1019		goto out;
1020	}
1021#endif /* CPU_XSCALE_81342 */
1022#ifdef CPU_XSCALE_PXA2X0
1023	/* ignore core revision to test PXA2xx CPUs */
1024	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
1025	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
1026	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
1027
1028		cpufuncs = xscale_cpufuncs;
1029		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1030		get_cachetype_cp15();
1031		pmap_pte_init_xscale();
1032
1033		/* Use powersave on this CPU. */
1034		cpu_do_powersave = 1;
1035
1036		goto out;
1037	}
1038#endif /* CPU_XSCALE_PXA2X0 */
1039#ifdef CPU_XSCALE_IXP425
1040	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
1041            cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
1042
1043		cpufuncs = xscale_cpufuncs;
1044		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1045		get_cachetype_cp15();
1046		pmap_pte_init_xscale();
1047
1048		goto out;
1049	}
1050#endif /* CPU_XSCALE_IXP425 */
1051	/*
1052	 * Bzzzz. And the answer was ...
1053	 */
1054	panic("No support for this CPU type (%08x) in kernel", cputype);
1055	return(ARCHITECTURE_NOT_PRESENT);
1056out:
1057	uma_set_align(arm_dcache_align_mask);
1058	return (0);
1059}
1060
1061/*
1062 * Fixup routines for data and prefetch aborts.
1063 *
1064 * Several compile time symbols are used
1065 *
1066 * DEBUG_FAULT_CORRECTION - Print debugging information during the
1067 * correction of registers after a fault.
1068 */
1069
1070
1071/*
1072 * Null abort fixup routine.
1073 * For use when no fixup is required.
1074 */
1075int
1076cpufunc_null_fixup(arg)
1077	void *arg;
1078{
1079	return(ABORT_FIXUP_OK);
1080}
1081
1082/*
1083 * CPU Setup code
1084 */
1085
1086#ifdef CPU_ARM9
1087void
1088arm9_setup(void)
1089{
1090	int cpuctrl, cpuctrlmask;
1091
1092	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1093	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1094	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1095	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
1096	    CPU_CONTROL_ROUNDROBIN;
1097	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1098		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1099		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1100		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1101		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1102		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
1103		 | CPU_CONTROL_ROUNDROBIN;
1104
1105#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1106	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1107#endif
1108
1109#ifdef __ARMEB__
1110	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1111#endif
1112	if (vector_page == ARM_VECTORS_HIGH)
1113		cpuctrl |= CPU_CONTROL_VECRELOC;
1114
1115	/* Clear out the cache */
1116	cpu_idcache_wbinv_all();
1117
1118	/* Set the control register */
1119	cpu_control(cpuctrlmask, cpuctrl);
1120	ctrl = cpuctrl;
1121
1122}
1123#endif	/* CPU_ARM9 */
1124
1125#if defined(CPU_ARM9E)
1126void
1127arm10_setup(void)
1128{
1129	int cpuctrl, cpuctrlmask;
1130
1131	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1132	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1133	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
1134	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1135	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1136	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1137	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1138	    | CPU_CONTROL_BPRD_ENABLE
1139	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1140
1141#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1142	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1143#endif
1144
1145#ifdef __ARMEB__
1146	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1147#endif
1148
1149	/* Clear out the cache */
1150	cpu_idcache_wbinv_all();
1151
1152	/* Now really make sure they are clean.  */
1153	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1154
1155	if (vector_page == ARM_VECTORS_HIGH)
1156		cpuctrl |= CPU_CONTROL_VECRELOC;
1157
1158	/* Set the control register */
1159	ctrl = cpuctrl;
1160	cpu_control(0xffffffff, cpuctrl);
1161
1162	/* And again. */
1163	cpu_idcache_wbinv_all();
1164}
1165#endif	/* CPU_ARM9E || CPU_ARM10 */
1166
1167#if defined(CPU_ARM1176) \
1168 || defined(CPU_MV_PJ4B) \
1169 || defined(CPU_CORTEXA) || defined(CPU_KRAIT)
/*
 * Configure the performance-monitor cycle counter (CCNT).
 *
 * Optionally (when _PMC_USER_READ_WRITE_ is defined) grants userland
 * access to the counter registers, then enables the counters and
 * resets CCNT.  The ARM1176 uses different CP15 register encodings
 * (c15) than the ARMv7-style cores (c9), hence the two variants.
 */
static __inline void
cpu_scc_setup_ccnt(void)
{
/* This is how you give userland access to the CCNT and PMCn
 * registers.
 * BEWARE! This gives write access also, which may not be what
 * you want!
 */
#ifdef _PMC_USER_READ_WRITE_
#if defined(CPU_ARM1176)
	/* Use the Secure User and Non-secure Access Validation Control Register
	 * to allow userland access
	 */
	__asm volatile ("mcr	p15, 0, %0, c15, c9, 0\n\t"
			:
			: "r"(0x00000001));
#else
	/* Set PMUSERENR[0] to allow userland access */
	__asm volatile ("mcr	p15, 0, %0, c9, c14, 0\n\t"
			:
			: "r"(0x00000001));
#endif
#endif
#if defined(CPU_ARM1176)
	/* Set PMCR[2,0] to enable counters and reset CCNT */
	__asm volatile ("mcr	p15, 0, %0, c15, c12, 0\n\t"
			:
			: "r"(0x00000005));
#else
	/* Set up the PMCCNTR register as a cyclecounter:
	 * Set PMINTENCLR to 0xFFFFFFFF to block interrupts
	 * Set PMCR[2,0] to enable counters and reset CCNT
	 * Set PMCNTENSET to 0x80000000 to enable CCNT */
	__asm volatile ("mcr	p15, 0, %0, c9, c14, 2\n\t"
			"mcr	p15, 0, %1, c9, c12, 0\n\t"
			"mcr	p15, 0, %2, c9, c12, 1\n\t"
			:
			: "r"(0xFFFFFFFF),
			  "r"(0x00000005),
			  "r"(0x80000000));
#endif
}
1212#endif
1213
1214#if defined(CPU_ARM1176)
/*
 * Control-register setup for ARM1176 (ARM11 family) cores.
 *
 * Builds the desired SCTLR value, applies an auxiliary-control errata
 * workaround for ARM1176JZS parts, flushes the caches, enables
 * coprocessor access (so VFP detection can run later) and finally
 * installs the new control register and starts the cycle counter.
 */
void
arm11x6_setup(void)
{
	int cpuctrl, cpuctrl_wax;	/* wax = "write as existing" bits */
	uint32_t auxctrl, auxctrl_wax;
	uint32_t tmp, tmp2;		/* scratch for the RMW asm below */
	uint32_t sbz=0;			/* "should be zero" operand */
	uint32_t cpuid;

	cpuid = cpufunc_id();

	cpuctrl =
		CPU_CONTROL_MMU_ENABLE  |
		CPU_CONTROL_DC_ENABLE   |
		CPU_CONTROL_WBUF_ENABLE |
		CPU_CONTROL_32BP_ENABLE |
		CPU_CONTROL_32BD_ENABLE |
		CPU_CONTROL_LABT_ENABLE |
		CPU_CONTROL_SYST_ENABLE |
		CPU_CONTROL_IC_ENABLE;

	/*
	 * "write as existing" bits
	 * inverse of this is mask
	 */
	cpuctrl_wax =
		(3 << 30) | /* SBZ */
		(1 << 29) | /* FA */
		(1 << 28) | /* TR */
		(3 << 26) | /* SBZ */
		(3 << 19) | /* SBZ */
		(1 << 17);  /* SBZ */

	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;

#ifdef __ARMEB__
	/* Big-endian kernel. */
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Relocate vectors when the vector page lives at 0xffff0000. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* By default leave the auxiliary control register untouched. */
	auxctrl = 0;
	auxctrl_wax = ~0;

	/*
	 * Enable an errata workaround
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
		auxctrl = ARM1176_AUXCTL_PHD;
		auxctrl_wax = ~ARM1176_AUXCTL_PHD;
	}

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean.  */
	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));

	/* Allow detection code to find the VFP if it's fitted.  */
	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));

	/* Set the control register */
	ctrl = cpuctrl;
	cpu_control(~cpuctrl_wax, cpuctrl);

	/*
	 * Read-modify-write the auxiliary control register: keep the
	 * bits in auxctrl_wax, OR in auxctrl, and write back only if
	 * the value actually changed (mcrne).
	 */
	__asm volatile ("mrc	p15, 0, %0, c1, c0, 1\n\t"
			"and	%1, %0, %2\n\t"
			"orr	%1, %1, %3\n\t"
			"teq	%0, %1\n\t"
			"mcrne	p15, 0, %1, c1, c0, 1\n\t"
			: "=r"(tmp), "=r"(tmp2) :
			  "r"(auxctrl_wax), "r"(auxctrl));

	/* And again. */
	cpu_idcache_wbinv_all();

	cpu_scc_setup_ccnt();
}
1295#endif  /* CPU_ARM1176 */
1296
1297#ifdef CPU_MV_PJ4B
1298void
1299pj4bv7_setup(void)
1300{
1301	int cpuctrl;
1302
1303	pj4b_config();
1304
1305	cpuctrl = CPU_CONTROL_MMU_ENABLE;
1306#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1307	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1308#endif
1309	cpuctrl |= CPU_CONTROL_DC_ENABLE;
1310	cpuctrl |= (0xf << 3);
1311	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
1312	cpuctrl |= CPU_CONTROL_IC_ENABLE;
1313	if (vector_page == ARM_VECTORS_HIGH)
1314		cpuctrl |= CPU_CONTROL_VECRELOC;
1315	cpuctrl |= (0x5 << 16) | (1 < 22);
1316	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
1317
1318	/* Clear out the cache */
1319	cpu_idcache_wbinv_all();
1320
1321	/* Set the control register */
1322	ctrl = cpuctrl;
1323	cpu_control(0xFFFFFFFF, cpuctrl);
1324
1325	/* And again. */
1326	cpu_idcache_wbinv_all();
1327
1328	cpu_scc_setup_ccnt();
1329}
1330#endif /* CPU_MV_PJ4B */
1331
1332#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
1333
1334void
1335cortexa_setup(void)
1336{
1337	int cpuctrl, cpuctrlmask;
1338
1339	cpuctrlmask = CPU_CONTROL_MMU_ENABLE |     /* MMU enable         [0] */
1340	    CPU_CONTROL_AFLT_ENABLE |    /* Alignment fault    [1] */
1341	    CPU_CONTROL_DC_ENABLE |      /* DCache enable      [2] */
1342	    CPU_CONTROL_BPRD_ENABLE |    /* Branch prediction [11] */
1343	    CPU_CONTROL_IC_ENABLE |      /* ICache enable     [12] */
1344	    CPU_CONTROL_VECRELOC;        /* Vector relocation [13] */
1345
1346	cpuctrl = CPU_CONTROL_MMU_ENABLE |
1347	    CPU_CONTROL_IC_ENABLE |
1348	    CPU_CONTROL_DC_ENABLE |
1349	    CPU_CONTROL_BPRD_ENABLE;
1350
1351#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1352	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1353#endif
1354
1355	/* Switch to big endian */
1356#ifdef __ARMEB__
1357	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1358#endif
1359
1360	/* Check if the vector page is at the high address (0xffff0000) */
1361	if (vector_page == ARM_VECTORS_HIGH)
1362		cpuctrl |= CPU_CONTROL_VECRELOC;
1363
1364	/* Clear out the cache */
1365	cpu_idcache_wbinv_all();
1366
1367	/* Set the control register */
1368	ctrl = cpuctrl;
1369	cpu_control(cpuctrlmask, cpuctrl);
1370
1371	/* And again. */
1372	cpu_idcache_wbinv_all();
1373#ifdef SMP
1374	armv7_auxctrl((1 << 6) | (1 << 0), (1 << 6) | (1 << 0)); /* Enable SMP + TLB broadcasting  */
1375#endif
1376
1377	cpu_scc_setup_ccnt();
1378}
1379#endif  /* CPU_CORTEXA */
1380
1381#if defined(CPU_FA526) || defined(CPU_FA626TE)
1382void
1383fa526_setup(void)
1384{
1385	int cpuctrl, cpuctrlmask;
1386
1387	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1388		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1389		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1390		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1391		| CPU_CONTROL_BPRD_ENABLE;
1392	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1393		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1394		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1395		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1396		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1397		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1398		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1399
1400#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1401	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1402#endif
1403
1404#ifdef __ARMEB__
1405	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1406#endif
1407
1408	if (vector_page == ARM_VECTORS_HIGH)
1409		cpuctrl |= CPU_CONTROL_VECRELOC;
1410
1411	/* Clear out the cache */
1412	cpu_idcache_wbinv_all();
1413
1414	/* Set the control register */
1415	ctrl = cpuctrl;
1416	cpu_control(0xffffffff, cpuctrl);
1417}
1418#endif	/* CPU_FA526 || CPU_FA626TE */
1419
1420#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1421  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1422  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
/*
 * Control-register setup for Intel XScale cores (80200/80321/80219/
 * PXA2x0/IXP425/81342).  Enables the MMU, caches, write buffer and
 * branch prediction, then adjusts the auxiliary control register to
 * configure write coalescing (and, on core-3 parts, L2-related bits).
 */
void
xscale_setup(void)
{
	uint32_t auxctl;
	int cpuctrl, cpuctrlmask;

	/*
	 * The XScale Write Buffer is always enabled.  Our option
	 * is to enable/disable coalescing.  Note that bits 6:3
	 * must always be enabled.
	 */

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE;
	/*
	 * NOTE(review): cpuctrlmask is computed but unused here; the
	 * masked cpu_control() call below is commented out in favor of
	 * an all-ones mask.  Kept for documentation of the known bits.
	 */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
		 CPU_CONTROL_L2_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

#ifdef __ARMEB__
	/* Big-endian kernel. */
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Relocate vectors when the vector page lives at 0xffff0000. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;
#ifdef CPU_XSCALE_CORE3
	cpuctrl |= CPU_CONTROL_L2_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register.  Note that bits 6:3 must always
	 * be set to 1.
	 */
	ctrl = cpuctrl;
/*	cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/* Make sure write coalescing is turned on */
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
		: "=r" (auxctl));
#ifdef XSCALE_NO_COALESCE_WRITES
	auxctl |= XSCALE_AUXCTL_K;
#else
	auxctl &= ~XSCALE_AUXCTL_K;
#endif
#ifdef CPU_XSCALE_CORE3
	/* Core-3 specific auxiliary-control bits. */
	auxctl |= XSCALE_AUXCTL_LLR;
	auxctl |= XSCALE_AUXCTL_MD_MASK;
#endif
	/* Write the updated auxiliary control register back. */
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
		: : "r" (auxctl));
}
1489#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425
1490	   CPU_XSCALE_80219 */
1491