1/*	$NetBSD: cpufunc.c,v 1.65 2003/11/05 12:53:15 scw Exp $	*/
2
3/*-
4 * arm9 support code Copyright (C) 2001 ARM Ltd
5 * Copyright (c) 1997 Mark Brinicombe.
6 * Copyright (c) 1997 Causality Limited
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed by Causality Limited.
20 * 4. The name of Causality Limited may not be used to endorse or promote
21 *    products derived from this software without specific prior written
22 *    permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
25 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
28 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * RiscBSD kernel project
37 *
38 * cpufuncs.c
39 *
40 * C functions for supporting CPU / MMU / TLB specific operations.
41 *
42 * Created      : 30/01/97
43 */
44#include <sys/cdefs.h>
45__FBSDID("$FreeBSD: stable/10/sys/arm/arm/cpufunc.c 266311 2014-05-17 13:53:38Z ian $");
46
47#include <sys/param.h>
48#include <sys/systm.h>
49#include <sys/lock.h>
50#include <sys/mutex.h>
51#include <sys/bus.h>
52#include <machine/bus.h>
53#include <machine/cpu.h>
54#include <machine/disassem.h>
55
56#include <vm/vm.h>
57#include <vm/pmap.h>
58#include <vm/uma.h>
59
60#include <machine/cpuconf.h>
61#include <machine/cpufunc.h>
62#include <machine/bootconfig.h>
63
64#ifdef CPU_XSCALE_80200
65#include <arm/xscale/i80200/i80200reg.h>
66#include <arm/xscale/i80200/i80200var.h>
67#endif
68
69#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
70#include <arm/xscale/i80321/i80321reg.h>
71#include <arm/xscale/i80321/i80321var.h>
72#endif
73
74/*
75 * Some definitions in i81342reg.h clash with i80321reg.h.
76 * This only happens for the LINT kernel. As it happens,
77 * we don't need anything from i81342reg.h that we already
78 * got from somewhere else during a LINT compile.
79 */
80#if defined(CPU_XSCALE_81342) && !defined(COMPILING_LINT)
81#include <arm/xscale/i8134x/i81342reg.h>
82#endif
83
84#ifdef CPU_XSCALE_IXP425
85#include <arm/xscale/ixp425/ixp425reg.h>
86#include <arm/xscale/ixp425/ixp425var.h>
87#endif
88
89/* PRIMARY CACHE VARIABLES */
90int	arm_picache_size;
91int	arm_picache_line_size;
92int	arm_picache_ways;
93
94int	arm_pdcache_size;	/* and unified */
95int	arm_pdcache_line_size;
96int	arm_pdcache_ways;
97
98int	arm_pcache_type;
99int	arm_pcache_unified;
100
101int	arm_dcache_align;
102int	arm_dcache_align_mask;
103
104u_int	arm_cache_level;
105u_int	arm_cache_type[14];
106u_int	arm_cache_loc;
107
108/* 1 == use cpu_sleep(), 0 == don't */
109int cpu_do_powersave;
110int ctrl;
111
112#ifdef CPU_ARM9
113struct cpu_functions arm9_cpufuncs = {
114	/* CPU functions */
115
116	cpufunc_id,			/* id			*/
117	cpufunc_nullop,			/* cpwait		*/
118
119	/* MMU functions */
120
121	cpufunc_control,		/* control		*/
122	cpufunc_domains,		/* Domain		*/
123	arm9_setttb,			/* Setttb		*/
124	cpufunc_faultstatus,		/* Faultstatus		*/
125	cpufunc_faultaddress,		/* Faultaddress		*/
126
127	/* TLB functions */
128
129	armv4_tlb_flushID,		/* tlb_flushID		*/
130	arm9_tlb_flushID_SE,		/* tlb_flushID_SE	*/
131	armv4_tlb_flushI,		/* tlb_flushI		*/
132	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
133	armv4_tlb_flushD,		/* tlb_flushD		*/
134	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
135
136	/* Cache operations */
137
138	arm9_icache_sync_all,		/* icache_sync_all	*/
139	arm9_icache_sync_range,		/* icache_sync_range	*/
140
141	arm9_dcache_wbinv_all,		/* dcache_wbinv_all	*/
142	arm9_dcache_wbinv_range,	/* dcache_wbinv_range	*/
143	arm9_dcache_inv_range,		/* dcache_inv_range	*/
144	arm9_dcache_wb_range,		/* dcache_wb_range	*/
145
146	armv4_idcache_inv_all,		/* idcache_inv_all	*/
147	arm9_idcache_wbinv_all,		/* idcache_wbinv_all	*/
148	arm9_idcache_wbinv_range,	/* idcache_wbinv_range	*/
149	cpufunc_nullop,			/* l2cache_wbinv_all	*/
150	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
151	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
152	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
153
154	/* Other functions */
155
156	cpufunc_nullop,			/* flush_prefetchbuf	*/
157	armv4_drain_writebuf,		/* drain_writebuf	*/
158	cpufunc_nullop,			/* flush_brnchtgt_C	*/
159	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
160
161	(void *)cpufunc_nullop,		/* sleep		*/
162
163	/* Soft functions */
164
165	cpufunc_null_fixup,		/* dataabt_fixup	*/
166	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
167
168	arm9_context_switch,		/* context_switch	*/
169
170	arm9_setup			/* cpu setup		*/
171
172};
173#endif /* CPU_ARM9 */
174
175#if defined(CPU_ARM9E) || defined(CPU_ARM10)
176struct cpu_functions armv5_ec_cpufuncs = {
177	/* CPU functions */
178
179	cpufunc_id,			/* id			*/
180	cpufunc_nullop,			/* cpwait		*/
181
182	/* MMU functions */
183
184	cpufunc_control,		/* control		*/
185	cpufunc_domains,		/* Domain		*/
186	armv5_ec_setttb,		/* Setttb		*/
187	cpufunc_faultstatus,		/* Faultstatus		*/
188	cpufunc_faultaddress,		/* Faultaddress		*/
189
190	/* TLB functions */
191
192	armv4_tlb_flushID,		/* tlb_flushID		*/
193	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
194	armv4_tlb_flushI,		/* tlb_flushI		*/
195	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
196	armv4_tlb_flushD,		/* tlb_flushD		*/
197	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
198
199	/* Cache operations */
200
201	armv5_ec_icache_sync_all,	/* icache_sync_all	*/
202	armv5_ec_icache_sync_range,	/* icache_sync_range	*/
203
204	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
205	armv5_ec_dcache_wbinv_range,	/* dcache_wbinv_range	*/
206	armv5_ec_dcache_inv_range,	/* dcache_inv_range	*/
207	armv5_ec_dcache_wb_range,	/* dcache_wb_range	*/
208
209	armv4_idcache_inv_all,		/* idcache_inv_all	*/
210	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
211	armv5_ec_idcache_wbinv_range,	/* idcache_wbinv_range	*/
212
213	cpufunc_nullop,                 /* l2cache_wbinv_all    */
214	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
215	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
216	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
217
218	/* Other functions */
219
220	cpufunc_nullop,			/* flush_prefetchbuf	*/
221	armv4_drain_writebuf,		/* drain_writebuf	*/
222	cpufunc_nullop,			/* flush_brnchtgt_C	*/
223	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
224
225	(void *)cpufunc_nullop,		/* sleep		*/
226
227	/* Soft functions */
228
229	cpufunc_null_fixup,		/* dataabt_fixup	*/
230	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
231
232	arm10_context_switch,		/* context_switch	*/
233
234	arm10_setup			/* cpu setup		*/
235
236};
237
238struct cpu_functions sheeva_cpufuncs = {
239	/* CPU functions */
240
241	cpufunc_id,			/* id			*/
242	cpufunc_nullop,			/* cpwait		*/
243
244	/* MMU functions */
245
246	cpufunc_control,		/* control		*/
247	cpufunc_domains,		/* Domain		*/
248	sheeva_setttb,			/* Setttb		*/
249	cpufunc_faultstatus,		/* Faultstatus		*/
250	cpufunc_faultaddress,		/* Faultaddress		*/
251
252	/* TLB functions */
253
254	armv4_tlb_flushID,		/* tlb_flushID		*/
255	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
256	armv4_tlb_flushI,		/* tlb_flushI		*/
257	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
258	armv4_tlb_flushD,		/* tlb_flushD		*/
259	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
260
261	/* Cache operations */
262
263	armv5_ec_icache_sync_all,	/* icache_sync_all	*/
264	armv5_ec_icache_sync_range,	/* icache_sync_range	*/
265
266	armv5_ec_dcache_wbinv_all,	/* dcache_wbinv_all	*/
267	sheeva_dcache_wbinv_range,	/* dcache_wbinv_range	*/
268	sheeva_dcache_inv_range,	/* dcache_inv_range	*/
269	sheeva_dcache_wb_range,		/* dcache_wb_range	*/
270
271	armv4_idcache_inv_all,		/* idcache_inv_all	*/
272	armv5_ec_idcache_wbinv_all,	/* idcache_wbinv_all	*/
273	sheeva_idcache_wbinv_range,	/* idcache_wbinv_range	*/
274
275	sheeva_l2cache_wbinv_all,	/* l2cache_wbinv_all    */
276	sheeva_l2cache_wbinv_range,	/* l2cache_wbinv_range  */
277	sheeva_l2cache_inv_range,	/* l2cache_inv_range    */
278	sheeva_l2cache_wb_range,	/* l2cache_wb_range     */
279
280	/* Other functions */
281
282	cpufunc_nullop,			/* flush_prefetchbuf	*/
283	armv4_drain_writebuf,		/* drain_writebuf	*/
284	cpufunc_nullop,			/* flush_brnchtgt_C	*/
285	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
286
287	sheeva_cpu_sleep,		/* sleep		*/
288
289	/* Soft functions */
290
291	cpufunc_null_fixup,		/* dataabt_fixup	*/
292	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
293
294	arm10_context_switch,		/* context_switch	*/
295
296	arm10_setup			/* cpu setup		*/
297};
298#endif /* CPU_ARM9E || CPU_ARM10 */
299
300#ifdef CPU_ARM10
301struct cpu_functions arm10_cpufuncs = {
302	/* CPU functions */
303
304	cpufunc_id,			/* id			*/
305	cpufunc_nullop,			/* cpwait		*/
306
307	/* MMU functions */
308
309	cpufunc_control,		/* control		*/
310	cpufunc_domains,		/* Domain		*/
311	arm10_setttb,			/* Setttb		*/
312	cpufunc_faultstatus,		/* Faultstatus		*/
313	cpufunc_faultaddress,		/* Faultaddress		*/
314
315	/* TLB functions */
316
317	armv4_tlb_flushID,		/* tlb_flushID		*/
318	arm10_tlb_flushID_SE,		/* tlb_flushID_SE	*/
319	armv4_tlb_flushI,		/* tlb_flushI		*/
320	arm10_tlb_flushI_SE,		/* tlb_flushI_SE	*/
321	armv4_tlb_flushD,		/* tlb_flushD		*/
322	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
323
324	/* Cache operations */
325
326	arm10_icache_sync_all,		/* icache_sync_all	*/
327	arm10_icache_sync_range,	/* icache_sync_range	*/
328
329	arm10_dcache_wbinv_all,		/* dcache_wbinv_all	*/
330	arm10_dcache_wbinv_range,	/* dcache_wbinv_range	*/
331	arm10_dcache_inv_range,		/* dcache_inv_range	*/
332	arm10_dcache_wb_range,		/* dcache_wb_range	*/
333
334	armv4_idcache_inv_all,		/* idcache_inv_all	*/
335	arm10_idcache_wbinv_all,	/* idcache_wbinv_all	*/
336	arm10_idcache_wbinv_range,	/* idcache_wbinv_range	*/
337	cpufunc_nullop,			/* l2cache_wbinv_all	*/
338	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
339	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
340	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
341
342	/* Other functions */
343
344	cpufunc_nullop,			/* flush_prefetchbuf	*/
345	armv4_drain_writebuf,		/* drain_writebuf	*/
346	cpufunc_nullop,			/* flush_brnchtgt_C	*/
347	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
348
349	(void *)cpufunc_nullop,		/* sleep		*/
350
351	/* Soft functions */
352
353	cpufunc_null_fixup,		/* dataabt_fixup	*/
354	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
355
356	arm10_context_switch,		/* context_switch	*/
357
358	arm10_setup			/* cpu setup		*/
359
360};
361#endif /* CPU_ARM10 */
362
363#ifdef CPU_MV_PJ4B
364struct cpu_functions pj4bv7_cpufuncs = {
365	/* CPU functions */
366
367	cpufunc_id,			/* id			*/
368	arm11_drain_writebuf,		/* cpwait		*/
369
370	/* MMU functions */
371
372	cpufunc_control,		/* control		*/
373	cpufunc_domains,		/* Domain		*/
374	pj4b_setttb,			/* Setttb		*/
375	cpufunc_faultstatus,		/* Faultstatus		*/
376	cpufunc_faultaddress,		/* Faultaddress		*/
377
378	/* TLB functions */
379
380	armv7_tlb_flushID,		/* tlb_flushID		*/
381	armv7_tlb_flushID_SE,		/* tlb_flushID_SE	*/
382	armv7_tlb_flushID,		/* tlb_flushI		*/
383	armv7_tlb_flushID_SE,		/* tlb_flushI_SE	*/
384	armv7_tlb_flushID,		/* tlb_flushD		*/
385	armv7_tlb_flushID_SE,		/* tlb_flushD_SE	*/
386
387	/* Cache operations */
388	armv7_idcache_wbinv_all,	/* icache_sync_all	*/
389	armv7_icache_sync_range,	/* icache_sync_range	*/
390
391	armv7_dcache_wbinv_all,		/* dcache_wbinv_all	*/
392	armv7_dcache_wbinv_range,	/* dcache_wbinv_range	*/
393	armv7_dcache_inv_range,		/* dcache_inv_range	*/
394	armv7_dcache_wb_range,		/* dcache_wb_range	*/
395
396	armv7_idcache_inv_all,		/* idcache_inv_all	*/
397	armv7_idcache_wbinv_all,	/* idcache_wbinv_all	*/
398	armv7_idcache_wbinv_range,	/* idcache_wbinv_range	*/
399
400	(void *)cpufunc_nullop,		/* l2cache_wbinv_all	*/
401	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
402	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
403	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
404
405	/* Other functions */
406
407	pj4b_drain_readbuf,		/* flush_prefetchbuf	*/
408	arm11_drain_writebuf,		/* drain_writebuf	*/
409	pj4b_flush_brnchtgt_all,	/* flush_brnchtgt_C	*/
410	pj4b_flush_brnchtgt_va,		/* flush_brnchtgt_E	*/
411
412	(void *)cpufunc_nullop,		/* sleep		*/
413
414	/* Soft functions */
415
416	cpufunc_null_fixup,		/* dataabt_fixup	*/
417	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
418
419	arm11_context_switch,		/* context_switch	*/
420
421	pj4bv7_setup			/* cpu setup		*/
422};
423#endif /* CPU_MV_PJ4B */
424
425#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
426  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
427  defined(CPU_XSCALE_80219)
428
429struct cpu_functions xscale_cpufuncs = {
430	/* CPU functions */
431
432	cpufunc_id,			/* id			*/
433	xscale_cpwait,			/* cpwait		*/
434
435	/* MMU functions */
436
437	xscale_control,			/* control		*/
438	cpufunc_domains,		/* domain		*/
439	xscale_setttb,			/* setttb		*/
440	cpufunc_faultstatus,		/* faultstatus		*/
441	cpufunc_faultaddress,		/* faultaddress		*/
442
443	/* TLB functions */
444
445	armv4_tlb_flushID,		/* tlb_flushID		*/
446	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
447	armv4_tlb_flushI,		/* tlb_flushI		*/
448	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
449	armv4_tlb_flushD,		/* tlb_flushD		*/
450	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
451
452	/* Cache operations */
453
454	xscale_cache_syncI,		/* icache_sync_all	*/
455	xscale_cache_syncI_rng,		/* icache_sync_range	*/
456
457	xscale_cache_purgeD,		/* dcache_wbinv_all	*/
458	xscale_cache_purgeD_rng,	/* dcache_wbinv_range	*/
459	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
460	xscale_cache_cleanD_rng,	/* dcache_wb_range	*/
461
462	xscale_cache_flushID,		/* idcache_inv_all	*/
463	xscale_cache_purgeID,		/* idcache_wbinv_all	*/
464	xscale_cache_purgeID_rng,	/* idcache_wbinv_range	*/
465	cpufunc_nullop,			/* l2cache_wbinv_all 	*/
466	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
467	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
468	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
469
470	/* Other functions */
471
472	cpufunc_nullop,			/* flush_prefetchbuf	*/
473	armv4_drain_writebuf,		/* drain_writebuf	*/
474	cpufunc_nullop,			/* flush_brnchtgt_C	*/
475	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
476
477	xscale_cpu_sleep,		/* sleep		*/
478
479	/* Soft functions */
480
481	cpufunc_null_fixup,		/* dataabt_fixup	*/
482	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
483
484	xscale_context_switch,		/* context_switch	*/
485
486	xscale_setup			/* cpu setup		*/
487};
488#endif
489/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 ||
490   CPU_XSCALE_80219 */
491
492#ifdef CPU_XSCALE_81342
493struct cpu_functions xscalec3_cpufuncs = {
494	/* CPU functions */
495
496	cpufunc_id,			/* id			*/
497	xscale_cpwait,			/* cpwait		*/
498
499	/* MMU functions */
500
501	xscale_control,			/* control		*/
502	cpufunc_domains,		/* domain		*/
503	xscalec3_setttb,		/* setttb		*/
504	cpufunc_faultstatus,		/* faultstatus		*/
505	cpufunc_faultaddress,		/* faultaddress		*/
506
507	/* TLB functions */
508
509	armv4_tlb_flushID,		/* tlb_flushID		*/
510	xscale_tlb_flushID_SE,		/* tlb_flushID_SE	*/
511	armv4_tlb_flushI,		/* tlb_flushI		*/
512	(void *)armv4_tlb_flushI,	/* tlb_flushI_SE	*/
513	armv4_tlb_flushD,		/* tlb_flushD		*/
514	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
515
516	/* Cache operations */
517
518	xscalec3_cache_syncI,		/* icache_sync_all	*/
519	xscalec3_cache_syncI_rng,	/* icache_sync_range	*/
520
521	xscalec3_cache_purgeD,		/* dcache_wbinv_all	*/
522	xscalec3_cache_purgeD_rng,	/* dcache_wbinv_range	*/
523	xscale_cache_flushD_rng,	/* dcache_inv_range	*/
524	xscalec3_cache_cleanD_rng,	/* dcache_wb_range	*/
525
526	xscale_cache_flushID,		/* idcache_inv_all	*/
527	xscalec3_cache_purgeID,		/* idcache_wbinv_all	*/
528	xscalec3_cache_purgeID_rng,	/* idcache_wbinv_range	*/
529	xscalec3_l2cache_purge,		/* l2cache_wbinv_all	*/
530	xscalec3_l2cache_purge_rng,	/* l2cache_wbinv_range	*/
531	xscalec3_l2cache_flush_rng,	/* l2cache_inv_range	*/
532	xscalec3_l2cache_clean_rng,	/* l2cache_wb_range	*/
533
534	/* Other functions */
535
536	cpufunc_nullop,			/* flush_prefetchbuf	*/
537	armv4_drain_writebuf,		/* drain_writebuf	*/
538	cpufunc_nullop,			/* flush_brnchtgt_C	*/
539	(void *)cpufunc_nullop,		/* flush_brnchtgt_E	*/
540
541	xscale_cpu_sleep,		/* sleep		*/
542
543	/* Soft functions */
544
545	cpufunc_null_fixup,		/* dataabt_fixup	*/
546	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
547
548	xscalec3_context_switch,	/* context_switch	*/
549
550	xscale_setup			/* cpu setup		*/
551};
552#endif /* CPU_XSCALE_81342 */
553
554
555#if defined(CPU_FA526) || defined(CPU_FA626TE)
556struct cpu_functions fa526_cpufuncs = {
557	/* CPU functions */
558
559	cpufunc_id,			/* id			*/
560	cpufunc_nullop,			/* cpwait		*/
561
562	/* MMU functions */
563
564	cpufunc_control,		/* control		*/
565	cpufunc_domains,		/* domain		*/
566	fa526_setttb,			/* setttb		*/
567	cpufunc_faultstatus,		/* faultstatus		*/
568	cpufunc_faultaddress,		/* faultaddress		*/
569
570	/* TLB functions */
571
572	armv4_tlb_flushID,		/* tlb_flushID		*/
573	fa526_tlb_flushID_SE,		/* tlb_flushID_SE	*/
574	armv4_tlb_flushI,		/* tlb_flushI		*/
575	fa526_tlb_flushI_SE,		/* tlb_flushI_SE	*/
576	armv4_tlb_flushD,		/* tlb_flushD		*/
577	armv4_tlb_flushD_SE,		/* tlb_flushD_SE	*/
578
579	/* Cache operations */
580
581	fa526_icache_sync_all,		/* icache_sync_all	*/
582	fa526_icache_sync_range,	/* icache_sync_range	*/
583
584	fa526_dcache_wbinv_all,		/* dcache_wbinv_all	*/
585	fa526_dcache_wbinv_range,	/* dcache_wbinv_range	*/
586	fa526_dcache_inv_range,		/* dcache_inv_range	*/
587	fa526_dcache_wb_range,		/* dcache_wb_range	*/
588
589	armv4_idcache_inv_all,		/* idcache_inv_all	*/
590	fa526_idcache_wbinv_all,	/* idcache_wbinv_all	*/
591	fa526_idcache_wbinv_range,	/* idcache_wbinv_range	*/
592	cpufunc_nullop,			/* l2cache_wbinv_all	*/
593	(void *)cpufunc_nullop,		/* l2cache_wbinv_range	*/
594	(void *)cpufunc_nullop,		/* l2cache_inv_range	*/
595	(void *)cpufunc_nullop,		/* l2cache_wb_range	*/
596
597	/* Other functions */
598
599	fa526_flush_prefetchbuf,	/* flush_prefetchbuf	*/
600	armv4_drain_writebuf,		/* drain_writebuf	*/
601	cpufunc_nullop,			/* flush_brnchtgt_C	*/
602	fa526_flush_brnchtgt_E,		/* flush_brnchtgt_E	*/
603
604	fa526_cpu_sleep,		/* sleep		*/
605
606	/* Soft functions */
607
608	cpufunc_null_fixup,		/* dataabt_fixup	*/
609	cpufunc_null_fixup,		/* prefetchabt_fixup	*/
610
611	fa526_context_switch,		/* context_switch	*/
612
613	fa526_setup			/* cpu setup 		*/
614};
615#endif	/* CPU_FA526 || CPU_FA626TE */
616
617#if defined(CPU_ARM1136)
618struct cpu_functions arm1136_cpufuncs = {
619	/* CPU functions */
620
621	cpufunc_id,                     /* id                   */
622	cpufunc_nullop,                 /* cpwait               */
623
624	/* MMU functions */
625
626	cpufunc_control,                /* control              */
627	cpufunc_domains,                /* Domain               */
628	arm11x6_setttb,                 /* Setttb               */
629	cpufunc_faultstatus,            /* Faultstatus          */
630	cpufunc_faultaddress,           /* Faultaddress         */
631
632	/* TLB functions */
633
634	arm11_tlb_flushID,              /* tlb_flushID          */
635	arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
636	arm11_tlb_flushI,               /* tlb_flushI           */
637	arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
638	arm11_tlb_flushD,               /* tlb_flushD           */
639	arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */
640
641	/* Cache operations */
642
643	arm11x6_icache_sync_all,        /* icache_sync_all      */
644	arm11x6_icache_sync_range,      /* icache_sync_range    */
645
646	arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
647	armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
648	armv6_dcache_inv_range,         /* dcache_inv_range     */
649	armv6_dcache_wb_range,          /* dcache_wb_range      */
650
651	armv6_idcache_inv_all,		/* idcache_inv_all	*/
652	arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
653	arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */
654
655	(void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
656	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
657	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
658	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
659
660	/* Other functions */
661
662	arm11x6_flush_prefetchbuf,      /* flush_prefetchbuf    */
663	arm11_drain_writebuf,           /* drain_writebuf       */
664	cpufunc_nullop,                 /* flush_brnchtgt_C     */
665	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
666
667	arm11_sleep,                  	/* sleep                */
668
669	/* Soft functions */
670
671	cpufunc_null_fixup,             /* dataabt_fixup        */
672	cpufunc_null_fixup,             /* prefetchabt_fixup    */
673
674	arm11_context_switch,           /* context_switch       */
675
676	arm11x6_setup                   /* cpu setup            */
677};
678#endif /* CPU_ARM1136 */
679#if defined(CPU_ARM1176)
680struct cpu_functions arm1176_cpufuncs = {
681	/* CPU functions */
682
683	cpufunc_id,                     /* id                   */
684	cpufunc_nullop,                 /* cpwait               */
685
686	/* MMU functions */
687
688	cpufunc_control,                /* control              */
689	cpufunc_domains,                /* Domain               */
690	arm11x6_setttb,                 /* Setttb               */
691	cpufunc_faultstatus,            /* Faultstatus          */
692	cpufunc_faultaddress,           /* Faultaddress         */
693
694	/* TLB functions */
695
696	arm11_tlb_flushID,              /* tlb_flushID          */
697	arm11_tlb_flushID_SE,           /* tlb_flushID_SE       */
698	arm11_tlb_flushI,               /* tlb_flushI           */
699	arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
700	arm11_tlb_flushD,               /* tlb_flushD           */
701	arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */
702
703	/* Cache operations */
704
705	arm11x6_icache_sync_all,        /* icache_sync_all      */
706	arm11x6_icache_sync_range,      /* icache_sync_range    */
707
708	arm11x6_dcache_wbinv_all,       /* dcache_wbinv_all     */
709	armv6_dcache_wbinv_range,       /* dcache_wbinv_range   */
710	armv6_dcache_inv_range,         /* dcache_inv_range     */
711	armv6_dcache_wb_range,          /* dcache_wb_range      */
712
713	armv6_idcache_inv_all,		/* idcache_inv_all	*/
714	arm11x6_idcache_wbinv_all,      /* idcache_wbinv_all    */
715	arm11x6_idcache_wbinv_range,    /* idcache_wbinv_range  */
716
717	(void *)cpufunc_nullop,         /* l2cache_wbinv_all    */
718	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
719	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
720	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
721
722	/* Other functions */
723
724	arm11x6_flush_prefetchbuf,      /* flush_prefetchbuf    */
725	arm11_drain_writebuf,           /* drain_writebuf       */
726	cpufunc_nullop,                 /* flush_brnchtgt_C     */
727	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
728
729	arm11x6_sleep,                  /* sleep                */
730
731	/* Soft functions */
732
733	cpufunc_null_fixup,             /* dataabt_fixup        */
734	cpufunc_null_fixup,             /* prefetchabt_fixup    */
735
736	arm11_context_switch,           /* context_switch       */
737
738	arm11x6_setup                   /* cpu setup            */
739};
740#endif /* CPU_ARM1176 */
741
742#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
743struct cpu_functions cortexa_cpufuncs = {
744	/* CPU functions */
745
746	cpufunc_id,                     /* id                   */
747	cpufunc_nullop,                 /* cpwait               */
748
749	/* MMU functions */
750
751	cpufunc_control,                /* control              */
752	cpufunc_domains,                /* Domain               */
753	armv7_setttb,                   /* Setttb               */
754	cpufunc_faultstatus,            /* Faultstatus          */
755	cpufunc_faultaddress,           /* Faultaddress         */
756
757	/* TLB functions */
758
759	armv7_tlb_flushID,              /* tlb_flushID          */
760	armv7_tlb_flushID_SE,           /* tlb_flushID_SE       */
761	arm11_tlb_flushI,               /* tlb_flushI           */
762	arm11_tlb_flushI_SE,            /* tlb_flushI_SE        */
763	arm11_tlb_flushD,               /* tlb_flushD           */
764	arm11_tlb_flushD_SE,            /* tlb_flushD_SE        */
765
766	/* Cache operations */
767
768	armv7_idcache_wbinv_all,         /* icache_sync_all      */
769	armv7_icache_sync_range,        /* icache_sync_range    */
770
771	armv7_dcache_wbinv_all,         /* dcache_wbinv_all     */
772	armv7_dcache_wbinv_range,       /* dcache_wbinv_range   */
773	armv7_dcache_inv_range,         /* dcache_inv_range     */
774	armv7_dcache_wb_range,          /* dcache_wb_range      */
775
776	armv7_idcache_inv_all,		/* idcache_inv_all	*/
777	armv7_idcache_wbinv_all,        /* idcache_wbinv_all    */
778	armv7_idcache_wbinv_range,      /* idcache_wbinv_range  */
779
780	/*
781	 * Note: For CPUs using the PL310 the L2 ops are filled in when the
782	 * L2 cache controller is actually enabled.
783	 */
784	cpufunc_nullop,                 /* l2cache_wbinv_all    */
785	(void *)cpufunc_nullop,         /* l2cache_wbinv_range  */
786	(void *)cpufunc_nullop,         /* l2cache_inv_range    */
787	(void *)cpufunc_nullop,         /* l2cache_wb_range     */
788
789	/* Other functions */
790
791	cpufunc_nullop,                 /* flush_prefetchbuf    */
792	armv7_drain_writebuf,           /* drain_writebuf       */
793	cpufunc_nullop,                 /* flush_brnchtgt_C     */
794	(void *)cpufunc_nullop,         /* flush_brnchtgt_E     */
795
796	armv7_sleep,                    /* sleep                */
797
798	/* Soft functions */
799
800	cpufunc_null_fixup,             /* dataabt_fixup        */
801	cpufunc_null_fixup,             /* prefetchabt_fixup    */
802
803	armv7_context_switch,           /* context_switch       */
804
805	cortexa_setup                     /* cpu setup            */
806};
807#endif /* CPU_CORTEXA || CPU_KRAIT */
808
809/*
810 * Global constants also used by locore.s
811 */
812
813struct cpu_functions cpufuncs;
814u_int cputype;
815u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */
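
/*
 * cputype and cpufuncs are filled in by set_cpufuncs() below, which
 * also sets cpu_reset_needs_v4_MMU_disable for the ARMv4-and-later
 * cores it recognizes; the reset code in locore.s checks that flag.
 */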
816
817#if defined(CPU_ARM9) ||	\
818  defined (CPU_ARM9E) || defined (CPU_ARM10) || defined (CPU_ARM1136) ||	\
819  defined(CPU_ARM1176) || defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||		\
820  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
821  defined(CPU_FA526) || defined(CPU_FA626TE) || defined(CPU_MV_PJ4B) ||			\
822  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
823  defined(CPU_CORTEXA) || defined(CPU_KRAIT)
824
825static void get_cachetype_cp15(void);
826
827/* Additional cache information local to this file.  Log2 of some of the
828   above numbers.  */
829static int	arm_dcache_l2_nsets;
830static int	arm_dcache_l2_assoc;
831static int	arm_dcache_l2_linesize;
832
833static void
834get_cachetype_cp15()
835{
836	u_int ctype, isize, dsize, cpuid;
837	u_int clevel, csize, i, sel;
838	u_int multiplier;
839	u_char type;
840
841	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
842		: "=r" (ctype));
843
844	cpuid = cpufunc_id();
845	/*
846	 * ...and thus spake the ARM ARM:
847	 *
848	 * If an <opcode2> value corresponding to an unimplemented or
849	 * reserved ID register is encountered, the System Control
850	 * processor returns the value of the main ID register.
851	 */
852	if (ctype == cpuid)
853		goto out;
854
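	/*
	 * ARMv7 format: walk the CLIDR, which holds one 3-bit cache-type
	 * field per level for up to 7 levels.  For every level with a
	 * data or unified cache, select it through CSSELR and read its
	 * CCSIDR to record the geometry; the D-cache line length derived
	 * from the CCSIDR (1 << (LineSize + 4) bytes) sets
	 * arm_dcache_align.
	 */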
855	if (CPU_CT_FORMAT(ctype) == CPU_CT_ARMV7) {
856		__asm __volatile("mrc p15, 1, %0, c0, c0, 1"
857		    : "=r" (clevel));
858		arm_cache_level = clevel;
859		arm_cache_loc = CPU_CLIDR_LOC(arm_cache_level);
860		i = 0;
861		while ((type = (clevel & 0x7)) && i < 7) {
862			if (type == CACHE_DCACHE || type == CACHE_UNI_CACHE ||
863			    type == CACHE_SEP_CACHE) {
864				sel = i << 1;
865				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
866				    : : "r" (sel));
867				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
868				    : "=r" (csize));
869				arm_cache_type[sel] = csize;
870				arm_dcache_align = 1 <<
871				    (CPUV7_CT_xSIZE_LEN(csize) + 4);
872				arm_dcache_align_mask = arm_dcache_align - 1;
873			}
874			if (type == CACHE_ICACHE || type == CACHE_SEP_CACHE) {
875				sel = (i << 1) | 1;
876				__asm __volatile("mcr p15, 2, %0, c0, c0, 0"
877				    : : "r" (sel));
878				__asm __volatile("mrc p15, 1, %0, c0, c0, 0"
879				    : "=r" (csize));
880				arm_cache_type[sel] = csize;
881			}
882			i++;
883			clevel >>= 3;
884		}
885	} else {
886		if ((ctype & CPU_CT_S) == 0)
887			arm_pcache_unified = 1;
888
889		/*
890		 * If you want to know how this code works, go read the ARM ARM.
891		 */
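		/*
		 * Short version: the pre-v7 cache type register encodes,
		 * for each of the I and D caches, size, associativity and
		 * line-length exponents plus an 'M' multiplier bit (x1 vs
		 * x1.5).  An associativity field of 0 means direct-mapped
		 * when M is clear and "cache absent" when M is set, which
		 * is what the special cases below test for.
		 */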
892
893		arm_pcache_type = CPU_CT_CTYPE(ctype);
894
895		if (arm_pcache_unified == 0) {
896			isize = CPU_CT_ISIZE(ctype);
897			multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
898			arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
899			if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
900				if (isize & CPU_CT_xSIZE_M)
901					arm_picache_line_size = 0; /* not present */
902				else
903					arm_picache_ways = 1;
904			} else {
905				arm_picache_ways = multiplier <<
906				    (CPU_CT_xSIZE_ASSOC(isize) - 1);
907			}
908			arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
909		}
910
911		dsize = CPU_CT_DSIZE(ctype);
912		multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
913		arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
914		if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
915			if (dsize & CPU_CT_xSIZE_M)
916				arm_pdcache_line_size = 0; /* not present */
917			else
918				arm_pdcache_ways = 1;
919		} else {
920			arm_pdcache_ways = multiplier <<
921			    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
922		}
923		arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);
924
925		arm_dcache_align = arm_pdcache_line_size;
926
927		arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
928		arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
929		arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
930		    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);
931
932	out:
933		arm_dcache_align_mask = arm_dcache_align - 1;
934	}
935}
936#endif /* ARM9 || ARM9E || ARM10 || ARM11 || XSCALE || FA526 || FA626TE || PJ4B || CORTEXA || KRAIT */
937
938/*
939 * Cannot panic here as we may not have a console yet ...
940 */
941
942int
943set_cpufuncs()
944{
945	cputype = cpufunc_id();
946	cputype &= CPU_ID_CPU_MASK;
947
948	/*
949	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
950	 * CPU type where we want to use it by default, then we set it.
951	 */
952
953#ifdef CPU_ARM9
954	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
955	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
956	    (cputype & 0x0000f000) == 0x00009000) {
957		cpufuncs = arm9_cpufuncs;
958		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
959		get_cachetype_cp15();
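		/*
		 * Precompute the operand layout for the ARM9 clean/
		 * invalidate D-cache by set/way operations: the set index
		 * advances in line-size steps while the way index lives in
		 * the top bits of the operand, hence the
		 * 1 << (32 - log2(ways)) increment.
		 */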
960		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
961		arm9_dcache_sets_max = (1U << (arm_dcache_l2_linesize +
962		    arm_dcache_l2_nsets)) - arm9_dcache_sets_inc;
963		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
964		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
965#ifdef ARM9_CACHE_WRITE_THROUGH
966		pmap_pte_init_arm9();
967#else
968		pmap_pte_init_generic();
969#endif
970		goto out;
971	}
972#endif /* CPU_ARM9 */
973#if defined(CPU_ARM9E) || defined(CPU_ARM10)
974	if (cputype == CPU_ID_MV88FR131 || cputype == CPU_ID_MV88FR571_VD ||
975	    cputype == CPU_ID_MV88FR571_41) {
976		uint32_t sheeva_ctrl;
977
978		sheeva_ctrl = (MV_DC_STREAM_ENABLE | MV_BTB_DISABLE |
979		    MV_L2_ENABLE);
980		/*
981		 * Workaround for Marvell MV78100 CPU: Cache prefetch
982		 * mechanism may affect the cache coherency validity,
983		 * so it needs to be disabled.
984		 *
985		 * Refer to errata document MV-S501058-00C.pdf (p. 3.1
986		 * L2 Prefetching Mechanism) for details.
987		 */
988		if (cputype == CPU_ID_MV88FR571_VD ||
989		    cputype == CPU_ID_MV88FR571_41)
990			sheeva_ctrl |= MV_L2_PREFETCH_DISABLE;
991
992		sheeva_control_ext(0xffffffff & ~MV_WA_ENABLE, sheeva_ctrl);
993
994		cpufuncs = sheeva_cpufuncs;
995		get_cachetype_cp15();
996		pmap_pte_init_generic();
997		goto out;
998	} else if (cputype == CPU_ID_ARM926EJS || cputype == CPU_ID_ARM1026EJS) {
999		cpufuncs = armv5_ec_cpufuncs;
1000		get_cachetype_cp15();
1001		pmap_pte_init_generic();
1002		goto out;
1003	}
1004#endif /* CPU_ARM9E || CPU_ARM10 */
1005#ifdef CPU_ARM10
1006	if (/* cputype == CPU_ID_ARM1020T || */
1007	    cputype == CPU_ID_ARM1020E) {
1008		/*
1009		 * Select write-through caching (this isn't really an
1010		 * option on ARM1020T).
1011		 */
1012		cpufuncs = arm10_cpufuncs;
1013		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
1014		get_cachetype_cp15();
1015		arm10_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
1016		arm10_dcache_sets_max =
1017		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
1018		    arm10_dcache_sets_inc;
1019		arm10_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
1020		arm10_dcache_index_max = 0U - arm10_dcache_index_inc;
1021		pmap_pte_init_generic();
1022		goto out;
1023	}
1024#endif /* CPU_ARM10 */
1025#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
1026	if (cputype == CPU_ID_ARM1136JS
1027	    || cputype == CPU_ID_ARM1136JSR1
1028	    || cputype == CPU_ID_ARM1176JZS) {
1029#ifdef CPU_ARM1136
1030		if (cputype == CPU_ID_ARM1136JS
1031		    || cputype == CPU_ID_ARM1136JSR1)
1032			cpufuncs = arm1136_cpufuncs;
1033#endif
1034#ifdef CPU_ARM1176
1035		if (cputype == CPU_ID_ARM1176JZS)
1036			cpufuncs = arm1176_cpufuncs;
1037#endif
1038		cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
1039		get_cachetype_cp15();
1040
1041		pmap_pte_init_mmu_v6();
1042
1043		goto out;
1044	}
1045#endif /* CPU_ARM1136 || CPU_ARM1176 */
1046#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
1047	if (cputype == CPU_ID_CORTEXA7 ||
1048	    cputype == CPU_ID_CORTEXA8R1 ||
1049	    cputype == CPU_ID_CORTEXA8R2 ||
1050	    cputype == CPU_ID_CORTEXA8R3 ||
1051	    cputype == CPU_ID_CORTEXA9R1 ||
1052	    cputype == CPU_ID_CORTEXA9R2 ||
1053	    cputype == CPU_ID_CORTEXA9R3 ||
1054	    cputype == CPU_ID_CORTEXA15 ||
1055	    cputype == CPU_ID_KRAIT ) {
1056		cpufuncs = cortexa_cpufuncs;
1057		cpu_reset_needs_v4_MMU_disable = 1;     /* V4 or higher */
1058		get_cachetype_cp15();
1059
1060		pmap_pte_init_mmu_v6();
1061		/* Use powersave on this CPU. */
1062		cpu_do_powersave = 1;
1063		goto out;
1064	}
1065#endif /* CPU_CORTEXA || CPU_KRAIT */
1066
1067#if defined(CPU_MV_PJ4B)
1068	if (cputype == CPU_ID_MV88SV581X_V7 ||
1069	    cputype == CPU_ID_MV88SV584X_V7 ||
1070	    cputype == CPU_ID_ARM_88SV581X_V7) {
1071		cpufuncs = pj4bv7_cpufuncs;
1072		get_cachetype_cp15();
1073		pmap_pte_init_mmu_v6();
1074		goto out;
1075	}
1076#endif /* CPU_MV_PJ4B */
1077
1078#if defined(CPU_FA526) || defined(CPU_FA626TE)
1079	if (cputype == CPU_ID_FA526 || cputype == CPU_ID_FA626TE) {
1080		cpufuncs = fa526_cpufuncs;
1081		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it	*/
1082		get_cachetype_cp15();
1083		pmap_pte_init_generic();
1084
1085		/* Use powersave on this CPU. */
1086		cpu_do_powersave = 1;
1087
1088		goto out;
1089	}
1090#endif	/* CPU_FA526 || CPU_FA626TE */
1091
1092#ifdef CPU_XSCALE_80200
1093	if (cputype == CPU_ID_80200) {
1094		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;
1095
1096		i80200_icu_init();
1097
1098#if defined(XSCALE_CCLKCFG)
1099		/*
1100		 * Crank CCLKCFG to maximum legal value.
1101		 */
1102		__asm __volatile ("mcr p14, 0, %0, c6, c0, 0"
1103			:
1104			: "r" (XSCALE_CCLKCFG));
1105#endif
1106
1107		/*
1108		 * XXX Disable ECC in the Bus Controller Unit; we
1109		 * don't really support it, yet.  Clear any pending
1110		 * error indications.
1111		 */
1112		__asm __volatile("mcr p13, 0, %0, c0, c1, 0"
1113			:
1114			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));
1115
1116		cpufuncs = xscale_cpufuncs;
1117		/*
1118		 * i80200 errata: Step-A0 and A1 have a bug where
1119		 * D$ dirty bits are not cleared on "invalidate by
1120		 * address".
1121		 *
1122		 * Workaround: Clean cache line before invalidating.
1123		 */
1124		if (rev == 0 || rev == 1)
1125			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;
1126
1127		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1128		get_cachetype_cp15();
1129		pmap_pte_init_xscale();
1130		goto out;
1131	}
1132#endif /* CPU_XSCALE_80200 */
1133#if defined(CPU_XSCALE_80321) || defined(CPU_XSCALE_80219)
1134	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
1135	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
1136	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
1137		cpufuncs = xscale_cpufuncs;
1138		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1139		get_cachetype_cp15();
1140		pmap_pte_init_xscale();
1141		goto out;
1142	}
1143#endif /* CPU_XSCALE_80321 */
1144
1145#if defined(CPU_XSCALE_81342)
1146	if (cputype == CPU_ID_81342) {
1147		cpufuncs = xscalec3_cpufuncs;
1148		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1149		get_cachetype_cp15();
1150		pmap_pte_init_xscale();
1151		goto out;
1152	}
1153#endif /* CPU_XSCALE_81342 */
1154#ifdef CPU_XSCALE_PXA2X0
1155	/* ignore core revision to test PXA2xx CPUs */
1156	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
1157	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
1158	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {
1159
1160		cpufuncs = xscale_cpufuncs;
1161		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1162		get_cachetype_cp15();
1163		pmap_pte_init_xscale();
1164
1165		/* Use powersave on this CPU. */
1166		cpu_do_powersave = 1;
1167
1168		goto out;
1169	}
1170#endif /* CPU_XSCALE_PXA2X0 */
1171#ifdef CPU_XSCALE_IXP425
1172	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
1173            cputype == CPU_ID_IXP425_266 || cputype == CPU_ID_IXP435) {
1174
1175		cpufuncs = xscale_cpufuncs;
1176		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
1177		get_cachetype_cp15();
1178		pmap_pte_init_xscale();
1179
1180		goto out;
1181	}
1182#endif /* CPU_XSCALE_IXP425 */
1183	/*
1184	 * Bzzzz. And the answer was ...
1185	 */
1186	panic("No support for this CPU type (%08x) in kernel", cputype);
1187	return(ARCHITECTURE_NOT_PRESENT);
1188out:
1189	uma_set_align(arm_dcache_align_mask);
1190	return (0);
1191}
1192
1193/*
1194 * Fixup routines for data and prefetch aborts.
1195 *
1196 * Several compile time symbols are used
1197 *
1198 * DEBUG_FAULT_CORRECTION - Print debugging information during the
1199 * correction of registers after a fault.
1200 */
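
/*
 * Every cpu_functions table in this file points both abort fixup hooks
 * at cpufunc_null_fixup, so no register correction is actually performed
 * after a data or prefetch abort on the CPUs supported here.
 */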
1201
1202
1203/*
1204 * Null abort fixup routine.
1205 * For use when no fixup is required.
1206 */
1207int
1208cpufunc_null_fixup(arg)
1209	void *arg;
1210{
1211	return(ABORT_FIXUP_OK);
1212}
1213
1214/*
1215 * CPU Setup code
1216 */
1217
1218#if defined (CPU_ARM9) || \
1219  defined(CPU_ARM9E) || \
1220  defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) ||		\
1221  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) ||		\
1222  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342) || \
1223  defined(CPU_ARM10) ||  defined(CPU_ARM1136) || defined(CPU_ARM1176) ||\
1224  defined(CPU_FA526) || defined(CPU_FA626TE)
1225
1226#define IGN	0
1227#define OR	1
1228#define BIC	2
1229
1230struct cpu_option {
1231	char	*co_name;
1232	int	co_falseop;
1233	int	co_trueop;
1234	int	co_value;
1235};
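
/*
 * parse_cpu_options() walks a table of these entries against the boot
 * argument string: when an option is present as a boolean, the control
 * register bits in co_value are either OR'd in or BIC'd out according
 * to co_trueop/co_falseop.  For example, with the usual entry
 * { "cpu.nocache", OR, BIC, ... } the cache-enable bits are cleared
 * for "cpu.nocache=1" and set for "cpu.nocache=0".
 */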
1236
1237static u_int parse_cpu_options(char *, struct cpu_option *, u_int);
1238
1239static u_int
1240parse_cpu_options(args, optlist, cpuctrl)
1241	char *args;
1242	struct cpu_option *optlist;
1243	u_int cpuctrl;
1244{
1245	int integer;
1246
1247	if (args == NULL)
1248		return(cpuctrl);
1249
1250	while (optlist->co_name) {
1251		if (get_bootconf_option(args, optlist->co_name,
1252		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
1253			if (integer) {
1254				if (optlist->co_trueop == OR)
1255					cpuctrl |= optlist->co_value;
1256				else if (optlist->co_trueop == BIC)
1257					cpuctrl &= ~optlist->co_value;
1258			} else {
1259				if (optlist->co_falseop == OR)
1260					cpuctrl |= optlist->co_value;
1261				else if (optlist->co_falseop == BIC)
1262					cpuctrl &= ~optlist->co_value;
1263			}
1264		}
1265		++optlist;
1266	}
1267	return(cpuctrl);
1268}
1269#endif /* CPU_ARM9 || ARM9E || ARM10 || ARM11 || XSCALE || FA526 || FA626TE */
1270
1271#ifdef CPU_ARM9
1272struct cpu_option arm9_options[] = {
1273	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1274	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1275	{ "arm9.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1276	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
1277	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
1278	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1279	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1280	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1281	{ NULL,			IGN, IGN, 0 }
1282};
1283
1284void
1285arm9_setup(args)
1286	char *args;
1287{
1288	int cpuctrl, cpuctrlmask;
1289
1290	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1291	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1292	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1293	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE |
1294	    CPU_CONTROL_ROUNDROBIN;
1295	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1296		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1297		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1298		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1299		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1300		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
1301		 | CPU_CONTROL_ROUNDROBIN;
1302
1303#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1304	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1305#endif
1306
1307	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);
1308
1309#ifdef __ARMEB__
1310	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1311#endif
1312	if (vector_page == ARM_VECTORS_HIGH)
1313		cpuctrl |= CPU_CONTROL_VECRELOC;
1314
1315	/* Clear out the cache */
1316	cpu_idcache_wbinv_all();
1317
1318	/* Set the control register */
1319	cpu_control(cpuctrlmask, cpuctrl);
1320	ctrl = cpuctrl;
1321
1322}
1323#endif	/* CPU_ARM9 */
1324
1325#if defined(CPU_ARM9E) || defined(CPU_ARM10)
1326struct cpu_option arm10_options[] = {
1327	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1328	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1329	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1330	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
1331	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
1332	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1333	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1334	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1335	{ NULL,			IGN, IGN, 0 }
1336};
1337
1338void
1339arm10_setup(args)
1340	char *args;
1341{
1342	int cpuctrl, cpuctrlmask;
1343
1344	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1345	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1346	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
1347	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
1348	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1349	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1350	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1351	    | CPU_CONTROL_BPRD_ENABLE
1352	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
1353
1354#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1355	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1356#endif
1357
1358	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);
1359
1360#ifdef __ARMEB__
1361	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1362#endif
1363
1364	/* Clear out the cache */
1365	cpu_idcache_wbinv_all();
1366
1367	/* Now really make sure they are clean.  */
1368	__asm __volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );
1369
1370	if (vector_page == ARM_VECTORS_HIGH)
1371		cpuctrl |= CPU_CONTROL_VECRELOC;
1372
1373	/* Set the control register */
1374	ctrl = cpuctrl;
1375	cpu_control(0xffffffff, cpuctrl);
1376
1377	/* And again. */
1378	cpu_idcache_wbinv_all();
1379}
1380#endif	/* CPU_ARM9E || CPU_ARM10 */
1381
1382#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
1383struct cpu_option arm11_options[] = {
1384	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1385	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1386	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1387	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
1388	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
1389	{ NULL,			IGN, IGN, 0 }
1390};
1391
1392void
1393arm11x6_setup(char *args)
1394{
1395	int cpuctrl, cpuctrl_wax;
1396	uint32_t auxctrl, auxctrl_wax;
1397	uint32_t tmp, tmp2;
1398	uint32_t sbz=0;
1399	uint32_t cpuid;
1400
1401	cpuid = cpufunc_id();
1402
1403	cpuctrl =
1404		CPU_CONTROL_MMU_ENABLE  |
1405		CPU_CONTROL_DC_ENABLE   |
1406		CPU_CONTROL_WBUF_ENABLE |
1407		CPU_CONTROL_32BP_ENABLE |
1408		CPU_CONTROL_32BD_ENABLE |
1409		CPU_CONTROL_LABT_ENABLE |
1410		CPU_CONTROL_SYST_ENABLE |
1411		CPU_CONTROL_IC_ENABLE;
1412
1413	/*
1414	 * "write as existing" bits
1415	 * inverse of this is mask
1416	 */
1417	cpuctrl_wax =
1418		(3 << 30) | /* SBZ */
1419		(1 << 29) | /* FA */
1420		(1 << 28) | /* TR */
1421		(3 << 26) | /* SBZ */
1422		(3 << 19) | /* SBZ */
1423		(1 << 17);  /* SBZ */
1424
1425	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
1426	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
1427
1428	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
1429
1430#ifdef __ARMEB__
1431	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1432#endif
1433
1434	if (vector_page == ARM_VECTORS_HIGH)
1435		cpuctrl |= CPU_CONTROL_VECRELOC;
1436
1437	auxctrl = 0;
1438	auxctrl_wax = ~0;
1439	/*
1440	 * This option enables the workaround for the 364296 ARM1136
1441	 * r0pX errata (possible cache data corruption with
1442	 * hit-under-miss enabled). It sets the undocumented bit 31 in
1443	 * the auxiliary control register and the FI bit in the control
1444	 * register, thus disabling hit-under-miss without putting the
1445	 * processor into full low interrupt latency mode. ARM11MPCore
1446	 * is not affected.
1447	 */
1448	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
1449		cpuctrl |= CPU_CONTROL_FI_ENABLE;
1450		auxctrl = ARM1136_AUXCTL_PFI;
1451		auxctrl_wax = ~ARM1136_AUXCTL_PFI;
1452	}
1453
1454	/*
1455	 * Enable the ARM1176JZ-S r0 errata workaround (PHD bit)
1456	 */
1457	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */
1458		auxctrl = ARM1176_AUXCTL_PHD;
1459		auxctrl_wax = ~ARM1176_AUXCTL_PHD;
1460	}
1461
1462	/* Clear out the cache */
1463	cpu_idcache_wbinv_all();
1464
1465	/* Now really make sure they are clean.  */
1466	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));
1467
1468	/* Allow detection code to find the VFP if it's fitted.  */
1469	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));
1470
1471	/* Set the control register */
1472	ctrl = cpuctrl;
1473	cpu_control(~cpuctrl_wax, cpuctrl);
1474
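	/*
	 * Read-modify-write the auxiliary control register: keep only the
	 * "write as existing" bits (auxctrl_wax), OR in the new bits and
	 * write the result back only if it actually changed (mcrne).
	 */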
1475	__asm volatile ("mrc	p15, 0, %0, c1, c0, 1\n\t"
1476			"and	%1, %0, %2\n\t"
1477			"orr	%1, %1, %3\n\t"
1478			"teq	%0, %1\n\t"
1479			"mcrne	p15, 0, %1, c1, c0, 1\n\t"
1480			: "=r"(tmp), "=r"(tmp2) :
1481			  "r"(auxctrl_wax), "r"(auxctrl));
1482
1483	/* And again. */
1484	cpu_idcache_wbinv_all();
1485}
1486#endif  /* CPU_ARM1136 || CPU_ARM1176 */
1487
1488#ifdef CPU_MV_PJ4B
1489void
1490pj4bv7_setup(args)
1491	char *args;
1492{
1493	int cpuctrl;
1494
1495	pj4b_config();
1496
1497	cpuctrl = CPU_CONTROL_MMU_ENABLE;
1498#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1499	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1500#endif
1501	cpuctrl |= CPU_CONTROL_DC_ENABLE;
1502	cpuctrl |= (0xf << 3);
1503	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
1504	cpuctrl |= CPU_CONTROL_IC_ENABLE;
1505	if (vector_page == ARM_VECTORS_HIGH)
1506		cpuctrl |= CPU_CONTROL_VECRELOC;
1507	cpuctrl |= (0x5 << 16) | (1 << 22);
1508	cpuctrl |= CPU_CONTROL_V6_EXTPAGE;
1509
1510	/* Clear out the cache */
1511	cpu_idcache_wbinv_all();
1512
1513	/* Set the control register */
1514	ctrl = cpuctrl;
1515	cpu_control(0xFFFFFFFF, cpuctrl);
1516
1517	/* And again. */
1518	cpu_idcache_wbinv_all();
1519}
1520#endif /* CPU_MV_PJ4B */
1521
1522#if defined(CPU_CORTEXA) || defined(CPU_KRAIT)
1523
1524void
1525cortexa_setup(char *args)
1526{
1527	int cpuctrl, cpuctrlmask;
1528
1529	cpuctrlmask = CPU_CONTROL_MMU_ENABLE |     /* MMU enable         [0] */
1530	    CPU_CONTROL_AFLT_ENABLE |    /* Alignment fault    [1] */
1531	    CPU_CONTROL_DC_ENABLE |      /* DCache enable      [2] */
1532	    CPU_CONTROL_BPRD_ENABLE |    /* Branch prediction [11] */
1533	    CPU_CONTROL_IC_ENABLE |      /* ICache enable     [12] */
1534	    CPU_CONTROL_VECRELOC;        /* Vector relocation [13] */
1535
1536	cpuctrl = CPU_CONTROL_MMU_ENABLE |
1537	    CPU_CONTROL_IC_ENABLE |
1538	    CPU_CONTROL_DC_ENABLE |
1539	    CPU_CONTROL_BPRD_ENABLE;
1540
1541#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1542	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1543#endif
1544
1545	/* Switch to big endian */
1546#ifdef __ARMEB__
1547	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1548#endif
1549
1550	/* Check if the vector page is at the high address (0xffff0000) */
1551	if (vector_page == ARM_VECTORS_HIGH)
1552		cpuctrl |= CPU_CONTROL_VECRELOC;
1553
1554	/* Clear out the cache */
1555	cpu_idcache_wbinv_all();
1556
1557	/* Set the control register */
1558	ctrl = cpuctrl;
1559	cpu_control(cpuctrlmask, cpuctrl);
1560
1561	/* And again. */
1562	cpu_idcache_wbinv_all();
1563#ifdef SMP
1564	armv7_auxctrl((1 << 6) | (1 << 0), (1 << 6) | (1 << 0)); /* Enable SMP + TLB broadcasting  */
1565#endif
1566}
1567#endif  /* CPU_CORTEXA || CPU_KRAIT */
1568
1569#if defined(CPU_FA526) || defined(CPU_FA626TE)
1570struct cpu_option fa526_options[] = {
1571#ifdef COMPAT_12
1572	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE |
1573					   CPU_CONTROL_DC_ENABLE) },
1574	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
1575#endif	/* COMPAT_12 */
1576	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE |
1577					   CPU_CONTROL_DC_ENABLE) },
1578	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE |
1579					   CPU_CONTROL_DC_ENABLE) },
1580	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
1581	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
1582	{ NULL,			IGN, IGN, 0 }
1583};
1584
1585void
1586fa526_setup(char *args)
1587{
1588	int cpuctrl, cpuctrlmask;
1589
1590	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1591		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1592		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1593		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1594		| CPU_CONTROL_BPRD_ENABLE;
1595	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1596		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1597		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1598		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1599		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1600		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1601		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;
1602
1603#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1604	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1605#endif
1606
1607	cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);
1608
1609#ifdef __ARMEB__
1610	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1611#endif
1612
1613	if (vector_page == ARM_VECTORS_HIGH)
1614		cpuctrl |= CPU_CONTROL_VECRELOC;
1615
1616	/* Clear out the cache */
1617	cpu_idcache_wbinv_all();
1618
1619	/* Set the control register */
1620	ctrl = cpuctrl;
1621	cpu_control(0xffffffff, cpuctrl);
1622}
1623#endif	/* CPU_FA526 || CPU_FA626TE */
1624
1625#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
1626  defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_IXP425) || \
1627  defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
1628struct cpu_option xscale_options[] = {
1629#ifdef COMPAT_12
1630	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
1631	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1632#endif	/* COMPAT_12 */
1633	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
1634	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1635	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1636	{ "xscale.branchpredict", BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
1637	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
1638	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
1639	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
1640	{ NULL,			IGN, IGN, 0 }
1641};
1642
1643void
1644xscale_setup(args)
1645	char *args;
1646{
1647	uint32_t auxctl;
1648	int cpuctrl, cpuctrlmask;
1649
1650	/*
1651	 * The XScale Write Buffer is always enabled.  Our option
1652	 * is to enable/disable coalescing.  Note that bits 6:3
1653	 * must always be enabled.
1654	 */
1655
1656	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1657		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1658		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1659		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
1660		 | CPU_CONTROL_BPRD_ENABLE;
1661	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
1662		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
1663		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
1664		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
1665		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
1666		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
1667		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \
1668		 CPU_CONTROL_L2_ENABLE;
1669
1670#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
1671	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
1672#endif
1673
1674	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);
1675
1676#ifdef __ARMEB__
1677	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
1678#endif
1679
1680	if (vector_page == ARM_VECTORS_HIGH)
1681		cpuctrl |= CPU_CONTROL_VECRELOC;
1682#ifdef CPU_XSCALE_CORE3
1683	cpuctrl |= CPU_CONTROL_L2_ENABLE;
1684#endif
1685
1686	/* Clear out the cache */
1687	cpu_idcache_wbinv_all();
1688
1689	/*
1690	 * Set the control register.  Note that bits 6:3 must always
1691	 * be set to 1.
1692	 */
1693	ctrl = cpuctrl;
1694/*	cpu_control(cpuctrlmask, cpuctrl);*/
1695	cpu_control(0xffffffff, cpuctrl);
1696
1697	/* Make sure write coalescing is turned on */
1698	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
1699		: "=r" (auxctl));
1700#ifdef XSCALE_NO_COALESCE_WRITES
1701	auxctl |= XSCALE_AUXCTL_K;
1702#else
1703	auxctl &= ~XSCALE_AUXCTL_K;
1704#endif
1705#ifdef CPU_XSCALE_CORE3
1706	auxctl |= XSCALE_AUXCTL_LLR;
1707	auxctl |= XSCALE_AUXCTL_MD_MASK;
1708#endif
1709	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
1710		: : "r" (auxctl));
1711}
1712#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || CPU_XSCALE_PXA2X0 || CPU_XSCALE_IXP425 ||
1713	   CPU_XSCALE_80219 */
1714