identcpu.c revision 305530
/*-
 * Copyright (c) 2014 Andrew Turner
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Semihalf
 * under sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/arm64/arm64/identcpu.c 305530 2016-09-07 12:28:30Z andrew $");

#include <sys/param.h>
#include <sys/pcpu.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>

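/* Serializes the secondary-CPU part of identify_cpu(); see the WFE/SEV spinlock there. */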
static int ident_lock;

char machine[] = "arm64";

SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0,
    "Machine class");

/*
 * Per-CPU affinity as provided in MPIDR_EL1.
 * Indexed by CPU number in the logical order selected by the system.
 * Relevant fields can be extracted using the CPU_AFFn macros;
 * Aff3.Aff2.Aff1.Aff0 together form a unique CPU address in the system.
 *
 * Fields used by us:
 * Aff1 - Cluster number
 * Aff0 - CPU number within the Aff1 cluster
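 *
 * Example: an MPIDR_EL1 value of 0x0000000000000101 decodes to
 * Aff1 = 1 (cluster 1) and Aff0 = 1 (second CPU in that cluster).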
 */
uint64_t __cpu_affinity[MAXCPU];
static u_int cpu_aff_levels;

struct cpu_desc {
	u_int		cpu_impl;
	u_int		cpu_part_num;
	u_int		cpu_variant;
	u_int		cpu_revision;
	const char	*cpu_impl_name;
	const char	*cpu_part_name;

	uint64_t	mpidr;
	uint64_t	id_aa64afr0;
	uint64_t	id_aa64afr1;
	uint64_t	id_aa64dfr0;
	uint64_t	id_aa64dfr1;
	uint64_t	id_aa64isar0;
	uint64_t	id_aa64isar1;
	uint64_t	id_aa64mmfr0;
	uint64_t	id_aa64mmfr1;
	uint64_t	id_aa64pfr0;
	uint64_t	id_aa64pfr1;
};

struct cpu_desc cpu_desc[MAXCPU];
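/*
 * Bit mask of ID registers whose value differs from CPU 0; set by
 * identify_cpu() on the secondary CPUs and consulted by print_cpu_features().
 */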
static u_int cpu_print_regs;
#define	PRINT_ID_AA64_AFR0	0x00000001
#define	PRINT_ID_AA64_AFR1	0x00000002
#define	PRINT_ID_AA64_DFR0	0x00000004
#define	PRINT_ID_AA64_DFR1	0x00000008
#define	PRINT_ID_AA64_ISAR0	0x00000010
#define	PRINT_ID_AA64_ISAR1	0x00000020
#define	PRINT_ID_AA64_MMFR0	0x00000040
#define	PRINT_ID_AA64_MMFR1	0x00000080
#define	PRINT_ID_AA64_PFR0	0x00000100
#define	PRINT_ID_AA64_PFR1	0x00000200

struct cpu_parts {
	u_int		part_id;
	const char	*part_name;
};
#define	CPU_PART_NONE	{ 0, "Unknown Processor" }

struct cpu_implementers {
	u_int			impl_id;
	const char		*impl_name;
	/*
	 * Part number is implementation defined
	 * so each vendor will have its own set of values and names.
	 */
	const struct cpu_parts	*cpu_parts;
};
#define	CPU_IMPLEMENTER_NONE	{ 0, "Unknown Implementer", cpu_parts_none }
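/*
 * Both *_NONE entries above terminate the tables that follow: an id of 0
 * matches any remaining value, so the lookups in identify_cpu() always
 * find an entry.
 */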

/*
 * Per-implementer table of (PartNum, CPU Name) pairs.
 */
/* ARM Ltd. */
static const struct cpu_parts cpu_parts_arm[] = {
	{ CPU_PART_FOUNDATION, "Foundation-Model" },
	{ CPU_PART_CORTEX_A53, "Cortex-A53" },
	{ CPU_PART_CORTEX_A57, "Cortex-A57" },
	CPU_PART_NONE,
};
/* Cavium */
static const struct cpu_parts cpu_parts_cavium[] = {
	{ CPU_PART_THUNDER, "Thunder" },
	CPU_PART_NONE,
};

/* Unknown */
static const struct cpu_parts cpu_parts_none[] = {
	CPU_PART_NONE,
};

/*
 * Implementers table.
 */
const struct cpu_implementers cpu_implementers[] = {
	{ CPU_IMPL_ARM,		"ARM",		cpu_parts_arm },
	{ CPU_IMPL_BROADCOM,	"Broadcom",	cpu_parts_none },
	{ CPU_IMPL_CAVIUM,	"Cavium",	cpu_parts_cavium },
	{ CPU_IMPL_DEC,		"DEC",		cpu_parts_none },
	{ CPU_IMPL_INFINEON,	"IFX",		cpu_parts_none },
	{ CPU_IMPL_FREESCALE,	"Freescale",	cpu_parts_none },
	{ CPU_IMPL_NVIDIA,	"NVIDIA",	cpu_parts_none },
	{ CPU_IMPL_APM,		"APM",		cpu_parts_none },
	{ CPU_IMPL_QUALCOMM,	"Qualcomm",	cpu_parts_none },
	{ CPU_IMPL_MARVELL,	"Marvell",	cpu_parts_none },
	{ CPU_IMPL_INTEL,	"Intel",	cpu_parts_none },
	CPU_IMPLEMENTER_NONE,
};

void
print_cpu_features(u_int cpu)
{
	int printed;

	printf("CPU%3d: %s %s r%dp%d", cpu, cpu_desc[cpu].cpu_impl_name,
	    cpu_desc[cpu].cpu_part_name, cpu_desc[cpu].cpu_variant,
	    cpu_desc[cpu].cpu_revision);

	printf(" affinity:");
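	/* Print one field per affinity level in use; the cases fall through. */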
	switch(cpu_aff_levels) {
	default:
	case 4:
		printf(" %2d", CPU_AFF3(cpu_desc[cpu].mpidr));
		/* FALLTHROUGH */
	case 3:
		printf(" %2d", CPU_AFF2(cpu_desc[cpu].mpidr));
		/* FALLTHROUGH */
	case 2:
		printf(" %2d", CPU_AFF1(cpu_desc[cpu].mpidr));
		/* FALLTHROUGH */
	case 1:
	case 0: /* On UP this will be zero */
		printf(" %2d", CPU_AFF0(cpu_desc[cpu].mpidr));
		break;
	}
	printf("\n");

	if (cpu != 0 && cpu_print_regs == 0)
		return;

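/*
 * SEP_STR emits a comma before every list entry except the first, so each
 * register below prints as e.g. "<AES,SHA1,SHA2,CRC32>".
 */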
#define SEP_STR	((printed++) == 0) ? "" : ","

	/* AArch64 Instruction Set Attribute Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR0) != 0) {
		printed = 0;
		printf(" Instruction Set Attributes 0 = <");

		switch (ID_AA64ISAR0_RDM(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_RDM_NONE:
			break;
		case ID_AA64ISAR0_RDM_IMPL:
			printf("%sRDM", SEP_STR);
			break;
		default:
			printf("%sUnknown RDM", SEP_STR);
		}

		switch (ID_AA64ISAR0_ATOMIC(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_ATOMIC_NONE:
			break;
		case ID_AA64ISAR0_ATOMIC_IMPL:
			printf("%sAtomic", SEP_STR);
			break;
		default:
			printf("%sUnknown Atomic", SEP_STR);
		}

		switch (ID_AA64ISAR0_AES(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_AES_NONE:
			break;
		case ID_AA64ISAR0_AES_BASE:
			printf("%sAES", SEP_STR);
			break;
		case ID_AA64ISAR0_AES_PMULL:
			printf("%sAES+PMULL", SEP_STR);
			break;
		default:
			printf("%sUnknown AES", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SHA1(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SHA1_NONE:
			break;
		case ID_AA64ISAR0_SHA1_BASE:
			printf("%sSHA1", SEP_STR);
			break;
		default:
			printf("%sUnknown SHA1", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_SHA2(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_SHA2_NONE:
			break;
		case ID_AA64ISAR0_SHA2_BASE:
			printf("%sSHA2", SEP_STR);
			break;
		default:
			printf("%sUnknown SHA2", SEP_STR);
			break;
		}

		switch (ID_AA64ISAR0_CRC32(cpu_desc[cpu].id_aa64isar0)) {
		case ID_AA64ISAR0_CRC32_NONE:
			break;
		case ID_AA64ISAR0_CRC32_BASE:
			printf("%sCRC32", SEP_STR);
			break;
		default:
			printf("%sUnknown CRC32", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK) != 0)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64isar0 & ~ID_AA64ISAR0_MASK);

		printf(">\n");
	}

	/* AArch64 Instruction Set Attribute Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_ISAR1) != 0) {
		printf(" Instruction Set Attributes 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64isar1);
	}

	/* AArch64 Processor Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR0) != 0) {
		printed = 0;
		printf("         Processor Features 0 = <");
		switch (ID_AA64PFR0_GIC(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_GIC_CPUIF_NONE:
			break;
		case ID_AA64PFR0_GIC_CPUIF_EN:
			printf("%sGIC", SEP_STR);
			break;
		default:
			printf("%sUnknown GIC interface", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_ADV_SIMD(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_ADV_SIMD_NONE:
			break;
		case ID_AA64PFR0_ADV_SIMD_IMPL:
			printf("%sAdvSIMD", SEP_STR);
			break;
		default:
			printf("%sUnknown AdvSIMD", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_FP(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_FP_NONE:
			break;
		case ID_AA64PFR0_FP_IMPL:
			printf("%sFloat", SEP_STR);
			break;
		default:
			printf("%sUnknown Float", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL3(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL3_NONE:
			printf("%sNo EL3", SEP_STR);
			break;
		case ID_AA64PFR0_EL3_64:
			printf("%sEL3", SEP_STR);
			break;
		case ID_AA64PFR0_EL3_64_32:
			printf("%sEL3 32", SEP_STR);
			break;
		default:
			printf("%sUnknown EL3", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL2(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL2_NONE:
			printf("%sNo EL2", SEP_STR);
			break;
		case ID_AA64PFR0_EL2_64:
			printf("%sEL2", SEP_STR);
			break;
		case ID_AA64PFR0_EL2_64_32:
			printf("%sEL2 32", SEP_STR);
			break;
		default:
			printf("%sUnknown EL2", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL1(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL1_64:
			printf("%sEL1", SEP_STR);
			break;
		case ID_AA64PFR0_EL1_64_32:
			printf("%sEL1 32", SEP_STR);
			break;
		default:
			printf("%sUnknown EL1", SEP_STR);
			break;
		}

		switch (ID_AA64PFR0_EL0(cpu_desc[cpu].id_aa64pfr0)) {
		case ID_AA64PFR0_EL0_64:
			printf("%sEL0", SEP_STR);
			break;
		case ID_AA64PFR0_EL0_64_32:
			printf("%sEL0 32", SEP_STR);
			break;
		default:
			printf("%sUnknown EL0", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK) != 0)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64pfr0 & ~ID_AA64PFR0_MASK);

		printf(">\n");
	}

	/* AArch64 Processor Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_PFR1) != 0) {
		printf("         Processor Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64pfr1);
	}

	/* AArch64 Memory Model Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR0) != 0) {
		printed = 0;
		printf("      Memory Model Features 0 = <");
		switch (ID_AA64MMFR0_TGRAN4(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGRAN4_NONE:
			break;
		case ID_AA64MMFR0_TGRAN4_IMPL:
			printf("%s4k Granule", SEP_STR);
			break;
		default:
			printf("%sUnknown 4k Granule", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_TGRAN16(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGRAN16_NONE:
			break;
		case ID_AA64MMFR0_TGRAN16_IMPL:
			printf("%s16k Granule", SEP_STR);
			break;
		default:
			printf("%sUnknown 16k Granule", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_TGRAN64(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_TGRAN64_NONE:
			break;
		case ID_AA64MMFR0_TGRAN64_IMPL:
			printf("%s64k Granule", SEP_STR);
			break;
		default:
			printf("%sUnknown 64k Granule", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_BIGEND(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_BIGEND_FIXED:
			break;
		case ID_AA64MMFR0_BIGEND_MIXED:
			printf("%sMixedEndian", SEP_STR);
			break;
		default:
			printf("%sUnknown Endian switching", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_BIGEND_EL0(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_BIGEND_EL0_FIXED:
			break;
		case ID_AA64MMFR0_BIGEND_EL0_MIXED:
			printf("%sEL0 MixEndian", SEP_STR);
			break;
		default:
			printf("%sUnknown EL0 Endian switching", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_S_NS_MEM(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_S_NS_MEM_NONE:
			break;
		case ID_AA64MMFR0_S_NS_MEM_DISTINCT:
			printf("%sS/NS Mem", SEP_STR);
			break;
		default:
			printf("%sUnknown S/NS Mem", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_ASID_BITS(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_ASID_BITS_8:
			printf("%s8bit ASID", SEP_STR);
			break;
		case ID_AA64MMFR0_ASID_BITS_16:
			printf("%s16bit ASID", SEP_STR);
			break;
		default:
			printf("%sUnknown ASID", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR0_PA_RANGE(cpu_desc[cpu].id_aa64mmfr0)) {
		case ID_AA64MMFR0_PA_RANGE_4G:
			printf("%s4GB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_64G:
			printf("%s64GB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_1T:
			printf("%s1TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_4T:
			printf("%s4TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_16T:
			printf("%s16TB PA", SEP_STR);
			break;
		case ID_AA64MMFR0_PA_RANGE_256T:
			printf("%s256TB PA", SEP_STR);
			break;
		default:
			printf("%sUnknown PA Range", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK) != 0)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64mmfr0 & ~ID_AA64MMFR0_MASK);
		printf(">\n");
	}

	/* AArch64 Memory Model Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_MMFR1) != 0) {
		printed = 0;
		printf("      Memory Model Features 1 = <");

		switch (ID_AA64MMFR1_PAN(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_PAN_NONE:
			break;
		case ID_AA64MMFR1_PAN_IMPL:
			printf("%sPAN", SEP_STR);
			break;
		default:
			printf("%sUnknown PAN", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_LO(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_LO_NONE:
			break;
		case ID_AA64MMFR1_LO_IMPL:
			printf("%sLO", SEP_STR);
			break;
		default:
			printf("%sUnknown LO", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_HPDS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_HPDS_NONE:
			break;
		case ID_AA64MMFR1_HPDS_IMPL:
			printf("%sHPDS", SEP_STR);
			break;
		default:
			printf("%sUnknown HPDS", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_VH(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_VH_NONE:
			break;
		case ID_AA64MMFR1_VH_IMPL:
			printf("%sVHE", SEP_STR);
			break;
		default:
			printf("%sUnknown VHE", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_VMIDBITS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_VMIDBITS_8:
			break;
		case ID_AA64MMFR1_VMIDBITS_16:
			printf("%s16 VMID bits", SEP_STR);
			break;
		default:
			printf("%sUnknown VMID bits", SEP_STR);
			break;
		}

		switch (ID_AA64MMFR1_HAFDBS(cpu_desc[cpu].id_aa64mmfr1)) {
		case ID_AA64MMFR1_HAFDBS_NONE:
			break;
		case ID_AA64MMFR1_HAFDBS_AF:
			printf("%sAF", SEP_STR);
			break;
		case ID_AA64MMFR1_HAFDBS_AF_DBS:
			printf("%sAF+DBS", SEP_STR);
			break;
		default:
			printf("%sUnknown Hardware update AF/DBS", SEP_STR);
			break;
		}

		if ((cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK) != 0)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64mmfr1 & ~ID_AA64MMFR1_MASK);
		printf(">\n");
	}

	/* AArch64 Debug Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR0) != 0) {
		printed = 0;
		printf("             Debug Features 0 = <");
		printf("%s%lu CTX Breakpoints", SEP_STR,
		    ID_AA64DFR0_CTX_CMPS(cpu_desc[cpu].id_aa64dfr0));

		printf("%s%lu Watchpoints", SEP_STR,
		    ID_AA64DFR0_WRPS(cpu_desc[cpu].id_aa64dfr0));

		printf("%s%lu Breakpoints", SEP_STR,
		    ID_AA64DFR0_BRPS(cpu_desc[cpu].id_aa64dfr0));

		switch (ID_AA64DFR0_PMU_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_PMU_VER_NONE:
			break;
		case ID_AA64DFR0_PMU_VER_3:
			printf("%sPMUv3", SEP_STR);
			break;
		case ID_AA64DFR0_PMU_VER_3_1:
			printf("%sPMUv3+16 bit evtCount", SEP_STR);
			break;
		case ID_AA64DFR0_PMU_VER_IMPL:
			printf("%sImplementation defined PMU", SEP_STR);
			break;
		default:
			printf("%sUnknown PMU", SEP_STR);
			break;
		}

		switch (ID_AA64DFR0_TRACE_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_TRACE_VER_NONE:
			break;
		case ID_AA64DFR0_TRACE_VER_IMPL:
			printf("%sTrace", SEP_STR);
			break;
		default:
			printf("%sUnknown Trace", SEP_STR);
			break;
		}

		switch (ID_AA64DFR0_DEBUG_VER(cpu_desc[cpu].id_aa64dfr0)) {
		case ID_AA64DFR0_DEBUG_VER_8:
			printf("%sDebug v8", SEP_STR);
			break;
		case ID_AA64DFR0_DEBUG_VER_8_VHE:
			printf("%sDebug v8+VHE", SEP_STR);
			break;
		default:
			printf("%sUnknown Debug", SEP_STR);
			break;
		}

		if (cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK)
			printf("%s%#lx", SEP_STR,
			    cpu_desc[cpu].id_aa64dfr0 & ~ID_AA64DFR0_MASK);
		printf(">\n");
	}

	/* AArch64 Debug Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_DFR1) != 0) {
		printf("             Debug Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64dfr1);
	}

	/* AArch64 Auxiliary Feature Register 0 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR0) != 0) {
		printf("         Auxiliary Features 0 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64afr0);
	}

	/* AArch64 Auxiliary Feature Register 1 */
	if (cpu == 0 || (cpu_print_regs & PRINT_ID_AA64_AFR1) != 0) {
		printf("         Auxiliary Features 1 = <%#lx>\n",
		    cpu_desc[cpu].id_aa64afr1);
	}

#undef SEP_STR
}

void
identify_cpu(void)
{
	u_int midr;
	u_int impl_id;
	u_int part_id;
	u_int cpu;
	size_t i;
	const struct cpu_parts *cpu_partsp = NULL;

	cpu = PCPU_GET(cpuid);
	midr = get_midr();

	/*
	 * Store midr to pcpu to allow fast reading
	 * from EL0, EL1 and assembly code.
	 */
	PCPU_SET(midr, midr);

	impl_id = CPU_IMPL(midr);
	for (i = 0; i < nitems(cpu_implementers); i++) {
		if (impl_id == cpu_implementers[i].impl_id ||
		    cpu_implementers[i].impl_id == 0) {
			cpu_desc[cpu].cpu_impl = impl_id;
			cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name;
			cpu_partsp = cpu_implementers[i].cpu_parts;
			break;
		}
	}

	part_id = CPU_PART(midr);
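	/*
	 * Note: the "&cpu_partsp[i] != NULL" test below never fails; the loop
	 * actually terminates on the CPU_PART_NONE sentinel (part_id == 0).
	 */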
	for (i = 0; &cpu_partsp[i] != NULL; i++) {
		if (part_id == cpu_partsp[i].part_id ||
		    cpu_partsp[i].part_id == 0) {
			cpu_desc[cpu].cpu_part_num = part_id;
			cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;
			break;
		}
	}

	cpu_desc[cpu].cpu_revision = CPU_REV(midr);
	cpu_desc[cpu].cpu_variant = CPU_VAR(midr);

	/* Save affinity for current CPU */
	cpu_desc[cpu].mpidr = get_mpidr();
	CPU_AFFINITY(cpu) = cpu_desc[cpu].mpidr & CPU_AFF_MASK;

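	/*
	 * Cache the AArch64 ID registers; CPU 0's copies serve as the
	 * reference the other CPUs are compared against below.
	 */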
	cpu_desc[cpu].id_aa64dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
	cpu_desc[cpu].id_aa64dfr1 = READ_SPECIALREG(id_aa64dfr1_el1);
	cpu_desc[cpu].id_aa64isar0 = READ_SPECIALREG(id_aa64isar0_el1);
	cpu_desc[cpu].id_aa64isar1 = READ_SPECIALREG(id_aa64isar1_el1);
	cpu_desc[cpu].id_aa64mmfr0 = READ_SPECIALREG(id_aa64mmfr0_el1);
	cpu_desc[cpu].id_aa64mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
	cpu_desc[cpu].id_aa64pfr0 = READ_SPECIALREG(id_aa64pfr0_el1);
	cpu_desc[cpu].id_aa64pfr1 = READ_SPECIALREG(id_aa64pfr1_el1);

	if (cpu != 0) {
		/*
		 * This code must run on one cpu at a time, but we are
		 * not scheduling on the current core yet, so implement a
		 * simple spinlock.
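		 * A CPU that fails to take the lock waits in WFE; the lock
		 * holder wakes the waiters with SEV after releasing it below.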
		 */
		while (atomic_cmpset_acq_int(&ident_lock, 0, 1) == 0)
			__asm __volatile("wfe" ::: "memory");

		switch (cpu_aff_levels) {
		case 0:
			if (CPU_AFF0(cpu_desc[cpu].mpidr) !=
			    CPU_AFF0(cpu_desc[0].mpidr))
				cpu_aff_levels = 1;
			/* FALLTHROUGH */
		case 1:
			if (CPU_AFF1(cpu_desc[cpu].mpidr) !=
			    CPU_AFF1(cpu_desc[0].mpidr))
				cpu_aff_levels = 2;
			/* FALLTHROUGH */
		case 2:
			if (CPU_AFF2(cpu_desc[cpu].mpidr) !=
			    CPU_AFF2(cpu_desc[0].mpidr))
				cpu_aff_levels = 3;
			/* FALLTHROUGH */
		case 3:
			if (CPU_AFF3(cpu_desc[cpu].mpidr) !=
			    CPU_AFF3(cpu_desc[0].mpidr))
				cpu_aff_levels = 4;
			break;
		}

		if (cpu_desc[cpu].id_aa64afr0 != cpu_desc[0].id_aa64afr0)
			cpu_print_regs |= PRINT_ID_AA64_AFR0;
		if (cpu_desc[cpu].id_aa64afr1 != cpu_desc[0].id_aa64afr1)
			cpu_print_regs |= PRINT_ID_AA64_AFR1;

		if (cpu_desc[cpu].id_aa64dfr0 != cpu_desc[0].id_aa64dfr0)
			cpu_print_regs |= PRINT_ID_AA64_DFR0;
		if (cpu_desc[cpu].id_aa64dfr1 != cpu_desc[0].id_aa64dfr1)
			cpu_print_regs |= PRINT_ID_AA64_DFR1;

		if (cpu_desc[cpu].id_aa64isar0 != cpu_desc[0].id_aa64isar0)
			cpu_print_regs |= PRINT_ID_AA64_ISAR0;
		if (cpu_desc[cpu].id_aa64isar1 != cpu_desc[0].id_aa64isar1)
			cpu_print_regs |= PRINT_ID_AA64_ISAR1;

		if (cpu_desc[cpu].id_aa64mmfr0 != cpu_desc[0].id_aa64mmfr0)
			cpu_print_regs |= PRINT_ID_AA64_MMFR0;
		if (cpu_desc[cpu].id_aa64mmfr1 != cpu_desc[0].id_aa64mmfr1)
			cpu_print_regs |= PRINT_ID_AA64_MMFR1;

		if (cpu_desc[cpu].id_aa64pfr0 != cpu_desc[0].id_aa64pfr0)
			cpu_print_regs |= PRINT_ID_AA64_PFR0;
		if (cpu_desc[cpu].id_aa64pfr1 != cpu_desc[0].id_aa64pfr1)
			cpu_print_regs |= PRINT_ID_AA64_PFR1;

		/* Wake up the other CPUs */
		atomic_store_rel_int(&ident_lock, 0);
		__asm __volatile("sev" ::: "memory");
	}
}