/* identcpu.c revision 1.134 */
1/*	$OpenBSD: identcpu.c,v 1.134 2023/07/21 04:04:51 guenther Exp $	*/
2/*	$NetBSD: identcpu.c,v 1.1 2003/04/26 18:39:28 fvdl Exp $	*/
3
4/*
5 * Copyright (c) 2003 Wasabi Systems, Inc.
6 * All rights reserved.
7 *
8 * Written by Frank van der Linden for Wasabi Systems, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 *    must display the following acknowledgement:
20 *      This product includes software developed for the NetBSD Project by
21 *      Wasabi Systems, Inc.
22 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
23 *    or promote products derived from this software without specific prior
24 *    written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/atomic.h>
42#include <sys/proc.h>
43#include <sys/sysctl.h>
44
45#include "vmm.h"
46#include "pvbus.h"
47
48#include <machine/cpu.h>
49#include <machine/cpufunc.h>
50
51#if NPVBUS > 0
52#include <dev/pv/pvvar.h>
53#endif
54
/* Implemented elsewhere; called once feature detection is done. */
void	replacesmap(void);
void	replacemeltdown(void);
uint64_t cpu_freq(struct cpu_info *);
void	tsc_identify(struct cpu_info *);
void	tsc_timecounter_init(struct cpu_info *, uint64_t);
#if NVMM > 0
void	cpu_check_vmm_cap(struct cpu_info *);
#endif /* NVMM > 0 */

/* sysctl wants this. */
char cpu_model[48];		/* cleaned-up CPUID brand string (primary cpu) */
int cpuspeed;			/* measured clock, MHz (primary cpu) */

int amd64_has_xcrypt;		/* VIA xcrypt engine flags (C3_HAS_*) */
#ifdef CRYPTO
int amd64_has_pclmul;
int amd64_has_aesni;
#endif
int has_rdrand;
int has_rdseed;
75
76const struct {
77	u_int32_t	bit;
78	char		str[12];
79} cpu_cpuid_features[] = {
80	{ CPUID_FPU,	"FPU" },
81	{ CPUID_VME,	"VME" },
82	{ CPUID_DE,	"DE" },
83	{ CPUID_PSE,	"PSE" },
84	{ CPUID_TSC,	"TSC" },
85	{ CPUID_MSR,	"MSR" },
86	{ CPUID_PAE,	"PAE" },
87	{ CPUID_MCE,	"MCE" },
88	{ CPUID_CX8,	"CX8" },
89	{ CPUID_APIC,	"APIC" },
90	{ CPUID_SEP,	"SEP" },
91	{ CPUID_MTRR,	"MTRR" },
92	{ CPUID_PGE,	"PGE" },
93	{ CPUID_MCA,	"MCA" },
94	{ CPUID_CMOV,	"CMOV" },
95	{ CPUID_PAT,	"PAT" },
96	{ CPUID_PSE36,	"PSE36" },
97	{ CPUID_PSN,	"PSN" },
98	{ CPUID_CFLUSH,	"CFLUSH" },
99	{ CPUID_DS,	"DS" },
100	{ CPUID_ACPI,	"ACPI" },
101	{ CPUID_MMX,	"MMX" },
102	{ CPUID_FXSR,	"FXSR" },
103	{ CPUID_SSE,	"SSE" },
104	{ CPUID_SSE2,	"SSE2" },
105	{ CPUID_SS,	"SS" },
106	{ CPUID_HTT,	"HTT" },
107	{ CPUID_TM,	"TM" },
108	{ CPUID_PBE,	"PBE" }
109}, cpu_ecpuid_features[] = {
110	{ CPUID_MPC,		"MPC" },
111	{ CPUID_NXE,		"NXE" },
112	{ CPUID_MMXX,		"MMXX" },
113	{ CPUID_FFXSR,		"FFXSR" },
114	{ CPUID_PAGE1GB,	"PAGE1GB" },
115	{ CPUID_RDTSCP,		"RDTSCP" },
116	{ CPUID_LONG,		"LONG" },
117	{ CPUID_3DNOW2,		"3DNOW2" },
118	{ CPUID_3DNOW,		"3DNOW" }
119}, cpu_cpuid_ecxfeatures[] = {
120	{ CPUIDECX_SSE3,	"SSE3" },
121	{ CPUIDECX_PCLMUL,	"PCLMUL" },
122	{ CPUIDECX_DTES64,	"DTES64" },
123	{ CPUIDECX_MWAIT,	"MWAIT" },
124	{ CPUIDECX_DSCPL,	"DS-CPL" },
125	{ CPUIDECX_VMX,		"VMX" },
126	{ CPUIDECX_SMX,		"SMX" },
127	{ CPUIDECX_EST,		"EST" },
128	{ CPUIDECX_TM2,		"TM2" },
129	{ CPUIDECX_SSSE3,	"SSSE3" },
130	{ CPUIDECX_CNXTID,	"CNXT-ID" },
131	{ CPUIDECX_SDBG,	"SDBG" },
132	{ CPUIDECX_FMA3,	"FMA3" },
133	{ CPUIDECX_CX16,	"CX16" },
134	{ CPUIDECX_XTPR,	"xTPR" },
135	{ CPUIDECX_PDCM,	"PDCM" },
136	{ CPUIDECX_PCID,	"PCID" },
137	{ CPUIDECX_DCA,		"DCA" },
138	{ CPUIDECX_SSE41,	"SSE4.1" },
139	{ CPUIDECX_SSE42,	"SSE4.2" },
140	{ CPUIDECX_X2APIC,	"x2APIC" },
141	{ CPUIDECX_MOVBE,	"MOVBE" },
142	{ CPUIDECX_POPCNT,	"POPCNT" },
143	{ CPUIDECX_DEADLINE,	"DEADLINE" },
144	{ CPUIDECX_AES,		"AES" },
145	{ CPUIDECX_XSAVE,	"XSAVE" },
146	{ CPUIDECX_OSXSAVE,	"OSXSAVE" },
147	{ CPUIDECX_AVX,		"AVX" },
148	{ CPUIDECX_F16C,	"F16C" },
149	{ CPUIDECX_RDRAND,	"RDRAND" },
150	{ CPUIDECX_HV,		"HV" },
151}, cpu_ecpuid_ecxfeatures[] = {
152	{ CPUIDECX_LAHF,	"LAHF" },
153	{ CPUIDECX_CMPLEG,	"CMPLEG" },
154	{ CPUIDECX_SVM,		"SVM" },
155	{ CPUIDECX_EAPICSP,	"EAPICSP"},
156	{ CPUIDECX_AMCR8,	"AMCR8"},
157	{ CPUIDECX_ABM,		"ABM" },
158	{ CPUIDECX_SSE4A,	"SSE4A" },
159	{ CPUIDECX_MASSE,	"MASSE" },
160	{ CPUIDECX_3DNOWP,	"3DNOWP" },
161	{ CPUIDECX_OSVW,	"OSVW" },
162	{ CPUIDECX_IBS,		"IBS" },
163	{ CPUIDECX_XOP,		"XOP" },
164	{ CPUIDECX_SKINIT,	"SKINIT" },
165	{ CPUIDECX_LWP,		"WDT" },
166	{ CPUIDECX_FMA4,	"FMA4" },
167	{ CPUIDECX_TCE,		"TCE" },
168	{ CPUIDECX_NODEID,	"NODEID" },
169	{ CPUIDECX_TBM,		"TBM" },
170	{ CPUIDECX_TOPEXT,	"TOPEXT" },
171	{ CPUIDECX_CPCTR,	"CPCTR" },
172	{ CPUIDECX_DBKP,	"DBKP" },
173	{ CPUIDECX_PERFTSC,	"PERFTSC" },
174	{ CPUIDECX_PCTRL3,	"PCTRL3" },
175	{ CPUIDECX_MWAITX,	"MWAITX" },
176}, cpu_seff0_ebxfeatures[] = {
177	{ SEFF0EBX_FSGSBASE,	"FSGSBASE" },
178	{ SEFF0EBX_TSC_ADJUST,	"TSC_ADJUST" },
179	{ SEFF0EBX_SGX,		"SGX" },
180	{ SEFF0EBX_BMI1,	"BMI1" },
181	{ SEFF0EBX_HLE,		"HLE" },
182	{ SEFF0EBX_AVX2,	"AVX2" },
183	{ SEFF0EBX_SMEP,	"SMEP" },
184	{ SEFF0EBX_BMI2,	"BMI2" },
185	{ SEFF0EBX_ERMS,	"ERMS" },
186	{ SEFF0EBX_INVPCID,	"INVPCID" },
187	{ SEFF0EBX_RTM,		"RTM" },
188	{ SEFF0EBX_PQM,		"PQM" },
189	{ SEFF0EBX_MPX,		"MPX" },
190	{ SEFF0EBX_AVX512F,	"AVX512F" },
191	{ SEFF0EBX_AVX512DQ,	"AVX512DQ" },
192	{ SEFF0EBX_RDSEED,	"RDSEED" },
193	{ SEFF0EBX_ADX,		"ADX" },
194	{ SEFF0EBX_SMAP,	"SMAP" },
195	{ SEFF0EBX_AVX512IFMA,	"AVX512IFMA" },
196	{ SEFF0EBX_PCOMMIT,	"PCOMMIT" },
197	{ SEFF0EBX_CLFLUSHOPT,	"CLFLUSHOPT" },
198	{ SEFF0EBX_CLWB,	"CLWB" },
199	{ SEFF0EBX_PT,		"PT" },
200	{ SEFF0EBX_AVX512PF,	"AVX512PF" },
201	{ SEFF0EBX_AVX512ER,	"AVX512ER" },
202	{ SEFF0EBX_AVX512CD,	"AVX512CD" },
203	{ SEFF0EBX_SHA,		"SHA" },
204	{ SEFF0EBX_AVX512BW,	"AVX512BW" },
205	{ SEFF0EBX_AVX512VL,	"AVX512VL" },
206}, cpu_seff0_ecxfeatures[] = {
207	{ SEFF0ECX_PREFETCHWT1,	"PREFETCHWT1" },
208	{ SEFF0ECX_AVX512VBMI,	"AVX512VBMI" },
209	{ SEFF0ECX_UMIP,	"UMIP" },
210	{ SEFF0ECX_PKU,		"PKU" },
211	{ SEFF0ECX_WAITPKG,	"WAITPKG" },
212	{ SEFF0ECX_PKS,		"PKS" },
213}, cpu_seff0_edxfeatures[] = {
214	{ SEFF0EDX_AVX512_4FNNIW, "AVX512FNNIW" },
215	{ SEFF0EDX_AVX512_4FMAPS, "AVX512FMAPS" },
216	{ SEFF0EDX_SRBDS_CTRL,	"SRBDS_CTRL" },
217	{ SEFF0EDX_MD_CLEAR,	"MD_CLEAR" },
218	{ SEFF0EDX_TSXFA,	"TSXFA" },
219	{ SEFF0EDX_IBT,		"IBT" },
220	{ SEFF0EDX_IBRS,	"IBRS,IBPB" },
221	{ SEFF0EDX_STIBP,	"STIBP" },
222	{ SEFF0EDX_L1DF,	"L1DF" },
223	 /* SEFF0EDX_ARCH_CAP (not printed) */
224	{ SEFF0EDX_SSBD,	"SSBD" },
225}, cpu_tpm_eaxfeatures[] = {
226	{ TPM_SENSOR,		"SENSOR" },
227	{ TPM_ARAT,		"ARAT" },
228}, cpu_cpuid_perf_eax[] = {
229	{ CPUIDEAX_VERID,	"PERF" },
230}, cpu_cpuid_apmi_edx[] = {
231	{ CPUIDEDX_ITSC,	"ITSC" },
232}, cpu_amdspec_ebxfeatures[] = {
233	{ CPUIDEBX_IBPB,	"IBPB" },
234	{ CPUIDEBX_IBRS,	"IBRS" },
235	{ CPUIDEBX_STIBP,	"STIBP" },
236	{ CPUIDEBX_SSBD,	"SSBD" },
237	{ CPUIDEBX_VIRT_SSBD,	"VIRTSSBD" },
238	{ CPUIDEBX_SSBD_NOTREQ,	"SSBDNR" },
239}, cpu_xsave_extfeatures[] = {
240	{ XSAVE_XSAVEOPT,	"XSAVEOPT" },
241	{ XSAVE_XSAVEC,		"XSAVEC" },
242	{ XSAVE_XGETBV1,	"XGETBV1" },
243	{ XSAVE_XSAVES,		"XSAVES" },
244	{ XSAVE_XFD,		"XFD" },
245};
246
247int
248cpu_amd64speed(int *freq)
249{
250	*freq = cpuspeed;
251	return (0);
252}
253
#ifndef SMALL_KERNEL
/* Periodic sensor callbacks, registered from identifycpu() below. */
void	intelcore_update_sensor(void *);
void	cpu_hz_update_sensor(void *);
257
/*
 * Temperature read on the CPU is relative to the maximum
 * temperature supported by the CPU, Tj(Max).
 * Refer to:
 * 64-ia-32-architectures-software-developer-vol-3c-part-3-manual.pdf
 * Section 35 and
 * http://www.intel.com/content/dam/www/public/us/en/documents/
 * white-papers/cpu-monitoring-dts-peci-paper.pdf
 *
 * The temperature on Intel CPUs can be between 70 and 105 degC, since
 * Westmere we can read the TJmax from the die. For older CPUs we have
 * to guess or use undocumented MSRs. Then we subtract the temperature
 * portion of thermal status from max to get current temperature.
 */
void
intelcore_update_sensor(void *args)
{
	struct cpu_info *ci = (struct cpu_info *) args;
	u_int64_t msr;
	int max = 100;		/* default TjMax guess, degC */

	/* Only some Core family chips have MSR_TEMPERATURE_TARGET. */
	if (ci->ci_model == 0x0e &&
	    (rdmsr(MSR_TEMPERATURE_TARGET_UNDOCUMENTED) &
	     MSR_TEMPERATURE_TARGET_LOW_BIT_UNDOCUMENTED))
		max = 85;

	/*
	 * Newer CPUs can tell you what their max temperature is.
	 * See: '64-ia-32-architectures-software-developer-
	 * vol-3c-part-3-manual.pdf'
	 */
	if (ci->ci_model > 0x17 && ci->ci_model != 0x1c &&
	    ci->ci_model != 0x26 && ci->ci_model != 0x27 &&
	    ci->ci_model != 0x35 && ci->ci_model != 0x36)
		max = MSR_TEMPERATURE_TARGET_TJMAX(
		    rdmsr(MSR_TEMPERATURE_TARGET));

	/* Reading is (TjMax - current) degC; only valid when flagged so. */
	msr = rdmsr(MSR_THERM_STATUS);
	if (msr & MSR_THERM_STATUS_VALID_BIT) {
		ci->ci_sensor.value = max - MSR_THERM_STATUS_TEMP(msr);
		/* micro degrees */
		ci->ci_sensor.value *= 1000000;
		/* kelvin */
		ci->ci_sensor.value += 273150000;
		ci->ci_sensor.flags &= ~SENSOR_FINVALID;
	} else {
		ci->ci_sensor.value = 0;
		ci->ci_sensor.flags |= SENSOR_FINVALID;
	}
}
309
/*
 * Effective CPU frequency measurement
 *
 * Refer to:
 *   64-ia-32-architectures-software-developer-vol-3b-part-2-manual.pdf
 *   Section 14.2 and
 *   OSRR for AMD Family 17h processors Section 2.1.2
 * Round to 50Mhz which is the accuracy of this measurement.
 */
#define FREQ_50MHZ	(50ULL * 1000000ULL * 1000000ULL)
void
cpu_hz_update_sensor(void *args)
{
	extern uint64_t	 tsc_frequency;
	struct cpu_info	*ci = args;
	uint64_t	 mperf, aperf, mdelta, adelta, val;
	unsigned long	 s;

	/* MPERF/APERF are per-core; read them on the core being measured. */
	sched_peg_curproc(ci);

	/* Read the pair atomically with respect to interrupts. */
	s = intr_disable();
	mperf = rdmsr(MSR_MPERF);
	aperf = rdmsr(MSR_APERF);
	intr_restore(s);

	/* Deltas since the previous invocation of this callback. */
	mdelta = mperf - ci->ci_hz_mperf;
	adelta = aperf - ci->ci_hz_aperf;
	ci->ci_hz_mperf = mperf;
	ci->ci_hz_aperf = aperf;

	if (mdelta > 0) {
		/* Effective freq = (APERF/MPERF) * nominal TSC frequency. */
		val = (adelta * 1000000) / mdelta * tsc_frequency;
		/* Round to the nearest 50MHz, the measurement's accuracy. */
		val = ((val + FREQ_50MHZ / 2) / FREQ_50MHZ) * FREQ_50MHZ;
		ci->ci_hz_sensor.value = val;
	}

	/* Undo sched_peg_curproc(). */
	atomic_clearbits_int(&curproc->p_flag, P_CPUPEG);
}
348#endif
349
/* Hook for the cpufreq driver chosen in identifycpu() (est/powernow/k1x). */
void (*setperf_setup)(struct cpu_info *);

void via_nano_setup(struct cpu_info *ci);

void cpu_topology(struct cpu_info *ci);
355
/*
 * Probe and enable the VIA Nano PadLock units (RNG, AES, AES-CTR,
 * SHA, RSA/montgomery-multiplier) via CPUID leaf 0xC0000001, turning
 * each engine on through MSRs 0x110B/0x1107 when present but disabled.
 * Called as ci->cpu_setup for CentaurHauls CPUs.
 */
void
via_nano_setup(struct cpu_info *ci)
{
	u_int32_t regs[4], val;
	u_int64_t msreg;
	int model = (ci->ci_signature >> 4) & 15;

	/* PadLock extensions only exist on model 9 (Nano) and later. */
	if (model >= 9) {
		CPUID(0xC0000000, regs[0], regs[1], regs[2], regs[3]);
		val = regs[0];
		if (val >= 0xC0000001) {
			CPUID(0xC0000001, regs[0], regs[1], regs[2], regs[3]);
			val = regs[3];
		} else
			val = 0;

		if (val & (C3_CPUID_HAS_RNG | C3_CPUID_HAS_ACE))
			printf("%s:", ci->ci_dev->dv_xname);

		/* Enable RNG if present and disabled */
		if (val & C3_CPUID_HAS_RNG) {
			extern int viac3_rnd_present;

			if (!(val & C3_CPUID_DO_RNG)) {
				msreg = rdmsr(0x110B);
				msreg |= 0x40;
				wrmsr(0x110B, msreg);
			}
			viac3_rnd_present = 1;
			printf(" RNG");
		}

		/* Enable AES engine if present and disabled */
		if (val & C3_CPUID_HAS_ACE) {
#ifdef CRYPTO
			if (!(val & C3_CPUID_DO_ACE)) {
				msreg = rdmsr(0x1107);
				msreg |= (0x01 << 28);
				wrmsr(0x1107, msreg);
			}
			amd64_has_xcrypt |= C3_HAS_AES;
#endif /* CRYPTO */
			printf(" AES");
		}

		/* Enable ACE2 engine if present and disabled */
		if (val & C3_CPUID_HAS_ACE2) {
#ifdef CRYPTO
			if (!(val & C3_CPUID_DO_ACE2)) {
				msreg = rdmsr(0x1107);
				msreg |= (0x01 << 28);
				wrmsr(0x1107, msreg);
			}
			amd64_has_xcrypt |= C3_HAS_AESCTR;
#endif /* CRYPTO */
			printf(" AES-CTR");
		}

		/* Enable SHA engine if present and disabled */
		if (val & C3_CPUID_HAS_PHE) {
#ifdef CRYPTO
			if (!(val & C3_CPUID_DO_PHE)) {
				msreg = rdmsr(0x1107);
				msreg |= (0x01 << 28);
				wrmsr(0x1107, msreg);
			}
			amd64_has_xcrypt |= C3_HAS_SHA;
#endif /* CRYPTO */
			printf(" SHA1 SHA256");
		}

		/* Enable MM engine if present and disabled */
		if (val & C3_CPUID_HAS_PMM) {
#ifdef CRYPTO
			if (!(val & C3_CPUID_DO_PMM)) {
				msreg = rdmsr(0x1107);
				msreg |= (0x01 << 28);
				wrmsr(0x1107, msreg);
			}
			amd64_has_xcrypt |= C3_HAS_MM;
#endif /* CRYPTO */
			printf(" RSA");
		}

		printf("\n");
	}
}
443
#ifndef SMALL_KERNEL
void via_update_sensor(void *args);
/*
 * Periodic temperature sensor callback for VIA (CentaurHauls) CPUs:
 * read the die temperature MSR and publish it in micro-kelvin.
 */
void
via_update_sensor(void *args)
{
	struct cpu_info *ci = (struct cpu_info *) args;
	u_int64_t msr;

	/* Low 24 bits hold the temperature in degrees Celsius. */
	msr = rdmsr(MSR_CENT_TMTEMPERATURE);
	ci->ci_sensor.value = (msr & 0xffffff);
	/* micro degrees */
	ci->ci_sensor.value *= 1000000;
	ci->ci_sensor.value += 273150000;
	ci->ci_sensor.flags &= ~SENSOR_FINVALID;
}
#endif
460
461uint64_t
462cpu_freq_ctr(struct cpu_info *ci)
463{
464	uint64_t count, last_count, msr;
465
466	if ((ci->ci_flags & CPUF_CONST_TSC) == 0 ||
467	    (cpu_perf_eax & CPUIDEAX_VERID) <= 1 ||
468	    CPUIDEDX_NUM_FC(cpu_perf_edx) <= 1)
469		return (0);
470
471	msr = rdmsr(MSR_PERF_FIXED_CTR_CTRL);
472	if (msr & MSR_PERF_FIXED_CTR_FC(1, MSR_PERF_FIXED_CTR_FC_MASK)) {
473		/* some hypervisor is dicking us around */
474		return (0);
475	}
476
477	msr |= MSR_PERF_FIXED_CTR_FC(1, MSR_PERF_FIXED_CTR_FC_1);
478	wrmsr(MSR_PERF_FIXED_CTR_CTRL, msr);
479
480	msr = rdmsr(MSR_PERF_GLOBAL_CTRL) | MSR_PERF_GLOBAL_CTR1_EN;
481	wrmsr(MSR_PERF_GLOBAL_CTRL, msr);
482
483	last_count = rdmsr(MSR_PERF_FIXED_CTR1);
484	delay(100000);
485	count = rdmsr(MSR_PERF_FIXED_CTR1);
486
487	msr = rdmsr(MSR_PERF_FIXED_CTR_CTRL);
488	msr &= MSR_PERF_FIXED_CTR_FC(1, MSR_PERF_FIXED_CTR_FC_MASK);
489	wrmsr(MSR_PERF_FIXED_CTR_CTRL, msr);
490
491	msr = rdmsr(MSR_PERF_GLOBAL_CTRL);
492	msr &= ~MSR_PERF_GLOBAL_CTR1_EN;
493	wrmsr(MSR_PERF_GLOBAL_CTRL, msr);
494
495	return ((count - last_count) * 10);
496}
497
498uint64_t
499cpu_freq(struct cpu_info *ci)
500{
501	uint64_t last_count, count;
502
503	count = cpu_freq_ctr(ci);
504	if (count != 0)
505		return (count);
506
507	last_count = rdtsc();
508	delay(100000);
509	count = rdtsc();
510
511	return ((count - last_count) * 10);
512}
513
/*
 * Identify this CPU: query CPUID for signature, brand string, and
 * feature bits; print the dmesg identification lines; detect constant/
 * invariant TSC; hook up setperf, crypto capability flags, and the
 * temperature/frequency sensors; and derive the CPU topology.
 * Runs on every CPU; some globals are only set from the primary one.
 */
void
identifycpu(struct cpu_info *ci)
{
	uint64_t freq = 0;
	u_int32_t dummy, val, cpu_tpm_ecxflags = 0;
	char mycpu_model[48];
	int i;
	char *brandstr_from, *brandstr_to;
	int skipspace;

	/* Basic signature/features, and the extended-CPUID ceiling. */
	CPUID(1, ci->ci_signature, val, dummy, ci->ci_feature_flags);
	CPUID(0x80000000, ci->ci_pnfeatset, dummy, dummy, dummy);
	if (ci->ci_pnfeatset >= 0x80000001) {
		CPUID(0x80000001, ci->ci_efeature_eax, dummy,
		    ci->ci_efeature_ecx, ci->ci_feature_eflags);
		/* Other bits may clash */
		ci->ci_feature_flags |= (ci->ci_feature_eflags & CPUID_NXE);
		if (CPU_IS_PRIMARY(ci))
			ecpu_ecxfeature = ci->ci_efeature_ecx;
		/* Let cpu_feature be the common bits */
		cpu_feature &= ci->ci_feature_flags;
	}

	/* Brand string is returned 16 bytes at a time in leaves 2-4. */
	CPUID(0x80000002, ci->ci_brand[0],
	    ci->ci_brand[1], ci->ci_brand[2], ci->ci_brand[3]);
	CPUID(0x80000003, ci->ci_brand[4],
	    ci->ci_brand[5], ci->ci_brand[6], ci->ci_brand[7]);
	CPUID(0x80000004, ci->ci_brand[8],
	    ci->ci_brand[9], ci->ci_brand[10], ci->ci_brand[11]);
	strlcpy(mycpu_model, (char *)ci->ci_brand, sizeof(mycpu_model));

	/* Remove leading, trailing and duplicated spaces from mycpu_model */
	brandstr_from = brandstr_to = mycpu_model;
	skipspace = 1;
	while (*brandstr_from != '\0') {
		if (!skipspace || *brandstr_from != ' ') {
			skipspace = 0;
			*(brandstr_to++) = *brandstr_from;
		}
		if (*brandstr_from == ' ')
			skipspace = 1;
		brandstr_from++;
	}
	/* skipspace set here means the string ended with a space: trim it. */
	if (skipspace && brandstr_to > mycpu_model)
		brandstr_to--;
	*brandstr_to = '\0';

	if (mycpu_model[0] == 0)
		strlcpy(mycpu_model, "Opteron or Athlon 64",
		    sizeof(mycpu_model));

	/* If primary cpu, fill in the global cpu_model used by sysctl */
	if (CPU_IS_PRIMARY(ci))
		strlcpy(cpu_model, mycpu_model, sizeof(cpu_model));

	/* Family/model, folding in the extended fields where defined. */
	ci->ci_family = (ci->ci_signature >> 8) & 0x0f;
	ci->ci_model = (ci->ci_signature >> 4) & 0x0f;
	if (ci->ci_family == 0x6 || ci->ci_family == 0xf) {
		ci->ci_family += (ci->ci_signature >> 20) & 0xff;
		ci->ci_model += ((ci->ci_signature >> 16) & 0x0f) << 4;
	}

#if NPVBUS > 0
	/* Detect hypervisors early, attach the paravirtual bus later */
	if (CPU_IS_PRIMARY(ci) && cpu_ecxfeature & CPUIDECX_HV)
		pvbus_identify();
#endif

	if (ci->ci_feature_flags && ci->ci_feature_flags & CPUID_TSC) {
		/* Has TSC, check if it's constant */
		if (!strcmp(cpu_vendor, "GenuineIntel")) {
			if ((ci->ci_family == 0x0f && ci->ci_model >= 0x03) ||
			    (ci->ci_family == 0x06 && ci->ci_model >= 0x0e)) {
				atomic_setbits_int(&ci->ci_flags, CPUF_CONST_TSC);
			}
		} else if (!strcmp(cpu_vendor, "CentaurHauls")) {
			/* VIA */
			if (ci->ci_model >= 0x0f) {
				atomic_setbits_int(&ci->ci_flags, CPUF_CONST_TSC);
			}
		} else if (!strcmp(cpu_vendor, "AuthenticAMD")) {
			if (cpu_apmi_edx & CPUIDEDX_ITSC) {
				/* Invariant TSC indicates constant TSC on AMD */
				atomic_setbits_int(&ci->ci_flags, CPUF_CONST_TSC);
			}
		}

		/* Check if it's an invariant TSC */
		if (cpu_apmi_edx & CPUIDEDX_ITSC)
			atomic_setbits_int(&ci->ci_flags, CPUF_INVAR_TSC);

		tsc_identify(ci);
	}

	freq = cpu_freq(ci);

	printf("%s: %s", ci->ci_dev->dv_xname, mycpu_model);

	/* +4999 rounds the Hz measurement to the nearest 0.01 MHz. */
	if (freq != 0)
		printf(", %llu.%02llu MHz", (freq + 4999) / 1000000,
		    ((freq + 4999) / 10000) % 100);

	if (CPU_IS_PRIMARY(ci)) {
		cpuspeed = (freq + 4999) / 1000000;
		cpu_cpuspeed = cpu_amd64speed;
	}

	printf(", %02x-%02x-%02x", ci->ci_family, ci->ci_model,
	    ci->ci_signature & 0x0f);

	printf("\n%s: ", ci->ci_dev->dv_xname);

	/* Print every recognized feature bit, comma separated. */
	for (i = 0; i < nitems(cpu_cpuid_features); i++)
		if (ci->ci_feature_flags & cpu_cpuid_features[i].bit)
			printf("%s%s", i? "," : "", cpu_cpuid_features[i].str);
	for (i = 0; i < nitems(cpu_cpuid_ecxfeatures); i++)
		if (cpu_ecxfeature & cpu_cpuid_ecxfeatures[i].bit)
			printf(",%s", cpu_cpuid_ecxfeatures[i].str);
	for (i = 0; i < nitems(cpu_ecpuid_features); i++)
		if (ci->ci_feature_eflags & cpu_ecpuid_features[i].bit)
			printf(",%s", cpu_ecpuid_features[i].str);
	for (i = 0; i < nitems(cpu_ecpuid_ecxfeatures); i++)
		if (ecpu_ecxfeature & cpu_ecpuid_ecxfeatures[i].bit)
			printf(",%s", cpu_ecpuid_ecxfeatures[i].str);
	for (i = 0; i < nitems(cpu_cpuid_perf_eax); i++)
		if (cpu_perf_eax & cpu_cpuid_perf_eax[i].bit)
			printf(",%s", cpu_cpuid_perf_eax[i].str);
	for (i = 0; i < nitems(cpu_cpuid_apmi_edx); i++)
		if (cpu_apmi_edx & cpu_cpuid_apmi_edx[i].bit)
			printf(",%s", cpu_cpuid_apmi_edx[i].str);

	if (cpuid_level >= 0x07) {
		/* "Structured Extended Feature Flags" */
		CPUID_LEAF(0x7, 0, dummy, ci->ci_feature_sefflags_ebx,
		    ci->ci_feature_sefflags_ecx, ci->ci_feature_sefflags_edx);
		for (i = 0; i < nitems(cpu_seff0_ebxfeatures); i++)
			if (ci->ci_feature_sefflags_ebx &
			    cpu_seff0_ebxfeatures[i].bit)
				printf(",%s", cpu_seff0_ebxfeatures[i].str);
		for (i = 0; i < nitems(cpu_seff0_ecxfeatures); i++)
			if (ci->ci_feature_sefflags_ecx &
			    cpu_seff0_ecxfeatures[i].bit)
				printf(",%s", cpu_seff0_ecxfeatures[i].str);
		for (i = 0; i < nitems(cpu_seff0_edxfeatures); i++)
			if (ci->ci_feature_sefflags_edx &
			    cpu_seff0_edxfeatures[i].bit)
				printf(",%s", cpu_seff0_edxfeatures[i].str);
	}

	/* Thermal/power-management leaf 0x06. */
	if (!strcmp(cpu_vendor, "GenuineIntel") && cpuid_level >= 0x06) {
		CPUID(0x06, ci->ci_feature_tpmflags, dummy, cpu_tpm_ecxflags,
		    dummy);
		for (i = 0; i < nitems(cpu_tpm_eaxfeatures); i++)
			if (ci->ci_feature_tpmflags &
			    cpu_tpm_eaxfeatures[i].bit)
				printf(",%s", cpu_tpm_eaxfeatures[i].str);
	} else if (!strcmp(cpu_vendor, "AuthenticAMD")) {
		CPUID(0x06, ci->ci_feature_tpmflags, dummy, cpu_tpm_ecxflags,
		    dummy);
		if (ci->ci_family >= 0x12)
			ci->ci_feature_tpmflags |= TPM_ARAT;
	}

	/* AMD speculation control features */
	if (!strcmp(cpu_vendor, "AuthenticAMD")) {
		if (ci->ci_pnfeatset >= 0x80000008) {
			CPUID(0x80000008, dummy, ci->ci_feature_amdspec_ebx,
			    dummy, dummy);
			for (i = 0; i < nitems(cpu_amdspec_ebxfeatures); i++)
				if (ci->ci_feature_amdspec_ebx &
				    cpu_amdspec_ebxfeatures[i].bit)
					printf(",%s",
					    cpu_amdspec_ebxfeatures[i].str);
		}
	}

	/* xsave subfeatures */
	if (cpuid_level >= 0xd) {
		CPUID_LEAF(0xd, 1, val, dummy, dummy, dummy);
		for (i = 0; i < nitems(cpu_xsave_extfeatures); i++)
			if (val & cpu_xsave_extfeatures[i].bit)
				printf(",%s", cpu_xsave_extfeatures[i].str);
	}

	if (cpu_meltdown)
		printf(",MELTDOWN");

	printf("\n");

	replacemeltdown();
	x86_print_cacheinfo(ci);

	if (CPU_IS_PRIMARY(ci)) {
#ifndef SMALL_KERNEL
		/* Pick a setperf backend: PowerNow/K1x (AMD) or EST. */
		if (!strcmp(cpu_vendor, "AuthenticAMD") &&
		    ci->ci_pnfeatset >= 0x80000007) {
			CPUID(0x80000007, dummy, dummy, dummy, val);

			if (val & 0x06) {
				if ((ci->ci_signature & 0xF00) == 0xF00)
					setperf_setup = k8_powernow_init;
			}
			if (ci->ci_family >= 0x10)
				setperf_setup = k1x_init;
		}

		if (cpu_ecxfeature & CPUIDECX_EST)
			setperf_setup = est_init;
#endif

		if (cpu_ecxfeature & CPUIDECX_RDRAND)
			has_rdrand = 1;

		if (ci->ci_feature_sefflags_ebx & SEFF0EBX_RDSEED)
			has_rdseed = 1;

		if (ci->ci_feature_sefflags_ebx & SEFF0EBX_SMAP)
			replacesmap();
	}

	if (ci->ci_feature_flags & CPUID_CFLUSH) {
		u_int32_t cflushsz;

		CPUID(0x01, dummy, cflushsz, dummy, dummy);
		/* cflush cacheline size is equal to bits 15-8 of ebx * 8 */
		ci->ci_cflushsz = ((cflushsz >> 8) & 0xff) * 8;
	}

#ifndef SMALL_KERNEL
	if (CPU_IS_PRIMARY(ci) && (ci->ci_feature_tpmflags & TPM_SENSOR)) {
		ci->ci_sensor.type = SENSOR_TEMP;
		sensor_task_register(ci, intelcore_update_sensor, 5);
		sensor_attach(&ci->ci_sensordev, &ci->ci_sensor);
	}
#endif

#ifdef CRYPTO
	if (CPU_IS_PRIMARY(ci)) {
		if (cpu_ecxfeature & CPUIDECX_PCLMUL)
			amd64_has_pclmul = 1;

		if (cpu_ecxfeature & CPUIDECX_AES)
			amd64_has_aesni = 1;
	}
#endif

	if (CPU_IS_PRIMARY(ci) && !strcmp(cpu_vendor, "CentaurHauls")) {
		ci->cpu_setup = via_nano_setup;
#ifndef SMALL_KERNEL
		ci->ci_sensor.type = SENSOR_TEMP;
		sensor_task_register(ci, via_update_sensor, 5);
		sensor_attach(&ci->ci_sensordev, &ci->ci_sensor);
#endif
	}

	tsc_timecounter_init(ci, freq);

	cpu_topology(ci);
#if NVMM > 0
	cpu_check_vmm_cap(ci);
#endif /* NVMM > 0 */

	/* Check for effective frequency via MPERF, APERF */
	if ((cpu_tpm_ecxflags & TPM_EFFFREQ) && ci->ci_smt_id == 0) {
#ifndef SMALL_KERNEL
		ci->ci_hz_sensor.type = SENSOR_FREQ;
		sensor_task_register(ci, cpu_hz_update_sensor, 1);
		sensor_attach(&ci->ci_sensordev, &ci->ci_hz_sensor);
#endif
	}
}
785
786#ifndef SMALL_KERNEL
787/*
788 * Base 2 logarithm of an int. returns 0 for 0 (yeye, I know).
789 */
static int
log2(unsigned int i)
{
	int width = 0;

	/* Count how many times i can be halved before reaching zero. */
	for (i >>= 1; i != 0; i >>= 1)
		width++;

	return (width);
}
800
801static int
802mask_width(u_int x)
803{
804	int bit;
805	int mask;
806	int powerof2;
807
808	powerof2 = ((x - 1) & x) == 0;
809	mask = (x << (1 - powerof2)) - 1;
810
811	/* fls */
812	if (mask == 0)
813		return (0);
814	for (bit = 1; mask != 1; bit++)
815		mask = (unsigned int)mask >> 1;
816
817	return (bit);
818}
819#endif
820
/*
 * Build up cpu topology for given cpu, must run on the core itself.
 * Decomposes the initial APIC id into ci_smt_id / ci_core_id /
 * ci_pkg_id using the vendor-specific CPUID topology leaves; falls
 * back to a flat one-core-per-package layout when that fails.
 */
void
cpu_topology(struct cpu_info *ci)
{
#ifndef SMALL_KERNEL
	u_int32_t eax, ebx, ecx, edx;
	u_int32_t apicid, max_apicid = 0, max_coreid = 0;
	u_int32_t smt_bits = 0, core_bits, pkg_bits = 0;
	u_int32_t smt_mask = 0, core_mask, pkg_mask = 0;

	/* We need at least apicid at CPUID 1 */
	if (cpuid_level < 1)
		goto no_topology;

	/* Initial apicid */
	CPUID(1, eax, ebx, ecx, edx);
	apicid = (ebx >> 24) & 0xff;

	if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
		uint32_t nthreads = 1; /* per core */
		uint32_t thread_id; /* within a package */

		/* We need at least apicid at CPUID 0x80000008 */
		if (ci->ci_pnfeatset < 0x80000008)
			goto no_topology;

		CPUID(0x80000008, eax, ebx, ecx, edx);
		core_bits = (ecx >> 12) & 0xf;

		/* Leaf 0x8000001e gives threads-per-core when available. */
		if (ci->ci_pnfeatset >= 0x8000001e) {
			CPUID(0x8000001e, eax, ebx, ecx, edx);
			nthreads = ((ebx >> 8) & 0xf) + 1;
		}

		/* Shift the core_bits off to get at the pkg bits */
		ci->ci_pkg_id = apicid >> core_bits;

		/* Get rid of the package bits */
		core_mask = (1U << core_bits) - 1;
		thread_id = apicid & core_mask;

		/* Cut logical thread_id into core id, and smt id in a core */
		ci->ci_core_id = thread_id / nthreads;
		ci->ci_smt_id = thread_id % nthreads;
	} else if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
		/* We only support leaf 1/4 detection */
		if (cpuid_level < 4)
			goto no_topology;
		/* Get max_apicid */
		CPUID(1, eax, ebx, ecx, edx);
		max_apicid = (ebx >> 16) & 0xff;
		/* Get max_coreid */
		CPUID_LEAF(4, 0, eax, ebx, ecx, edx);
		max_coreid = ((eax >> 26) & 0x3f) + 1;
		/* SMT */
		smt_bits = mask_width(max_apicid / max_coreid);
		smt_mask = (1U << smt_bits) - 1;
		/* Core */
		core_bits = log2(max_coreid);
		core_mask = (1U << (core_bits + smt_bits)) - 1;
		core_mask ^= smt_mask;
		/* Pkg */
		pkg_bits = core_bits + smt_bits;
		pkg_mask = ~0U << core_bits;

		ci->ci_smt_id = apicid & smt_mask;
		ci->ci_core_id = (apicid & core_mask) >> smt_bits;
		ci->ci_pkg_id = (apicid & pkg_mask) >> pkg_bits;
	} else
		goto no_topology;
#ifdef DEBUG
	printf("cpu%d: smt %u, core %u, pkg %u "
		"(apicid 0x%x, max_apicid 0x%x, max_coreid 0x%x, smt_bits 0x%x, smt_mask 0x%x, "
		"core_bits 0x%x, core_mask 0x%x, pkg_bits 0x%x, pkg_mask 0x%x)\n",
		ci->ci_cpuid, ci->ci_smt_id, ci->ci_core_id, ci->ci_pkg_id,
		apicid, max_apicid, max_coreid, smt_bits, smt_mask, core_bits,
		core_mask, pkg_bits, pkg_mask);
#else
	printf("cpu%d: smt %u, core %u, package %u\n", ci->ci_cpuid,
		ci->ci_smt_id, ci->ci_core_id, ci->ci_pkg_id);

#endif
	return;
	/* We can't map, so consider ci_core_id as ci_cpuid */
no_topology:
#endif
	ci->ci_smt_id  = 0;
	ci->ci_core_id = ci->ci_cpuid;
	ci->ci_pkg_id  = 0;
}
913
914#if NVMM > 0
915/*
916 * cpu_check_vmm_cap
917 *
918 * Checks for VMM capabilities for 'ci'. Initializes certain per-cpu VMM
919 * state in 'ci' if virtualization extensions are found.
920 *
921 * Parameters:
922 *  ci: the cpu being checked
923 */
void
cpu_check_vmm_cap(struct cpu_info *ci)
{
	uint64_t msr;
	uint32_t cap, dummy, edx;

	/*
	 * Check for workable VMX
	 */
	if (cpu_ecxfeature & CPUIDECX_VMX) {
		msr = rdmsr(MSR_IA32_FEATURE_CONTROL);

		/* Unlocked: we can still enable VMX ourselves. */
		if (!(msr & IA32_FEATURE_CONTROL_LOCK))
			ci->ci_vmm_flags |= CI_VMM_VMX;
		else {
			/* Locked: usable only if firmware left VMX enabled. */
			if (msr & IA32_FEATURE_CONTROL_VMX_EN)
				ci->ci_vmm_flags |= CI_VMM_VMX;
			else
				ci->ci_vmm_flags |= CI_VMM_DIS;
		}
	}

	/*
	 * Check for EPT (Intel Nested Paging) and other secondary
	 * controls
	 */
	if (ci->ci_vmm_flags & CI_VMM_VMX) {
		/* Secondary controls available? */
		/* XXX should we check true procbased ctls here if avail? */
		/* The "allowed-1" settings live in the high 32 bits. */
		msr = rdmsr(IA32_VMX_PROCBASED_CTLS);
		if (msr & (IA32_VMX_ACTIVATE_SECONDARY_CONTROLS) << 32) {
			msr = rdmsr(IA32_VMX_PROCBASED2_CTLS);
			/* EPT available? */
			if (msr & (IA32_VMX_ENABLE_EPT) << 32)
				ci->ci_vmm_flags |= CI_VMM_EPT;
			/* VM Functions available? */
			if (msr & (IA32_VMX_ENABLE_VM_FUNCTIONS) << 32) {
				ci->ci_vmm_cap.vcc_vmx.vmx_vm_func =
				    rdmsr(IA32_VMX_VMFUNC);
			}
		}
	}

	/*
	 * Check startup config (VMX)
	 */
	if (ci->ci_vmm_flags & CI_VMM_VMX) {
		/* CR0 fixed and flexible bits */
		msr = rdmsr(IA32_VMX_CR0_FIXED0);
		ci->ci_vmm_cap.vcc_vmx.vmx_cr0_fixed0 = msr;
		msr = rdmsr(IA32_VMX_CR0_FIXED1);
		ci->ci_vmm_cap.vcc_vmx.vmx_cr0_fixed1 = msr;

		/* CR4 fixed and flexible bits */
		msr = rdmsr(IA32_VMX_CR4_FIXED0);
		ci->ci_vmm_cap.vcc_vmx.vmx_cr4_fixed0 = msr;
		msr = rdmsr(IA32_VMX_CR4_FIXED1);
		ci->ci_vmm_cap.vcc_vmx.vmx_cr4_fixed1 = msr;

		/* VMXON region revision ID (bits 30:0 of IA32_VMX_BASIC) */
		msr = rdmsr(IA32_VMX_BASIC);
		ci->ci_vmm_cap.vcc_vmx.vmx_vmxon_revision =
			(uint32_t)(msr & 0x7FFFFFFF);

		/* MSR save / load table size */
		msr = rdmsr(IA32_VMX_MISC);
		ci->ci_vmm_cap.vcc_vmx.vmx_msr_table_size =
			(uint32_t)(msr & IA32_VMX_MSR_LIST_SIZE_MASK) >> 25;

		/* CR3 target count size */
		ci->ci_vmm_cap.vcc_vmx.vmx_cr3_tgt_count =
			(uint32_t)(msr & IA32_VMX_CR3_TGT_SIZE_MASK) >> 16;
	}

	/*
	 * Check for workable SVM
	 */
	if (ecpu_ecxfeature & CPUIDECX_SVM) {
		msr = rdmsr(MSR_AMD_VM_CR);

		if (!(msr & AMD_SVMDIS))
			ci->ci_vmm_flags |= CI_VMM_SVM;

		CPUID(CPUID_AMD_SVM_CAP, dummy,
		    ci->ci_vmm_cap.vcc_svm.svm_max_asid, dummy, edx);

		/* Cap the ASID space we will use. */
		if (ci->ci_vmm_cap.vcc_svm.svm_max_asid > 0xFFF)
			ci->ci_vmm_cap.vcc_svm.svm_max_asid = 0xFFF;

		if (edx & AMD_SVM_FLUSH_BY_ASID_CAP)
			ci->ci_vmm_cap.vcc_svm.svm_flush_by_asid = 1;

		if (edx & AMD_SVM_VMCB_CLEAN_CAP)
			ci->ci_vmm_cap.vcc_svm.svm_vmcb_clean = 1;

		if (edx & AMD_SVM_DECODE_ASSIST_CAP)
			ci->ci_vmm_cap.vcc_svm.svm_decode_assist = 1;
	}

	/*
	 * Check for SVM Nested Paging
	 */
	if ((ci->ci_vmm_flags & CI_VMM_SVM) &&
	    ci->ci_pnfeatset >= CPUID_AMD_SVM_CAP) {
		CPUID(CPUID_AMD_SVM_CAP, dummy, dummy, dummy, cap);
		if (cap & AMD_SVM_NESTED_PAGING_CAP)
			ci->ci_vmm_flags |= CI_VMM_RVI;
	}

	/*
	 * Check "L1 flush on VM entry" (Intel L1TF vuln) semantics
	 * Full details can be found here:
	 * https://software.intel.com/security-software-guidance/insights/deep-dive-intel-analysis-l1-terminal-fault
	 */
	if (!strcmp(cpu_vendor, "GenuineIntel")) {
		if (ci->ci_feature_sefflags_edx & SEFF0EDX_L1DF)
			ci->ci_vmm_cap.vcc_vmx.vmx_has_l1_flush_msr = 1;
		else
			ci->ci_vmm_cap.vcc_vmx.vmx_has_l1_flush_msr = 0;

		/*
		 * Certain CPUs may have the vulnerability remedied in
		 * hardware (RDCL_NO), or we may be nested in an VMM that
		 * is doing flushes (SKIP_L1DFL_VMENTRY) using the MSR.
		 * In either case no mitigation at all is necessary.
		 */
		if (ci->ci_feature_sefflags_edx & SEFF0EDX_ARCH_CAP) {
			msr = rdmsr(MSR_ARCH_CAPABILITIES);
			if ((msr & ARCH_CAP_RDCL_NO) ||
			    ((msr & ARCH_CAP_SKIP_L1DFL_VMENTRY) &&
			    ci->ci_vmm_cap.vcc_vmx.vmx_has_l1_flush_msr))
				ci->ci_vmm_cap.vcc_vmx.vmx_has_l1_flush_msr =
				    VMX_SKIP_L1D_FLUSH;
		}
	}
}
1060#endif /* NVMM > 0 */
1061