/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1989, 2010, Oracle and/or its affiliates. All rights reserved.
24189251Ssam */ 25189251Ssam 26189251Ssam#ifndef _SYS_CPUVAR_H 27189251Ssam#define _SYS_CPUVAR_H 28189251Ssam 29189251Ssam#include <sys/thread.h> 30189251Ssam#include <sys/sysinfo.h> /* has cpu_stat_t definition */ 31189251Ssam#include <sys/disp.h> 32189251Ssam#include <sys/processor.h> 33189251Ssam 34189251Ssam#if (defined(_KERNEL) || defined(_KMEMUSER)) && defined(_MACHDEP) 35189251Ssam#include <sys/machcpuvar.h> 36189251Ssam#endif 37189251Ssam 38189251Ssam#include <sys/types.h> 39189251Ssam#include <sys/file.h> 40189251Ssam#include <sys/bitmap.h> 41189251Ssam#include <sys/rwlock.h> 42189251Ssam#include <sys/msacct.h> 43189251Ssam#if defined(__GNUC__) && defined(_ASM_INLINES) && defined(_KERNEL) && \ 44189251Ssam (defined(__i386) || defined(__amd64)) 45189251Ssam#include <asm/cpuvar.h> 46189251Ssam#endif 47189251Ssam 48189251Ssam#ifdef __cplusplus 49189251Ssamextern "C" { 50189251Ssam#endif 51189251Ssam 52189251Ssamstruct squeue_set_s; 53189251Ssam 54189251Ssam#define CPU_CACHE_COHERENCE_SIZE 64 55189251Ssam#define S_LOADAVG_SZ 11 56189251Ssam#define S_MOVAVG_SZ 10 57189251Ssam 58189251Ssamstruct loadavg_s { 59189251Ssam int lg_cur; /* current loadavg entry */ 60189251Ssam unsigned int lg_len; /* number entries recorded */ 61189251Ssam hrtime_t lg_total; /* used to temporarily hold load totals */ 62189251Ssam hrtime_t lg_loads[S_LOADAVG_SZ]; /* table of recorded entries */ 63189251Ssam}; 64189251Ssam 65189251Ssam/* 66189251Ssam * For fast event tracing. 
67189251Ssam */ 68189251Ssamstruct ftrace_record; 69252726Srpaulotypedef struct ftrace_data { 70189251Ssam int ftd_state; /* ftrace flags */ 71252726Srpaulo kmutex_t ftd_unused; /* ftrace buffer lock, unused */ 72252726Srpaulo struct ftrace_record *ftd_cur; /* current record */ 73252726Srpaulo struct ftrace_record *ftd_first; /* first record */ 74252726Srpaulo struct ftrace_record *ftd_last; /* last record */ 75252726Srpaulo} ftrace_data_t; 76252726Srpaulo 77189251Ssamstruct cyc_cpu; 78189251Ssamstruct nvlist; 79189251Ssam 80189251Ssam/* 81252726Srpaulo * Per-CPU data. 82189251Ssam * 83189251Ssam * Be careful adding new members: if they are not the same in all modules (e.g. 84189251Ssam * change size depending on a #define), CTF uniquification can fail to work 85189251Ssam * properly. Furthermore, this is transitive in that it applies recursively to 86189251Ssam * all types pointed to by cpu_t. 87189251Ssam */ 88189251Ssamtypedef struct cpu { 89189251Ssam processorid_t cpu_id; /* CPU number */ 90189251Ssam processorid_t cpu_seqid; /* sequential CPU id (0..ncpus-1) */ 91189251Ssam volatile cpu_flag_t cpu_flags; /* flags indicating CPU state */ 92189251Ssam struct cpu *cpu_self; /* pointer to itself */ 93189251Ssam kthread_t *cpu_thread; /* current thread */ 94189251Ssam kthread_t *cpu_idle_thread; /* idle thread for this CPU */ 95189251Ssam kthread_t *cpu_pause_thread; /* pause thread for this CPU */ 96189251Ssam klwp_id_t cpu_lwp; /* current lwp (if any) */ 97189251Ssam klwp_id_t cpu_fpowner; /* currently loaded fpu owner */ 98189251Ssam struct cpupart *cpu_part; /* partition with this CPU */ 99189251Ssam struct lgrp_ld *cpu_lpl; /* pointer to this cpu's load */ 100189251Ssam int cpu_cache_offset; /* see kmem.c for details */ 101189251Ssam 102189251Ssam /* 103189251Ssam * Links to other CPUs. 
It is safe to walk these lists if 104189251Ssam * one of the following is true: 105189251Ssam * - cpu_lock held 106189251Ssam * - preemption disabled via kpreempt_disable 107189251Ssam * - PIL >= DISP_LEVEL 108189251Ssam * - acting thread is an interrupt thread 109189251Ssam * - all other CPUs are paused 110189251Ssam */ 111189251Ssam struct cpu *cpu_next; /* next existing CPU */ 112189251Ssam struct cpu *cpu_prev; /* prev existing CPU */ 113189251Ssam struct cpu *cpu_next_onln; /* next online (enabled) CPU */ 114189251Ssam struct cpu *cpu_prev_onln; /* prev online (enabled) CPU */ 115189251Ssam struct cpu *cpu_next_part; /* next CPU in partition */ 116189251Ssam struct cpu *cpu_prev_part; /* prev CPU in partition */ 117189251Ssam struct cpu *cpu_next_lgrp; /* next CPU in latency group */ 118189251Ssam struct cpu *cpu_prev_lgrp; /* prev CPU in latency group */ 119189251Ssam struct cpu *cpu_next_lpl; /* next CPU in lgrp partition */ 120189251Ssam struct cpu *cpu_prev_lpl; 121189251Ssam 122189251Ssam struct cpu_pg *cpu_pg; /* cpu's processor groups */ 123189251Ssam 124189251Ssam void *cpu_reserved[4]; /* reserved for future use */ 125189251Ssam 126189251Ssam /* 127189251Ssam * Scheduling variables. 128189251Ssam */ 129189251Ssam disp_t *cpu_disp; /* dispatch queue data */ 130189251Ssam /* 131189251Ssam * Note that cpu_disp is set before the CPU is added to the system 132189251Ssam * and is never modified. Hence, no additional locking is needed 133189251Ssam * beyond what's necessary to access the cpu_t structure. 
134189251Ssam */ 135189251Ssam char cpu_runrun; /* scheduling flag - set to preempt */ 136189251Ssam char cpu_kprunrun; /* force kernel preemption */ 137189251Ssam pri_t cpu_chosen_level; /* priority at which cpu */ 138189251Ssam /* was chosen for scheduling */ 139189251Ssam kthread_t *cpu_dispthread; /* thread selected for dispatch */ 140189251Ssam disp_lock_t cpu_thread_lock; /* dispatcher lock on current thread */ 141189251Ssam uint8_t cpu_disp_flags; /* flags used by dispatcher */ 142189251Ssam /* 143189251Ssam * The following field is updated when ever the cpu_dispthread 144189251Ssam * changes. Also in places, where the current thread(cpu_dispthread) 145189251Ssam * priority changes. This is used in disp_lowpri_cpu() 146189251Ssam */ 147189251Ssam pri_t cpu_dispatch_pri; /* priority of cpu_dispthread */ 148189251Ssam clock_t cpu_last_swtch; /* last time switched to new thread */ 149189251Ssam 150189251Ssam /* 151189251Ssam * Interrupt data. 152189251Ssam */ 153189251Ssam caddr_t cpu_intr_stack; /* interrupt stack */ 154189251Ssam kthread_t *cpu_intr_thread; /* interrupt thread list */ 155189251Ssam uint_t cpu_intr_actv; /* interrupt levels active (bitmask) */ 156189251Ssam int cpu_base_spl; /* priority for highest rupt active */ 157189251Ssam 158189251Ssam /* 159189251Ssam * Statistics. 
160189251Ssam */ 161189251Ssam cpu_stats_t cpu_stats; /* per-CPU statistics */ 162189251Ssam struct kstat *cpu_info_kstat; /* kstat for cpu info */ 163189251Ssam 164189251Ssam uintptr_t cpu_profile_pc; /* kernel PC in profile interrupt */ 165189251Ssam uintptr_t cpu_profile_upc; /* user PC in profile interrupt */ 166189251Ssam uintptr_t cpu_profile_pil; /* PIL when profile interrupted */ 167189251Ssam 168189251Ssam ftrace_data_t cpu_ftrace; /* per cpu ftrace data */ 169189251Ssam 170189251Ssam clock_t cpu_deadman_counter; /* used by deadman() */ 171189251Ssam uint_t cpu_deadman_countdown; /* used by deadman() */ 172189251Ssam 173189251Ssam kmutex_t cpu_cpc_ctxlock; /* protects context for idle thread */ 174189251Ssam kcpc_ctx_t *cpu_cpc_ctx; /* performance counter context */ 175189251Ssam 176189251Ssam /* 177189251Ssam * Configuration information for the processor_info system call. 178189251Ssam */ 179189251Ssam processor_info_t cpu_type_info; /* config info */ 180189251Ssam time_t cpu_state_begin; /* when CPU entered current state */ 181189251Ssam char cpu_cpr_flags; /* CPR related info */ 182189251Ssam struct cyc_cpu *cpu_cyclic; /* per cpu cyclic subsystem data */ 183189251Ssam struct squeue_set_s *cpu_squeue_set; /* per cpu squeue set */ 184189251Ssam struct nvlist *cpu_props; /* pool-related properties */ 185189251Ssam 186189251Ssam krwlock_t cpu_ft_lock; /* DTrace: fasttrap lock */ 187189251Ssam uintptr_t cpu_dtrace_caller; /* DTrace: caller, if any */ 188189251Ssam hrtime_t cpu_dtrace_chillmark; /* DTrace: chill mark time */ 189189251Ssam hrtime_t cpu_dtrace_chilled; /* DTrace: total chill time */ 190189251Ssam volatile uint16_t cpu_mstate; /* cpu microstate */ 191189251Ssam volatile uint16_t cpu_mstate_gen; /* generation counter */ 192189251Ssam volatile hrtime_t cpu_mstate_start; /* cpu microstate start time */ 193189251Ssam volatile hrtime_t cpu_acct[NCMSTATES]; /* cpu microstate data */ 194189251Ssam hrtime_t cpu_intracct[NCMSTATES]; /* interrupt mstate 
data */ 195189251Ssam hrtime_t cpu_waitrq; /* cpu run-queue wait time */ 196189251Ssam struct loadavg_s cpu_loadavg; /* loadavg info for this cpu */ 197189251Ssam 198189251Ssam char *cpu_idstr; /* for printing and debugging */ 199189251Ssam char *cpu_brandstr; /* for printing */ 200189251Ssam 201189251Ssam /* 202189251Ssam * Sum of all device interrupt weights that are currently directed at 203189251Ssam * this cpu. Cleared at start of interrupt redistribution. 204189251Ssam */ 205189251Ssam int32_t cpu_intr_weight; 206189251Ssam void *cpu_vm_data; 207189251Ssam 208189251Ssam struct cpu_physid *cpu_physid; /* physical associations */ 209189251Ssam 210189251Ssam uint64_t cpu_curr_clock; /* current clock freq in Hz */ 211189251Ssam char *cpu_supp_freqs; /* supported freqs in Hz */ 212189251Ssam 213189251Ssam uintptr_t cpu_cpcprofile_pc; /* kernel PC in cpc interrupt */ 214189251Ssam uintptr_t cpu_cpcprofile_upc; /* user PC in cpc interrupt */ 215189251Ssam 216189251Ssam /* 217189251Ssam * Interrupt load factor used by dispatcher & softcall 218189251Ssam */ 219189251Ssam hrtime_t cpu_intrlast; /* total interrupt time (nsec) */ 220189251Ssam int cpu_intrload; /* interrupt load factor (0-99%) */ 221189251Ssam 222189251Ssam uint_t cpu_rotor; /* for cheap pseudo-random numbers */ 223189251Ssam 224189251Ssam struct cu_cpu_info *cpu_cu_info; /* capacity & util. info */ 225189251Ssam 226189251Ssam /* 227189251Ssam * cpu_generation is updated whenever CPU goes on-line or off-line. 228189251Ssam * Updates to cpu_generation are protected by cpu_lock. 229189251Ssam * 230189251Ssam * See CPU_NEW_GENERATION() macro below. 
231189251Ssam */ 232189251Ssam volatile uint_t cpu_generation; /* tracking on/off-line */ 233189251Ssam 234189251Ssam /* 235189251Ssam * New members must be added /before/ this member, as the CTF tools 236189251Ssam * rely on this being the last field before cpu_m, so they can 237189251Ssam * correctly calculate the offset when synthetically adding the cpu_m 238189251Ssam * member in objects that do not have it. This fixup is required for 239189251Ssam * uniquification to work correctly. 240189251Ssam */ 241189251Ssam uintptr_t cpu_m_pad; 242189251Ssam 243189251Ssam#if (defined(_KERNEL) || defined(_KMEMUSER)) && defined(_MACHDEP) 244189251Ssam struct machcpu cpu_m; /* per architecture info */ 245189251Ssam#endif 246189251Ssam} cpu_t; 247189251Ssam 248189251Ssam/* 249189251Ssam * The cpu_core structure consists of per-CPU state available in any context. 250189251Ssam * On some architectures, this may mean that the page(s) containing the 251189251Ssam * NCPU-sized array of cpu_core structures must be locked in the TLB -- it 252189251Ssam * is up to the platform to assure that this is performed properly. Note that 253189251Ssam * the structure is sized to avoid false sharing. 
254189251Ssam */ 255189251Ssam#define CPUC_SIZE (sizeof (uint16_t) + sizeof (uint8_t) + \ 256189251Ssam sizeof (uintptr_t) + sizeof (kmutex_t)) 257189251Ssam#define CPUC_PADSIZE CPU_CACHE_COHERENCE_SIZE - CPUC_SIZE 258189251Ssam 259189251Ssamtypedef struct cpu_core { 260189251Ssam uint16_t cpuc_dtrace_flags; /* DTrace flags */ 261189251Ssam uint8_t cpuc_dcpc_intr_state; /* DCPC provider intr state */ 262189251Ssam uint8_t cpuc_pad[CPUC_PADSIZE]; /* padding */ 263189251Ssam uintptr_t cpuc_dtrace_illval; /* DTrace illegal value */ 264189251Ssam kmutex_t cpuc_pid_lock; /* DTrace pid provider lock */ 265189251Ssam} cpu_core_t; 266189251Ssam 267189251Ssam#ifdef _KERNEL 268189251Ssamextern cpu_core_t cpu_core[]; 269189251Ssam#endif /* _KERNEL */ 270189251Ssam 271189251Ssam/* 272189251Ssam * CPU_ON_INTR() macro. Returns non-zero if currently on interrupt stack. 273189251Ssam * Note that this isn't a test for a high PIL. For example, cpu_intr_actv 274189251Ssam * does not get updated when we go through sys_trap from TL>0 at high PIL. 275189251Ssam * getpil() should be used instead to check for PIL levels. 276189251Ssam */ 277189251Ssam#define CPU_ON_INTR(cpup) ((cpup)->cpu_intr_actv >> (LOCK_LEVEL + 1)) 278189251Ssam 279189251Ssam/* 280189251Ssam * Check to see if an interrupt thread might be active at a given ipl. 281189251Ssam * If so return true. 282189251Ssam * We must be conservative--it is ok to give a false yes, but a false no 283189251Ssam * will cause disaster. (But if the situation changes after we check it is 284189251Ssam * ok--the caller is trying to ensure that an interrupt routine has been 285189251Ssam * exited). 286189251Ssam * This is used when trying to remove an interrupt handler from an autovector 287189251Ssam * list in avintr.c. 288189251Ssam */ 289189251Ssam#define INTR_ACTIVE(cpup, level) \ 290189251Ssam ((level) <= LOCK_LEVEL ? 
\ 291189251Ssam ((cpup)->cpu_intr_actv & (1 << (level))) : (CPU_ON_INTR(cpup))) 292189251Ssam 293189251Ssam/* 294189251Ssam * CPU_PSEUDO_RANDOM() returns a per CPU value that changes each time one 295189251Ssam * looks at it. It's meant as a cheap mechanism to be incorporated in routines 296189251Ssam * wanting to avoid biasing, but where true randomness isn't needed (just 297189251Ssam * something that changes). 298189251Ssam */ 299189251Ssam#define CPU_PSEUDO_RANDOM() (CPU->cpu_rotor++) 300189251Ssam 301189251Ssam#if defined(_KERNEL) || defined(_KMEMUSER) 302189251Ssam 303189251Ssam#define INTR_STACK_SIZE MAX(DEFAULTSTKSZ, PAGESIZE) 304189251Ssam 305189251Ssam/* MEMBERS PROTECTED BY "atomicity": cpu_flags */ 306189251Ssam 307189251Ssam/* 308189251Ssam * Flags in the CPU structure. 309189251Ssam * 310189251Ssam * These are protected by cpu_lock (except during creation). 311189251Ssam * 312189251Ssam * Offlined-CPUs have three stages of being offline: 313189251Ssam * 314189251Ssam * CPU_ENABLE indicates that the CPU is participating in I/O interrupts 315189251Ssam * that can be directed at a number of different CPUs. If CPU_ENABLE 316189251Ssam * is off, the CPU will not be given interrupts that can be sent elsewhere, 317189251Ssam * but will still get interrupts from devices associated with that CPU only, 318189251Ssam * and from other CPUs. 319189251Ssam * 320189251Ssam * CPU_OFFLINE indicates that the dispatcher should not allow any threads 321189251Ssam * other than interrupt threads to run on that CPU. A CPU will not have 322189251Ssam * CPU_OFFLINE set if there are any bound threads (besides interrupts). 323189251Ssam * 324189251Ssam * CPU_QUIESCED is set if p_offline was able to completely turn idle the 325189251Ssam * CPU and it will not have to run interrupt threads. In this case it'll 326189251Ssam * stay in the idle loop until CPU_QUIESCED is turned off. 
327189251Ssam * 328189251Ssam * CPU_FROZEN is used only by CPR to mark CPUs that have been successfully 329189251Ssam * suspended (in the suspend path), or have yet to be resumed (in the resume 330189251Ssam * case). 331189251Ssam * 332189251Ssam * On some platforms CPUs can be individually powered off. 333189251Ssam * The following flags are set for powered off CPUs: CPU_QUIESCED, 334189251Ssam * CPU_OFFLINE, and CPU_POWEROFF. The following flags are cleared: 335189251Ssam * CPU_RUNNING, CPU_READY, CPU_EXISTS, and CPU_ENABLE. 336189251Ssam */ 337189251Ssam#define CPU_RUNNING 0x001 /* CPU running */ 338189251Ssam#define CPU_READY 0x002 /* CPU ready for cross-calls */ 339189251Ssam#define CPU_QUIESCED 0x004 /* CPU will stay in idle */ 340189251Ssam#define CPU_EXISTS 0x008 /* CPU is configured */ 341189251Ssam#define CPU_ENABLE 0x010 /* CPU enabled for interrupts */ 342189251Ssam#define CPU_OFFLINE 0x020 /* CPU offline via p_online */ 343189251Ssam#define CPU_POWEROFF 0x040 /* CPU is powered off */ 344189251Ssam#define CPU_FROZEN 0x080 /* CPU is frozen via CPR suspend */ 345189251Ssam#define CPU_SPARE 0x100 /* CPU offline available for use */ 346189251Ssam#define CPU_FAULTED 0x200 /* CPU offline diagnosed faulty */ 347189251Ssam 348189251Ssam#define FMT_CPU_FLAGS \ 349189251Ssam "\20\12fault\11spare\10frozen" \ 350189251Ssam "\7poweroff\6offline\5enable\4exist\3quiesced\2ready\1run" 351189251Ssam 352189251Ssam#define CPU_ACTIVE(cpu) (((cpu)->cpu_flags & CPU_OFFLINE) == 0) 353189251Ssam 354189251Ssam/* 355189251Ssam * Flags for cpu_offline(), cpu_faulted(), and cpu_spare(). 356189251Ssam */ 357189251Ssam#define CPU_FORCED 0x0001 /* Force CPU offline */ 358189251Ssam 359189251Ssam/* 360189251Ssam * DTrace flags. 
361189251Ssam */ 362189251Ssam#define CPU_DTRACE_NOFAULT 0x0001 /* Don't fault */ 363189251Ssam#define CPU_DTRACE_DROP 0x0002 /* Drop this ECB */ 364189251Ssam#define CPU_DTRACE_BADADDR 0x0004 /* DTrace fault: bad address */ 365189251Ssam#define CPU_DTRACE_BADALIGN 0x0008 /* DTrace fault: bad alignment */ 366189251Ssam#define CPU_DTRACE_DIVZERO 0x0010 /* DTrace fault: divide by zero */ 367189251Ssam#define CPU_DTRACE_ILLOP 0x0020 /* DTrace fault: illegal operation */ 368189251Ssam#define CPU_DTRACE_NOSCRATCH 0x0040 /* DTrace fault: out of scratch */ 369189251Ssam#define CPU_DTRACE_KPRIV 0x0080 /* DTrace fault: bad kernel access */ 370189251Ssam#define CPU_DTRACE_UPRIV 0x0100 /* DTrace fault: bad user access */ 371189251Ssam#define CPU_DTRACE_TUPOFLOW 0x0200 /* DTrace fault: tuple stack overflow */ 372189251Ssam#if defined(__sparc) 373189251Ssam#define CPU_DTRACE_FAKERESTORE 0x0400 /* pid provider hint to getreg */ 374189251Ssam#endif 375189251Ssam#define CPU_DTRACE_ENTRY 0x0800 /* pid provider hint to ustack() */ 376189251Ssam#define CPU_DTRACE_BADSTACK 0x1000 /* DTrace fault: bad stack */ 377189251Ssam 378189251Ssam#define CPU_DTRACE_FAULT (CPU_DTRACE_BADADDR | CPU_DTRACE_BADALIGN | \ 379189251Ssam CPU_DTRACE_DIVZERO | CPU_DTRACE_ILLOP | \ 380189251Ssam CPU_DTRACE_NOSCRATCH | CPU_DTRACE_KPRIV | \ 381189251Ssam CPU_DTRACE_UPRIV | CPU_DTRACE_TUPOFLOW | \ 382189251Ssam CPU_DTRACE_BADSTACK) 383189251Ssam#define CPU_DTRACE_ERROR (CPU_DTRACE_FAULT | CPU_DTRACE_DROP) 384189251Ssam 385189251Ssam/* 386189251Ssam * Dispatcher flags 387189251Ssam * These flags must be changed only by the current CPU. 
388189251Ssam */ 389189251Ssam#define CPU_DISP_DONTSTEAL 0x01 /* CPU undergoing context swtch */ 390189251Ssam#define CPU_DISP_HALTED 0x02 /* CPU halted waiting for interrupt */ 391189251Ssam 392189251Ssam#endif /* _KERNEL || _KMEMUSER */ 393189251Ssam 394189251Ssam#if (defined(_KERNEL) || defined(_KMEMUSER)) && defined(_MACHDEP) 395189251Ssam 396189251Ssam/* 397189251Ssam * Macros for manipulating sets of CPUs as a bitmap. Note that this 398189251Ssam * bitmap may vary in size depending on the maximum CPU id a specific 399189251Ssam * platform supports. This may be different than the number of CPUs 400189251Ssam * the platform supports, since CPU ids can be sparse. We define two 401189251Ssam * sets of macros; one for platforms where the maximum CPU id is less 402189251Ssam * than the number of bits in a single word (32 in a 32-bit kernel, 403189251Ssam * 64 in a 64-bit kernel), and one for platforms that require bitmaps 404189251Ssam * of more than one word. 405189251Ssam */ 406189251Ssam 407189251Ssam#define CPUSET_WORDS BT_BITOUL(NCPU) 408189251Ssam#define CPUSET_NOTINSET ((uint_t)-1) 409189251Ssam 410189251Ssam#if CPUSET_WORDS > 1 411189251Ssam 412189251Ssamtypedef struct cpuset { 413189251Ssam ulong_t cpub[CPUSET_WORDS]; 414189251Ssam} cpuset_t; 415189251Ssam 416189251Ssam/* 417189251Ssam * Private functions for manipulating cpusets that do not fit in a 418189251Ssam * single word. These should not be used directly; instead the 419189251Ssam * CPUSET_* macros should be used so the code will be portable 420189251Ssam * across different definitions of NCPU. 
421189251Ssam */ 422189251Ssamextern void cpuset_all(cpuset_t *); 423189251Ssamextern void cpuset_all_but(cpuset_t *, uint_t); 424189251Ssamextern int cpuset_isnull(cpuset_t *); 425189251Ssamextern int cpuset_cmp(cpuset_t *, cpuset_t *); 426189251Ssamextern void cpuset_only(cpuset_t *, uint_t); 427189251Ssamextern uint_t cpuset_find(cpuset_t *); 428189251Ssamextern void cpuset_bounds(cpuset_t *, uint_t *, uint_t *); 429189251Ssam 430189251Ssam#define CPUSET_ALL(set) cpuset_all(&(set)) 431189251Ssam#define CPUSET_ALL_BUT(set, cpu) cpuset_all_but(&(set), cpu) 432189251Ssam#define CPUSET_ONLY(set, cpu) cpuset_only(&(set), cpu) 433189251Ssam#define CPU_IN_SET(set, cpu) BT_TEST((set).cpub, cpu) 434189251Ssam#define CPUSET_ADD(set, cpu) BT_SET((set).cpub, cpu) 435189251Ssam#define CPUSET_DEL(set, cpu) BT_CLEAR((set).cpub, cpu) 436189251Ssam#define CPUSET_ISNULL(set) cpuset_isnull(&(set)) 437189251Ssam#define CPUSET_ISEQUAL(set1, set2) cpuset_cmp(&(set1), &(set2)) 438189251Ssam 439189251Ssam/* 440189251Ssam * Find one CPU in the cpuset. 441189251Ssam * Sets "cpu" to the id of the found CPU, or CPUSET_NOTINSET if no cpu 442189251Ssam * could be found. (i.e. empty set) 443189251Ssam */ 444189251Ssam#define CPUSET_FIND(set, cpu) { \ 445189251Ssam cpu = cpuset_find(&(set)); \ 446189251Ssam} 447189251Ssam 448189251Ssam/* 449189251Ssam * Determine the smallest and largest CPU id in the set. Returns 450189251Ssam * CPUSET_NOTINSET in smallest and largest when set is empty. 451189251Ssam */ 452189251Ssam#define CPUSET_BOUNDS(set, smallest, largest) { \ 453189251Ssam cpuset_bounds(&(set), &(smallest), &(largest)); \ 454189251Ssam} 455189251Ssam 456189251Ssam/* 457189251Ssam * Atomic cpuset operations 458189251Ssam * These are safe to use for concurrent cpuset manipulations. 459189251Ssam * "xdel" and "xadd" are exclusive operations, that set "result" to "0" 460189251Ssam * if the add or del was successful, or "-1" if not successful. 461189251Ssam * (e.g. 
attempting to add a cpu to a cpuset that's already there, or 462189251Ssam * deleting a cpu that's not in the cpuset) 463189251Ssam */ 464189251Ssam 465189251Ssam#define CPUSET_ATOMIC_DEL(set, cpu) BT_ATOMIC_CLEAR((set).cpub, (cpu)) 466189251Ssam#define CPUSET_ATOMIC_ADD(set, cpu) BT_ATOMIC_SET((set).cpub, (cpu)) 467189251Ssam 468189251Ssam#define CPUSET_ATOMIC_XADD(set, cpu, result) \ 469189251Ssam BT_ATOMIC_SET_EXCL((set).cpub, cpu, result) 470189251Ssam 471189251Ssam#define CPUSET_ATOMIC_XDEL(set, cpu, result) \ 472189251Ssam BT_ATOMIC_CLEAR_EXCL((set).cpub, cpu, result) 473189251Ssam 474189251Ssam 475189251Ssam#define CPUSET_OR(set1, set2) { \ 476189251Ssam int _i; \ 477189251Ssam for (_i = 0; _i < CPUSET_WORDS; _i++) \ 478189251Ssam (set1).cpub[_i] |= (set2).cpub[_i]; \ 479189251Ssam} 480189251Ssam 481189251Ssam#define CPUSET_XOR(set1, set2) { \ 482189251Ssam int _i; \ 483189251Ssam for (_i = 0; _i < CPUSET_WORDS; _i++) \ 484189251Ssam (set1).cpub[_i] ^= (set2).cpub[_i]; \ 485189251Ssam} 486189251Ssam 487189251Ssam#define CPUSET_AND(set1, set2) { \ 488189251Ssam int _i; \ 489189251Ssam for (_i = 0; _i < CPUSET_WORDS; _i++) \ 490189251Ssam (set1).cpub[_i] &= (set2).cpub[_i]; \ 491189251Ssam} 492189251Ssam 493189251Ssam#define CPUSET_ZERO(set) { \ 494189251Ssam int _i; \ 495189251Ssam for (_i = 0; _i < CPUSET_WORDS; _i++) \ 496189251Ssam (set).cpub[_i] = 0; \ 497189251Ssam} 498189251Ssam 499189251Ssam#elif CPUSET_WORDS == 1 500189251Ssam 501189251Ssamtypedef ulong_t cpuset_t; /* a set of CPUs */ 502189251Ssam 503189251Ssam#define CPUSET(cpu) (1UL << (cpu)) 504189251Ssam 505189251Ssam#define CPUSET_ALL(set) ((void)((set) = ~0UL)) 506189251Ssam#define CPUSET_ALL_BUT(set, cpu) ((void)((set) = ~CPUSET(cpu))) 507189251Ssam#define CPUSET_ONLY(set, cpu) ((void)((set) = CPUSET(cpu))) 508189251Ssam#define CPU_IN_SET(set, cpu) ((set) & CPUSET(cpu)) 509189251Ssam#define CPUSET_ADD(set, cpu) ((void)((set) |= CPUSET(cpu))) 510189251Ssam#define CPUSET_DEL(set, cpu) 
((void)((set) &= ~CPUSET(cpu))) 511189251Ssam#define CPUSET_ISNULL(set) ((set) == 0) 512189251Ssam#define CPUSET_ISEQUAL(set1, set2) ((set1) == (set2)) 513189251Ssam#define CPUSET_OR(set1, set2) ((void)((set1) |= (set2))) 514189251Ssam#define CPUSET_XOR(set1, set2) ((void)((set1) ^= (set2))) 515189251Ssam#define CPUSET_AND(set1, set2) ((void)((set1) &= (set2))) 516189251Ssam#define CPUSET_ZERO(set) ((void)((set) = 0)) 517189251Ssam 518189251Ssam#define CPUSET_FIND(set, cpu) { \ 519189251Ssam cpu = (uint_t)(lowbit(set) - 1); \ 520189251Ssam} 521189251Ssam 522189251Ssam#define CPUSET_BOUNDS(set, smallest, largest) { \ 523189251Ssam smallest = (uint_t)(lowbit(set) - 1); \ 524189251Ssam largest = (uint_t)(highbit(set) - 1); \ 525189251Ssam} 526189251Ssam 527189251Ssam#define CPUSET_ATOMIC_DEL(set, cpu) atomic_and_ulong(&(set), ~CPUSET(cpu)) 528189251Ssam#define CPUSET_ATOMIC_ADD(set, cpu) atomic_or_ulong(&(set), CPUSET(cpu)) 529189251Ssam 530189251Ssam#define CPUSET_ATOMIC_XADD(set, cpu, result) \ 531189251Ssam { result = atomic_set_long_excl(&(set), (cpu)); } 532189251Ssam 533189251Ssam#define CPUSET_ATOMIC_XDEL(set, cpu, result) \ 534189251Ssam { result = atomic_clear_long_excl(&(set), (cpu)); } 535189251Ssam 536189251Ssam#else /* CPUSET_WORDS <= 0 */ 537189251Ssam 538189251Ssam#error NCPU is undefined or invalid 539189251Ssam 540189251Ssam#endif /* CPUSET_WORDS */ 541189251Ssam 542189251Ssamextern cpuset_t cpu_seqid_inuse; 543189251Ssam 544189251Ssam#endif /* (_KERNEL || _KMEMUSER) && _MACHDEP */ 545189251Ssam 546189251Ssam#define CPU_CPR_OFFLINE 0x0 547189251Ssam#define CPU_CPR_ONLINE 0x1 548189251Ssam#define CPU_CPR_IS_OFFLINE(cpu) (((cpu)->cpu_cpr_flags & CPU_CPR_ONLINE) == 0) 549189251Ssam#define CPU_CPR_IS_ONLINE(cpu) ((cpu)->cpu_cpr_flags & CPU_CPR_ONLINE) 550189251Ssam#define CPU_SET_CPR_FLAGS(cpu, flag) ((cpu)->cpu_cpr_flags |= flag) 551189251Ssam 552189251Ssam#if defined(_KERNEL) || defined(_KMEMUSER) 553189251Ssam 554189251Ssamextern struct cpu *cpu[]; /* 
indexed by CPU number */ 555189251Ssamextern struct cpu **cpu_seq; /* indexed by sequential CPU id */ 556189251Ssamextern cpu_t *cpu_list; /* list of CPUs */ 557189251Ssamextern cpu_t *cpu_active; /* list of active CPUs */ 558189251Ssamextern int ncpus; /* number of CPUs present */ 559189251Ssamextern int ncpus_online; /* number of CPUs not quiesced */ 560189251Ssamextern int max_ncpus; /* max present before ncpus is known */ 561189251Ssamextern int boot_max_ncpus; /* like max_ncpus but for real */ 562189251Ssamextern int boot_ncpus; /* # cpus present @ boot */ 563189251Ssamextern processorid_t max_cpuid; /* maximum CPU number */ 564189251Ssamextern struct cpu *cpu_inmotion; /* offline or partition move target */ 565189251Ssamextern cpu_t *clock_cpu_list; 566189251Ssamextern processorid_t max_cpu_seqid_ever; /* maximum seqid ever given */ 567189251Ssam 568189251Ssam#if defined(__i386) || defined(__amd64) 569189251Ssamextern struct cpu *curcpup(void); 570189251Ssam#define CPU (curcpup()) /* Pointer to current CPU */ 571189251Ssam#else 572189251Ssam#define CPU (curthread->t_cpu) /* Pointer to current CPU */ 573189251Ssam#endif 574189251Ssam 575189251Ssam/* 576189251Ssam * CPU_CURRENT indicates to thread_affinity_set to use CPU->cpu_id 577189251Ssam * as the target and to grab cpu_lock instead of requiring the caller 578189251Ssam * to grab it. 579189251Ssam */ 580189251Ssam#define CPU_CURRENT -3 581189251Ssam 582189251Ssam/* 583252726Srpaulo * Per-CPU statistics 584189251Ssam * 585189251Ssam * cpu_stats_t contains numerous system and VM-related statistics, in the form 586189251Ssam * of gauges or monotonically-increasing event occurrence counts. 
587189251Ssam */ 588189251Ssam 589189251Ssam#define CPU_STATS_ENTER_K() kpreempt_disable() 590189251Ssam#define CPU_STATS_EXIT_K() kpreempt_enable() 591189251Ssam 592189251Ssam#define CPU_STATS_ADD_K(class, stat, amount) \ 593189251Ssam { kpreempt_disable(); /* keep from switching CPUs */\ 594189251Ssam CPU_STATS_ADDQ(CPU, class, stat, amount); \ 595189251Ssam kpreempt_enable(); \ 596189251Ssam } 597189251Ssam 598189251Ssam#define CPU_STATS_ADDQ(cp, class, stat, amount) { \ 599189251Ssam extern void __dtrace_probe___cpu_##class##info_##stat(uint_t, \ 600189251Ssam uint64_t *, cpu_t *); \ 601189251Ssam uint64_t *stataddr = &((cp)->cpu_stats.class.stat); \ 602189251Ssam __dtrace_probe___cpu_##class##info_##stat((amount), \ 603189251Ssam stataddr, cp); \ 604189251Ssam *(stataddr) += (amount); \ 605189251Ssam} 606189251Ssam 607189251Ssam#define CPU_STATS(cp, stat) \ 608189251Ssam ((cp)->cpu_stats.stat) 609189251Ssam 610189251Ssam/* 611189251Ssam * Increment CPU generation value. 612189251Ssam * This macro should be called whenever CPU goes on-line or off-line. 613189251Ssam * Updates to cpu_generation should be protected by cpu_lock. 614189251Ssam */ 615189251Ssam#define CPU_NEW_GENERATION(cp) ((cp)->cpu_generation++) 616189251Ssam 617189251Ssam#endif /* _KERNEL || _KMEMUSER */ 618189251Ssam 619189251Ssam/* 620189251Ssam * CPU support routines. 
621189251Ssam */ 622189251Ssam#if defined(_KERNEL) && defined(__STDC__) /* not for genassym.c */ 623189251Ssam 624189251Ssamstruct zone; 625189251Ssam 626189251Ssamvoid cpu_list_init(cpu_t *); 627189251Ssamvoid cpu_add_unit(cpu_t *); 628189251Ssamvoid cpu_del_unit(int cpuid); 629189251Ssamvoid cpu_add_active(cpu_t *); 630189251Ssamvoid cpu_kstat_init(cpu_t *); 631189251Ssamvoid cpu_visibility_add(cpu_t *, struct zone *); 632189251Ssamvoid cpu_visibility_remove(cpu_t *, struct zone *); 633189251Ssamvoid cpu_visibility_configure(cpu_t *, struct zone *); 634189251Ssamvoid cpu_visibility_unconfigure(cpu_t *, struct zone *); 635189251Ssamvoid cpu_visibility_online(cpu_t *, struct zone *); 636189251Ssamvoid cpu_visibility_offline(cpu_t *, struct zone *); 637189251Ssamvoid cpu_create_intrstat(cpu_t *); 638189251Ssamvoid cpu_delete_intrstat(cpu_t *); 639189251Ssamint cpu_kstat_intrstat_update(kstat_t *, int); 640189251Ssamvoid cpu_intr_swtch_enter(kthread_t *); 641189251Ssamvoid cpu_intr_swtch_exit(kthread_t *); 642189251Ssam 643189251Ssamvoid mbox_lock_init(void); /* initialize cross-call locks */ 644189251Ssamvoid mbox_init(int cpun); /* initialize cross-calls */ 645189251Ssamvoid poke_cpu(int cpun); /* interrupt another CPU (to preempt) */ 646189251Ssam 647189251Ssam/* 648189251Ssam * values for safe_list. Pause state that CPUs are in. 
649189251Ssam */ 650189251Ssam#define PAUSE_IDLE 0 /* normal state */ 651189251Ssam#define PAUSE_READY 1 /* paused thread ready to spl */ 652189251Ssam#define PAUSE_WAIT 2 /* paused thread is spl-ed high */ 653189251Ssam#define PAUSE_DIE 3 /* tell pause thread to leave */ 654189251Ssam#define PAUSE_DEAD 4 /* pause thread has left */ 655189251Ssam 656189251Ssamvoid mach_cpu_pause(volatile char *); 657189251Ssam 658189251Ssamvoid pause_cpus(cpu_t *off_cp); 659189251Ssamvoid start_cpus(void); 660189251Ssamint cpus_paused(void); 661189251Ssam 662189251Ssamvoid cpu_pause_init(void); 663189251Ssamcpu_t *cpu_get(processorid_t cpun); /* get the CPU struct associated */ 664189251Ssam 665189251Ssamint cpu_online(cpu_t *cp); /* take cpu online */ 666189251Ssamint cpu_offline(cpu_t *cp, int flags); /* take cpu offline */ 667189251Ssamint cpu_spare(cpu_t *cp, int flags); /* take cpu to spare */ 668189251Ssamint cpu_faulted(cpu_t *cp, int flags); /* take cpu to faulted */ 669189251Ssamint cpu_poweron(cpu_t *cp); /* take powered-off cpu to offline */ 670189251Ssamint cpu_poweroff(cpu_t *cp); /* take offline cpu to powered-off */ 671189251Ssam 672189251Ssamcpu_t *cpu_intr_next(cpu_t *cp); /* get next online CPU taking intrs */ 673189251Ssamint cpu_intr_count(cpu_t *cp); /* count # of CPUs handling intrs */ 674189251Ssamint cpu_intr_on(cpu_t *cp); /* CPU taking I/O interrupts? */ 675189251Ssamvoid cpu_intr_enable(cpu_t *cp); /* enable I/O interrupts */ 676189251Ssamint cpu_intr_disable(cpu_t *cp); /* disable I/O interrupts */ 677189251Ssamvoid cpu_intr_alloc(cpu_t *cp, int n); /* allocate interrupt threads */ 678189251Ssam 679189251Ssam/* 680189251Ssam * Routines for checking CPU states. 
681189251Ssam */ 682189251Ssamint cpu_is_online(cpu_t *); /* check if CPU is online */ 683189251Ssamint cpu_is_nointr(cpu_t *); /* check if CPU can service intrs */ 684189251Ssamint cpu_is_active(cpu_t *); /* check if CPU can run threads */ 685189251Ssamint cpu_is_offline(cpu_t *); /* check if CPU is offline */ 686189251Ssamint cpu_is_poweredoff(cpu_t *); /* check if CPU is powered off */ 687189251Ssam 688189251Ssamint cpu_flagged_online(cpu_flag_t); /* flags show CPU is online */ 689189251Ssamint cpu_flagged_nointr(cpu_flag_t); /* flags show CPU not handling intrs */ 690189251Ssamint cpu_flagged_active(cpu_flag_t); /* flags show CPU scheduling threads */ 691189251Ssamint cpu_flagged_offline(cpu_flag_t); /* flags show CPU is offline */ 692189251Ssamint cpu_flagged_poweredoff(cpu_flag_t); /* flags show CPU is powered off */ 693189251Ssam 694189251Ssam/* 695189251Ssam * The processor_info(2) state of a CPU is a simplified representation suitable 696189251Ssam * for use by an application program. Kernel subsystems should utilize the 697189251Ssam * internal per-CPU state as given by the cpu_flags member of the cpu structure, 698189251Ssam * as this information may include platform- or architecture-specific state 699189251Ssam * critical to a subsystem's disposition of a particular CPU. 
700189251Ssam */ 701189251Ssamvoid cpu_set_state(cpu_t *); /* record/timestamp current state */ 702189251Ssamint cpu_get_state(cpu_t *); /* get current cpu state */ 703189251Ssamconst char *cpu_get_state_str(cpu_t *); /* get current cpu state as string */ 704189251Ssam 705189251Ssam 706189251Ssamvoid cpu_set_curr_clock(uint64_t); /* indicate the current CPU's freq */ 707189251Ssamvoid cpu_set_supp_freqs(cpu_t *, const char *); /* set the CPU supported */ 708189251Ssam /* frequencies */ 709189251Ssam 710189251Ssamint cpu_configure(int); 711189251Ssamint cpu_unconfigure(int); 712189251Ssamvoid cpu_destroy_bound_threads(cpu_t *cp); 713189251Ssam 714189251Ssamextern int cpu_bind_thread(kthread_t *tp, processorid_t bind, 715189251Ssam processorid_t *obind, int *error); 716189251Ssamextern int cpu_unbind(processorid_t cpu_id, boolean_t force); 717189251Ssamextern void thread_affinity_set(kthread_t *t, int cpu_id); 718189251Ssamextern void thread_affinity_clear(kthread_t *t); 719189251Ssamextern void affinity_set(int cpu_id); 720189251Ssamextern void affinity_clear(void); 721189251Ssamextern void init_cpu_mstate(struct cpu *, int); 722189251Ssamextern void term_cpu_mstate(struct cpu *); 723189251Ssamextern void new_cpu_mstate(int, hrtime_t); 724189251Ssamextern void get_cpu_mstate(struct cpu *, hrtime_t *); 725189251Ssamextern void thread_nomigrate(void); 726189251Ssamextern void thread_allowmigrate(void); 727189251Ssamextern void weakbinding_stop(void); 728189251Ssamextern void weakbinding_start(void); 729189251Ssam 730189251Ssam/* 731189251Ssam * The following routines affect the CPUs participation in interrupt processing, 732189251Ssam * if that is applicable on the architecture. This only affects interrupts 733189251Ssam * which aren't directed at the processor (not cross calls). 734189251Ssam * 735189251Ssam * cpu_disable_intr returns non-zero if interrupts were previously enabled. 
736189251Ssam */ 737189251Ssamint cpu_disable_intr(struct cpu *cp); /* stop issuing interrupts to cpu */ 738189251Ssamvoid cpu_enable_intr(struct cpu *cp); /* start issuing interrupts to cpu */ 739189251Ssam 740189251Ssam/* 741189251Ssam * The mutex cpu_lock protects cpu_flags for all CPUs, as well as the ncpus 742189251Ssam * and ncpus_online counts. 743189251Ssam */ 744189251Ssamextern kmutex_t cpu_lock; /* lock protecting CPU data */ 745189251Ssam 746189251Ssam/* 747189251Ssam * CPU state change events 748189251Ssam * 749189251Ssam * Various subsystems need to know when CPUs change their state. They get this 750189251Ssam * information by registering CPU state change callbacks using 751189251Ssam * register_cpu_setup_func(). Whenever any CPU changes its state, the callback 752189251Ssam * function is called. The callback function is passed three arguments: 753189251Ssam * 754189251Ssam * Event, described by cpu_setup_t 755189251Ssam * CPU ID 756189251Ssam * Transparent pointer passed when registering the callback 757189251Ssam * 758189251Ssam * The callback function is called with cpu_lock held. The return value from the 759189251Ssam * callback function is usually ignored, except for CPU_CONFIG and CPU_UNCONFIG 760189251Ssam * events. For these two events, non-zero return value indicates a failure and 761189251Ssam * prevents successful completion of the operation. 762189251Ssam * 763189251Ssam * New events may be added in the future. Callback functions should ignore any 764189251Ssam * events that they do not understand. 
765189251Ssam * 766189251Ssam * The following events provide notification callbacks: 767189251Ssam * 768189251Ssam * CPU_INIT A new CPU is started and added to the list of active CPUs 769189251Ssam * This event is only used during boot 770189251Ssam * 771189251Ssam * CPU_CONFIG A newly inserted CPU is prepared for starting running code 772189251Ssam * This event is called by DR code 773189251Ssam * 774189251Ssam * CPU_UNCONFIG CPU has been powered off and needs cleanup 775189251Ssam * This event is called by DR code 776189251Ssam * 777189251Ssam * CPU_ON CPU is enabled but does not run anything yet 778189251Ssam * 779189251Ssam * CPU_INTR_ON CPU is enabled and has interrupts enabled 780189251Ssam * 781189251Ssam * CPU_OFF CPU is going offline but can still run threads 782189251Ssam * 783189251Ssam * CPU_CPUPART_OUT CPU is going to move out of its partition 784189251Ssam * 785189251Ssam * CPU_CPUPART_IN CPU is going to move to a new partition 786189251Ssam * 787189251Ssam * CPU_SETUP CPU is set up during boot and can run threads 788189251Ssam */ 789189251Ssamtypedef enum { 790189251Ssam CPU_INIT, 791189251Ssam CPU_CONFIG, 792189251Ssam CPU_UNCONFIG, 793189251Ssam CPU_ON, 794189251Ssam CPU_OFF, 795189251Ssam CPU_CPUPART_IN, 796189251Ssam CPU_CPUPART_OUT, 797189251Ssam CPU_SETUP, 798189251Ssam CPU_INTR_ON 799189251Ssam} cpu_setup_t; 800189251Ssam 801189251Ssamtypedef int cpu_setup_func_t(cpu_setup_t, int, void *); 802189251Ssam 803189251Ssam/* 804189251Ssam * Routines used to register interest in cpu's being added to or removed 805189251Ssam * from the system. 
806189251Ssam */ 807189251Ssamextern void register_cpu_setup_func(cpu_setup_func_t *, void *); 808189251Ssamextern void unregister_cpu_setup_func(cpu_setup_func_t *, void *); 809189251Ssamextern void cpu_state_change_notify(int, cpu_setup_t); 810189251Ssam 811189251Ssam/* 812189251Ssam * Call specified function on the given CPU 813189251Ssam */ 814189251Ssamtypedef void (*cpu_call_func_t)(uintptr_t, uintptr_t); 815189251Ssamextern void cpu_call(cpu_t *, cpu_call_func_t, uintptr_t, uintptr_t); 816189251Ssam 817189251Ssam 818189251Ssam/* 819189251Ssam * Create various strings that describe the given CPU for the 820189251Ssam * processor_info system call and configuration-related kstats. 821189251Ssam */ 822189251Ssam#define CPU_IDSTRLEN 100 823189251Ssam 824189251Ssamextern void init_cpu_info(struct cpu *); 825189251Ssamextern void populate_idstr(struct cpu *); 826189251Ssamextern void cpu_vm_data_init(struct cpu *); 827189251Ssamextern void cpu_vm_data_destroy(struct cpu *); 828189251Ssam 829189251Ssam#endif /* _KERNEL */ 830189251Ssam 831189251Ssam#ifdef __cplusplus 832189251Ssam} 833189251Ssam#endif 834189251Ssam 835189251Ssam#endif /* _SYS_CPUVAR_H */ 836189251Ssam