/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1996, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2017 RackTop Systems.
 */

#ifndef	_SYS_CPUPART_H
#define	_SYS_CPUPART_H

#include <sys/types.h>
#include <sys/processor.h>
#include <sys/cpuvar.h>
#include <sys/disp.h>
#include <sys/pset.h>
#include <sys/lgrp.h>
#include <sys/lgrp_user.h>
#include <sys/pg.h>
#include <sys/bitset.h>
#include <sys/time.h>

#ifdef	__cplusplus
extern "C" {
#endif

#if defined(_KERNEL) || defined(_FAKE_KERNEL)

typedef int	cpupartid_t;

/*
 * Special partition id.
 */
#define	CP_DEFAULT	0

/*
 * Flags for cpupart_list()
 */
#define	CP_ALL		0		/* return all cpu partitions */
#define	CP_NONEMPTY	1		/* return only non-empty ones */
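
/*
 * Illustrative sketch only, not part of this interface: a kernel caller
 * could enumerate the non-empty partitions with cpupart_list() roughly as
 * follows, assuming cpu_lock is held and that the return value is the
 * total number of matching partitions (the buffer size of 8 is arbitrary).
 *
 *	psetid_t	psets[8];
 *	uint_t		nparts;
 *
 *	nparts = cpupart_list(psets, 8, CP_NONEMPTY);
 *	if (nparts > 8)
 *		nparts = 8;	(only the first 8 IDs were stored)
 */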

typedef struct cpupart {
	disp_t		cp_kp_queue;	/* partition-wide kpreempt queue */
	cpupartid_t	cp_id;		/* partition ID */
	int		cp_ncpus;	/* number of online processors */
	struct cpupart	*cp_next;	/* next partition in list */
	struct cpupart	*cp_prev;	/* previous partition in list */
	struct cpu	*cp_cpulist;	/* processor list */
	struct kstat	*cp_kstat;	/* per-partition statistics */

	/*
	 * cp_nrunnable and cp_nrunning are used to calculate load average.
	 */
	uint_t		cp_nrunnable;	/* current # of runnable threads */
	uint_t		cp_nrunning;	/* current # of running threads */

	/*
	 * cp_updates, cp_nrunnable_cum, cp_nwaiting_cum, and cp_hp_avenrun
	 * are used to generate kstat information on an as-needed basis.
	 */
	uint64_t	cp_updates;	/* number of statistics updates */
	uint64_t	cp_nrunnable_cum; /* cum. # of runnable threads */
	uint64_t	cp_nwaiting_cum;  /* cum. # of waiting threads */

	struct loadavg_s cp_loadavg;	/* cpupart loadavg */

	klgrpset_t	cp_lgrpset;	/* set of lgroups on which this */
					/*    partition has cpus */
	lpl_t		*cp_lgrploads;	/* table of load averages for this  */
					/*    partition, indexed by lgrp ID */
	int		cp_nlgrploads;	/* size of cp_lgrploads table */
	uint64_t	cp_hp_avenrun[3]; /* high-precision load average */
	uint_t		cp_attr;	/* bitmask of attributes */
	lgrp_gen_t	cp_gen;		/* generation number */
	lgrp_id_t	cp_lgrp_hint;	/* last home lgroup chosen */
	bitset_t	cp_cmt_pgs;	/* CMT PGs represented */
	bitset_t	cp_haltset;	/* halted CPUs */
} cpupart_t;
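
/*
 * Illustrative sketch only, assuming the partition list headed by
 * cp_list_head (declared below) is circular via cp_next/cp_prev and is
 * stabilized by holding cpu_lock: walking every partition typically looks
 * like this.
 *
 *	cpupart_t	*cp;
 *	int		total_cpus = 0;
 *
 *	ASSERT(MUTEX_HELD(&cpu_lock));
 *	cp = cp_list_head;
 *	do {
 *		total_cpus += cp->cp_ncpus;
 *	} while ((cp = cp->cp_next) != cp_list_head);
 */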

typedef struct cpupart_kstat {
	kstat_named_t	cpk_updates;		/* number of updates */
	kstat_named_t	cpk_runnable;		/* cum # of runnable threads */
	kstat_named_t	cpk_waiting;		/* cum # waiting for I/O */
	kstat_named_t	cpk_ncpus;		/* current # of CPUs */
	kstat_named_t	cpk_avenrun_1min;	/* 1-minute load average */
	kstat_named_t	cpk_avenrun_5min;	/* 5-minute load average */
	kstat_named_t	cpk_avenrun_15min;	/* 15-minute load average */
} cpupart_kstat_t;

/*
 * Macro to obtain the maximum run priority for the global queue associated
 * with a given cpu partition.
 */
#define	CP_MAXRUNPRI(cp)	((cp)->cp_kp_queue.disp_maxrunpri)

/*
 * This macro is used to determine if the given thread must surrender
 * CPU to higher priority runnable threads on one of its dispatch queues.
 * This should really be defined in <sys/disp.h> but it is not because
 * including <sys/cpupart.h> there would cause recursive includes.
 */
#define	DISP_MUST_SURRENDER(t)				\
	((DISP_MAXRUNPRI(t) > DISP_PRIO(t)) ||		\
	(CP_MAXRUNPRI(t->t_cpupart) > DISP_PRIO(t)))
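
/*
 * Illustrative sketch only: a scheduling class might apply this check after
 * changing a thread's priority, forcing the thread off-CPU when a higher
 * priority thread is runnable.  cpu_surrender() stands in for whatever
 * preemption routine the caller uses; it is assumed here, not defined by
 * this header.
 *
 *	if (DISP_MUST_SURRENDER(t))
 *		cpu_surrender(t);
 */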

extern cpupart_t	cp_default;
extern cpupart_t	*cp_list_head;
extern uint_t		cp_numparts;
extern uint_t		cp_numparts_nonempty;

/*
 * Each partition contains a bitset that indicates which CPUs are halted and
 * which ones are running. Given the growing number of CPUs in current and
 * future platforms, it's important to fanout each CPU within its partition's
 * haltset to prevent contention due to false sharing. The fanout factor
 * is platform specific, and declared accordingly.
 */
extern uint_t cp_haltset_fanout;
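
/*
 * Illustrative sketch only, assuming the bitset_init_fanout() and
 * bitset_resize() interfaces from <sys/bitset.h> and the max_ncpus tunable:
 * a partition's haltset would be spread out by the platform fanout factor
 * when the partition is initialized, e.g.
 *
 *	bitset_init_fanout(&pp->cp_haltset, cp_haltset_fanout);
 *	bitset_resize(&pp->cp_haltset, max_ncpus);
 */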

extern void	cpupart_initialize_default(void);
extern cpupart_t *cpupart_find(psetid_t);
extern int	cpupart_create(psetid_t *);
extern int	cpupart_destroy(psetid_t);
extern psetid_t	cpupart_query_cpu(cpu_t *);
extern int	cpupart_attach_cpu(psetid_t, cpu_t *, int);
extern int	cpupart_get_cpus(psetid_t *, processorid_t *, uint_t *);
extern int	cpupart_bind_thread(kthread_id_t, psetid_t, int, void *,
    void *);
extern void	cpupart_kpqalloc(pri_t);
extern int	cpupart_get_loadavg(psetid_t, int *, int);
extern uint_t	cpupart_list(psetid_t *, uint_t, int);
extern int	cpupart_setattr(psetid_t, uint_t);
extern int	cpupart_getattr(psetid_t, uint_t *);
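
/*
 * Illustrative sketch only, assuming cpu_lock is held so the assignment
 * cannot change underneath the caller: mapping from a CPU to its processor
 * set ID and back to the owning partition structure.
 *
 *	psetid_t	psid;
 *	cpupart_t	*pp;
 *
 *	ASSERT(MUTEX_HELD(&cpu_lock));
 *	psid = cpupart_query_cpu(cp);	(cp is a cpu_t pointer)
 *	pp = cpupart_find(psid);
 */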

#endif	/* _KERNEL || _FAKE_KERNEL */

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_CPUPART_H */