/*
 * Copyright (c) 2007-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/machine.h>
#include <mach/processor.h>
#include <kern/kalloc.h>
#include <i386/cpu_affinity.h>
#include <i386/cpu_topology.h>
#include <i386/cpu_threads.h>
#include <i386/machine_cpu.h>
#include <i386/lock.h>
#include <i386/cpu_data.h>
#include <i386/lapic.h>
#include <i386/machine_routines.h>

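/*
 * In-kernel qsort(), declared here for local use. The comparator
 * follows the standard qsort() contract: it returns a negative, zero
 * or positive value for less-than, equal and greater-than.
 */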
__private_extern__ void qsort(
    void * array,
    size_t nmembers,
    size_t member_size,
    int (*)(const void *, const void *));

static int lapicid_cmp(const void *x, const void *y);
static x86_affinity_set_t *find_cache_affinity(x86_cpu_cache_t *l2_cachep);

x86_affinity_set_t	*x86_affinities = NULL;
static int		x86_affinity_count = 0;

/*
 * cpu_topology_sort() is called after all processors have been registered
 * but before any non-boot processor is started.
 * We establish canonical logical processor numbering - logical cpus must be
 * contiguous, zero-based and assigned in physical (local apic id) order.
 * This step is required because the discovery/registration order is
 * non-deterministic - cores are registered in differing orders over boots.
 * Enforcing canonical numbering simplifies identification
 * of processors - in particular, for stopping/starting from CHUD.
 */
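/*
 * For example (hypothetical apic ids): if four cpus register in the
 * order apic 0x0, 0x6, 0x2, 0x4, the sort below renumbers them so that
 * logical cpus 0..3 correspond to apic 0x0, 0x2, 0x4, 0x6 - the boot
 * processor (apic 0x0 here) always remains logical cpu 0.
 */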
void
cpu_topology_sort(int ncpus)
{
	int		i;
	boolean_t	istate;
	processor_t	lprim = NULL;

	assert(machine_info.physical_cpu == 1);
	assert(machine_info.logical_cpu == 1);
	assert(master_cpu == 0);
	assert(cpu_number() == 0);
	assert(cpu_datap(0)->cpu_number == 0);

	/* Lights out for this */
	istate = ml_set_interrupts_enabled(FALSE);

	if (topo_dbg) {
		TOPO_DBG("cpu_topology_sort() %d cpu%s registered\n",
			ncpus, (ncpus > 1) ? "s" : "");
		for (i = 0; i < ncpus; i++) {
			cpu_data_t	*cpup = cpu_datap(i);
			TOPO_DBG("\tcpu_data[%d]:%p local apic 0x%x\n",
				i, (void *) cpup, cpup->cpu_phys_number);
		}
	}

	/*
	 * Re-order the cpu_data_ptr vector sorting by physical id.
	 * Skip the boot processor, it's required to be correct.
	 */
	if (ncpus > 1) {
		qsort((void *) &cpu_data_ptr[1],
			ncpus - 1,
			sizeof(cpu_data_t *),
			lapicid_cmp);
	}
	if (topo_dbg) {
		TOPO_DBG("cpu_topology_sort() after sorting:\n");
		for (i = 0; i < ncpus; i++) {
			cpu_data_t	*cpup = cpu_datap(i);
			TOPO_DBG("\tcpu_data[%d]:%p local apic 0x%x\n",
				i, (void *) cpup, cpup->cpu_phys_number);
		}
	}

	/*
	 * Fix up logical numbers and reset the map kept by the lapic code.
	 */
	for (i = 1; i < ncpus; i++) {
		cpu_data_t	*cpup = cpu_datap(i);
		x86_core_t	*core = cpup->lcpu.core;
		x86_die_t	*die  = cpup->lcpu.die;
		x86_pkg_t	*pkg  = cpup->lcpu.package;

		assert(core != NULL);
		assert(die != NULL);
		assert(pkg != NULL);

		if (cpup->cpu_number != i) {
			kprintf("cpu_datap(%d):%p local apic id 0x%x "
				"remapped from %d\n",
				i, cpup, cpup->cpu_phys_number,
				cpup->cpu_number);
		}
		cpup->cpu_number = i;
		cpup->lcpu.cpu_num = i;
		cpup->lcpu.pnum = cpup->cpu_phys_number;
		lapic_cpu_map(cpup->cpu_phys_number, i);
		x86_set_lcpu_numbers(&cpup->lcpu);
		x86_set_core_numbers(core, &cpup->lcpu);
		x86_set_die_numbers(die, &cpup->lcpu);
		x86_set_pkg_numbers(pkg, &cpup->lcpu);
	}
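
	/*
	 * At this point cpu_number(), the lcpu numbering and the lapic map
	 * all agree: logical cpu i is the i-th entry of cpu_data_ptr, with
	 * cpus 1..ncpus-1 in ascending local apic id order and the boot
	 * processor fixed at logical cpu 0.
	 */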

	validate_topology();

	ml_set_interrupts_enabled(istate);
	TOPO_DBG("cpu_topology_sort() LLC is L%d\n", topoParms.LLCDepth + 1);

	/*
	 * Let the CPU Power Management know that the topology is stable.
	 */
	topoParms.stable = TRUE;
	pmCPUStateInit();

	/*
	 * Iterate over all logical cpus finding or creating the affinity set
	 * for their LLC cache. Each affinity set possesses a processor set
	 * into which each logical processor is added.
	 */
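	/*
	 * For example (hypothetical layout): on a two-package system with
	 * one shared LLC per package, this loop creates two affinity sets,
	 * each with its own processor set spanning that package's logical
	 * cpus.
	 */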
	TOPO_DBG("cpu_topology_sort() creating affinity sets:\n");
	for (i = 0; i < ncpus; i++) {
		cpu_data_t		*cpup = cpu_datap(i);
		x86_lcpu_t		*lcpup = cpu_to_lcpu(i);
		x86_cpu_cache_t		*LLC_cachep;
		x86_affinity_set_t	*aset;

		LLC_cachep = lcpup->caches[topoParms.LLCDepth];
		assert(LLC_cachep->type == CPU_CACHE_TYPE_UNIF);
		aset = find_cache_affinity(LLC_cachep);
		if (aset == NULL) {
			aset = (x86_affinity_set_t *) kalloc(sizeof(*aset));
			if (aset == NULL)
				panic("cpu_topology_sort() failed aset alloc");
			aset->next = x86_affinities;
			x86_affinities = aset;
			aset->num = x86_affinity_count++;
			aset->cache = LLC_cachep;
			aset->pset = (i == master_cpu) ?
					processor_pset(master_processor) :
					pset_create(pset_node_root());
			if (aset->pset == PROCESSOR_SET_NULL)
				panic("cpu_topology_sort: pset_create");
			TOPO_DBG("\tnew set %p(%d) pset %p for cache %p\n",
				aset, aset->num, aset->pset, aset->cache);
		}

		TOPO_DBG("\tprocessor_init set %p(%d) lcpup %p(%d) cpu %p processor %p\n",
			aset, aset->num, lcpup, lcpup->cpu_num, cpup, cpup->cpu_processor);

		if (i != master_cpu)
			processor_init(cpup->cpu_processor, i, aset->pset);

		if (lcpup->core->num_lcpus > 1) {
			if (lcpup->lnum == 0)
				lprim = cpup->cpu_processor;

			processor_meta_init(cpup->cpu_processor, lprim);
		}
	}
}

/*
 * Handle a request to start a CPU: check that the requested cpu number
 * is within the configured max cpu limit before starting it.
 */
kern_return_t
cpu_topology_start_cpu( int cpunum )
{
	int		ncpus = machine_info.max_cpus;
	int		i = cpunum;

	/* Decide whether to start a CPU, and actually start it */
	TOPO_DBG("cpu_topology_start_cpu() processor_start():\n");
	if (i < ncpus) {
		TOPO_DBG("\tlcpu %d\n", cpu_datap(i)->cpu_number);
		processor_start(cpu_datap(i)->cpu_processor);
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}

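/*
 * qsort() comparator: orders cpu_data_t pointers by ascending local
 * apic id. The explicit three-way compare (rather than subtraction)
 * cannot wrap around and yields the negative/zero/positive result
 * qsort() expects.
 */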
static int
lapicid_cmp(const void *x, const void *y)
{
	cpu_data_t	*cpu_x = *((cpu_data_t **)(uintptr_t)x);
	cpu_data_t	*cpu_y = *((cpu_data_t **)(uintptr_t)y);

	TOPO_DBG("lapicid_cmp(%p,%p) (%d,%d)\n",
		x, y, cpu_x->cpu_phys_number, cpu_y->cpu_phys_number);
	if (cpu_x->cpu_phys_number < cpu_y->cpu_phys_number)
		return -1;
	if (cpu_x->cpu_phys_number == cpu_y->cpu_phys_number)
		return 0;
	return 1;
}

static x86_affinity_set_t *
find_cache_affinity(x86_cpu_cache_t *l2_cachep)
{
	x86_affinity_set_t	*aset;

	for (aset = x86_affinities; aset != NULL; aset = aset->next) {
		if (l2_cachep == aset->cache)
			break;
	}
	return aset;
}
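
/*
 * Sketch of how the two accessors below can be used together: affinity
 * numbers are assigned densely from 0 as sets are created, so a caller
 * may enumerate every affinity set's processor set, e.g.
 *
 *	for (uint32_t a = 0; a < (uint32_t) ml_get_max_affinity_sets(); a++) {
 *		processor_set_t pset = ml_affinity_to_pset(a);
 *		...
 *	}
 */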

int
ml_get_max_affinity_sets(void)
{
	return x86_affinity_count;
}

processor_set_t
ml_affinity_to_pset(uint32_t affinity_num)
{
	x86_affinity_set_t	*aset;

	for (aset = x86_affinities; aset != NULL; aset = aset->next) {
		if (affinity_num == aset->num)
			break;
	}
	return (aset == NULL) ? PROCESSOR_SET_NULL : aset->pset;
}

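/*
 * Return the size in bytes of the cache at the given 1-based level on
 * the current cpu, or 0 if no cache exists at that level. Level 0 is
 * treated as "memory" and reports machine_info.max_mem. For example,
 * the LLC size can be queried as ml_cpu_cache_size(topoParms.LLCDepth + 1).
 */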
uint64_t
ml_cpu_cache_size(unsigned int level)
{
	x86_cpu_cache_t	*cachep;

	if (level == 0) {
		return machine_info.max_mem;
	} else if (1 <= level && level <= MAX_CACHE_DEPTH) {
		cachep = current_cpu_datap()->lcpu.caches[level-1];
		return cachep ? cachep->cache_size : 0;
	} else {
		return 0;
	}
}

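/*
 * Return the number of logical cpus sharing the cache at the given
 * 1-based level on the current cpu, or 0 if no cache exists at that
 * level. Level 0 is treated as "memory", shared by all max_cpus.
 */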
uint64_t
ml_cpu_cache_sharing(unsigned int level)
{
	x86_cpu_cache_t	*cachep;

	if (level == 0) {
		return machine_info.max_cpus;
	} else if (1 <= level && level <= MAX_CACHE_DEPTH) {
		cachep = current_cpu_datap()->lcpu.caches[level-1];
		return cachep ? cachep->nlcpus : 0;
	} else {
		return 0;
	}
}