• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/arch/x86/kernel/apic/
1/*
2 * Copyright 2004 James Cleverdon, IBM.
3 * Subject to the GNU Public License, v.2
4 *
5 * Flat APIC subarch code.
6 *
7 * Hacked for x86-64 by James Cleverdon from i386 architecture code by
8 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
9 * James Cleverdon.
10 */
11#include <linux/errno.h>
12#include <linux/threads.h>
13#include <linux/cpumask.h>
14#include <linux/string.h>
15#include <linux/kernel.h>
16#include <linux/ctype.h>
17#include <linux/init.h>
18#include <linux/hardirq.h>
19#include <asm/smp.h>
20#include <asm/apic.h>
21#include <asm/ipi.h>
22
23#ifdef CONFIG_ACPI
24#include <acpi/acpi_bus.h>
25#endif
26
/*
 * MADT OEM check for flat mode: accept any system (always report a
 * match).  physflat_acpi_madt_oem_check() below can still force
 * physical mode for systems flat mode cannot handle.
 */
static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return 1;
}
31
/* CPUs that interrupts may target in flat mode: all online CPUs. */
static const struct cpumask *flat_target_cpus(void)
{
	return cpu_online_mask;
}
36
/*
 * Vector allocation domain for flat mode: a vector is allocated on
 * all CPUs at once (every logical destination bit set in @retmask).
 */
static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	/* Careful. Some cpus do not strictly honor the set of cpus
	 * specified in the interrupt destination when using lowest
	 * priority interrupt delivery mode.
	 *
	 * In particular there was a hyperthreading cpu observed to
	 * deliver interrupts to the wrong hyperthread when only one
	 * hyperthread was specified in the interrupt destination.
	 */
	cpumask_clear(retmask);
	/* set APIC_ALL_CPUS in the first word of the (cleared) mask */
	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
}
50
51/*
52 * Set up the logical destination ID.
53 *
54 * Intel recommends to set DFR, LDR and TPR before enabling
55 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
56 * document number 292116).  So here it goes...
57 */
58static void flat_init_apic_ldr(void)
59{
60	unsigned long val;
61	unsigned long num, id;
62
63	num = smp_processor_id();
64	id = 1UL << num;
65	apic_write(APIC_DFR, APIC_DFR_FLAT);
66	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
67	val |= SET_APIC_LOGICAL_ID(id);
68	apic_write(APIC_LDR, val);
69}
70
/*
 * Send @vector to the logical destinations in @mask (@mask is the
 * raw first word of a cpumask).  Interrupts are disabled across the
 * ICR programming done by __default_send_IPI_dest_field().
 */
static inline void _flat_send_IPI_mask(unsigned long mask, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
	local_irq_restore(flags);
}
79
80static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
81{
82	unsigned long mask = cpumask_bits(cpumask)[0];
83
84	_flat_send_IPI_mask(mask, vector);
85}
86
87static void
88 flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
89{
90	unsigned long mask = cpumask_bits(cpumask)[0];
91	int cpu = smp_processor_id();
92
93	if (cpu < BITS_PER_LONG)
94		clear_bit(cpu, &mask);
95
96	_flat_send_IPI_mask(mask, vector);
97}
98
/*
 * Send @vector to every online CPU except the sender.
 *
 * Two paths: an explicit destination mask built from cpu_online_mask,
 * or the ALLBUT hardware shortcut.  The shortcut is skipped when CPU
 * hotplug is configured or for an NMI -- presumably because it would
 * also reach CPUs not in cpu_online_mask (NOTE(review): rationale
 * inferred from the branch structure; confirm against APIC docs).
 */
static void flat_send_IPI_allbutself(int vector)
{
	int cpu = smp_processor_id();
#ifdef	CONFIG_HOTPLUG_CPU
	int hotplug = 1;
#else
	int hotplug = 0;
#endif
	if (hotplug || vector == NMI_VECTOR) {
		/* anything online besides us?  otherwise nothing to send */
		if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) {
			unsigned long mask = cpumask_bits(cpu_online_mask)[0];

			/* remove ourselves from the destination bitmap */
			if (cpu < BITS_PER_LONG)
				clear_bit(cpu, &mask);

			_flat_send_IPI_mask(mask, vector);
		}
	} else if (num_online_cpus() > 1) {
		__default_send_IPI_shortcut(APIC_DEST_ALLBUT,
					    vector, apic->dest_logical);
	}
}
121
122static void flat_send_IPI_all(int vector)
123{
124	if (vector == NMI_VECTOR) {
125		flat_send_IPI_mask(cpu_online_mask, vector);
126	} else {
127		__default_send_IPI_shortcut(APIC_DEST_ALLINC,
128					    vector, apic->dest_logical);
129	}
130}
131
/* Extract the 8-bit xAPIC ID from bits 31:24 of an APIC_ID value. */
static unsigned int flat_get_apic_id(unsigned long x)
{
	return (x >> 24) & 0xFFu;
}
140
/* Place an 8-bit APIC ID into bits 31:24 of an APIC_ID register value. */
static unsigned long set_apic_id(unsigned int id)
{
	return (unsigned long)((id & 0xFFu) << 24);
}
148
149static unsigned int read_xapic_id(void)
150{
151	unsigned int id;
152
153	id = flat_get_apic_id(apic_read(APIC_ID));
154	return id;
155}
156
/*
 * Check whether the APIC ID read from this CPU's local APIC is
 * present in phys_cpu_present_map.
 */
static int flat_apic_id_registered(void)
{
	return physid_isset(read_xapic_id(), phys_cpu_present_map);
}
161
/*
 * Physical package ID: the initial APIC ID with the low index_msb
 * bits shifted away.
 */
static int flat_phys_pkg_id(int initial_apic_id, int index_msb)
{
	return initial_apic_id >> index_msb;
}
166
/*
 * APIC driver for flat logical destination mode: one bit per CPU in
 * the logical destination register, lowest-priority delivery.  Only
 * usable with up to 8 CPUs (see the physflat comment below).
 */
struct apic apic_flat =  {
	.name				= "flat",
	.probe				= NULL,
	.acpi_madt_oem_check		= flat_acpi_madt_oem_check,
	.apic_id_registered		= flat_apic_id_registered,

	.irq_delivery_mode		= dest_LowestPrio,
	.irq_dest_mode			= 1, /* logical */

	.target_cpus			= flat_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= APIC_DEST_LOGICAL,
	.check_apicid_used		= NULL,
	.check_apicid_present		= NULL,

	.vector_allocation_domain	= flat_vector_allocation_domain,
	.init_apic_ldr			= flat_init_apic_ldr,

	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.multi_timer_check		= NULL,
	.apicid_to_node			= NULL,
	.cpu_to_logical_apicid		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= flat_phys_pkg_id,
	.mps_oem_check			= NULL,

	.get_apic_id			= flat_get_apic_id,
	.set_apic_id			= set_apic_id,
	.apic_id_mask			= 0xFFu << 24, /* APIC ID in bits 31:24 */

	.cpu_mask_to_apicid		= default_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,

	.send_IPI_mask			= flat_send_IPI_mask,
	.send_IPI_mask_allbutself	= flat_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= flat_send_IPI_allbutself,
	.send_IPI_all			= flat_send_IPI_all,
	.send_IPI_self			= apic_send_IPI_self,

	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert		= NULL,
	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= default_inquire_remote_apic,

	.read				= native_apic_mem_read,
	.write				= native_apic_mem_write,
	.icr_read			= native_apic_icr_read,
	.icr_write			= native_apic_icr_write,
	.wait_icr_idle			= native_apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_apic_wait_icr_idle,
};
224
/*
 * Physflat mode is used when there are more than 8 CPUs on a system.
 * We cannot use logical delivery in this case because the mask
 * overflows, so use physical mode.
 */

/*
 * Decide from the ACPI tables whether physical flat mode must be
 * used.  Returns 1 to select physflat, 0 otherwise.
 */
static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
#ifdef CONFIG_ACPI
	/*
	 * Quirk: some x86_64 machines can only use physical APIC mode
	 * regardless of how many processors are present (x86_64 ES7000
	 * is an example).
	 */
	if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
		(acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
		/* fix: terminate with '\n' so the message does not run
		 * into the next printk line */
		printk(KERN_DEBUG "system APIC only can use physical flat\n");
		return 1;
	}

	if (!strncmp(oem_id, "IBM", 3) && !strncmp(oem_table_id, "EXA", 3)) {
		printk(KERN_DEBUG "IBM Summit detected, will use apic physical\n");
		return 1;
	}
#endif

	return 0;
}
252
/* CPUs that interrupts may target in physflat mode: all online CPUs. */
static const struct cpumask *physflat_target_cpus(void)
{
	return cpu_online_mask;
}
257
/*
 * Vector allocation domain in physical mode: a vector is allocated
 * for exactly one CPU, so the domain is just @cpu itself.
 */
static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	cpumask_copy(retmask, cpumask_of(cpu));
}
263
/* Send @vector to every CPU in @cpumask using physical destinations. */
static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
{
	default_send_IPI_mask_sequence_phys(cpumask, vector);
}
268
/* Send @vector to every CPU in @cpumask except the current one. */
static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
					      int vector)
{
	default_send_IPI_mask_allbutself_phys(cpumask, vector);
}
274
/* Send @vector to all online CPUs except the sender. */
static void physflat_send_IPI_allbutself(int vector)
{
	default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
}
279
/* Send @vector to all online CPUs, sender included. */
static void physflat_send_IPI_all(int vector)
{
	physflat_send_IPI_mask(cpu_online_mask, vector);
}
284
285static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask)
286{
287	int cpu;
288
289	/*
290	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
291	 * May as well be the first.
292	 */
293	cpu = cpumask_first(cpumask);
294	if ((unsigned)cpu < nr_cpu_ids)
295		return per_cpu(x86_cpu_to_apicid, cpu);
296	else
297		return BAD_APICID;
298}
299
300static unsigned int
301physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
302				const struct cpumask *andmask)
303{
304	int cpu;
305
306	/*
307	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
308	 * May as well be the first.
309	 */
310	for_each_cpu_and(cpu, cpumask, andmask) {
311		if (cpumask_test_cpu(cpu, cpu_online_mask))
312			break;
313	}
314	return per_cpu(x86_cpu_to_apicid, cpu);
315}
316
/*
 * APIC driver for physical flat mode: fixed delivery to physical
 * APIC IDs, one CPU per interrupt destination.  Selected when flat
 * logical mode cannot be used (see physflat_acpi_madt_oem_check()).
 */
struct apic apic_physflat =  {

	.name				= "physical flat",
	.probe				= NULL,
	.acpi_madt_oem_check		= physflat_acpi_madt_oem_check,
	.apic_id_registered		= flat_apic_id_registered,

	.irq_delivery_mode		= dest_Fixed,
	.irq_dest_mode			= 0, /* physical */

	.target_cpus			= physflat_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= 0,
	.check_apicid_used		= NULL,
	.check_apicid_present		= NULL,

	.vector_allocation_domain	= physflat_vector_allocation_domain,
	/* not needed, but shouldn't hurt: */
	.init_apic_ldr			= flat_init_apic_ldr,

	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.multi_timer_check		= NULL,
	.apicid_to_node			= NULL,
	.cpu_to_logical_apicid		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= flat_phys_pkg_id,
	.mps_oem_check			= NULL,

	.get_apic_id			= flat_get_apic_id,
	.set_apic_id			= set_apic_id,
	.apic_id_mask			= 0xFFu << 24, /* APIC ID in bits 31:24 */

	.cpu_mask_to_apicid		= physflat_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and		= physflat_cpu_mask_to_apicid_and,

	.send_IPI_mask			= physflat_send_IPI_mask,
	.send_IPI_mask_allbutself	= physflat_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= physflat_send_IPI_allbutself,
	.send_IPI_all			= physflat_send_IPI_all,
	.send_IPI_self			= apic_send_IPI_self,

	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert		= NULL,
	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= default_inquire_remote_apic,

	.read				= native_apic_mem_read,
	.write				= native_apic_mem_write,
	.icr_read			= native_apic_icr_read,
	.icr_write			= native_apic_icr_write,
	.wait_icr_idle			= native_apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_apic_wait_icr_idle,
};
376