/* Source tree: asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6/arch/x86/kernel/apic/ */
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/dmar.h>

#include <asm/smp.h>
#include <asm/apic.h>
#include <asm/ipi.h>

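/* Per-CPU cache of each CPU's logical APIC ID, filled in by init_x2apic_ldr(). */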
static DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);

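/*
 * Select this driver whenever the CPU is already running in x2APIC mode;
 * the ACPI MADT OEM strings themselves are not inspected.
 */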
static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return x2apic_enabled();
}

/*
 * Need to use more than CPU 0, because we need more vectors
 * when MSI-X is used.
 */
static const struct cpumask *x2apic_target_cpus(void)
{
	return cpu_online_mask;
}

/*
 * For now, each logical CPU is in its own vector allocation domain.
 */
static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	cpumask_clear(retmask);
	cpumask_set_cpu(cpu, retmask);
}

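/*
 * Issue a single fixed-delivery IPI. In x2APIC mode the ICR is written
 * as one 64-bit MSR, carrying both the command and the destination ID.
 */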
static void
__x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
{
	unsigned long cfg;

	cfg = __prepare_ICR(0, vector, dest);

	/*
	 * send the IPI.
	 */
	native_x2apic_icr_write(cfg, apicid);
}

/*
 * For now, we send the IPIs one by one in the cpumask.
 * TBD: Based on the cpu mask, we can send the IPIs to the cluster group
 * at once. We have 16 CPUs in a cluster. This will minimize IPI register
 * writes.
 */
static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	unsigned long query_cpu;
	unsigned long flags;

	x2apic_wrmsr_fence();

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__x2apic_send_IPI_dest(
			per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}

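/* As x2apic_send_IPI_mask(), but skip the CPU that is doing the sending. */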
static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned long this_cpu = smp_processor_id();
	unsigned long query_cpu;
	unsigned long flags;

	x2apic_wrmsr_fence();

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__x2apic_send_IPI_dest(
				per_cpu(x86_cpu_to_logical_apicid, query_cpu),
				vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}

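/* Send the given vector to every online CPU except the caller. */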
static void x2apic_send_IPI_allbutself(int vector)
{
	unsigned long this_cpu = smp_processor_id();
	unsigned long query_cpu;
	unsigned long flags;

	x2apic_wrmsr_fence();

	local_irq_save(flags);
	for_each_online_cpu(query_cpu) {
		if (query_cpu == this_cpu)
			continue;
		__x2apic_send_IPI_dest(
				per_cpu(x86_cpu_to_logical_apicid, query_cpu),
				vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}

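/* Broadcast the given vector to all online CPUs, including the caller. */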
static void x2apic_send_IPI_all(int vector)
{
	x2apic_send_IPI_mask(cpu_online_mask, vector);
}

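/* The local APIC is always usable when running in x2APIC mode. */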
static int x2apic_apic_id_registered(void)
{
	return 1;
}

static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	/*
	 * We're using fixed IRQ delivery, can only return one logical APIC ID.
	 * May as well be the first.
	 */
	int cpu = cpumask_first(cpumask);

	if ((unsigned)cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_logical_apicid, cpu);
	else
		return BAD_APICID;
}

static unsigned int
x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			      const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one logical APIC ID.
	 * May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	}

	return per_cpu(x86_cpu_to_logical_apicid, cpu);
}

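/*
 * In x2APIC mode the APIC ID register holds the full 32-bit ID, so
 * converting between the register value and the ID is a plain copy.
 */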
static unsigned int x2apic_cluster_phys_get_apic_id(unsigned long x)
{
	unsigned int id;

	id = x;
	return id;
}

static unsigned long set_apic_id(unsigned int id)
{
	unsigned long x;

	x = id;
	return x;
}

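/* Shift out the low-order core/thread bits of the initial APIC ID to get the package ID. */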
static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb)
{
	return initial_apicid >> index_msb;
}

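/* x2APIC provides a dedicated SELF IPI register, so no full ICR write is needed. */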
static void x2apic_send_IPI_self(int vector)
{
	apic_write(APIC_SELF_IPI, vector);
}

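/*
 * Cache this CPU's logical APIC ID from the Logical Destination Register
 * so the IPI paths above can look it up without reading the APIC each time.
 */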
static void init_x2apic_ldr(void)
{
	int cpu = smp_processor_id();

	per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR);
}

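/*
 * The cluster-mode x2APIC driver: logical destination mode with
 * lowest-priority delivery and MSR-based register access. Callbacks
 * that do not apply in this mode are left NULL.
 */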
struct apic apic_x2apic_cluster = {

	.name				= "cluster x2apic",
	.probe				= NULL,
	.acpi_madt_oem_check		= x2apic_acpi_madt_oem_check,
	.apic_id_registered		= x2apic_apic_id_registered,

	.irq_delivery_mode		= dest_LowestPrio,
	.irq_dest_mode			= 1, /* logical */

	.target_cpus			= x2apic_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= APIC_DEST_LOGICAL,
	.check_apicid_used		= NULL,
	.check_apicid_present		= NULL,

	.vector_allocation_domain	= x2apic_vector_allocation_domain,
	.init_apic_ldr			= init_x2apic_ldr,

	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.multi_timer_check		= NULL,
	.apicid_to_node			= NULL,
	.cpu_to_logical_apicid		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= x2apic_cluster_phys_pkg_id,
	.mps_oem_check			= NULL,

	.get_apic_id			= x2apic_cluster_phys_get_apic_id,
	.set_apic_id			= set_apic_id,
	.apic_id_mask			= 0xFFFFFFFFu,

	.cpu_mask_to_apicid		= x2apic_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and		= x2apic_cpu_mask_to_apicid_and,

	.send_IPI_mask			= x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= x2apic_send_IPI_allbutself,
	.send_IPI_all			= x2apic_send_IPI_all,
	.send_IPI_self			= x2apic_send_IPI_self,

	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert		= NULL,
	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= NULL,

	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};