/*
 * Intel SMP support routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 * (c) 1998-99, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 * (c) 2002,2003 Andi Kleen, SuSE Labs.
 *
 * i386 and x86_64 integration by Glauber Costa <gcosta@redhat.com>
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */

#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/gfp.h>

#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apic.h>

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
static void native_smp_send_reschedule(int cpu)
{
	/*
	 * Sending an IPI to an offline CPU is a caller bug: the IPI
	 * would at best be lost.  Warn and drop the request.
	 */
	if (unlikely(cpu_is_offline(cpu))) {
		WARN_ON(1);
		return;
	}
	apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
}

/* Kick one CPU to process its pending smp_call_function_single() work. */
void native_send_call_func_single_ipi(int cpu)
{
	apic->send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
}

/*
 * Send the CALL_FUNCTION IPI to every CPU in @mask, preferring the
 * cheaper "all but self" APIC shortcut when the mask is exactly all
 * other online CPUs.
 */
void native_send_call_func_ipi(const struct cpumask *mask)
{
	cpumask_var_t allbutself;

	/*
	 * The scratch mask is allocated with GFP_ATOMIC and may fail;
	 * in that case just send to @mask directly - correct, only the
	 * shortcut optimization below is skipped.
	 */
	if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
		return;
	}

	cpumask_copy(allbutself, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), allbutself);

	/*
	 * Use the broadcast shortcut only when the target is exactly
	 * "every online CPU except us" and online == callout -
	 * presumably so the broadcast cannot reach a CPU that is still
	 * being brought up (TODO confirm cpu_callout_mask semantics).
	 */
	if (cpumask_equal(mask, allbutself) &&
	    cpumask_equal(cpu_online_mask, cpu_callout_mask))
		apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
	else
		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

	free_cpumask_var(allbutself);
}

/*
 * this function calls the 'stop' function on all other CPUs in the system.
 */

/*
 * REBOOT_VECTOR handler: acknowledge the APIC and park this CPU.
 * NOTE(review): stop_this_cpu() is expected not to return, which would
 * make the trailing irq_exit() unreachable - confirm against its
 * definition in process.c.
 */
asmlinkage void smp_reboot_interrupt(void)
{
	ack_APIC_irq();
	irq_enter();
	stop_this_cpu(NULL);
	irq_exit();
}

/*
 * Stop every CPU but the current one (reboot/shutdown path).
 *
 * @wait: non-zero means spin until all other CPUs have gone offline;
 *        zero means give up after roughly one second.
 */
static void native_stop_other_cpus(int wait)
{
	unsigned long flags;
	unsigned long timeout;

	/* "reboot=force" means skip the orderly shutdown of other CPUs. */
	if (reboot_force)
		return;

	/*
	 * Use an own vector here because smp_call_function
	 * does lots of things not suitable in a panic situation.
	 * On most systems we could also use an NMI here,
	 * but there are a few systems around where NMI
	 * is problematic so stay with an non NMI for now
	 * (this implies we cannot stop CPUs spinning with irq off
	 * currently)
	 */
	if (num_online_cpus() > 1) {
		apic->send_IPI_allbutself(REBOOT_VECTOR);

		/*
		 * Don't wait longer than a second if the caller
		 * didn't ask us to wait.  (wait || timeout--) makes
		 * the loop unbounded when @wait is set, otherwise it
		 * runs at most USEC_PER_SEC iterations of udelay(1),
		 * i.e. about one second.
		 */
		timeout = USEC_PER_SEC;
		while (num_online_cpus() > 1 && (wait || timeout--))
			udelay(1);
	}

	/* Finally silence our own local APIC, with interrupts masked. */
	local_irq_save(flags);
	disable_local_APIC();
	local_irq_restore(flags);
}

/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
void smp_reschedule_interrupt(struct pt_regs *regs)
{
	/* Just ack and count; the actual reschedule happens on return. */
	ack_APIC_irq();
	inc_irq_stat(irq_resched_count);
	/*
	 * KVM uses this interrupt to force a cpu out of guest mode
	 */
}

/* CALL_FUNCTION_VECTOR handler: run the generic call-function queue. */
void smp_call_function_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	irq_enter();
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();
}

/* CALL_FUNCTION_SINGLE_VECTOR handler: single-target variant of the above. */
void smp_call_function_single_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();
}

/*
 * Default SMP operations table, wired to the native (bare-metal)
 * implementations; presumably individual entries are overridden by
 * paravirt/hypervisor code - confirm at the override sites.
 */
struct smp_ops smp_ops = {
	.smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
	.smp_prepare_cpus = native_smp_prepare_cpus,
	.smp_cpus_done = native_smp_cpus_done,

	.stop_other_cpus = native_stop_other_cpus,
	.smp_send_reschedule = native_smp_send_reschedule,

	.cpu_up = native_cpu_up,
	.cpu_die = native_cpu_die,
	.cpu_disable = native_cpu_disable,
	.play_dead = native_play_dead,

	.send_call_func_ipi = native_send_call_func_ipi,
	.send_call_func_single_ipi = native_send_call_func_single_ipi,
};
EXPORT_SYMBOL_GPL(smp_ops);