#include <linux/types.h>
#include <asm/delay.h>
#include <irq.h>
#include <hwregs/intr_vect.h>
#include <hwregs/intr_vect_defs.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <hwregs/asm/mmu_defs_asm.h>
#include <hwregs/supp_reg.h>
#include <asm/atomic.h>

#include <linux/err.h>
#include <linux/init.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#define IPI_SCHEDULE 1
#define IPI_CALL 2
#define IPI_FLUSH_TLB 4
#define IPI_BOOT 8

#define FLUSH_ALL (void *)0xffffffff

/* Vector of locks used for various atomic operations */
spinlock_t cris_atomic_locks[] = { [0 ... LOCK_COUNT - 1] = SPIN_LOCK_UNLOCKED };
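/*
 * CRIS v32 has no atomic read-modify-write instructions, so the generic
 * atomic operations are implemented by hashing the target address into
 * the lock vector above and performing the update under that spinlock.
 * A minimal sketch of the scheme (the hash shift and the helper name are
 * illustrative; the real definitions live in the arch atomic headers):
 *
 *	#define ATOMIC_HASH(addr) \
 *		(&cris_atomic_locks[((unsigned long)(addr) >> 5) & (LOCK_COUNT - 1)])
 *
 *	static inline void sketch_atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(ATOMIC_HASH(v), flags);
 *		v->counter += i;
 *		spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 *	}
 */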
/* CPU masks */
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
EXPORT_SYMBOL(phys_cpu_present_map);

/* Variables used during SMP boot */
volatile int cpu_now_booting = 0;
volatile struct thread_info *smp_init_current_idle_thread;

/* Variables used during IPI */
static DEFINE_SPINLOCK(call_lock);
static DEFINE_SPINLOCK(tlbstate_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	int wait;
};

static struct call_data_struct *call_data;

static struct mm_struct *flush_mm;
static struct vm_area_struct *flush_vma;
static unsigned long flush_addr;

/* Mode registers */
static unsigned long irq_regs[NR_CPUS] = {
	regi_irq,
	regi_irq2
};

static irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id);
static int send_ipi(int vector, int wait, cpumask_t cpu_mask);
static struct irqaction irq_ipi = {
	.handler = crisv32_ipi_interrupt,
	.flags = IRQF_DISABLED,
	.name = "ipi",
};

extern void cris_mmu_init(void);
extern void cris_timer_init(void);

/* SMP initialization */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/* From now on we can expect IPIs so set them up */
	setup_irq(IPI_INTR_VECT, &irq_ipi);

	/* Mark all possible CPUs as present */
	for (i = 0; i < max_cpus; i++)
		cpu_set(i, phys_cpu_present_map);
}

void __devinit smp_prepare_boot_cpu(void)
{
	/* PGD pointer has moved after per_cpu initialization so
	 * update the MMU.
	 */
	pgd_t **pgd;
	pgd = (pgd_t **)&per_cpu(current_pgd, smp_processor_id());

	SUPP_BANK_SEL(1);
	SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
	SUPP_BANK_SEL(2);
	SUPP_REG_WR(RW_MM_TLB_PGD, pgd);

	set_cpu_online(0, true);
	cpu_set(0, phys_cpu_present_map);
	set_cpu_possible(0, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* Bring one CPU online. */
static int __init
smp_boot_one_cpu(int cpuid)
{
	unsigned timeout;
	struct task_struct *idle;
	cpumask_t cpu_mask = CPU_MASK_NONE;

	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("SMP: fork failed for CPU:%d", cpuid);

	task_thread_info(idle)->cpu = cpuid;

	/* Information to the CPU that is about to boot */
	smp_init_current_idle_thread = task_thread_info(idle);
	cpu_now_booting = cpuid;

	/* Kick it */
	cpu_set(cpuid, cpu_online_map);
	cpu_set(cpuid, cpu_mask);
	send_ipi(IPI_BOOT, 0, cpu_mask);
	cpu_clear(cpuid, cpu_online_map);

	/* Wait for CPU to come online */
	for (timeout = 0; timeout < 10000; timeout++) {
		if (cpu_online(cpuid)) {
			cpu_now_booting = 0;
			smp_init_current_idle_thread = NULL;
			return 0; /* CPU online */
		}
		udelay(100);
		barrier();
	}

	put_task_struct(idle);
	idle = NULL;

	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;
}

/* Secondary CPUs start executing C code here. Here we need to set up
 * CPU-specific stuff such as the local timer and the MMU. */
void __init smp_callin(void)
{
	extern void cpu_idle(void);

	int cpu = cpu_now_booting;
	reg_intr_vect_rw_mask vect_mask = {0};

	/* Initialise the idle task for this CPU */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Set up MMU */
	cris_mmu_init();
	__flush_tlb_all();

	/* Set up the local timer. */
	cris_timer_init();

	/* Enable IRQ and idle */
	REG_WR(intr_vect, irq_regs[cpu], rw_mask, vect_mask);
	crisv32_unmask_irq(IPI_INTR_VECT);
	crisv32_unmask_irq(TIMER0_INTR_VECT);
	preempt_disable();
	notify_cpu_starting(cpu);
	local_irq_enable();

	cpu_set(cpu, cpu_online_map);
	cpu_idle();
}

/* Stop execution on this CPU. */
void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	asm volatile("halt");
}

/* Other calls */
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

/* cache_decay_ticks is used by the scheduler to decide if a process
 * is "hot" on one CPU. A higher value means a higher penalty to move
 * a process to another CPU. Our cache is rather small so we report
 * 1 tick.
 */
unsigned long cache_decay_ticks = 1;

int __cpuinit __cpu_up(unsigned int cpu)
{
	smp_boot_one_cpu(cpu);
	return cpu_online(cpu) ? 0 : -ENOSYS;
}

void smp_send_reschedule(int cpu)
{
	cpumask_t cpu_mask = CPU_MASK_NONE;
	cpu_set(cpu, cpu_mask);
	send_ipi(IPI_SCHEDULE, 0, cpu_mask);
}

/* TLB flushing
 *
 * Flush needs to be done on the local CPU and on any other CPU that
 * may have the same mapping. The mm->cpu_vm_mask is used to keep track
 * of which CPUs a specific process has executed on.
 */
void flush_tlb_common(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long flags;
	cpumask_t cpu_mask;

	spin_lock_irqsave(&tlbstate_lock, flags);
	cpu_mask = (mm == FLUSH_ALL ? cpu_all_mask : *mm_cpumask(mm));
	cpu_clear(smp_processor_id(), cpu_mask);
	flush_mm = mm;
	flush_vma = vma;
	flush_addr = addr;
	send_ipi(IPI_FLUSH_TLB, 1, cpu_mask);
	spin_unlock_irqrestore(&tlbstate_lock, flags);
}

void flush_tlb_all(void)
{
	__flush_tlb_all();
	flush_tlb_common(FLUSH_ALL, FLUSH_ALL, 0);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_mm(mm);
	flush_tlb_common(mm, FLUSH_ALL, 0);
	/* No more mappings in other CPUs */
	cpumask_clear(mm_cpumask(mm));
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}

void flush_tlb_page(struct vm_area_struct *vma,
		    unsigned long addr)
{
	__flush_tlb_page(vma, addr);
	flush_tlb_common(vma->vm_mm, vma, addr);
}

/* Inter-processor interrupts
 *
 * The IPIs are used for:
 * * Forcing a schedule on a CPU
 * * Flushing the TLB on other CPUs
 * * Calling a function on other CPUs
 */

int send_ipi(int vector, int wait, cpumask_t cpu_mask)
{
	int i = 0;
	reg_intr_vect_rw_ipi ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
	int ret = 0;

	/* Calculate CPUs to send to. */
	cpus_and(cpu_mask, cpu_mask, cpu_online_map);

	/* Send the IPI. */
	for_each_cpu_mask(i, cpu_mask) {
		ipi.vector |= vector;
		REG_WR(intr_vect, irq_regs[i], rw_ipi, ipi);
	}

	/* Wait for the IPI to finish on the other CPUs */
	if (wait) {
		for_each_cpu_mask(i, cpu_mask) {
			int j;
			for (j = 0; j < 1000; j++) {
				ipi = REG_RD(intr_vect, irq_regs[i], rw_ipi);
				if (!ipi.vector)
					break;
				udelay(100);
			}

			/* Timeout? */
			if (ipi.vector) {
				printk(KERN_ERR "SMP call timeout from %d to %d\n",
				       smp_processor_id(), i);
				ret = -ETIMEDOUT;
				dump_stack();
			}
		}
	}
	return ret;
}

/*
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *info), void *info, int wait)
{
	cpumask_t cpu_mask = CPU_MASK_ALL;
	struct call_data_struct data;
	int ret;

	cpu_clear(smp_processor_id(), cpu_mask);

	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	data.wait = wait;

	spin_lock(&call_lock);
	call_data = &data;
	ret = send_ipi(IPI_CALL, wait, cpu_mask);
	spin_unlock(&call_lock);

	return ret;
}
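/*
 * Typical usage of smp_call_function() (illustrative only, not part of
 * this file): run a function on every other online CPU and wait until
 * each of them has executed it. The callback runs in interrupt context
 * on the remote CPUs (see crisv32_ipi_interrupt() below), so it must
 * not sleep.
 *
 *	static void count_ack(void *info)
 *	{
 *		atomic_inc((atomic_t *)info);
 *	}
 *
 *	atomic_t acks = ATOMIC_INIT(0);
 *
 *	smp_call_function(count_ack, &acks, 1);	// blocks until all have run it
 */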
irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id)
{
	void (*func) (void *info);
	void *info;
	reg_intr_vect_rw_ipi ipi;

	ipi = REG_RD(intr_vect, irq_regs[smp_processor_id()], rw_ipi);

	if (ipi.vector & IPI_CALL) {
		/* Only dereference call_data when a cross-call is actually
		 * pending; other IPIs (IPI_BOOT, IPI_SCHEDULE) can arrive
		 * before any call has ever been set up. */
		func = call_data->func;
		info = call_data->info;
		func(info);
	}
	if (ipi.vector & IPI_FLUSH_TLB) {
		if (flush_mm == FLUSH_ALL)
			__flush_tlb_all();
		else if (flush_vma == FLUSH_ALL)
			__flush_tlb_mm(flush_mm);
		else
			__flush_tlb_page(flush_vma, flush_addr);
	}

	/* Acknowledge the IPI: clearing the vector register is what a
	 * waiting sender in send_ipi() polls for. */
	ipi.vector = 0;
	REG_WR(intr_vect, irq_regs[smp_processor_id()], rw_ipi, ipi);

	return IRQ_HANDLED;
}
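/*
 * Note on the send_ipi()/crisv32_ipi_interrupt() handshake: the rw_ipi
 * "vector" register is a bitmask of pending IPI_* requests. The sender
 * ORs its request bit in, and the receiver clears the whole register
 * once the interrupt has been serviced; that transition to zero is
 * exactly what the wait loop in send_ipi() polls for. An illustrative
 * synchronous request to a second CPU (assuming CPU 1 is online):
 *
 *	cpumask_t mask = CPU_MASK_NONE;
 *
 *	cpu_set(1, mask);
 *	send_ipi(IPI_FLUSH_TLB, 1, mask);	// returns once CPU 1 has acked
 */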