#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/cpumask.h>

#include <asm/r4k-timer.h>
#include <asm/atomic.h>
#include <asm/barrier.h>
#include <asm/mipsregs.h>

static atomic_t __cpuinitdata count_start_flag = ATOMIC_INIT(0);
static atomic_t __cpuinitdata count_count_start = ATOMIC_INIT(0);
static atomic_t __cpuinitdata count_count_stop = ATOMIC_INIT(0);
static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0);

#define COUNTON 100
#define NR_LOOPS 5

void __cpuinit synchronise_count_master(void)
{
	int i;
	unsigned long flags;
	unsigned int initcount;
	int nslaves;

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC needs to synchronise per VPE, not per CPU;
	 * ignore for now.
	 */
	return;
#endif

	printk(KERN_INFO "Synchronize counters across %u CPUs: ",
	       num_online_cpus());

	local_irq_save(flags);

	/*
	 * Notify the slaves that it's time to start.
	 */
	atomic_set(&count_reference, read_c0_count());
	atomic_set(&count_start_flag, 1);
	smp_wmb();

	/* Count will be initialised to the current timer on all CPUs. */
	initcount = read_c0_count();

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronised and
	 * the master and slaves each set their cycle counters to a known
	 * value all at once.  This reduces the chance of having random
	 * offsets between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than the
	 * latency of information-passing (cachelines) between two CPUs.
	 */

	nslaves = num_online_cpus() - 1;
	for (i = 0; i < NR_LOOPS; i++) {
		/* slaves loop on '!= ncpus' */
		while (atomic_read(&count_count_start) != nslaves)
			mb();
		atomic_set(&count_count_stop, 0);
		smp_wmb();

		/* This lets the slaves write their count register. */
		atomic_inc(&count_count_start);

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);

		/*
		 * Wait for all slaves to leave the synchronization point:
		 */
		while (atomic_read(&count_count_stop) != nslaves)
			mb();
		atomic_set(&count_count_start, 0);
		smp_wmb();
		atomic_inc(&count_count_stop);
	}
	/* Arrange for an interrupt in a short while. */
	write_c0_compare(read_c0_count() + COUNTON);

	local_irq_restore(flags);

	/*
	 * The i386 code reported the skew here, but the count registers
	 * were almost certainly out of sync, so there is no point in
	 * alarming people.
	 */
	printk("done.\n");
}

void __cpuinit synchronise_count_slave(void)
{
	int i;
	unsigned long flags;
	unsigned int initcount;
	int ncpus;

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC needs to synchronise per VPE, not per CPU;
	 * ignore for now.
	 */
	return;
#endif

	local_irq_save(flags);

	/*
	 * Not every CPU is online when this gets called, so we first
	 * wait for the master to say everyone is ready.
	 */

	while (!atomic_read(&count_start_flag))
		mb();

	/* Count will be initialised to the master's reference value on all CPUs. */
	initcount = atomic_read(&count_reference);

	ncpus = num_online_cpus();
	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&count_count_start);
		while (atomic_read(&count_count_start) != ncpus)
			mb();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS-1)
			write_c0_count(initcount);
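
		/*
		 * Signal the master that this slave has passed the
		 * COUNT write point.  Once all nslaves increments have
		 * arrived, the master clears count_count_start for the
		 * next pass and bumps count_count_stop to ncpus,
		 * releasing every slave at once.
		 */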
		atomic_inc(&count_count_stop);
		while (atomic_read(&count_count_stop) != ncpus)
			mb();
	}
	/* Arrange for an interrupt in a short while. */
	write_c0_compare(read_c0_count() + COUNTON);

	local_irq_restore(flags);
}
#undef NR_LOOPS
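
/*
 * Per-pass handshake between synchronise_count_master() and the slaves.
 * ncpus == nslaves + 1, so the slaves' '!= ncpus' tests also count the
 * master's own increment:
 *
 *	master					each slave
 *	------					----------
 *	wait count_count_start == nslaves	inc count_count_start
 *	count_count_stop = 0			wait count_count_start == ncpus
 *	inc count_count_start
 *	(last pass) write_c0_count()		(last pass) write_c0_count()
 *	wait count_count_stop == nslaves	inc count_count_stop
 *	count_count_start = 0			wait count_count_stop == ncpus
 *	inc count_count_stop
 *
 * Note that the master initialises COUNT from a read_c0_count() taken
 * after count_reference was published, so the value it writes can differ
 * slightly from the one the slaves write.
 */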