/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 MIPS Technologies, Inc.
 * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl
 */
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>

#include <asm/smtc_ipi.h>
#include <asm/time.h>
#include <asm/cevt-r4k.h>

/*
 * Variant clock event timer support for SMTC on MIPS 34K, 1004K
 * or other MIPS MT cores.
 *
 * Notes on SMTC Support:
 *
 * SMTC has multiple microthread TCs pretending to be Linux CPUs.
 * But there's only one Count/Compare pair per VPE, and Compare
 * interrupts are taken opportunistically by available TCs
 * bound to the VPE with the Count register.  The new timer
 * framework provides for global broadcasts, but we really
 * want VPE-level multicasts for best behavior.  So instead
 * of invoking the high-level clock-event broadcast code,
 * this version of SMTC support uses the historical SMTC
 * multicast mechanisms "under the hood", appearing to the
 * generic clock layer as if the interrupts are per-CPU.
 *
 * The approach taken here is to maintain a set of NR_CPUS
 * virtual timers, and track which "CPU" needs to be alerted
 * at each event.
 *
 * It's unlikely that we'll see a MIPS MT core with more than
 * 2 VPEs, but we *know* that we won't need to handle more
 * VPEs than we have "CPUs".  So NR_CPUS arrays of NR_CPUS
 * elements are always going to be overkill, but always going
 * to be enough.
 */

unsigned long smtc_nexttime[NR_CPUS][NR_CPUS];
static int smtc_nextinvpe[NR_CPUS];

/*
 * Timestamps stored are absolute values to be programmed
 * into the Count register.  Valid timestamps will never be zero.
 * If a zero Count value is actually calculated, it is converted
 * to a 1, which will introduce one or two CPU cycles of error
 * roughly once every four billion events, which at 1000 HZ means
 * about once every 50 days.  If that's actually a problem, one
 * could alternate squashing 0 to 1 and to -1.
 */

#define MAKEVALID(x) (((x) == 0L) ? 1L : (x))
#define ISVALID(x) ((x) != 0L)

/*
 * Time comparison is subtle, as it's really truncated
 * modular arithmetic.
 */

#define IS_SOONER(a, b, reference) \
	(((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))
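
/*
 * Worked example of the comparison above (illustrative values, not
 * from real hardware): with reference == 0xFFFFFF00, a == 0x00000020
 * and b == 0xFFFFFF80, IS_SOONER(b, a, reference) holds because
 * (b - reference) == 0x80 is less than (a - reference) == 0x120,
 * even though b > a as plain unsigned values.  Each timestamp is
 * measured by its forward distance from the reference point, which
 * keeps the comparison correct across Count register wraparound.
 */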

/*
 * CATCHUP_INCREMENT, used when the function falls behind the counter.
 * Could be an increasing function instead of a constant.
 */

#define CATCHUP_INCREMENT 64

static int mips_next_event(unsigned long delta,
			   struct clock_event_device *evt)
{
	unsigned long flags;
	unsigned int mtflags;
	unsigned long timestamp, reference, previous;
	unsigned long nextcomp = 0L;
	int vpe = current_cpu_data.vpe_id;
	int cpu = smp_processor_id();

	local_irq_save(flags);
	mtflags = dmt();

	/*
	 * Maintain the per-TC virtual timer
	 * and program the per-VPE shared Count register
	 * as appropriate here...
	 */
	reference = (unsigned long)read_c0_count();
	timestamp = MAKEVALID(reference + delta);
	/*
	 * To really model the clock, we have to catch the case
	 * where the current next-in-VPE timestamp is the old
	 * timestamp for the calling CPU, but the new value is
	 * in fact later.  In that case, we have to do a full
	 * scan and discover the new next-in-VPE CPU id and
	 * timestamp.
	 */
	previous = smtc_nexttime[vpe][cpu];
	if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous)
	    && IS_SOONER(previous, timestamp, reference)) {
		int i;
		int soonest = cpu;

		/*
		 * Update the timestamp array here, so that the new
		 * value gets considered along with those of the
		 * other virtual CPUs on the VPE.
		 */
		smtc_nexttime[vpe][cpu] = timestamp;
		for_each_online_cpu(i) {
			if (ISVALID(smtc_nexttime[vpe][i])
			    && IS_SOONER(smtc_nexttime[vpe][i],
					 smtc_nexttime[vpe][soonest], reference)) {
				soonest = i;
			}
		}
		smtc_nextinvpe[vpe] = soonest;
		nextcomp = smtc_nexttime[vpe][soonest];
	/*
	 * Otherwise, we don't have to process the whole array rank,
	 * we just have to see if the event horizon has gotten closer.
	 */
	} else {
		if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) ||
		    IS_SOONER(timestamp,
			      smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) {
			smtc_nextinvpe[vpe] = cpu;
			nextcomp = timestamp;
		}
		/*
		 * Since next-in-VPE may be the same as the executing
		 * virtual CPU, we update the array *after* checking
		 * its value.
		 */
		smtc_nexttime[vpe][cpu] = timestamp;
	}

	/*
	 * It may be that, in fact, we don't need to update Compare,
	 * but if we do, we want to make sure we didn't fall into
	 * a crack just behind Count.
	 */
	if (ISVALID(nextcomp)) {
		write_c0_compare(nextcomp);
		ehb();
		/*
		 * We never return an error, we just make sure
		 * that we trigger the handlers as quickly as
		 * we can if we fell behind.
		 */
		while ((nextcomp - (unsigned long)read_c0_count())
		       > (unsigned long)LONG_MAX) {
			nextcomp += CATCHUP_INCREMENT;
			write_c0_compare(nextcomp);
			ehb();
		}
	}
	emt(mtflags);
	local_irq_restore(flags);
	return 0;
}
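
/*
 * A note on the "fell behind" tests above and in smtc_distribute_timer()
 * below (a sketch of the arithmetic, not from the original comments):
 * with an unsigned 32-bit Count, a timestamp x is treated as already
 * in the past when (x - read_c0_count()) > LONG_MAX, i.e. when the
 * unsigned difference wraps into the upper half of the counter range.
 * For example, if Count has just passed x by 4 cycles, then
 * (x - Count) == 0xFFFFFFFC, which exceeds LONG_MAX (0x7FFFFFFF).
 * mips_next_event() then keeps bumping Compare by CATCHUP_INCREMENT
 * until it lands in front of Count, while smtc_distribute_timer()
 * treats such a timestamp as an expired event to be delivered.
 */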

void smtc_distribute_timer(int vpe)
{
	unsigned long flags;
	unsigned int mtflags;
	int cpu;
	struct clock_event_device *cd;
	unsigned long nextstamp;
	unsigned long reference;

repeat:
	nextstamp = 0L;
	for_each_online_cpu(cpu) {
		/*
		 * Find virtual CPUs within the current VPE which have
		 * unserviced timer requests whose time is now past.
		 */
		local_irq_save(flags);
		mtflags = dmt();
		if (cpu_data[cpu].vpe_id == vpe &&
		    ISVALID(smtc_nexttime[vpe][cpu])) {
			reference = (unsigned long)read_c0_count();
			if ((smtc_nexttime[vpe][cpu] - reference)
			    > (unsigned long)LONG_MAX) {
				smtc_nexttime[vpe][cpu] = 0L;
				emt(mtflags);
				local_irq_restore(flags);
				/*
				 * We don't send IPIs to ourself.
				 */
				if (cpu != smp_processor_id()) {
					smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
				} else {
					cd = &per_cpu(mips_clockevent_device, cpu);
					cd->event_handler(cd);
				}
			} else {
				/* Local to VPE but valid time not yet reached. */
				if (!ISVALID(nextstamp) ||
				    IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp,
					      reference)) {
					smtc_nextinvpe[vpe] = cpu;
					nextstamp = smtc_nexttime[vpe][cpu];
				}
				emt(mtflags);
				local_irq_restore(flags);
			}
		} else {
			emt(mtflags);
			local_irq_restore(flags);
		}
	}
	/* Reprogram for an interrupt at the next soonest timestamp for the VPE */
	if (ISVALID(nextstamp)) {
		write_c0_compare(nextstamp);
		ehb();
		if ((nextstamp - (unsigned long)read_c0_count())
		    > (unsigned long)LONG_MAX)
			goto repeat;
	}
}

irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

	/* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */
	handle_perf_irq(1);

	if (read_c0_cause() & (1 << 30)) {
		/* Clear the Count/Compare interrupt (Cause.TI, bit 30) */
		write_c0_compare(read_c0_compare());
		smtc_distribute_timer(cpu_data[cpu].vpe_id);
	}
	return IRQ_HANDLED;
}

int __cpuinit smtc_clockevent_init(void)
{
	uint64_t mips_freq = mips_hpt_frequency;
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	unsigned int irq;
	int i;
	int j;

	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;
	if (cpu == 0) {
		for (i = 0; i < num_possible_cpus(); i++) {
			smtc_nextinvpe[i] = 0;
			for (j = 0; j < num_possible_cpus(); j++)
				smtc_nexttime[i][j] = 0L;
		}
		/*
		 * SMTC also can't have the usability test
		 * run by secondary TCs once Compare is in use.
		 */
		if (!c0_compare_int_usable())
			return -ENXIO;
	}

	/*
	 * With vectored interrupts things are getting platform specific.
	 * get_c0_compare_int is a hook to allow a platform to return the
	 * interrupt number of its liking.
	 */
	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	if (get_c0_compare_int)
		irq = get_c0_compare_int();

	cd = &per_cpu(mips_clockevent_device, cpu);

	cd->name = "MIPS";
	cd->features = CLOCK_EVT_FEAT_ONESHOT;

	/* Calculate the min / max delta */
	cd->mult = div_sc((unsigned long)mips_freq, NSEC_PER_SEC, 32);
	cd->shift = 32;
	cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
	cd->min_delta_ns = clockevent_delta2ns(0x300, cd);

	cd->rating = 300;
	cd->irq = irq;
	cd->cpumask = cpumask_of(cpu);
	cd->set_next_event = mips_next_event;
	cd->set_mode = mips_set_clock_mode;
	cd->event_handler = mips_event_handler;

	clockevents_register_device(cd);

	/*
	 * On SMTC we only want to do the data structure
	 * initialization and IRQ setup once.
	 */
	if (cpu)
		return 0;
	/*
	 * And we need the hwmask associated with the c0_compare
	 * vector to be initialized.
	 */
	irq_hwmask[irq] = (0x100 << cp0_compare_irq);
	if (cp0_timer_irq_installed)
		return 0;

	cp0_timer_irq_installed = 1;

	setup_irq(irq, &c0_compare_irqaction);

	return 0;
}
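
/*
 * For reference, a worked example of the scaling set up above
 * (illustrative numbers, assuming a 100 MHz Count rate): div_sc()
 * yields cd->mult == (100000000ULL << 32) / NSEC_PER_SEC, i.e.
 * 2^32 / 10 (about 0x1999999A), and clockevent_delta2ns() converts
 * the minimum delta of 0x300 Count cycles back to roughly 7680 ns,
 * so the shortest programmable event at that clock rate is on the
 * order of 7.7 microseconds.
 */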