/*
 * Blackfin clocksource / clockevent (tick) setup.
 * Based on arm clockevents implementation and old bfin time tick.
 *
 * Copyright 2008-2009 Analog Devics Inc.
 *                2008 GeoTechnologies
 *                     Vitja Makarov
 *
 * Licensed under the GPL-2
 */

#include <linux/module.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/irq.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpufreq.h>

#include <asm/blackfin.h>
#include <asm/time.h>
#include <asm/gptimers.h>
#include <asm/nmi.h>

/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 * basic equation:
 *		ns = cycles / (freq / ns_per_sec)
 *		ns = cycles * (ns_per_sec / freq)
 *		ns = cycles * (10^9 / (cpu_khz * 10^3))
 *		ns = cycles * (10^6 / cpu_khz)
 *
 *	Then we use scaling math (suggested by george@mvista.com) to get:
 *		ns = cycles * (10^6 * SC / cpu_khz) / SC
 *		ns = cycles * cyc2ns_scale / SC
 *
 *	And since SC is a constant power of two, we can convert the div
 *	into a shift.
 *
 *	We can use khz divisor instead of mhz to keep a better precision, since
 *	cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 *	(mathieu.desnoyers@polymtl.ca)
 *
 *			-johnstul@us.ibm.com "math is hard, lets go shopping!"
 */

#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

#if defined(CONFIG_CYCLES_CLOCKSOURCE)

/*
 * Clocksource read callback: return the free-running CPU cycle counter.
 * When cpufreq is enabled, the raw count is shifted by __bfin_cycles_mod
 * and offset by __bfin_cycles_off -- NOTE(review): presumably these are
 * maintained by the cpufreq transition code to keep the counter monotonic
 * across frequency changes; confirm against the cpufreq notifier.
 */
static notrace cycle_t bfin_read_cycles(struct clocksource *cs)
{
#ifdef CONFIG_CPU_FREQ
	return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
#else
	return get_cycles();
#endif
}

/* CPU-cycle clocksource; rating 400 makes it preferred over gptimer0 (350). */
static struct clocksource bfin_cs_cycles = {
	.name		= "bfin_cs_cycles",
	.rating		= 400,
	.read		= bfin_read_cycles,
	.mask		= CLOCKSOURCE_MASK(64),
	.shift		= CYC2NS_SCALE_FACTOR,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

/* Convert the current cycle count to nanoseconds (used by sched_clock()). */
static inline unsigned long long bfin_cs_cycles_sched_clock(void)
{
	return clocksource_cyc2ns(bfin_read_cycles(&bfin_cs_cycles),
		bfin_cs_cycles.mult, bfin_cs_cycles.shift);
}

/*
 * Derive the cycles->ns multiplier from the core clock (CCLK) and register
 * the clocksource.  Registration failure is fatal: without a clocksource
 * timekeeping cannot run, hence panic().
 */
static int __init bfin_cs_cycles_init(void)
{
	bfin_cs_cycles.mult = \
		clocksource_hz2mult(get_cclk(), bfin_cs_cycles.shift);

	if (clocksource_register(&bfin_cs_cycles))
		panic("failed to register clocksource");

	return 0;
}
#else
# define bfin_cs_cycles_init()
#endif

#ifdef CONFIG_GPTMR0_CLOCKSOURCE

/*
 * Program GPTIMER0 as a free-running counter for use as a clocksource:
 * PWM mode, output disabled, counting periods.  Period/width are written
 * as -1/-2, i.e. the maximum 32-bit values, so wraparound is as rare as
 * the hardware allows.  SSYNC() flushes the MMR writes before enabling.
 */
void __init setup_gptimer0(void)
{
	disable_gptimers(TIMER0bit);

	set_gptimer_config(TIMER0_id, \
		TIMER_OUT_DIS | TIMER_PERIOD_CNT | TIMER_MODE_PWM);
	set_gptimer_period(TIMER0_id, -1);
	set_gptimer_pwidth(TIMER0_id, -2);
	SSYNC();
	enable_gptimers(TIMER0bit);
}

/* Clocksource read callback: current GPTIMER0 counter value (32 bit). */
static cycle_t bfin_read_gptimer0(struct clocksource *cs)
{
	return bfin_read_TIMER0_COUNTER();
}

/* SCLK-driven gptimer0 clocksource; fallback when cycles is unavailable. */
static struct clocksource bfin_cs_gptimer0 = {
	.name		= "bfin_cs_gptimer0",
	.rating		= 350,
	.read		= bfin_read_gptimer0,
	.mask		= CLOCKSOURCE_MASK(32),
	.shift		= CYC2NS_SCALE_FACTOR,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

/* Convert the current GPTIMER0 count to nanoseconds for sched_clock(). */
static inline unsigned long long bfin_cs_gptimer0_sched_clock(void)
{
	return clocksource_cyc2ns(bfin_read_TIMER0_COUNTER(),
		bfin_cs_gptimer0.mult, bfin_cs_gptimer0.shift);
}

/*
 * Start GPTIMER0, derive the mult from the system clock (SCLK -- the
 * gptimers are clocked off SCLK, not CCLK) and register the clocksource.
 */
static int __init bfin_cs_gptimer0_init(void)
{
	setup_gptimer0();

	bfin_cs_gptimer0.mult = \
		clocksource_hz2mult(get_sclk(), bfin_cs_gptimer0.shift);

	if (clocksource_register(&bfin_cs_gptimer0))
		panic("failed to register clocksource");

	return 0;
}
#else
# define bfin_cs_gptimer0_init()
#endif

#if defined(CONFIG_GPTMR0_CLOCKSOURCE) || defined(CONFIG_CYCLES_CLOCKSOURCE)
/* prefer to use cycles since it has higher rating */
notrace unsigned long long sched_clock(void)
{
#if defined(CONFIG_CYCLES_CLOCKSOURCE)
	return bfin_cs_cycles_sched_clock();
#else
	return bfin_cs_gptimer0_sched_clock();
#endif
}
#endif

#if defined(CONFIG_TICKSOURCE_GPTMR0)
/*
 * Clockevent callback: arm GPTIMER0 to fire after @cycles SCLKs.
 * Only the pulse width is reprogrammed here; the config/period were set
 * up by the ONESHOT branch of bfin_gptmr0_set_mode().
 */
static int bfin_gptmr0_set_next_event(unsigned long cycles,
				      struct clock_event_device *evt)
{
	disable_gptimers(TIMER0bit);

	/* it starts counting three SCLK cycles after the TIMENx bit is set */
	set_gptimer_pwidth(TIMER0_id, cycles - 3);
	enable_gptimers(TIMER0bit);
	return 0;
}

/*
 * Clockevent mode switch for GPTIMER0.
 * PERIODIC: period = SCLK/HZ so the IRQ fires at HZ, and the timer is
 * started immediately.  ONESHOT: IRQ enabled but the timer is left
 * stopped -- set_next_event() programs the width and starts it.
 */
static void bfin_gptmr0_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC: {
		set_gptimer_config(TIMER0_id, \
			TIMER_OUT_DIS | TIMER_IRQ_ENA | \
			TIMER_PERIOD_CNT | TIMER_MODE_PWM);
		set_gptimer_period(TIMER0_id, get_sclk() / HZ);
		set_gptimer_pwidth(TIMER0_id, get_sclk() / HZ - 1);
		enable_gptimers(TIMER0bit);
		break;
	}
	case CLOCK_EVT_MODE_ONESHOT:
		disable_gptimers(TIMER0bit);
		set_gptimer_config(TIMER0_id, \
			TIMER_OUT_DIS | TIMER_IRQ_ENA | TIMER_MODE_PWM);
		set_gptimer_period(TIMER0_id, 0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		disable_gptimers(TIMER0bit);
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

/* Acknowledge the GPTIMER0 interrupt by writing its TIMIL0 status bit. */
static void bfin_gptmr0_ack(void)
{
	set_gptimer_status(TIMER_GROUP1, TIMER_STATUS_TIMIL0);
}

/* Boot-time init: make sure GPTIMER0 is stopped before it is registered. */
static void __init bfin_gptmr0_init(void)
{
	disable_gptimers(TIMER0bit);
}

/*
 * GPTIMER0 tick interrupt: run the clockevent handler, then ack the
 * timer status.  dev_id is the clock_event_device set in time_init().
 */
#ifdef CONFIG_CORE_TIMER_IRQ_L1
__attribute__((l1_text))
#endif
irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;
	smp_mb();
	evt->event_handler(evt);
	bfin_gptmr0_ack();
	return IRQ_HANDLED;
}

static struct irqaction gptmr0_irq = {
	.name		= "Blackfin GPTimer0",
	.flags		= IRQF_DISABLED | IRQF_TIMER | \
			  IRQF_IRQPOLL | IRQF_PERCPU,
	.handler	= bfin_gptmr0_interrupt,
};

static struct clock_event_device clockevent_gptmr0 = {
	.name		= "bfin_gptimer0",
	.rating		= 300,
	.irq		= IRQ_TIMER0,
	.shift		= 32,
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_next_event	= bfin_gptmr0_set_next_event,
	.set_mode	= bfin_gptmr0_set_mode,
};

/*
 * Fill in the SCLK-derived mult and delta limits, bind the device to
 * CPU 0 and register it.  max delta of -1 is the full 32-bit range.
 */
static void __init bfin_gptmr0_clockevent_init(struct clock_event_device *evt)
{
	unsigned long clock_tick;

	clock_tick = get_sclk();
	evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift);
	evt->max_delta_ns = clockevent_delta2ns(-1, evt);
	evt->min_delta_ns = clockevent_delta2ns(100, evt);

	evt->cpumask = cpumask_of(0);

	clockevents_register_device(evt);
}
#endif /* CONFIG_TICKSOURCE_GPTMR0 */

#if defined(CONFIG_TICKSOURCE_CORETMR)
/* per-cpu local core timer */
static DEFINE_PER_CPU(struct clock_event_device, coretmr_events);

/*
 * Clockevent callback: arm the core timer to count down @cycles.
 * TCOUNT may only be written while the timer is powered but not running,
 * hence the TMPWR write + CSYNC() before loading TCOUNT.
 */
static int bfin_coretmr_set_next_event(unsigned long cycles,
				struct clock_event_device *evt)
{
	bfin_write_TCNTL(TMPWR);
	CSYNC();
	bfin_write_TCOUNT(cycles);
	CSYNC();
	bfin_write_TCNTL(TMPWR | TMREN);
	return 0;
}

/*
 * Core timer mode switch.
 * PERIODIC: auto-reload (TAUTORLD) with TPERIOD/TCOUNT set for HZ ticks
 * at CCLK/TIME_SCALE.  ONESHOT: timer powered but idle; set_next_event()
 * starts it.  SHUTDOWN/UNUSED: power the timer block off entirely.
 */
static void bfin_coretmr_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC: {
		unsigned long tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1);
		bfin_write_TCNTL(TMPWR);
		CSYNC();
		bfin_write_TSCALE(TIME_SCALE - 1);
		bfin_write_TPERIOD(tcount);
		bfin_write_TCOUNT(tcount);
		CSYNC();
		bfin_write_TCNTL(TMPWR | TMREN | TAUTORLD);
		break;
	}
	case CLOCK_EVT_MODE_ONESHOT:
		bfin_write_TCNTL(TMPWR);
		CSYNC();
		bfin_write_TSCALE(TIME_SCALE - 1);
		bfin_write_TPERIOD(0);
		bfin_write_TCOUNT(0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		bfin_write_TCNTL(0);
		CSYNC();
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

/*
 * Power up and quiesce this CPU's core timer.  Not __init: also called
 * for secondary CPUs -- NOTE(review): presumably from the SMP bringup
 * path; confirm against the caller.
 */
void bfin_coretmr_init(void)
{
	/* power up the timer, but don't enable it just yet */
	bfin_write_TCNTL(TMPWR);
	CSYNC();

	/* the TSCALE prescaler counter. */
	bfin_write_TSCALE(TIME_SCALE - 1);
	bfin_write_TPERIOD(0);
	bfin_write_TCOUNT(0);

	CSYNC();
}

/*
 * Core timer tick interrupt: dispatch to this CPU's clockevent handler.
 * Optionally placed in L1 instruction memory for faster entry.  Also
 * pets the NMI watchdog since this IRQ proves the CPU is alive.
 */
#ifdef CONFIG_CORE_TIMER_IRQ_L1
__attribute__((l1_text))
#endif
irqreturn_t bfin_coretmr_interrupt(int irq, void *dev_id)
{
	int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);

	smp_mb();
	evt->event_handler(evt);

	touch_nmi_watchdog();

	return IRQ_HANDLED;
}

static struct irqaction coretmr_irq = {
	.name		= "Blackfin CoreTimer",
	.flags		= IRQF_DISABLED | IRQF_TIMER | \
			  IRQF_IRQPOLL | IRQF_PERCPU,
	.handler	= bfin_coretmr_interrupt,
};

/*
 * Register this CPU's core timer clockevent device.  mult is derived
 * from CCLK/TIME_SCALE; .irq = -1 because the core timer IRQ is not a
 * system-interrupt number.  Runs per-CPU (cpumask_of(cpu)).
 */
void bfin_coretmr_clockevent_init(void)
{
	unsigned long clock_tick;
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);

	evt->name = "bfin_core_timer";
	evt->rating = 350;
	evt->irq = -1;
	evt->shift = 32;
	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	evt->set_next_event = bfin_coretmr_set_next_event;
	evt->set_mode = bfin_coretmr_set_mode;

	clock_tick = get_cclk() / TIME_SCALE;
	evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift);
	evt->max_delta_ns = clockevent_delta2ns(-1, evt);
	evt->min_delta_ns = clockevent_delta2ns(100, evt);

	evt->cpumask = cpumask_of(cpu);

	clockevents_register_device(evt);
}
#endif /* CONFIG_TICKSOURCE_CORETMR */


/*
 * Boot-time wall clock: no battery-backed RTC is read here; report a
 * fixed epoch of 1 Jan 2007 (37 years + 9 leap days since 1970).
 */
void read_persistent_clock(struct timespec *ts)
{
	time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60; /* 1 Jan 2007 */
	ts->tv_sec = secs_since_1970;
	ts->tv_nsec = 0;
}

/*
 * Arch time init: sanitize the RTC if present, register the available
 * clocksources, then set up whichever tick source(s) are configured.
 * At least one of CORETMR/GPTMR0 must be selected (build-time check).
 */
void __init time_init(void)
{

#ifdef CONFIG_RTC_DRV_BFIN
	/* [#2663] hack to filter junk RTC values that would cause
	 * userspace to have to deal with time values greater than
	 * 2^31 seconds (which uClibc cannot cope with yet)
	 */
	if ((bfin_read_RTC_STAT() & 0xC0000000) == 0xC0000000) {
		printk(KERN_NOTICE "bfin-rtc: invalid date; resetting\n");
		bfin_write_RTC_STAT(0);
	}
#endif

	bfin_cs_cycles_init();
	bfin_cs_gptimer0_init();

#if defined(CONFIG_TICKSOURCE_CORETMR)
	bfin_coretmr_init();
	setup_irq(IRQ_CORETMR, &coretmr_irq);
	bfin_coretmr_clockevent_init();
#endif

#if defined(CONFIG_TICKSOURCE_GPTMR0)
	bfin_gptmr0_init();
	setup_irq(IRQ_TIMER0, &gptmr0_irq);
	gptmr0_irq.dev_id = &clockevent_gptmr0;
	bfin_gptmr0_clockevent_init(&clockevent_gptmr0);
#endif

#if !defined(CONFIG_TICKSOURCE_CORETMR) && !defined(CONFIG_TICKSOURCE_GPTMR0)
# error at least one clock event device is required
#endif
}