/*
 *
 * Copyright (C) 2001 MontaVista Software, ppopov@mvista.com
 * Copied and modified Carsten Langgaard's time.c
 *
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
 *
 * ########################################################################
 *
 *  This program is free software; you can distribute it and/or modify it
 *  under the terms of the GNU General Public License (Version 2) as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * ########################################################################
 *
 * Setting up the clock on the MIPS boards.
 *
 */

#include <linux/types.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <asm/mipsregs.h>
#include <asm/ptrace.h>
#include <asm/time.h>
#include <asm/hardirq.h>
#include <asm/div64.h>
#include <asm/au1000.h>

#include <linux/mc146818rtc.h>
#include <linux/timex.h>

extern void startup_match20_interrupt(void);
extern void do_softirq(void);
extern volatile unsigned long wall_jiffies;
unsigned long missed_heart_beats = 0;

static unsigned long r4k_offset; /* Amount to increment compare reg each time */
static unsigned long r4k_cur;    /* What counter should be at next timer irq */
extern rwlock_t xtime_lock;
unsigned int mips_counter_frequency = 0;

/* Cycle counter value at the previous timer interrupt.. */
static unsigned int timerhi = 0, timerlo = 0;

#ifdef CONFIG_PM
#define MATCH20_INC 328
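/*
 * 328 ticks of the 32.768 kHz TOY counter take 328/32768 s = 10.009765625 ms,
 * i.e. roughly one jiffy (10 ms, assuming HZ=100), so the match20 interrupt
 * fires about once per jiffy while the CPU may be sitting in 'wait'.
 */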
extern void startup_match20_interrupt(void);
static unsigned long last_pc0, last_match20;
#endif

static spinlock_t time_lock = SPIN_LOCK_UNLOCKED;

static inline void ack_r4ktimer(unsigned long newval)
{
	write_c0_compare(newval);
}

/*
 * There are a lot of conceptually broken versions of the MIPS timer interrupt
 * handler floating around.  This one is rather different, but the algorithm
 * is provably more robust.
 */
unsigned long wtimer;
void mips_timer_interrupt(struct pt_regs *regs)
{
	int irq = 63;
	unsigned long count;
	int cpu = smp_processor_id();

	irq_enter(cpu, irq);
	kstat.irqs[cpu][irq]++;

#ifdef CONFIG_PM
	printk(KERN_ERR "Unexpected CP0 interrupt\n");
	regs->cp0_status &= ~IE_IRQ5; /* disable CP0 interrupt */
	return;
#endif

	if (r4k_offset == 0)
		goto null;

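	/*
	 * Catch-up loop: account for every jiffy that has elapsed and keep
	 * advancing the compare register until it is ahead of the count
	 * register again (the unsigned difference below stays under
	 * 0x7fffffff only while compare still lags behind count).
	 */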
	do {
		count = read_c0_count();
		timerhi += (count < timerlo);   /* Wrap around */
		timerlo = count;

		kstat.irqs[0][irq]++;
		do_timer(regs);
		r4k_cur += r4k_offset;
		ack_r4ktimer(r4k_cur);

	} while (((unsigned long)read_c0_count()
	         - r4k_cur) < 0x7fffffff);

	irq_exit(cpu, irq);

	if (softirq_pending(cpu))
		do_softirq();
	return;

null:
	ack_r4ktimer(0);
}

#ifdef CONFIG_PM
void counter0_irq(int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long pc0;
	int time_elapsed;
	static int jiffie_drift = 0;

	kstat.irqs[0][irq]++;
	if (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M20) {
		/* should never happen! */
		printk(KERN_WARNING "counter 0 write status error\n");
		return;
	}

	pc0 = au_readl(SYS_TOYREAD);
	if (pc0 < last_match20) {
		/* counter overflowed */
		time_elapsed = (0xffffffff - last_match20) + pc0;
	}
	else {
		time_elapsed = pc0 - last_match20;
	}

	while (time_elapsed > 0) {
		do_timer(regs);
		time_elapsed -= MATCH20_INC;
		last_match20 += MATCH20_INC;
		jiffie_drift++;
	}

	last_pc0 = pc0;
	au_writel(last_match20 + MATCH20_INC, SYS_TOYMATCH2);
	au_sync();

	/* our counter ticks at 10.009765625 ms/tick, so we're running
	 * almost 10 us too slow per tick.
	 */

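	/*
	 * Worked out: each match20 period is 328/32768 s = 10.009765625 ms,
	 * i.e. 9.765625 us longer than a 10 ms jiffy, so jiffies falls behind
	 * real time by roughly one tick every ~1024 interrupts; the check
	 * below adds the missing jiffy back in.
	 */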
	if (jiffie_drift >= 999) {
		jiffie_drift -= 999;
		do_timer(regs); /* increment jiffies by one */
	}
}
#endif

/*
 * Figure out the r4k offset, the amount to increment the compare
 * register for each time tick.
 * Use the Programmable Counter 1 to do this.
 */
unsigned long cal_r4koff(void)
{
	unsigned long count;
	unsigned long cpu_speed;
	unsigned long start, end;
	unsigned long counter;
	int trim_divide = 16;
	unsigned long flags;

	spin_lock_irqsave(&time_lock, flags);

	counter = au_readl(SYS_COUNTER_CNTRL);
	au_writel(counter | SYS_CNTRL_EN1, SYS_COUNTER_CNTRL);

	while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S);
	au_writel(trim_divide-1, SYS_RTCTRIM); /* RTC now ticks at 32.768/16 kHz */
	while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S);

	while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S);
	au_writel(0, SYS_TOYWRITE);
	while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S);

	start = au_readl(SYS_RTCREAD);
	start += 2;
	/* wait for the beginning of a new tick */
	while (au_readl(SYS_RTCREAD) < start);

	/* Start r4k counter. */
	write_c0_count(0);
	end = start + (32768 / trim_divide)/2; /* wait 0.5 seconds */

	while (end > au_readl(SYS_RTCREAD));

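	/*
	 * The CP0 counter has now run for 1024 periods of the 2048 Hz
	 * trimmed RTC, i.e. for 0.5 seconds, so twice the value read
	 * below is the number of counter ticks per second.
	 */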
	count = read_c0_count();
	cpu_speed = count * 2;
	mips_counter_frequency = count;
	set_au1x00_uart_baud_base(((cpu_speed) / 4) / 16);
	spin_unlock_irqrestore(&time_lock, flags);
	return (cpu_speed / HZ);
}


void __init time_init(void)
{
	unsigned int est_freq;

	printk("calculating r4koff... ");
	r4k_offset = cal_r4koff();
	printk("%08lx(%d)\n", r4k_offset, (int) r4k_offset);

	//est_freq = 2*r4k_offset*HZ;
	est_freq = r4k_offset*HZ;
	est_freq += 5000;    /* round */
	est_freq -= est_freq%10000;
	printk("CPU frequency %d.%02d MHz\n", est_freq/1000000,
	       (est_freq%1000000)*100/1000000);
	set_au1x00_speed(est_freq);
	set_au1x00_lcd_clock(); // program the LCD clock
	r4k_cur = (read_c0_count() + r4k_offset);

	write_c0_compare(r4k_cur);

	/* no RTC on the pb1000 */
	xtime.tv_sec = 0;
	xtime.tv_usec = 0;

#ifdef CONFIG_PM
	/*
	 * Set up counter 0, since it keeps ticking after a
	 * 'wait' instruction has been executed. The CP0 timer and
	 * counter 1 do NOT continue running after 'wait'.
	 *
	 * It's too early to call request_irq() here, so we handle
	 * the counter 0 interrupt as a special irq and it doesn't
	 * show up under /proc/interrupts.
	 */
	while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C0S);
	au_writel(0, SYS_TOYWRITE);
	while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C0S);

	au_writel(au_readl(SYS_WAKEMSK) | (1<<8), SYS_WAKEMSK);
	au_writel(~0, SYS_WAKESRC);
	au_sync();
	while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M20);

	/* set up match20 to interrupt once every 10 ms */
	last_pc0 = last_match20 = au_readl(SYS_TOYREAD);
	au_writel(last_match20 + MATCH20_INC, SYS_TOYMATCH2);
	au_sync();
	while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M20);
	startup_match20_interrupt();
#endif

	//set_c0_status(ALLINTS);
	au_sync();
}

/* This is for machines which generate the exact clock. */
#define USECS_PER_JIFFY (1000000/HZ)
#define USECS_PER_JIFFY_FRAC (0x100000000LL*1000000/HZ&0xffffffff)

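/*
 * USECS_PER_JIFFY and USECS_PER_JIFFY_FRAC are the integer and fractional
 * 32-bit halves of "microseconds per jiffy" in 32.32 fixed point; they are
 * handed to div64_32() below as one 64-bit dividend.
 */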
#ifndef CONFIG_PM
static unsigned long
div64_32(unsigned long v1, unsigned long v2, unsigned long v3)
{
	unsigned long r0;
	do_div64_32(r0, v1, v2, v3);
	return r0;
}
#endif

static unsigned long do_fast_gettimeoffset(void)
{
#ifdef CONFIG_PM
	unsigned long pc0;
	unsigned long offset;

	pc0 = au_readl(SYS_TOYREAD);
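	/* One TOY tick is 1/32768 s, about 30.5 us; the *305/10 scaling
	 * below converts elapsed ticks into microseconds. */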
	if (pc0 < last_pc0) {
		/* counter wrapped; convert the elapsed ticks to microseconds too */
		offset = (unsigned long)(((0xffffffff - last_pc0 + pc0) * 305) / 10);
		printk("offset over: %x\n", (unsigned)offset);
	}
	else {
		offset = (unsigned long)(((pc0 - last_pc0) * 305) / 10);
	}
	if ((pc0-last_pc0) > 2*MATCH20_INC) {
		printk("huge offset %x, last_pc0 %x last_match20 %x pc0 %x\n",
				(unsigned)offset, (unsigned)last_pc0,
				(unsigned)last_match20, (unsigned)pc0);
	}
	au_sync();
	return offset;
#else
	u32 count;
	unsigned long res, tmp;
	unsigned long r0;

	/* Last jiffy when do_fast_gettimeoffset() was called. */
	static unsigned long last_jiffies=0;
	unsigned long quotient;

	/*
	 * Cached "1/(clocks per usec)*2^32" value.
	 * It has to be recalculated once each jiffy.
	 */
	static unsigned long cached_quotient=0;

	tmp = jiffies;

	quotient = cached_quotient;

	if (tmp && last_jiffies != tmp) {
		last_jiffies = tmp;
		if (last_jiffies != 0) {
			r0 = div64_32(timerhi, timerlo, tmp);
			quotient = div64_32(USECS_PER_JIFFY, USECS_PER_JIFFY_FRAC, r0);
			cached_quotient = quotient;
		}
	}

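	/*
	 * r0 is the average number of counter ticks per jiffy, so quotient is
	 * roughly 2^32 * (microseconds per counter tick).  The multu/mfhi pair
	 * below yields the high 32 bits of count * quotient, i.e. the counter
	 * ticks since the last timer interrupt converted to microseconds.
	 */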
	/* Get last timer tick in absolute kernel time */
	count = read_c0_count();

	/* .. relative to previous jiffy (32 bits is enough) */
	count -= timerlo;

	__asm__("multu\t%1,%2\n\t"
		"mfhi\t%0"
		:"=r" (res)
		:"r" (count),
		 "r" (quotient));

	/*
	 * Due to possible jiffies inconsistencies, we need to check
	 * the result so that we'll get a timer that is monotonic.
	 */
	if (res >= USECS_PER_JIFFY)
		res = USECS_PER_JIFFY-1;

	return res;
#endif
}

void do_gettimeofday(struct timeval *tv)
{
	unsigned long flags;

	read_lock_irqsave (&xtime_lock, flags);
	*tv = xtime;
	tv->tv_usec += do_fast_gettimeoffset();

	/*
	 * xtime is atomically updated in timer_bh. jiffies - wall_jiffies
	 * is nonzero if the timer bottom half hasn't executed yet.
	 */
	if (jiffies - wall_jiffies)
		tv->tv_usec += USECS_PER_JIFFY;

	read_unlock_irqrestore (&xtime_lock, flags);

	if (tv->tv_usec >= 1000000) {
		tv->tv_usec -= 1000000;
		tv->tv_sec++;
	}
}

void do_settimeofday(struct timeval *tv)
{
	write_lock_irq (&xtime_lock);

	/* This is revolting. We need to set xtime.tv_usec correctly.
	 * However, the value in this location is the value at the last tick.
	 * Discover what correction gettimeofday would have done, and then
	 * undo it!
	 */
	tv->tv_usec -= do_fast_gettimeoffset();

	if (tv->tv_usec < 0) {
		tv->tv_usec += 1000000;
		tv->tv_sec--;
	}

	xtime = *tv;
	time_adjust = 0;		/* stop active adjtime() */
	time_status |= STA_UNSYNC;
	time_maxerror = NTP_PHASE_LIMIT;
	time_esterror = NTP_PHASE_LIMIT;

	write_unlock_irq (&xtime_lock);
}