/*	$OpenBSD: clock.c,v 1.31 2020/07/06 13:33:07 pirofti Exp $	*/

/*
 * Copyright (c) 1998-2003 Michael Shalayeff
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/timetc.h>

#include <dev/clock_subr.h>

#include <machine/pdc.h>
#include <machine/iomod.h>
#include <machine/psl.h>
#include <machine/intr.h>
#include <machine/reg.h>
#include <machine/cpufunc.h>
#include <machine/autoconf.h>

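/* Interval timer (CR_ITMR) ticks per hardclock(9) tick, i.e. per 1/hz s. */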
u_long	cpu_hzticks;

int	cpu_hardclock(void *);
u_int	itmr_get_timecount(struct timecounter *);

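/*
 * Free-running timecounter backed by the 32-bit CPU interval timer.
 * tc_frequency is filled in by cpu_initclocks() below, once
 * PAGE0->mem_10msec is known.
 */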
struct timecounter itmr_timecounter = {
	itmr_get_timecount, NULL, 0xffffffff, 0, "itmr", 0, NULL, 0
};

extern todr_chip_handle_t todr_handle;
struct todr_chip_handle pdc_todr;

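/* Read the time of day from firmware with a PDC_TOD_READ call. */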
int
pdc_gettime(struct todr_chip_handle *handle, struct timeval *tv)
{
	struct pdc_tod tod PDC_ALIGNMENT;
	int error;

	if ((error = pdc_call((iodcio_t)pdc, 1, PDC_TOD, PDC_TOD_READ,
	    &tod, 0, 0, 0, 0, 0))) {
		printf("clock: failed to fetch (%d)\n", error);
		return EIO;
	}

	tv->tv_sec = tod.sec;
	tv->tv_usec = tod.usec;
	return 0;
}

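/* Save the time of day to firmware with a PDC_TOD_WRITE call. */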
int
pdc_settime(struct todr_chip_handle *handle, struct timeval *tv)
{
	int error;

	if ((error = pdc_call((iodcio_t)pdc, 1, PDC_TOD, PDC_TOD_WRITE,
	    tv->tv_sec, tv->tv_usec))) {
		printf("clock: failed to save (%d)\n", error);
		return EIO;
	}

	return 0;
}

void
cpu_initclocks(void)
{
	struct cpu_info *ci = curcpu();
	u_long __itmr;

	pdc_todr.todr_gettime = pdc_gettime;
	pdc_todr.todr_settime = pdc_settime;
	todr_handle = &pdc_todr;

	/* PAGE0->mem_10msec is the number of ITMR ticks in 10ms. */
	cpu_hzticks = (PAGE0->mem_10msec * 100) / hz;

	itmr_timecounter.tc_frequency = PAGE0->mem_10msec * 100;
	tc_init(&itmr_timecounter);

	/* Schedule the first hardclock interrupt one tick from now. */
	mfctl(CR_ITMR, __itmr);
	ci->ci_itmr = __itmr;
	__itmr += cpu_hzticks;
	mtctl(__itmr, CR_ITMR);
}

int
cpu_hardclock(void *v)
{
	struct cpu_info *ci = curcpu();
	u_long __itmr, delta, eta;
	int wrap;
	register_t eiem;

	/*
	 * Invoke hardclock once for each cpu_hzticks-long interval
	 * that has elapsed since the last interrupt.
	 */
	for (;;) {
		mfctl(CR_ITMR, __itmr);
		delta = __itmr - ci->ci_itmr;	/* unsigned, so wrap-safe */
		if (delta >= cpu_hzticks) {
			hardclock(v);
			ci->ci_itmr += cpu_hzticks;
		} else
			break;
	}

	/*
	 * Program the next clock interrupt, making sure it will
	 * indeed happen in the future. This is done with interrupts
	 * disabled to avoid a possible race.
	 */
	eta = ci->ci_itmr + cpu_hzticks;
	wrap = eta < ci->ci_itmr;	/* watch out for a wraparound */
	__asm volatile("mfctl	%%cr15, %0": "=r" (eiem));	/* save EIEM */
	__asm volatile("mtctl	%r0, %cr15");	/* mask all interrupts */
	mtctl(eta, CR_ITMR);
	mfctl(CR_ITMR, __itmr);
	/*
	 * If we were close enough to the next tick, the timer may
	 * have passed the programmed value by the time we wrote it,
	 * in which case no interrupt would trigger until the counter
	 * completed a full cycle. On slow models, this would be a
	 * disaster (a complete cycle takes over two minutes on a
	 * 715/33).
	 *
	 * We expect that it will only be necessary to postpone the
	 * interrupt once. Thus, there are two cases:
	 * - We are expecting a wraparound: eta < cpu_itmr.
	 *   itmr is on track if either >= cpu_itmr or < eta.
	 * - We are not wrapping: eta > cpu_itmr.
	 *   itmr is on track if >= cpu_itmr and < eta (we need
	 *   to keep the >= cpu_itmr test because itmr might wrap
	 *   before eta does).
	 */
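	/*
	 * A worked example with hypothetical 32-bit values: if
	 * cpu_itmr = 0xffffff00 and cpu_hzticks = 0x200, then eta
	 * wraps to 0x100 (eta < cpu_itmr). An itmr of 0xffffff80
	 * (>= cpu_itmr) or 0x80 (< eta) is still on track; an itmr
	 * of 0x180 has already missed eta, so the check below pushes
	 * it another cpu_hzticks ahead.
	 */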
	if ((wrap && !(eta > __itmr || __itmr >= ci->ci_itmr)) ||
	    (!wrap && !(eta > __itmr && __itmr >= ci->ci_itmr))) {
		eta += cpu_hzticks;
		mtctl(eta, CR_ITMR);
	}
	__asm volatile("mtctl	%0, %%cr15":: "r" (eiem));	/* restore EIEM */

	return (1);
}

void
setstatclockrate(int newhz)
{
	/* nothing we can do */
}

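/* Timecounter read routine: return the current interval timer value. */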
u_int
itmr_get_timecount(struct timecounter *tc)
{
	u_long __itmr;

	mfctl(CR_ITMR, __itmr);
	return (__itmr);
}