/*	$OpenBSD: clock.c,v 1.54 2023/10/24 13:20:10 claudio Exp $ */

/*
 * Copyright (c) 2001-2004 Opsycon AB  (www.opsycon.se / www.opsycon.com)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Clock code for systems using the on-cpu counter register, when both the
 * counter and comparator registers are available (i.e. everything MIPS-III
 * or MIPS-IV capable but the R8000).
 *
 * On most processors, this register counts at half the pipeline frequency.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/clockintr.h>
#include <sys/device.h>
#include <sys/evcount.h>
#include <sys/stdint.h>

#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <mips64/mips_cpu.h>

static struct evcount cp0_clock_count;
static int cp0_clock_irq = 5;
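
/*
 * Nanosecond-to-cycle conversion factor, a 32.32 fixed-point ratio,
 * and the largest nanosecond count that can be converted without
 * overflowing the 64-bit multiply.
 */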
uint64_t cp0_nsec_cycle_ratio;
uint64_t cp0_nsec_max;

int	clockmatch(struct device *, void *, void *);
void	clockattach(struct device *, struct device *, void *);

struct cfdriver clock_cd = {
	NULL, "clock", DV_DULL
};

const struct cfattach clock_ca = {
	sizeof(struct device), clockmatch, clockattach
};

void	cp0_rearm_int5(void *, uint64_t);
void	cp0_trigger_int5_wrapper(void *);

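/*
 * clockintr(9) glue: ic_rearm schedules INT5 to fire after the given
 * number of nanoseconds, ic_trigger fires it as soon as possible.
 */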
const struct intrclock cp0_intrclock = {
	.ic_rearm = cp0_rearm_int5,
	.ic_trigger = cp0_trigger_int5_wrapper
};

void	cp0_initclock(void);
uint32_t cp0_int5(uint32_t, struct trapframe *);
void	cp0_startclock(struct cpu_info *);
void	cp0_trigger_int5(void);
void	cp0_trigger_int5_masked(void);

int
clockmatch(struct device *parent, void *vcf, void *aux)
{
	struct mainbus_attach_args *maa = aux;

	return strcmp(maa->maa_name, clock_cd.cd_name) == 0;
}

void
clockattach(struct device *parent, struct device *self, void *aux)
{
	uint64_t cp0_freq = curcpu()->ci_hw.clock / CP0_CYCLE_DIVIDER;

	printf(": int 5\n");

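	/*
	 * Compute the cycles-per-nanosecond ratio in 32.32 fixed point.
	 * For example, a 500 MHz counter advances at 0.5 cycles per
	 * nanosecond, giving a ratio of 1 << 31.
	 */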
	cp0_nsec_cycle_ratio = cp0_freq * (1ULL << 32) / 1000000000;
	cp0_nsec_max = UINT64_MAX / cp0_nsec_cycle_ratio;

	/*
	 * We need to register the interrupt now, for idle_mask to
	 * be computed correctly.
	 */
	set_intr(INTPRI_CLOCK, CR_INT_5, cp0_int5);
	evcount_attach(&cp0_clock_count, "clock", &cp0_clock_irq);
	evcount_percpu(&cp0_clock_count);

	/*
	 * Try to avoid early clock interrupts: with the compare
	 * register set just below the current count, the next match
	 * is a full counter wraparound away.
	 */
	cp0_set_compare(cp0_get_count() - 1);

	md_initclock = cp0_initclock;
	md_startclock = cp0_startclock;
	md_triggerclock = cp0_trigger_int5;
}

/*
 *  Interrupt handler for targets using the on-chip count register
 *  as the interval clock.  The system normally runs with the clock
 *  interrupt permanently enabled in hardware; masking is done
 *  logically here, and if the interrupt cannot be serviced right
 *  away, the tick is handled later, when the clock is logically
 *  unmasked again.
 */
uint32_t
cp0_int5(uint32_t mask, struct trapframe *tf)
{
	struct cpu_info *ci = curcpu();
	int s;

	evcount_inc(&cp0_clock_count);

	cp0_set_compare(cp0_get_count() - 1);	/* clear INT5 */

	/*
	 * Just ignore the interrupt if we're not ready to process it.
	 * cpu_initclocks() will retrigger it later.
	 */
	if (!ci->ci_clock_started)
		return CR_INT_5;

	/*
	 * If the clock interrupt is logically masked, defer all
	 * work until it is logically unmasked from splx(9).
	 */
	if (tf->ipl >= IPL_CLOCK) {
		ci->ci_clock_deferred = 1;
		return CR_INT_5;
	}
	ci->ci_clock_deferred = 0;

	/*
	 * Process clock interrupt.
	 */
	s = splclock();
#ifdef MULTIPROCESSOR
	register_t sr;

	sr = getsr();
	ENABLEIPI();
#endif
	clockintr_dispatch(tf);
#ifdef MULTIPROCESSOR
	setsr(sr);
#endif
	ci->ci_ipl = s;
	return CR_INT_5;	/* Clock is always on 5 */
}

/*
 * Arm INT5 to fire after the given number of nanoseconds have elapsed.
 * Only try once.  If we miss, let cp0_trigger_int5_masked() handle it.
 */
void
cp0_rearm_int5(void *unused, uint64_t nsecs)
{
	uint32_t cycles, t0;
	register_t sr;

	if (nsecs > cp0_nsec_max)
		nsecs = cp0_nsec_max;
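	/* 32.32 fixed-point multiply: the high word is the cycle count. */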
	cycles = (nsecs * cp0_nsec_cycle_ratio) >> 32;

	/*
	 * Set compare, then immediately reread count.  If at least
	 * "cycles" CP0 ticks have elapsed and INT5 isn't already
	 * pending, we missed.
	 */
	sr = disableintr();
	t0 = cp0_get_count();
	cp0_set_compare(t0 + cycles);
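	/*
	 * The unsigned subtraction yields the cycles elapsed since t0
	 * modulo 2^32, so the check below is wraparound-safe.
	 */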
	if (cycles <= cp0_get_count() - t0) {
		if (!ISSET(cp0_get_cause(), CR_INT_5))
			cp0_trigger_int5_masked();
	}
	setsr(sr);
}

void
cp0_trigger_int5(void)
{
	register_t sr;

	sr = disableintr();
	cp0_trigger_int5_masked();
	setsr(sr);
}

/*
 * Arm INT5 to fire as soon as possible.
 *
 * We need to spin until either (a) INT5 is pending or (b) the compare
 * register leads the count register, i.e. we know INT5 will be pending
 * very soon.
 *
 * To ensure we don't spin forever, double the compensatory offset
 * added to the compare value every time we miss the count register.
 * The initial offset of 16 cycles was chosen experimentally.  It
 * is the smallest power of two that doesn't require multiple loops
 * to arm the timer on most Octeon hardware.
 */
void
cp0_trigger_int5_masked(void)
{
	uint32_t offset = 16, t0;

	while (!ISSET(cp0_get_cause(), CR_INT_5)) {
		t0 = cp0_get_count();
		cp0_set_compare(t0 + offset);
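		/*
		 * If fewer than "offset" cycles have elapsed since t0,
		 * the compare value is still ahead of the counter and
		 * the interrupt is guaranteed to fire.
		 */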
		if (cp0_get_count() - t0 < offset)
			return;
		offset *= 2;
	}
}

void
cp0_trigger_int5_wrapper(void *unused)
{
	cp0_trigger_int5();
}

void
cp0_initclock(void)
{
	KASSERT(CPU_IS_PRIMARY(curcpu()));

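	/*
	 * Derive the stat and profiling clock frequencies from hz and
	 * flag the statclock as randomized so its period may vary.
	 */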
	stathz = hz;
	profhz = stathz * 10;
	statclock_is_randomized = 1;
}

/*
 * Start the clock interrupt dispatch cycle.
 */
void
cp0_startclock(struct cpu_info *ci)
{
	int s;

	if (!CPU_IS_PRIMARY(ci)) {
		/* try to avoid getting clock interrupts early */
		cp0_set_compare(cp0_get_count() - 1);

		cp0_calibrate(ci);
	}

	clockintr_cpu_init(&cp0_intrclock);

	/* Start the clock. */
	s = splclock();
	ci->ci_clock_started = 1;
	clockintr_trigger();
	splx(s);
}