1/*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 2011 The FreeBSD Foundation
5 * Copyright (c) 2013 Ruslan Bukin <br@bsdpad.com>
6 * All rights reserved.
7 *
8 * Based on mpcore_timer.c developed by Ben Gray <ben.r.gray@gmail.com>
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 * 3. The name of the company nor the name of the author may be used to
19 *    endorse or promote products derived from this software without specific
20 *    prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35/**
36 *      Cortex-A7, Cortex-A15, ARMv8 and later Generic Timer
37 */
38
39#include "opt_acpi.h"
40#include "opt_platform.h"
41
42#include <sys/param.h>
43#include <sys/systm.h>
44#include <sys/bus.h>
45#include <sys/kernel.h>
46#include <sys/module.h>
47#include <sys/malloc.h>
48#include <sys/rman.h>
49#include <sys/timeet.h>
50#include <sys/timetc.h>
51#include <sys/smp.h>
52#include <sys/vdso.h>
53#include <sys/watchdog.h>
54
55#include <machine/bus.h>
56#include <machine/cpu.h>
57#include <machine/intr.h>
58#include <machine/machdep.h>
59#include <machine/md_var.h>
60
61#if defined(__aarch64__)
62#include <machine/undefined.h>
63#endif
64
65#ifdef FDT
66#include <dev/ofw/openfirm.h>
67#include <dev/ofw/ofw_bus.h>
68#include <dev/ofw/ofw_bus_subr.h>
69#endif
70
71#ifdef DEV_ACPI
72#include <contrib/dev/acpica/include/acpi.h>
73#include <dev/acpica/acpivar.h>
74#endif
75
/* Indices into arm_tmr_softc.irqs[]; also the default FDT/GTDT rid order. */
#define	GT_PHYS_SECURE		0
#define	GT_PHYS_NONSECURE	1
#define	GT_VIRT			2
#define	GT_HYP_PHYS		3
#define	GT_HYP_VIRT		4
#define	GT_IRQ_COUNT		5

/* Bits in the CNTP_CTL/CNTV_CTL timer control registers. */
#define	GT_CTRL_ENABLE		(1 << 0)
#define	GT_CTRL_INT_MASK	(1 << 1)
#define	GT_CTRL_INT_STAT	(1 << 2)
#define	GT_REG_CTRL		0
#define	GT_REG_TVAL		1

/* Bits in CNTKCTL, gating userspace (PL0/EL0) access to the counters. */
#define	GT_CNTKCTL_PL0PTEN	(1 << 9) /* PL0 Physical timer reg access */
#define	GT_CNTKCTL_PL0VTEN	(1 << 8) /* PL0 Virtual timer reg access */
#define	GT_CNTKCTL_EVNTI	(0xf << 4) /* Virtual counter event bits */
#define	GT_CNTKCTL_EVNTDIR	(1 << 3) /* Virtual counter event transition */
#define	GT_CNTKCTL_EVNTEN	(1 << 2) /* Enables virtual counter events */
#define	GT_CNTKCTL_PL0VCTEN	(1 << 1) /* PL0 CNTVCT and CNTFRQ access */
#define	GT_CNTKCTL_PL0PCTEN	(1 << 0) /* PL0 CNTPCT and CNTFRQ access */
96
struct arm_tmr_softc;

/* Book-keeping for one allocated timer interrupt. */
struct arm_tmr_irq {
	struct resource	*res;	/* Allocated IRQ resource. */
	void		*ihl;	/* Cookie returned by bus_setup_intr(). */
	int		 rid;	/* Resource id the IRQ was allocated with. */
	int		 idx;	/* GT_* index this entry corresponds to. */
};

/* Per-device state; only one generic timer instance is supported. */
struct arm_tmr_softc {
	struct arm_tmr_irq	irqs[GT_IRQ_COUNT];
	uint64_t		(*get_cntxct)(bool);	/* Counter read routine. */
	uint32_t		clkfreq;	/* Counter frequency, Hz. */
	int			irq_count;	/* Valid entries in irqs[]. */
	struct eventtimer	et;
	bool			physical_sys;	/* Kernel uses physical timer. */
	bool			physical_user;	/* Userspace may read CNTPCT. */
};

/* Singleton softc; set once in arm_tmr_attach(). */
static struct arm_tmr_softc *arm_tmr_sc = NULL;
117
/*
 * Description of every timer interrupt we may attach: its irqs[] index,
 * the FDT "interrupt-names" string, and allocation flags.  Entries
 * marked RF_OPTIONAL may legitimately be absent on a given platform.
 */
static const struct arm_tmr_irq_defs {
	int idx;
	const char *name;
	int flags;
} arm_tmr_irq_defs[] = {
	{
		.idx = GT_PHYS_SECURE,
		.name = "sec-phys",
		.flags = RF_ACTIVE | RF_OPTIONAL,
	},
	{
		.idx = GT_PHYS_NONSECURE,
		.name = "phys",
		.flags = RF_ACTIVE,
	},
	{
		.idx = GT_VIRT,
		.name = "virt",
		.flags = RF_ACTIVE,
	},
	{
		.idx = GT_HYP_PHYS,
		.name = "hyp-phys",
		.flags = RF_ACTIVE | RF_OPTIONAL,
	},
	{
		.idx = GT_HYP_VIRT,
		.name = "hyp-virt",
		.flags = RF_ACTIVE | RF_OPTIONAL,
	},
};
149
/* Shared attach body used by both the FDT and ACPI front ends. */
static int arm_tmr_attach(device_t);

static uint32_t arm_tmr_fill_vdso_timehands(struct vdso_timehands *vdso_th,
    struct timecounter *tc);
static void arm_tmr_do_delay(int usec, void *);

static timecounter_get_t arm_tmr_get_timecount;

/* Timecounter backed by the generic timer counter register. */
static struct timecounter arm_tmr_timecount = {
	.tc_name           = "ARM MPCore Timecounter",
	.tc_get_timecount  = arm_tmr_get_timecount,
	.tc_poll_pps       = NULL,
	.tc_counter_mask   = ~0u,
	.tc_frequency      = 0,	/* Filled in from sc->clkfreq at attach. */
	.tc_quality        = 1000,
	.tc_fill_vdso_timehands = arm_tmr_fill_vdso_timehands,
};
167
/*
 * Accessors for the timer system registers.  On arm (AArch32) these map
 * to CP15 coprocessor accessors; on arm64 they are MRS/MSR of the
 * corresponding _el0/_el1 registers.
 */
#ifdef __arm__
#define	get_el0(x)	cp15_## x ##_get()
#define	get_el1(x)	cp15_## x ##_get()
#define	set_el0(x, val)	cp15_## x ##_set(val)
#define	set_el1(x, val)	cp15_## x ##_set(val)
#define	HAS_PHYS	true
#define	IN_VHE		false
#else /* __aarch64__ */
#define	get_el0(x)	READ_SPECIALREG(x ##_el0)
#define	get_el1(x)	READ_SPECIALREG(x ##_el1)
#define	set_el0(x, val)	WRITE_SPECIALREG(x ##_el0, val)
#define	set_el1(x, val)	WRITE_SPECIALREG(x ##_el1, val)
#define	HAS_PHYS	has_hyp()
#define	IN_VHE		in_vhe()
#endif
183
/* Read the counter frequency register (CNTFRQ), in Hz. */
static int
get_freq(void)
{
	return (get_el0(cntfrq));
}
189
190static uint64_t
191get_cntxct_a64_unstable(bool physical)
192{
193	uint64_t val
194;
195	isb();
196	if (physical) {
197		do {
198			val = get_el0(cntpct);
199		}
200		while (((val + 1) & 0x7FF) <= 1);
201	}
202	else {
203		do {
204			val = get_el0(cntvct);
205		}
206		while (((val + 1) & 0x7FF) <= 1);
207	}
208
209	return (val);
210}
211
212static uint64_t
213get_cntxct(bool physical)
214{
215	uint64_t val;
216
217	isb();
218	if (physical)
219		val = get_el0(cntpct);
220	else
221		val = get_el0(cntvct);
222
223	return (val);
224}
225
/*
 * Write the timer control register (CNTP_CTL or CNTV_CTL), then isb so
 * the new control value takes effect before returning.  Always returns 0.
 */
static int
set_ctrl(uint32_t val, bool physical)
{

	if (physical)
		set_el0(cntp_ctl, val);
	else
		set_el0(cntv_ctl, val);
	isb();

	return (0);
}
238
/*
 * Write the timer value register (CNTP_TVAL or CNTV_TVAL), then isb so
 * the new value takes effect before returning.  Always returns 0.
 */
static int
set_tval(uint32_t val, bool physical)
{

	if (physical)
		set_el0(cntp_tval, val);
	else
		set_el0(cntv_tval, val);
	isb();

	return (0);
}
251
252static int
253get_ctrl(bool physical)
254{
255	uint32_t val;
256
257	if (physical)
258		val = get_el0(cntp_ctl);
259	else
260		val = get_el0(cntv_ctl);
261
262	return (val);
263}
264
265static void
266setup_user_access(void *arg __unused)
267{
268	uint32_t cntkctl;
269
270	cntkctl = get_el1(cntkctl);
271	cntkctl &= ~(GT_CNTKCTL_PL0PTEN | GT_CNTKCTL_PL0VTEN |
272	    GT_CNTKCTL_EVNTEN | GT_CNTKCTL_PL0PCTEN);
273	/* Always enable the virtual timer */
274	cntkctl |= GT_CNTKCTL_PL0VCTEN;
275	/* Enable the physical timer if supported */
276	if (arm_tmr_sc->physical_user) {
277		cntkctl |= GT_CNTKCTL_PL0PCTEN;
278	}
279	set_el1(cntkctl, cntkctl);
280	isb();
281}
282
283#ifdef __aarch64__
284static int
285cntpct_handler(vm_offset_t va, uint32_t insn, struct trapframe *frame,
286    uint32_t esr)
287{
288	uint64_t val;
289	int reg;
290
291	if ((insn & MRS_MASK) != MRS_VALUE)
292		return (0);
293
294	if (MRS_SPECIAL(insn) != MRS_SPECIAL(CNTPCT_EL0))
295		return (0);
296
297	reg = MRS_REGISTER(insn);
298	val = READ_SPECIALREG(cntvct_el0);
299	if (reg < nitems(frame->tf_x)) {
300		frame->tf_x[reg] = val;
301	} else if (reg == 30) {
302		frame->tf_lr = val;
303	}
304
305	/*
306	 * We will handle this instruction, move to the next so we
307	 * don't trap here again.
308	 */
309	frame->tf_elr += INSN_SIZE;
310
311	return (1);
312}
313#endif
314
315static void
316tmr_setup_user_access(void *arg __unused)
317{
318#ifdef __aarch64__
319	int emulate;
320#endif
321
322	if (arm_tmr_sc != NULL) {
323		smp_rendezvous(NULL, setup_user_access, NULL, NULL);
324#ifdef __aarch64__
325		if (TUNABLE_INT_FETCH("hw.emulate_phys_counter", &emulate) &&
326		    emulate != 0) {
327			install_undef_handler(true, cntpct_handler);
328		}
329#endif
330	}
331}
332SYSINIT(tmr_ua, SI_SUB_SMP, SI_ORDER_ANY, tmr_setup_user_access, NULL);
333
334static unsigned
335arm_tmr_get_timecount(struct timecounter *tc)
336{
337
338	return (arm_tmr_sc->get_cntxct(arm_tmr_sc->physical_sys));
339}
340
341static int
342arm_tmr_start(struct eventtimer *et, sbintime_t first,
343    sbintime_t period __unused)
344{
345	struct arm_tmr_softc *sc;
346	int counts, ctrl;
347
348	sc = (struct arm_tmr_softc *)et->et_priv;
349
350	if (first != 0) {
351		counts = ((uint32_t)et->et_frequency * first) >> 32;
352		ctrl = get_ctrl(sc->physical_sys);
353		ctrl &= ~GT_CTRL_INT_MASK;
354		ctrl |= GT_CTRL_ENABLE;
355		set_tval(counts, sc->physical_sys);
356		set_ctrl(ctrl, sc->physical_sys);
357		return (0);
358	}
359
360	return (EINVAL);
361
362}
363
364static void
365arm_tmr_disable(bool physical)
366{
367	int ctrl;
368
369	ctrl = get_ctrl(physical);
370	ctrl &= ~GT_CTRL_ENABLE;
371	set_ctrl(ctrl, physical);
372}
373
374static int
375arm_tmr_stop(struct eventtimer *et)
376{
377	struct arm_tmr_softc *sc;
378
379	sc = (struct arm_tmr_softc *)et->et_priv;
380	arm_tmr_disable(sc->physical_sys);
381
382	return (0);
383}
384
385static int
386arm_tmr_intr(void *arg)
387{
388	struct arm_tmr_softc *sc;
389	int ctrl;
390
391	sc = (struct arm_tmr_softc *)arg;
392	ctrl = get_ctrl(sc->physical_sys);
393	if (ctrl & GT_CTRL_INT_STAT) {
394		ctrl |= GT_CTRL_INT_MASK;
395		set_ctrl(ctrl, sc->physical_sys);
396	}
397
398	if (sc->et.et_active)
399		sc->et.et_event_cb(&sc->et, sc->et.et_arg);
400
401	return (FILTER_HANDLED);
402}
403
404static int
405arm_tmr_attach_irq(device_t dev, struct arm_tmr_softc *sc,
406    const struct arm_tmr_irq_defs *irq_def, int rid, int flags)
407{
408	struct arm_tmr_irq *irq;
409
410	irq = &sc->irqs[sc->irq_count];
411	irq->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
412	    &rid, flags);
413	if (irq->res == NULL) {
414		if (bootverbose || (flags & RF_OPTIONAL) == 0) {
415			device_printf(dev,
416			    "could not allocate irq for %s interrupt '%s'\n",
417			    (flags & RF_OPTIONAL) != 0 ? "optional" :
418			    "required", irq_def->name);
419		}
420
421		if ((flags & RF_OPTIONAL) == 0)
422			return (ENXIO);
423	} else {
424		if (bootverbose)
425			device_printf(dev, "allocated irq for '%s'\n",
426			    irq_def->name);
427		irq->rid = rid;
428		irq->idx = irq_def->idx;
429		sc->irq_count++;
430	}
431
432	return (0);
433}
434
435#ifdef FDT
436static int
437arm_tmr_fdt_probe(device_t dev)
438{
439
440	if (!ofw_bus_status_okay(dev))
441		return (ENXIO);
442
443	if (ofw_bus_is_compatible(dev, "arm,armv8-timer")) {
444		device_set_desc(dev, "ARMv8 Generic Timer");
445		return (BUS_PROBE_DEFAULT);
446	} else if (ofw_bus_is_compatible(dev, "arm,armv7-timer")) {
447		device_set_desc(dev, "ARMv7 Generic Timer");
448		return (BUS_PROBE_DEFAULT);
449	}
450
451	return (ENXIO);
452}
453
/*
 * FDT attach: allocate the timer interrupts (by "interrupt-names" when
 * present, otherwise by the conventional fixed order) and hand off to
 * the common arm_tmr_attach().  On failure every irq resource that was
 * allocated is released again.
 */
static int
arm_tmr_fdt_attach(device_t dev)
{
	struct arm_tmr_softc *sc;
	const struct arm_tmr_irq_defs *irq_def;
	size_t i;
	phandle_t node;
	int error, rid;
	bool has_names;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);

	has_names = OF_hasprop(node, "interrupt-names");
	for (i = 0; i < nitems(arm_tmr_irq_defs); i++) {
		int flags;

		/*
		 * If we don't have names to go off of, we assume that they're
		 * in the "usual" order with sec-phys first and allocate by idx.
		 */
		irq_def = &arm_tmr_irq_defs[i];
		rid = irq_def->idx;
		flags = irq_def->flags;
		if (has_names) {
			error = ofw_bus_find_string_index(node,
			    "interrupt-names", irq_def->name, &rid);

			/*
			 * If we have names, missing a name means we don't
			 * have it.
			 */
			if (error != 0) {
				/*
				 * Could be noisy on a lot of platforms for no
				 * good cause.
				 */
				if (bootverbose || (flags & RF_OPTIONAL) == 0) {
					device_printf(dev,
					    "could not find irq for %s interrupt '%s'\n",
					    (flags & RF_OPTIONAL) != 0 ?
					    "optional" : "required",
					    irq_def->name);
				}

				if ((flags & RF_OPTIONAL) == 0)
					goto out;

				continue;
			}

			/*
			 * Warn about failing to activate if we did actually
			 * have the name present.
			 */
			flags &= ~RF_OPTIONAL;
		}

		error = arm_tmr_attach_irq(dev, sc, irq_def, rid, flags);
		if (error != 0)
			goto out;
	}

	error = arm_tmr_attach(dev);
out:
	/* On any failure, release every irq we allocated above. */
	if (error != 0) {
		for (i = 0; i < sc->irq_count; i++) {
			bus_release_resource(dev, SYS_RES_IRQ, sc->irqs[i].rid,
			    sc->irqs[i].res);
		}
	}

	return (error);

}
529#endif
530
531#ifdef DEV_ACPI
/*
 * Record an interrupt number from the GTDT as a SYS_RES_IRQ resource on
 * the child timer device, keyed by the GT_* rid.
 */
static void
arm_tmr_acpi_add_irq(device_t parent, device_t dev, int rid, u_int irq)
{

	BUS_SET_RESOURCE(parent, dev, SYS_RES_IRQ, rid, irq, 1);
}
538
/*
 * ACPI identify: locate and map the GTDT, create the "generic_timer"
 * child on the timer bus pass, and populate its irq resources from the
 * table.  The GTDT mapping is released before returning.
 */
static void
arm_tmr_acpi_identify(driver_t *driver, device_t parent)
{
	ACPI_TABLE_GTDT *gtdt;
	vm_paddr_t physaddr;
	device_t dev;

	physaddr = acpi_find_table(ACPI_SIG_GTDT);
	if (physaddr == 0)
		return;

	gtdt = acpi_map_table(physaddr, ACPI_SIG_GTDT);
	if (gtdt == NULL) {
		device_printf(parent, "gic: Unable to map the GTDT\n");
		return;
	}

	dev = BUS_ADD_CHILD(parent, BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE,
	    "generic_timer", -1);
	if (dev == NULL) {
		device_printf(parent, "add gic child failed\n");
		goto out;
	}

	/* Wire the per-timer interrupt numbers advertised by the GTDT. */
	arm_tmr_acpi_add_irq(parent, dev, GT_PHYS_SECURE,
	    gtdt->SecureEl1Interrupt);
	arm_tmr_acpi_add_irq(parent, dev, GT_PHYS_NONSECURE,
	    gtdt->NonSecureEl1Interrupt);
	arm_tmr_acpi_add_irq(parent, dev, GT_VIRT,
	    gtdt->VirtualTimerInterrupt);
	arm_tmr_acpi_add_irq(parent, dev, GT_HYP_PHYS,
	    gtdt->NonSecureEl2Interrupt);

out:
	acpi_unmap_table(gtdt);
}
575
/*
 * ACPI probe: the device was created by our own identify routine, so
 * only match when explicitly named (BUS_PROBE_NOWILDCARD).
 */
static int
arm_tmr_acpi_probe(device_t dev)
{

	device_set_desc(dev, "ARM Generic Timer");
	return (BUS_PROBE_NOWILDCARD);
}
583
584static int
585arm_tmr_acpi_attach(device_t dev)
586{
587	const struct arm_tmr_irq_defs *irq_def;
588	struct arm_tmr_softc *sc;
589	int error;
590
591	sc = device_get_softc(dev);
592	for (int i = 0; i < nitems(arm_tmr_irq_defs); i++) {
593		irq_def = &arm_tmr_irq_defs[i];
594		error = arm_tmr_attach_irq(dev, sc, irq_def, irq_def->idx,
595		    irq_def->flags);
596		if (error != 0)
597			goto out;
598	}
599
600	error = arm_tmr_attach(dev);
601out:
602	if (error != 0) {
603		for (int i = 0; i < sc->irq_count; i++) {
604			bus_release_resource(dev, SYS_RES_IRQ,
605			    sc->irqs[i].rid, sc->irqs[i].res);
606		}
607	}
608	return (error);
609}
610#endif
611
/*
 * Common attach body (FDT and ACPI): determine the counter frequency,
 * choose which timer (physical/virtual/EL2) the kernel will use, set up
 * the interrupt handlers, and register the timecounter and eventtimer.
 * Only one instance may attach; a second returns ENXIO.
 */
static int
arm_tmr_attach(device_t dev)
{
	struct arm_tmr_softc *sc;
#ifdef INVARIANTS
	const struct arm_tmr_irq_defs *irq_def;
#endif
#ifdef FDT
	phandle_t node;
	pcell_t clock;
#endif
#ifdef __aarch64__
	int user_phys;
#endif
	int error;
	int i, first_timer, last_timer;

	sc = device_get_softc(dev);
	if (arm_tmr_sc)
		return (ENXIO);

	sc->get_cntxct = &get_cntxct;
#ifdef FDT
	/* Get the base clock frequency */
	node = ofw_bus_get_node(dev);
	if (node > 0) {
		error = OF_getencprop(node, "clock-frequency", &clock,
		    sizeof(clock));
		if (error > 0)
			sc->clkfreq = clock;

		/* Broken counter on some Allwinner SoCs: use the re-read
		 * workaround routine instead of the plain counter read. */
		if (OF_hasprop(node, "allwinner,sun50i-a64-unstable-timer")) {
			sc->get_cntxct = &get_cntxct_a64_unstable;
			if (bootverbose)
				device_printf(dev,
				    "Enabling allwinner unstable timer workaround\n");
		}
	}
#endif

	if (sc->clkfreq == 0) {
		/* Try to get clock frequency from timer */
		sc->clkfreq = get_freq();
	}

	if (sc->clkfreq == 0) {
		device_printf(dev, "No clock frequency specified\n");
		return (ENXIO);
	}

#ifdef INVARIANTS
	/* Confirm that non-optional irqs were allocated before coming in. */
	for (i = 0; i < nitems(arm_tmr_irq_defs); i++) {
		int j;

		irq_def = &arm_tmr_irq_defs[i];

		/* Skip optional interrupts */
		if ((irq_def->flags & RF_OPTIONAL) != 0)
			continue;

		for (j = 0; j < sc->irq_count; j++) {
			if (sc->irqs[j].idx == irq_def->idx)
				break;
		}
		KASSERT(j < sc->irq_count, ("%s: Missing required interrupt %s",
		    __func__, irq_def->name));
	}
#endif

	/*
	 * first_timer..last_timer is the range of GT_* indices whose
	 * interrupts we will actually enable below.
	 */
#ifdef __aarch64__
	if (IN_VHE) {
		/*
		 * The kernel is running at EL2. The EL0 timer registers are
		 * re-mapped to the EL2 version. Because of this we need to
		 * use the EL2 interrupt.
		 */
		sc->physical_sys = true;
		first_timer = GT_HYP_PHYS;
		last_timer = GT_HYP_PHYS;
	} else if (!HAS_PHYS) {
		/*
		 * Use the virtual timer when we can't use the hypervisor.
		 * A hypervisor guest may change the virtual timer registers
		 * while executing so any use of the virtual timer interrupt
		 * needs to be coordinated with the virtual machine manager.
		 */
		sc->physical_sys = false;
		first_timer = GT_VIRT;
		last_timer = GT_VIRT;
	} else
#endif
	/* Otherwise set up the secure and non-secure physical timers. */
	{
		sc->physical_sys = true;
		first_timer = GT_PHYS_SECURE;
		last_timer = GT_PHYS_NONSECURE;
	}

#ifdef __aarch64__
	/*
	 * The virtual timer is always available on arm and arm64, tell
	 * userspace to use it.
	 */
	sc->physical_user = false;
	/* Allow use of the physical counter in userspace when available */
	if (TUNABLE_INT_FETCH("hw.userspace_allow_phys_counter", &user_phys) &&
	    user_phys != 0)
		sc->physical_user = sc->physical_sys;
#else
	/*
	 * The virtual timer depends on setting cntvoff from the hypervisor
	 * privilege level/el2, however this is only set on arm64.
	 */
	sc->physical_user = true;
#endif

	/* Publish the singleton; tmr_setup_user_access() keys off this. */
	arm_tmr_sc = sc;

	/* Setup secure, non-secure and virtual IRQs handler */
	for (i = 0; i < sc->irq_count; i++) {
		/* Only enable IRQs on timers we expect to use */
		if (sc->irqs[i].idx < first_timer ||
		    sc->irqs[i].idx > last_timer)
			continue;
		error = bus_setup_intr(dev, sc->irqs[i].res, INTR_TYPE_CLK,
		    arm_tmr_intr, NULL, sc, &sc->irqs[i].ihl);
		if (error) {
			device_printf(dev, "Unable to alloc int resource.\n");
			/* Tear down any handlers already installed. */
			for (int j = 0; j < i; j++)
				bus_teardown_intr(dev, sc->irqs[j].res,
				    &sc->irqs[j].ihl);
			return (ENXIO);
		}
	}

	/* Disable the timers until we are ready */
	arm_tmr_disable(false);
	if (HAS_PHYS)
		arm_tmr_disable(true);

	/* Register the timecounter now that the frequency is known. */
	arm_tmr_timecount.tc_frequency = sc->clkfreq;
	tc_init(&arm_tmr_timecount);

	sc->et.et_name = "ARM MPCore Eventtimer";
	sc->et.et_flags = ET_FLAGS_ONESHOT | ET_FLAGS_PERCPU;
	sc->et.et_quality = 1000;

	sc->et.et_frequency = sc->clkfreq;
	sc->et.et_min_period = (0x00000010LLU << 32) / sc->et.et_frequency;
	sc->et.et_max_period = (0xfffffffeLLU << 32) / sc->et.et_frequency;
	sc->et.et_start = arm_tmr_start;
	sc->et.et_stop = arm_tmr_stop;
	sc->et.et_priv = sc;
	et_register(&sc->et);

#if defined(__arm__)
	/* On arm32 DELAY() is routed through the platform delay hook. */
	arm_set_delay(arm_tmr_do_delay, sc);
#endif

	return (0);
}
774
775#ifdef FDT
/* Device methods for the FDT-enumerated timer. */
static device_method_t arm_tmr_fdt_methods[] = {
	DEVMETHOD(device_probe,		arm_tmr_fdt_probe),
	DEVMETHOD(device_attach,	arm_tmr_fdt_attach),
	{ 0, 0 }
};

static DEFINE_CLASS_0(generic_timer, arm_tmr_fdt_driver, arm_tmr_fdt_methods,
    sizeof(struct arm_tmr_softc));

/* Attach early, in the timer bus pass, on both simplebus and ofwbus. */
EARLY_DRIVER_MODULE(timer, simplebus, arm_tmr_fdt_driver, 0, 0,
    BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE);
EARLY_DRIVER_MODULE(timer, ofwbus, arm_tmr_fdt_driver, 0, 0,
    BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE);
789#endif
790
791#ifdef DEV_ACPI
/* Device methods for the ACPI (GTDT) enumerated timer. */
static device_method_t arm_tmr_acpi_methods[] = {
	DEVMETHOD(device_identify,	arm_tmr_acpi_identify),
	DEVMETHOD(device_probe,		arm_tmr_acpi_probe),
	DEVMETHOD(device_attach,	arm_tmr_acpi_attach),
	{ 0, 0 }
};

static DEFINE_CLASS_0(generic_timer, arm_tmr_acpi_driver, arm_tmr_acpi_methods,
    sizeof(struct arm_tmr_softc));

/* Attach early, in the timer bus pass, on the acpi bus. */
EARLY_DRIVER_MODULE(timer, acpi, arm_tmr_acpi_driver, 0, 0,
    BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE);
804#endif
805
/*
 * Busy-wait for at least 'usec' microseconds by polling the system
 * counter.  Used by DELAY() once the driver is attached.
 */
static void
arm_tmr_do_delay(int usec, void *arg)
{
	struct arm_tmr_softc *sc = arg;
	int32_t counts, counts_per_usec;
	uint32_t first, last;

	/* Get the number of times to count */
	counts_per_usec = ((arm_tmr_timecount.tc_frequency / 1000000) + 1);

	/*
	 * Clamp the timeout at a maximum value (about 32 seconds with
	 * a 66MHz clock). *Nobody* should be delay()ing for anywhere
	 * near that length of time and if they are, they should be hung
	 * out to dry.
	 */
	if (usec >= (0x80000000U / counts_per_usec))
		counts = (0x80000000U / counts_per_usec) - 1;
	else
		counts = usec * counts_per_usec;

	first = sc->get_cntxct(sc->physical_sys);

	while (counts > 0) {
		last = sc->get_cntxct(sc->physical_sys);
		/* Unsigned 32-bit subtraction handles counter wrap. */
		counts -= (int32_t)(last - first);
		first = last;
	}
}
835
836#if defined(__aarch64__)
837void
838DELAY(int usec)
839{
840	int32_t counts;
841
842	TSENTER();
843	/*
844	 * Check the timers are setup, if not just
845	 * use a for loop for the meantime
846	 */
847	if (arm_tmr_sc == NULL) {
848		for (; usec > 0; usec--)
849			for (counts = 200; counts > 0; counts--)
850				/*
851				 * Prevent the compiler from optimizing
852				 * out the loop
853				 */
854				cpufunc_nullop();
855	} else
856		arm_tmr_do_delay(usec, arm_tmr_sc);
857	TSEXIT();
858}
859#endif
860
861static uint32_t
862arm_tmr_fill_vdso_timehands(struct vdso_timehands *vdso_th,
863    struct timecounter *tc)
864{
865
866	vdso_th->th_algo = VDSO_TH_ALGO_ARM_GENTIM;
867	vdso_th->th_physical = arm_tmr_sc->physical_user;
868	bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
869	return (1);
870}
871