/*-
 * Copyright (c) 2005 Poul-Henning Kamp
 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/acpica/acpi_hpet.c 294883 2016-01-27 02:23:54Z jhibbits $");

#include "opt_acpi.h"
#if defined(__amd64__)
#define	DEV_APIC
#else
#include "opt_apic.h"
#endif
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/timeet.h>
#include <sys/timetc.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>

#include <dev/acpica/acpivar.h>
#include <dev/acpica/acpi_hpet.h>

#ifdef DEV_APIC
#include "pcib_if.h"
#endif

#define HPET_VENDID_AMD		0x4353
#define HPET_VENDID_AMD2	0x1022
#define HPET_VENDID_INTEL	0x8086
#define HPET_VENDID_NVIDIA	0x10de
#define HPET_VENDID_SW		0x1166

ACPI_SERIAL_DECL(hpet, "ACPI HPET support");

static devclass_t hpet_devclass;

/* ACPI CA debugging */
#define _COMPONENT	ACPI_TIMER
ACPI_MODULE_NAME("HPET")

struct hpet_softc {
	device_t		dev;
	int			mem_rid;
	int			intr_rid;
	int			irq;
	int			useirq;
	int			legacy_route;
	int			per_cpu;
	uint32_t		allowed_irqs;
	struct resource		*mem_res;
	struct resource		*intr_res;
	void			*intr_handle;
	ACPI_HANDLE		handle;
	uint64_t		freq;
	uint32_t		caps;
	struct timecounter	tc;
	struct hpet_timer {
		struct eventtimer	et;
		struct hpet_softc	*sc;
		int			num;
		int			mode;
		int			intr_rid;
		int			irq;
		int			pcpu_cpu;
		int			pcpu_misrouted;
		int			pcpu_master;
		int			pcpu_slaves[MAXCPU];
		struct resource		*intr_res;
		void			*intr_handle;
		uint32_t		caps;
		uint32_t		vectors;
		uint32_t		div;
		uint32_t		next;
		char			name[8];
	}			t[32];
	int			num_timers;
	struct cdev		*pdev;
	int			mmap_allow;
	int			mmap_allow_write;
};

static d_open_t hpet_open;
static d_mmap_t hpet_mmap;

static struct cdevsw hpet_cdevsw = {
	.d_version =	D_VERSION,
	.d_name =	"hpet",
	.d_open =	hpet_open,
	.d_mmap =	hpet_mmap,
};

static u_int hpet_get_timecount(struct timecounter *tc);
static void hpet_test(struct hpet_softc *sc);

static char *hpet_ids[] = { "PNP0103", NULL };

/* Knob to disable acpi_hpet device */
bool acpi_hpet_disabled = false;

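/* Timecounter read routine: return the low 32 bits of the main counter. */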
static u_int
hpet_get_timecount(struct timecounter *tc)
{
	struct hpet_softc *sc;

	sc = tc->tc_priv;
	return (bus_read_4(sc->mem_res, HPET_MAIN_COUNTER));
}

static void
hpet_enable(struct hpet_softc *sc)
{
	uint32_t val;

	val = bus_read_4(sc->mem_res, HPET_CONFIG);
	if (sc->legacy_route)
		val |= HPET_CNF_LEG_RT;
	else
		val &= ~HPET_CNF_LEG_RT;
	val |= HPET_CNF_ENABLE;
	bus_write_4(sc->mem_res, HPET_CONFIG, val);
}

static void
hpet_disable(struct hpet_softc *sc)
{
	uint32_t val;

	val = bus_read_4(sc->mem_res, HPET_CONFIG);
	val &= ~HPET_CNF_ENABLE;
	bus_write_4(sc->mem_res, HPET_CONFIG, val);
}

static int
hpet_start(struct eventtimer *et, sbintime_t first, sbintime_t period)
{
	struct hpet_timer *mt = (struct hpet_timer *)et->et_priv;
	struct hpet_timer *t;
	struct hpet_softc *sc = mt->sc;
	uint32_t fdiv, now;

	t = (mt->pcpu_master < 0) ? mt : &sc->t[mt->pcpu_slaves[curcpu]];
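	/*
	 * 'first' and 'period' are 32.32 fixed-point seconds (sbintime_t);
	 * multiplying by the counter frequency and shifting right by 32
	 * converts them to HPET counter ticks.
	 */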
	if (period != 0) {
		t->mode = 1;
		t->div = (sc->freq * period) >> 32;
	} else {
		t->mode = 2;
		t->div = 0;
	}
	if (first != 0)
		fdiv = (sc->freq * first) >> 32;
	else
		fdiv = t->div;
	if (t->irq < 0)
		bus_write_4(sc->mem_res, HPET_ISR, 1 << t->num);
	t->caps |= HPET_TCNF_INT_ENB;
	now = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
restart:
	t->next = now + fdiv;
	if (t->mode == 1 && (t->caps & HPET_TCAP_PER_INT)) {
		t->caps |= HPET_TCNF_TYPE;
		bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num),
		    t->caps | HPET_TCNF_VAL_SET);
		bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
		    t->next);
		bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
		    t->div);
	} else {
		t->caps &= ~HPET_TCNF_TYPE;
		bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num),
		    t->caps);
		bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
		    t->next);
	}
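	/*
	 * Re-read the counter.  If it is already within HPET_MIN_CYCLES of
	 * (or past) the programmed comparator, the interrupt may have been
	 * lost, so double the offset and program the comparator again.
	 */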
	now = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	if ((int32_t)(now - t->next + HPET_MIN_CYCLES) >= 0) {
		fdiv *= 2;
		goto restart;
	}
	return (0);
}

static int
hpet_stop(struct eventtimer *et)
{
	struct hpet_timer *mt = (struct hpet_timer *)et->et_priv;
	struct hpet_timer *t;
	struct hpet_softc *sc = mt->sc;

	t = (mt->pcpu_master < 0) ? mt : &sc->t[mt->pcpu_slaves[curcpu]];
	t->mode = 0;
	t->caps &= ~(HPET_TCNF_INT_ENB | HPET_TCNF_TYPE);
	bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num), t->caps);
	return (0);
}

static int
hpet_intr_single(void *arg)
{
	struct hpet_timer *t = (struct hpet_timer *)arg;
	struct hpet_timer *mt;
	struct hpet_softc *sc = t->sc;
	uint32_t now;

	if (t->mode == 0)
		return (FILTER_STRAY);
	/* Check that the per-CPU timer interrupt reached the right CPU. */
	if (t->pcpu_cpu >= 0 && t->pcpu_cpu != curcpu) {
		if ((++t->pcpu_misrouted) % 32 == 0) {
			printf("HPET interrupt routed to the wrong CPU"
			    " (timer %d CPU %d -> %d)!\n",
			    t->num, t->pcpu_cpu, curcpu);
		}

		/*
		 * Reload the timer, hoping that we will be luckier next
		 * time (the system will manage proper interrupt binding).
		 */
		if ((t->mode == 1 && (t->caps & HPET_TCAP_PER_INT) == 0) ||
		    t->mode == 2) {
			t->next = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER) +
			    sc->freq / 8;
			bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
			    t->next);
		}
		return (FILTER_HANDLED);
	}
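	/*
	 * Emulate periodic mode in software for timers without hardware
	 * periodic support: advance the comparator by one period on each
	 * interrupt, resynchronizing if we have fallen too far behind.
	 */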
	if (t->mode == 1 &&
	    (t->caps & HPET_TCAP_PER_INT) == 0) {
		t->next += t->div;
		now = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
		if ((int32_t)((now + t->div / 2) - t->next) > 0)
			t->next = now + t->div / 2;
		bus_write_4(sc->mem_res,
		    HPET_TIMER_COMPARATOR(t->num), t->next);
	} else if (t->mode == 2)
		t->mode = 0;
	mt = (t->pcpu_master < 0) ? t : &sc->t[t->pcpu_master];
	if (mt->et.et_active)
		mt->et.et_event_cb(&mt->et, mt->et.et_arg);
	return (FILTER_HANDLED);
}

static int
hpet_intr(void *arg)
{
	struct hpet_softc *sc = (struct hpet_softc *)arg;
	int i;
	uint32_t val;

	val = bus_read_4(sc->mem_res, HPET_ISR);
	if (val) {
		bus_write_4(sc->mem_res, HPET_ISR, val);
		val &= sc->useirq;
		for (i = 0; i < sc->num_timers; i++) {
			if ((val & (1 << i)) == 0)
				continue;
			hpet_intr_single(&sc->t[i]);
		}
		return (FILTER_HANDLED);
	}
	return (FILTER_STRAY);
}

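/*
 * ACPI namespace walk callback: match devices with an HPET _HID and,
 * if their _UID matches the requested table sequence number (or _UID
 * cannot be read), return the corresponding device_t through *status.
 */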
static ACPI_STATUS
hpet_find(ACPI_HANDLE handle, UINT32 level, void *context,
    void **status)
{
	char		**ids;
	uint32_t	id = (uint32_t)(uintptr_t)context;
	uint32_t	uid = 0;

	for (ids = hpet_ids; *ids != NULL; ids++) {
		if (acpi_MatchHid(handle, *ids))
			break;
	}
	if (*ids == NULL)
		return (AE_OK);
	if (ACPI_FAILURE(acpi_GetInteger(handle, "_UID", &uid)) ||
	    id == uid)
		*status = acpi_get_device(handle);
	return (AE_OK);
}

/*
 * Find an existing IRQ resource that matches the requested IRQ range
 * and return its RID.  If one is not found, use a new RID.
 */
static int
hpet_find_irq_rid(device_t dev, u_long start, u_long end)
{
	rman_res_t irq;
	int error, rid;

	for (rid = 0;; rid++) {
		error = bus_get_resource(dev, SYS_RES_IRQ, rid, &irq, NULL);
		if (error != 0 || (start <= irq && irq <= end))
			return (rid);
	}
}

static int
hpet_open(struct cdev *cdev, int oflags, int devtype, struct thread *td)
{
	struct hpet_softc *sc;

	sc = cdev->si_drv1;
	if (!sc->mmap_allow)
		return (EPERM);
	else
		return (0);
}

static int
hpet_mmap(struct cdev *cdev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int nprot, vm_memattr_t *memattr)
{
	struct hpet_softc *sc;

	sc = cdev->si_drv1;
	if (offset > rman_get_size(sc->mem_res))
		return (EINVAL);
	if (!sc->mmap_allow_write && (nprot & PROT_WRITE))
		return (EPERM);
	*paddr = rman_get_start(sc->mem_res) + offset;
	*memattr = VM_MEMATTR_UNCACHEABLE;

	return (0);
}

/* Discover the HPET via the ACPI table of the same name. */
static void
hpet_identify(driver_t *driver, device_t parent)
{
	ACPI_TABLE_HPET *hpet;
	ACPI_STATUS	status;
	device_t	child;
	int		i;

	/* Only one HPET device can be added. */
	if (devclass_get_device(hpet_devclass, 0))
		return;
	for (i = 1; ; i++) {
		/* Search for the HPET table. */
		status = AcpiGetTable(ACPI_SIG_HPET, i, (ACPI_TABLE_HEADER **)&hpet);
		if (ACPI_FAILURE(status))
			return;
		/* Search for an HPET device with the same ID. */
		child = NULL;
		AcpiWalkNamespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
		    100, hpet_find, NULL, (void *)(uintptr_t)hpet->Sequence,
		    (void *)&child);
		/* If found, let it be probed in the normal way. */
		if (child) {
			if (bus_get_resource(child, SYS_RES_MEMORY, 0,
			    NULL, NULL) != 0)
				bus_set_resource(child, SYS_RES_MEMORY, 0,
				    hpet->Address.Address, HPET_MEM_WIDTH);
			continue;
		}
		/* If not, create it from the table info. */
		child = BUS_ADD_CHILD(parent, 2, "hpet", 0);
		if (child == NULL) {
			printf("%s: can't add child\n", __func__);
			continue;
		}
		bus_set_resource(child, SYS_RES_MEMORY, 0, hpet->Address.Address,
		    HPET_MEM_WIDTH);
	}
}

static int
hpet_probe(device_t dev)
{
	ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);

	if (acpi_disabled("hpet") || acpi_hpet_disabled)
		return (ENXIO);
	if (acpi_get_handle(dev) != NULL &&
	    ACPI_ID_PROBE(device_get_parent(dev), dev, hpet_ids) == NULL)
		return (ENXIO);

	device_set_desc(dev, "High Precision Event Timer");
	return (0);
}

static int
hpet_attach(device_t dev)
{
	struct hpet_softc *sc;
	struct hpet_timer *t;
	int i, j, num_msi, num_timers, num_percpu_et, num_percpu_t, cur_cpu;
	int pcpu_master;
	static int maxhpetet = 0;
	uint32_t val, val2, cvectors, dvectors;
	uint16_t vendor, rev;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->handle = acpi_get_handle(dev);

	sc->mem_rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL)
		return (ENOMEM);

	/* Validate that we can access the whole region. */
	if (rman_get_size(sc->mem_res) < HPET_MEM_WIDTH) {
		device_printf(dev, "memory region width %ld too small\n",
		    rman_get_size(sc->mem_res));
		bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
		return (ENXIO);
	}

	/* Be sure the timer is enabled. */
	hpet_enable(sc);

	/* Read basic statistics about the timer. */
	val = bus_read_4(sc->mem_res, HPET_PERIOD);
	if (val == 0) {
		device_printf(dev, "invalid period\n");
		hpet_disable(sc);
		bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
		return (ENXIO);
	}

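	/*
	 * HPET_PERIOD is the main counter tick period in femtoseconds;
	 * convert it to a frequency in Hz, rounding to the nearest integer.
	 */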
	sc->freq = (1000000000000000LL + val / 2) / val;
	sc->caps = bus_read_4(sc->mem_res, HPET_CAPABILITIES);
	vendor = (sc->caps & HPET_CAP_VENDOR_ID) >> 16;
	rev = sc->caps & HPET_CAP_REV_ID;
	num_timers = 1 + ((sc->caps & HPET_CAP_NUM_TIM) >> 8);
	/*
	 * ATI/AMD violates the IA-PC HPET (High Precision Event Timers)
	 * Specification and reports an off-by-one number of
	 * timers/comparators.
	 * Additionally, they use an unregistered value in the VENDOR_ID
	 * field.
	 */
	if (vendor == HPET_VENDID_AMD && rev < 0x10 && num_timers > 0)
		num_timers--;
	sc->num_timers = num_timers;
	if (bootverbose) {
		device_printf(dev,
		    "vendor 0x%x, rev 0x%x, %jdHz%s, %d timers,%s\n",
		    vendor, rev, sc->freq,
		    (sc->caps & HPET_CAP_COUNT_SIZE) ? " 64bit" : "",
		    num_timers,
		    (sc->caps & HPET_CAP_LEG_RT) ? " legacy route" : "");
	}
	for (i = 0; i < num_timers; i++) {
		t = &sc->t[i];
		t->sc = sc;
		t->num = i;
		t->mode = 0;
		t->intr_rid = -1;
		t->irq = -1;
		t->pcpu_cpu = -1;
		t->pcpu_misrouted = 0;
		t->pcpu_master = -1;
		t->caps = bus_read_4(sc->mem_res, HPET_TIMER_CAP_CNF(i));
		t->vectors = bus_read_4(sc->mem_res, HPET_TIMER_CAP_CNF(i) + 4);
		if (bootverbose) {
			device_printf(dev,
			    " t%d: irqs 0x%08x (%d)%s%s%s\n", i,
			    t->vectors, (t->caps & HPET_TCNF_INT_ROUTE) >> 9,
			    (t->caps & HPET_TCAP_FSB_INT_DEL) ? ", MSI" : "",
			    (t->caps & HPET_TCAP_SIZE) ? ", 64bit" : "",
			    (t->caps & HPET_TCAP_PER_INT) ? ", periodic" : "");
		}
	}
	if (testenv("debug.acpi.hpet_test"))
		hpet_test(sc);
	/*
	 * Don't attach if the timer never increments.  Since the spec
	 * requires it to be at least 10 MHz, it has to change in 1 us.
	 */
	val = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	DELAY(1);
	val2 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	if (val == val2) {
		device_printf(dev, "HPET never increments, disabling\n");
		hpet_disable(sc);
		bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
		return (ENXIO);
	}
	/* Announce the first HPET as a timecounter. */
	if (device_get_unit(dev) == 0) {
		sc->tc.tc_get_timecount = hpet_get_timecount;
		sc->tc.tc_counter_mask = ~0u;
		sc->tc.tc_name = "HPET";
		sc->tc.tc_quality = 950;
		sc->tc.tc_frequency = sc->freq;
		sc->tc.tc_priv = sc;
		tc_init(&sc->tc);
	}
	/* If not disabled, set up and announce event timers. */
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	     "clock", &i) == 0 && i == 0)
		return (0);

	/* Check whether we can and want legacy routing. */
	sc->legacy_route = 0;
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	     "legacy_route", &sc->legacy_route);
	if ((sc->caps & HPET_CAP_LEG_RT) == 0)
		sc->legacy_route = 0;
	if (sc->legacy_route) {
		sc->t[0].vectors = 0;
		sc->t[1].vectors = 0;
	}

	/* Check which IRQs we want to use. */
	/* By default allow any PCI IRQs. */
	sc->allowed_irqs = 0xffff0000;
	/*
	 * HPETs in AMD chipsets before SB800 have problems with IRQs >= 16.
	 * Lower IRQs are also not always working, for various reasons.
	 * SB800 fixed that, but does not seem to implement level triggering
	 * properly, which makes it very unreliable - it freezes after any
	 * interrupt loss.  Avoid legacy IRQs for AMD.
	 */
	if (vendor == HPET_VENDID_AMD || vendor == HPET_VENDID_AMD2)
		sc->allowed_irqs = 0x00000000;
	/*
	 * NVidia MCP5x chipsets have a number of unexplained interrupt
	 * problems.  For some reason, using HPET interrupts breaks HDA sound.
	 */
	if (vendor == HPET_VENDID_NVIDIA && rev <= 0x01)
		sc->allowed_irqs = 0x00000000;
	/*
	 * The ServerWorks HT1000 is reported to have problems with IRQs >= 16.
	 * Lower IRQs are working, but the allowed mask is not set correctly.
	 * Legacy_route mode works fine.
	 */
	if (vendor == HPET_VENDID_SW && rev <= 0x01)
		sc->allowed_irqs = 0x00000000;
	/*
	 * Neither QEMU nor VirtualBox report supported IRQs correctly.
	 * The only way to use HPET there is to specify IRQs manually
	 * and/or use legacy_route.  Legacy_route mode works on both.
	 */
	if (vm_guest)
		sc->allowed_irqs = 0x00000000;
	/* Let the user override. */
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	     "allowed_irqs", &sc->allowed_irqs);

	/* Get how many per-CPU timers we should try to provide. */
	sc->per_cpu = 1;
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	     "per_cpu", &sc->per_cpu);

	num_msi = 0;
	sc->useirq = 0;
	/* Find IRQ vectors for all timers. */
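	/*
	 * Split the allowed IRQ mask: the low 16 bits (ISA IRQs) may be
	 * used as dedicated per-timer interrupts, while the high 16 bits
	 * (PCI IRQs) are candidates for a single shared interrupt.
	 */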
	cvectors = sc->allowed_irqs & 0xffff0000;
	dvectors = sc->allowed_irqs & 0x0000ffff;
	if (sc->legacy_route)
		dvectors &= 0x0000fefe;
	for (i = 0; i < num_timers; i++) {
		t = &sc->t[i];
		if (sc->legacy_route && i < 2)
			t->irq = (i == 0) ? 0 : 8;
#ifdef DEV_APIC
		else if (t->caps & HPET_TCAP_FSB_INT_DEL) {
			if ((j = PCIB_ALLOC_MSIX(
			    device_get_parent(device_get_parent(dev)), dev,
			    &t->irq))) {
				device_printf(dev,
				    "Can't allocate interrupt for t%d: %d\n",
				    i, j);
			}
		}
#endif
		else if (dvectors & t->vectors) {
			t->irq = ffs(dvectors & t->vectors) - 1;
			dvectors &= ~(1 << t->irq);
		}
		if (t->irq >= 0) {
			t->intr_rid = hpet_find_irq_rid(dev, t->irq, t->irq);
			t->intr_res = bus_alloc_resource(dev, SYS_RES_IRQ,
			    &t->intr_rid, t->irq, t->irq, 1, RF_ACTIVE);
			if (t->intr_res == NULL) {
				t->irq = -1;
				device_printf(dev,
				    "Can't map interrupt for t%d.\n", i);
			} else if (bus_setup_intr(dev, t->intr_res,
			    INTR_TYPE_CLK, hpet_intr_single, NULL, t,
			    &t->intr_handle) != 0) {
				t->irq = -1;
				device_printf(dev,
				    "Can't setup interrupt for t%d.\n", i);
			} else {
				bus_describe_intr(dev, t->intr_res,
				    t->intr_handle, "t%d", i);
				num_msi++;
			}
		}
		if (t->irq < 0 && (cvectors & t->vectors) != 0) {
			cvectors &= t->vectors;
			sc->useirq |= (1 << i);
		}
	}
	if (sc->legacy_route && sc->t[0].irq < 0 && sc->t[1].irq < 0)
		sc->legacy_route = 0;
	if (sc->legacy_route)
		hpet_enable(sc);
	/* Group timers for per-CPU operation. */
	num_percpu_et = min(num_msi / mp_ncpus, sc->per_cpu);
	num_percpu_t = num_percpu_et * mp_ncpus;
	pcpu_master = 0;
	cur_cpu = CPU_FIRST();
	for (i = 0; i < num_timers; i++) {
		t = &sc->t[i];
		if (t->irq >= 0 && num_percpu_t > 0) {
			if (cur_cpu == CPU_FIRST())
				pcpu_master = i;
			t->pcpu_cpu = cur_cpu;
			t->pcpu_master = pcpu_master;
			sc->t[pcpu_master].pcpu_slaves[cur_cpu] = i;
			bus_bind_intr(dev, t->intr_res, cur_cpu);
			cur_cpu = CPU_NEXT(cur_cpu);
			num_percpu_t--;
		} else if (t->irq >= 0)
			bus_bind_intr(dev, t->intr_res, CPU_FIRST());
	}
	bus_write_4(sc->mem_res, HPET_ISR, 0xffffffff);
	sc->irq = -1;
	/* If at least one timer needs a legacy IRQ, set it up. */
	if (sc->useirq) {
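		/*
		 * Find the highest contiguous range [j, i] of allowed IRQs
		 * and let the bus allocate one shared interrupt from it.
		 */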
		j = i = fls(cvectors) - 1;
		while (j > 0 && (cvectors & (1 << (j - 1))) != 0)
			j--;
		sc->intr_rid = hpet_find_irq_rid(dev, j, i);
		sc->intr_res = bus_alloc_resource(dev, SYS_RES_IRQ,
		    &sc->intr_rid, j, i, 1, RF_SHAREABLE | RF_ACTIVE);
		if (sc->intr_res == NULL)
			device_printf(dev, "Can't map interrupt.\n");
		else if (bus_setup_intr(dev, sc->intr_res, INTR_TYPE_CLK,
		    hpet_intr, NULL, sc, &sc->intr_handle) != 0) {
			device_printf(dev, "Can't setup interrupt.\n");
		} else {
			sc->irq = rman_get_start(sc->intr_res);
			/* Bind the IRQ to the BSP to avoid live migration. */
			bus_bind_intr(dev, sc->intr_res, CPU_FIRST());
		}
	}
	/* Program and announce event timers. */
	for (i = 0; i < num_timers; i++) {
		t = &sc->t[i];
		t->caps &= ~(HPET_TCNF_FSB_EN | HPET_TCNF_INT_ROUTE);
		t->caps &= ~(HPET_TCNF_VAL_SET | HPET_TCNF_INT_ENB);
		t->caps &= ~(HPET_TCNF_INT_TYPE);
		t->caps |= HPET_TCNF_32MODE;
		if (t->irq >= 0 && sc->legacy_route && i < 2) {
			/* Legacy route doesn't need more configuration. */
		} else
#ifdef DEV_APIC
		if ((t->caps & HPET_TCAP_FSB_INT_DEL) && t->irq >= 0) {
			uint64_t addr;
			uint32_t data;

			if (PCIB_MAP_MSI(
			    device_get_parent(device_get_parent(dev)), dev,
			    t->irq, &addr, &data) == 0) {
				bus_write_4(sc->mem_res,
				    HPET_TIMER_FSB_ADDR(i), addr);
				bus_write_4(sc->mem_res,
				    HPET_TIMER_FSB_VAL(i), data);
				t->caps |= HPET_TCNF_FSB_EN;
			} else
				t->irq = -2;
		} else
#endif
		if (t->irq >= 0)
			t->caps |= (t->irq << 9);
		else if (sc->irq >= 0 && (t->vectors & (1 << sc->irq)))
			t->caps |= (sc->irq << 9) | HPET_TCNF_INT_TYPE;
		bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(i), t->caps);
		/* Skip event timers that have no IRQ set up. */
		if (t->irq < 0 &&
		    (sc->irq < 0 || (t->vectors & (1 << sc->irq)) == 0))
			continue;
		/* Announce the reset. */
		if (maxhpetet == 0)
			t->et.et_name = "HPET";
		else {
			sprintf(t->name, "HPET%d", maxhpetet);
			t->et.et_name = t->name;
		}
		t->et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT;
		t->et.et_quality = 450;
		if (t->pcpu_master >= 0) {
			t->et.et_flags |= ET_FLAGS_PERCPU;
			t->et.et_quality += 100;
		} else if (mp_ncpus >= 8)
			t->et.et_quality -= 100;
		if ((t->caps & HPET_TCAP_PER_INT) == 0)
			t->et.et_quality -= 10;
		t->et.et_frequency = sc->freq;
		t->et.et_min_period =
		    ((uint64_t)(HPET_MIN_CYCLES * 2) << 32) / sc->freq;
		t->et.et_max_period = (0xfffffffeLLU << 32) / sc->freq;
		t->et.et_start = hpet_start;
		t->et.et_stop = hpet_stop;
		t->et.et_priv = &sc->t[i];
		if (t->pcpu_master < 0 || t->pcpu_master == i) {
			et_register(&t->et);
			maxhpetet++;
		}
	}

	sc->pdev = make_dev(&hpet_cdevsw, 0, UID_ROOT, GID_WHEEL,
	    0600, "hpet%d", device_get_unit(dev));
	if (sc->pdev) {
		sc->pdev->si_drv1 = sc;
		sc->mmap_allow = 1;
		TUNABLE_INT_FETCH("hw.acpi.hpet.mmap_allow",
		    &sc->mmap_allow);
		sc->mmap_allow_write = 1;
		TUNABLE_INT_FETCH("hw.acpi.hpet.mmap_allow_write",
		    &sc->mmap_allow_write);
		SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
		    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		    OID_AUTO, "mmap_allow",
		    CTLFLAG_RW, &sc->mmap_allow, 0,
		    "Allow userland to memory map HPET");
		SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
		    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		    OID_AUTO, "mmap_allow_write",
		    CTLFLAG_RW, &sc->mmap_allow_write, 0,
		    "Allow userland write to the HPET register space");
	} else
		device_printf(dev, "could not create /dev/hpet%d\n",
		    device_get_unit(dev));

	return (0);
}

static int
hpet_detach(device_t dev)
{
	ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);

	/* XXX Without a tc_remove() function, we can't detach. */
	return (EBUSY);
}

static int
hpet_suspend(device_t dev)
{
//	struct hpet_softc *sc;

	/*
	 * Disable the timer during suspend.  The timer will not lose
	 * its state in S1 or S2, but we are required to disable
	 * it.
	 */
//	sc = device_get_softc(dev);
//	hpet_disable(sc);

	return (0);
}

static int
hpet_resume(device_t dev)
{
	struct hpet_softc *sc;
	struct hpet_timer *t;
	int i;

	/* Re-enable the timer after a resume to keep the clock advancing. */
	sc = device_get_softc(dev);
	hpet_enable(sc);
	/* Restart event timers that were running on suspend. */
	for (i = 0; i < sc->num_timers; i++) {
		t = &sc->t[i];
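		/*
		 * Re-program the FSB (MSI) interrupt address/data registers
		 * for timers that were assigned MSI vectors.
		 */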
#ifdef DEV_APIC
		if (t->irq >= 0 && (sc->legacy_route == 0 || i >= 2)) {
			uint64_t addr;
			uint32_t data;

			if (PCIB_MAP_MSI(
			    device_get_parent(device_get_parent(dev)), dev,
			    t->irq, &addr, &data) == 0) {
				bus_write_4(sc->mem_res,
				    HPET_TIMER_FSB_ADDR(i), addr);
				bus_write_4(sc->mem_res,
				    HPET_TIMER_FSB_VAL(i), data);
			}
		}
#endif
		if (t->mode == 0)
			continue;
		t->next = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
		if (t->mode == 1 && (t->caps & HPET_TCAP_PER_INT)) {
			t->caps |= HPET_TCNF_TYPE;
			t->next += t->div;
			bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num),
			    t->caps | HPET_TCNF_VAL_SET);
			bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
			    t->next);
			bus_read_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num));
			bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
			    t->div);
		} else {
			t->next += sc->freq / 1024;
			bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
			    t->next);
		}
		bus_write_4(sc->mem_res, HPET_ISR, 1 << t->num);
		bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num), t->caps);
	}
	return (0);
}

/* Print some basic latency/rate information to assist in debugging. */
static void
hpet_test(struct hpet_softc *sc)
{
	int i;
	uint32_t u1, u2;
	struct bintime b0, b1, b2;
	struct timespec ts;

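	/*
	 * Call binuptime() twice to warm it up; (b1 - b0) then measures the
	 * binuptime() overhead, which is subtracted from the time taken by
	 * the 1000 counter reads below.
	 */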
	binuptime(&b0);
	binuptime(&b0);
	binuptime(&b1);
	u1 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	for (i = 1; i < 1000; i++)
		u2 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	binuptime(&b2);
	u2 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);

	bintime_sub(&b2, &b1);
	bintime_sub(&b1, &b0);
	bintime_sub(&b2, &b1);
	bintime2timespec(&b2, &ts);

	device_printf(sc->dev, "%ld.%09ld: %u ... %u = %u\n",
	    (long)ts.tv_sec, ts.tv_nsec, u1, u2, u2 - u1);

	device_printf(sc->dev, "time per call: %ld ns\n", ts.tv_nsec / 1000);
}

#ifdef DEV_APIC
static int
hpet_remap_intr(device_t dev, device_t child, u_int irq)
{
	struct hpet_softc *sc = device_get_softc(dev);
	struct hpet_timer *t;
	uint64_t addr;
	uint32_t data;
	int error, i;

	for (i = 0; i < sc->num_timers; i++) {
		t = &sc->t[i];
		if (t->irq != irq)
			continue;
		error = PCIB_MAP_MSI(
		    device_get_parent(device_get_parent(dev)), dev,
		    irq, &addr, &data);
		if (error)
			return (error);
		hpet_disable(sc); /* Stop timer to avoid interrupt loss. */
		bus_write_4(sc->mem_res, HPET_TIMER_FSB_ADDR(i), addr);
		bus_write_4(sc->mem_res, HPET_TIMER_FSB_VAL(i), data);
		hpet_enable(sc);
		return (0);
	}
	return (ENOENT);
}
#endif

static device_method_t hpet_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify, hpet_identify),
	DEVMETHOD(device_probe, hpet_probe),
	DEVMETHOD(device_attach, hpet_attach),
	DEVMETHOD(device_detach, hpet_detach),
	DEVMETHOD(device_suspend, hpet_suspend),
	DEVMETHOD(device_resume, hpet_resume),

#ifdef DEV_APIC
	DEVMETHOD(bus_remap_intr, hpet_remap_intr),
#endif

	DEVMETHOD_END
};

static driver_t	hpet_driver = {
	"hpet",
	hpet_methods,
	sizeof(struct hpet_softc),
};

DRIVER_MODULE(hpet, acpi, hpet_driver, hpet_devclass, 0, 0);
MODULE_DEPEND(hpet, acpi, 1, 1, 1);