/* $NetBSD: acpi_cpu_cstate.c,v 1.16 2010/08/08 18:25:06 jruoho Exp $ */

/*-
 * Copyright (c) 2010 Jukka Ruohonen <jruohonen@iki.fi>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_cstate.c,v 1.16 2010/08/08 18:25:06 jruoho Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/once.h>
#include <sys/mutex.h>
#include <sys/timetc.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/acpi/acpireg.h>
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_cpu.h>
#include <dev/acpi/acpi_timer.h>

#include <machine/acpi_machdep.h>

#define _COMPONENT	 ACPI_BUS_COMPONENT
ACPI_MODULE_NAME	 ("acpi_cpu_cstate")

static void		 acpicpu_cstate_attach_print(struct acpicpu_softc *);
static ACPI_STATUS	 acpicpu_cstate_cst(struct acpicpu_softc *);
static ACPI_STATUS	 acpicpu_cstate_cst_add(struct acpicpu_softc *,
						ACPI_OBJECT *);
static void		 acpicpu_cstate_cst_bios(void);
static void		 acpicpu_cstate_fadt(struct acpicpu_softc *);
static void		 acpicpu_cstate_quirks(struct acpicpu_softc *);
static int		 acpicpu_cstate_quirks_piix4(struct pci_attach_args *);
static int		 acpicpu_cstate_latency(struct acpicpu_softc *);
static bool		 acpicpu_cstate_bm_check(void);
static void		 acpicpu_cstate_idle_enter(struct acpicpu_softc *, int);

extern struct acpicpu_softc **acpicpu_sc;
/*
 * XXX:	The local APIC timer (as well as the TSC) is typically
 *	stopped in C3. For now, we have no choice but to disable C3.
 */
#ifdef ACPICPU_ENABLE_C3
static int cs_state_max = ACPI_STATE_C3;
#else
static int cs_state_max = ACPI_STATE_C2;
#endif

void
acpicpu_cstate_attach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	ACPI_STATUS rv;

	/*
	 * Either use the preferred _CST or resort to FADT.
	 */
	rv = acpicpu_cstate_cst(sc);

	switch (rv) {

	case AE_OK:
		sc->sc_flags |= ACPICPU_FLAG_C_CST;
		acpicpu_cstate_cst_bios();
		break;

	default:
		sc->sc_flags |= ACPICPU_FLAG_C_FADT;
		acpicpu_cstate_fadt(sc);
		break;
	}

	acpicpu_cstate_quirks(sc);
	acpicpu_cstate_attach_print(sc);
}

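/*
 * Print the details of each supported C-state (debug output only).
 */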
static void
acpicpu_cstate_attach_print(struct acpicpu_softc *sc)
{
	struct acpicpu_cstate *cs;
	const char *str;
	int i;

	for (i = 0; i < ACPI_C_STATE_COUNT; i++) {

		cs = &sc->sc_cstate[i];

		if (cs->cs_method == 0)
			continue;

		switch (cs->cs_method) {

		case ACPICPU_C_STATE_HALT:
			str = "HALT";
			break;

		case ACPICPU_C_STATE_FFH:
			str = "FFH";
			break;

		case ACPICPU_C_STATE_SYSIO:
			str = "SYSIO";
			break;

		default:
			panic("NOTREACHED");
		}

		aprint_debug_dev(sc->sc_dev, "C%d: %5s, "
		    "lat %3u us, pow %5u mW, addr 0x%06x, flags 0x%02x\n",
		    i, str, cs->cs_latency, cs->cs_power,
		    (uint32_t)cs->cs_addr, cs->cs_flags);
	}
}

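/*
 * Stop the ACPI idle mechanism. Note that this is done only
 * once for the whole machine via RUN_ONCE(9), regardless of
 * the number of CPUs.
 */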
int
acpicpu_cstate_detach(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static ONCE_DECL(once_detach);
	int rv;

	rv = RUN_ONCE(&once_detach, acpicpu_md_idle_stop);

	if (rv != 0)
		return rv;

	sc->sc_flags &= ~ACPICPU_FLAG_C;

	return 0;
}

int
acpicpu_cstate_start(device_t self)
{
	struct acpicpu_softc *sc = device_private(self);
	static ONCE_DECL(once_start);
	static ONCE_DECL(once_save);
	int rv;

	/*
	 * Save the existing idle-mechanism and claim the idle_loop(9).
	 * This should be called after all ACPI CPUs have been attached.
	 */
	rv = RUN_ONCE(&once_save, acpicpu_md_idle_init);

	if (rv != 0)
		return rv;

	rv = RUN_ONCE(&once_start, acpicpu_md_idle_start);

	if (rv == 0)
		sc->sc_flags |= ACPICPU_FLAG_C;

	return rv;
}

bool
acpicpu_cstate_suspend(device_t self)
{

	return true;
}

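/*
 * The C-state configuration may change across a suspend and
 * resume cycle; queue an asynchronous _CST re-evaluation.
 */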
bool
acpicpu_cstate_resume(device_t self)
{
	static const ACPI_OSD_EXEC_CALLBACK func = acpicpu_cstate_callback;
	struct acpicpu_softc *sc = device_private(self);

	if ((sc->sc_flags & ACPICPU_FLAG_C_CST) != 0)
		(void)AcpiOsExecute(OSL_NOTIFY_HANDLER, func, sc->sc_dev);

	return true;
}

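/*
 * Re-evaluate _CST. This is a no-op when the
 * C-states were derived from the FADT.
 */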
void
acpicpu_cstate_callback(void *aux)
{
	struct acpicpu_softc *sc;
	device_t self = aux;

	sc = device_private(self);

	if ((sc->sc_flags & ACPICPU_FLAG_C_FADT) != 0) {
		KASSERT((sc->sc_flags & ACPICPU_FLAG_C_CST) == 0);
		return;
	}

	mutex_enter(&sc->sc_mtx);
	(void)acpicpu_cstate_cst(sc);
	mutex_exit(&sc->sc_mtx);
}

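/*
 * Evaluate and parse the _CST package (see ACPI 4.0, section 8.4.2).
 * The first element declares the number of sub-packages that follow.
 */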
static ACPI_STATUS
acpicpu_cstate_cst(struct acpicpu_softc *sc)
{
	ACPI_OBJECT *elm, *obj;
	ACPI_BUFFER buf;
	ACPI_STATUS rv;
	uint32_t i, n;
	uint8_t count;

	rv = acpi_eval_struct(sc->sc_node->ad_handle, "_CST", &buf);

	if (ACPI_FAILURE(rv))
		return rv;

	obj = buf.Pointer;

	if (obj->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (obj->Package.Count < 2) {
		rv = AE_LIMIT;
		goto out;
	}

	elm = obj->Package.Elements;

	if (elm[0].Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	n = elm[0].Integer.Value;

	if (n != obj->Package.Count - 1) {
		rv = AE_BAD_VALUE;
		goto out;
	}

	if (n > ACPI_C_STATES_MAX) {
		rv = AE_LIMIT;
		goto out;
	}

	(void)memset(sc->sc_cstate, 0,
	    sizeof(*sc->sc_cstate) * ACPI_C_STATE_COUNT);

	CTASSERT(ACPI_STATE_C0 == 0 && ACPI_STATE_C1 == 1);
	CTASSERT(ACPI_STATE_C2 == 2 && ACPI_STATE_C3 == 3);

	for (count = 0, i = 1; i <= n; i++) {

		elm = &obj->Package.Elements[i];
		rv = acpicpu_cstate_cst_add(sc, elm);

		if (ACPI_SUCCESS(rv))
			count++;
	}

	rv = (count != 0) ? AE_OK : AE_NOT_EXIST;

out:
	if (buf.Pointer != NULL)
		ACPI_FREE(buf.Pointer);

	return rv;
}

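/*
 * Parse and validate a single _CST entry. Each entry is a package
 * of four elements: the register, the type, the worst-case latency,
 * and the average power consumption.
 */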
static ACPI_STATUS
acpicpu_cstate_cst_add(struct acpicpu_softc *sc, ACPI_OBJECT *elm)
{
	const struct acpicpu_object *ao = &sc->sc_object;
	struct acpicpu_cstate *cs = sc->sc_cstate;
	struct acpicpu_cstate state;
	struct acpicpu_reg *reg;
	ACPI_STATUS rv = AE_OK;
	ACPI_OBJECT *obj;
	uint32_t type;

	(void)memset(&state, 0, sizeof(state));

	state.cs_flags = ACPICPU_FLAG_C_BM_STS;

	if (elm->Type != ACPI_TYPE_PACKAGE) {
		rv = AE_TYPE;
		goto out;
	}

	if (elm->Package.Count != 4) {
		rv = AE_LIMIT;
		goto out;
	}

	/*
	 * Type.
	 */
	obj = &elm->Package.Elements[1];

	if (obj->Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	type = obj->Integer.Value;

	if (type < ACPI_STATE_C1 || type > ACPI_STATE_C3) {
		rv = AE_TYPE;
		goto out;
	}

	/*
	 * Latency.
	 */
	obj = &elm->Package.Elements[2];

	if (obj->Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	state.cs_latency = obj->Integer.Value;

	/*
	 * Power.
	 */
	obj = &elm->Package.Elements[3];

	if (obj->Type != ACPI_TYPE_INTEGER) {
		rv = AE_TYPE;
		goto out;
	}

	state.cs_power = obj->Integer.Value;

	/*
	 * Register.
	 */
	obj = &elm->Package.Elements[0];

	if (obj->Type != ACPI_TYPE_BUFFER) {
		rv = AE_TYPE;
		goto out;
	}

	CTASSERT(sizeof(struct acpicpu_reg) == 15);

	if (obj->Buffer.Length < sizeof(struct acpicpu_reg)) {
		rv = AE_LIMIT;
		goto out;
	}

	reg = (struct acpicpu_reg *)obj->Buffer.Pointer;

	switch (reg->reg_spaceid) {

	case ACPI_ADR_SPACE_SYSTEM_IO:
		state.cs_method = ACPICPU_C_STATE_SYSIO;

		if (reg->reg_addr == 0) {
			rv = AE_AML_ILLEGAL_ADDRESS;
			goto out;
		}

		if (reg->reg_bitwidth != 8) {
			rv = AE_AML_BAD_RESOURCE_LENGTH;
			goto out;
		}

		/*
		 * Check only that the address is in the mapped space.
		 * Systems are allowed to change it when operating
		 * with _CST (see ACPI 4.0, pp. 94-95). For instance,
		 * the offset of P_LVL3 may change depending on whether
		 * acpiacad(4) is connected or disconnected.
		 */
		if (reg->reg_addr > ao->ao_pblkaddr + ao->ao_pblklen) {
			rv = AE_BAD_ADDRESS;
			goto out;
		}

		state.cs_addr = reg->reg_addr;
		break;

	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		state.cs_method = ACPICPU_C_STATE_FFH;

		switch (type) {

		case ACPI_STATE_C1:

			if ((sc->sc_flags & ACPICPU_FLAG_C_FFH) == 0)
				state.cs_method = ACPICPU_C_STATE_HALT;

			break;

		default:

			if ((sc->sc_flags & ACPICPU_FLAG_C_FFH) == 0) {
				rv = AE_SUPPORT;
				goto out;
			}
		}

		if (sc->sc_cap != 0) {

			/*
			 * The _CST FFH GAS encoding may contain
			 * additional hints on Intel processors.
			 * Use these to determine whether we can
			 * avoid the bus master activity check.
			 */
			if ((reg->reg_accesssize & ACPICPU_PDC_GAS_BM) == 0)
				state.cs_flags &= ~ACPICPU_FLAG_C_BM_STS;
		}

		break;

	default:
		rv = AE_AML_INVALID_SPACE_ID;
		goto out;
	}

	if (cs[type].cs_method != 0) {
		rv = AE_ALREADY_EXISTS;
		goto out;
	}

	cs[type].cs_addr = state.cs_addr;
	cs[type].cs_power = state.cs_power;
	cs[type].cs_flags = state.cs_flags;
	cs[type].cs_method = state.cs_method;
	cs[type].cs_latency = state.cs_latency;

out:
	if (ACPI_FAILURE(rv))
		aprint_debug_dev(sc->sc_dev, "invalid "
		    "_CST: %s\n", AcpiFormatException(rv));

	return rv;
}

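/*
 * If a SMI command port is present, inform the firmware
 * that the operating system is in charge of the C-states.
 */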
static void
acpicpu_cstate_cst_bios(void)
{
	const uint8_t val = AcpiGbl_FADT.CstControl;
	const uint32_t addr = AcpiGbl_FADT.SmiCommand;

	if (addr == 0)
		return;

	(void)AcpiOsWritePort(addr, val, 8);
}

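/*
 * Derive the C-states from the FADT and the P_BLK register block.
 * This is the fallback when _CST is not available.
 */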
static void
acpicpu_cstate_fadt(struct acpicpu_softc *sc)
{
	struct acpicpu_cstate *cs = sc->sc_cstate;

	(void)memset(cs, 0, sizeof(*cs) * ACPI_C_STATE_COUNT);

	/*
	 * All x86 processors should support C1 (a.k.a. HALT).
	 */
	if ((AcpiGbl_FADT.Flags & ACPI_FADT_C1_SUPPORTED) != 0)
		cs[ACPI_STATE_C1].cs_method = ACPICPU_C_STATE_HALT;

	if ((acpicpu_md_cpus_running() > 1) &&
	    (AcpiGbl_FADT.Flags & ACPI_FADT_C2_MP_SUPPORTED) == 0)
		return;

	cs[ACPI_STATE_C2].cs_method = ACPICPU_C_STATE_SYSIO;
	cs[ACPI_STATE_C3].cs_method = ACPICPU_C_STATE_SYSIO;

	cs[ACPI_STATE_C2].cs_latency = AcpiGbl_FADT.C2Latency;
	cs[ACPI_STATE_C3].cs_latency = AcpiGbl_FADT.C3Latency;

	cs[ACPI_STATE_C2].cs_addr = sc->sc_object.ao_pblkaddr + 4;
	cs[ACPI_STATE_C3].cs_addr = sc->sc_object.ao_pblkaddr + 5;

	/*
	 * The P_BLK length should always be 6. If it
	 * is not, reduce functionality accordingly.
	 * Also sanity check the latency levels that
	 * are reported in the FADT.
	 */
	if (sc->sc_object.ao_pblklen < 5)
		cs[ACPI_STATE_C2].cs_method = 0;

	if (sc->sc_object.ao_pblklen < 6)
		cs[ACPI_STATE_C3].cs_method = 0;

	CTASSERT(ACPICPU_C_C2_LATENCY_MAX == 100);
	CTASSERT(ACPICPU_C_C3_LATENCY_MAX == 1000);

	if (AcpiGbl_FADT.C2Latency > ACPICPU_C_C2_LATENCY_MAX)
		cs[ACPI_STATE_C2].cs_method = 0;

	if (AcpiGbl_FADT.C3Latency > ACPICPU_C_C3_LATENCY_MAX)
		cs[ACPI_STATE_C3].cs_method = 0;
}

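/*
 * Determine the bus master and cache control capabilities,
 * and disable C3 on chipsets with known errata.
 */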
static void
acpicpu_cstate_quirks(struct acpicpu_softc *sc)
{
	const uint32_t reg = AcpiGbl_FADT.Pm2ControlBlock;
	const uint32_t len = AcpiGbl_FADT.Pm2ControlLength;
	struct pci_attach_args pa;

	/*
	 * Check bus master arbitration. If ARB_DIS
	 * is not available, processor caches must be
	 * flushed before C3 (ACPI 4.0, section 8.2).
	 */
	if (reg != 0 && len != 0)
		sc->sc_flags |= ACPICPU_FLAG_C_ARB;
	else {
		/*
		 * Disable C3 entirely if WBINVD is not present.
		 */
		if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) == 0)
			sc->sc_flags |= ACPICPU_FLAG_C_NOC3;
		else {
			/*
			 * If WBINVD is present and functioning properly,
			 * flush all processor caches before entering C3.
			 */
			if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0)
				sc->sc_flags |= ACPICPU_FLAG_C_NOC3;
			else
				sc->sc_flags &= ~ACPICPU_FLAG_C_BM;
		}
	}

	/*
	 * There are several errata for PIIX4.
	 */
	if (pci_find_device(&pa, acpicpu_cstate_quirks_piix4) != 0)
		sc->sc_flags |= ACPICPU_FLAG_C_NOC3;

	if ((sc->sc_flags & ACPICPU_FLAG_C_NOC3) != 0)
		sc->sc_cstate[ACPI_STATE_C3].cs_method = 0;
}

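/*
 * A match function for pci_find_device(9); returns non-zero
 * for the affected Intel PIIX4 and 82440MX devices.
 */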
static int
acpicpu_cstate_quirks_piix4(struct pci_attach_args *pa)
{

	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
		return 0;

	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_82371AB_ISA ||
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_82440MX_PMC)
		return 1;

	return 0;
}

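/*
 * Pick the deepest available C-state whose (scaled) worst-case
 * latency is still below the previously measured sleep time.
 */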
static int
acpicpu_cstate_latency(struct acpicpu_softc *sc)
{
	static const uint32_t cs_factor = 3;
	struct acpicpu_cstate *cs;
	int i;

	for (i = cs_state_max; i > 0; i--) {

		cs = &sc->sc_cstate[i];

		if (__predict_false(cs->cs_method == 0))
			continue;

		/*
		 * Choose a state if we have previously slept
		 * longer than the worst case latency of the
		 * state times an arbitrary multiplier.
		 */
		if (sc->sc_cstate_sleep > cs->cs_latency * cs_factor)
			return i;
	}

	return ACPI_STATE_C1;
}

/*
 * The main idle loop.
 */
void
acpicpu_cstate_idle(void)
{
	struct cpu_info *ci = curcpu();
	struct acpicpu_softc *sc;
	int state;

	if (__predict_false(ci->ci_want_resched != 0))
		return;

	acpi_md_OsDisableInterrupt();

	KASSERT(acpicpu_sc != NULL);
	KASSERT(ci->ci_acpiid < maxcpus);
	KASSERT(ci->ci_ilevel == IPL_NONE);

	sc = acpicpu_sc[ci->ci_acpiid];

	if (__predict_false(sc == NULL))
		goto halt;

	if (__predict_false(sc->sc_cold != false))
		goto halt;

	if (__predict_false((sc->sc_flags & ACPICPU_FLAG_C) == 0))
		goto halt;

	if (__predict_false(mutex_tryenter(&sc->sc_mtx) == 0))
		goto halt;

	mutex_exit(&sc->sc_mtx);
	state = acpicpu_cstate_latency(sc);

	/*
	 * Check for bus master activity. Note that particularly usb(4)
	 * causes high activity, which may prevent the use of C3 states.
	 */
	if ((sc->sc_cstate[state].cs_flags & ACPICPU_FLAG_C_BM_STS) != 0) {

		if (acpicpu_cstate_bm_check() != false)
			state--;

		if (__predict_false(sc->sc_cstate[state].cs_method == 0))
			state = ACPI_STATE_C1;
	}

	KASSERT(state != ACPI_STATE_C0);

	if (state != ACPI_STATE_C3) {
		acpicpu_cstate_idle_enter(sc, state);
		return;
	}

	/*
	 * On all recent (Intel) CPUs caches are shared
	 * by CPUs and bus master control is required to
	 * keep these coherent while in C3. Flushing the
	 * CPU caches is only the last resort.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) == 0)
		ACPI_FLUSH_CPU_CACHE();

	/*
	 * Allow the bus master to request that any given
	 * CPU should return immediately to C0 from C3.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1);

	/*
	 * It may be necessary to disable bus master arbitration
	 * to ensure that bus master cycles do not occur while
	 * sleeping in C3 (see ACPI 4.0, section 8.1.4).
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1);

	acpicpu_cstate_idle_enter(sc, state);

	/*
	 * Disable bus master wake and re-enable the arbiter.
	 */
	if ((sc->sc_flags & ACPICPU_FLAG_C_BM) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0);

	if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
		(void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);

	return;

halt:
	acpicpu_md_idle_enter(ACPICPU_C_STATE_HALT, ACPI_STATE_C1);
}

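/*
 * Enter a C-state, using either the native method (HALT or,
 * typically, MONITOR/MWAIT for FFH) or a read from the given
 * I/O port. The sleep time is measured with the ACPI timer.
 */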
static void
acpicpu_cstate_idle_enter(struct acpicpu_softc *sc, int state)
{
	struct acpicpu_cstate *cs = &sc->sc_cstate[state];
	uint32_t end, start, val;

	start = acpitimer_read_safe(NULL);

	switch (cs->cs_method) {

	case ACPICPU_C_STATE_FFH:
	case ACPICPU_C_STATE_HALT:
		acpicpu_md_idle_enter(cs->cs_method, state);
		break;

	case ACPICPU_C_STATE_SYSIO:
		(void)AcpiOsReadPort(cs->cs_addr, &val, 8);
		break;

	default:
		acpicpu_md_idle_enter(ACPICPU_C_STATE_HALT, ACPI_STATE_C1);
		break;
	}

	cs->cs_stat++;

	end = acpitimer_read_safe(NULL);
	sc->sc_cstate_sleep = hztoms(acpitimer_delta(end, start)) * 1000;

	acpi_md_OsEnableInterrupt();
}

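/*
 * Check and clear the sticky BM_STS bit, which indicates
 * bus master activity since the bit was last cleared.
 */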
static bool
acpicpu_cstate_bm_check(void)
{
	uint32_t val = 0;
	ACPI_STATUS rv;

	rv = AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &val);

	if (ACPI_FAILURE(rv) || val == 0)
		return false;

	(void)AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1);

	return true;
}