acpi.c revision 191697
1/*-
2 * Copyright (c) 2000 Takanori Watanabe <takawata@jp.freebsd.org>
3 * Copyright (c) 2000 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
4 * Copyright (c) 2000, 2001 Michael Smith
5 * Copyright (c) 2000 BSDi
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/dev/acpica/acpi.c 191697 2009-04-30 17:45:43Z jkim $");
32
33#include "opt_acpi.h"
34#include <sys/param.h>
35#include <sys/kernel.h>
36#include <sys/proc.h>
37#include <sys/fcntl.h>
38#include <sys/malloc.h>
39#include <sys/module.h>
40#include <sys/bus.h>
41#include <sys/conf.h>
42#include <sys/ioccom.h>
43#include <sys/reboot.h>
44#include <sys/sysctl.h>
45#include <sys/ctype.h>
46#include <sys/linker.h>
47#include <sys/power.h>
48#include <sys/sbuf.h>
49#ifdef SMP
50#include <sys/sched.h>
51#endif
52#include <sys/smp.h>
53#include <sys/timetc.h>
54
55#if defined(__i386__) || defined(__amd64__)
56#include <machine/pci_cfgreg.h>
57#endif
58#include <machine/resource.h>
59#include <machine/bus.h>
60#include <sys/rman.h>
61#include <isa/isavar.h>
62#include <isa/pnpvar.h>
63
64#include <contrib/dev/acpica/acpi.h>
65#include <dev/acpica/acpivar.h>
66#include <dev/acpica/acpiio.h>
67#include <contrib/dev/acpica/achware.h>
68#include <contrib/dev/acpica/acnamesp.h>
69
70#include "pci_if.h"
71#include <dev/pci/pcivar.h>
72#include <dev/pci/pci_private.h>
73
74#include <vm/vm_param.h>
75
76MALLOC_DEFINE(M_ACPIDEV, "acpidev", "ACPI devices");
77
78/* Hooks for the ACPI CA debugging infrastructure */
79#define _COMPONENT	ACPI_BUS
80ACPI_MODULE_NAME("ACPI")
81
82static d_open_t		acpiopen;
83static d_close_t	acpiclose;
84static d_ioctl_t	acpiioctl;
85
86static struct cdevsw acpi_cdevsw = {
87	.d_version =	D_VERSION,
88	.d_open =	acpiopen,
89	.d_close =	acpiclose,
90	.d_ioctl =	acpiioctl,
91	.d_name =	"acpi",
92};
93
94/* Global mutex for locking access to the ACPI subsystem. */
95struct mtx	acpi_mutex;
96
97/* Bitmap of device quirks. */
98int		acpi_quirks;
99
100/* Supported sleep states. */
101static BOOLEAN	acpi_sleep_states[ACPI_S_STATE_COUNT];
102
103static int	acpi_modevent(struct module *mod, int event, void *junk);
104static int	acpi_probe(device_t dev);
105static int	acpi_attach(device_t dev);
106static int	acpi_suspend(device_t dev);
107static int	acpi_resume(device_t dev);
108static int	acpi_shutdown(device_t dev);
109static device_t	acpi_add_child(device_t bus, int order, const char *name,
110			int unit);
111static int	acpi_print_child(device_t bus, device_t child);
112static void	acpi_probe_nomatch(device_t bus, device_t child);
113static void	acpi_driver_added(device_t dev, driver_t *driver);
114static int	acpi_read_ivar(device_t dev, device_t child, int index,
115			uintptr_t *result);
116static int	acpi_write_ivar(device_t dev, device_t child, int index,
117			uintptr_t value);
118static struct resource_list *acpi_get_rlist(device_t dev, device_t child);
119static int	acpi_sysres_alloc(device_t dev);
120static struct resource *acpi_alloc_resource(device_t bus, device_t child,
121			int type, int *rid, u_long start, u_long end,
122			u_long count, u_int flags);
123static int	acpi_release_resource(device_t bus, device_t child, int type,
124			int rid, struct resource *r);
125static void	acpi_delete_resource(device_t bus, device_t child, int type,
126		    int rid);
127static uint32_t	acpi_isa_get_logicalid(device_t dev);
128static int	acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count);
129static char	*acpi_device_id_probe(device_t bus, device_t dev, char **ids);
130static ACPI_STATUS acpi_device_eval_obj(device_t bus, device_t dev,
131		    ACPI_STRING pathname, ACPI_OBJECT_LIST *parameters,
132		    ACPI_BUFFER *ret);
133static int	acpi_device_pwr_for_sleep(device_t bus, device_t dev,
134		    int *dstate);
135static ACPI_STATUS acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level,
136		    void *context, void **retval);
137static ACPI_STATUS acpi_device_scan_children(device_t bus, device_t dev,
138		    int max_depth, acpi_scan_cb_t user_fn, void *arg);
139static int	acpi_set_powerstate_method(device_t bus, device_t child,
140		    int state);
141static int	acpi_isa_pnp_probe(device_t bus, device_t child,
142		    struct isa_pnp_id *ids);
143static void	acpi_probe_children(device_t bus);
144static void	acpi_probe_order(ACPI_HANDLE handle, int *order);
145static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level,
146		    void *context, void **status);
147static BOOLEAN	acpi_MatchHid(ACPI_HANDLE h, const char *hid);
148static void	acpi_sleep_enable(void *arg);
149static ACPI_STATUS acpi_sleep_disable(struct acpi_softc *sc);
150static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state);
151static void	acpi_shutdown_final(void *arg, int howto);
152static void	acpi_enable_fixed_events(struct acpi_softc *sc);
153static int	acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate);
154static int	acpi_wake_run_prep(ACPI_HANDLE handle, int sstate);
155static int	acpi_wake_prep_walk(int sstate);
156static int	acpi_wake_sysctl_walk(device_t dev);
157static int	acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS);
158static void	acpi_system_eventhandler_sleep(void *arg, int state);
159static void	acpi_system_eventhandler_wakeup(void *arg, int state);
160static int	acpi_sname2sstate(const char *sname);
161static const char *acpi_sstate2sname(int sstate);
162static int	acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS);
163static int	acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS);
164static int	acpi_pm_func(u_long cmd, void *arg, ...);
165static int	acpi_child_location_str_method(device_t acdev, device_t child,
166					       char *buf, size_t buflen);
167static int	acpi_child_pnpinfo_str_method(device_t acdev, device_t child,
168					      char *buf, size_t buflen);
169#if defined(__i386__) || defined(__amd64__)
170static void	acpi_enable_pcie(void);
171#endif
172static void	acpi_hint_device_unit(device_t acdev, device_t child,
173		    const char *name, int *unitp);
174
175static device_method_t acpi_methods[] = {
176    /* Device interface */
177    DEVMETHOD(device_probe,		acpi_probe),
178    DEVMETHOD(device_attach,		acpi_attach),
179    DEVMETHOD(device_shutdown,		acpi_shutdown),
180    DEVMETHOD(device_detach,		bus_generic_detach),
181    DEVMETHOD(device_suspend,		acpi_suspend),
182    DEVMETHOD(device_resume,		acpi_resume),
183
184    /* Bus interface */
185    DEVMETHOD(bus_add_child,		acpi_add_child),
186    DEVMETHOD(bus_print_child,		acpi_print_child),
187    DEVMETHOD(bus_probe_nomatch,	acpi_probe_nomatch),
188    DEVMETHOD(bus_driver_added,		acpi_driver_added),
189    DEVMETHOD(bus_read_ivar,		acpi_read_ivar),
190    DEVMETHOD(bus_write_ivar,		acpi_write_ivar),
191    DEVMETHOD(bus_get_resource_list,	acpi_get_rlist),
192    DEVMETHOD(bus_set_resource,		bus_generic_rl_set_resource),
193    DEVMETHOD(bus_get_resource,		bus_generic_rl_get_resource),
194    DEVMETHOD(bus_alloc_resource,	acpi_alloc_resource),
195    DEVMETHOD(bus_release_resource,	acpi_release_resource),
196    DEVMETHOD(bus_delete_resource,	acpi_delete_resource),
197    DEVMETHOD(bus_child_pnpinfo_str,	acpi_child_pnpinfo_str_method),
198    DEVMETHOD(bus_child_location_str,	acpi_child_location_str_method),
199    DEVMETHOD(bus_activate_resource,	bus_generic_activate_resource),
200    DEVMETHOD(bus_deactivate_resource,	bus_generic_deactivate_resource),
201    DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
202    DEVMETHOD(bus_teardown_intr,	bus_generic_teardown_intr),
203    DEVMETHOD(bus_hint_device_unit,	acpi_hint_device_unit),
204
205    /* ACPI bus */
206    DEVMETHOD(acpi_id_probe,		acpi_device_id_probe),
207    DEVMETHOD(acpi_evaluate_object,	acpi_device_eval_obj),
208    DEVMETHOD(acpi_pwr_for_sleep,	acpi_device_pwr_for_sleep),
209    DEVMETHOD(acpi_scan_children,	acpi_device_scan_children),
210
211    /* PCI emulation */
212    DEVMETHOD(pci_set_powerstate,	acpi_set_powerstate_method),
213
214    /* ISA emulation */
215    DEVMETHOD(isa_pnp_probe,		acpi_isa_pnp_probe),
216
217    {0, 0}
218};
219
220static driver_t acpi_driver = {
221    "acpi",
222    acpi_methods,
223    sizeof(struct acpi_softc),
224};
225
226static devclass_t acpi_devclass;
227DRIVER_MODULE(acpi, nexus, acpi_driver, acpi_devclass, acpi_modevent, 0);
228MODULE_VERSION(acpi, 1);
229
230ACPI_SERIAL_DECL(acpi, "ACPI root bus");
231
232/* Local pools for managing system resources for ACPI child devices. */
233static struct rman acpi_rman_io, acpi_rman_mem;
234
235#define ACPI_MINIMUM_AWAKETIME	5
236
237/* Holds the description of the acpi0 device. */
238static char acpi_desc[ACPI_OEM_ID_SIZE + ACPI_OEM_TABLE_ID_SIZE + 2];
239
240SYSCTL_NODE(_debug, OID_AUTO, acpi, CTLFLAG_RD, NULL, "ACPI debugging");
241static char acpi_ca_version[12];
242SYSCTL_STRING(_debug_acpi, OID_AUTO, acpi_ca_version, CTLFLAG_RD,
243	      acpi_ca_version, 0, "Version of Intel ACPI-CA");
244
245/*
246 * Allow override of whether methods execute in parallel or not.
247 * Enable this for serial behavior, which fixes "AE_ALREADY_EXISTS"
248 * errors for AML that really can't handle parallel method execution.
249 * It is off by default since this breaks recursive methods and
250 * some IBMs use such code.
251 */
252static int acpi_serialize_methods;
253TUNABLE_INT("hw.acpi.serialize_methods", &acpi_serialize_methods);
254
255/* Power devices off and on in suspend and resume.  XXX Remove once tested. */
256static int acpi_do_powerstate = 1;
257TUNABLE_INT("debug.acpi.do_powerstate", &acpi_do_powerstate);
258SYSCTL_INT(_debug_acpi, OID_AUTO, do_powerstate, CTLFLAG_RW,
259    &acpi_do_powerstate, 1, "Turn off devices when suspending.");
260
261/* Reset system clock while resuming.  XXX Remove once tested. */
262static int acpi_reset_clock = 1;
263TUNABLE_INT("debug.acpi.reset_clock", &acpi_reset_clock);
264SYSCTL_INT(_debug_acpi, OID_AUTO, reset_clock, CTLFLAG_RW,
265    &acpi_reset_clock, 1, "Reset system clock while resuming.");
266
267/* Allow users to override quirks. */
268TUNABLE_INT("debug.acpi.quirks", &acpi_quirks);
269
270static int acpi_susp_bounce;
271SYSCTL_INT(_debug_acpi, OID_AUTO, suspend_bounce, CTLFLAG_RW,
272    &acpi_susp_bounce, 0, "Don't actually suspend, just test devices.");
273
274/*
275 * ACPI can only be loaded as a module by the loader; activating it after
276 * system bootstrap time is not useful, and can be fatal to the system.
277 * It also cannot be unloaded, since the entire system bus hierarchy hangs
278 * off it.
279 */
280static int
281acpi_modevent(struct module *mod, int event, void *junk)
282{
283    switch (event) {
284    case MOD_LOAD:
285	if (!cold) {
286	    printf("The ACPI driver cannot be loaded after boot.\n");
287	    return (EPERM);
288	}
289	break;
290    case MOD_UNLOAD:
291	if (!cold && power_pm_get_type() == POWER_PM_TYPE_ACPI)
292	    return (EBUSY);
293	break;
294    default:
295	break;
296    }
297    return (0);
298}
299
300/*
301 * Perform early initialization.
302 */
303ACPI_STATUS
304acpi_Startup(void)
305{
306    static int started = 0;
307    ACPI_STATUS status;
308    int val;
309
310    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
311
312    /* Only run the startup code once.  The MADT driver also calls this. */
313    if (started)
314	return_VALUE (AE_OK);
315    started = 1;
316
317    /*
318     * Pre-allocate space for RSDT/XSDT and DSDT tables and allow resizing
319     * if more tables exist.
320     */
321    if (ACPI_FAILURE(status = AcpiInitializeTables(NULL, 2, TRUE))) {
322	printf("ACPI: Table initialisation failed: %s\n",
323	    AcpiFormatException(status));
324	return_VALUE (status);
325    }
326
327    /* Set up any quirks we have for this system. */
328    if (acpi_quirks == ACPI_Q_OK)
329	acpi_table_quirks(&acpi_quirks);
330
331    /* If the user manually set the disabled hint to 0, force-enable ACPI. */
332    if (resource_int_value("acpi", 0, "disabled", &val) == 0 && val == 0)
333	acpi_quirks &= ~ACPI_Q_BROKEN;
334    if (acpi_quirks & ACPI_Q_BROKEN) {
335	printf("ACPI disabled by blacklist.  Contact your BIOS vendor.\n");
336	status = AE_SUPPORT;
337    }
338
339    return_VALUE (status);
340}
341
342/*
343 * Detect ACPI and perform early initialisation.
344 */
345int
346acpi_identify(void)
347{
348    ACPI_TABLE_RSDP	*rsdp;
349    ACPI_TABLE_HEADER	*rsdt;
350    ACPI_PHYSICAL_ADDRESS paddr;
351    struct sbuf		sb;
352
353    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
354
355    if (!cold)
356	return (ENXIO);
357
358    /* Check that we haven't been disabled with a hint. */
359    if (resource_disabled("acpi", 0))
360	return (ENXIO);
361
362    /* Check for other PM systems. */
363    if (power_pm_get_type() != POWER_PM_TYPE_NONE &&
364	power_pm_get_type() != POWER_PM_TYPE_ACPI) {
365	printf("ACPI identify failed, other PM system enabled.\n");
366	return (ENXIO);
367    }
368
369    /* Initialize root tables. */
370    if (ACPI_FAILURE(acpi_Startup())) {
371	printf("ACPI: Try disabling either ACPI or apic support.\n");
372	return (ENXIO);
373    }
374
375    if ((paddr = AcpiOsGetRootPointer()) == 0 ||
376	(rsdp = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_RSDP))) == NULL)
377	return (ENXIO);
378    if (rsdp->Revision > 1 && rsdp->XsdtPhysicalAddress != 0)
379	paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->XsdtPhysicalAddress;
380    else
381	paddr = (ACPI_PHYSICAL_ADDRESS)rsdp->RsdtPhysicalAddress;
382    AcpiOsUnmapMemory(rsdp, sizeof(ACPI_TABLE_RSDP));
383
384    if ((rsdt = AcpiOsMapMemory(paddr, sizeof(ACPI_TABLE_HEADER))) == NULL)
385	return (ENXIO);
386    sbuf_new(&sb, acpi_desc, sizeof(acpi_desc), SBUF_FIXEDLEN);
387    sbuf_bcat(&sb, rsdt->OemId, ACPI_OEM_ID_SIZE);
388    sbuf_trim(&sb);
389    sbuf_putc(&sb, ' ');
390    sbuf_bcat(&sb, rsdt->OemTableId, ACPI_OEM_TABLE_ID_SIZE);
391    sbuf_trim(&sb);
392    sbuf_finish(&sb);
393    sbuf_delete(&sb);
394    AcpiOsUnmapMemory(rsdt, sizeof(ACPI_TABLE_HEADER));
395
396    snprintf(acpi_ca_version, sizeof(acpi_ca_version), "%x", ACPI_CA_VERSION);
397
398    return (0);
399}
400
401/*
402 * Fetch some descriptive data from ACPI to put in our attach message.
403 */
404static int
405acpi_probe(device_t dev)
406{
407
408    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
409
410    device_set_desc(dev, acpi_desc);
411
412    return_VALUE (0);
413}
414
415static int
416acpi_attach(device_t dev)
417{
418    struct acpi_softc	*sc;
419    ACPI_TABLE_FACS	*facs;
420    ACPI_STATUS		status;
421    int			error, state;
422    UINT32		flags;
423    UINT8		TypeA, TypeB;
424    char		*env;
425
426    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
427
428    sc = device_get_softc(dev);
429    sc->acpi_dev = dev;
430    callout_init(&sc->susp_force_to, TRUE);
431
432    error = ENXIO;
433
434    /* Initialize resource manager. */
435    acpi_rman_io.rm_type = RMAN_ARRAY;
436    acpi_rman_io.rm_start = 0;
437    acpi_rman_io.rm_end = 0xffff;
438    acpi_rman_io.rm_descr = "ACPI I/O ports";
439    if (rman_init(&acpi_rman_io) != 0)
440	panic("acpi rman_init IO ports failed");
441    acpi_rman_mem.rm_type = RMAN_ARRAY;
442    acpi_rman_mem.rm_start = 0;
443    acpi_rman_mem.rm_end = ~0ul;
444    acpi_rman_mem.rm_descr = "ACPI I/O memory addresses";
445    if (rman_init(&acpi_rman_mem) != 0)
446	panic("acpi rman_init memory failed");
447
448    /* Initialise the ACPI mutex */
449    mtx_init(&acpi_mutex, "ACPI global lock", NULL, MTX_DEF);
450
451    /*
452     * Set the globals from our tunables.  This is needed because ACPI-CA
453     * uses UINT8 for some values and we have no tunable_byte.
454     */
455    AcpiGbl_AllMethodsSerialized = acpi_serialize_methods;
456    AcpiGbl_EnableInterpreterSlack = TRUE;
457
458    /* Start up the ACPI CA subsystem. */
459    status = AcpiInitializeSubsystem();
460    if (ACPI_FAILURE(status)) {
461	device_printf(dev, "Could not initialize Subsystem: %s\n",
462		      AcpiFormatException(status));
463	goto out;
464    }
465
466    /* Load ACPI name space. */
467    status = AcpiLoadTables();
468    if (ACPI_FAILURE(status)) {
469	device_printf(dev, "Could not load Namespace: %s\n",
470		      AcpiFormatException(status));
471	goto out;
472    }
473
474#if defined(__i386__) || defined(__amd64__)
475    /* Handle MCFG table if present. */
476    acpi_enable_pcie();
477#endif
478
479    /* Install the default address space handlers. */
480    status = AcpiInstallAddressSpaceHandler(ACPI_ROOT_OBJECT,
481		ACPI_ADR_SPACE_SYSTEM_MEMORY, ACPI_DEFAULT_HANDLER, NULL, NULL);
482    if (ACPI_FAILURE(status)) {
483	device_printf(dev, "Could not initialise SystemMemory handler: %s\n",
484		      AcpiFormatException(status));
485	goto out;
486    }
487    status = AcpiInstallAddressSpaceHandler(ACPI_ROOT_OBJECT,
488		ACPI_ADR_SPACE_SYSTEM_IO, ACPI_DEFAULT_HANDLER, NULL, NULL);
489    if (ACPI_FAILURE(status)) {
490	device_printf(dev, "Could not initialise SystemIO handler: %s\n",
491		      AcpiFormatException(status));
492	goto out;
493    }
494    status = AcpiInstallAddressSpaceHandler(ACPI_ROOT_OBJECT,
495		ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
496    if (ACPI_FAILURE(status)) {
497	device_printf(dev, "could not initialise PciConfig handler: %s\n",
498		      AcpiFormatException(status));
499	goto out;
500    }
501
502    /*
503     * Note that some systems (specifically, those with namespace evaluation
504     * issues that require the avoidance of parts of the namespace) must
505     * avoid running _INI and _STA on everything, as well as dodging the final
506     * object init pass.
507     *
508     * For these devices, we set ACPI_NO_DEVICE_INIT and ACPI_NO_OBJECT_INIT).
509     *
510     * XXX We should arrange for the object init pass after we have attached
511     *     all our child devices, but on many systems it works here.
512     */
513    flags = 0;
514    if (testenv("debug.acpi.avoid"))
515	flags = ACPI_NO_DEVICE_INIT | ACPI_NO_OBJECT_INIT;
516
517    /* Bring the hardware and basic handlers online. */
518    if (ACPI_FAILURE(status = AcpiEnableSubsystem(flags))) {
519	device_printf(dev, "Could not enable ACPI: %s\n",
520		      AcpiFormatException(status));
521	goto out;
522    }
523
524    /*
525     * Call the ECDT probe function to provide EC functionality before
526     * the namespace has been evaluated.
527     *
528     * XXX This happens before the sysresource devices have been probed and
529     * attached so its resources come from nexus0.  In practice, this isn't
530     * a problem but should be addressed eventually.
531     */
532    acpi_ec_ecdt_probe(dev);
533
534    /* Bring device objects and regions online. */
535    if (ACPI_FAILURE(status = AcpiInitializeObjects(flags))) {
536	device_printf(dev, "Could not initialize ACPI objects: %s\n",
537		      AcpiFormatException(status));
538	goto out;
539    }
540
541    /*
542     * Setup our sysctl tree.
543     *
544     * XXX: This doesn't check to make sure that none of these fail.
545     */
546    sysctl_ctx_init(&sc->acpi_sysctl_ctx);
547    sc->acpi_sysctl_tree = SYSCTL_ADD_NODE(&sc->acpi_sysctl_ctx,
548			       SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
549			       device_get_name(dev), CTLFLAG_RD, 0, "");
550    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
551	OID_AUTO, "supported_sleep_state", CTLTYPE_STRING | CTLFLAG_RD,
552	0, 0, acpi_supported_sleep_state_sysctl, "A", "");
553    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
554	OID_AUTO, "power_button_state", CTLTYPE_STRING | CTLFLAG_RW,
555	&sc->acpi_power_button_sx, 0, acpi_sleep_state_sysctl, "A", "");
556    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
557	OID_AUTO, "sleep_button_state", CTLTYPE_STRING | CTLFLAG_RW,
558	&sc->acpi_sleep_button_sx, 0, acpi_sleep_state_sysctl, "A", "");
559    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
560	OID_AUTO, "lid_switch_state", CTLTYPE_STRING | CTLFLAG_RW,
561	&sc->acpi_lid_switch_sx, 0, acpi_sleep_state_sysctl, "A", "");
562    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
563	OID_AUTO, "standby_state", CTLTYPE_STRING | CTLFLAG_RW,
564	&sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A", "");
565    SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
566	OID_AUTO, "suspend_state", CTLTYPE_STRING | CTLFLAG_RW,
567	&sc->acpi_suspend_sx, 0, acpi_sleep_state_sysctl, "A", "");
568    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
569	OID_AUTO, "sleep_delay", CTLFLAG_RW, &sc->acpi_sleep_delay, 0,
570	"sleep delay");
571    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
572	OID_AUTO, "s4bios", CTLFLAG_RW, &sc->acpi_s4bios, 0, "S4BIOS mode");
573    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
574	OID_AUTO, "verbose", CTLFLAG_RW, &sc->acpi_verbose, 0, "verbose mode");
575    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
576	OID_AUTO, "disable_on_reboot", CTLFLAG_RW,
577	&sc->acpi_do_disable, 0, "Disable ACPI when rebooting/halting system");
578    SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
579	OID_AUTO, "handle_reboot", CTLFLAG_RW,
580	&sc->acpi_handle_reboot, 0, "Use ACPI Reset Register to reboot");
581
582    /*
583     * Default to 1 second before sleeping to give some machines time to
584     * stabilize.
585     */
586    sc->acpi_sleep_delay = 1;
587    if (bootverbose)
588	sc->acpi_verbose = 1;
589    if ((env = getenv("hw.acpi.verbose")) != NULL) {
590	if (strcmp(env, "0") != 0)
591	    sc->acpi_verbose = 1;
592	freeenv(env);
593    }
594
595    /* Only enable S4BIOS by default if the FACS says it is available. */
596    status = AcpiGetTable(ACPI_SIG_FACS, 0, (ACPI_TABLE_HEADER **)&facs);
597    if (ACPI_FAILURE(status)) {
598	device_printf(dev, "couldn't get FACS: %s\n",
599		      AcpiFormatException(status));
600	error = ENXIO;
601	goto out;
602    }
603    if (facs->Flags & ACPI_FACS_S4_BIOS_PRESENT)
604	sc->acpi_s4bios = 1;
605
606    /* Probe all supported sleep states. */
607    acpi_sleep_states[ACPI_STATE_S0] = TRUE;
608    for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++)
609	if (ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB)))
610	    acpi_sleep_states[state] = TRUE;
611
612    /*
613     * Dispatch the default sleep state to devices.  The lid switch is set
614     * to UNKNOWN by default to avoid surprising users.
615     */
616    sc->acpi_power_button_sx = acpi_sleep_states[ACPI_STATE_S5] ?
617	ACPI_STATE_S5 : ACPI_STATE_UNKNOWN;
618    sc->acpi_lid_switch_sx = ACPI_STATE_UNKNOWN;
619    sc->acpi_standby_sx = acpi_sleep_states[ACPI_STATE_S1] ?
620	ACPI_STATE_S1 : ACPI_STATE_UNKNOWN;
621    sc->acpi_suspend_sx = acpi_sleep_states[ACPI_STATE_S3] ?
622	ACPI_STATE_S3 : ACPI_STATE_UNKNOWN;
623
624    /* Pick the first valid sleep state for the sleep button default. */
625    sc->acpi_sleep_button_sx = ACPI_STATE_UNKNOWN;
626    for (state = ACPI_STATE_S1; state <= ACPI_STATE_S4; state++)
627	if (acpi_sleep_states[state]) {
628	    sc->acpi_sleep_button_sx = state;
629	    break;
630	}
631
632    acpi_enable_fixed_events(sc);
633
634    /*
635     * Scan the namespace and attach/initialise children.
636     */
637
638    /* Register our shutdown handler. */
639    EVENTHANDLER_REGISTER(shutdown_final, acpi_shutdown_final, sc,
640	SHUTDOWN_PRI_LAST);
641
642    /*
643     * Register our acpi event handlers.
644     * XXX should be configurable eg. via userland policy manager.
645     */
646    EVENTHANDLER_REGISTER(acpi_sleep_event, acpi_system_eventhandler_sleep,
647	sc, ACPI_EVENT_PRI_LAST);
648    EVENTHANDLER_REGISTER(acpi_wakeup_event, acpi_system_eventhandler_wakeup,
649	sc, ACPI_EVENT_PRI_LAST);
650
651    /* Flag our initial states. */
652    sc->acpi_enabled = TRUE;
653    sc->acpi_sstate = ACPI_STATE_S0;
654    sc->acpi_sleep_disabled = TRUE;
655
656    /* Create the control device */
657    sc->acpi_dev_t = make_dev(&acpi_cdevsw, 0, UID_ROOT, GID_WHEEL, 0644,
658			      "acpi");
659    sc->acpi_dev_t->si_drv1 = sc;
660
661    if ((error = acpi_machdep_init(dev)))
662	goto out;
663
664    /* Register ACPI again to pass the correct argument of pm_func. */
665    power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc);
666
667    if (!acpi_disabled("bus"))
668	acpi_probe_children(dev);
669
670    /* Allow sleep request after a while. */
671    timeout(acpi_sleep_enable, sc, hz * ACPI_MINIMUM_AWAKETIME);
672
673    error = 0;
674
675 out:
676    return_VALUE (error);
677}
678
679static int
680acpi_suspend(device_t dev)
681{
682    device_t child, *devlist;
683    int error, i, numdevs, pstate;
684
685    GIANT_REQUIRED;
686
687    /* First give child devices a chance to suspend. */
688    error = bus_generic_suspend(dev);
689    if (error)
690	return (error);
691
692    /*
693     * Now, set them into the appropriate power state, usually D3.  If the
694     * device has an _SxD method for the next sleep state, use that power
695     * state instead.
696     */
697    error = device_get_children(dev, &devlist, &numdevs);
698    if (error)
699	return (error);
700    for (i = 0; i < numdevs; i++) {
701	/* If the device is not attached, we've powered it down elsewhere. */
702	child = devlist[i];
703	if (!device_is_attached(child))
704	    continue;
705
706	/*
707	 * Default to D3 for all sleep states.  The _SxD method is optional
708	 * so set the powerstate even if it's absent.
709	 */
710	pstate = PCI_POWERSTATE_D3;
711	error = acpi_device_pwr_for_sleep(device_get_parent(child),
712	    child, &pstate);
713	if ((error == 0 || error == ESRCH) && acpi_do_powerstate)
714	    pci_set_powerstate(child, pstate);
715    }
716    free(devlist, M_TEMP);
717    error = 0;
718
719    return (error);
720}
721
722static int
723acpi_resume(device_t dev)
724{
725    ACPI_HANDLE handle;
726    int i, numdevs, error;
727    device_t child, *devlist;
728
729    GIANT_REQUIRED;
730
731    /*
732     * Put all devices in D0 before resuming them.  Call _S0D on each one
733     * since some systems expect this.
734     */
735    error = device_get_children(dev, &devlist, &numdevs);
736    if (error)
737	return (error);
738    for (i = 0; i < numdevs; i++) {
739	child = devlist[i];
740	handle = acpi_get_handle(child);
741	if (handle)
742	    AcpiEvaluateObject(handle, "_S0D", NULL, NULL);
743	if (device_is_attached(child) && acpi_do_powerstate)
744	    pci_set_powerstate(child, PCI_POWERSTATE_D0);
745    }
746    free(devlist, M_TEMP);
747
748    return (bus_generic_resume(dev));
749}
750
751static int
752acpi_shutdown(device_t dev)
753{
754
755    GIANT_REQUIRED;
756
757    /* Allow children to shutdown first. */
758    bus_generic_shutdown(dev);
759
760    /*
761     * Enable any GPEs that are able to power-on the system (i.e., RTC).
762     * Also, disable any that are not valid for this state (most).
763     */
764    acpi_wake_prep_walk(ACPI_STATE_S5);
765
766    return (0);
767}
768
769/*
770 * Handle a new device being added
771 */
772static device_t
773acpi_add_child(device_t bus, int order, const char *name, int unit)
774{
775    struct acpi_device	*ad;
776    device_t		child;
777
778    if ((ad = malloc(sizeof(*ad), M_ACPIDEV, M_NOWAIT | M_ZERO)) == NULL)
779	return (NULL);
780
781    resource_list_init(&ad->ad_rl);
782
783    child = device_add_child_ordered(bus, order, name, unit);
784    if (child != NULL)
785	device_set_ivars(child, ad);
786    else
787	free(ad, M_ACPIDEV);
788    return (child);
789}
790
791static int
792acpi_print_child(device_t bus, device_t child)
793{
794    struct acpi_device	 *adev = device_get_ivars(child);
795    struct resource_list *rl = &adev->ad_rl;
796    int retval = 0;
797
798    retval += bus_print_child_header(bus, child);
799    retval += resource_list_print_type(rl, "port",  SYS_RES_IOPORT, "%#lx");
800    retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#lx");
801    retval += resource_list_print_type(rl, "irq",   SYS_RES_IRQ,    "%ld");
802    retval += resource_list_print_type(rl, "drq",   SYS_RES_DRQ,    "%ld");
803    if (device_get_flags(child))
804	retval += printf(" flags %#x", device_get_flags(child));
805    retval += bus_print_child_footer(bus, child);
806
807    return (retval);
808}
809
810/*
811 * If this device is an ACPI child but no one claimed it, attempt
812 * to power it off.  We'll power it back up when a driver is added.
813 *
814 * XXX Disabled for now since many necessary devices (like fdc and
815 * ATA) don't claim the devices we created for them but still expect
816 * them to be powered up.
817 */
818static void
819acpi_probe_nomatch(device_t bus, device_t child)
820{
821#ifdef ACPI_ENABLE_POWERDOWN_NODRIVER
822    pci_set_powerstate(child, PCI_POWERSTATE_D3);
823#endif
824}
825
826/*
827 * If a new driver has a chance to probe a child, first power it up.
828 *
829 * XXX Disabled for now (see acpi_probe_nomatch for details).
830 */
831static void
832acpi_driver_added(device_t dev, driver_t *driver)
833{
834    device_t child, *devlist;
835    int i, numdevs;
836
837    DEVICE_IDENTIFY(driver, dev);
838    if (device_get_children(dev, &devlist, &numdevs))
839	    return;
840    for (i = 0; i < numdevs; i++) {
841	child = devlist[i];
842	if (device_get_state(child) == DS_NOTPRESENT) {
843#ifdef ACPI_ENABLE_POWERDOWN_NODRIVER
844	    pci_set_powerstate(child, PCI_POWERSTATE_D0);
845	    if (device_probe_and_attach(child) != 0)
846		pci_set_powerstate(child, PCI_POWERSTATE_D3);
847#else
848	    device_probe_and_attach(child);
849#endif
850	}
851    }
852    free(devlist, M_TEMP);
853}
854
855/* Location hint for devctl(8) */
856static int
857acpi_child_location_str_method(device_t cbdev, device_t child, char *buf,
858    size_t buflen)
859{
860    struct acpi_device *dinfo = device_get_ivars(child);
861
862    if (dinfo->ad_handle)
863	snprintf(buf, buflen, "handle=%s", acpi_name(dinfo->ad_handle));
864    else
865	snprintf(buf, buflen, "unknown");
866    return (0);
867}
868
869/* PnP information for devctl(8) */
870static int
871acpi_child_pnpinfo_str_method(device_t cbdev, device_t child, char *buf,
872    size_t buflen)
873{
874    ACPI_BUFFER adbuf = {ACPI_ALLOCATE_BUFFER, NULL};
875    ACPI_DEVICE_INFO *adinfo;
876    struct acpi_device *dinfo = device_get_ivars(child);
877    char *end;
878    int error;
879
880    error = AcpiGetObjectInfo(dinfo->ad_handle, &adbuf);
881    adinfo = (ACPI_DEVICE_INFO *) adbuf.Pointer;
882    if (error)
883	snprintf(buf, buflen, "unknown");
884    else
885	snprintf(buf, buflen, "_HID=%s _UID=%lu",
886		 (adinfo->Valid & ACPI_VALID_HID) ?
887		 adinfo->HardwareId.Value : "none",
888		 (adinfo->Valid & ACPI_VALID_UID) ?
889		 strtoul(adinfo->UniqueId.Value, &end, 10) : 0);
890    if (adinfo)
891	AcpiOsFree(adinfo);
892
893    return (0);
894}
895
896/*
897 * Handle per-device ivars
898 */
899static int
900acpi_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
901{
902    struct acpi_device	*ad;
903
904    if ((ad = device_get_ivars(child)) == NULL) {
905	device_printf(child, "device has no ivars\n");
906	return (ENOENT);
907    }
908
909    /* ACPI and ISA compatibility ivars */
910    switch(index) {
911    case ACPI_IVAR_HANDLE:
912	*(ACPI_HANDLE *)result = ad->ad_handle;
913	break;
914    case ACPI_IVAR_MAGIC:
915	*(uintptr_t *)result = ad->ad_magic;
916	break;
917    case ACPI_IVAR_PRIVATE:
918	*(void **)result = ad->ad_private;
919	break;
920    case ACPI_IVAR_FLAGS:
921	*(int *)result = ad->ad_flags;
922	break;
923    case ISA_IVAR_VENDORID:
924    case ISA_IVAR_SERIAL:
925    case ISA_IVAR_COMPATID:
926	*(int *)result = -1;
927	break;
928    case ISA_IVAR_LOGICALID:
929	*(int *)result = acpi_isa_get_logicalid(child);
930	break;
931    default:
932	return (ENOENT);
933    }
934
935    return (0);
936}
937
938static int
939acpi_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
940{
941    struct acpi_device	*ad;
942
943    if ((ad = device_get_ivars(child)) == NULL) {
944	device_printf(child, "device has no ivars\n");
945	return (ENOENT);
946    }
947
948    switch(index) {
949    case ACPI_IVAR_HANDLE:
950	ad->ad_handle = (ACPI_HANDLE)value;
951	break;
952    case ACPI_IVAR_MAGIC:
953	ad->ad_magic = (uintptr_t)value;
954	break;
955    case ACPI_IVAR_PRIVATE:
956	ad->ad_private = (void *)value;
957	break;
958    case ACPI_IVAR_FLAGS:
959	ad->ad_flags = (int)value;
960	break;
961    default:
962	panic("bad ivar write request (%d)", index);
963	return (ENOENT);
964    }
965
966    return (0);
967}
968
969/*
970 * Handle child resource allocation/removal
971 */
972static struct resource_list *
973acpi_get_rlist(device_t dev, device_t child)
974{
975    struct acpi_device		*ad;
976
977    ad = device_get_ivars(child);
978    return (&ad->ad_rl);
979}
980
981static int
982acpi_match_resource_hint(device_t dev, int type, long value)
983{
984    struct acpi_device *ad = device_get_ivars(dev);
985    struct resource_list *rl = &ad->ad_rl;
986    struct resource_list_entry *rle;
987
988    STAILQ_FOREACH(rle, rl, link) {
989	if (rle->type != type)
990	    continue;
991	if (rle->start <= value && rle->end >= value)
992	    return (1);
993    }
994    return (0);
995}
996
997/*
998 * Wire device unit numbers based on resource matches in hints.
999 */
1000static void
1001acpi_hint_device_unit(device_t acdev, device_t child, const char *name,
1002    int *unitp)
1003{
1004    const char *s;
1005    long value;
1006    int line, matches, unit;
1007
1008    /*
1009     * Iterate over all the hints for the devices with the specified
1010     * name to see if one's resources are a subset of this device.
1011     */
1012    line = 0;
1013    for (;;) {
1014	if (resource_find_dev(&line, name, &unit, "at", NULL) != 0)
1015	    break;
1016
1017	/* Must have an "at" for acpi or isa. */
1018	resource_string_value(name, unit, "at", &s);
1019	if (!(strcmp(s, "acpi0") == 0 || strcmp(s, "acpi") == 0 ||
1020	    strcmp(s, "isa0") == 0 || strcmp(s, "isa") == 0))
1021	    continue;
1022
1023	/*
1024	 * Check for matching resources.  We must have at least one,
1025	 * and all resources specified have to match.
1026	 *
1027	 * XXX: We may want to revisit this to be more lenient and wire
1028	 * as long as it gets one match.
1029	 */
1030	matches = 0;
1031	if (resource_long_value(name, unit, "port", &value) == 0) {
1032	    if (acpi_match_resource_hint(child, SYS_RES_IOPORT, value))
1033		matches++;
1034	    else
1035		continue;
1036	}
1037	if (resource_long_value(name, unit, "maddr", &value) == 0) {
1038	    if (acpi_match_resource_hint(child, SYS_RES_MEMORY, value))
1039		matches++;
1040	    else
1041		continue;
1042	}
1043	if (resource_long_value(name, unit, "irq", &value) == 0) {
1044	    if (acpi_match_resource_hint(child, SYS_RES_IRQ, value))
1045		matches++;
1046	    else
1047		continue;
1048	}
1049	if (resource_long_value(name, unit, "drq", &value) == 0) {
1050	    if (acpi_match_resource_hint(child, SYS_RES_DRQ, value))
1051		matches++;
1052	    else
1053		continue;
1054	}
1055
1056	if (matches > 0) {
1057	    /* We have a winner! */
1058	    *unitp = unit;
1059	    break;
1060	}
1061    }
1062}
1063
1064/*
1065 * Pre-allocate/manage all memory and IO resources.  Since rman can't handle
1066 * duplicates, we merge any in the sysresource attach routine.
1067 */
1068static int
1069acpi_sysres_alloc(device_t dev)
1070{
1071    struct resource *res;
1072    struct resource_list *rl;
1073    struct resource_list_entry *rle;
1074    struct rman *rm;
1075    char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL };
1076    device_t *children;
1077    int child_count, i;
1078
1079    /*
1080     * Probe/attach any sysresource devices.  This would be unnecessary if we
1081     * had multi-pass probe/attach.
1082     */
1083    if (device_get_children(dev, &children, &child_count) != 0)
1084	return (ENXIO);
1085    for (i = 0; i < child_count; i++) {
1086	if (ACPI_ID_PROBE(dev, children[i], sysres_ids) != NULL)
1087	    device_probe_and_attach(children[i]);
1088    }
1089    free(children, M_TEMP);
1090
1091    rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev);
1092    STAILQ_FOREACH(rle, rl, link) {
1093	if (rle->res != NULL) {
1094	    device_printf(dev, "duplicate resource for %lx\n", rle->start);
1095	    continue;
1096	}
1097
1098	/* Only memory and IO resources are valid here. */
1099	switch (rle->type) {
1100	case SYS_RES_IOPORT:
1101	    rm = &acpi_rman_io;
1102	    break;
1103	case SYS_RES_MEMORY:
1104	    rm = &acpi_rman_mem;
1105	    break;
1106	default:
1107	    continue;
1108	}
1109
1110	/* Pre-allocate resource and add to our rman pool. */
1111	res = BUS_ALLOC_RESOURCE(device_get_parent(dev), dev, rle->type,
1112	    &rle->rid, rle->start, rle->start + rle->count - 1, rle->count, 0);
1113	if (res != NULL) {
1114	    rman_manage_region(rm, rman_get_start(res), rman_get_end(res));
1115	    rle->res = res;
1116	} else
1117	    device_printf(dev, "reservation of %lx, %lx (%d) failed\n",
1118		rle->start, rle->count, rle->type);
1119    }
1120    return (0);
1121}
1122
1123static struct resource *
1124acpi_alloc_resource(device_t bus, device_t child, int type, int *rid,
1125    u_long start, u_long end, u_long count, u_int flags)
1126{
1127    ACPI_RESOURCE ares;
1128    struct acpi_device *ad = device_get_ivars(child);
1129    struct resource_list *rl = &ad->ad_rl;
1130    struct resource_list_entry *rle;
1131    struct resource *res;
1132    struct rman *rm;
1133
1134    res = NULL;
1135
1136    /* We only handle memory and IO resources through rman. */
1137    switch (type) {
1138    case SYS_RES_IOPORT:
1139	rm = &acpi_rman_io;
1140	break;
1141    case SYS_RES_MEMORY:
1142	rm = &acpi_rman_mem;
1143	break;
1144    default:
1145	rm = NULL;
1146    }
1147
1148    ACPI_SERIAL_BEGIN(acpi);
1149
1150    /*
1151     * If this is an allocation of the "default" range for a given RID, and
1152     * we know what the resources for this device are (i.e., they're on the
1153     * child's resource list), use those start/end values.
1154     */
1155    if (bus == device_get_parent(child) && start == 0UL && end == ~0UL) {
1156	rle = resource_list_find(rl, type, *rid);
1157	if (rle == NULL)
1158	    goto out;
1159	start = rle->start;
1160	end = rle->end;
1161	count = rle->count;
1162    }
1163
1164    /*
1165     * If this is an allocation of a specific range, see if we can satisfy
1166     * the request from our system resource regions.  If we can't, pass the
1167     * request up to the parent.
1168     */
1169    if (start + count - 1 == end && rm != NULL)
1170	res = rman_reserve_resource(rm, start, end, count, flags & ~RF_ACTIVE,
1171	    child);
1172    if (res == NULL) {
1173	res = BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid,
1174	    start, end, count, flags);
1175    } else {
1176	rman_set_rid(res, *rid);
1177
1178	/* If requested, activate the resource using the parent's method. */
1179	if (flags & RF_ACTIVE)
1180	    if (bus_activate_resource(child, type, *rid, res) != 0) {
1181		rman_release_resource(res);
1182		res = NULL;
1183		goto out;
1184	    }
1185    }
1186
1187    if (res != NULL && device_get_parent(child) == bus)
1188	switch (type) {
1189	case SYS_RES_IRQ:
1190	    /*
1191	     * Since bus_config_intr() takes immediate effect, we cannot
1192	     * configure the interrupt associated with a device when we
1193	     * parse the resources but have to defer it until a driver
1194	     * actually allocates the interrupt via bus_alloc_resource().
1195	     *
1196	     * XXX: Should we handle the lookup failing?
1197	     */
1198	    if (ACPI_SUCCESS(acpi_lookup_irq_resource(child, *rid, res, &ares)))
1199		acpi_config_intr(child, &ares);
1200	    break;
1201	}
1202
1203out:
1204    ACPI_SERIAL_END(acpi);
1205    return (res);
1206}
1207
1208static int
1209acpi_release_resource(device_t bus, device_t child, int type, int rid,
1210    struct resource *r)
1211{
1212    struct rman *rm;
1213    int ret;
1214
1215    /* We only handle memory and IO resources through rman. */
1216    switch (type) {
1217    case SYS_RES_IOPORT:
1218	rm = &acpi_rman_io;
1219	break;
1220    case SYS_RES_MEMORY:
1221	rm = &acpi_rman_mem;
1222	break;
1223    default:
1224	rm = NULL;
1225    }
1226
1227    ACPI_SERIAL_BEGIN(acpi);
1228
1229    /*
1230     * If this resource belongs to one of our internal managers,
1231     * deactivate it and release it to the local pool.  If it doesn't,
1232     * pass this request up to the parent.
1233     */
1234    if (rm != NULL && rman_is_region_manager(r, rm)) {
1235	if (rman_get_flags(r) & RF_ACTIVE) {
1236	    ret = bus_deactivate_resource(child, type, rid, r);
1237	    if (ret != 0)
1238		goto out;
1239	}
1240	ret = rman_release_resource(r);
1241    } else
1242	ret = BUS_RELEASE_RESOURCE(device_get_parent(bus), child, type, rid, r);
1243
1244out:
1245    ACPI_SERIAL_END(acpi);
1246    return (ret);
1247}
1248
1249static void
1250acpi_delete_resource(device_t bus, device_t child, int type, int rid)
1251{
1252    struct resource_list *rl;
1253
1254    rl = acpi_get_rlist(bus, child);
1255    resource_list_delete(rl, type, rid);
1256}
1257
1258/* Allocate an IO port or memory resource, given its GAS. */
1259int
1260acpi_bus_alloc_gas(device_t dev, int *type, int *rid, ACPI_GENERIC_ADDRESS *gas,
1261    struct resource **res, u_int flags)
1262{
1263    int error, res_type;
1264
1265    error = ENOMEM;
1266    if (type == NULL || rid == NULL || gas == NULL || res == NULL)
1267	return (EINVAL);
1268
1269    /* We only support memory and IO spaces. */
1270    switch (gas->SpaceId) {
1271    case ACPI_ADR_SPACE_SYSTEM_MEMORY:
1272	res_type = SYS_RES_MEMORY;
1273	break;
1274    case ACPI_ADR_SPACE_SYSTEM_IO:
1275	res_type = SYS_RES_IOPORT;
1276	break;
1277    default:
1278	return (EOPNOTSUPP);
1279    }
1280
1281    /*
1282     * If the register width is less than 8, assume the BIOS author means
1283     * it is a bit field and just allocate a byte.
1284     */
1285    if (gas->BitWidth && gas->BitWidth < 8)
1286	gas->BitWidth = 8;
1287
1288    /* Validate the address after we're sure we support the space. */
1289    if (gas->Address == 0 || gas->BitWidth == 0)
1290	return (EINVAL);
1291
1292    bus_set_resource(dev, res_type, *rid, gas->Address,
1293	gas->BitWidth / 8);
1294    *res = bus_alloc_resource_any(dev, res_type, rid, RF_ACTIVE | flags);
1295    if (*res != NULL) {
1296	*type = res_type;
1297	error = 0;
1298    } else
1299	bus_delete_resource(dev, res_type, *rid);
1300
1301    return (error);
1302}
1303
1304/* Probe _HID and _CID for compatible ISA PNP ids. */
1305static uint32_t
1306acpi_isa_get_logicalid(device_t dev)
1307{
1308    ACPI_DEVICE_INFO	*devinfo;
1309    ACPI_BUFFER		buf;
1310    ACPI_HANDLE		h;
1311    ACPI_STATUS		error;
1312    u_int32_t		pnpid;
1313
1314    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
1315
1316    pnpid = 0;
1317    buf.Pointer = NULL;
1318    buf.Length = ACPI_ALLOCATE_BUFFER;
1319
1320    /* Fetch and validate the HID. */
1321    if ((h = acpi_get_handle(dev)) == NULL)
1322	goto out;
1323    error = AcpiGetObjectInfo(h, &buf);
1324    if (ACPI_FAILURE(error))
1325	goto out;
1326    devinfo = (ACPI_DEVICE_INFO *)buf.Pointer;
1327
1328    if ((devinfo->Valid & ACPI_VALID_HID) != 0)
1329	pnpid = PNP_EISAID(devinfo->HardwareId.Value);
1330
1331out:
1332    if (buf.Pointer != NULL)
1333	AcpiOsFree(buf.Pointer);
1334    return_VALUE (pnpid);
1335}
1336
1337static int
1338acpi_isa_get_compatid(device_t dev, uint32_t *cids, int count)
1339{
1340    ACPI_DEVICE_INFO	*devinfo;
1341    ACPI_BUFFER		buf;
1342    ACPI_HANDLE		h;
1343    ACPI_STATUS		error;
1344    uint32_t		*pnpid;
1345    int			valid, i;
1346
1347    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
1348
1349    pnpid = cids;
1350    valid = 0;
1351    buf.Pointer = NULL;
1352    buf.Length = ACPI_ALLOCATE_BUFFER;
1353
1354    /* Fetch and validate the CID */
1355    if ((h = acpi_get_handle(dev)) == NULL)
1356	goto out;
1357    error = AcpiGetObjectInfo(h, &buf);
1358    if (ACPI_FAILURE(error))
1359	goto out;
1360    devinfo = (ACPI_DEVICE_INFO *)buf.Pointer;
1361    if ((devinfo->Valid & ACPI_VALID_CID) == 0)
1362	goto out;
1363
1364    if (devinfo->CompatibilityId.Count < count)
1365	count = devinfo->CompatibilityId.Count;
1366    for (i = 0; i < count; i++) {
1367	if (strncmp(devinfo->CompatibilityId.Id[i].Value, "PNP", 3) != 0)
1368	    continue;
1369	*pnpid++ = PNP_EISAID(devinfo->CompatibilityId.Id[i].Value);
1370	valid++;
1371    }
1372
1373out:
1374    if (buf.Pointer != NULL)
1375	AcpiOsFree(buf.Pointer);
1376    return_VALUE (valid);
1377}
1378
1379static char *
1380acpi_device_id_probe(device_t bus, device_t dev, char **ids)
1381{
1382    ACPI_HANDLE h;
1383    int i;
1384
1385    h = acpi_get_handle(dev);
1386    if (ids == NULL || h == NULL || acpi_get_type(dev) != ACPI_TYPE_DEVICE)
1387	return (NULL);
1388
1389    /* Try to match one of the array of IDs with a HID or CID. */
1390    for (i = 0; ids[i] != NULL; i++) {
1391	if (acpi_MatchHid(h, ids[i]))
1392	    return (ids[i]);
1393    }
1394    return (NULL);
1395}
1396
1397static ACPI_STATUS
1398acpi_device_eval_obj(device_t bus, device_t dev, ACPI_STRING pathname,
1399    ACPI_OBJECT_LIST *parameters, ACPI_BUFFER *ret)
1400{
1401    ACPI_HANDLE h;
1402
1403    if (dev == NULL)
1404	h = ACPI_ROOT_OBJECT;
1405    else if ((h = acpi_get_handle(dev)) == NULL)
1406	return (AE_BAD_PARAMETER);
1407    return (AcpiEvaluateObject(h, pathname, parameters, ret));
1408}
1409
1410static int
1411acpi_device_pwr_for_sleep(device_t bus, device_t dev, int *dstate)
1412{
1413    struct acpi_softc *sc;
1414    ACPI_HANDLE handle;
1415    ACPI_STATUS status;
1416    char sxd[8];
1417    int error;
1418
1419    sc = device_get_softc(bus);
1420    handle = acpi_get_handle(dev);
1421
1422    /*
1423     * XXX If we find these devices, don't try to power them down.
1424     * The serial and IRDA ports on my T23 hang the system when
1425     * set to D3 and it appears that such legacy devices may
1426     * need special handling in their drivers.
1427     */
1428    if (handle == NULL ||
1429	acpi_MatchHid(handle, "PNP0500") ||
1430	acpi_MatchHid(handle, "PNP0501") ||
1431	acpi_MatchHid(handle, "PNP0502") ||
1432	acpi_MatchHid(handle, "PNP0510") ||
1433	acpi_MatchHid(handle, "PNP0511"))
1434	return (ENXIO);
1435
1436    /*
1437     * Override next state with the value from _SxD, if present.  If no
1438     * dstate argument was provided, don't fetch the return value.
1439     */
1440    snprintf(sxd, sizeof(sxd), "_S%dD", sc->acpi_sstate);
1441    if (dstate)
1442	status = acpi_GetInteger(handle, sxd, dstate);
1443    else
1444	status = AcpiEvaluateObject(handle, sxd, NULL, NULL);
1445
1446    switch (status) {
1447    case AE_OK:
1448	error = 0;
1449	break;
1450    case AE_NOT_FOUND:
1451	error = ESRCH;
1452	break;
1453    default:
1454	error = ENXIO;
1455	break;
1456    }
1457
1458    return (error);
1459}
1460
1461/* Callback arg for our implementation of walking the namespace. */
1462struct acpi_device_scan_ctx {
1463    acpi_scan_cb_t	user_fn;
1464    void		*arg;
1465    ACPI_HANDLE		parent;
1466};
1467
1468static ACPI_STATUS
1469acpi_device_scan_cb(ACPI_HANDLE h, UINT32 level, void *arg, void **retval)
1470{
1471    struct acpi_device_scan_ctx *ctx;
1472    device_t dev, old_dev;
1473    ACPI_STATUS status;
1474    ACPI_OBJECT_TYPE type;
1475
1476    /*
1477     * Skip this device if we think we'll have trouble with it or it is
1478     * the parent where the scan began.
1479     */
1480    ctx = (struct acpi_device_scan_ctx *)arg;
1481    if (acpi_avoid(h) || h == ctx->parent)
1482	return (AE_OK);
1483
1484    /* If this is not a valid device type (e.g., a method), skip it. */
1485    if (ACPI_FAILURE(AcpiGetType(h, &type)))
1486	return (AE_OK);
1487    if (type != ACPI_TYPE_DEVICE && type != ACPI_TYPE_PROCESSOR &&
1488	type != ACPI_TYPE_THERMAL && type != ACPI_TYPE_POWER)
1489	return (AE_OK);
1490
1491    /*
1492     * Call the user function with the current device.  If it is unchanged
1493     * afterwards, return.  Otherwise, we update the handle to the new dev.
1494     */
1495    old_dev = acpi_get_device(h);
1496    dev = old_dev;
1497    status = ctx->user_fn(h, &dev, level, ctx->arg);
1498    if (ACPI_FAILURE(status) || old_dev == dev)
1499	return (status);
1500
1501    /* Remove the old child and its connection to the handle. */
1502    if (old_dev != NULL) {
1503	device_delete_child(device_get_parent(old_dev), old_dev);
1504	AcpiDetachData(h, acpi_fake_objhandler);
1505    }
1506
1507    /* Recreate the handle association if the user created a device. */
1508    if (dev != NULL)
1509	AcpiAttachData(h, acpi_fake_objhandler, dev);
1510
1511    return (AE_OK);
1512}
1513
1514static ACPI_STATUS
1515acpi_device_scan_children(device_t bus, device_t dev, int max_depth,
1516    acpi_scan_cb_t user_fn, void *arg)
1517{
1518    ACPI_HANDLE h;
1519    struct acpi_device_scan_ctx ctx;
1520
1521    if (acpi_disabled("children"))
1522	return (AE_OK);
1523
1524    if (dev == NULL)
1525	h = ACPI_ROOT_OBJECT;
1526    else if ((h = acpi_get_handle(dev)) == NULL)
1527	return (AE_BAD_PARAMETER);
1528    ctx.user_fn = user_fn;
1529    ctx.arg = arg;
1530    ctx.parent = h;
1531    return (AcpiWalkNamespace(ACPI_TYPE_ANY, h, max_depth,
1532	acpi_device_scan_cb, &ctx, NULL));
1533}
1534
1535/*
1536 * Even though ACPI devices are not PCI, we use the PCI approach for setting
1537 * device power states since it's close enough to ACPI.
1538 */
1539static int
1540acpi_set_powerstate_method(device_t bus, device_t child, int state)
1541{
1542    ACPI_HANDLE h;
1543    ACPI_STATUS status;
1544    int error;
1545
1546    error = 0;
1547    h = acpi_get_handle(child);
1548    if (state < ACPI_STATE_D0 || state > ACPI_D_STATES_MAX)
1549	return (EINVAL);
1550    if (h == NULL)
1551	return (0);
1552
1553    /* Ignore errors if the power methods aren't present. */
1554    status = acpi_pwr_switch_consumer(h, state);
1555    if (ACPI_FAILURE(status) && status != AE_NOT_FOUND
1556	&& status != AE_BAD_PARAMETER)
1557	device_printf(bus, "failed to set ACPI power state D%d on %s: %s\n",
1558	    state, acpi_name(h), AcpiFormatException(status));
1559
1560    return (error);
1561}
1562
1563static int
1564acpi_isa_pnp_probe(device_t bus, device_t child, struct isa_pnp_id *ids)
1565{
1566    int			result, cid_count, i;
1567    uint32_t		lid, cids[8];
1568
1569    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
1570
1571    /*
1572     * ISA-style drivers attached to ACPI may persist and
1573     * probe manually if we return ENOENT.  We never want
1574     * that to happen, so don't ever return it.
1575     */
1576    result = ENXIO;
1577
1578    /* Scan the supplied IDs for a match */
1579    lid = acpi_isa_get_logicalid(child);
1580    cid_count = acpi_isa_get_compatid(child, cids, 8);
1581    while (ids && ids->ip_id) {
1582	if (lid == ids->ip_id) {
1583	    result = 0;
1584	    goto out;
1585	}
1586	for (i = 0; i < cid_count; i++) {
1587	    if (cids[i] == ids->ip_id) {
1588		result = 0;
1589		goto out;
1590	    }
1591	}
1592	ids++;
1593    }
1594
1595 out:
1596    if (result == 0 && ids->ip_desc)
1597	device_set_desc(child, ids->ip_desc);
1598
1599    return_VALUE (result);
1600}
1601
1602#if defined(__i386__) || defined(__amd64__)
1603/*
1604 * Look for a MCFG table.  If it is present, use the settings for
1605 * domain (segment) 0 to setup PCI config space access via the memory
1606 * map.
1607 */
1608static void
1609acpi_enable_pcie(void)
1610{
1611	ACPI_TABLE_HEADER *hdr;
1612	ACPI_MCFG_ALLOCATION *alloc, *end;
1613	ACPI_STATUS status;
1614
1615	status = AcpiGetTable(ACPI_SIG_MCFG, 1, &hdr);
1616	if (ACPI_FAILURE(status))
1617		return;
1618
1619	end = (ACPI_MCFG_ALLOCATION *)((char *)hdr + hdr->Length);
1620	alloc = (ACPI_MCFG_ALLOCATION *)((ACPI_TABLE_MCFG *)hdr + 1);
1621	while (alloc < end) {
1622		if (alloc->PciSegment == 0) {
1623			pcie_cfgregopen(alloc->Address, alloc->StartBusNumber,
1624			    alloc->EndBusNumber);
1625			return;
1626		}
1627		alloc++;
1628	}
1629}
1630#endif
1631
1632/*
1633 * Scan all of the ACPI namespace and attach child devices.
1634 *
1635 * We should only expect to find devices in the \_PR, \_TZ, \_SI, and
1636 * \_SB scopes, and \_PR and \_TZ became obsolete in the ACPI 2.0 spec.
1637 * However, in violation of the spec, some systems place their PCI link
1638 * devices in \, so we have to walk the whole namespace.  We check the
1639 * type of namespace nodes, so this should be ok.
1640 */
1641static void
1642acpi_probe_children(device_t bus)
1643{
1644
1645    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
1646
1647    /*
1648     * Scan the namespace and insert placeholders for all the devices that
1649     * we find.  We also probe/attach any early devices.
1650     *
1651     * Note that we use AcpiWalkNamespace rather than AcpiGetDevices because
1652     * we want to create nodes for all devices, not just those that are
1653     * currently present. (This assumes that we don't want to create/remove
1654     * devices as they appear, which might be smarter.)
1655     */
1656    ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "namespace scan\n"));
1657    AcpiWalkNamespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT, 100, acpi_probe_child,
1658	bus, NULL);
1659
1660    /* Pre-allocate resources for our rman from any sysresource devices. */
1661    acpi_sysres_alloc(bus);
1662
1663    /* Create any static children by calling device identify methods. */
1664    ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "device identify routines\n"));
1665    bus_generic_probe(bus);
1666
1667    /* Probe/attach all children, created staticly and from the namespace. */
1668    ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "first bus_generic_attach\n"));
1669    bus_generic_attach(bus);
1670
1671    /*
1672     * Some of these children may have attached others as part of their attach
1673     * process (eg. the root PCI bus driver), so rescan.
1674     */
1675    ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "second bus_generic_attach\n"));
1676    bus_generic_attach(bus);
1677
1678    /* Attach wake sysctls. */
1679    acpi_wake_sysctl_walk(bus);
1680
1681    ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "done attaching children\n"));
1682    return_VOID;
1683}
1684
1685/*
1686 * Determine the probe order for a given device.
1687 */
1688static void
1689acpi_probe_order(ACPI_HANDLE handle, int *order)
1690{
1691    ACPI_OBJECT_TYPE type;
1692
1693    /*
1694     * 1. I/O port and memory system resource holders
1695     * 2. Embedded controllers (to handle early accesses)
1696     * 3. PCI Link Devices
1697     * 100000. CPUs
1698     */
1699    AcpiGetType(handle, &type);
1700    if (acpi_MatchHid(handle, "PNP0C01") || acpi_MatchHid(handle, "PNP0C02"))
1701	*order = 1;
1702    else if (acpi_MatchHid(handle, "PNP0C09"))
1703	*order = 2;
1704    else if (acpi_MatchHid(handle, "PNP0C0F"))
1705	*order = 3;
1706    else if (type == ACPI_TYPE_PROCESSOR)
1707	*order = 100000;
1708}
1709
1710/*
1711 * Evaluate a child device and determine whether we might attach a device to
1712 * it.
1713 */
1714static ACPI_STATUS
1715acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status)
1716{
1717    ACPI_OBJECT_TYPE type;
1718    ACPI_HANDLE h;
1719    device_t bus, child;
1720    int order;
1721    char *handle_str, **search;
1722    static char *scopes[] = {"\\_PR_", "\\_TZ_", "\\_SI_", "\\_SB_", NULL};
1723
1724    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
1725
1726    /* Skip this device if we think we'll have trouble with it. */
1727    if (acpi_avoid(handle))
1728	return_ACPI_STATUS (AE_OK);
1729
1730    bus = (device_t)context;
1731    if (ACPI_SUCCESS(AcpiGetType(handle, &type))) {
1732	switch (type) {
1733	case ACPI_TYPE_DEVICE:
1734	case ACPI_TYPE_PROCESSOR:
1735	case ACPI_TYPE_THERMAL:
1736	case ACPI_TYPE_POWER:
1737	    if (acpi_disabled("children"))
1738		break;
1739
1740	    /*
1741	     * Since we scan from \, be sure to skip system scope objects.
1742	     * At least \_SB and \_TZ are detected as devices (ACPI-CA bug?)
1743	     */
1744	    handle_str = acpi_name(handle);
1745	    for (search = scopes; *search != NULL; search++) {
1746		if (strcmp(handle_str, *search) == 0)
1747		    break;
1748	    }
1749	    if (*search != NULL)
1750		break;
1751
1752	    /*
1753	     * Create a placeholder device for this node.  Sort the
1754	     * placeholder so that the probe/attach passes will run
1755	     * breadth-first.  Orders less than ACPI_DEV_BASE_ORDER
1756	     * are reserved for special objects (i.e., system
1757	     * resources).  CPU devices have a very high order to
1758	     * ensure they are probed after other devices.
1759	     */
1760	    ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "scanning '%s'\n", handle_str));
1761	    order = level * 10 + 100;
1762	    acpi_probe_order(handle, &order);
1763	    child = BUS_ADD_CHILD(bus, order, NULL, -1);
1764	    if (child == NULL)
1765		break;
1766
1767	    /* Associate the handle with the device_t and vice versa. */
1768	    acpi_set_handle(child, handle);
1769	    AcpiAttachData(handle, acpi_fake_objhandler, child);
1770
1771	    /*
1772	     * Check that the device is present.  If it's not present,
1773	     * leave it disabled (so that we have a device_t attached to
1774	     * the handle, but we don't probe it).
1775	     *
1776	     * XXX PCI link devices sometimes report "present" but not
1777	     * "functional" (i.e. if disabled).  Go ahead and probe them
1778	     * anyway since we may enable them later.
1779	     */
1780	    if (type == ACPI_TYPE_DEVICE && !acpi_DeviceIsPresent(child)) {
1781		/* Never disable PCI link devices. */
1782		if (acpi_MatchHid(handle, "PNP0C0F"))
1783		    break;
1784		/*
1785		 * Docking stations should remain enabled since the system
1786		 * may be undocked at boot.
1787		 */
1788		if (ACPI_SUCCESS(AcpiGetHandle(handle, "_DCK", &h)))
1789		    break;
1790
1791		device_disable(child);
1792		break;
1793	    }
1794
1795	    /*
1796	     * Get the device's resource settings and attach them.
1797	     * Note that if the device has _PRS but no _CRS, we need
1798	     * to decide when it's appropriate to try to configure the
1799	     * device.  Ignore the return value here; it's OK for the
1800	     * device not to have any resources.
1801	     */
1802	    acpi_parse_resources(child, handle, &acpi_res_parse_set, NULL);
1803	    break;
1804	}
1805    }
1806
1807    return_ACPI_STATUS (AE_OK);
1808}
1809
1810/*
1811 * AcpiAttachData() requires an object handler but never uses it.  This is a
1812 * placeholder object handler so we can store a device_t in an ACPI_HANDLE.
1813 */
1814void
1815acpi_fake_objhandler(ACPI_HANDLE h, UINT32 fn, void *data)
1816{
1817}
1818
1819static void
1820acpi_shutdown_final(void *arg, int howto)
1821{
1822    struct acpi_softc *sc = (struct acpi_softc *)arg;
1823    ACPI_STATUS status;
1824
1825    /*
1826     * XXX Shutdown code should only run on the BSP (cpuid 0).
1827     * Some chipsets do not power off the system correctly if called from
1828     * an AP.
1829     */
1830    if ((howto & RB_POWEROFF) != 0) {
1831	status = AcpiEnterSleepStatePrep(ACPI_STATE_S5);
1832	if (ACPI_FAILURE(status)) {
1833	    device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n",
1834		AcpiFormatException(status));
1835	    return;
1836	}
1837	device_printf(sc->acpi_dev, "Powering system off\n");
1838	ACPI_DISABLE_IRQS();
1839	status = AcpiEnterSleepState(ACPI_STATE_S5);
1840	if (ACPI_FAILURE(status))
1841	    device_printf(sc->acpi_dev, "power-off failed - %s\n",
1842		AcpiFormatException(status));
1843	else {
1844	    DELAY(1000000);
1845	    device_printf(sc->acpi_dev, "power-off failed - timeout\n");
1846	}
1847    } else if ((howto & RB_HALT) == 0 &&
1848	(AcpiGbl_FADT.Flags & ACPI_FADT_RESET_REGISTER) &&
1849	sc->acpi_handle_reboot) {
1850	/* Reboot using the reset register. */
1851	status = AcpiHwLowLevelWrite(
1852	    AcpiGbl_FADT.ResetRegister.BitWidth,
1853	    AcpiGbl_FADT.ResetValue, &AcpiGbl_FADT.ResetRegister);
1854	if (ACPI_FAILURE(status))
1855	    device_printf(sc->acpi_dev, "reset failed - %s\n",
1856		AcpiFormatException(status));
1857	else {
1858	    DELAY(1000000);
1859	    device_printf(sc->acpi_dev, "reset failed - timeout\n");
1860	}
1861    } else if (sc->acpi_do_disable && panicstr == NULL) {
1862	/*
1863	 * Only disable ACPI if the user requested it.  On some systems, writing
1864	 * the disable value to SMI_CMD hangs the system.
1865	 */
1866	device_printf(sc->acpi_dev, "Shutting down\n");
1867	AcpiTerminate();
1868    }
1869}
1870
1871static void
1872acpi_enable_fixed_events(struct acpi_softc *sc)
1873{
1874    static int	first_time = 1;
1875
1876    /* Enable and clear fixed events and install handlers. */
1877    if ((AcpiGbl_FADT.Flags & ACPI_FADT_POWER_BUTTON) == 0) {
1878	AcpiClearEvent(ACPI_EVENT_POWER_BUTTON);
1879	AcpiInstallFixedEventHandler(ACPI_EVENT_POWER_BUTTON,
1880				     acpi_event_power_button_sleep, sc);
1881	if (first_time)
1882	    device_printf(sc->acpi_dev, "Power Button (fixed)\n");
1883    }
1884    if ((AcpiGbl_FADT.Flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
1885	AcpiClearEvent(ACPI_EVENT_SLEEP_BUTTON);
1886	AcpiInstallFixedEventHandler(ACPI_EVENT_SLEEP_BUTTON,
1887				     acpi_event_sleep_button_sleep, sc);
1888	if (first_time)
1889	    device_printf(sc->acpi_dev, "Sleep Button (fixed)\n");
1890    }
1891
1892    first_time = 0;
1893}
1894
1895/*
1896 * Returns true if the device is actually present and should
1897 * be attached to.  A device with no _STA method is assumed present;
1898 * otherwise _STA must report it as 'present' or 'functioning'.
1899 */
1900BOOLEAN
1901acpi_DeviceIsPresent(device_t dev)
1902{
1903    ACPI_DEVICE_INFO	*devinfo;
1904    ACPI_HANDLE		h;
1905    ACPI_BUFFER		buf;
1906    ACPI_STATUS		error;
1907    int			ret;
1908
1909    ret = FALSE;
1910    if ((h = acpi_get_handle(dev)) == NULL)
1911	return (FALSE);
1912    buf.Pointer = NULL;
1913    buf.Length = ACPI_ALLOCATE_BUFFER;
1914    error = AcpiGetObjectInfo(h, &buf);
1915    if (ACPI_FAILURE(error))
1916	return (FALSE);
1917    devinfo = (ACPI_DEVICE_INFO *)buf.Pointer;
1918
1919    /* If no _STA method, must be present */
1920    if ((devinfo->Valid & ACPI_VALID_STA) == 0)
1921	ret = TRUE;
1922
1923    /* Return true for 'present' and 'functioning' */
1924    if (ACPI_DEVICE_PRESENT(devinfo->CurrentStatus))
1925	ret = TRUE;
1926
1927    AcpiOsFree(buf.Pointer);
1928    return (ret);
1929}
1930
1931/*
1932 * Returns true if the battery is actually present and inserted.
1933 */
1934BOOLEAN
1935acpi_BatteryIsPresent(device_t dev)
1936{
1937    ACPI_DEVICE_INFO	*devinfo;
1938    ACPI_HANDLE		h;
1939    ACPI_BUFFER		buf;
1940    ACPI_STATUS		error;
1941    int			ret;
1942
1943    ret = FALSE;
1944    if ((h = acpi_get_handle(dev)) == NULL)
1945	return (FALSE);
1946    buf.Pointer = NULL;
1947    buf.Length = ACPI_ALLOCATE_BUFFER;
1948    error = AcpiGetObjectInfo(h, &buf);
1949    if (ACPI_FAILURE(error))
1950	return (FALSE);
1951    devinfo = (ACPI_DEVICE_INFO *)buf.Pointer;
1952
1953    /* If no _STA method, must be present */
1954    if ((devinfo->Valid & ACPI_VALID_STA) == 0)
1955	ret = TRUE;
1956
1957    /* Return true for 'present', 'battery present', and 'functioning' */
1958    if (ACPI_BATTERY_PRESENT(devinfo->CurrentStatus))
1959	ret = TRUE;
1960
1961    AcpiOsFree(buf.Pointer);
1962    return (ret);
1963}
1964
1965/*
1966 * Match a HID string against a handle
1967 */
1968static BOOLEAN
1969acpi_MatchHid(ACPI_HANDLE h, const char *hid)
1970{
1971    ACPI_DEVICE_INFO	*devinfo;
1972    ACPI_BUFFER		buf;
1973    ACPI_STATUS		error;
1974    int			ret, i;
1975
1976    ret = FALSE;
1977    if (hid == NULL || h == NULL)
1978	return (ret);
1979    buf.Pointer = NULL;
1980    buf.Length = ACPI_ALLOCATE_BUFFER;
1981    error = AcpiGetObjectInfo(h, &buf);
1982    if (ACPI_FAILURE(error))
1983	return (ret);
1984    devinfo = (ACPI_DEVICE_INFO *)buf.Pointer;
1985
1986    if ((devinfo->Valid & ACPI_VALID_HID) != 0 &&
1987	strcmp(hid, devinfo->HardwareId.Value) == 0)
1988	    ret = TRUE;
1989    else if ((devinfo->Valid & ACPI_VALID_CID) != 0) {
1990	for (i = 0; i < devinfo->CompatibilityId.Count; i++) {
1991	    if (strcmp(hid, devinfo->CompatibilityId.Id[i].Value) == 0) {
1992		ret = TRUE;
1993		break;
1994	    }
1995	}
1996    }
1997
1998    AcpiOsFree(buf.Pointer);
1999    return (ret);
2000}
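
/*
 * Illustrative use of the helper above (mirrors how this file employs it):
 * check a handle against a well-known PNP ID before special-casing it.
 *
 *	if (acpi_MatchHid(handle, "PNP0C09"))
 *		printf("%s is an embedded controller\n", acpi_name(handle));
 */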
2001
2002/*
2003 * Return the handle of a named object within our scope, i.e., that of
2004 * (parent) or one of its parents.
2005 */
2006ACPI_STATUS
2007acpi_GetHandleInScope(ACPI_HANDLE parent, char *path, ACPI_HANDLE *result)
2008{
2009    ACPI_HANDLE		r;
2010    ACPI_STATUS		status;
2011
2012    /* Walk back up the tree to the root */
2013    for (;;) {
2014	status = AcpiGetHandle(parent, path, &r);
2015	if (ACPI_SUCCESS(status)) {
2016	    *result = r;
2017	    return (AE_OK);
2018	}
2019	/* XXX Return error here? */
2020	if (status != AE_NOT_FOUND)
2021	    return (AE_OK);
2022	if (ACPI_FAILURE(AcpiGetParent(parent, &r)))
2023	    return (AE_NOT_FOUND);
2024	parent = r;
2025    }
2026}
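
/*
 * Illustrative use (hypothetical caller; "_EJD" is only an example method
 * name): look up a named object relative to a device handle, falling back
 * to enclosing scopes.
 *
 *	ACPI_HANDLE tmp;
 *
 *	if (ACPI_SUCCESS(acpi_GetHandleInScope(handle, "_EJD", &tmp)))
 *		device_printf(dev, "found %s\n", acpi_name(tmp));
 */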
2027
2028/* Find the difference between two PM tick counts. */
2029uint32_t
2030acpi_TimerDelta(uint32_t end, uint32_t start)
2031{
2032    uint32_t delta;
2033
2034    if (end >= start)
2035	delta = end - start;
2036    else if (AcpiGbl_FADT.Flags & ACPI_FADT_32BIT_TIMER)
2037	delta = ((0xFFFFFFFF - start) + end + 1);
2038    else
2039	delta = ((0x00FFFFFF - start) + end + 1) & 0x00FFFFFF;
2040    return (delta);
2041}
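
/*
 * Worked example of the wraparound case above: with a 24-bit PM timer,
 * start = 0x00FFFFF0 and end = 0x00000010 gives
 * ((0x00FFFFFF - 0x00FFFFF0) + 0x00000010 + 1) & 0x00FFFFFF = 0x20,
 * i.e. 32 ticks elapsed across the rollover.
 */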
2042
2043/*
2044 * Allocate a buffer with a preset data size.
2045 */
2046ACPI_BUFFER *
2047acpi_AllocBuffer(int size)
2048{
2049    ACPI_BUFFER	*buf;
2050
2051    if ((buf = malloc(size + sizeof(*buf), M_ACPIDEV, M_NOWAIT)) == NULL)
2052	return (NULL);
2053    buf->Length = size;
2054    buf->Pointer = (void *)(buf + 1);
2055    return (buf);
2056}
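
/*
 * Illustrative use (hypothetical caller): the returned ACPI_BUFFER header
 * and its data are a single M_ACPIDEV allocation, so one free() releases
 * both.
 *
 *	ACPI_BUFFER *buf;
 *
 *	if ((buf = acpi_AllocBuffer(128)) != NULL) {
 *		... use buf->Pointer and buf->Length ...
 *		free(buf, M_ACPIDEV);
 *	}
 */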
2057
2058ACPI_STATUS
2059acpi_SetInteger(ACPI_HANDLE handle, char *path, UINT32 number)
2060{
2061    ACPI_OBJECT arg1;
2062    ACPI_OBJECT_LIST args;
2063
2064    arg1.Type = ACPI_TYPE_INTEGER;
2065    arg1.Integer.Value = number;
2066    args.Count = 1;
2067    args.Pointer = &arg1;
2068
2069    return (AcpiEvaluateObject(handle, path, &args, NULL));
2070}
2071
2072/*
2073 * Evaluate a path that should return an integer.
2074 */
2075ACPI_STATUS
2076acpi_GetInteger(ACPI_HANDLE handle, char *path, UINT32 *number)
2077{
2078    ACPI_STATUS	status;
2079    ACPI_BUFFER	buf;
2080    ACPI_OBJECT	param;
2081
2082    if (handle == NULL)
2083	handle = ACPI_ROOT_OBJECT;
2084
2085    /*
2086     * Assume that what we've been pointed at is an Integer object, or
2087     * a method that will return an Integer.
2088     */
2089    buf.Pointer = &param;
2090    buf.Length = sizeof(param);
2091    status = AcpiEvaluateObject(handle, path, NULL, &buf);
2092    if (ACPI_SUCCESS(status)) {
2093	if (param.Type == ACPI_TYPE_INTEGER)
2094	    *number = param.Integer.Value;
2095	else
2096	    status = AE_TYPE;
2097    }
2098
2099    /*
2100     * In some applications, a method that's expected to return an Integer
2101     * may instead return a Buffer (probably to simplify some internal
2102     * arithmetic).  We'll try to fetch whatever it is, and if it's a Buffer,
2103     * convert it into an Integer as best we can.
2104     *
2105     * This is a hack.
2106     */
2107    if (status == AE_BUFFER_OVERFLOW) {
2108	if ((buf.Pointer = AcpiOsAllocate(buf.Length)) == NULL) {
2109	    status = AE_NO_MEMORY;
2110	} else {
2111	    status = AcpiEvaluateObject(handle, path, NULL, &buf);
2112	    if (ACPI_SUCCESS(status))
2113		status = acpi_ConvertBufferToInteger(&buf, number);
2114	    AcpiOsFree(buf.Pointer);
2115	}
2116    }
2117    return (status);
2118}
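
/*
 * Illustrative use (hypothetical caller): evaluate a method expected to
 * yield an integer, e.g. a device's _STA.
 *
 *	UINT32 sta;
 *
 *	if (ACPI_SUCCESS(acpi_GetInteger(handle, "_STA", &sta)) &&
 *	    (sta & ACPI_STA_DEVICE_PRESENT) != 0)
 *		... device reports itself present ...
 */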
2119
2120ACPI_STATUS
2121acpi_ConvertBufferToInteger(ACPI_BUFFER *bufp, UINT32 *number)
2122{
2123    ACPI_OBJECT	*p;
2124    UINT8	*val;
2125    int		i;
2126
2127    p = (ACPI_OBJECT *)bufp->Pointer;
2128    if (p->Type == ACPI_TYPE_INTEGER) {
2129	*number = p->Integer.Value;
2130	return (AE_OK);
2131    }
2132    if (p->Type != ACPI_TYPE_BUFFER)
2133	return (AE_TYPE);
2134    if (p->Buffer.Length > sizeof(int))
2135	return (AE_BAD_DATA);
2136
2137    *number = 0;
2138    val = p->Buffer.Pointer;
2139    for (i = 0; i < p->Buffer.Length; i++)
2140	*number += val[i] << (i * 8);
2141    return (AE_OK);
2142}
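
/*
 * Example of the conversion above: a Buffer whose bytes are {0x34, 0x12}
 * is assembled least-significant byte first, yielding *number == 0x1234.
 */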
2143
2144/*
2145 * Iterate over the elements of a package object, calling the supplied
2146 * function for each element.
2147 *
2148 * XXX possible enhancement might be to abort traversal on error.
2149 */
2150ACPI_STATUS
2151acpi_ForeachPackageObject(ACPI_OBJECT *pkg,
2152	void (*func)(ACPI_OBJECT *comp, void *arg), void *arg)
2153{
2154    ACPI_OBJECT	*comp;
2155    int		i;
2156
2157    if (pkg == NULL || pkg->Type != ACPI_TYPE_PACKAGE)
2158	return (AE_BAD_PARAMETER);
2159
2160    /* Iterate over components */
2161    i = 0;
2162    comp = pkg->Package.Elements;
2163    for (; i < pkg->Package.Count; i++, comp++)
2164	func(comp, arg);
2165
2166    return (AE_OK);
2167}
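
/*
 * Illustrative use (hypothetical caller and callback):
 *
 *	static void
 *	count_ints(ACPI_OBJECT *comp, void *arg)
 *	{
 *		if (comp->Type == ACPI_TYPE_INTEGER)
 *			(*(int *)arg)++;
 *	}
 *
 *	int nints = 0;
 *	acpi_ForeachPackageObject(pkg, count_ints, &nints);
 */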
2168
2169/*
2170 * Find the (index)th resource object in a set.
2171 */
2172ACPI_STATUS
2173acpi_FindIndexedResource(ACPI_BUFFER *buf, int index, ACPI_RESOURCE **resp)
2174{
2175    ACPI_RESOURCE	*rp;
2176    int			i;
2177
2178    rp = (ACPI_RESOURCE *)buf->Pointer;
2179    i = index;
2180    while (i-- > 0) {
2181	/* Range check */
2182	if (rp > (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length))
2183	    return (AE_BAD_PARAMETER);
2184
2185	/* Check for terminator */
2186	if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0)
2187	    return (AE_NOT_FOUND);
2188	rp = ACPI_NEXT_RESOURCE(rp);
2189    }
2190    if (resp != NULL)
2191	*resp = rp;
2192
2193    return (AE_OK);
2194}
2195
2196/*
2197 * Append an ACPI_RESOURCE to an ACPI_BUFFER.
2198 *
2199 * Given a pointer to an ACPI_RESOURCE structure, expand the ACPI_BUFFER
2200 * provided to contain it.  If the ACPI_BUFFER is empty, allocate a sensible
2201 * backing block.  If the ACPI_RESOURCE is NULL, return an empty set of
2202 * resources.
2203 */
2204#define ACPI_INITIAL_RESOURCE_BUFFER_SIZE	512
2205
2206ACPI_STATUS
2207acpi_AppendBufferResource(ACPI_BUFFER *buf, ACPI_RESOURCE *res)
2208{
2209    ACPI_RESOURCE	*rp;
2210    void		*newp;
2211
2212    /* Initialise the buffer if necessary. */
2213    if (buf->Pointer == NULL) {
2214	buf->Length = ACPI_INITIAL_RESOURCE_BUFFER_SIZE;
2215	if ((buf->Pointer = AcpiOsAllocate(buf->Length)) == NULL)
2216	    return (AE_NO_MEMORY);
2217	rp = (ACPI_RESOURCE *)buf->Pointer;
2218	rp->Type = ACPI_RESOURCE_TYPE_END_TAG;
2219	rp->Length = 0;
2220    }
2221    if (res == NULL)
2222	return (AE_OK);
2223
2224    /*
2225     * Scan the current buffer looking for the terminator.
2226     * This will either find the terminator or hit the end
2227     * of the buffer and return an error.
2228     */
2229    rp = (ACPI_RESOURCE *)buf->Pointer;
2230    for (;;) {
2231	/* Range check, don't go outside the buffer */
2232	if (rp >= (ACPI_RESOURCE *)((u_int8_t *)buf->Pointer + buf->Length))
2233	    return (AE_BAD_PARAMETER);
2234	if (rp->Type == ACPI_RESOURCE_TYPE_END_TAG || rp->Length == 0)
2235	    break;
2236	rp = ACPI_NEXT_RESOURCE(rp);
2237    }
2238
2239    /*
2240     * Check the size of the buffer and expand if required.
2241     *
2242     * Required size is:
2243     *	size of existing resources before terminator +
2244     *	size of new resource and header +
2245     * 	size of terminator.
2246     *
2247     * Note that this loop should really only run once, unless
2248     * for some reason we are stuffing a *really* huge resource.
2249     */
2250    while ((((u_int8_t *)rp - (u_int8_t *)buf->Pointer) +
2251	    res->Length + ACPI_RS_SIZE_NO_DATA +
2252	    ACPI_RS_SIZE_MIN) >= buf->Length) {
2253	if ((newp = AcpiOsAllocate(buf->Length * 2)) == NULL)
2254	    return (AE_NO_MEMORY);
2255	bcopy(buf->Pointer, newp, buf->Length);
2256	rp = (ACPI_RESOURCE *)((u_int8_t *)newp +
2257			       ((u_int8_t *)rp - (u_int8_t *)buf->Pointer));
2258	AcpiOsFree(buf->Pointer);
2259	buf->Pointer = newp;
2260	buf->Length += buf->Length;
2261    }
2262
2263    /* Insert the new resource. */
2264    bcopy(res, rp, res->Length + ACPI_RS_SIZE_NO_DATA);
2265
2266    /* And add the terminator. */
2267    rp = ACPI_NEXT_RESOURCE(rp);
2268    rp->Type = ACPI_RESOURCE_TYPE_END_TAG;
2269    rp->Length = 0;
2270
2271    return (AE_OK);
2272}
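
/*
 * Illustrative use (hypothetical caller): start with an empty ACPI_BUFFER
 * (Pointer == NULL) and append ACPI_RESOURCE entries; the terminator is
 * maintained by the routine.  The backing storage comes from
 * AcpiOsAllocate(), so release it with AcpiOsFree() when done.
 *
 *	ACPI_BUFFER rsrcs;
 *
 *	rsrcs.Pointer = NULL;
 *	acpi_AppendBufferResource(&rsrcs, NULL);	... empty set ...
 *	acpi_AppendBufferResource(&rsrcs, res);		... add a resource ...
 *	AcpiOsFree(rsrcs.Pointer);
 */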
2273
2274/*
2275 * Set interrupt model.
2276 */
2277ACPI_STATUS
2278acpi_SetIntrModel(int model)
2279{
2280
2281    return (acpi_SetInteger(ACPI_ROOT_OBJECT, "_PIC", model));
2282}
2283
2284/*
2285 * DEPRECATED.  This interface has serious deficiencies and will be
2286 * removed.
2287 *
2288 * Immediately enter the sleep state.  In the old model, acpiconf(8) ran
2289 * rc.suspend and rc.resume so we don't have to notify devd(8) to do this.
2290 */
2291ACPI_STATUS
2292acpi_SetSleepState(struct acpi_softc *sc, int state)
2293{
2294    static int once;
2295
2296    if (!once) {
2297	device_printf(sc->acpi_dev,
2298"warning: acpi_SetSleepState() deprecated, need to update your software\n");
2299	once = 1;
2300    }
2301    return (acpi_EnterSleepState(sc, state));
2302}
2303
2304#if defined(__amd64__) || defined(__i386__)
2305static void
2306acpi_sleep_force(void *arg)
2307{
2308    struct acpi_softc *sc = (struct acpi_softc *)arg;
2309
2310    device_printf(sc->acpi_dev,
2311	"suspend request timed out, forcing sleep now\n");
2312    if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate)))
2313	device_printf(sc->acpi_dev, "force sleep state S%d failed\n",
2314	    sc->acpi_next_sstate);
2315}
2316#endif
2317
2318/*
2319 * Request that the system enter the given suspend state.  All /dev/apm
2320 * devices and devd(8) will be notified.  Userland then has a chance to
2321 * save state and acknowledge the request.  The system sleeps once all
2322 * acks are in.
2323 */
2324int
2325acpi_ReqSleepState(struct acpi_softc *sc, int state)
2326{
2327#if defined(__i386__)
2328    struct apm_clone_data *clone;
2329#endif
2330
2331    if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX)
2332	return (EINVAL);
2333    if (!acpi_sleep_states[state])
2334	return (EOPNOTSUPP);
2335
2336    /* S5 (soft-off) should be entered directly with no waiting. */
2337    if (state == ACPI_STATE_S5) {
2338	if (ACPI_SUCCESS(acpi_EnterSleepState(sc, state)))
2339	    return (0);
2340	else
2341	    return (ENXIO);
2342    }
2343
2344#if defined(__amd64__) || defined(__i386__)
2345    /* If a suspend request is already in progress, just return. */
2346    ACPI_LOCK(acpi);
2347    if (sc->acpi_next_sstate != 0) {
2348    	ACPI_UNLOCK(acpi);
2349	return (0);
2350    }
2351
2352    /* Record the pending state and notify all apm devices. */
2353    sc->acpi_next_sstate = state;
2354#if defined(__i386__)
2355    STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) {
2356	clone->notify_status = APM_EV_NONE;
2357	if ((clone->flags & ACPI_EVF_DEVD) == 0) {
2358	    selwakeuppri(&clone->sel_read, PZERO);
2359	    KNOTE_UNLOCKED(&clone->sel_read.si_note, 0);
2360	}
2361    }
2362#endif
2363
2364    /* If devd(8) is not running, immediately enter the sleep state. */
2365    if (!devctl_process_running()) {
2366	ACPI_UNLOCK(acpi);
2367	if (ACPI_SUCCESS(acpi_EnterSleepState(sc, sc->acpi_next_sstate))) {
2368	    return (0);
2369	} else {
2370	    return (ENXIO);
2371	}
2372    }
2373
2374    /*
2375     * Set a timeout to fire if userland doesn't ack the suspend request
2376     * in time.  This way we still eventually go to sleep if we were
2377     * overheating or running low on battery, even if userland is hung.
2378     * We cancel this timeout once all userland acks are in or the
2379     * suspend request is aborted.
2380     */
2381    callout_reset(&sc->susp_force_to, 10 * hz, acpi_sleep_force, sc);
2382    ACPI_UNLOCK(acpi);
2383
2384    /* Now notify devd(8) also. */
2385    acpi_UserNotify("Suspend", ACPI_ROOT_OBJECT, state);
2386
2387    return (0);
2388#else
2389    /* This platform does not support acpi suspend/resume. */
2390    return (EOPNOTSUPP);
2391#endif
2392}
2393
2394/*
2395 * Acknowledge (or reject) a pending sleep state.  The caller has
2396 * prepared for suspend and is now ready for it to proceed.  If the
2397 * error argument is non-zero, it indicates suspend should be cancelled
2398 * and gives an errno value describing why.  Once all votes are in,
2399 * we suspend the system.
2400 */
2401int
2402acpi_AckSleepState(struct apm_clone_data *clone, int error)
2403{
2404#if defined(__amd64__) || defined(__i386__)
2405    struct acpi_softc *sc;
2406    int ret, sleeping;
2407
2408    /* If no pending sleep state, return an error. */
2409    ACPI_LOCK(acpi);
2410    sc = clone->acpi_sc;
2411    if (sc->acpi_next_sstate == 0) {
2412    	ACPI_UNLOCK(acpi);
2413	return (ENXIO);
2414    }
2415
2416    /* Caller wants to abort suspend process. */
2417    if (error) {
2418	sc->acpi_next_sstate = 0;
2419	callout_stop(&sc->susp_force_to);
2420	device_printf(sc->acpi_dev,
2421	    "listener on %s cancelled the pending suspend\n",
2422	    devtoname(clone->cdev));
2423    	ACPI_UNLOCK(acpi);
2424	return (0);
2425    }
2426
2427    /*
2428     * Mark this device as acking the suspend request.  Then, walk through
2429     * all devices, seeing if they agree yet.  We only count devices that
2430     * are writable since read-only devices couldn't ack the request.
2431     */
2432    sleeping = TRUE;
2433#if defined(__i386__)
2434    clone->notify_status = APM_EV_ACKED;
2435    STAILQ_FOREACH(clone, &sc->apm_cdevs, entries) {
2436	if ((clone->flags & ACPI_EVF_WRITE) != 0 &&
2437	    clone->notify_status != APM_EV_ACKED) {
2438	    sleeping = FALSE;
2439	    break;
2440	}
2441    }
2442#endif
2443
2444    /* If all devices have voted "yes", we will suspend now. */
2445    if (sleeping)
2446	callout_stop(&sc->susp_force_to);
2447    ACPI_UNLOCK(acpi);
2448    ret = 0;
2449    if (sleeping) {
2450	if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate)))
2451		ret = ENODEV;
2452    }
2453    return (ret);
2454#else
2455    /* This platform does not support acpi suspend/resume. */
2456    return (EOPNOTSUPP);
2457#endif
2458}
2459
2460static void
2461acpi_sleep_enable(void *arg)
2462{
2463    struct acpi_softc	*sc = (struct acpi_softc *)arg;
2464
2465    /* Reschedule if the system is not fully up and running. */
2466    if (!AcpiGbl_SystemAwakeAndRunning) {
2467	timeout(acpi_sleep_enable, sc, hz * ACPI_MINIMUM_AWAKETIME);
2468	return;
2469    }
2470
2471    ACPI_LOCK(acpi);
2472    sc->acpi_sleep_disabled = FALSE;
2473    ACPI_UNLOCK(acpi);
2474}
2475
2476static ACPI_STATUS
2477acpi_sleep_disable(struct acpi_softc *sc)
2478{
2479    ACPI_STATUS		status;
2480
2481    /* Fail if the system is not fully up and running. */
2482    if (!AcpiGbl_SystemAwakeAndRunning)
2483	return (AE_ERROR);
2484
2485    ACPI_LOCK(acpi);
2486    status = sc->acpi_sleep_disabled ? AE_ERROR : AE_OK;
2487    sc->acpi_sleep_disabled = TRUE;
2488    ACPI_UNLOCK(acpi);
2489
2490    return (status);
2491}
2492
2493enum acpi_sleep_state {
2494    ACPI_SS_NONE,
2495    ACPI_SS_GPE_SET,
2496    ACPI_SS_DEV_SUSPEND,
2497    ACPI_SS_SLP_PREP,
2498    ACPI_SS_SLEPT,
2499};
2500
2501/*
2502 * Enter the desired system sleep state.
2503 *
2504 * Currently we support S1-S5, but S4 is only supported via S4BIOS.
2505 */
2506static ACPI_STATUS
2507acpi_EnterSleepState(struct acpi_softc *sc, int state)
2508{
2509    ACPI_STATUS	status;
2510    enum acpi_sleep_state slp_state;
2511
2512    ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
2513
2514    if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX)
2515	return_ACPI_STATUS (AE_BAD_PARAMETER);
2516    if (!acpi_sleep_states[state]) {
2517	device_printf(sc->acpi_dev, "Sleep state S%d not supported by BIOS\n",
2518	    state);
2519	return (AE_SUPPORT);
2520    }
2521
2522    /* Re-entry once we're suspending is not allowed. */
2523    status = acpi_sleep_disable(sc);
2524    if (ACPI_FAILURE(status)) {
2525	device_printf(sc->acpi_dev,
2526	    "suspend request ignored (not ready yet)\n");
2527	return (status);
2528    }
2529
2530    if (state == ACPI_STATE_S5) {
2531	/*
2532	 * Shut down cleanly and power off.  This will call us back through the
2533	 * shutdown handlers.
2534	 */
2535	shutdown_nice(RB_POWEROFF);
2536	return_ACPI_STATUS (AE_OK);
2537    }
2538
2539#ifdef SMP
2540    thread_lock(curthread);
2541    sched_bind(curthread, 0);
2542    thread_unlock(curthread);
2543#endif
2544
2545    /*
2546     * Be sure to hold Giant across DEVICE_SUSPEND/RESUME since non-MPSAFE
2547     * drivers need this.
2548     */
2549    mtx_lock(&Giant);
2550
2551    slp_state = ACPI_SS_NONE;
2552
2553    sc->acpi_sstate = state;
2554
2555    /* Enable any GPEs as appropriate and requested by the user. */
2556    acpi_wake_prep_walk(state);
2557    slp_state = ACPI_SS_GPE_SET;
2558
2559    /*
2560     * Inform all devices that we are going to sleep.  If at least one
2561     * device fails, DEVICE_SUSPEND() automatically resumes the tree.
2562     *
2563     * XXX A two-pass approach with a 'veto' pass followed by a
2564     * "real thing" pass would be better, but the current
2565     * bus interface does not provide for this.
2566     */
2567    if (DEVICE_SUSPEND(root_bus) != 0) {
2568	device_printf(sc->acpi_dev, "device_suspend failed\n");
2569	goto backout;
2570    }
2571    slp_state = ACPI_SS_DEV_SUSPEND;
2572
2573    /* If testing device suspend only, back out of everything here. */
2574    if (acpi_susp_bounce)
2575	goto backout;
2576
2577    status = AcpiEnterSleepStatePrep(state);
2578    if (ACPI_FAILURE(status)) {
2579	device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n",
2580		      AcpiFormatException(status));
2581	goto backout;
2582    }
2583    slp_state = ACPI_SS_SLP_PREP;
2584
2585    if (sc->acpi_sleep_delay > 0)
2586	DELAY(sc->acpi_sleep_delay * 1000000);
2587
2588    if (state != ACPI_STATE_S1) {
2589	acpi_sleep_machdep(sc, state);
2590
2591	/* Re-enable ACPI hardware on wakeup from sleep state 4. */
2592	if (state == ACPI_STATE_S4)
2593	    AcpiEnable();
2594    } else {
2595	ACPI_DISABLE_IRQS();
2596	status = AcpiEnterSleepState(state);
2597	if (ACPI_FAILURE(status)) {
2598	    device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n",
2599			  AcpiFormatException(status));
2600	    goto backout;
2601	}
2602    }
2603    slp_state = ACPI_SS_SLEPT;
2604
2605    /*
2606     * Back out state according to how far along we got in the suspend
2607     * process.  This handles both the error and success cases.
2608     */
2609backout:
2610    sc->acpi_next_sstate = 0;
2611    if (slp_state >= ACPI_SS_GPE_SET) {
2612	acpi_wake_prep_walk(state);
2613	sc->acpi_sstate = ACPI_STATE_S0;
2614    }
2615    if (slp_state >= ACPI_SS_SLP_PREP)
2616	AcpiLeaveSleepState(state);
2617    if (slp_state >= ACPI_SS_DEV_SUSPEND)
2618	DEVICE_RESUME(root_bus);
2619    if (slp_state >= ACPI_SS_SLEPT)
2620	acpi_enable_fixed_events(sc);
2621
2622    mtx_unlock(&Giant);
2623
2624#ifdef SMP
2625    thread_lock(curthread);
2626    sched_unbind(curthread);
2627    thread_unlock(curthread);
2628#endif
2629
2630    /* Allow another sleep request after a while. */
2631    timeout(acpi_sleep_enable, sc, hz * ACPI_MINIMUM_AWAKETIME);
2632
2633    /* Run /etc/rc.resume after we are back. */
2634    if (devctl_process_running())
2635	acpi_UserNotify("Resume", ACPI_ROOT_OBJECT, state);
2636
2637    return_ACPI_STATUS (status);
2638}
2639
2640void
2641acpi_resync_clock(struct acpi_softc *sc)
2642{
2643
2644    if (!acpi_reset_clock)
2645	return;
2646
2647    /*
2648     * Warm up timecounter again and reset system clock.
2649     */
2650    (void)timecounter->tc_get_timecount(timecounter);
2651    (void)timecounter->tc_get_timecount(timecounter);
2652    inittodr(time_second + sc->acpi_sleep_delay);
2653}
2654
2655/* Initialize a device's wake GPE. */
2656int
2657acpi_wake_init(device_t dev, int type)
2658{
2659    struct acpi_prw_data prw;
2660
2661    /* Evaluate _PRW to find the GPE. */
2662    if (acpi_parse_prw(acpi_get_handle(dev), &prw) != 0)
2663	return (ENXIO);
2664
2665    /* Set the requested type for the GPE (runtime, wake, or both). */
2666    if (ACPI_FAILURE(AcpiSetGpeType(prw.gpe_handle, prw.gpe_bit, type))) {
2667	device_printf(dev, "set GPE type failed\n");
2668	return (ENXIO);
2669    }
2670
2671    return (0);
2672}
2673
2674/* Enable or disable the device's wake GPE. */
2675int
2676acpi_wake_set_enable(device_t dev, int enable)
2677{
2678    struct acpi_prw_data prw;
2679    ACPI_STATUS status;
2680    int flags;
2681
2682    /* Make sure the device supports waking the system and get the GPE. */
2683    if (acpi_parse_prw(acpi_get_handle(dev), &prw) != 0)
2684	return (ENXIO);
2685
2686    flags = acpi_get_flags(dev);
2687    if (enable) {
2688	status = AcpiEnableGpe(prw.gpe_handle, prw.gpe_bit, ACPI_NOT_ISR);
2689	if (ACPI_FAILURE(status)) {
2690	    device_printf(dev, "enable wake failed\n");
2691	    return (ENXIO);
2692	}
2693	acpi_set_flags(dev, flags | ACPI_FLAG_WAKE_ENABLED);
2694    } else {
2695	status = AcpiDisableGpe(prw.gpe_handle, prw.gpe_bit, ACPI_NOT_ISR);
2696	if (ACPI_FAILURE(status)) {
2697	    device_printf(dev, "disable wake failed\n");
2698	    return (ENXIO);
2699	}
2700	acpi_set_flags(dev, flags & ~ACPI_FLAG_WAKE_ENABLED);
2701    }
2702
2703    return (0);
2704}
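
/*
 * Illustrative sequence for a wake-capable child driver (hedged sketch):
 * set up the GPE once via acpi_wake_init() with a type accepted by
 * AcpiSetGpeType(), then toggle wake with acpi_wake_set_enable().  The
 * per-device "wake" sysctl attached below drives the same path from
 * userland.
 *
 *	acpi_wake_init(dev, type);
 *	acpi_wake_set_enable(dev, 1);	... arm wake for this device ...
 */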
2705
2706static int
2707acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate)
2708{
2709    struct acpi_prw_data prw;
2710    device_t dev;
2711
2712    /* Check that this is a wake-capable device and get its GPE. */
2713    if (acpi_parse_prw(handle, &prw) != 0)
2714	return (ENXIO);
2715    dev = acpi_get_device(handle);
2716
2717    /*
2718     * The destination sleep state must be less than (i.e., higher power)
2719     * or equal to the value specified by _PRW.  If this GPE cannot be
2720     * enabled for the next sleep state, then disable it.  If it can and
2721     * the user requested it be enabled, turn on any required power resources
2722     * and set _PSW.
2723     */
2724    if (sstate > prw.lowest_wake) {
2725	AcpiDisableGpe(prw.gpe_handle, prw.gpe_bit, ACPI_NOT_ISR);
2726	if (bootverbose)
2727	    device_printf(dev, "wake_prep disabled wake for %s (S%d)\n",
2728		acpi_name(handle), sstate);
2729    } else if (dev && (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) != 0) {
2730	acpi_pwr_wake_enable(handle, 1);
2731	acpi_SetInteger(handle, "_PSW", 1);
2732	if (bootverbose)
2733	    device_printf(dev, "wake_prep enabled for %s (S%d)\n",
2734		acpi_name(handle), sstate);
2735    }
2736
2737    return (0);
2738}
2739
2740static int
2741acpi_wake_run_prep(ACPI_HANDLE handle, int sstate)
2742{
2743    struct acpi_prw_data prw;
2744    device_t dev;
2745
2746    /*
2747     * Check that this is a wake-capable device and get its GPE.  Return
2748     * now if the user didn't enable this device for wake.
2749     */
2750    if (acpi_parse_prw(handle, &prw) != 0)
2751	return (ENXIO);
2752    dev = acpi_get_device(handle);
2753    if (dev == NULL || (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) == 0)
2754	return (0);
2755
2756    /*
2757     * If this GPE couldn't be enabled for the previous sleep state, it was
2758     * disabled before going to sleep so re-enable it.  If it was enabled,
2759     * clear _PSW and turn off any power resources it used.
2760     */
2761    if (sstate > prw.lowest_wake) {
2762	AcpiEnableGpe(prw.gpe_handle, prw.gpe_bit, ACPI_NOT_ISR);
2763	if (bootverbose)
2764	    device_printf(dev, "run_prep re-enabled %s\n", acpi_name(handle));
2765    } else {
2766	acpi_SetInteger(handle, "_PSW", 0);
2767	acpi_pwr_wake_enable(handle, 0);
2768	if (bootverbose)
2769	    device_printf(dev, "run_prep cleaned up for %s\n",
2770		acpi_name(handle));
2771    }
2772
2773    return (0);
2774}
2775
2776static ACPI_STATUS
2777acpi_wake_prep(ACPI_HANDLE handle, UINT32 level, void *context, void **status)
2778{
2779    int sstate;
2780
2781    /* If suspending, run the sleep prep function, otherwise wake. */
2782    sstate = *(int *)context;
2783    if (AcpiGbl_SystemAwakeAndRunning)
2784	acpi_wake_sleep_prep(handle, sstate);
2785    else
2786	acpi_wake_run_prep(handle, sstate);
2787    return (AE_OK);
2788}
2789
2790/* Walk the tree rooted at acpi0 to prep devices for suspend/resume. */
2791static int
2792acpi_wake_prep_walk(int sstate)
2793{
2794    ACPI_HANDLE sb_handle;
2795
2796    if (ACPI_SUCCESS(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle)))
2797	AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb_handle, 100,
2798	    acpi_wake_prep, &sstate, NULL);
2799    return (0);
2800}
2801
2802/* Walk the tree rooted at acpi0 to attach per-device wake sysctls. */
2803static int
2804acpi_wake_sysctl_walk(device_t dev)
2805{
2806    int error, i, numdevs;
2807    device_t *devlist;
2808    device_t child;
2809    ACPI_STATUS status;
2810
2811    error = device_get_children(dev, &devlist, &numdevs);
2812    if (error != 0 || numdevs == 0) {
2813	if (numdevs == 0)
2814	    free(devlist, M_TEMP);
2815	return (error);
2816    }
2817    for (i = 0; i < numdevs; i++) {
2818	child = devlist[i];
2819	acpi_wake_sysctl_walk(child);
2820	if (!device_is_attached(child))
2821	    continue;
2822	status = AcpiEvaluateObject(acpi_get_handle(child), "_PRW", NULL, NULL);
2823	if (ACPI_SUCCESS(status)) {
2824	    SYSCTL_ADD_PROC(device_get_sysctl_ctx(child),
2825		SYSCTL_CHILDREN(device_get_sysctl_tree(child)), OID_AUTO,
2826		"wake", CTLTYPE_INT | CTLFLAG_RW, child, 0,
2827		acpi_wake_set_sysctl, "I", "Device set to wake the system");
2828	}
2829    }
2830    free(devlist, M_TEMP);
2831
2832    return (0);
2833}
2834
2835/* Enable or disable wake from userland. */
2836static int
2837acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS)
2838{
2839    int enable, error;
2840    device_t dev;
2841
2842    dev = (device_t)arg1;
2843    enable = (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) ? 1 : 0;
2844
2845    error = sysctl_handle_int(oidp, &enable, 0, req);
2846    if (error != 0 || req->newptr == NULL)
2847	return (error);
2848    if (enable != 0 && enable != 1)
2849	return (EINVAL);
2850
2851    return (acpi_wake_set_enable(dev, enable));
2852}
2853
2854/* Parse a device's _PRW into a structure. */
2855int
2856acpi_parse_prw(ACPI_HANDLE h, struct acpi_prw_data *prw)
2857{
2858    ACPI_STATUS			status;
2859    ACPI_BUFFER			prw_buffer;
2860    ACPI_OBJECT			*res, *res2;
2861    int				error, i, power_count;
2862
2863    if (h == NULL || prw == NULL)
2864	return (EINVAL);
2865
2866    /*
2867     * The _PRW object (7.2.9) is only required for devices that have the
2868     * ability to wake the system from a sleeping state.
2869     */
2870    error = EINVAL;
2871    prw_buffer.Pointer = NULL;
2872    prw_buffer.Length = ACPI_ALLOCATE_BUFFER;
2873    status = AcpiEvaluateObject(h, "_PRW", NULL, &prw_buffer);
2874    if (ACPI_FAILURE(status))
2875	return (ENOENT);
2876    res = (ACPI_OBJECT *)prw_buffer.Pointer;
2877    if (res == NULL)
2878	return (ENOENT);
2879    if (!ACPI_PKG_VALID(res, 2))
2880	goto out;
2881
2882    /*
2883     * Element 1 of the _PRW object:
2884     * The lowest power system sleeping state that can be entered while still
2885     * providing wake functionality.  The sleeping state being entered must
2886     * be less than (i.e., higher power) or equal to this value.
2887     */
2888    if (acpi_PkgInt32(res, 1, &prw->lowest_wake) != 0)
2889	goto out;
2890
2891    /*
2892     * Element 0 of the _PRW object:
2893     */
2894    switch (res->Package.Elements[0].Type) {
2895    case ACPI_TYPE_INTEGER:
2896	/*
2897	 * If the data type of this package element is numeric, then this
2898	 * _PRW package element is the bit index in the GPEx_EN, in the
2899	 * GPE blocks described in the FADT, of the enable bit that is
2900	 * enabled for the wake event.
2901	 */
2902	prw->gpe_handle = NULL;
2903	prw->gpe_bit = res->Package.Elements[0].Integer.Value;
2904	error = 0;
2905	break;
2906    case ACPI_TYPE_PACKAGE:
2907	/*
2908	 * If the data type of this package element is a package, then this
2909	 * _PRW package element is itself a package containing two
2910	 * elements.  The first is an object reference to the GPE Block
2911	 * device that contains the GPE that will be triggered by the wake
2912	 * event.  The second element is numeric and it contains the bit
2913	 * index in the GPEx_EN, in the GPE Block referenced by the
2914	 * first element in the package, of the enable bit that is enabled for
2915	 * the wake event.
2916	 *
2917	 * For example, if this field is a package then it is of the form:
2918	 * Package() {\_SB.PCI0.ISA.GPE, 2}
2919	 */
2920	res2 = &res->Package.Elements[0];
2921	if (!ACPI_PKG_VALID(res2, 2))
2922	    goto out;
2923	prw->gpe_handle = acpi_GetReference(NULL, &res2->Package.Elements[0]);
2924	if (prw->gpe_handle == NULL)
2925	    goto out;
2926	if (acpi_PkgInt32(res2, 1, &prw->gpe_bit) != 0)
2927	    goto out;
2928	error = 0;
2929	break;
2930    default:
2931	goto out;
2932    }
2933
2934    /* Elements 2 to N of the _PRW object are power resources. */
2935    power_count = res->Package.Count - 2;
2936    if (power_count > ACPI_PRW_MAX_POWERRES) {
2937	printf("ACPI device %s has too many power resources\n", acpi_name(h));
2938	power_count = 0;
2939    }
2940    prw->power_res_count = power_count;
2941    for (i = 0; i < power_count; i++)
2942	prw->power_res[i] = res->Package.Elements[i];
2943
2944out:
2945    if (prw_buffer.Pointer != NULL)
2946	AcpiOsFree(prw_buffer.Pointer);
2947    return (error);
2948}
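
/*
 * Example: for an ASL declaration such as
 *
 *	Name (_PRW, Package () { 0x0D, 0x03 })
 *
 * this parser yields gpe_handle == NULL, gpe_bit == 0x0D (a bit index in
 * the FADT GPE blocks), lowest_wake == 3 (wake works from S3 or
 * higher-power states), and power_res_count == 0.
 */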
2949
2950/*
2951 * ACPI Event Handlers
2952 */
2953
2954/* System Event Handlers (registered by EVENTHANDLER_REGISTER) */
2955
2956static void
2957acpi_system_eventhandler_sleep(void *arg, int state)
2958{
2959    struct acpi_softc *sc = (struct acpi_softc *)arg;
2960    int ret;
2961
2962    ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
2963
2964    /* Check if button action is disabled or unknown. */
2965    if (state == ACPI_STATE_UNKNOWN)
2966	return;
2967
2968    /* Request that the system prepare to enter the given suspend state. */
2969    ret = acpi_ReqSleepState(sc, state);
2970    if (ret != 0)
2971	device_printf(sc->acpi_dev,
2972	    "request to enter state S%d failed (err %d)\n", state, ret);
2973
2974    return_VOID;
2975}
2976
2977static void
2978acpi_system_eventhandler_wakeup(void *arg, int state)
2979{
2980
2981    ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
2982
2983    /* Currently, nothing to do for wakeup. */
2984
2985    return_VOID;
2986}
2987
2988/*
2989 * ACPICA Event Handlers (FixedEvent, also called from button notify handler)
2990 */
2991UINT32
2992acpi_event_power_button_sleep(void *context)
2993{
2994    struct acpi_softc	*sc = (struct acpi_softc *)context;
2995
2996    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
2997
2998    EVENTHANDLER_INVOKE(acpi_sleep_event, sc->acpi_power_button_sx);
2999
3000    return_VALUE (ACPI_INTERRUPT_HANDLED);
3001}
3002
3003UINT32
3004acpi_event_power_button_wake(void *context)
3005{
3006    struct acpi_softc	*sc = (struct acpi_softc *)context;
3007
3008    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
3009
3010    EVENTHANDLER_INVOKE(acpi_wakeup_event, sc->acpi_power_button_sx);
3011
3012    return_VALUE (ACPI_INTERRUPT_HANDLED);
3013}
3014
3015UINT32
3016acpi_event_sleep_button_sleep(void *context)
3017{
3018    struct acpi_softc	*sc = (struct acpi_softc *)context;
3019
3020    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
3021
3022    EVENTHANDLER_INVOKE(acpi_sleep_event, sc->acpi_sleep_button_sx);
3023
3024    return_VALUE (ACPI_INTERRUPT_HANDLED);
3025}
3026
3027UINT32
3028acpi_event_sleep_button_wake(void *context)
3029{
3030    struct acpi_softc	*sc = (struct acpi_softc *)context;
3031
3032    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
3033
3034    EVENTHANDLER_INVOKE(acpi_wakeup_event, sc->acpi_sleep_button_sx);
3035
3036    return_VALUE (ACPI_INTERRUPT_HANDLED);
3037}
3038
3039/*
3040 * XXX This static buffer is suboptimal.  There is no locking so only
3041 * use this for single-threaded callers.
3042 */
3043char *
3044acpi_name(ACPI_HANDLE handle)
3045{
3046    ACPI_BUFFER buf;
3047    static char data[256];
3048
3049    buf.Length = sizeof(data);
3050    buf.Pointer = data;
3051
3052    if (handle && ACPI_SUCCESS(AcpiGetName(handle, ACPI_FULL_PATHNAME, &buf)))
3053	return (data);
3054    return ("(unknown)");
3055}
3056
3057/*
3058 * Debugging/bug-avoidance.  Avoid trying to fetch info on various
3059 * parts of the namespace.
3060 */
3061int
3062acpi_avoid(ACPI_HANDLE handle)
3063{
3064    char	*cp, *env, *np;
3065    int		len;
3066
3067    np = acpi_name(handle);
3068    if (*np == '\\')
3069	np++;
3070    if ((env = getenv("debug.acpi.avoid")) == NULL)
3071	return (0);
3072
3073    /* Scan the avoid list checking for a match */
3074    cp = env;
3075    for (;;) {
3076	while (*cp != 0 && isspace(*cp))
3077	    cp++;
3078	if (*cp == 0)
3079	    break;
3080	len = 0;
3081	while (cp[len] != 0 && !isspace(cp[len]))
3082	    len++;
3083	if (!strncmp(cp, np, len)) {
3084	    freeenv(env);
3085	    return(1);
3086	}
3087	cp += len;
3088    }
3089    freeenv(env);
3090
3091    return (0);
3092}
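
/*
 * Example (hypothetical path): entries in the debug.acpi.avoid tunable are
 * space-separated namespace paths written without the leading backslash,
 * since the comparison above strips it from the object's name, e.g. in
 * loader.conf:
 *
 *	debug.acpi.avoid="_SB_.PCI0.EC0_"
 */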
3093
3094/*
3095 * Debugging/bug-avoidance.  Disable ACPI subsystem components.
3096 */
3097int
3098acpi_disabled(char *subsys)
3099{
3100    char	*cp, *env;
3101    int		len;
3102
3103    if ((env = getenv("debug.acpi.disabled")) == NULL)
3104	return (0);
3105    if (strcmp(env, "all") == 0) {
3106	freeenv(env);
3107	return (1);
3108    }
3109
3110    /* Scan the disable list, checking for a match. */
3111    cp = env;
3112    for (;;) {
3113	while (*cp != '\0' && isspace(*cp))
3114	    cp++;
3115	if (*cp == '\0')
3116	    break;
3117	len = 0;
3118	while (cp[len] != '\0' && !isspace(cp[len]))
3119	    len++;
3120	if (strncmp(cp, subsys, len) == 0) {
3121	    freeenv(env);
3122	    return (1);
3123	}
3124	cp += len;
3125    }
3126    freeenv(env);
3127
3128    return (0);
3129}
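
/*
 * Example: subsystems are disabled by listing them, space-separated, in
 * the debug.acpi.disabled tunable, or all at once with "all", e.g. in
 * loader.conf:
 *
 *	debug.acpi.disabled="children"
 *
 * which this file consults via acpi_disabled("children") before creating
 * namespace child devices.
 */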
3130
3131/*
3132 * Control interface.
3133 *
3134 * We multiplex ioctls for all participating ACPI devices here.  Individual
3135 * drivers wanting to be accessible via /dev/acpi should use the
3136 * register/deregister interface to make their handlers visible.
3137 */
3138struct acpi_ioctl_hook
3139{
3140    TAILQ_ENTRY(acpi_ioctl_hook) link;
3141    u_long			 cmd;
3142    acpi_ioctl_fn		 fn;
3143    void			 *arg;
3144};
3145
3146static TAILQ_HEAD(,acpi_ioctl_hook)	acpi_ioctl_hooks;
3147static int				acpi_ioctl_hooks_initted;
3148
3149int
3150acpi_register_ioctl(u_long cmd, acpi_ioctl_fn fn, void *arg)
3151{
3152    struct acpi_ioctl_hook	*hp;
3153
3154    if ((hp = malloc(sizeof(*hp), M_ACPIDEV, M_NOWAIT)) == NULL)
3155	return (ENOMEM);
3156    hp->cmd = cmd;
3157    hp->fn = fn;
3158    hp->arg = arg;
3159
3160    ACPI_LOCK(acpi);
3161    if (acpi_ioctl_hooks_initted == 0) {
3162	TAILQ_INIT(&acpi_ioctl_hooks);
3163	acpi_ioctl_hooks_initted = 1;
3164    }
3165    TAILQ_INSERT_TAIL(&acpi_ioctl_hooks, hp, link);
3166    ACPI_UNLOCK(acpi);
3167
3168    return (0);
3169}
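
/*
 * Illustrative registration from a participating driver (the ioctl command
 * ACPIIO_FOO and handler foo_ioctl are hypothetical, shown only as a
 * sketch of the acpi_ioctl_fn contract used by acpiioctl() below):
 *
 *	static int
 *	foo_ioctl(u_long cmd, caddr_t addr, void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *		...
 *		return (0);
 *	}
 *
 *	acpi_register_ioctl(ACPIIO_FOO, foo_ioctl, sc);
 *	...
 *	acpi_deregister_ioctl(ACPIIO_FOO, foo_ioctl);
 */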
3170
3171void
3172acpi_deregister_ioctl(u_long cmd, acpi_ioctl_fn fn)
3173{
3174    struct acpi_ioctl_hook	*hp;
3175
3176    ACPI_LOCK(acpi);
3177    TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link)
3178	if (hp->cmd == cmd && hp->fn == fn)
3179	    break;
3180
3181    if (hp != NULL) {
3182	TAILQ_REMOVE(&acpi_ioctl_hooks, hp, link);
3183	free(hp, M_ACPIDEV);
3184    }
3185    ACPI_UNLOCK(acpi);
3186}
3187
3188static int
3189acpiopen(struct cdev *dev, int flag, int fmt, d_thread_t *td)
3190{
3191    return (0);
3192}
3193
3194static int
3195acpiclose(struct cdev *dev, int flag, int fmt, d_thread_t *td)
3196{
3197    return (0);
3198}
3199
3200static int
3201acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, d_thread_t *td)
3202{
3203    struct acpi_softc		*sc;
3204    struct acpi_ioctl_hook	*hp;
3205    int				error, state;
3206
3207    error = 0;
3208    hp = NULL;
3209    sc = dev->si_drv1;
3210
3211    /*
3212     * Scan the list of registered ioctls, looking for handlers.
3213     */
3214    ACPI_LOCK(acpi);
3215    if (acpi_ioctl_hooks_initted)
3216	TAILQ_FOREACH(hp, &acpi_ioctl_hooks, link) {
3217	    if (hp->cmd == cmd)
3218		break;
3219	}
3220    ACPI_UNLOCK(acpi);
3221    if (hp)
3222	return (hp->fn(cmd, addr, hp->arg));
3223
3224    /*
3225     * Core ioctls are not permitted unless the device was opened for
3226     * writing.  The registered ioctls handled above only fetch
3227     * information and do not change system behavior.
3228     */
3229    if ((flag & FWRITE) == 0)
3230	return (EPERM);
3231
3232    /* Core system ioctls. */
3233    switch (cmd) {
3234    case ACPIIO_REQSLPSTATE:
3235	state = *(int *)addr;
3236	if (state != ACPI_STATE_S5)
3237	    return (acpi_ReqSleepState(sc, state));
3238	device_printf(sc->acpi_dev, "power off via acpi ioctl not supported\n");
3239	error = EOPNOTSUPP;
3240	break;
3241    case ACPIIO_ACKSLPSTATE:
3242	error = *(int *)addr;
3243	error = acpi_AckSleepState(sc->acpi_clone, error);
3244	break;
3245    case ACPIIO_SETSLPSTATE:	/* DEPRECATED */
3246	state = *(int *)addr;
3247	if (state < ACPI_STATE_S0 || state > ACPI_S_STATES_MAX)
3248	    return (EINVAL);
3249	if (!acpi_sleep_states[state])
3250	    return (EOPNOTSUPP);
3251	if (ACPI_FAILURE(acpi_SetSleepState(sc, state)))
3252	    error = ENXIO;
3253	break;
3254    default:
3255	error = ENXIO;
3256	break;
3257    }
3258
3259    return (error);
3260}
3261
3262static int
3263acpi_sname2sstate(const char *sname)
3264{
3265    int sstate;
3266
3267    if (toupper(sname[0]) == 'S') {
3268	sstate = sname[1] - '0';
3269	if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5 &&
3270	    sname[2] == '\0')
3271	    return (sstate);
3272    } else if (strcasecmp(sname, "NONE") == 0)
3273	return (ACPI_STATE_UNKNOWN);
3274    return (-1);
3275}
3276
3277static const char *
3278acpi_sstate2sname(int sstate)
3279{
3280    static const char *snames[] = { "S0", "S1", "S2", "S3", "S4", "S5" };
3281
3282    if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5)
3283	return (snames[sstate]);
3284    else if (sstate == ACPI_STATE_UNKNOWN)
3285	return ("NONE");
3286    return (NULL);
3287}
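
/*
 * Examples of the mapping implemented by the two helpers above:
 * acpi_sname2sstate("S3") == ACPI_STATE_S3, acpi_sname2sstate("NONE") ==
 * ACPI_STATE_UNKNOWN, and acpi_sstate2sname(ACPI_STATE_S3) == "S3";
 * unrecognized names yield -1.
 */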
3288
3289static int
3290acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
3291{
3292    int error;
3293    struct sbuf sb;
3294    UINT8 state;
3295
3296    sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
3297    for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++)
3298	if (acpi_sleep_states[state])
3299	    sbuf_printf(&sb, "%s ", acpi_sstate2sname(state));
3300    sbuf_trim(&sb);
3301    sbuf_finish(&sb);
3302    error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
3303    sbuf_delete(&sb);
3304    return (error);
3305}
3306
3307static int
3308acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
3309{
3310    char sleep_state[10];
3311    int error, new_state, old_state;
3312
3313    old_state = *(int *)oidp->oid_arg1;
3314    strlcpy(sleep_state, acpi_sstate2sname(old_state), sizeof(sleep_state));
3315    error = sysctl_handle_string(oidp, sleep_state, sizeof(sleep_state), req);
3316    if (error == 0 && req->newptr != NULL) {
3317	new_state = acpi_sname2sstate(sleep_state);
3318	if (new_state < ACPI_STATE_S1)
3319	    return (EINVAL);
3320	if (new_state < ACPI_S_STATES_MAX && !acpi_sleep_states[new_state])
3321	    return (EOPNOTSUPP);
3322	if (new_state != old_state)
3323	    *(int *)oidp->oid_arg1 = new_state;
3324    }
3325    return (error);
3326}
3327
3328/* Inform devctl(4) when we receive a Notify. */
3329void
3330acpi_UserNotify(const char *subsystem, ACPI_HANDLE h, uint8_t notify)
3331{
3332    char		notify_buf[16];
3333    ACPI_BUFFER		handle_buf;
3334    ACPI_STATUS		status;
3335
3336    if (subsystem == NULL)
3337	return;
3338
3339    handle_buf.Pointer = NULL;
3340    handle_buf.Length = ACPI_ALLOCATE_BUFFER;
3341    status = AcpiNsHandleToPathname(h, &handle_buf);
3342    if (ACPI_FAILURE(status))
3343	return;
3344    snprintf(notify_buf, sizeof(notify_buf), "notify=0x%02x", notify);
3345    devctl_notify("ACPI", subsystem, handle_buf.Pointer, notify_buf);
3346    AcpiOsFree(handle_buf.Pointer);
3347}
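
/*
 * Example (hedged; the exact event text depends on devctl(4)): a
 * Notify(0x80) reported under a "Thermal" subsystem for a hypothetical
 * object \_TZ_.TZ0 reaches devd(8) roughly as
 *
 *	!system=ACPI subsystem=Thermal type=\_TZ_.TZ0 notify=0x80
 *
 * where the type field is the full namespace path of the notifying object.
 */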
3348
3349#ifdef ACPI_DEBUG
3350/*
3351 * Support for parsing debug options from the kernel environment.
3352 *
3353 * Bits may be set in the AcpiDbgLayer and AcpiDbgLevel debug registers
3354 * by specifying the names of the bits in the debug.acpi.layer and
3355 * debug.acpi.level environment variables.  Bits may be unset by
3356 * prefixing the bit name with !.
3357 */
3358struct debugtag
3359{
3360    char	*name;
3361    UINT32	value;
3362};
3363
3364static struct debugtag	dbg_layer[] = {
3365    {"ACPI_UTILITIES",		ACPI_UTILITIES},
3366    {"ACPI_HARDWARE",		ACPI_HARDWARE},
3367    {"ACPI_EVENTS",		ACPI_EVENTS},
3368    {"ACPI_TABLES",		ACPI_TABLES},
3369    {"ACPI_NAMESPACE",		ACPI_NAMESPACE},
3370    {"ACPI_PARSER",		ACPI_PARSER},
3371    {"ACPI_DISPATCHER",		ACPI_DISPATCHER},
3372    {"ACPI_EXECUTER",		ACPI_EXECUTER},
3373    {"ACPI_RESOURCES",		ACPI_RESOURCES},
3374    {"ACPI_CA_DEBUGGER",	ACPI_CA_DEBUGGER},
3375    {"ACPI_OS_SERVICES",	ACPI_OS_SERVICES},
3376    {"ACPI_CA_DISASSEMBLER",	ACPI_CA_DISASSEMBLER},
3377    {"ACPI_ALL_COMPONENTS",	ACPI_ALL_COMPONENTS},
3378
3379    {"ACPI_AC_ADAPTER",		ACPI_AC_ADAPTER},
3380    {"ACPI_BATTERY",		ACPI_BATTERY},
3381    {"ACPI_BUS",		ACPI_BUS},
3382    {"ACPI_BUTTON",		ACPI_BUTTON},
3383    {"ACPI_EC", 		ACPI_EC},
3384    {"ACPI_FAN",		ACPI_FAN},
3385    {"ACPI_POWERRES",		ACPI_POWERRES},
3386    {"ACPI_PROCESSOR",		ACPI_PROCESSOR},
3387    {"ACPI_THERMAL",		ACPI_THERMAL},
3388    {"ACPI_TIMER",		ACPI_TIMER},
3389    {"ACPI_ALL_DRIVERS",	ACPI_ALL_DRIVERS},
3390    {NULL, 0}
3391};
3392
3393static struct debugtag dbg_level[] = {
3394    {"ACPI_LV_ERROR",		ACPI_LV_ERROR},
3395    {"ACPI_LV_WARN",		ACPI_LV_WARN},
3396    {"ACPI_LV_INIT",		ACPI_LV_INIT},
3397    {"ACPI_LV_DEBUG_OBJECT",	ACPI_LV_DEBUG_OBJECT},
3398    {"ACPI_LV_INFO",		ACPI_LV_INFO},
3399    {"ACPI_LV_ALL_EXCEPTIONS",	ACPI_LV_ALL_EXCEPTIONS},
3400
3401    /* Trace verbosity level 1 [Standard Trace Level] */
3402    {"ACPI_LV_INIT_NAMES",	ACPI_LV_INIT_NAMES},
3403    {"ACPI_LV_PARSE",		ACPI_LV_PARSE},
3404    {"ACPI_LV_LOAD",		ACPI_LV_LOAD},
3405    {"ACPI_LV_DISPATCH",	ACPI_LV_DISPATCH},
3406    {"ACPI_LV_EXEC",		ACPI_LV_EXEC},
3407    {"ACPI_LV_NAMES",		ACPI_LV_NAMES},
3408    {"ACPI_LV_OPREGION",	ACPI_LV_OPREGION},
3409    {"ACPI_LV_BFIELD",		ACPI_LV_BFIELD},
3410    {"ACPI_LV_TABLES",		ACPI_LV_TABLES},
3411    {"ACPI_LV_VALUES",		ACPI_LV_VALUES},
3412    {"ACPI_LV_OBJECTS",		ACPI_LV_OBJECTS},
3413    {"ACPI_LV_RESOURCES",	ACPI_LV_RESOURCES},
3414    {"ACPI_LV_USER_REQUESTS",	ACPI_LV_USER_REQUESTS},
3415    {"ACPI_LV_PACKAGE",		ACPI_LV_PACKAGE},
3416    {"ACPI_LV_VERBOSITY1",	ACPI_LV_VERBOSITY1},
3417
3418    /* Trace verbosity level 2 [Function tracing and memory allocation] */
3419    {"ACPI_LV_ALLOCATIONS",	ACPI_LV_ALLOCATIONS},
3420    {"ACPI_LV_FUNCTIONS",	ACPI_LV_FUNCTIONS},
3421    {"ACPI_LV_OPTIMIZATIONS",	ACPI_LV_OPTIMIZATIONS},
3422    {"ACPI_LV_VERBOSITY2",	ACPI_LV_VERBOSITY2},
3423    {"ACPI_LV_ALL",		ACPI_LV_ALL},
3424
3425    /* Trace verbosity level 3 [Threading, I/O, and Interrupts] */
3426    {"ACPI_LV_MUTEX",		ACPI_LV_MUTEX},
3427    {"ACPI_LV_THREADS",		ACPI_LV_THREADS},
3428    {"ACPI_LV_IO",		ACPI_LV_IO},
3429    {"ACPI_LV_INTERRUPTS",	ACPI_LV_INTERRUPTS},
3430    {"ACPI_LV_VERBOSITY3",	ACPI_LV_VERBOSITY3},
3431
3432    /* Exceptionally verbose output -- also used in the global "DebugLevel"  */
3433    {"ACPI_LV_AML_DISASSEMBLE",	ACPI_LV_AML_DISASSEMBLE},
3434    {"ACPI_LV_VERBOSE_INFO",	ACPI_LV_VERBOSE_INFO},
3435    {"ACPI_LV_FULL_TABLES",	ACPI_LV_FULL_TABLES},
3436    {"ACPI_LV_EVENTS",		ACPI_LV_EVENTS},
3437    {"ACPI_LV_VERBOSE",		ACPI_LV_VERBOSE},
3438    {NULL, 0}
3439};
3440
3441static void
3442acpi_parse_debug(char *cp, struct debugtag *tag, UINT32 *flag)
3443{
3444    char	*ep;
3445    int		i, l;
3446    int		set;
3447
3448    while (*cp) {
3449	if (isspace(*cp)) {
3450	    cp++;
3451	    continue;
3452	}
3453	ep = cp;
3454	while (*ep && !isspace(*ep))
3455	    ep++;
3456	if (*cp == '!') {
3457	    set = 0;
3458	    cp++;
3459	    if (cp == ep)
3460		continue;
3461	} else {
3462	    set = 1;
3463	}
3464	l = ep - cp;
3465	for (i = 0; tag[i].name != NULL; i++) {
3466	    if (!strncmp(cp, tag[i].name, l)) {
3467		if (set)
3468		    *flag |= tag[i].value;
3469		else
3470		    *flag &= ~tag[i].value;
3471	    }
3472	}
3473	cp = ep;
3474    }
3475}
3476
3477static void
3478acpi_set_debugging(void *junk)
3479{
3480    char	*layer, *level;
3481
3482    if (cold) {
3483	AcpiDbgLayer = 0;
3484	AcpiDbgLevel = 0;
3485    }
3486
3487    layer = getenv("debug.acpi.layer");
3488    level = getenv("debug.acpi.level");
3489    if (layer == NULL && level == NULL)
3490	return;
3491
3492    printf("ACPI set debug");
3493    if (layer != NULL) {
3494	if (strcmp("NONE", layer) != 0)
3495	    printf(" layer '%s'", layer);
3496	acpi_parse_debug(layer, &dbg_layer[0], &AcpiDbgLayer);
3497	freeenv(layer);
3498    }
3499    if (level != NULL) {
3500	if (strcmp("NONE", level) != 0)
3501	    printf(" level '%s'", level);
3502	acpi_parse_debug(level, &dbg_level[0], &AcpiDbgLevel);
3503	freeenv(level);
3504    }
3505    printf("\n");
3506}
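
/*
 * Example: with ACPI_DEBUG compiled in, debug output is selected at boot
 * by listing bit names from the tables above, e.g. in loader.conf:
 *
 *	debug.acpi.layer="ACPI_EC ACPI_BUS"
 *	debug.acpi.level="ACPI_LV_ERROR ACPI_LV_INIT"
 *
 * Prefixing a name with '!' clears that bit instead of setting it.
 */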
3507
3508SYSINIT(acpi_debugging, SI_SUB_TUNABLES, SI_ORDER_ANY, acpi_set_debugging,
3509	NULL);
3510
3511static int
3512acpi_debug_sysctl(SYSCTL_HANDLER_ARGS)
3513{
3514    int		 error, *dbg;
3515    struct	 debugtag *tag;
3516    struct	 sbuf sb;
3517
3518    if (sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND) == NULL)
3519	return (ENOMEM);
3520    if (strcmp(oidp->oid_arg1, "debug.acpi.layer") == 0) {
3521	tag = &dbg_layer[0];
3522	dbg = &AcpiDbgLayer;
3523    } else {
3524	tag = &dbg_level[0];
3525	dbg = &AcpiDbgLevel;
3526    }
3527
3528    /* Get old values if this is a get request. */
3529    ACPI_SERIAL_BEGIN(acpi);
3530    if (*dbg == 0) {
3531	sbuf_cpy(&sb, "NONE");
3532    } else if (req->newptr == NULL) {
3533	for (; tag->name != NULL; tag++) {
3534	    if ((*dbg & tag->value) == tag->value)
3535		sbuf_printf(&sb, "%s ", tag->name);
3536	}
3537    }
3538    sbuf_trim(&sb);
3539    sbuf_finish(&sb);
3540
3541    /* Copy out the old values to the user. */
3542    error = SYSCTL_OUT(req, sbuf_data(&sb), sbuf_len(&sb));
3543    sbuf_delete(&sb);
3544
3545    /* If the user is setting a string, parse it. */
3546    if (error == 0 && req->newptr != NULL) {
3547	*dbg = 0;
3548	setenv((char *)oidp->oid_arg1, (char *)req->newptr);
3549	acpi_set_debugging(NULL);
3550    }
3551    ACPI_SERIAL_END(acpi);
3552
3553    return (error);
3554}
3555
3556SYSCTL_PROC(_debug_acpi, OID_AUTO, layer, CTLFLAG_RW | CTLTYPE_STRING,
3557	    "debug.acpi.layer", 0, acpi_debug_sysctl, "A", "");
3558SYSCTL_PROC(_debug_acpi, OID_AUTO, level, CTLFLAG_RW | CTLTYPE_STRING,
3559	    "debug.acpi.level", 0, acpi_debug_sysctl, "A", "");
3560#endif /* ACPI_DEBUG */
3561
3562static int
3563acpi_pm_func(u_long cmd, void *arg, ...)
3564{
3565	int	state, acpi_state;
3566	int	error;
3567	struct	acpi_softc *sc;
3568	va_list	ap;
3569
3570	error = 0;
3571	switch (cmd) {
3572	case POWER_CMD_SUSPEND:
3573		sc = (struct acpi_softc *)arg;
3574		if (sc == NULL) {
3575			error = EINVAL;
3576			goto out;
3577		}
3578
3579		va_start(ap, arg);
3580		state = va_arg(ap, int);
3581		va_end(ap);
3582
3583		switch (state) {
3584		case POWER_SLEEP_STATE_STANDBY:
3585			acpi_state = sc->acpi_standby_sx;
3586			break;
3587		case POWER_SLEEP_STATE_SUSPEND:
3588			acpi_state = sc->acpi_suspend_sx;
3589			break;
3590		case POWER_SLEEP_STATE_HIBERNATE:
3591			acpi_state = ACPI_STATE_S4;
3592			break;
3593		default:
3594			error = EINVAL;
3595			goto out;
3596		}
3597
3598		if (ACPI_FAILURE(acpi_EnterSleepState(sc, acpi_state)))
3599			error = ENXIO;
3600		break;
3601	default:
3602		error = EINVAL;
3603		goto out;
3604	}
3605
3606out:
3607	return (error);
3608}
3609
3610static void
3611acpi_pm_register(void *arg)
3612{
3613    if (!cold || resource_disabled("acpi", 0))
3614	return;
3615
3616    power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL);
3617}
3618
3619SYSINIT(power, SI_SUB_KLD, SI_ORDER_ANY, acpi_pm_register, 0);
3620