acpi.c revision 1.150
1/* $OpenBSD: acpi.c,v 1.150 2009/11/24 23:01:41 jsg Exp $ */ 2/* 3 * Copyright (c) 2005 Thorsten Lockert <tholo@sigmasoft.com> 4 * Copyright (c) 2005 Jordan Hargrave <jordan@openbsd.org> 5 * 6 * Permission to use, copy, modify, and distribute this software for any 7 * purpose with or without fee is hereby granted, provided that the above 8 * copyright notice and this permission notice appear in all copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19#include <sys/param.h> 20#include <sys/systm.h> 21#include <sys/device.h> 22#include <sys/malloc.h> 23#include <sys/fcntl.h> 24#include <sys/ioccom.h> 25#include <sys/event.h> 26#include <sys/signalvar.h> 27#include <sys/proc.h> 28#include <sys/kthread.h> 29#include <sys/workq.h> 30 31#include <machine/conf.h> 32#include <machine/cpufunc.h> 33#include <machine/bus.h> 34 35#include <dev/pci/pcivar.h> 36#include <dev/acpi/acpireg.h> 37#include <dev/acpi/acpivar.h> 38#include <dev/acpi/amltypes.h> 39#include <dev/acpi/acpidev.h> 40#include <dev/acpi/dsdt.h> 41 42#include <dev/pci/pciidereg.h> 43#include <dev/pci/pciidevar.h> 44 45#include <machine/apmvar.h> 46#define APMUNIT(dev) (minor(dev)&0xf0) 47#define APMDEV(dev) (minor(dev)&0x0f) 48#define APMDEV_NORMAL 0 49#define APMDEV_CTL 8 50 51#ifdef ACPI_DEBUG 52int acpi_debug = 16; 53#endif 54int acpi_enabled; 55int acpi_poll_enabled; 56int acpi_hasprocfvs; 57int acpi_thinkpad_enabled; 58int acpi_saved_spl; 59 60#define ACPIEN_RETRIES 15 61 62void acpi_isr_thread(void *); 63void 
acpi_create_thread(void *); 64 65int acpi_match(struct device *, void *, void *); 66void acpi_attach(struct device *, struct device *, void *); 67int acpi_submatch(struct device *, void *, void *); 68int acpi_print(void *, const char *); 69void acpi_handle_suspend_failure(struct acpi_softc *); 70 71void acpi_map_pmregs(struct acpi_softc *); 72 73int acpi_founddock(struct aml_node *, void *); 74int acpi_foundpss(struct aml_node *, void *); 75int acpi_foundhid(struct aml_node *, void *); 76int acpi_foundec(struct aml_node *, void *); 77int acpi_foundtmp(struct aml_node *, void *); 78int acpi_foundprt(struct aml_node *, void *); 79int acpi_foundprw(struct aml_node *, void *); 80int acpi_foundvideo(struct aml_node *, void *); 81int acpi_inidev(struct aml_node *, void *); 82 83int acpi_loadtables(struct acpi_softc *, struct acpi_rsdp *); 84 85void acpi_init_states(struct acpi_softc *); 86void acpi_init_gpes(struct acpi_softc *); 87void acpi_init_pm(struct acpi_softc *); 88 89int acpi_foundide(struct aml_node *node, void *arg); 90int acpiide_notify(struct aml_node *, int, void *); 91 92void wdcattach(struct channel_softc *); 93int wdcdetach(struct channel_softc *, int); 94 95struct acpi_q *acpi_maptable(paddr_t, const char *, const char *, const char *); 96 97struct idechnl 98{ 99 struct acpi_softc *sc; 100 int64_t addr; 101 int64_t chnl; 102 int64_t sta; 103}; 104 105int is_ejectable_bay(struct aml_node *node); 106int is_ata(struct aml_node *node); 107int is_ejectable(struct aml_node *node); 108 109#ifndef SMALL_KERNEL 110void acpi_resume(struct acpi_softc *, int); 111void acpi_susp_resume_gpewalk(struct acpi_softc *, int, int); 112#endif /* SMALL_KERNEL */ 113 114#ifndef SMALL_KERNEL 115int acpi_add_device(struct aml_node *node, void *arg); 116#endif /* SMALL_KERNEL */ 117 118void acpi_enable_onegpe(struct acpi_softc *, int, int); 119int acpi_gpe_level(struct acpi_softc *, int, void *); 120int acpi_gpe_edge(struct acpi_softc *, int, void *); 121 122struct gpe_block 
*acpi_find_gpe(struct acpi_softc *, int);

/* Locking is not implemented yet; these exist so call sites are marked. */
#define ACPI_LOCK(sc)
#define ACPI_UNLOCK(sc)

/* XXX move this into dsdt softc at some point */
extern struct aml_node aml_root;

/* XXX do we need this? */
void acpi_filtdetach(struct knote *);
int acpi_filtread(struct knote *, long);

struct filterops acpiread_filtops = {
	1, NULL, acpi_filtdetach, acpi_filtread
};

struct cfattach acpi_ca = {
	sizeof(struct acpi_softc), acpi_match, acpi_attach, NULL,
	config_activate_children
};

struct cfdriver acpi_cd = {
	NULL, "acpi", DV_DULL
};

/* Global handle to the attached acpi0 softc, used by acpi_reset() etc. */
struct acpi_softc *acpi_softc;
int acpi_evindex;

#define acpi_bus_space_map	_bus_space_map
#define acpi_bus_space_unmap	_bus_space_unmap

#define pch(x) (((x)>=' ' && (x)<='z') ? (x) : ' ')

#if 0
void
acpi_delay(struct acpi_softc *sc, int64_t uSecs)
{
	/* XXX this needs to become a tsleep later */
	delay(uSecs);
}
#endif

/*
 * Perform I/O through an ACPI Generic Address Structure style region.
 * Dispatches on the address space id (iospace): system memory, system
 * I/O ports, PCI configuration space, or the embedded controller.
 * 'len' bytes are transferred to/from 'buffer' in units of 'access_size'
 * (1/2/4 bytes for port I/O).  Returns 0 on success, -1 if the I/O
 * space could not be mapped.
 */
int
acpi_gasio(struct acpi_softc *sc, int iodir, int iospace, uint64_t address,
    int access_size, int len, void *buffer)
{
	u_int8_t *pb;
	bus_space_handle_t ioh;
	struct acpi_mem_map mh;
	pci_chipset_tag_t pc;
	pcitag_t tag;
	bus_addr_t ioaddr;
	int reg, idx, ival, sval;

	dnprintf(50, "gasio: %.2x 0x%.8llx %s\n",
	    iospace, address, (iodir == ACPI_IOWRITE) ? "write" : "read");

	pb = (u_int8_t *)buffer;
	switch (iospace) {
	case GAS_SYSTEM_MEMORY:
		/* copy to/from system memory */
		acpi_map(address, len, &mh);
		if (iodir == ACPI_IOREAD)
			memcpy(buffer, mh.va, len);
		else
			memcpy(mh.va, buffer, len);
		acpi_unmap(&mh);
		break;

	case GAS_SYSTEM_IOSPACE:
		/* read/write from I/O registers */
		ioaddr = address;
		if (acpi_bus_space_map(sc->sc_iot, ioaddr, len, 0, &ioh) != 0) {
			printf("unable to map iospace\n");
			return (-1);
		}
		/* Step through the region access_size bytes at a time. */
		for (reg = 0; reg < len; reg += access_size) {
			if (iodir == ACPI_IOREAD) {
				switch (access_size) {
				case 1:
					*(uint8_t *)(pb+reg) = bus_space_read_1(
					    sc->sc_iot, ioh, reg);
					dnprintf(80, "os_in8(%llx) = %x\n",
					    reg+address, *(uint8_t *)(pb+reg));
					break;
				case 2:
					*(uint16_t *)(pb+reg) = bus_space_read_2(
					    sc->sc_iot, ioh, reg);
					dnprintf(80, "os_in16(%llx) = %x\n",
					    reg+address, *(uint16_t *)(pb+reg));
					break;
				case 4:
					*(uint32_t *)(pb+reg) = bus_space_read_4(
					    sc->sc_iot, ioh, reg);
					break;
				default:
					printf("rdio: invalid size %d\n", access_size);
					break;
				}
			} else {
				switch (access_size) {
				case 1:
					bus_space_write_1(sc->sc_iot, ioh, reg,
					    *(uint8_t *)(pb+reg));
					dnprintf(80, "os_out8(%llx,%x)\n",
					    reg+address, *(uint8_t *)(pb+reg));
					break;
				case 2:
					bus_space_write_2(sc->sc_iot, ioh, reg,
					    *(uint16_t *)(pb+reg));
					dnprintf(80, "os_out16(%llx,%x)\n",
					    reg+address, *(uint16_t *)(pb+reg));
					break;
				case 4:
					bus_space_write_4(sc->sc_iot, ioh, reg,
					    *(uint32_t *)(pb+reg));
					break;
				default:
					printf("wrio: invalid size %d\n", access_size);
					break;
				}
			}

			/* During autoconf some devices are still gathering
			 * information. Delay here to give them an opportunity
			 * to finish. During runtime we simply need to ignore
			 * transient values.
			 */
			if (cold)
				delay(10000);
		}
		acpi_bus_space_unmap(sc->sc_iot, ioh, len, &ioaddr);
		break;

	case GAS_PCI_CFG_SPACE:
		/* format of address:
		 * bits 00..15 = register
		 * bits 16..31 = function
		 * bits 32..47 = device
		 * bits 48..63 = bus
		 */
		pc = NULL;
		tag = pci_make_tag(pc,
		    ACPI_PCI_BUS(address), ACPI_PCI_DEV(address),
		    ACPI_PCI_FN(address));

		/* XXX: This is ugly. read-modify-write does a byte at a time */
		reg = ACPI_PCI_REG(address);
		for (idx = reg; idx < reg+len; idx++) {
			/* dword-aligned read, then extract/insert one byte */
			ival = pci_conf_read(pc, tag, idx & ~0x3);
			if (iodir == ACPI_IOREAD) {
				*pb = ival >> (8 * (idx & 0x3));
			} else {
				sval = *pb;
				ival &= ~(0xFF << (8* (idx & 0x3)));
				ival |= sval << (8* (idx & 0x3));
				pci_conf_write(pc, tag, idx & ~0x3, ival);
			}
			pb++;
		}
		break;
	case GAS_EMBEDDED:
		/* EC transfers only work once acpiec(4) has attached. */
		if (sc->sc_ec == NULL)
			break;
#ifndef SMALL_KERNEL
		if (iodir == ACPI_IOREAD)
			acpiec_read(sc->sc_ec, (u_int8_t)address, len, buffer);
		else
			acpiec_write(sc->sc_ec, (u_int8_t)address, len, buffer);
#endif
		break;
	}
	return (0);
}

/*
 * aml_find_node() callback: run a device's _INI method, gated on its
 * _STA status as required by the ACPI spec.
 */
int
acpi_inidev(struct aml_node *node, void *arg)
{
	struct acpi_softc *sc = (struct acpi_softc *)arg;
	int64_t st;

	/*
	 * Per the ACPI spec 6.5.1, only run _INI when device is there or
	 * when there is no _STA.  We terminate the tree walk (with return 1)
	 * early if necessary.
	 */

	/* Evaluate _STA to decide _INI fate and walk fate */
	if (aml_evalinteger(sc, node->parent, "_STA", 0, NULL, &st))
		st = STA_PRESENT | STA_ENABLED | STA_DEV_OK | 0x1000;

	/* Evaluate _INI if we are present */
	if (st & STA_PRESENT)
		aml_evalnode(sc, node, 0, NULL, NULL);

	/* If we are functioning, we walk/search our children */
	if (st & STA_DEV_OK)
		return 0;

	/* If we are not enabled, or not present, terminate search */
	if (!(st & (STA_PRESENT|STA_ENABLED)))
		return 1;

	/* Default just continue search */
	return 0;
}

/*
 * aml_find_node() callback: attach acpiprt(4) for each _PRT (PCI
 * interrupt routing table) whose device is present per _STA.
 * Note: 'arg' is the acpi softc; casting it to struct device relies on
 * the device being the first member of struct acpi_softc.
 */
int
acpi_foundprt(struct aml_node *node, void *arg)
{
	struct acpi_softc *sc = (struct acpi_softc *)arg;
	struct device *self = (struct device *)arg;
	struct acpi_attach_args aaa;
	int64_t st = 0;

	dnprintf(10, "found prt entry: %s\n", node->parent->name);

	/* Evaluate _STA to decide _PRT fate and walk fate */
	if (aml_evalinteger(sc, node->parent, "_STA", 0, NULL, &st))
		st = STA_PRESENT | STA_ENABLED | STA_DEV_OK | 0x1000;

	if (st & STA_PRESENT) {
		memset(&aaa, 0, sizeof(aaa));
		aaa.aaa_iot = sc->sc_iot;
		aaa.aaa_memt = sc->sc_memt;
		aaa.aaa_node = node;
		aaa.aaa_name = "acpiprt";

		config_found(self, &aaa, acpi_print);
	}

	/* If we are functioning, we walk/search our children */
	if (st & STA_DEV_OK)
		return 0;

	/* If we are not enabled, or not present, terminate search */
	if (!(st & (STA_PRESENT|STA_ENABLED)))
		return 1;

	/* Default just continue search */
	return 0;
}

/* A node is an ATA device if it has any of the ATA timing/feature methods. */
int
is_ata(struct aml_node *node)
{
	return (aml_searchname(node, "_GTM") != NULL ||
	    aml_searchname(node, "_GTF") != NULL ||
	    aml_searchname(node, "_STM") != NULL ||
	    aml_searchname(node, "_SDD") != NULL);
}

/* A node is ejectable if it implements _EJ0. */
int
is_ejectable(struct aml_node *node)
{
	return (aml_searchname(node, "_EJ0") != NULL);
}

int
is_ejectable_bay(struct aml_node
*node) 382{ 383 return ((is_ata(node) || is_ata(node->parent)) && is_ejectable(node)); 384} 385 386int 387acpiide_notify(struct aml_node *node, int ntype, void *arg) 388{ 389 struct idechnl *ide = arg; 390 struct acpi_softc *sc = ide->sc; 391 struct pciide_softc *wsc; 392 struct device *dev; 393 int b,d,f; 394 int64_t sta; 395 396 if (aml_evalinteger(sc, node, "_STA", 0, NULL, &sta) != 0) 397 return (0); 398 399 dnprintf(10, "IDE notify! %s %d status:%llx\n", aml_nodename(node), 400 ntype, sta); 401 402 /* Walk device list looking for IDE device match */ 403 TAILQ_FOREACH(dev, &alldevs, dv_list) { 404 if (strcmp(dev->dv_cfdata->cf_driver->cd_name, "pciide")) 405 continue; 406 407 wsc = (struct pciide_softc *)dev; 408 pci_decompose_tag(NULL, wsc->sc_tag, &b, &d, &f); 409 if (b != ACPI_PCI_BUS(ide->addr) || 410 d != ACPI_PCI_DEV(ide->addr) || 411 f != ACPI_PCI_FN(ide->addr)) 412 continue; 413 dnprintf(10, "Found pciide: %s %x.%x.%x channel:%llx\n", 414 dev->dv_xname, b,d,f, ide->chnl); 415 416 if (sta == 0 && ide->sta) 417 wdcdetach( 418 &wsc->pciide_channels[ide->chnl].wdc_channel, 0); 419 else if (sta && !ide->sta) 420 wdcattach( 421 &wsc->pciide_channels[ide->chnl].wdc_channel); 422 ide->sta = sta; 423 } 424 return (0); 425} 426 427int 428acpi_foundide(struct aml_node *node, void *arg) 429{ 430 struct acpi_softc *sc = arg; 431 struct aml_node *pp; 432 struct idechnl *ide; 433 union amlpci_t pi; 434 int lvl; 435 436 /* Check if this is an ejectable bay */ 437 if (!is_ejectable_bay(node)) 438 return (0); 439 440 ide = malloc(sizeof(struct idechnl), M_DEVBUF, M_NOWAIT | M_ZERO); 441 ide->sc = sc; 442 443 /* GTM/GTF can be at 2/3 levels: pciX.ideX.channelX[.driveX] */ 444 lvl = 0; 445 for (pp=node->parent; pp; pp=pp->parent) { 446 lvl++; 447 if (aml_searchname(pp, "_HID")) 448 break; 449 } 450 451 /* Get PCI address and channel */ 452 if (lvl == 3) { 453 aml_evalinteger(sc, node->parent, "_ADR", 0, NULL, 454 &ide->chnl); 455 aml_rdpciaddr(node->parent->parent, &pi); 
		ide->addr = pi.addr;
	} else if (lvl == 4) {
		/* drive-level node: channel/_ADR one level further up */
		aml_evalinteger(sc, node->parent->parent, "_ADR", 0, NULL,
		    &ide->chnl);
		aml_rdpciaddr(node->parent->parent->parent, &pi);
		ide->addr = pi.addr;
	}
	dnprintf(10, "%s %llx channel:%llx\n",
	    aml_nodename(node), ide->addr, ide->chnl);

	/* Remember the initial bay status so notify sees state changes. */
	aml_evalinteger(sc, node, "_STA", 0, NULL, &ide->sta);
	dnprintf(10, "Got Initial STA: %llx\n", ide->sta);

	aml_register_notify(node, "acpiide", acpiide_notify, ide, 0);
	return (0);
}

/*
 * autoconf match: accept only the bios-provided "acpi" attach args and
 * only when the MD probe confirms ACPI is usable on this machine.
 */
int
acpi_match(struct device *parent, void *match, void *aux)
{
	struct bios_attach_args *ba = aux;
	struct cfdata *cf = match;

	/* sanity */
	if (strcmp(ba->ba_name, cf->cf_driver->cd_name))
		return (0);

	if (!acpi_probe(parent, cf, ba))
		return (0);

	return (1);
}

/*
 * autoconf attach: map the RSDP, load the ACPI tables, parse the
 * DSDT/SSDTs, take over ACPI control from the firmware and attach
 * child devices found in the namespace.
 */
void
acpi_attach(struct device *parent, struct device *self, void *aux)
{
	struct bios_attach_args *ba = aux;
	struct acpi_softc *sc = (struct acpi_softc *)self;
	struct acpi_mem_map handle;
	struct acpi_rsdp *rsdp;
	struct acpi_q *entry;
	struct acpi_dsdt *p_dsdt;
	int idx;
#ifndef SMALL_KERNEL
	struct acpi_wakeq *wentry;
	struct device *dev;
	struct acpi_ac *ac;
	struct acpi_bat *bat;
#endif /* SMALL_KERNEL */
	paddr_t facspa;

	sc->sc_iot = ba->ba_iot;
	sc->sc_memt = ba->ba_memt;

	if (acpi_map(ba->ba_acpipbase, sizeof(struct acpi_rsdp), &handle)) {
		printf(": can't map memory\n");
		return;
	}

	rsdp = (struct acpi_rsdp *)handle.va;
	sc->sc_revision = (int)rsdp->rsdp_revision;
	printf(": rev %d", sc->sc_revision);

	SIMPLEQ_INIT(&sc->sc_tables);
	SIMPLEQ_INIT(&sc->sc_wakedevs);

#ifndef SMALL_KERNEL
	/* klist for the apm-compatible kqueue interface */
	sc->sc_note = malloc(sizeof(struct klist), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_note == NULL) {
		printf(", can't allocate memory\n");
		acpi_unmap(&handle);
		return;
	}
#endif /* SMALL_KERNEL */

	if
(acpi_loadtables(sc, rsdp)) { 532 printf(", can't load tables\n"); 533 acpi_unmap(&handle); 534 return; 535 } 536 537 acpi_unmap(&handle); 538 539 /* 540 * Find the FADT 541 */ 542 SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) { 543 if (memcmp(entry->q_table, FADT_SIG, 544 sizeof(FADT_SIG) - 1) == 0) { 545 sc->sc_fadt = entry->q_table; 546 break; 547 } 548 } 549 if (sc->sc_fadt == NULL) { 550 printf(", no FADT\n"); 551 return; 552 } 553 554 /* 555 * Check if we are able to enable ACPI control 556 */ 557 if (!sc->sc_fadt->smi_cmd || 558 (!sc->sc_fadt->acpi_enable && !sc->sc_fadt->acpi_disable)) { 559 printf(", ACPI control unavailable\n"); 560 return; 561 } 562 563 /* 564 * Set up a pointer to the firmware control structure 565 */ 566 if (sc->sc_fadt->hdr_revision < 3 || sc->sc_fadt->x_firmware_ctl == 0) 567 facspa = sc->sc_fadt->firmware_ctl; 568 else 569 facspa = sc->sc_fadt->x_firmware_ctl; 570 571 if (acpi_map(facspa, sizeof(struct acpi_facs), &handle)) 572 printf(" !FACS"); 573 else 574 sc->sc_facs = (struct acpi_facs *)handle.va; 575 576 acpi_enabled = 1; 577 578 /* Create opcode hashtable */ 579 aml_hashopcodes(); 580 581 /* Create Default AML objects */ 582 aml_create_defaultobjects(); 583 584 /* 585 * Load the DSDT from the FADT pointer -- use the 586 * extended (64-bit) pointer if it exists 587 */ 588 if (sc->sc_fadt->hdr_revision < 3 || sc->sc_fadt->x_dsdt == 0) 589 entry = acpi_maptable(sc->sc_fadt->dsdt, NULL, NULL, NULL); 590 else 591 entry = acpi_maptable(sc->sc_fadt->x_dsdt, NULL, NULL, NULL); 592 593 if (entry == NULL) 594 printf(" !DSDT"); 595 SIMPLEQ_INSERT_HEAD(&sc->sc_tables, entry, q_next); 596 597 p_dsdt = entry->q_table; 598 acpi_parse_aml(sc, p_dsdt->aml, p_dsdt->hdr_length - 599 sizeof(p_dsdt->hdr)); 600 601 /* Load SSDT's */ 602 SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) { 603 if (memcmp(entry->q_table, SSDT_SIG, 604 sizeof(SSDT_SIG) - 1) == 0) { 605 p_dsdt = entry->q_table; 606 acpi_parse_aml(sc, p_dsdt->aml, p_dsdt->hdr_length - 607 
sizeof(p_dsdt->hdr));
		}
	}

	/* Perform post-parsing fixups */
	aml_postparse();

#ifndef SMALL_KERNEL
	/* Find available sleeping states */
	acpi_init_states(sc);

	/* Find available sleep/resume related methods. */
	acpi_init_pm(sc);
#endif /* SMALL_KERNEL */

	/* Map Power Management registers */
	acpi_map_pmregs(sc);

#ifndef SMALL_KERNEL
	/* Initialize GPE handlers */
	acpi_init_gpes(sc);

	/* some devices require periodic polling */
	timeout_set(&sc->sc_dev_timeout, acpi_poll, sc);
#endif /* SMALL_KERNEL */

	/*
	 * Take over ACPI control.  Note that once we do this, we
	 * effectively tell the system that we have ownership of
	 * the ACPI hardware registers, and that SMI should leave
	 * them alone
	 *
	 * This may prevent thermal control on some systems where
	 * that actually does work
	 */
	acpi_write_pmreg(sc, ACPIREG_SMICMD, 0, sc->sc_fadt->acpi_enable);
	idx = 0;
	/* Poll until the firmware sets SCI_EN, or give up after retries. */
	do {
		if (idx++ > ACPIEN_RETRIES) {
			printf(", can't enable ACPI\n");
			return;
		}
	} while (!(acpi_read_pmreg(sc, ACPIREG_PM1_CNT, 0) & ACPI_PM1_SCI_EN));

	printf("\n%s: tables", DEVNAME(sc));
	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
		printf(" %.4s", entry->q_table);
	}
	printf("\n");

#ifndef SMALL_KERNEL
	/* Display wakeup devices and lowest S-state */
	printf("%s: wakeup devices", DEVNAME(sc));
	SIMPLEQ_FOREACH(wentry, &sc->sc_wakedevs, q_next) {
		printf(" %.4s(S%d)", wentry->q_node->name,
		    wentry->q_state);
	}
	printf("\n");


	/*
	 * ACPI is enabled now -- attach timer
	 */
	{
		struct acpi_attach_args aaa;

		memset(&aaa, 0, sizeof(aaa));
		aaa.aaa_name = "acpitimer";
		aaa.aaa_iot = sc->sc_iot;
		aaa.aaa_memt = sc->sc_memt;
#if 0
		aaa.aaa_pcit = sc->sc_pcit;
		aaa.aaa_smbust = sc->sc_smbust;
#endif
		config_found(self, &aaa, acpi_print);
	}
#endif /* SMALL_KERNEL */

	/*
	 * Attach table-defined devices
	 */
	SIMPLEQ_FOREACH(entry, &sc->sc_tables, q_next) {
		struct acpi_attach_args aaa;

		memset(&aaa, 0, sizeof(aaa));
		aaa.aaa_iot = sc->sc_iot;
		aaa.aaa_memt = sc->sc_memt;
#if 0
		aaa.aaa_pcit = sc->sc_pcit;
		aaa.aaa_smbust = sc->sc_smbust;
#endif
		aaa.aaa_table = entry->q_table;
		config_found_sm(self, &aaa, acpi_print, acpi_submatch);
	}

	acpi_softc = sc;

	/* initialize runtime environment */
	aml_find_node(&aml_root, "_INI", acpi_inidev, sc);

	/* attach pci interrupt routing tables */
	aml_find_node(&aml_root, "_PRT", acpi_foundprt, sc);

#ifndef SMALL_KERNEL
	/* XXX EC needs to be attached first on some systems */
	aml_find_node(&aml_root, "_HID", acpi_foundec, sc);

	aml_walknodes(&aml_root, AML_WALK_PRE, acpi_add_device, sc);

	/* attach battery, power supply and button devices */
	aml_find_node(&aml_root, "_HID", acpi_foundhid, sc);

	/* Attach IDE bay */
	aml_walknodes(&aml_root, AML_WALK_PRE, acpi_foundide, sc);

	/* attach docks */
	aml_find_node(&aml_root, "_DCK", acpi_founddock, sc);

	/* attach video only if this is not a stinkpad */
	if (!acpi_thinkpad_enabled)
		aml_find_node(&aml_root, "_DOS", acpi_foundvideo, sc);

	/* create list of devices we want to query when APM come in */
	SLIST_INIT(&sc->sc_ac);
	SLIST_INIT(&sc->sc_bat);
	TAILQ_FOREACH(dev, &alldevs, dv_list) {
		if (!strcmp(dev->dv_cfdata->cf_driver->cd_name, "acpiac")) {
			ac = malloc(sizeof(*ac), M_DEVBUF, M_WAITOK | M_ZERO);
			ac->aac_softc = (struct acpiac_softc *)dev;
			SLIST_INSERT_HEAD(&sc->sc_ac, ac, aac_link);
		} else if (!strcmp(dev->dv_cfdata->cf_driver->cd_name, "acpibat")) {
			bat = malloc(sizeof(*bat), M_DEVBUF, M_WAITOK | M_ZERO);
			bat->aba_softc = (struct acpibat_softc *)dev;
			SLIST_INSERT_HEAD(&sc->sc_bat, bat, aba_link);
		}
	}

	/* Setup threads */
	sc->sc_thread = malloc(sizeof(struct acpi_thread), M_DEVBUF, M_WAITOK);
	sc->sc_thread->sc = sc;
	sc->sc_thread->running = 1;

	/* machine-dependent setup (interrupt hookup etc.) */
	acpi_attach_machdep(sc);

	kthread_create_deferred(acpi_create_thread, sc);
#endif /* SMALL_KERNEL */
}

/*
 * Submatch: only table-defined children (aaa_table set) go through
 * the normal driver match here; namespace devices use acpi_print only.
 */
int
acpi_submatch(struct device *parent, void *match, void *aux)
{
	struct acpi_attach_args *aaa = (struct acpi_attach_args *)aux;
	struct cfdata *cf = match;

	if (aaa->aaa_table == NULL)
		return (0);
	return ((*cf->cf_attach->ca_match)(parent, match, aux));
}

/* autoconf print function: quiet unless the child has a name. */
int
acpi_print(void *aux, const char *pnp)
{
	struct acpi_attach_args *aa = aux;

	if (pnp) {
		if (aa->aaa_name)
			printf("%s at %s", aa->aaa_name, pnp);
		else
			return (QUIET);
	}

	return (UNCONF);
}

/*
 * Map an ACPI table at physical address 'addr', validate its checksum
 * and (optionally) its signature/OEM id/OEM table id, and return a
 * malloc'ed copy as an acpi_q entry.  Returns NULL on any failure
 * (unmappable, bad checksum, mismatch, or out of memory).
 */
struct acpi_q *
acpi_maptable(paddr_t addr, const char *sig, const char *oem, const char *tbl)
{
	static int tblid;
	struct acpi_mem_map handle;
	struct acpi_table_header *hdr;
	struct acpi_q *entry;
	size_t len;

	/* Check if we can map address */
	if (addr == 0)
		return NULL;
	if (acpi_map(addr, sizeof(*hdr), &handle))
		return NULL;
	hdr = (struct acpi_table_header *)handle.va;
	len = hdr->length;
	acpi_unmap(&handle);

	/* Validate length/checksum */
	if (acpi_map(addr, len, &handle))
		return NULL;
	hdr = (struct acpi_table_header *)handle.va;
	if (acpi_checksum(hdr, len)) {
		acpi_unmap(&handle);
		return NULL;
	}
	if ((sig && memcmp(sig, hdr->signature, 4)) ||
	    (oem && memcmp(oem, hdr->oemid, 6)) ||
	    (tbl && memcmp(tbl, hdr->oemtableid, 8))) {
		acpi_unmap(&handle);
		return NULL;
	}

	/* Allocate copy */
	entry = malloc(len + sizeof(*entry), M_DEVBUF, M_NOWAIT);
	if (entry != NULL) {
		memcpy(entry->q_data, handle.va, len);
		entry->q_table = entry->q_data;
		entry->q_id = ++tblid;
	}
	acpi_unmap(&handle);
	return entry;
}

int
826acpi_loadtables(struct acpi_softc *sc, struct acpi_rsdp *rsdp) 827{ 828 struct acpi_q *entry, *sdt; 829 int i, ntables; 830 size_t len; 831 832 if (rsdp->rsdp_revision == 2 && rsdp->rsdp_xsdt) { 833 struct acpi_xsdt *xsdt; 834 835 sdt = acpi_maptable(rsdp->rsdp_xsdt, NULL, NULL, NULL); 836 if (sdt == NULL) { 837 printf("couldn't map rsdt\n"); 838 return (ENOMEM); 839 } 840 841 xsdt = (struct acpi_xsdt *)sdt->q_data; 842 len = xsdt->hdr.length; 843 ntables = (len - sizeof(struct acpi_table_header)) / 844 sizeof(xsdt->table_offsets[0]); 845 846 for (i = 0; i < ntables; i++) { 847 entry = acpi_maptable(xsdt->table_offsets[i], NULL, NULL, 848 NULL); 849 if (entry != NULL) 850 SIMPLEQ_INSERT_TAIL(&sc->sc_tables, entry, 851 q_next); 852 } 853 free(sdt, M_DEVBUF); 854 } else { 855 struct acpi_rsdt *rsdt; 856 857 sdt = acpi_maptable(rsdp->rsdp_rsdt, NULL, NULL, NULL); 858 if (sdt == NULL) { 859 printf("couldn't map rsdt\n"); 860 return (ENOMEM); 861 } 862 863 rsdt = (struct acpi_rsdt *)sdt->q_data; 864 len = rsdt->hdr.length; 865 ntables = (len - sizeof(struct acpi_table_header)) / 866 sizeof(rsdt->table_offsets[0]); 867 868 for (i = 0; i < ntables; i++) { 869 entry = acpi_maptable(rsdt->table_offsets[i], NULL, NULL, 870 NULL); 871 if (entry != NULL) 872 SIMPLEQ_INSERT_TAIL(&sc->sc_tables, entry, 873 q_next); 874 } 875 free(sdt, M_DEVBUF); 876 } 877 878 return (0); 879} 880 881int 882acpiopen(dev_t dev, int flag, int mode, struct proc *p) 883{ 884 int error = 0; 885#ifndef SMALL_KERNEL 886 struct acpi_softc *sc; 887 888 if (!acpi_cd.cd_ndevs || APMUNIT(dev) != 0 || 889 !(sc = acpi_cd.cd_devs[APMUNIT(dev)])) 890 return (ENXIO); 891 892 switch (APMDEV(dev)) { 893 case APMDEV_CTL: 894 if (!(flag & FWRITE)) { 895 error = EINVAL; 896 break; 897 } 898 break; 899 case APMDEV_NORMAL: 900 if (!(flag & FREAD) || (flag & FWRITE)) { 901 error = EINVAL; 902 break; 903 } 904 break; 905 default: 906 error = ENXIO; 907 break; 908 } 909#else 910 error = ENXIO; 911#endif 912 return 
(error);
}

/* /dev/acpi close: nothing to tear down, just validate the minor. */
int
acpiclose(dev_t dev, int flag, int mode, struct proc *p)
{
	int error = 0;
#ifndef SMALL_KERNEL
	struct acpi_softc *sc;

	if (!acpi_cd.cd_ndevs || APMUNIT(dev) != 0 ||
	    !(sc = acpi_cd.cd_devs[APMUNIT(dev)]))
		return (ENXIO);

	switch (APMDEV(dev)) {
	case APMDEV_CTL:
	case APMDEV_NORMAL:
		break;
	default:
		error = ENXIO;
		break;
	}
#else
	error = ENXIO;
#endif
	return (error);
}

/*
 * /dev/acpi ioctl: emulate the apm(4) interface on top of ACPI --
 * suspend requests are queued as a workq task, and GETPOWER
 * aggregates the attached acpiac/acpibat devices into apm_power_info.
 * NOTE(review): suspend ioctls are not gated on FWRITE here -- verify
 * whether a read-only fd should be able to trigger suspend.
 */
int
acpiioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	int error = 0;
#ifndef SMALL_KERNEL
	struct acpi_softc *sc;
	struct acpi_ac *ac;
	struct acpi_bat *bat;
	struct apm_power_info *pi = (struct apm_power_info *)data;
	int bats;
	unsigned int remaining, rem, minutes, rate;

	if (!acpi_cd.cd_ndevs || APMUNIT(dev) != 0 ||
	    !(sc = acpi_cd.cd_devs[APMUNIT(dev)]))
		return (ENXIO);

	ACPI_LOCK(sc);
	/* fake APM */
	switch (cmd) {
	case APM_IOC_STANDBY_REQ:
	case APM_IOC_SUSPEND_REQ:
	case APM_IOC_SUSPEND:
	case APM_IOC_STANDBY:
		/* all four requests map to an S3 sleep */
		workq_add_task(NULL, 0, (workq_fn)acpi_sleep_state,
		    acpi_softc, (void *)ACPI_STATE_S3);
		break;
	case APM_IOC_GETPOWER:
		/* A/C */
		pi->ac_state = APM_AC_UNKNOWN;
		SLIST_FOREACH(ac, &sc->sc_ac, aac_link) {
			if (ac->aac_softc->sc_ac_stat == PSR_ONLINE)
				pi->ac_state = APM_AC_ON;
			else if (ac->aac_softc->sc_ac_stat == PSR_OFFLINE)
				if (pi->ac_state == APM_AC_UNKNOWN)
					pi->ac_state = APM_AC_OFF;
		}

		/* battery */
		pi->battery_state = APM_BATT_UNKNOWN;
		pi->battery_life = 0;
		pi->minutes_left = 0;
		bats = 0;
		remaining = rem = 0;
		minutes = 0;
		rate = 0;
		/* Sum capacity over all present batteries with known capacity. */
		SLIST_FOREACH(bat, &sc->sc_bat, aba_link) {
			if (bat->aba_softc->sc_bat_present == 0)
				continue;

			if (bat->aba_softc->sc_bif.bif_last_capacity == 0)
				continue;

			bats++;
			rem = (bat->aba_softc->sc_bst.bst_capacity * 100) /
			    bat->aba_softc->sc_bif.bif_last_capacity;
			if (rem > 100)
				rem = 100;
			remaining += rem;

			if (bat->aba_softc->sc_bst.bst_rate == BST_UNKNOWN)
				continue;
			else if (bat->aba_softc->sc_bst.bst_rate > 1)
				rate = bat->aba_softc->sc_bst.bst_rate;

			minutes += bat->aba_softc->sc_bst.bst_capacity;
		}

		if (bats == 0) {
			pi->battery_state = APM_BATTERY_ABSENT;
			pi->battery_life = 0;
			pi->minutes_left = (unsigned int)-1;
			break;
		}

		/* time left only meaningful on battery with a known rate */
		if (pi->ac_state == APM_AC_ON || rate == 0)
			pi->minutes_left = (unsigned int)-1;
		else
			pi->minutes_left = 100 * minutes / rate;

		/* running on battery */
		pi->battery_life = remaining / bats;
		if (pi->battery_life > 50)
			pi->battery_state = APM_BATT_HIGH;
		else if (pi->battery_life > 25)
			pi->battery_state = APM_BATT_LOW;
		else
			pi->battery_state = APM_BATT_CRITICAL;

		break;

	default:
		error = ENOTTY;
	}

	ACPI_UNLOCK(sc);
#else
	error = ENXIO;
#endif /* SMALL_KERNEL */
	return (error);
}

/* kqueue filter detach: remove the knote from the softc's klist. */
void
acpi_filtdetach(struct knote *kn)
{
#ifndef SMALL_KERNEL
	struct acpi_softc *sc = kn->kn_hook;

	ACPI_LOCK(sc);
	SLIST_REMOVE(sc->sc_note, kn, knote, kn_selnext);
	ACPI_UNLOCK(sc);
#endif
}

/*
 * kqueue read filter: always ready.  The 'hint & !kn->kn_data'
 * expression mixes bitwise and logical operators (only latches hint
 * when kn_data is 0 and hint's low bit is set) -- flagged XXX upstream.
 */
int
acpi_filtread(struct knote *kn, long hint)
{
#ifndef SMALL_KERNEL
	/* XXX weird kqueue_scan() semantics */
	if (hint & !kn->kn_data)
		kn->kn_data = hint;
#endif
	return (1);
}

/* kqueue attach for /dev/acpi: only EVFILT_READ is supported. */
int
acpikqfilter(dev_t dev, struct knote *kn)
{
#ifndef SMALL_KERNEL
	struct acpi_softc *sc;

	if (!acpi_cd.cd_ndevs || APMUNIT(dev) != 0 ||
	    !(sc = acpi_cd.cd_devs[APMUNIT(dev)]))
		return (ENXIO);

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &acpiread_filtops;
		break;
	default:
		return (1);
	}

	kn->kn_hook = sc;

	ACPI_LOCK(sc);
	SLIST_INSERT_HEAD(sc->sc_note, kn, kn_selnext);
	ACPI_UNLOCK(sc);

	return (0);
#else
	return (1);
#endif
}

/* Read from power management register */
/*
 * The PM1_* aliases read both the A and B blocks and OR the results;
 * GPE_STS/GPE_EN are redirected to the GPE0 block (byte-wide access)
 * when the offset falls inside it.  Unmapped registers read as 0.
 * NOTE: '__size' is a reserved identifier in C -- kept for fidelity
 * with the original source.
 */
int
acpi_read_pmreg(struct acpi_softc *sc, int reg, int offset)
{
	bus_space_handle_t ioh;
	bus_size_t size, __size;
	int regval;

	__size = 0;
	/* Special cases: 1A/1B blocks can be OR'ed together */
	switch (reg) {
	case ACPIREG_PM1_EN:
		return (acpi_read_pmreg(sc, ACPIREG_PM1A_EN, offset) |
		    acpi_read_pmreg(sc, ACPIREG_PM1B_EN, offset));
	case ACPIREG_PM1_STS:
		return (acpi_read_pmreg(sc, ACPIREG_PM1A_STS, offset) |
		    acpi_read_pmreg(sc, ACPIREG_PM1B_STS, offset));
	case ACPIREG_PM1_CNT:
		return (acpi_read_pmreg(sc, ACPIREG_PM1A_CNT, offset) |
		    acpi_read_pmreg(sc, ACPIREG_PM1B_CNT, offset));
	case ACPIREG_GPE_STS:
		__size = 1;
		dnprintf(50, "read GPE_STS offset: %.2x %.2x %.2x\n", offset,
		    sc->sc_fadt->gpe0_blk_len>>1, sc->sc_fadt->gpe1_blk_len>>1);
		if (offset < (sc->sc_fadt->gpe0_blk_len >> 1)) {
			reg = ACPIREG_GPE0_STS;
		}
		break;
	case ACPIREG_GPE_EN:
		__size = 1;
		dnprintf(50, "read GPE_EN offset: %.2x %.2x %.2x\n",
		    offset, sc->sc_fadt->gpe0_blk_len>>1,
		    sc->sc_fadt->gpe1_blk_len>>1);
		if (offset < (sc->sc_fadt->gpe0_blk_len >> 1)) {
			reg = ACPIREG_GPE0_EN;
		}
		break;
	}

	if (reg >= ACPIREG_MAXREG || sc->sc_pmregs[reg].size == 0)
		return (0);

	regval = 0;
	ioh = sc->sc_pmregs[reg].ioh;
	size = sc->sc_pmregs[reg].size;
	if (__size)
		size = __size;
	/* clamp to the widest supported bus access */
	if (size > 4)
		size = 4;

	switch (size) {
	case 1:
		regval = bus_space_read_1(sc->sc_iot, ioh, offset);
		break;
	case 2:
		regval = bus_space_read_2(sc->sc_iot, ioh, offset);
		break;
	case 4:
		regval = bus_space_read_4(sc->sc_iot, ioh, offset);
		break;
	}

	dnprintf(30, "acpi_readpm: %s = %.4x:%.4x %x\n",
	    sc->sc_pmregs[reg].name,
	    sc->sc_pmregs[reg].addr, offset, regval);
	return (regval);
}

/* Write to power management register */
/*
 * Mirror of acpi_read_pmreg(): PM1_* aliases write the same value to
 * both A and B blocks, GPE_STS/GPE_EN are redirected to the GPE0
 * block with byte-wide access.  Writes to unmapped registers are
 * silently dropped.
 */
void
acpi_write_pmreg(struct acpi_softc *sc, int reg, int offset, int regval)
{
	bus_space_handle_t ioh;
	bus_size_t size, __size;

	__size = 0;
	/* Special cases: 1A/1B blocks can be written with same value */
	switch (reg) {
	case ACPIREG_PM1_EN:
		acpi_write_pmreg(sc, ACPIREG_PM1A_EN, offset, regval);
		acpi_write_pmreg(sc, ACPIREG_PM1B_EN, offset, regval);
		break;
	case ACPIREG_PM1_STS:
		acpi_write_pmreg(sc, ACPIREG_PM1A_STS, offset, regval);
		acpi_write_pmreg(sc, ACPIREG_PM1B_STS, offset, regval);
		break;
	case ACPIREG_PM1_CNT:
		acpi_write_pmreg(sc, ACPIREG_PM1A_CNT, offset, regval);
		acpi_write_pmreg(sc, ACPIREG_PM1B_CNT, offset, regval);
		break;
	case ACPIREG_GPE_STS:
		__size = 1;
		dnprintf(50, "write GPE_STS offset: %.2x %.2x %.2x %.2x\n",
		    offset, sc->sc_fadt->gpe0_blk_len>>1,
		    sc->sc_fadt->gpe1_blk_len>>1, regval);
		if (offset < (sc->sc_fadt->gpe0_blk_len >> 1)) {
			reg = ACPIREG_GPE0_STS;
		}
		break;
	case ACPIREG_GPE_EN:
		__size = 1;
		dnprintf(50, "write GPE_EN offset: %.2x %.2x %.2x %.2x\n",
		    offset, sc->sc_fadt->gpe0_blk_len>>1,
		    sc->sc_fadt->gpe1_blk_len>>1, regval);
		if (offset < (sc->sc_fadt->gpe0_blk_len >> 1)) {
			reg = ACPIREG_GPE0_EN;
		}
		break;
	}

	/* All special case return here */
	if (reg >= ACPIREG_MAXREG)
		return;

	ioh = sc->sc_pmregs[reg].ioh;
	size = sc->sc_pmregs[reg].size;
	if (__size)
		size = __size;
	/* clamp to the widest supported bus access */
	if (size > 4)
		size = 4;
	switch (size) {
	case 1:
		bus_space_write_1(sc->sc_iot, ioh, offset, regval);
		break;
	case 2:
		bus_space_write_2(sc->sc_iot, ioh, offset, regval);
		break;
	case 4:
bus_space_write_4(sc->sc_iot, ioh, offset, regval); 1223 break; 1224 } 1225 1226 dnprintf(30, "acpi_writepm: %s = %.4x:%.4x %x\n", 1227 sc->sc_pmregs[reg].name, sc->sc_pmregs[reg].addr, offset, regval); 1228} 1229 1230/* Map Power Management registers */ 1231void 1232acpi_map_pmregs(struct acpi_softc *sc) 1233{ 1234 bus_addr_t addr; 1235 bus_size_t size; 1236 const char *name; 1237 int reg; 1238 1239 for (reg = 0; reg < ACPIREG_MAXREG; reg++) { 1240 size = 0; 1241 switch (reg) { 1242 case ACPIREG_SMICMD: 1243 name = "smi"; 1244 size = 1; 1245 addr = sc->sc_fadt->smi_cmd; 1246 break; 1247 case ACPIREG_PM1A_STS: 1248 case ACPIREG_PM1A_EN: 1249 name = "pm1a_sts"; 1250 size = sc->sc_fadt->pm1_evt_len >> 1; 1251 addr = sc->sc_fadt->pm1a_evt_blk; 1252 if (reg == ACPIREG_PM1A_EN && addr) { 1253 addr += size; 1254 name = "pm1a_en"; 1255 } 1256 break; 1257 case ACPIREG_PM1A_CNT: 1258 name = "pm1a_cnt"; 1259 size = sc->sc_fadt->pm1_cnt_len; 1260 addr = sc->sc_fadt->pm1a_cnt_blk; 1261 break; 1262 case ACPIREG_PM1B_STS: 1263 case ACPIREG_PM1B_EN: 1264 name = "pm1b_sts"; 1265 size = sc->sc_fadt->pm1_evt_len >> 1; 1266 addr = sc->sc_fadt->pm1b_evt_blk; 1267 if (reg == ACPIREG_PM1B_EN && addr) { 1268 addr += size; 1269 name = "pm1b_en"; 1270 } 1271 break; 1272 case ACPIREG_PM1B_CNT: 1273 name = "pm1b_cnt"; 1274 size = sc->sc_fadt->pm1_cnt_len; 1275 addr = sc->sc_fadt->pm1b_cnt_blk; 1276 break; 1277 case ACPIREG_PM2_CNT: 1278 name = "pm2_cnt"; 1279 size = sc->sc_fadt->pm2_cnt_len; 1280 addr = sc->sc_fadt->pm2_cnt_blk; 1281 break; 1282#if 0 1283 case ACPIREG_PM_TMR: 1284 /* Allocated in acpitimer */ 1285 name = "pm_tmr"; 1286 size = sc->sc_fadt->pm_tmr_len; 1287 addr = sc->sc_fadt->pm_tmr_blk; 1288 break; 1289#endif 1290 case ACPIREG_GPE0_STS: 1291 case ACPIREG_GPE0_EN: 1292 name = "gpe0_sts"; 1293 size = sc->sc_fadt->gpe0_blk_len >> 1; 1294 addr = sc->sc_fadt->gpe0_blk; 1295 1296 dnprintf(20, "gpe0 block len : %x\n", 1297 sc->sc_fadt->gpe0_blk_len >> 1); 1298 dnprintf(20, "gpe0 
block addr: %x\n", 1299 sc->sc_fadt->gpe0_blk); 1300 if (reg == ACPIREG_GPE0_EN && addr) { 1301 addr += size; 1302 name = "gpe0_en"; 1303 } 1304 break; 1305 case ACPIREG_GPE1_STS: 1306 case ACPIREG_GPE1_EN: 1307 name = "gpe1_sts"; 1308 size = sc->sc_fadt->gpe1_blk_len >> 1; 1309 addr = sc->sc_fadt->gpe1_blk; 1310 1311 dnprintf(20, "gpe1 block len : %x\n", 1312 sc->sc_fadt->gpe1_blk_len >> 1); 1313 dnprintf(20, "gpe1 block addr: %x\n", 1314 sc->sc_fadt->gpe1_blk); 1315 if (reg == ACPIREG_GPE1_EN && addr) { 1316 addr += size; 1317 name = "gpe1_en"; 1318 } 1319 break; 1320 } 1321 if (size && addr) { 1322 dnprintf(50, "mapping: %.4x %.4x %s\n", 1323 addr, size, name); 1324 1325 /* Size and address exist; map register space */ 1326 bus_space_map(sc->sc_iot, addr, size, 0, 1327 &sc->sc_pmregs[reg].ioh); 1328 1329 sc->sc_pmregs[reg].name = name; 1330 sc->sc_pmregs[reg].size = size; 1331 sc->sc_pmregs[reg].addr = addr; 1332 } 1333 } 1334} 1335 1336/* move all stuff that doesn't go on the boot media in here */ 1337#ifndef SMALL_KERNEL 1338void 1339acpi_reset(void) 1340{ 1341 struct acpi_fadt *fadt; 1342 u_int32_t reset_as, reset_len; 1343 u_int32_t value; 1344 1345 fadt = acpi_softc->sc_fadt; 1346 1347 /* 1348 * RESET_REG_SUP is not properly set in some implementations, 1349 * but not testing against it breaks more machines than it fixes 1350 */ 1351 if (acpi_softc->sc_revision <= 1 || 1352 !(fadt->flags & FADT_RESET_REG_SUP) || fadt->reset_reg.address == 0) 1353 return; 1354 1355 value = fadt->reset_value; 1356 1357 reset_as = fadt->reset_reg.register_bit_width / 8; 1358 if (reset_as == 0) 1359 reset_as = 1; 1360 1361 reset_len = fadt->reset_reg.access_size; 1362 if (reset_len == 0) 1363 reset_len = reset_as; 1364 1365 acpi_gasio(acpi_softc, ACPI_IOWRITE, 1366 fadt->reset_reg.address_space_id, 1367 fadt->reset_reg.address, reset_as, reset_len, &value); 1368 1369 delay(100000); 1370} 1371 1372int 1373acpi_interrupt(void *arg) 1374{ 1375 struct acpi_softc *sc = (struct 
acpi_softc *)arg; 1376 u_int32_t processed, sts, en, idx, jdx; 1377 1378 processed = 0; 1379 1380#if 0 1381 acpi_add_gpeblock(sc, sc->sc_fadt->gpe0_blk, sc->sc_fadt->gpe0_blk_len>>1, 0); 1382 acpi_add_gpeblock(sc, sc->sc_fadt->gpe1_blk, sc->sc_fadt->gpe1_blk_len>>1, 1383 sc->sc_fadt->gpe1_base); 1384#endif 1385 1386 dnprintf(40, "ACPI Interrupt\n"); 1387 for (idx = 0; idx < sc->sc_lastgpe; idx += 8) { 1388 sts = acpi_read_pmreg(sc, ACPIREG_GPE_STS, idx>>3); 1389 en = acpi_read_pmreg(sc, ACPIREG_GPE_EN, idx>>3); 1390 if (en & sts) { 1391 dnprintf(10, "GPE block: %.2x %.2x %.2x\n", idx, sts, 1392 en); 1393 acpi_write_pmreg(sc, ACPIREG_GPE_EN, idx>>3, en & ~sts); 1394 for (jdx = 0; jdx < 8; jdx++) { 1395 if (en & sts & (1L << jdx)) { 1396 /* Signal this GPE */ 1397 sc->gpe_table[idx+jdx].active = 1; 1398 processed = 1; 1399 } 1400 } 1401 } 1402 } 1403 1404 sts = acpi_read_pmreg(sc, ACPIREG_PM1_STS, 0); 1405 en = acpi_read_pmreg(sc, ACPIREG_PM1_EN, 0); 1406 if (sts & en) { 1407 dnprintf(10,"GEN interrupt: %.4x\n", sts & en); 1408 acpi_write_pmreg(sc, ACPIREG_PM1_EN, 0, en & ~sts); 1409 acpi_write_pmreg(sc, ACPIREG_PM1_STS, 0, en); 1410 acpi_write_pmreg(sc, ACPIREG_PM1_EN, 0, en); 1411 if (sts & ACPI_PM1_PWRBTN_STS) 1412 sc->sc_powerbtn = 1; 1413 if (sts & ACPI_PM1_SLPBTN_STS) 1414 sc->sc_sleepbtn = 1; 1415 processed = 1; 1416 } 1417 1418 if (processed) { 1419 sc->sc_wakeup = 0; 1420 wakeup(sc); 1421 } 1422 1423 return (processed); 1424} 1425 1426int 1427acpi_add_device(struct aml_node *node, void *arg) 1428{ 1429 static int nacpicpus = 0; 1430 struct device *self = arg; 1431 struct acpi_softc *sc = arg; 1432 struct acpi_attach_args aaa; 1433#ifdef MULTIPROCESSOR 1434 struct aml_value res; 1435 int proc_id = -1; 1436#endif 1437 1438 memset(&aaa, 0, sizeof(aaa)); 1439 aaa.aaa_node = node; 1440 aaa.aaa_iot = sc->sc_iot; 1441 aaa.aaa_memt = sc->sc_memt; 1442 if (node == NULL || node->value == NULL) 1443 return 0; 1444 1445 switch (node->value->type) { 1446 case 
AML_OBJTYPE_PROCESSOR: 1447 if (nacpicpus >= ncpus) 1448 return 0; 1449#ifdef MULTIPROCESSOR 1450 if (aml_evalnode(sc, aaa.aaa_node, 0, NULL, &res) == 0) { 1451 if (res.type == AML_OBJTYPE_PROCESSOR) 1452 proc_id = res.v_processor.proc_id; 1453 aml_freevalue(&res); 1454 } 1455 if (proc_id < -1 || proc_id >= LAPIC_MAP_SIZE || 1456 (acpi_lapic_flags[proc_id] & ACPI_PROC_ENABLE) == 0) 1457 return 0; 1458#endif 1459 nacpicpus++; 1460 1461 aaa.aaa_name = "acpicpu"; 1462 break; 1463 case AML_OBJTYPE_THERMZONE: 1464 aaa.aaa_name = "acpitz"; 1465 break; 1466 case AML_OBJTYPE_POWERRSRC: 1467 aaa.aaa_name = "acpipwrres"; 1468 break; 1469 default: 1470 return 0; 1471 } 1472 config_found(self, &aaa, acpi_print); 1473 return 0; 1474} 1475 1476void 1477acpi_enable_onegpe(struct acpi_softc *sc, int gpe, int enable) 1478{ 1479 uint8_t mask = (1L << (gpe & 7)); 1480 uint8_t en; 1481 1482 /* Read enabled register */ 1483 en = acpi_read_pmreg(sc, ACPIREG_GPE_EN, gpe>>3); 1484 dnprintf(50, "%sabling GPE %.2x (current: %sabled) %.2x\n", 1485 enable ? "en" : "dis", gpe, (en & mask) ? 
"en" : "dis", en); 1486 if (enable) 1487 en |= mask; 1488 else 1489 en &= ~mask; 1490 acpi_write_pmreg(sc, ACPIREG_GPE_EN, gpe>>3, en); 1491} 1492 1493int 1494acpi_set_gpehandler(struct acpi_softc *sc, int gpe, int (*handler) 1495 (struct acpi_softc *, int, void *), void *arg, const char *label) 1496{ 1497 struct gpe_block *ptbl; 1498 1499 ptbl = acpi_find_gpe(sc, gpe); 1500 if (ptbl == NULL || handler == NULL) 1501 return -EINVAL; 1502 if (ptbl->handler != NULL) { 1503 dnprintf(10, "error: GPE %.2x already enabled\n", gpe); 1504 return -EBUSY; 1505 } 1506 dnprintf(50, "Adding GPE handler %.2x (%s)\n", gpe, label); 1507 ptbl->handler = handler; 1508 ptbl->arg = arg; 1509 1510 return (0); 1511} 1512 1513int 1514acpi_gpe_level(struct acpi_softc *sc, int gpe, void *arg) 1515{ 1516 struct aml_node *node = arg; 1517 uint8_t mask; 1518 1519 dnprintf(10, "handling Level-sensitive GPE %.2x\n", gpe); 1520 mask = (1L << (gpe & 7)); 1521 1522 aml_evalnode(sc, node, 0, NULL, NULL); 1523 acpi_write_pmreg(sc, ACPIREG_GPE_STS, gpe>>3, mask); 1524 acpi_write_pmreg(sc, ACPIREG_GPE_EN, gpe>>3, mask); 1525 1526 return (0); 1527} 1528 1529int 1530acpi_gpe_edge(struct acpi_softc *sc, int gpe, void *arg) 1531{ 1532 1533 struct aml_node *node = arg; 1534 uint8_t mask; 1535 1536 dnprintf(10, "handling Edge-sensitive GPE %.2x\n", gpe); 1537 mask = (1L << (gpe & 7)); 1538 1539 aml_evalnode(sc, node, 0, NULL, NULL); 1540 acpi_write_pmreg(sc, ACPIREG_GPE_STS, gpe>>3, mask); 1541 acpi_write_pmreg(sc, ACPIREG_GPE_EN, gpe>>3, mask); 1542 1543 return (0); 1544} 1545 1546/* Discover Devices that can wakeup the system 1547 * _PRW returns a package 1548 * pkg[0] = integer (FADT gpe bit) or package (gpe block,gpe bit) 1549 * pkg[1] = lowest sleep state 1550 * pkg[2+] = power resource devices (optional) 1551 * 1552 * To enable wakeup devices: 1553 * Evaluate _ON method in each power resource device 1554 * Evaluate _PSW method 1555 */ 1556int 1557acpi_foundprw(struct aml_node *node, void *arg) 1558{ 
1559 struct acpi_softc *sc = arg; 1560 struct acpi_wakeq *wq; 1561 1562 wq = malloc(sizeof(struct acpi_wakeq), M_DEVBUF, M_NOWAIT | M_ZERO); 1563 if (wq == NULL) { 1564 return 0; 1565 } 1566 1567 wq->q_wakepkg = malloc(sizeof(struct aml_value), M_DEVBUF, 1568 M_NOWAIT | M_ZERO); 1569 if (wq->q_wakepkg == NULL) { 1570 free(wq, M_DEVBUF); 1571 return 0; 1572 } 1573 dnprintf(10, "Found _PRW (%s)\n", node->parent->name); 1574 aml_evalnode(sc, node, 0, NULL, wq->q_wakepkg); 1575 wq->q_node = node->parent; 1576 wq->q_gpe = -1; 1577 1578 /* Get GPE of wakeup device, and lowest sleep level */ 1579 if (wq->q_wakepkg->type == AML_OBJTYPE_PACKAGE && wq->q_wakepkg->length >= 2) { 1580 if (wq->q_wakepkg->v_package[0]->type == AML_OBJTYPE_INTEGER) { 1581 wq->q_gpe = wq->q_wakepkg->v_package[0]->v_integer; 1582 } 1583 if (wq->q_wakepkg->v_package[1]->type == AML_OBJTYPE_INTEGER) { 1584 wq->q_state = wq->q_wakepkg->v_package[1]->v_integer; 1585 } 1586 } 1587 SIMPLEQ_INSERT_TAIL(&sc->sc_wakedevs, wq, q_next); 1588 return 0; 1589} 1590 1591struct gpe_block * 1592acpi_find_gpe(struct acpi_softc *sc, int gpe) 1593{ 1594#if 1 1595 if (gpe >= sc->sc_lastgpe) 1596 return NULL; 1597 return &sc->gpe_table[gpe]; 1598#else 1599 SIMPLEQ_FOREACH(pgpe, &sc->sc_gpes, gpe_link) { 1600 if (gpe >= pgpe->start && gpe <= (pgpe->start+7)) 1601 return &pgpe->table[gpe & 7]; 1602 } 1603 return NULL; 1604#endif 1605} 1606 1607#if 0 1608/* New GPE handling code: Create GPE block */ 1609void 1610acpi_init_gpeblock(struct acpi_softc *sc, int reg, int len, int base) 1611{ 1612 int i, j; 1613 1614 if (!reg || !len) 1615 return; 1616 for (i = 0; i < len; i++) { 1617 pgpe = acpi_os_malloc(sizeof(gpeblock)); 1618 if (pgpe == NULL) 1619 return; 1620 1621 /* Allocate GPE Handler Block */ 1622 pgpe->start = base + i; 1623 acpi_bus_space_map(sc->sc_iot, reg+i, 1, 0, &pgpe->sts_ioh); 1624 acpi_bus_space_map(sc->sc_iot, reg+i+len, 1, 0, &pgpe->en_ioh); 1625 SIMPLEQ_INSERT_TAIL(&sc->sc_gpes, gpe, gpe_link); 1626 1627 
/* Clear pending GPEs */ 1628 bus_space_write_1(sc->sc_iot, pgpe->sts_ioh, 0, 0xFF); 1629 bus_space_write_1(sc->sc_iot, pgpe->en_ioh, 0, 0x00); 1630 } 1631 1632 /* Search for GPE handlers */ 1633 for (i = 0; i < len*8; i++) { 1634 char gpestr[32]; 1635 struct aml_node *h; 1636 1637 snprintf(gpestr, sizeof(gpestr), "\\_GPE._L%.2X", base+i); 1638 h = aml_searchnode(&aml_root, gpestr); 1639 if (acpi_set_gpehandler(sc, base+i, acpi_gpe_level, h, "level") != 0) { 1640 snprintf(gpestr, sizeof(gpestr), "\\_GPE._E%.2X", base+i); 1641 h = aml_searchnode(&aml_root, gpestr); 1642 acpi_set_gpehandler(sc, base+i, acpi_gpe_edge, h, "edge"); 1643 } 1644 } 1645} 1646 1647/* Process GPE interrupts */ 1648int 1649acpi_handle_gpes(struct acpi_softc *sc) 1650{ 1651 uint8_t en, sts; 1652 int processed, i; 1653 1654 processed = 0; 1655 SIMPLEQ_FOREACH(pgpe, &sc->sc_gpes, gpe_link) { 1656 sts = bus_space_read_1(sc->sc_iot, pgpe->sts_ioh, 0); 1657 en = bus_space_read_1(sc->sc_iot, pgpe->en_ioh, 0); 1658 for (i = 0; i< 8 ; i++) { 1659 if (en & sts & (1L << i)) { 1660 pgpe->table[i].active = 1; 1661 processed = 1; 1662 } 1663 } 1664 } 1665 return processed; 1666} 1667#endif 1668 1669#if 0 1670void 1671acpi_add_gpeblock(struct acpi_softc *sc, int reg, int len, int gpe) 1672{ 1673 int idx, jdx; 1674 u_int8_t en, sts; 1675 1676 if (!reg || !len) 1677 return; 1678 for (idx = 0; idx < len; idx++) { 1679 sts = inb(reg + idx); 1680 en = inb(reg + len + idx); 1681 printf("-- gpe %.2x-%.2x : en:%.2x sts:%.2x %.2x\n", 1682 gpe+idx*8, gpe+idx*8+7, en, sts, en&sts); 1683 for (jdx = 0; jdx < 8; jdx++) { 1684 char gpestr[32]; 1685 struct aml_node *l, *e; 1686 1687 if (en & sts & (1L << jdx)) { 1688 snprintf(gpestr,sizeof(gpestr), "\\_GPE._L%.2X", gpe+idx*8+jdx); 1689 l = aml_searchname(&aml_root, gpestr); 1690 snprintf(gpestr,sizeof(gpestr), "\\_GPE._E%.2X", gpe+idx*8+jdx); 1691 e = aml_searchname(&aml_root, gpestr); 1692 printf(" GPE %.2x active L%x E%x\n", gpe+idx*8+jdx, l, e); 1693 } 1694 } 1695 } 
1696} 1697#endif 1698 1699void 1700acpi_init_gpes(struct acpi_softc *sc) 1701{ 1702 struct aml_node *gpe; 1703 char name[12]; 1704 int idx, ngpe; 1705 1706#if 0 1707 acpi_add_gpeblock(sc, sc->sc_fadt->gpe0_blk, sc->sc_fadt->gpe0_blk_len>>1, 0); 1708 acpi_add_gpeblock(sc, sc->sc_fadt->gpe1_blk, sc->sc_fadt->gpe1_blk_len>>1, 1709 sc->sc_fadt->gpe1_base); 1710#endif 1711 1712 sc->sc_lastgpe = sc->sc_fadt->gpe0_blk_len << 2; 1713 if (sc->sc_fadt->gpe1_blk_len) { 1714 } 1715 dnprintf(50, "Last GPE: %.2x\n", sc->sc_lastgpe); 1716 1717 /* Allocate GPE table */ 1718 sc->gpe_table = malloc(sc->sc_lastgpe * sizeof(struct gpe_block), 1719 M_DEVBUF, M_WAITOK | M_ZERO); 1720 1721 ngpe = 0; 1722 1723 /* Clear GPE status */ 1724 for (idx = 0; idx < sc->sc_lastgpe; idx += 8) { 1725 acpi_write_pmreg(sc, ACPIREG_GPE_EN, idx>>3, 0); 1726 acpi_write_pmreg(sc, ACPIREG_GPE_STS, idx>>3, -1); 1727 } 1728 for (idx = 0; idx < sc->sc_lastgpe; idx++) { 1729 /* Search Level-sensitive GPES */ 1730 snprintf(name, sizeof(name), "\\_GPE._L%.2X", idx); 1731 gpe = aml_searchname(&aml_root, name); 1732 if (gpe != NULL) 1733 acpi_set_gpehandler(sc, idx, acpi_gpe_level, gpe, 1734 "level"); 1735 if (gpe == NULL) { 1736 /* Search Edge-sensitive GPES */ 1737 snprintf(name, sizeof(name), "\\_GPE._E%.2X", idx); 1738 gpe = aml_searchname(&aml_root, name); 1739 if (gpe != NULL) 1740 acpi_set_gpehandler(sc, idx, acpi_gpe_edge, gpe, 1741 "edge"); 1742 } 1743 } 1744 aml_find_node(&aml_root, "_PRW", acpi_foundprw, sc); 1745 sc->sc_maxgpe = ngpe; 1746} 1747 1748void 1749acpi_init_states(struct acpi_softc *sc) 1750{ 1751 struct aml_value res; 1752 char name[8]; 1753 int i; 1754 1755 for (i = ACPI_STATE_S0; i <= ACPI_STATE_S5; i++) { 1756 snprintf(name, sizeof(name), "_S%d_", i); 1757 sc->sc_sleeptype[i].slp_typa = -1; 1758 sc->sc_sleeptype[i].slp_typb = -1; 1759 if (aml_evalname(sc, &aml_root, name, 0, NULL, &res) == 0) { 1760 if (res.type == AML_OBJTYPE_PACKAGE) { 1761 sc->sc_sleeptype[i].slp_typa = 
aml_val2int(res.v_package[0]); 1762 sc->sc_sleeptype[i].slp_typb = aml_val2int(res.v_package[1]); 1763 } 1764 aml_freevalue(&res); 1765 } 1766 } 1767} 1768 1769void 1770acpi_init_pm(struct acpi_softc *sc) 1771{ 1772 sc->sc_tts = aml_searchname(&aml_root, "_TTS"); 1773 sc->sc_pts = aml_searchname(&aml_root, "_PTS"); 1774 sc->sc_wak = aml_searchname(&aml_root, "_WAK"); 1775 sc->sc_bfs = aml_searchname(&aml_root, "_BFS"); 1776 sc->sc_gts = aml_searchname(&aml_root, "_GTS"); 1777 sc->sc_sst = aml_searchname(&aml_root, "_SST"); 1778} 1779 1780#ifndef SMALL_KERNEL 1781void 1782acpi_susp_resume_gpewalk(struct acpi_softc *sc, int state, 1783 int wake_gpe_state) 1784{ 1785 struct acpi_wakeq *wentry; 1786 int idx; 1787 u_int32_t gpe; 1788 1789 /* Clear GPE status */ 1790 for (idx = 0; idx < sc->sc_lastgpe; idx += 8) { 1791 acpi_write_pmreg(sc, ACPIREG_GPE_EN, idx>>3, 0); 1792 acpi_write_pmreg(sc, ACPIREG_GPE_STS, idx>>3, -1); 1793 } 1794 1795 SIMPLEQ_FOREACH(wentry, &sc->sc_wakedevs, q_next) { 1796 dnprintf(10, "%.4s(S%d) gpe %.2x\n", wentry->q_node->name, 1797 wentry->q_state, 1798 wentry->q_gpe); 1799 1800 if (state <= wentry->q_state) 1801 acpi_enable_onegpe(sc, wentry->q_gpe, 1802 wake_gpe_state); 1803 } 1804 1805 /* If we are resuming (disabling wake GPEs), enable other GPEs */ 1806 1807 if (wake_gpe_state == 0) { 1808 for (gpe = 0; gpe < sc->sc_lastgpe; gpe++) { 1809 if (sc->gpe_table[gpe].handler) 1810 acpi_enable_onegpe(sc, gpe, 1); 1811 } 1812 } 1813} 1814#endif /* ! 
SMALL_KERNEL */ 1815 1816int 1817acpi_sleep_state(struct acpi_softc *sc, int state) 1818{ 1819 int ret; 1820 1821#ifdef MULTIPROCESSOR 1822 if (ncpus > 1) /* cannot suspend MP yet */ 1823 return (0); 1824#endif 1825 switch (state) { 1826 case ACPI_STATE_S0: 1827 return (0); 1828 case ACPI_STATE_S4: 1829 return (EOPNOTSUPP); 1830 case ACPI_STATE_S5: 1831 break; 1832 case ACPI_STATE_S1: 1833 case ACPI_STATE_S2: 1834 case ACPI_STATE_S3: 1835 if (sc->sc_sleeptype[state].slp_typa == -1 || 1836 sc->sc_sleeptype[state].slp_typb == -1) 1837 return (EOPNOTSUPP); 1838 } 1839 1840 if ((ret = acpi_prepare_sleep_state(sc, state)) != 0) 1841 return (ret); 1842 1843 if (state != ACPI_STATE_S1) 1844 ret = acpi_sleep_machdep(sc, state); 1845 else 1846 ret = acpi_enter_sleep_state(sc, state); 1847 1848#ifndef SMALL_KERNEL 1849 if (state == ACPI_STATE_S3) 1850 acpi_resume(sc, state); 1851#endif /* !SMALL_KERNEL */ 1852 return (ret); 1853} 1854 1855int 1856acpi_enter_sleep_state(struct acpi_softc *sc, int state) 1857{ 1858 uint16_t rega, regb; 1859 int retries; 1860 1861 /* Clear WAK_STS bit */ 1862 acpi_write_pmreg(sc, ACPIREG_PM1_STS, 1, ACPI_PM1_WAK_STS); 1863 1864 /* Disable BM arbitration */ 1865 acpi_write_pmreg(sc, ACPIREG_PM2_CNT, 1, ACPI_PM2_ARB_DIS); 1866 1867 /* Write SLP_TYPx values */ 1868 rega = acpi_read_pmreg(sc, ACPIREG_PM1A_CNT, 0); 1869 regb = acpi_read_pmreg(sc, ACPIREG_PM1B_CNT, 0); 1870 rega &= ~(ACPI_PM1_SLP_TYPX_MASK | ACPI_PM1_SLP_EN); 1871 regb &= ~(ACPI_PM1_SLP_TYPX_MASK | ACPI_PM1_SLP_EN); 1872 rega |= ACPI_PM1_SLP_TYPX(sc->sc_sleeptype[state].slp_typa); 1873 regb |= ACPI_PM1_SLP_TYPX(sc->sc_sleeptype[state].slp_typb); 1874 acpi_write_pmreg(sc, ACPIREG_PM1A_CNT, 0, rega); 1875 acpi_write_pmreg(sc, ACPIREG_PM1B_CNT, 0, regb); 1876 1877 /* Set SLP_EN bit */ 1878 rega |= ACPI_PM1_SLP_EN; 1879 regb |= ACPI_PM1_SLP_EN; 1880 1881 /* 1882 * Let the machdep code flush caches and do any other necessary 1883 * tasks before going away. 
1884 */ 1885 acpi_cpu_flush(sc, state); 1886 1887 acpi_write_pmreg(sc, ACPIREG_PM1A_CNT, 0, rega); 1888 acpi_write_pmreg(sc, ACPIREG_PM1B_CNT, 0, regb); 1889 1890 /* Loop on WAK_STS */ 1891 for (retries = 1000; retries > 0; retries--) { 1892 rega = acpi_read_pmreg(sc, ACPIREG_PM1A_STS, 0); 1893 regb = acpi_read_pmreg(sc, ACPIREG_PM1B_STS, 0); 1894 if (rega & ACPI_PM1_WAK_STS || 1895 regb & ACPI_PM1_WAK_STS) 1896 break; 1897 DELAY(10); 1898 } 1899 1900 return (-1); 1901} 1902 1903#ifndef SMALL_KERNEL 1904void 1905acpi_resume(struct acpi_softc *sc, int state) 1906{ 1907 struct aml_value env; 1908 1909 memset(&env, 0, sizeof(env)); 1910 env.type = AML_OBJTYPE_INTEGER; 1911 env.v_integer = sc->sc_state; 1912 1913 if (sc->sc_bfs) 1914 if (aml_evalnode(sc, sc->sc_bfs, 1, &env, NULL) != 0) { 1915 dnprintf(10, "%s evaluating method _BFS failed.\n", 1916 DEVNAME(sc)); 1917 } 1918 1919 if (sc->sc_wak) 1920 if (aml_evalnode(sc, sc->sc_wak, 1, &env, NULL) != 0) { 1921 dnprintf(10, "%s evaluating method _WAK failed.\n", 1922 DEVNAME(sc)); 1923 } 1924 1925 /* Disable wake GPEs */ 1926 acpi_susp_resume_gpewalk(sc, state, 0); 1927 1928 config_suspend(TAILQ_FIRST(&alldevs), DVACT_RESUME); 1929 1930 enable_intr(); 1931 splx(acpi_saved_spl); 1932 1933 sc->sc_state = ACPI_STATE_S0; 1934 if (sc->sc_tts) { 1935 env.v_integer = sc->sc_state; 1936 if (aml_evalnode(sc, sc->sc_tts, 1, &env, NULL) != 0) { 1937 dnprintf(10, "%s evaluating method _TTS failed.\n", 1938 DEVNAME(sc)); 1939 } 1940 } 1941} 1942#endif /* ! SMALL_KERNEL */ 1943 1944void 1945acpi_handle_suspend_failure(struct acpi_softc *sc) 1946{ 1947 struct aml_value env; 1948 1949 /* Undo a partial suspend. 
Devices will have already been resumed */ 1950 enable_intr(); 1951 splx(acpi_saved_spl); 1952 1953 1954 /* Tell ACPI to go back to S0 */ 1955 memset(&env, 0, sizeof(env)); 1956 env.type = AML_OBJTYPE_INTEGER; 1957 sc->sc_state = ACPI_STATE_S0; 1958 if (sc->sc_tts) { 1959 env.v_integer = sc->sc_state; 1960 if (aml_evalnode(sc, sc->sc_tts, 1, &env, NULL) != 0) { 1961 dnprintf(10, "%s evaluating method _TTS failed.\n", 1962 DEVNAME(sc)); 1963 } 1964 } 1965} 1966 1967int 1968acpi_prepare_sleep_state(struct acpi_softc *sc, int state) 1969{ 1970 struct aml_value env; 1971 1972 if (sc == NULL || state == ACPI_STATE_S0) 1973 return(0); 1974 1975 if (sc->sc_sleeptype[state].slp_typa == -1 || 1976 sc->sc_sleeptype[state].slp_typb == -1) { 1977 printf("%s: state S%d unavailable\n", 1978 sc->sc_dev.dv_xname, state); 1979 return (ENXIO); 1980 } 1981 1982 memset(&env, 0, sizeof(env)); 1983 env.type = AML_OBJTYPE_INTEGER; 1984 env.v_integer = state; 1985 /* _TTS(state) */ 1986 if (sc->sc_tts) 1987 if (aml_evalnode(sc, sc->sc_tts, 1, &env, NULL) != 0) { 1988 dnprintf(10, "%s evaluating method _TTS failed.\n", 1989 DEVNAME(sc)); 1990 return (ENXIO); 1991 } 1992 1993 acpi_saved_spl = splhigh(); 1994 disable_intr(); 1995#ifndef SMALL_KERNEL 1996 if (state == ACPI_STATE_S3) 1997 if (config_suspend(TAILQ_FIRST(&alldevs), DVACT_SUSPEND) != 0) { 1998 acpi_handle_suspend_failure(sc); 1999 return (1); 2000 } 2001#endif /* ! 
SMALL_KERNEL */ 2002 2003 /* _PTS(state) */ 2004 if (sc->sc_pts) 2005 if (aml_evalnode(sc, sc->sc_pts, 1, &env, NULL) != 0) { 2006 dnprintf(10, "%s evaluating method _PTS failed.\n", 2007 DEVNAME(sc)); 2008 return (ENXIO); 2009 } 2010 2011 sc->sc_state = state; 2012 /* _GTS(state) */ 2013 if (sc->sc_gts) 2014 if (aml_evalnode(sc, sc->sc_gts, 1, &env, NULL) != 0) { 2015 dnprintf(10, "%s evaluating method _GTS failed.\n", 2016 DEVNAME(sc)); 2017 return (ENXIO); 2018 } 2019 2020 if (sc->sc_sst) 2021 aml_evalnode(sc, sc->sc_sst, 1, &env, NULL); 2022 2023 /* Enable wake GPEs */ 2024 acpi_susp_resume_gpewalk(sc, state, 1); 2025 2026 return (0); 2027} 2028 2029 2030 2031void 2032acpi_powerdown(void) 2033{ 2034 /* 2035 * In case acpi_prepare_sleep fails, we shouldn't try to enter 2036 * the sleep state. It might cost us the battery. 2037 */ 2038 acpi_susp_resume_gpewalk(acpi_softc, ACPI_STATE_S5, 1); 2039 if (acpi_prepare_sleep_state(acpi_softc, ACPI_STATE_S5) == 0) 2040 acpi_enter_sleep_state(acpi_softc, ACPI_STATE_S5); 2041} 2042 2043 2044extern int aml_busy; 2045 2046void 2047acpi_isr_thread(void *arg) 2048{ 2049 struct acpi_thread *thread = arg; 2050 struct acpi_softc *sc = thread->sc; 2051 u_int32_t gpe; 2052 2053 /* 2054 * If we have an interrupt handler, we can get notification 2055 * when certain status bits changes in the ACPI registers, 2056 * so let us enable some events we can forward to userland 2057 */ 2058 if (sc->sc_interrupt) { 2059 int16_t flag; 2060 2061 dnprintf(1,"slpbtn:%c pwrbtn:%c\n", 2062 sc->sc_fadt->flags & FADT_SLP_BUTTON ? 'n' : 'y', 2063 sc->sc_fadt->flags & FADT_PWR_BUTTON ? 
'n' : 'y'); 2064 dnprintf(10, "Enabling acpi interrupts...\n"); 2065 sc->sc_wakeup = 1; 2066 2067 /* Enable Sleep/Power buttons if they exist */ 2068 flag = acpi_read_pmreg(sc, ACPIREG_PM1_EN, 0); 2069 if (!(sc->sc_fadt->flags & FADT_PWR_BUTTON)) { 2070 flag |= ACPI_PM1_PWRBTN_EN; 2071 } 2072 if (!(sc->sc_fadt->flags & FADT_SLP_BUTTON)) { 2073 flag |= ACPI_PM1_SLPBTN_EN; 2074 } 2075 acpi_write_pmreg(sc, ACPIREG_PM1_EN, 0, flag); 2076 2077 /* Enable handled GPEs here */ 2078 for (gpe = 0; gpe < sc->sc_lastgpe; gpe++) { 2079 if (sc->gpe_table[gpe].handler) 2080 acpi_enable_onegpe(sc, gpe, 1); 2081 } 2082 } 2083 2084 while (thread->running) { 2085 dnprintf(10, "sleep... %d\n", sc->sc_wakeup); 2086 while (sc->sc_wakeup) 2087 tsleep(sc, PWAIT, "acpi_idle", 0); 2088 sc->sc_wakeup = 1; 2089 dnprintf(10, "wakeup..\n"); 2090 if (aml_busy) 2091 continue; 2092 2093 for (gpe = 0; gpe < sc->sc_lastgpe; gpe++) { 2094 struct gpe_block *pgpe = &sc->gpe_table[gpe]; 2095 2096 if (pgpe->active) { 2097 pgpe->active = 0; 2098 dnprintf(50, "softgpe: %.2x\n", gpe); 2099 if (pgpe->handler) 2100 pgpe->handler(sc, gpe, pgpe->arg); 2101 } 2102 } 2103 if (sc->sc_powerbtn) { 2104 sc->sc_powerbtn = 0; 2105 2106 aml_notify_dev(ACPI_DEV_PBD, 0x80); 2107 2108 acpi_evindex++; 2109 dnprintf(1,"power button pressed\n"); 2110 KNOTE(sc->sc_note, ACPI_EVENT_COMPOSE(ACPI_EV_PWRBTN, 2111 acpi_evindex)); 2112 } 2113 if (sc->sc_sleepbtn) { 2114 sc->sc_sleepbtn = 0; 2115 2116 aml_notify_dev(ACPI_DEV_SBD, 0x80); 2117 2118 acpi_evindex++; 2119 dnprintf(1,"sleep button pressed\n"); 2120 KNOTE(sc->sc_note, ACPI_EVENT_COMPOSE(ACPI_EV_SLPBTN, 2121 acpi_evindex)); 2122 } 2123 2124 /* handle polling here to keep code non-concurrent*/ 2125 if (sc->sc_poll) { 2126 sc->sc_poll = 0; 2127 acpi_poll_notify(); 2128 } 2129 } 2130 free(thread, M_DEVBUF); 2131 2132 kthread_exit(0); 2133} 2134 2135void 2136acpi_create_thread(void *arg) 2137{ 2138 struct acpi_softc *sc = arg; 2139 2140 if (kthread_create(acpi_isr_thread, 
sc->sc_thread, NULL, DEVNAME(sc)) 2141 != 0) { 2142 printf("%s: unable to create isr thread, GPEs disabled\n", 2143 DEVNAME(sc)); 2144 return; 2145 } 2146} 2147 2148int 2149acpi_map_address(struct acpi_softc *sc, struct acpi_gas *gas, bus_addr_t base, 2150 bus_size_t size, bus_space_handle_t *pioh, bus_space_tag_t *piot) 2151{ 2152 int iospace = GAS_SYSTEM_IOSPACE; 2153 2154 /* No GAS structure, default to I/O space */ 2155 if (gas != NULL) { 2156 base += gas->address; 2157 iospace = gas->address_space_id; 2158 } 2159 switch (iospace) { 2160 case GAS_SYSTEM_MEMORY: 2161 *piot = sc->sc_memt; 2162 break; 2163 case GAS_SYSTEM_IOSPACE: 2164 *piot = sc->sc_iot; 2165 break; 2166 default: 2167 return -1; 2168 } 2169 if (bus_space_map(*piot, base, size, 0, pioh)) 2170 return -1; 2171 2172 return 0; 2173} 2174 2175int 2176acpi_foundec(struct aml_node *node, void *arg) 2177{ 2178 struct acpi_softc *sc = (struct acpi_softc *)arg; 2179 struct device *self = (struct device *)arg; 2180 const char *dev; 2181 struct aml_value res; 2182 struct acpi_attach_args aaa; 2183 2184 if (aml_evalnode(sc, node, 0, NULL, &res) != 0) 2185 return 0; 2186 2187 switch (res.type) { 2188 case AML_OBJTYPE_STRING: 2189 dev = res.v_string; 2190 break; 2191 case AML_OBJTYPE_INTEGER: 2192 dev = aml_eisaid(aml_val2int(&res)); 2193 break; 2194 default: 2195 dev = "unknown"; 2196 break; 2197 } 2198 2199 if (strcmp(dev, ACPI_DEV_ECD)) 2200 return 0; 2201 2202 memset(&aaa, 0, sizeof(aaa)); 2203 aaa.aaa_iot = sc->sc_iot; 2204 aaa.aaa_memt = sc->sc_memt; 2205 aaa.aaa_node = node->parent; 2206 aaa.aaa_dev = dev; 2207 aaa.aaa_name = "acpiec"; 2208 config_found(self, &aaa, acpi_print); 2209 aml_freevalue(&res); 2210 2211 return 0; 2212} 2213 2214int 2215acpi_matchhids(struct acpi_attach_args *aa, const char *hids[], 2216 const char *driver) 2217{ 2218 int i; 2219 2220 if (aa->aaa_dev == NULL || aa->aaa_node == NULL) 2221 return (0); 2222 for (i = 0; hids[i]; i++) { 2223 if (!strcmp(aa->aaa_dev, hids[i])) { 2224 
dnprintf(5, "driver %s matches %s\n", driver, hids[i]); 2225 return (1); 2226 } 2227 } 2228 return (0); 2229} 2230 2231int 2232acpi_foundhid(struct aml_node *node, void *arg) 2233{ 2234 struct acpi_softc *sc = (struct acpi_softc *)arg; 2235 struct device *self = (struct device *)arg; 2236 const char *dev; 2237 struct aml_value res; 2238 struct acpi_attach_args aaa; 2239 2240 dnprintf(10, "found hid device: %s ", node->parent->name); 2241 if (aml_evalnode(sc, node, 0, NULL, &res) != 0) 2242 return 0; 2243 2244 switch (res.type) { 2245 case AML_OBJTYPE_STRING: 2246 dev = res.v_string; 2247 break; 2248 case AML_OBJTYPE_INTEGER: 2249 dev = aml_eisaid(aml_val2int(&res)); 2250 break; 2251 default: 2252 dev = "unknown"; 2253 break; 2254 } 2255 dnprintf(10, " device: %s\n", dev); 2256 2257 memset(&aaa, 0, sizeof(aaa)); 2258 aaa.aaa_iot = sc->sc_iot; 2259 aaa.aaa_memt = sc->sc_memt; 2260 aaa.aaa_node = node->parent; 2261 aaa.aaa_dev = dev; 2262 2263 if (!strcmp(dev, ACPI_DEV_AC)) 2264 aaa.aaa_name = "acpiac"; 2265 else if (!strcmp(dev, ACPI_DEV_CMB)) 2266 aaa.aaa_name = "acpibat"; 2267 else if (!strcmp(dev, ACPI_DEV_LD) || 2268 !strcmp(dev, ACPI_DEV_PBD) || 2269 !strcmp(dev, ACPI_DEV_SBD)) 2270 aaa.aaa_name = "acpibtn"; 2271 else if (!strcmp(dev, ACPI_DEV_ASUS)) 2272 aaa.aaa_name = "acpiasus"; 2273 else if (!strcmp(dev, ACPI_DEV_THINKPAD)) { 2274 aaa.aaa_name = "acpithinkpad"; 2275 acpi_thinkpad_enabled = 1; 2276 } else if (!strcmp(dev, ACPI_DEV_ASUSAIBOOSTER)) 2277 aaa.aaa_name = "aibs"; 2278 2279 if (aaa.aaa_name) 2280 config_found(self, &aaa, acpi_print); 2281 2282 aml_freevalue(&res); 2283 2284 return 0; 2285} 2286 2287int 2288acpi_founddock(struct aml_node *node, void *arg) 2289{ 2290 struct acpi_softc *sc = (struct acpi_softc *)arg; 2291 struct device *self = (struct device *)arg; 2292 struct acpi_attach_args aaa; 2293 2294 dnprintf(10, "found dock entry: %s\n", node->parent->name); 2295 2296 memset(&aaa, 0, sizeof(aaa)); 2297 aaa.aaa_iot = sc->sc_iot; 2298 
aaa.aaa_memt = sc->sc_memt; 2299 aaa.aaa_node = node->parent; 2300 aaa.aaa_name = "acpidock"; 2301 2302 config_found(self, &aaa, acpi_print); 2303 2304 return 0; 2305} 2306 2307int 2308acpi_foundvideo(struct aml_node *node, void *arg) 2309{ 2310 struct acpi_softc *sc = (struct acpi_softc *)arg; 2311 struct device *self = (struct device *)arg; 2312 struct acpi_attach_args aaa; 2313 2314 memset(&aaa, 0, sizeof(aaa)); 2315 aaa.aaa_iot = sc->sc_iot; 2316 aaa.aaa_memt = sc->sc_memt; 2317 aaa.aaa_node = node->parent; 2318 aaa.aaa_name = "acpivideo"; 2319 2320 config_found(self, &aaa, acpi_print); 2321 2322 return (0); 2323} 2324#endif /* SMALL_KERNEL */ 2325