Lines matching refs: ec

3  *  ec.c - ACPI Embedded Controller Driver (v3)
107 /* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
168 struct acpi_ec *ec;
171 static int acpi_ec_submit_query(struct acpi_ec *ec);
172 static void advance_transaction(struct acpi_ec *ec, bool interrupt);
228 #define ec_dbg_ref(ec, fmt, ...) \
229 ec_dbg_raw("%lu: " fmt, ec->reference_count, ## __VA_ARGS__)
235 static bool acpi_ec_started(struct acpi_ec *ec)
237 return test_bit(EC_FLAGS_STARTED, &ec->flags) &&
238 !test_bit(EC_FLAGS_STOPPED, &ec->flags);
241 static bool acpi_ec_event_enabled(struct acpi_ec *ec)
248 if (!test_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
260 return acpi_ec_started(ec);
262 return test_bit(EC_FLAGS_STARTED, &ec->flags);
265 static bool acpi_ec_flushed(struct acpi_ec *ec)
267 return ec->reference_count == 1;
274 static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
276 u8 x = inb(ec->command_addr);
289 static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
291 u8 x = inb(ec->data_addr);
293 ec->timestamp = jiffies;
298 static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
301 outb(command, ec->command_addr);
302 ec->timestamp = jiffies;
305 static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
308 outb(data, ec->data_addr);
309 ec->timestamp = jiffies;
337 static inline bool acpi_ec_gpe_status_set(struct acpi_ec *ec)
341 (void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
345 static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
348 acpi_enable_gpe(NULL, ec->gpe);
350 BUG_ON(ec->reference_count < 1);
351 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
353 if (acpi_ec_gpe_status_set(ec)) {
360 advance_transaction(ec, false);
364 static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close)
367 acpi_disable_gpe(NULL, ec->gpe);
369 BUG_ON(ec->reference_count < 1);
370 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
378 static void acpi_ec_submit_request(struct acpi_ec *ec)
380 ec->reference_count++;
381 if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) &&
382 ec->gpe >= 0 && ec->reference_count == 1)
383 acpi_ec_enable_gpe(ec, true);
386 static void acpi_ec_complete_request(struct acpi_ec *ec)
390 ec->reference_count--;
391 if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) &&
392 ec->gpe >= 0 && ec->reference_count == 0)
393 acpi_ec_disable_gpe(ec, true);
394 flushed = acpi_ec_flushed(ec);
396 wake_up(&ec->wait);
399 static void acpi_ec_mask_events(struct acpi_ec *ec)
401 if (!test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) {
402 if (ec->gpe >= 0)
403 acpi_ec_disable_gpe(ec, false);
405 disable_irq_nosync(ec->irq);
408 set_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags);
412 static void acpi_ec_unmask_events(struct acpi_ec *ec)
414 if (test_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags)) {
415 clear_bit(EC_FLAGS_EVENTS_MASKED, &ec->flags);
416 if (ec->gpe >= 0)
417 acpi_ec_enable_gpe(ec, false);
419 enable_irq(ec->irq);
429 * @ec: the EC device
436 static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
438 if (!acpi_ec_started(ec))
440 acpi_ec_submit_request(ec);
444 static void acpi_ec_submit_event(struct acpi_ec *ec)
450 acpi_ec_mask_events(ec);
451 if (!acpi_ec_event_enabled(ec))
454 if (ec->event_state != EC_EVENT_READY)
460 ec->event_state = EC_EVENT_IN_PROGRESS;
468 if (ec->events_to_process++ > 0)
471 ec->events_in_progress++;
472 queue_work(ec_wq, &ec->work);
475 static void acpi_ec_complete_event(struct acpi_ec *ec)
477 if (ec->event_state == EC_EVENT_IN_PROGRESS)
478 ec->event_state = EC_EVENT_COMPLETE;
481 static void acpi_ec_close_event(struct acpi_ec *ec)
483 if (ec->event_state != EC_EVENT_READY)
487 ec->event_state = EC_EVENT_READY;
488 acpi_ec_unmask_events(ec);
491 static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
493 if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
499 advance_transaction(ec, false);
502 static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
504 if (test_and_clear_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
510 * Run with locked ec mutex.
512 static void acpi_ec_clear(struct acpi_ec *ec)
517 if (acpi_ec_submit_query(ec))
526 static void acpi_ec_enable_event(struct acpi_ec *ec)
530 spin_lock_irqsave(&ec->lock, flags);
531 if (acpi_ec_started(ec))
532 __acpi_ec_enable_event(ec);
533 spin_unlock_irqrestore(&ec->lock, flags);
537 acpi_ec_clear(ec);
543 flush_workqueue(ec_wq); /* flush ec->work */
547 static void acpi_ec_disable_event(struct acpi_ec *ec)
551 spin_lock_irqsave(&ec->lock, flags);
552 __acpi_ec_disable_event(ec);
553 spin_unlock_irqrestore(&ec->lock, flags);
572 static bool acpi_ec_guard_event(struct acpi_ec *ec)
577 spin_lock_irqsave(&ec->lock, flags);
591 ec->event_state != EC_EVENT_READY &&
592 (!ec->curr || ec->curr->command != ACPI_EC_COMMAND_QUERY);
593 spin_unlock_irqrestore(&ec->lock, flags);
597 static int ec_transaction_polled(struct acpi_ec *ec)
602 spin_lock_irqsave(&ec->lock, flags);
603 if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
605 spin_unlock_irqrestore(&ec->lock, flags);
609 static int ec_transaction_completed(struct acpi_ec *ec)
614 spin_lock_irqsave(&ec->lock, flags);
615 if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
617 spin_unlock_irqrestore(&ec->lock, flags);
621 static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag)
623 ec->curr->flags |= flag;
625 if (ec->curr->command != ACPI_EC_COMMAND_QUERY)
631 acpi_ec_close_event(ec);
637 acpi_ec_close_event(ec);
643 acpi_ec_complete_event(ec);
647 static void acpi_ec_spurious_interrupt(struct acpi_ec *ec, struct transaction *t)
654 acpi_ec_mask_events(ec);
657 static void advance_transaction(struct acpi_ec *ec, bool interrupt)
659 struct transaction *t = ec->curr;
665 status = acpi_ec_read_status(ec);
673 ec->event_state == EC_EVENT_COMPLETE)
674 acpi_ec_close_event(ec);
683 acpi_ec_write_data(ec, t->wdata[t->wi++]);
685 acpi_ec_spurious_interrupt(ec, t);
688 t->rdata[t->ri++] = acpi_ec_read_data(ec);
690 ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
697 acpi_ec_spurious_interrupt(ec, t);
700 ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
704 acpi_ec_write_cmd(ec, t->command);
705 ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
710 acpi_ec_submit_event(ec);
713 wake_up(&ec->wait);
716 static void start_transaction(struct acpi_ec *ec)
718 ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
719 ec->curr->flags = 0;
722 static int ec_guard(struct acpi_ec *ec)
724 unsigned long guard = usecs_to_jiffies(ec->polling_guard);
725 unsigned long timeout = ec->timestamp + guard;
729 if (ec->busy_polling) {
731 if (ec_transaction_completed(ec))
745 if (!ec_transaction_polled(ec) &&
746 !acpi_ec_guard_event(ec))
748 if (wait_event_timeout(ec->wait,
749 ec_transaction_completed(ec),
757 static int ec_poll(struct acpi_ec *ec)
766 if (!ec_guard(ec))
768 spin_lock_irqsave(&ec->lock, flags);
769 advance_transaction(ec, false);
770 spin_unlock_irqrestore(&ec->lock, flags);
773 spin_lock_irqsave(&ec->lock, flags);
774 start_transaction(ec);
775 spin_unlock_irqrestore(&ec->lock, flags);
780 static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
787 spin_lock_irqsave(&ec->lock, tmp);
789 if (!acpi_ec_submit_flushable_request(ec)) {
793 ec_dbg_ref(ec, "Increase command");
795 ec->curr = t;
797 start_transaction(ec);
798 spin_unlock_irqrestore(&ec->lock, tmp);
800 ret = ec_poll(ec);
802 spin_lock_irqsave(&ec->lock, tmp);
804 acpi_ec_unmask_events(ec);
806 ec->curr = NULL;
808 acpi_ec_complete_request(ec);
809 ec_dbg_ref(ec, "Decrease command");
811 spin_unlock_irqrestore(&ec->lock, tmp);
815 static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
820 if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
825 mutex_lock(&ec->mutex);
826 if (ec->global_lock) {
834 status = acpi_ec_transaction_unlocked(ec, t);
836 if (ec->global_lock)
839 mutex_unlock(&ec->mutex);
843 static int acpi_ec_burst_enable(struct acpi_ec *ec)
850 return acpi_ec_transaction(ec, &t);
853 static int acpi_ec_burst_disable(struct acpi_ec *ec)
859 return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
860 acpi_ec_transaction(ec, &t) : 0;
863 static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
871 result = acpi_ec_transaction(ec, &t);
876 static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
883 return acpi_ec_transaction(ec, &t);
937 static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
941 spin_lock_irqsave(&ec->lock, flags);
942 if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
946 acpi_ec_submit_request(ec);
947 ec_dbg_ref(ec, "Increase driver");
951 spin_unlock_irqrestore(&ec->lock, flags);
954 static bool acpi_ec_stopped(struct acpi_ec *ec)
959 spin_lock_irqsave(&ec->lock, flags);
960 flushed = acpi_ec_flushed(ec);
961 spin_unlock_irqrestore(&ec->lock, flags);
965 static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
969 spin_lock_irqsave(&ec->lock, flags);
970 if (acpi_ec_started(ec)) {
972 set_bit(EC_FLAGS_STOPPED, &ec->flags);
973 spin_unlock_irqrestore(&ec->lock, flags);
974 wait_event(ec->wait, acpi_ec_stopped(ec));
975 spin_lock_irqsave(&ec->lock, flags);
978 acpi_ec_complete_request(ec);
979 ec_dbg_ref(ec, "Decrease driver");
981 __acpi_ec_disable_event(ec);
982 clear_bit(EC_FLAGS_STARTED, &ec->flags);
983 clear_bit(EC_FLAGS_STOPPED, &ec->flags);
986 spin_unlock_irqrestore(&ec->lock, flags);
989 static void acpi_ec_enter_noirq(struct acpi_ec *ec)
993 spin_lock_irqsave(&ec->lock, flags);
994 ec->busy_polling = true;
995 ec->polling_guard = 0;
997 spin_unlock_irqrestore(&ec->lock, flags);
1000 static void acpi_ec_leave_noirq(struct acpi_ec *ec)
1004 spin_lock_irqsave(&ec->lock, flags);
1005 ec->busy_polling = ec_busy_polling;
1006 ec->polling_guard = ec_polling_guard;
1008 spin_unlock_irqrestore(&ec->lock, flags);
1013 struct acpi_ec *ec = first_ec;
1015 if (!ec)
1018 mutex_lock(&ec->mutex);
1020 acpi_ec_stop(ec, true);
1021 mutex_unlock(&ec->mutex);
1038 acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
1042 mutex_lock(&ec->mutex);
1043 list_for_each_entry(handler, &ec->list, node) {
1046 mutex_unlock(&ec->mutex);
1050 mutex_unlock(&ec->mutex);
1067 int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
1084 mutex_lock(&ec->mutex);
1086 list_add(&handler->node, &ec->list);
1087 mutex_unlock(&ec->mutex);
1093 static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
1099 mutex_lock(&ec->mutex);
1100 list_for_each_entry_safe(handler, tmp, &ec->list, node) {
1113 mutex_unlock(&ec->mutex);
1118 void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
1120 acpi_ec_remove_query_handlers(ec, false, query_bit);
1129 struct acpi_ec *ec = q->ec;
1140 spin_lock_irq(&ec->lock);
1141 ec->queries_in_progress--;
1142 spin_unlock_irq(&ec->lock);
1148 static struct acpi_ec_query *acpi_ec_create_query(struct acpi_ec *ec, u8 *pval)
1162 q->ec = ec;
1166 static int acpi_ec_submit_query(struct acpi_ec *ec)
1172 q = acpi_ec_create_query(ec, &value);
1181 result = acpi_ec_transaction(ec, &q->transaction);
1190 q->handler = acpi_ec_get_query_handler_by_value(ec, value);
1205 spin_lock_irq(&ec->lock);
1207 ec->queries_in_progress++;
1210 spin_unlock_irq(&ec->lock);
1222 struct acpi_ec *ec = container_of(work, struct acpi_ec, work);
1226 spin_lock_irq(&ec->lock);
1228 while (ec->events_to_process) {
1229 spin_unlock_irq(&ec->lock);
1231 acpi_ec_submit_query(ec);
1233 spin_lock_irq(&ec->lock);
1235 ec->events_to_process--;
1246 acpi_ec_complete_event(ec);
1250 spin_unlock_irq(&ec->lock);
1252 guard_timeout = !!ec_guard(ec);
1254 spin_lock_irq(&ec->lock);
1257 if (guard_timeout && !ec->curr)
1258 advance_transaction(ec, false);
1260 acpi_ec_close_event(ec);
1265 ec->events_in_progress--;
1267 spin_unlock_irq(&ec->lock);
1270 static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt)
1284 if (ec->gpe >= 0 && acpi_ec_gpe_status_set(ec))
1285 acpi_clear_gpe(NULL, ec->gpe);
1287 advance_transaction(ec, true);
1290 static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
1294 spin_lock_irqsave(&ec->lock, flags);
1296 clear_gpe_and_advance_transaction(ec, true);
1298 spin_unlock_irqrestore(&ec->lock, flags);
1323 struct acpi_ec *ec = handler_context;
1333 if (ec->busy_polling || bits > 8)
1334 acpi_ec_burst_enable(ec);
1338 acpi_ec_read(ec, address, value) :
1339 acpi_ec_write(ec, address, *value);
1341 if (ec->busy_polling || bits > 8)
1342 acpi_ec_burst_disable(ec);
1363 static void acpi_ec_free(struct acpi_ec *ec)
1365 if (first_ec == ec)
1367 if (boot_ec == ec)
1369 kfree(ec);
1374 struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);
1376 if (!ec)
1378 mutex_init(&ec->mutex);
1379 init_waitqueue_head(&ec->wait);
1380 INIT_LIST_HEAD(&ec->list);
1381 spin_lock_init(&ec->lock);
1382 INIT_WORK(&ec->work, acpi_ec_event_handler);
1383 ec->timestamp = jiffies;
1384 ec->busy_polling = true;
1385 ec->polling_guard = 0;
1386 ec->gpe = -1;
1387 ec->irq = -1;
1388 return ec;
1397 struct acpi_ec *ec = context;
1404 acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
1413 struct acpi_ec *ec = context;
1416 ec->command_addr = ec->data_addr = 0;
1419 ec_parse_io_ports, ec);
1422 if (ec->data_addr == 0 || ec->command_addr == 0)
1429 ec->gpe = tmp;
1438 ec->global_lock = tmp;
1439 ec->handle = handle;
1443 static bool install_gpe_event_handler(struct acpi_ec *ec)
1447 status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
1449 &acpi_ec_gpe_handler, ec);
1453 if (test_bit(EC_FLAGS_STARTED, &ec->flags) && ec->reference_count >= 1)
1454 acpi_ec_enable_gpe(ec, true);
1459 static bool install_gpio_irq_event_handler(struct acpi_ec *ec)
1461 return request_threaded_irq(ec->irq, NULL, acpi_ec_irq_handler,
1462 IRQF_SHARED | IRQF_ONESHOT, "ACPI EC", ec) >= 0;
1467 * @ec: Target EC.
1468 * @device: ACPI device object corresponding to @ec.
1482 static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
1487 acpi_ec_start(ec, false);
1489 if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
1490 acpi_ec_enter_noirq(ec);
1491 status = acpi_install_address_space_handler_no_reg(ec->handle,
1494 NULL, ec);
1496 acpi_ec_stop(ec, false);
1499 set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
1500 ec->address_space_handler_holder = ec->handle;
1503 if (call_reg && !test_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags)) {
1504 acpi_execute_reg_methods(ec->handle, ACPI_ADR_SPACE_EC);
1505 set_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags);
1511 if (ec->gpe < 0) {
1521 ec->irq = irq;
1524 if (!test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) {
1526 acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
1528 NULL, ec, NULL);
1529 set_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags);
1531 if (!test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
1534 if (ec->gpe >= 0)
1535 ready = install_gpe_event_handler(ec);
1536 else if (ec->irq >= 0)
1537 ready = install_gpio_irq_event_handler(ec);
1540 set_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
1541 acpi_ec_leave_noirq(ec);
1549 acpi_ec_enable_event(ec);
1554 static void ec_remove_handlers(struct acpi_ec *ec)
1556 if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
1558 ec->address_space_handler_holder,
1561 clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
1575 acpi_ec_stop(ec, false);
1577 if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
1578 if (ec->gpe >= 0 &&
1579 ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
1583 if (ec->irq >= 0)
1584 free_irq(ec->irq, ec);
1586 clear_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags);
1588 if (test_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags)) {
1589 acpi_ec_remove_query_handlers(ec, true, 0);
1590 clear_bit(EC_FLAGS_QUERY_METHODS_INSTALLED, &ec->flags);
1594 static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device, bool call_reg)
1598 ret = ec_install_handlers(ec, device, call_reg);
1604 first_ec = ec;
1606 pr_info("EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", ec->command_addr,
1607 ec->data_addr);
1609 if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags)) {
1610 if (ec->gpe >= 0)
1611 pr_info("GPE=0x%x\n", ec->gpe);
1613 pr_info("IRQ=%d\n", ec->irq);
1621 struct acpi_ec *ec;
1630 ec = boot_ec;
1634 ec = acpi_ec_alloc();
1635 if (!ec)
1638 status = ec_parse_device(device->handle, 0, ec, NULL);
1644 if (boot_ec && ec->command_addr == boot_ec->command_addr &&
1645 ec->data_addr == boot_ec->data_addr) {
1649 * quirks. So do not change boot_ec->gpe to ec->gpe,
1652 boot_ec->handle = ec->handle;
1655 boot_ec->gpe = ec->gpe;
1657 acpi_handle_debug(ec->handle, "duplicated.\n");
1658 acpi_ec_free(ec);
1659 ec = boot_ec;
1663 ret = acpi_ec_setup(ec, device, true);
1667 if (ec == boot_ec)
1672 acpi_handle_info(ec->handle,
1675 device->driver_data = ec;
1677 ret = !!request_region(ec->data_addr, 1, "EC data");
1678 WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr);
1679 ret = !!request_region(ec->command_addr, 1, "EC cmd");
1680 WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
1685 acpi_handle_debug(ec->handle, "enumerated.\n");
1689 if (ec != boot_ec)
1690 acpi_ec_free(ec);
1697 struct acpi_ec *ec;
1702 ec = acpi_driver_data(device);
1703 release_region(ec->data_addr, 1);
1704 release_region(ec->command_addr, 1);
1706 if (ec != boot_ec) {
1707 ec_remove_handlers(ec);
1708 acpi_ec_free(ec);
1715 struct acpi_ec *ec = context;
1725 if (ec->data_addr == 0)
1726 ec->data_addr = resource->data.io.minimum;
1727 else if (ec->command_addr == 0)
1728 ec->command_addr = resource->data.io.minimum;
1748 struct acpi_ec *ec;
1761 ec = acpi_ec_alloc();
1762 if (!ec)
1769 status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device, ec, NULL);
1770 if (ACPI_FAILURE(status) || !ec->handle) {
1771 acpi_ec_free(ec);
1782 ret = acpi_ec_setup(ec, NULL, true);
1784 acpi_ec_free(ec);
1788 boot_ec = ec;
1790 acpi_handle_info(ec->handle,
1953 struct acpi_ec *ec;
1957 /* Generate a boot ec context. */
1972 ec = acpi_ec_alloc();
1973 if (!ec)
1977 ec->command_addr = ecdt_ptr->data.address;
1978 ec->data_addr = ecdt_ptr->control.address;
1980 ec->command_addr = ecdt_ptr->control.address;
1981 ec->data_addr = ecdt_ptr->data.address;
1989 ec->gpe = ecdt_ptr->gpe;
1991 ec->handle = ACPI_ROOT_OBJECT;
1997 ret = acpi_ec_setup(ec, NULL, false);
1999 acpi_ec_free(ec);
2003 boot_ec = ec;
2015 struct acpi_ec *ec =
2019 acpi_ec_disable_event(ec);
2025 struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
2031 if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
2032 ec->gpe >= 0 && ec->reference_count >= 1)
2033 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
2035 acpi_ec_enter_noirq(ec);
2042 struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
2044 acpi_ec_leave_noirq(ec);
2046 if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
2047 ec->gpe >= 0 && ec->reference_count >= 1)
2048 acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
2055 struct acpi_ec *ec =
2058 acpi_ec_enable_event(ec);
2075 static bool acpi_ec_work_in_progress(struct acpi_ec *ec)
2077 return ec->events_in_progress + ec->queries_in_progress > 0;
2186 .name = "ec",
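
Because the listing only shows the lines that contain "ec", the link between the small register accessors and the transaction engine is easy to lose. The sketch below models a register read on acpi_ec_read() (line 863) feeding acpi_ec_transaction() (line 815). The struct transaction field names and the ACPI_EC_COMMAND_READ value are taken from the surrounding driver and do not appear in the matched lines above, so treat them as assumptions to verify against the full file.

/*
 * Minimal sketch of a register read expressed as an EC transaction,
 * modeled on acpi_ec_read()/acpi_ec_transaction() from the listing.
 * The struct transaction layout and ACPI_EC_COMMAND_READ (RD_EC) are
 * assumed from the rest of the driver, not from the matched lines.
 */
static int example_ec_register_read(struct acpi_ec *ec, u8 address, u8 *data)
{
	u8 d;
	struct transaction t = {
		.command = ACPI_EC_COMMAND_READ,  /* RD_EC */
		.wdata = &address,                /* one written byte: the EC offset */
		.rdata = &d,                      /* one read byte: the value */
		.wlen = 1,
		.rlen = 1,
	};
	int result;

	/*
	 * acpi_ec_transaction() (line 815) takes ec->mutex, optionally the
	 * ACPI global lock, and then polls or waits for advance_transaction()
	 * to drive the command to completion.
	 */
	result = acpi_ec_transaction(ec, &t);
	*data = d;
	return result;
}

The write path (acpi_ec_write(), line 876) is symmetric: two written bytes, the offset followed by the value, and no read bytes.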
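
The matched lines at 378 to 396 show the reference-counting pattern only in fragments. The reconstruction below is a best-effort fill to make the pattern readable: the first active reference enables the GPE, the last one disables it, and the waiter in acpi_ec_stop() (line 974) is woken once the count drains. The local variable declaration and the if (flushed) test are not in the matched lines and should be checked against the full source.

/*
 * Reconstruction of the refcount-gated GPE handling; lines absent from
 * the listing are marked and are approximations, not verbatim source.
 */
static void acpi_ec_submit_request(struct acpi_ec *ec)
{
	ec->reference_count++;
	if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) &&
	    ec->gpe >= 0 && ec->reference_count == 1)
		acpi_ec_enable_gpe(ec, true);
}

static void acpi_ec_complete_request(struct acpi_ec *ec)
{
	bool flushed;			/* reconstructed: not in the matched lines */

	ec->reference_count--;
	if (test_bit(EC_FLAGS_EVENT_HANDLER_INSTALLED, &ec->flags) &&
	    ec->gpe >= 0 && ec->reference_count == 0)
		acpi_ec_disable_gpe(ec, true);
	flushed = acpi_ec_flushed(ec);
	if (flushed)			/* reconstructed: wake the acpi_ec_stop() waiter */
		wake_up(&ec->wait);
}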