/netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/net/rfkill/

Lines Matching defs:rfkill

29 #include <linux/rfkill.h>
38 #include "rfkill.h"
50 struct rfkill {
78 #define to_rfkill(d) container_of(d, struct rfkill, dev)
103 * the rfkill struct under their own lock, and take this lock during
104 * rfkill method calls -- which will cause an AB-BA deadlock situation.
113 static LIST_HEAD(rfkill_fds); /* list of open fds of /dev/rfkill */
128 static void rfkill_led_trigger_event(struct rfkill *rfkill)
132 if (!rfkill->registered)
135 trigger = &rfkill->led_trigger;
137 if (rfkill->state & RFKILL_BLOCK_ANY)
145 struct rfkill *rfkill;
147 rfkill = container_of(led->trigger, struct rfkill, led_trigger);
149 rfkill_led_trigger_event(rfkill);
152 const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
154 return rfkill->led_trigger.name;
158 void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
160 BUG_ON(!rfkill);
162 rfkill->ledtrigname = name;
166 static int rfkill_led_trigger_register(struct rfkill *rfkill)
168 rfkill->led_trigger.name = rfkill->ledtrigname
169 ? : dev_name(&rfkill->dev);
170 rfkill->led_trigger.activate = rfkill_led_trigger_activate;
171 return led_trigger_register(&rfkill->led_trigger);
174 static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
176 led_trigger_unregister(&rfkill->led_trigger);
179 static void rfkill_led_trigger_event(struct rfkill *rfkill)
183 static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
188 static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
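
The matches above are the optional LED-trigger glue: each rfkill device registers a LED trigger named after the device (or after rfkill->ledtrigname, if the driver set one), rfkill_led_trigger_event() fires it whenever the block state changes, and the inline stubs are the variants built without LED support. A minimal driver-side sketch, with a hypothetical trigger name; the name must be set before rfkill_register(), while the trigger is still unregistered:

    /* Hypothetical driver fragment: give the rfkill LED trigger a
     * fixed name so a board LED can bind to it as its default
     * trigger. "acme-wifi-led" is an assumed name. */
    rfkill_set_led_trigger_name(rfkill, "acme-wifi-led");
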
193 static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
198 ev->idx = rfkill->idx;
199 ev->type = rfkill->type;
202 spin_lock_irqsave(&rfkill->lock, flags);
203 ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
204 ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
206 spin_unlock_irqrestore(&rfkill->lock, flags);
209 static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
218 rfkill_fill_event(&ev->ev, rfkill, op);
226 static void rfkill_event(struct rfkill *rfkill)
228 if (!rfkill->registered)
231 kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);
233 /* also send event to /dev/rfkill */
234 rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
237 static bool __rfkill_set_hw_state(struct rfkill *rfkill,
243 BUG_ON(!rfkill);
245 spin_lock_irqsave(&rfkill->lock, flags);
246 prev = !!(rfkill->state & RFKILL_BLOCK_HW);
248 rfkill->state |= RFKILL_BLOCK_HW;
250 rfkill->state &= ~RFKILL_BLOCK_HW;
252 any = rfkill->state & RFKILL_BLOCK_ANY;
253 spin_unlock_irqrestore(&rfkill->lock, flags);
255 rfkill_led_trigger_event(rfkill);
263 * @rfkill: the rfkill struct to use
269 static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
274 if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
282 if (rfkill->ops->query)
283 rfkill->ops->query(rfkill, rfkill->data);
285 spin_lock_irqsave(&rfkill->lock, flags);
286 if (rfkill->state & RFKILL_BLOCK_SW)
287 rfkill->state |= RFKILL_BLOCK_SW_PREV;
289 rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
292 rfkill->state |= RFKILL_BLOCK_SW;
294 rfkill->state &= ~RFKILL_BLOCK_SW;
296 rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
297 spin_unlock_irqrestore(&rfkill->lock, flags);
299 err = rfkill->ops->set_block(rfkill->data, blocked);
301 spin_lock_irqsave(&rfkill->lock, flags);
308 if (rfkill->state & RFKILL_BLOCK_SW_PREV)
309 rfkill->state |= RFKILL_BLOCK_SW;
311 rfkill->state &= ~RFKILL_BLOCK_SW;
313 rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
314 rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
315 spin_unlock_irqrestore(&rfkill->lock, flags);
317 rfkill_led_trigger_event(rfkill);
318 rfkill_event(rfkill);
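
rfkill_set_block() is the single soft-block path: it snapshots the previous RFKILL_BLOCK_SW bit into RFKILL_BLOCK_SW_PREV, holds RFKILL_BLOCK_SW_SETCALL across the driver's set_block() call so a racing rfkill_set_sw_state() from the driver is ignored, and restores the previous bit if the callback fails. The driver-side counterpart is the set_block op; a minimal sketch, where struct acme_dev and acme_radio_power() are assumed driver-private names:

    /* Hypothetical set_block callback, invoked by rfkill_set_block()
     * with the ops_data pointer passed to rfkill_alloc(). A non-zero
     * return makes the core revert to the previous soft-block state. */
    static int acme_set_block(void *data, bool blocked)
    {
            struct acme_dev *priv = data;

            return acme_radio_power(priv, !blocked);
    }
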
337 struct rfkill *rfkill;
340 list_for_each_entry(rfkill, &rfkill_list, node) {
341 if (rfkill->type != type)
344 rfkill_set_block(rfkill, blocked);
374 * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED,
375 * ignoring everything in its path but rfkill_global_mutex and rfkill->mutex.
382 struct rfkill *rfkill;
391 list_for_each_entry(rfkill, &rfkill_list, node)
392 rfkill_set_block(rfkill, true);
427 * Used by rfkill-input to manually unlock state changes, when
468 bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
472 ret = __rfkill_set_hw_state(rfkill, blocked, &change);
474 if (!rfkill->registered)
478 schedule_work(&rfkill->uevent_work);
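
rfkill_set_hw_state() is how a driver reports a hardware kill switch: it only takes the spinlock and defers the uevent to uevent_work, so it is callable from atomic context, and it returns the combined block state. A sketch of a hypothetical kill-switch interrupt handler; struct acme_dev and acme_read_rfkill_pin() are assumed names:

    /* Hypothetical ISR: mirror the level of an RF-kill input pin
     * into the rfkill core. Needs <linux/interrupt.h>. */
    static irqreturn_t acme_rfkill_irq(int irq, void *dev_id)
    {
            struct acme_dev *priv = dev_id;

            rfkill_set_hw_state(priv->rfkill, acme_read_rfkill_pin(priv));
            return IRQ_HANDLED;
    }
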
484 static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
489 if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
493 rfkill->state |= bit;
495 rfkill->state &= ~bit;
498 bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
503 BUG_ON(!rfkill);
505 spin_lock_irqsave(&rfkill->lock, flags);
506 prev = !!(rfkill->state & RFKILL_BLOCK_SW);
507 __rfkill_set_sw_state(rfkill, blocked);
508 hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
510 spin_unlock_irqrestore(&rfkill->lock, flags);
512 if (!rfkill->registered)
516 schedule_work(&rfkill->uevent_work);
518 rfkill_led_trigger_event(rfkill);
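
rfkill_set_sw_state() lets the driver mirror a soft-block change that originated in the device or its firmware; the RFKILL_BLOCK_SW_SETCALL test in __rfkill_set_sw_state() drops the update if a set_block() call is in flight, and the return value is the combined (soft or hard) block state. A hypothetical firmware-notification handler, with assumed acme_* names:

    /* Hypothetical notification path: mirror the soft-block bit the
     * firmware reports and stop TX if the radio ends up blocked. */
    static void acme_fw_radio_event(struct acme_dev *priv, bool radio_off)
    {
            if (rfkill_set_sw_state(priv->rfkill, radio_off))
                    netif_stop_queue(priv->ndev);   /* assumed netdev field */
    }
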
524 void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
528 BUG_ON(!rfkill);
529 BUG_ON(rfkill->registered);
531 spin_lock_irqsave(&rfkill->lock, flags);
532 __rfkill_set_sw_state(rfkill, blocked);
533 rfkill->persistent = true;
534 spin_unlock_irqrestore(&rfkill->lock, flags);
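
rfkill_init_sw_state() may only be called before registration (note the BUG_ON at line 529); besides seeding the soft-block bit it sets rfkill->persistent, so rfkill_register() and rfkill_resume() restore the device's own state instead of forcing the global default. A probe-time sketch, assuming the radio keeps its setting in NVRAM via a hypothetical helper:

    /* Hypothetical probe fragment: seed rfkill with the state the
     * hardware remembers and mark it persistent across EPO/resume. */
    static void acme_seed_rfkill(struct acme_dev *priv)
    {
            rfkill_init_sw_state(priv->rfkill,
                                 acme_nvram_radio_disabled(priv));
    }
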
538 void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
543 BUG_ON(!rfkill);
545 spin_lock_irqsave(&rfkill->lock, flags);
551 swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
552 hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
553 __rfkill_set_sw_state(rfkill, sw);
555 rfkill->state |= RFKILL_BLOCK_HW;
557 rfkill->state &= ~RFKILL_BLOCK_HW;
559 spin_unlock_irqrestore(&rfkill->lock, flags);
561 if (!rfkill->registered) {
562 rfkill->persistent = true;
565 schedule_work(&rfkill->uevent_work);
567 rfkill_led_trigger_event(rfkill);
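
rfkill_set_states() updates the soft and hard bits under a single lock acquisition and, like rfkill_init_sw_state(), marks a still-unregistered device persistent. It suits firmware that reports both bits in one message; a sketch with hypothetical status bits:

    /* Hypothetical status handler: ACME_SOFT_KILL and ACME_HARD_KILL
     * are assumed bits in a firmware status word. */
    static void acme_status_event(struct acme_dev *priv, u32 status)
    {
            rfkill_set_states(priv->rfkill, status & ACME_SOFT_KILL,
                              status & ACME_HARD_KILL);
    }
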
576 struct rfkill *rfkill = to_rfkill(dev);
578 return sprintf(buf, "%s\n", rfkill->name);
609 struct rfkill *rfkill = to_rfkill(dev);
611 return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
618 struct rfkill *rfkill = to_rfkill(dev);
620 return sprintf(buf, "%d\n", rfkill->idx);
627 struct rfkill *rfkill = to_rfkill(dev);
629 return sprintf(buf, "%d\n", rfkill->persistent);
636 struct rfkill *rfkill = to_rfkill(dev);
638 return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0);
645 struct rfkill *rfkill = to_rfkill(dev);
647 return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
654 struct rfkill *rfkill = to_rfkill(dev);
669 rfkill_set_block(rfkill, state);
689 struct rfkill *rfkill = to_rfkill(dev);
691 return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state));
698 struct rfkill *rfkill = to_rfkill(dev);
714 rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
748 struct rfkill *rfkill = to_rfkill(dev);
750 kfree(rfkill);
755 struct rfkill *rfkill = to_rfkill(dev);
760 error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
764 rfkill_get_type_str(rfkill->type));
767 spin_lock_irqsave(&rfkill->lock, flags);
768 state = rfkill->state;
769 spin_unlock_irqrestore(&rfkill->lock, flags);
775 void rfkill_pause_polling(struct rfkill *rfkill)
777 BUG_ON(!rfkill);
779 if (!rfkill->ops->poll)
782 cancel_delayed_work_sync(&rfkill->poll_work);
786 void rfkill_resume_polling(struct rfkill *rfkill)
788 BUG_ON(!rfkill);
790 if (!rfkill->ops->poll)
793 schedule_work(&rfkill->poll_work.work);
799 struct rfkill *rfkill = to_rfkill(dev);
801 rfkill_pause_polling(rfkill);
808 struct rfkill *rfkill = to_rfkill(dev);
811 if (!rfkill->persistent) {
812 cur = !!(rfkill->state & RFKILL_BLOCK_SW);
813 rfkill_set_block(rfkill, cur);
816 rfkill_resume_polling(rfkill);
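
rfkill_pause_polling() and rfkill_resume_polling() bracket the poll_work used by drivers whose switch state can only be discovered by polling; the class suspend/resume hooks above call them automatically, and lines 811-813 re-assert the current soft-block state on resume for non-persistent devices. A driver that holds the device in reset can do the same by hand; a sketch with assumed names:

    /* Hypothetical firmware-reload path: polling would read garbage
     * while the device is in reset, so pause it around the load. */
    static int acme_reload_firmware(struct acme_dev *priv)
    {
            int err;

            rfkill_pause_polling(priv->rfkill);
            err = acme_load_fw(priv);       /* hypothetical helper */
            rfkill_resume_polling(priv->rfkill);
            return err;
    }
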
822 .name = "rfkill",
830 bool rfkill_blocked(struct rfkill *rfkill)
835 spin_lock_irqsave(&rfkill->lock, flags);
836 state = rfkill->state;
837 spin_unlock_irqrestore(&rfkill->lock, flags);
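
rfkill_blocked() is the query helper for code that needs the combined state outside of a callback, e.g. to refuse transmit attempts; a hypothetical TX entry point:

    /* Hypothetical TX path: refuse to transmit while the radio is
     * soft- or hard-blocked. acme_hw_tx() is an assumed helper. */
    static int acme_tx(struct acme_dev *priv, struct sk_buff *skb)
    {
            if (rfkill_blocked(priv->rfkill))
                    return -ERFKILL;
            return acme_hw_tx(priv, skb);
    }
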
844 struct rfkill * __must_check rfkill_alloc(const char *name,
850 struct rfkill *rfkill;
865 rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
866 if (!rfkill)
869 spin_lock_init(&rfkill->lock);
870 INIT_LIST_HEAD(&rfkill->node);
871 rfkill->type = type;
872 rfkill->name = name;
873 rfkill->ops = ops;
874 rfkill->data = ops_data;
876 dev = &rfkill->dev;
881 return rfkill;
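
rfkill_alloc() packages the kzalloc, lock/list initialization and device setup shown above; name, ops and ops_data must stay valid for the rfkill's lifetime. A minimal probe sketch pairing it with rfkill_register(), using the hypothetical acme_* names from the earlier fragments (acme_set_block as sketched above):

    #include <linux/rfkill.h>

    struct acme_dev {
            struct rfkill *rfkill;
            /* ... hypothetical hardware state ... */
    };

    static const struct rfkill_ops acme_rfkill_ops = {
            .set_block = acme_set_block,
    };

    static int acme_register_rfkill(struct acme_dev *priv,
                                    struct device *parent)
    {
            int err;

            priv->rfkill = rfkill_alloc("acme-wifi", parent,
                                        RFKILL_TYPE_WLAN,
                                        &acme_rfkill_ops, priv);
            if (!priv->rfkill)
                    return -ENOMEM;

            err = rfkill_register(priv->rfkill);
            if (err) {
                    /* Registration failed: drop the allocation ref. */
                    rfkill_destroy(priv->rfkill);
                    priv->rfkill = NULL;
            }
            return err;
    }
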
887 struct rfkill *rfkill;
889 rfkill = container_of(work, struct rfkill, poll_work.work);
896 rfkill->ops->poll(rfkill, rfkill->data);
898 schedule_delayed_work(&rfkill->poll_work,
904 struct rfkill *rfkill;
906 rfkill = container_of(work, struct rfkill, uevent_work);
909 rfkill_event(rfkill);
915 struct rfkill *rfkill;
918 rfkill = container_of(work, struct rfkill, sync_work);
921 cur = rfkill_global_states[rfkill->type].cur;
922 rfkill_set_block(rfkill, cur);
926 int __must_check rfkill_register(struct rfkill *rfkill)
929 struct device *dev = &rfkill->dev;
932 BUG_ON(!rfkill);
936 if (rfkill->registered) {
941 rfkill->idx = rfkill_no;
942 dev_set_name(dev, "rfkill%lu", rfkill_no);
945 list_add_tail(&rfkill->node, &rfkill_list);
951 error = rfkill_led_trigger_register(rfkill);
955 rfkill->registered = true;
957 INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
958 INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
959 INIT_WORK(&rfkill->sync_work, rfkill_sync_work);
961 if (rfkill->ops->poll)
962 schedule_delayed_work(&rfkill->poll_work,
965 if (!rfkill->persistent || rfkill_epo_lock_active) {
966 schedule_work(&rfkill->sync_work);
969 bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);
972 __rfkill_switch_all(rfkill->type, soft_blocked);
976 rfkill_send_events(rfkill, RFKILL_OP_ADD);
982 device_del(&rfkill->dev);
984 list_del_init(&rfkill->node);
991 void rfkill_unregister(struct rfkill *rfkill)
993 BUG_ON(!rfkill);
995 if (rfkill->ops->poll)
996 cancel_delayed_work_sync(&rfkill->poll_work);
998 cancel_work_sync(&rfkill->uevent_work);
999 cancel_work_sync(&rfkill->sync_work);
1001 rfkill->registered = false;
1003 device_del(&rfkill->dev);
1006 rfkill_send_events(rfkill, RFKILL_OP_DEL);
1007 list_del_init(&rfkill->node);
1010 rfkill_led_trigger_unregister(rfkill);
1014 void rfkill_destroy(struct rfkill *rfkill)
1016 if (rfkill)
1017 put_device(&rfkill->dev);
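
Teardown mirrors registration: rfkill_unregister() cancels the poll, uevent and sync work, emits RFKILL_OP_DEL and unlinks the node, and rfkill_destroy() then drops the final device reference (line 1016 tolerates NULL). The matching remove path for the probe sketch above:

    /* Counterpart to acme_register_rfkill(): unregister first, then
     * release the reference taken at allocation. */
    static void acme_unregister_rfkill(struct acme_dev *priv)
    {
            rfkill_unregister(priv->rfkill);
            rfkill_destroy(priv->rfkill);
            priv->rfkill = NULL;
    }
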
1024 struct rfkill *rfkill;
1043 list_for_each_entry(rfkill, &rfkill_list, node) {
1047 rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
1135 struct rfkill *rfkill;
1169 list_for_each_entry(rfkill, &rfkill_list, node) {
1170 if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL)
1173 if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL)
1176 rfkill_set_block(rfkill, ev.soft);
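
The matches from line 1024 on are the /dev/rfkill character device: open() queues one RFKILL_OP_ADD event per registered device for the reader, and write() parses a struct rfkill_event and applies RFKILL_OP_CHANGE / RFKILL_OP_CHANGE_ALL through rfkill_set_block(). A small userspace sketch of the write side; block_all_wlan() is an assumed name:

    /* Userspace sketch: soft-block every WLAN radio through the
     * /dev/rfkill write path matched above. */
    #include <fcntl.h>
    #include <unistd.h>
    #include <linux/rfkill.h>

    static int block_all_wlan(void)
    {
            struct rfkill_event ev = {
                    .op   = RFKILL_OP_CHANGE_ALL,
                    .type = RFKILL_TYPE_WLAN,
                    .soft = 1,
            };
            int fd = open("/dev/rfkill", O_WRONLY);

            if (fd < 0)
                    return -1;
            if (write(fd, &ev, sizeof(ev)) != (ssize_t)sizeof(ev)) {
                    close(fd);
                    return -1;
            }
            return close(fd);
    }
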
1199 printk(KERN_DEBUG "rfkill: input handler enabled\n");
1223 printk(KERN_DEBUG "rfkill: input handler disabled\n");
1247 .name = "rfkill",