Search scope: /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/net/bluetooth/

Lines Matching defs:hdev (each entry is a source line number in the file, followed by the matching line)

58 static void hci_notify(struct hci_dev *hdev, int event);
89 static void hci_notify(struct hci_dev *hdev, int event)
91 atomic_notifier_call_chain(&hci_notifier, event, hdev);
96 void hci_req_complete(struct hci_dev *hdev, int result)
98 BT_DBG("%s result 0x%2.2x", hdev->name, result);
100 if (hdev->req_status == HCI_REQ_PEND) {
101 hdev->req_result = result;
102 hdev->req_status = HCI_REQ_DONE;
103 wake_up_interruptible(&hdev->req_wait_q);
107 static void hci_req_cancel(struct hci_dev *hdev, int err)
109 BT_DBG("%s err 0x%2.2x", hdev->name, err);
111 if (hdev->req_status == HCI_REQ_PEND) {
112 hdev->req_result = err;
113 hdev->req_status = HCI_REQ_CANCELED;
114 wake_up_interruptible(&hdev->req_wait_q);
119 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
125 BT_DBG("%s start", hdev->name);
127 hdev->req_status = HCI_REQ_PEND;
129 add_wait_queue(&hdev->req_wait_q, &wait);
132 req(hdev, opt);
135 remove_wait_queue(&hdev->req_wait_q, &wait);
140 switch (hdev->req_status) {
142 err = -bt_err(hdev->req_result);
146 err = -hdev->req_result;
154 hdev->req_status = hdev->req_result = 0;
156 BT_DBG("%s end: err %d", hdev->name, err);
161 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
167 hci_req_lock(hdev);
168 ret = __hci_request(hdev, req, opt, timeout);
169 hci_req_unlock(hdev);
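
Taken together, lines 96-169 form a small synchronous-request engine: a caller hands hci_request() a callback that only queues HCI commands, __hci_request() sleeps on req_wait_q, and the event handler later calls hci_req_complete() to wake it (or hci_req_cancel() aborts it). A minimal sketch of a caller, with hypothetical my_* names (OCF_WRITE_VOICE_SETTING is the 2.6-era constant for Write_Voice_Setting; I am assuming it here, not quoting this file):

    /* Hypothetical request callback: runs under the request lock and
     * must only queue commands; it must not block. */
    static void my_voice_req(struct hci_dev *hdev, unsigned long opt)
    {
            __u16 setting = cpu_to_le16((__u16) opt);

            hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_VOICE_SETTING,
                            sizeof(setting), &setting);
    }

    static int my_set_voice(struct hci_dev *hdev, __u16 setting)
    {
            /* Blocks until hci_req_complete() flips req_status to
             * HCI_REQ_DONE, or until the one-second timeout expires. */
            return hci_request(hdev, my_voice_req, setting, HZ);
    }
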
174 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
176 BT_DBG("%s %ld", hdev->name, opt);
179 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
182 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
187 BT_DBG("%s %ld", hdev->name, opt);
192 while ((skb = skb_dequeue(&hdev->driver_init))) {
194 skb->dev = (void *) hdev;
195 skb_queue_tail(&hdev->cmd_q, skb);
196 hci_sched_cmd(hdev);
198 skb_queue_purge(&hdev->driver_init);
203 if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
204 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
207 hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);
210 hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_VERSION, 0, NULL);
213 hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);
217 hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL);
220 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_READ_VOICE_SETTING, 0, NULL);
228 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, sizeof(cp), &cp);
233 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param);
237 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param);
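
Every hci_send_cmd() call above names an (OGF, OCF) pair; on the wire these are packed into one 16-bit opcode (6-bit opcode group field, 10-bit opcode command field). A standalone, compilable sketch of the packing, using the well-known spec values for HCI_Reset (OGF_HOST_CTL = 0x03, OCF_RESET = 0x0003):

    #include <stdint.h>
    #include <stdio.h>

    /* Pack a 6-bit OGF and a 10-bit OCF into the 16-bit opcode the
     * controller sees in the command header. */
    static uint16_t opcode_pack(uint16_t ogf, uint16_t ocf)
    {
            return (uint16_t)((ocf & 0x03ff) | (ogf << 10));
    }

    int main(void)
    {
            printf("HCI_Reset opcode: 0x%04x\n", opcode_pack(0x03, 0x0003));
            return 0;   /* prints 0x0c03 */
    }
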
240 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
244 BT_DBG("%s %x", hdev->name, scan);
247 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
250 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
254 BT_DBG("%s %x", hdev->name, auth);
257 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth);
260 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
264 BT_DBG("%s %x", hdev->name, encrypt);
267 hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
274 struct hci_dev *hdev = NULL;
286 hdev = hci_dev_hold(d);
291 return hdev;
295 static void inquiry_cache_flush(struct hci_dev *hdev)
297 struct inquiry_cache *cache = &hdev->inq_cache;
309 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
311 struct inquiry_cache *cache = &hdev->inq_cache;
322 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
324 struct inquiry_cache *cache = &hdev->inq_cache;
329 if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
342 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
344 struct inquiry_cache *cache = &hdev->inq_cache;
364 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
369 BT_DBG("%s", hdev->name);
371 if (test_bit(HCI_INQUIRY, &hdev->flags))
378 hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp);
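
hci_inq_req() fills an inquiry command parameter block from the hci_inquiry_req the ioctl copied in, then queues OCF_INQUIRY at line 378. A sketch of that payload, assuming the 2.6-era struct name inquiry_cp and the standard General Inquiry Access Code (GIAC, LAP 0x9e8b33):

    struct inquiry_cp cp;

    memset(&cp, 0, sizeof(cp));
    cp.lap[0]  = 0x33;      /* GIAC 0x9e8b33, stored little-endian */
    cp.lap[1]  = 0x8b;
    cp.lap[2]  = 0x9e;
    cp.length  = 8;         /* inquiry duration, in units of 1.28 s */
    cp.num_rsp = 0;         /* 0 = report unlimited responses */
    hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp);
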
385 struct hci_dev *hdev;
393 if (!(hdev = hci_dev_get(ir.dev_id)))
396 hci_dev_lock_bh(hdev);
397 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
398 inquiry_cache_empty(hdev) ||
400 inquiry_cache_flush(hdev);
403 hci_dev_unlock_bh(hdev);
406 if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
420 hci_dev_lock_bh(hdev);
421 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
422 hci_dev_unlock_bh(hdev);
437 hci_dev_put(hdev);
445 struct hci_dev *hdev;
448 if (!(hdev = hci_dev_get(dev)))
451 BT_DBG("%s %p", hdev->name, hdev);
453 hci_req_lock(hdev);
455 if (test_bit(HCI_UP, &hdev->flags)) {
460 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
461 set_bit(HCI_RAW, &hdev->flags);
463 if (hdev->open(hdev)) {
468 if (!test_bit(HCI_RAW, &hdev->flags)) {
469 atomic_set(&hdev->cmd_cnt, 1);
470 set_bit(HCI_INIT, &hdev->flags);
472 //__hci_request(hdev, hci_reset_req, 0, HZ);
473 ret = __hci_request(hdev, hci_init_req, 0,
476 clear_bit(HCI_INIT, &hdev->flags);
480 hci_dev_hold(hdev);
481 set_bit(HCI_UP, &hdev->flags);
482 hci_notify(hdev, HCI_DEV_UP);
485 tasklet_kill(&hdev->rx_task);
486 tasklet_kill(&hdev->tx_task);
487 tasklet_kill(&hdev->cmd_task);
489 skb_queue_purge(&hdev->cmd_q);
490 skb_queue_purge(&hdev->rx_q);
492 if (hdev->flush)
493 hdev->flush(hdev);
495 if (hdev->sent_cmd) {
496 kfree_skb(hdev->sent_cmd);
497 hdev->sent_cmd = NULL;
500 hdev->close(hdev);
501 hdev->flags = 0;
505 hci_req_unlock(hdev);
506 hci_dev_put(hdev);
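
hci_dev_open() is normally reached from user space through the HCIDEVUP ioctl on a raw HCI control socket (hci_dev_do_close(), next, is reached the same way via HCIDEVDOWN). A minimal user-space sketch, assuming the BlueZ headers of the same era:

    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <bluetooth/bluetooth.h>
    #include <bluetooth/hci.h>

    /* Bring up hci0; returns 0 on success, nonzero with errno set. */
    int bring_up_hci0(void)
    {
            int err, ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

            if (ctl < 0)
                    return -1;
            err = ioctl(ctl, HCIDEVUP, 0);   /* dev_id 0 -> "hci0" */
            close(ctl);
            return err;
    }
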
510 static int hci_dev_do_close(struct hci_dev *hdev)
512 BT_DBG("%s %p", hdev->name, hdev);
514 hci_req_cancel(hdev, ENODEV);
515 hci_req_lock(hdev);
517 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
518 hci_req_unlock(hdev);
523 tasklet_kill(&hdev->rx_task);
524 tasklet_kill(&hdev->tx_task);
526 hci_dev_lock_bh(hdev);
527 inquiry_cache_flush(hdev);
528 hci_conn_hash_flush(hdev);
529 hci_dev_unlock_bh(hdev);
531 hci_notify(hdev, HCI_DEV_DOWN);
533 if (hdev->flush)
534 hdev->flush(hdev);
537 skb_queue_purge(&hdev->cmd_q);
538 atomic_set(&hdev->cmd_cnt, 1);
539 if (!test_bit(HCI_RAW, &hdev->flags)) {
540 set_bit(HCI_INIT, &hdev->flags);
541 __hci_request(hdev, hci_reset_req, 0,
543 clear_bit(HCI_INIT, &hdev->flags);
547 tasklet_kill(&hdev->cmd_task);
550 skb_queue_purge(&hdev->rx_q);
551 skb_queue_purge(&hdev->cmd_q);
552 skb_queue_purge(&hdev->raw_q);
555 if (hdev->sent_cmd) {
556 kfree_skb(hdev->sent_cmd);
557 hdev->sent_cmd = NULL;
562 hdev->close(hdev);
565 hdev->flags = 0;
567 hci_req_unlock(hdev);
569 hci_dev_put(hdev);
575 struct hci_dev *hdev;
578 if (!(hdev = hci_dev_get(dev)))
580 err = hci_dev_do_close(hdev);
581 hci_dev_put(hdev);
587 struct hci_dev *hdev;
590 if (!(hdev = hci_dev_get(dev)))
593 hci_req_lock(hdev);
594 tasklet_disable(&hdev->tx_task);
596 if (!test_bit(HCI_UP, &hdev->flags))
600 skb_queue_purge(&hdev->rx_q);
601 skb_queue_purge(&hdev->cmd_q);
603 hci_dev_lock_bh(hdev);
604 inquiry_cache_flush(hdev);
605 hci_conn_hash_flush(hdev);
606 hci_dev_unlock_bh(hdev);
608 if (hdev->flush)
609 hdev->flush(hdev);
611 atomic_set(&hdev->cmd_cnt, 1);
612 hdev->acl_cnt = 0; hdev->sco_cnt = 0;
614 if (!test_bit(HCI_RAW, &hdev->flags))
615 ret = __hci_request(hdev, hci_reset_req, 0,
619 tasklet_enable(&hdev->tx_task);
620 hci_req_unlock(hdev);
621 hci_dev_put(hdev);
627 struct hci_dev *hdev;
630 if (!(hdev = hci_dev_get(dev)))
633 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
635 hci_dev_put(hdev);
642 struct hci_dev *hdev;
649 if (!(hdev = hci_dev_get(dr.dev_id)))
654 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
659 if (!lmp_encrypt_capable(hdev)) {
664 if (!test_bit(HCI_AUTH, &hdev->flags)) {
666 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
672 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
677 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
682 hdev->pkt_type = (__u16) dr.dev_opt;
686 hdev->link_policy = (__u16) dr.dev_opt;
690 hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
694 hdev->acl_mtu = *((__u16 *)&dr.dev_opt + 1);
695 hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0);
699 hdev->sco_mtu = *((__u16 *)&dr.dev_opt + 1);
700 hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0);
707 hci_dev_put(hdev);
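
These branches service the HCISETAUTH, HCISETENCRYPT, HCISETSCAN, HCISETPTYPE and related ioctls, each carrying a struct hci_dev_req. For example, the HCISETSCAN branch at line 677 is driven from user space like this (a fragment continuing the control-socket sketch above; ctl is the raw HCI socket):

    struct hci_dev_req dr;

    dr.dev_id  = 0;                          /* hci0 */
    dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;   /* connectable + discoverable */
    ioctl(ctl, HCISETSCAN, (unsigned long) &dr);
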
734 struct hci_dev *hdev;
735 hdev = list_entry(p, struct hci_dev, list);
736 (dr + n)->dev_id = hdev->id;
737 (dr + n)->dev_opt = hdev->flags;
754 struct hci_dev *hdev;
761 if (!(hdev = hci_dev_get(di.dev_id)))
764 strcpy(di.name, hdev->name);
765 di.bdaddr = hdev->bdaddr;
766 di.type = hdev->type;
767 di.flags = hdev->flags;
768 di.pkt_type = hdev->pkt_type;
769 di.acl_mtu = hdev->acl_mtu;
770 di.acl_pkts = hdev->acl_pkts;
771 di.sco_mtu = hdev->sco_mtu;
772 di.sco_pkts = hdev->sco_pkts;
773 di.link_policy = hdev->link_policy;
774 di.link_mode = hdev->link_mode;
776 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
777 memcpy(&di.features, &hdev->features, sizeof(di.features));
782 hci_dev_put(hdev);
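
The populated struct is copied back to the caller of HCIGETDEVINFO; the matching user-space read looks like this (fragment, continuing the sketch above; add <stdio.h> for printf):

    struct hci_dev_info di;

    di.dev_id = 0;
    if (ioctl(ctl, HCIGETDEVINFO, (void *) &di) == 0)
            printf("%s acl_mtu %d sco_mtu %d\n",
                   di.name, di.acl_mtu, di.sco_mtu);
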
792 struct hci_dev *hdev;
794 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
795 if (!hdev)
798 skb_queue_head_init(&hdev->driver_init);
800 return hdev;
805 void hci_free_dev(struct hci_dev *hdev)
807 skb_queue_purge(&hdev->driver_init);
810 put_device(&hdev->dev);
815 int hci_register_dev(struct hci_dev *hdev)
820 BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);
822 if (!hdev->open || !hdev->close || !hdev->destruct)
834 sprintf(hdev->name, "hci%d", id);
835 hdev->id = id;
836 list_add(&hdev->list, head);
838 atomic_set(&hdev->refcnt, 1);
839 spin_lock_init(&hdev->lock);
841 hdev->flags = 0;
842 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
843 hdev->link_mode = (HCI_LM_ACCEPT);
845 hdev->idle_timeout = 0;
846 hdev->sniff_max_interval = 800;
847 hdev->sniff_min_interval = 80;
849 tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev);
850 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
851 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
853 skb_queue_head_init(&hdev->rx_q);
854 skb_queue_head_init(&hdev->cmd_q);
855 skb_queue_head_init(&hdev->raw_q);
857 init_waitqueue_head(&hdev->req_wait_q);
858 init_MUTEX(&hdev->req_lock);
860 inquiry_cache_init(hdev);
862 hci_conn_hash_init(hdev);
864 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
866 atomic_set(&hdev->promisc, 0);
870 hci_register_sysfs(hdev);
872 hci_notify(hdev, HCI_DEV_REG);
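
Line 822 spells out the driver contract: open, close and destruct must be wired up before registration (send is required in practice too, since hci_send_frame() calls it at line 1007). A driver-side sketch with hypothetical mydrv_* callbacks, following the 2.6-era transport-driver pattern:

    static int mydrv_probe(void)
    {
            struct hci_dev *hdev = hci_alloc_dev();

            if (!hdev)
                    return -ENOMEM;

            hdev->type     = HCI_USB;        /* transport type */
            hdev->open     = mydrv_open;     /* checked at line 822 */
            hdev->close    = mydrv_close;
            hdev->send     = mydrv_send;     /* used at line 1007 */
            hdev->destruct = mydrv_destruct;

            if (hci_register_dev(hdev) < 0) {
                    hci_free_dev(hdev);
                    return -EBUSY;
            }
            return 0;
    }
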
879 int hci_unregister_dev(struct hci_dev *hdev)
881 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
883 hci_unregister_sysfs(hdev);
886 list_del(&hdev->list);
889 hci_dev_do_close(hdev);
891 hci_notify(hdev, HCI_DEV_UNREG);
893 __hci_dev_put(hdev);
899 int hci_suspend_dev(struct hci_dev *hdev)
901 hci_notify(hdev, HCI_DEV_SUSPEND);
907 int hci_resume_dev(struct hci_dev *hdev)
909 hci_notify(hdev, HCI_DEV_RESUME);
988 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
990 if (!hdev) {
995 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
997 if (atomic_read(&hdev->promisc)) {
1001 hci_send_to_sock(hdev, skb);
1007 return hdev->send(skb);
1011 int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
1017 BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);
1021 BT_ERR("%s Can't allocate memory for HCI command", hdev->name);
1035 skb->dev = (void *) hdev;
1036 skb_queue_tail(&hdev->cmd_q, skb);
1037 hci_sched_cmd(hdev);
1043 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
1047 if (!hdev->sent_cmd)
1050 hdr = (void *) hdev->sent_cmd->data;
1055 BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);
1057 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
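
On H4/UART-style transports, the command hci_send_cmd() builds goes on the wire as one packet-type byte, a little-endian opcode, a length byte, then the parameters; HCI_COMMAND_HDR_SIZE at line 1057 is the 3-byte opcode+plen header. A standalone serializer sketch of that layout:

    #include <stdint.h>
    #include <string.h>

    /* Serialize one HCI command packet: 0x01 (HCI_COMMAND_PKT),
     * 16-bit opcode little-endian, plen, then plen parameter bytes. */
    static size_t build_hci_cmd(uint8_t *buf, uint16_t ogf, uint16_t ocf,
                                const void *param, uint8_t plen)
    {
            uint16_t opcode = (uint16_t)((ocf & 0x03ff) | (ogf << 10));

            buf[0] = 0x01;
            buf[1] = opcode & 0xff;
            buf[2] = opcode >> 8;
            buf[3] = plen;
            if (plen)
                    memcpy(buf + 4, param, plen);
            return 4 + (size_t) plen;
    }
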
1075 struct hci_dev *hdev = conn->hdev;
1078 BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1080 skb->dev = (void *) hdev;
1086 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1091 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1102 skb->dev = (void *) hdev;
1106 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1114 hci_sched_tx(hdev);
1122 struct hci_dev *hdev = conn->hdev;
1125 BT_DBG("%s len %d", hdev->name, skb->len);
1127 if (skb->len > hdev->sco_mtu) {
1139 skb->dev = (void *) hdev;
1142 hci_sched_tx(hdev);
1150 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1152 struct hci_conn_hash *h = &hdev->conn_hash;
1175 int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
1185 static inline void hci_acl_tx_to(struct hci_dev *hdev)
1187 struct hci_conn_hash *h = &hdev->conn_hash;
1191 BT_ERR("%s ACL tx timeout", hdev->name);
1198 hdev->name, batostr(&c->dst));
1204 static inline void hci_sched_acl(struct hci_dev *hdev)
1210 BT_DBG("%s", hdev->name);
1212 if (!test_bit(HCI_RAW, &hdev->flags)) {
1215 if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
1216 hci_acl_tx_to(hdev);
1219 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1226 hdev->acl_last_tx = jiffies;
1228 hdev->acl_cnt--;
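
Filling in the gaps between lines 1219 and 1228: the ACL scheduler is a quota-driven loop in which hci_low_sent() picks the least-served connection and a per-round quote, and every transmitted frame consumes one of the controller's acl_cnt buffer credits (replenished only when Number Of Completed Packets events arrive). A reconstruction of the loop body, under that reading:

    while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
            while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                    hci_send_frame(skb);
                    hdev->acl_last_tx = jiffies;

                    hdev->acl_cnt--;
                    conn->sent++;
            }
    }
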
1235 static inline void hci_sched_sco(struct hci_dev *hdev)
1241 BT_DBG("%s", hdev->name);
1243 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1257 struct hci_dev *hdev = (struct hci_dev *) arg;
1262 BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1266 hci_sched_acl(hdev);
1268 hci_sched_sco(hdev);
1271 while ((skb = skb_dequeue(&hdev->raw_q)))
1280 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1292 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1294 hdev->stat.acl_rx++;
1296 hci_dev_lock(hdev);
1297 conn = hci_conn_hash_lookup_handle(hdev, handle);
1298 hci_dev_unlock(hdev);
1312 hdev->name, handle);
1319 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1329 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1331 hdev->stat.sco_rx++;
1333 hci_dev_lock(hdev);
1334 conn = hci_conn_hash_lookup_handle(hdev, handle);
1335 hci_dev_unlock(hdev);
1347 hdev->name, handle);
1355 struct hci_dev *hdev = (struct hci_dev *) arg;
1358 BT_DBG("%s", hdev->name);
1362 while ((skb = skb_dequeue(&hdev->rx_q))) {
1363 if (atomic_read(&hdev->promisc)) {
1365 hci_send_to_sock(hdev, skb);
1368 if (test_bit(HCI_RAW, &hdev->flags)) {
1373 if (test_bit(HCI_INIT, &hdev->flags)) {
1386 hci_event_packet(hdev, skb);
1390 BT_DBG("%s ACL data packet", hdev->name);
1391 hci_acldata_packet(hdev, skb);
1395 BT_DBG("%s SCO data packet", hdev->name);
1396 hci_scodata_packet(hdev, skb);
1410 struct hci_dev *hdev = (struct hci_dev *) arg;
1413 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1415 if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
1416 BT_ERR("%s command tx timeout", hdev->name);
1417 atomic_set(&hdev->cmd_cnt, 1);
1421 if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
1422 if (hdev->sent_cmd)
1423 kfree_skb(hdev->sent_cmd);
1425 if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
1426 atomic_dec(&hdev->cmd_cnt);
1428 hdev->cmd_last_tx = jiffies;
1430 skb_queue_head(&hdev->cmd_q, skb);
1431 hci_sched_cmd(hdev);
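
The visible lines 1421-1431 sandwich the actual transmit; reconstructed, the step clones the command into sent_cmd (so hci_sent_cmd_data() at line 1043 can later match the reply against it), decrements cmd_cnt to enforce one command in flight, sends the frame, and requeues the command if the clone allocation fails:

    if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
            if (hdev->sent_cmd)
                    kfree_skb(hdev->sent_cmd);

            if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
                    atomic_dec(&hdev->cmd_cnt);
                    hci_send_frame(skb);
                    hdev->cmd_last_tx = jiffies;
            } else {
                    skb_queue_head(&hdev->cmd_q, skb);
                    hci_sched_cmd(hdev);
            }
    }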