/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef  BT_DBG
#define BT_DBG(D...)
#endif

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
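/*
 * Synchronous request handshake: the requester sets req_status to
 * HCI_REQ_PEND and sleeps on req_wait_q; the event path reports the
 * command outcome through hci_req_complete(), while hci_req_cancel()
 * aborts a pending request when the device is being torn down
 * (hci_dev_do_close() calls it with ENODEV).
 */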
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
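/*
 * The *_req() functions below are request builders: they are passed as
 * the req() callback to hci_request()/__hci_request() and only queue HCI
 * commands, returning immediately. The replies come back as HCI events
 * and are finally reported through hci_req_complete().
 */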
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__le16 param;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;
		skb_queue_tail(&hdev->cmd_q, skb);
		hci_sched_cmd(hdev);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	{
		struct hci_cp_set_event_flt cp;
		cp.flt_type = HCI_FLT_CLEAR_ALL;
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, sizeof(cp), &cp);
	}

	/* Page timeout ~20 secs */
	param = cpu_to_le16(0x8000);
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}
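/*
 * The inquiry cache is a plain singly linked list keyed by bdaddr:
 * lookups and updates walk it linearly, and new entries are allocated
 * with GFP_ATOMIC because updates arrive from the event path. Callers
 * serialize access with hci_dev_lock_bh()/hci_dev_unlock_bh().
 */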
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
		/* Entry not in the cache. Add new one. */
		if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
			return;
		e->next = cache->list;
		cache->list = e;
	}

	memcpy(&e->data, data, sizeof(*data));
	e->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp);
}
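/*
 * hci_inquiry() below is reached from the HCIINQUIRY ioctl on an HCI
 * socket. Roughly, from userspace (a sketch; error handling omitted):
 *
 *	char buf[sizeof(struct hci_inquiry_req) +
 *			255 * sizeof(struct inquiry_info)];
 *	struct hci_inquiry_req *ir = (void *) buf;
 *	ir->dev_id  = 0;	// hci0
 *	ir->length  = 8;	// inquiry length, in 1.28s units
 *	ir->num_rsp = 0;	// 0 = unlimited, capped at 255 below
 *	ioctl(sk, HCIINQUIRY, buf);
 */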
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);
	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
		goto done;

	/* For an unlimited number of responses we will use a buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep, so we allocate a temporary buffer and then
	 * copy it to user space.
	 */
	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
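/*
 * Shutdown order in hci_dev_do_close() matters: the RX and TX tasklets
 * are killed first so nothing touches the queues, the controller is
 * reset (for non-RAW devices), the command tasklet is killed, and only
 * then are the remaining queues purged and the driver's close()
 * callback invoked.
 */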
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(dr.dev_id)))
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETLINKPOL:
		hdev->link_policy = (__u16) dr.dev_opt;
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *)&dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *)&dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}
	hci_dev_put(hdev);
	return err;
}
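/*
 * Note on HCISETACLMTU/HCISETSCOMTU above: dev_opt packs two __u16
 * values, the packet count in the first halfword and the MTU in the
 * second, which is why they are picked apart with halfword pointer
 * arithmetic.
 */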
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kmalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(di.dev_id)))
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = hdev->type;
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
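/*
 * Typical driver usage of this interface (a sketch; the mydrv_*
 * callbacks are hypothetical):
 *
 *	hdev = hci_alloc_dev();
 *	hdev->type     = HCI_USB;
 *	hdev->open     = mydrv_open;
 *	hdev->close    = mydrv_close;
 *	hdev->send     = mydrv_send;
 *	hdev->destruct = mydrv_destruct;
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 */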
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int id = 0;

	BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);
	init_MUTEX(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hci_register_sysfs(hdev);

	hci_notify(hdev, HCI_DEV_REG);

	return id;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

	hci_unregister_sysfs(hdev);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	hci_notify(hdev, HCI_DEV_UNREG);

	__hci_dev_put(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
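/*
 * Every outgoing packet (command, ACL, SCO or raw) funnels through
 * hci_send_frame() below. When the device is in promiscuous mode the
 * frame is timestamped and mirrored to raw HCI sockets before being
 * handed to the driver's send() callback.
 */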
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s Can't allocate memory for HCI command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(hci_opcode_pack(ogf, ocf));
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;
	skb_queue_tail(&hdev->cmd_q, skb);
	hci_sched_cmd(hdev);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(hci_opcode_pack(ogf, ocf)))
		return NULL;

	BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_acl);
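/*
 * As seen above, a fragmented ACL skb carries its continuation fragments
 * in skb_shinfo(skb)->frag_list: the head is tagged ACL_START, every
 * following fragment ACL_CONT, and the whole chain is queued under
 * data_q.lock so the TX scheduler never interleaves fragments of
 * different packets.
 */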
/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	if (skb->len > hdev->sco_mtu) {
		kfree_skb(skb);
		return -EINVAL;
	}

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
	skb_queue_tail(&conn->data_q, skb);
	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || c->state != BT_CONNECTED
				|| skb_queue_empty(&c->data_q))
			continue;
		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
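/*
 * Scheduling policy: hci_low_sent() picks the connection of the given
 * type with the fewest packets in flight and grants it a quota of
 * cnt/num frames (at least one), so controller buffer credits are
 * shared roughly fairly between busy connections.
 */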
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
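/*
 * hci_rx_task() below drains rx_q and routes each frame by pkt_type:
 * events to hci_event_packet() (see hci_event.c), ACL data to L2CAP and
 * SCO data to the SCO protocol via the hci_proto table. In promiscuous
 * mode a copy goes to raw sockets first; in RAW mode frames are dropped
 * after that copy.
 */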
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		if (hdev->sent_cmd)
			kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			hci_sched_cmd(hdev);
		}
	}
}
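/*
 * Command flow control: cmd_cnt holds the controller's command credit.
 * hci_cmd_task() spends one credit per command sent and keeps a clone in
 * sent_cmd so hci_sent_cmd_data() can retrieve the parameters when the
 * matching event arrives. The credit is replenished by the event path,
 * and force-reset to 1 if no reply shows up within a second.
 */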