/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/compat.h>
#include <linux/socket.h>
#include <linux/ioctl.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

/* ----- HCI socket interface ----- */

static inline int hci_test_bit(int nr, void *addr)
{
	return *((__u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
static struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);
	sk_for_each(sk, node, &hci_sk_list.head) {
		struct hci_filter *flt;
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		/* Apply filter */
		flt = &hci_pi(sk)->filter;

		if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
				0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask))
			continue;

		if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
			register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

			if (!hci_test_bit(evt, &flt->event_mask))
				continue;

			if (flt->opcode &&
			    ((evt == HCI_EV_CMD_COMPLETE &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 3))) ||
			     (evt == HCI_EV_CMD_STATUS &&
			      flt->opcode !=
			      get_unaligned((__le16 *)(skb->data + 4)))))
				continue;
		}

		if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
			continue;

		/* Put type byte before the data */
		memcpy(skb_push(nskb, 1), &bt_cb(nskb)->pkt_type, 1);

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&hci_sk_list.lock);
}

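/* Release a raw HCI socket: unlink it, drop the device reference and
 * promiscuous count (if bound) and discard any queued frames. */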
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

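/* Device blacklist helpers: look up, add, remove and clear blocked
 * remote addresses on a given HCI device. */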
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct list_head *p;

	list_for_each(p, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;
	}

	return NULL;
}

static int hci_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	struct bdaddr_list *entry;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	if (bacmp(&bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, &bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return 0;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

static int hci_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	struct bdaddr_list *entry;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	if (bacmp(&bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, &bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

/* Ioctls that require bound socket */
static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			return -EPERM;

		if (arg)
			set_bit(HCI_RAW, &hdev->flags);
		else
			clear_bit(HCI_RAW, &hdev->flags);

		return 0;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_blacklist_del(hdev, (void __user *) arg);

	default:
		if (hdev->ioctl)
			return hdev->ioctl(hdev, cmd, arg);
		return -EINVAL;
	}
}

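/* Device-independent ioctls; anything else falls through to the
 * bound-socket path above. */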
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	void __user *argp = (void __user *) arg;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);

	default:
		lock_sock(sk);
		err = hci_sock_bound_ioctl(sk, cmd, arg);
		release_sock(sk);
		return err;
	}
}

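/* Bind the socket to an HCI device (or HCI_DEV_NONE); binding to a real
 * device bumps its promiscuous count and marks the socket BT_BOUND. */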
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!haddr || haddr->hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (hci_pi(sk)->hdev) {
		err = -EALREADY;
		goto done;
	}

	if (haddr->hci_dev != HCI_DEV_NONE) {
		if (!(hdev = hci_dev_get(haddr->hci_dev))) {
			err = -ENODEV;
			goto done;
		}

		atomic_inc(&hdev->promisc);
	}

	hci_pi(sk)->hdev = hdev;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!hdev)
		return -EBADFD;

	lock_sock(sk);

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev    = hdev->id;

	release_sock(sk);
	return 0;
}

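/* Attach the ancillary data (direction and/or timestamp) requested via
 * the HCI_DATA_DIR and HCI_TIME_STAMP socket options. */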
static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (msg->msg_flags & MSG_CMSG_COMPAT) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

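/* Receive one queued frame, copy it to user space and append any
 * requested control messages. */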
static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
				struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	if (!(skb = skb_recv_datagram(sk, flags, noblock, &err)))
		return err;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	hci_sock_cmsg(sk, msg, skb);

	skb_free_datagram(sk, skb);

	return err ? : copied;
}

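/* Send a frame from user space: the first byte is the packet type.
 * Commands are checked against the security filter unless the sender
 * has CAP_NET_RAW; other packet types always require CAP_NET_RAW. */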
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	if (!(hdev = hci_pi(sk)->hdev)) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (!(skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err)))
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);
	skb->dev = (void *) hdev;

	if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
				!hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) &&
					!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
			skb_queue_tail(&hdev->raw_q, skb);
			tasklet_schedule(&hdev->tx_task);
		} else {
			skb_queue_tail(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		tasklet_schedule(&hdev->tx_task);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

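/* Socket options: data direction, timestamps and the receive filter.
 * Without CAP_NET_RAW the requested filter is clamped to the security
 * filter's packet type and event masks. */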
static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode    = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt;

	if (get_user(len, optlen))
		return -EFAULT;

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			return -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			return -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			return -EFAULT;
		break;

	default:
		return -ENOPROTOOPT;
	}

	return 0;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

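/* Create a new HCI socket; only SOCK_RAW is supported. */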
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

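/* Device notifier: broadcast device state changes to all HCI sockets
 * and detach sockets from a device that is being unregistered. */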
static int hci_sock_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct hci_dev *hdev = (struct hci_dev *) ptr;
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %ld", hdev->name, event);

	/* Send event to sockets */
	ev.event  = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;
		struct hlist_node *node;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, node, &hci_sk_list.head) {
			local_bh_disable();
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
			local_bh_enable();
		}
		read_unlock(&hci_sk_list.lock);
	}

	return NOTIFY_DONE;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

static struct notifier_block hci_sock_nblock = {
	.notifier_call = hci_sock_dev_event
};

int __init hci_sock_init(void)
{
	int err;

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0)
		goto error;

	hci_register_notifier(&hci_sock_nblock);

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	BT_ERR("HCI socket registration failed");
	proto_unregister(&hci_sk_proto);
	return err;
}

void __exit hci_sock_cleanup(void)
{
	if (bt_sock_unregister(BTPROTO_HCI) < 0)
		BT_ERR("HCI socket unregistration failed");

	hci_unregister_notifier(&hci_sock_nblock);

	proto_unregister(&hci_sk_proto);
}