1/*
2   BlueZ - Bluetooth protocol stack for Linux
3   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4   Copyright 2023 NXP
5
6   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8   This program is free software; you can redistribute it and/or modify
9   it under the terms of the GNU General Public License version 2 as
10   published by the Free Software Foundation;
11
12   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23   SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI event handling. */
27
28#include <asm/unaligned.h>
29#include <linux/crypto.h>
30#include <crypto/algapi.h>
31
32#include <net/bluetooth/bluetooth.h>
33#include <net/bluetooth/hci_core.h>
34#include <net/bluetooth/mgmt.h>
35
36#include "hci_request.h"
37#include "hci_debugfs.h"
38#include "hci_codec.h"
39#include "smp.h"
40#include "msft.h"
41#include "eir.h"
42
43#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
44		 "\x00\x00\x00\x00\x00\x00\x00\x00"
45
46#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
47
48/* Handle HCI Event packets */
49
50static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
51			     u8 ev, size_t len)
52{
53	void *data;
54
55	data = skb_pull_data(skb, len);
56	if (!data)
57		bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
58
59	return data;
60}
61
62static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
63			     u16 op, size_t len)
64{
65	void *data;
66
67	data = skb_pull_data(skb, len);
68	if (!data)
69		bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
70
71	return data;
72}
73
74static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
75				u8 ev, size_t len)
76{
77	void *data;
78
79	data = skb_pull_data(skb, len);
80	if (!data)
81		bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
82
83	return data;
84}
85
86static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
87				struct sk_buff *skb)
88{
89	struct hci_ev_status *rp = data;
90
91	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
92
93	/* It is possible that we receive Inquiry Complete event right
94	 * before we receive Inquiry Cancel Command Complete event, in
95	 * which case the latter event should have status of Command
96	 * Disallowed. This should not be treated as error, since
97	 * we actually achieve what Inquiry Cancel wants to achieve,
98	 * which is to end the last Inquiry session.
99	 */
100	if (rp->status == HCI_ERROR_COMMAND_DISALLOWED && !test_bit(HCI_INQUIRY, &hdev->flags)) {
101		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
102		rp->status = 0x00;
103	}
104
105	if (rp->status)
106		return rp->status;
107
108	clear_bit(HCI_INQUIRY, &hdev->flags);
109	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
110	wake_up_bit(&hdev->flags, HCI_INQUIRY);
111
112	hci_dev_lock(hdev);
113	/* Set discovery state to stopped if we're not doing LE active
114	 * scanning.
115	 */
116	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
117	    hdev->le_scan_type != LE_SCAN_ACTIVE)
118		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
119	hci_dev_unlock(hdev);
120
121	return rp->status;
122}
123
124static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
125			      struct sk_buff *skb)
126{
127	struct hci_ev_status *rp = data;
128
129	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
130
131	if (rp->status)
132		return rp->status;
133
134	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
135
136	return rp->status;
137}
138
139static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
140				   struct sk_buff *skb)
141{
142	struct hci_ev_status *rp = data;
143
144	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
145
146	if (rp->status)
147		return rp->status;
148
149	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
150
151	return rp->status;
152}
153
154static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
155					struct sk_buff *skb)
156{
157	struct hci_ev_status *rp = data;
158
159	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
160
161	return rp->status;
162}
163
164static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
165				struct sk_buff *skb)
166{
167	struct hci_rp_role_discovery *rp = data;
168	struct hci_conn *conn;
169
170	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
171
172	if (rp->status)
173		return rp->status;
174
175	hci_dev_lock(hdev);
176
177	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
178	if (conn)
179		conn->role = rp->role;
180
181	hci_dev_unlock(hdev);
182
183	return rp->status;
184}
185
186static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
187				  struct sk_buff *skb)
188{
189	struct hci_rp_read_link_policy *rp = data;
190	struct hci_conn *conn;
191
192	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
193
194	if (rp->status)
195		return rp->status;
196
197	hci_dev_lock(hdev);
198
199	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
200	if (conn)
201		conn->link_policy = __le16_to_cpu(rp->policy);
202
203	hci_dev_unlock(hdev);
204
205	return rp->status;
206}
207
208static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
209				   struct sk_buff *skb)
210{
211	struct hci_rp_write_link_policy *rp = data;
212	struct hci_conn *conn;
213	void *sent;
214
215	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
216
217	if (rp->status)
218		return rp->status;
219
220	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
221	if (!sent)
222		return rp->status;
223
224	hci_dev_lock(hdev);
225
226	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
227	if (conn)
228		conn->link_policy = get_unaligned_le16(sent + 2);
229
230	hci_dev_unlock(hdev);
231
232	return rp->status;
233}
234
235static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
236				      struct sk_buff *skb)
237{
238	struct hci_rp_read_def_link_policy *rp = data;
239
240	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
241
242	if (rp->status)
243		return rp->status;
244
245	hdev->link_policy = __le16_to_cpu(rp->policy);
246
247	return rp->status;
248}
249
250static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
251				       struct sk_buff *skb)
252{
253	struct hci_ev_status *rp = data;
254	void *sent;
255
256	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
257
258	if (rp->status)
259		return rp->status;
260
261	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
262	if (!sent)
263		return rp->status;
264
265	hdev->link_policy = get_unaligned_le16(sent);
266
267	return rp->status;
268}
269
270static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
271{
272	struct hci_ev_status *rp = data;
273
274	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
275
276	clear_bit(HCI_RESET, &hdev->flags);
277
278	if (rp->status)
279		return rp->status;
280
281	/* Reset all non-persistent flags */
282	hci_dev_clear_volatile_flags(hdev);
283
284	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
285
286	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
287	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
288
289	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
290	hdev->adv_data_len = 0;
291
292	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
293	hdev->scan_rsp_data_len = 0;
294
295	hdev->le_scan_type = LE_SCAN_PASSIVE;
296
297	hdev->ssp_debug_mode = 0;
298
299	hci_bdaddr_list_clear(&hdev->le_accept_list);
300	hci_bdaddr_list_clear(&hdev->le_resolv_list);
301
302	return rp->status;
303}
304
305static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
306				      struct sk_buff *skb)
307{
308	struct hci_rp_read_stored_link_key *rp = data;
309	struct hci_cp_read_stored_link_key *sent;
310
311	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
312
313	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
314	if (!sent)
315		return rp->status;
316
317	if (!rp->status && sent->read_all == 0x01) {
318		hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
319		hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
320	}
321
322	return rp->status;
323}
324
325static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
326					struct sk_buff *skb)
327{
328	struct hci_rp_delete_stored_link_key *rp = data;
329	u16 num_keys;
330
331	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
332
333	if (rp->status)
334		return rp->status;
335
336	num_keys = le16_to_cpu(rp->num_keys);
337
338	if (num_keys <= hdev->stored_num_keys)
339		hdev->stored_num_keys -= num_keys;
340	else
341		hdev->stored_num_keys = 0;
342
343	return rp->status;
344}
345
346static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
347				  struct sk_buff *skb)
348{
349	struct hci_ev_status *rp = data;
350	void *sent;
351
352	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
353
354	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
355	if (!sent)
356		return rp->status;
357
358	hci_dev_lock(hdev);
359
360	if (hci_dev_test_flag(hdev, HCI_MGMT))
361		mgmt_set_local_name_complete(hdev, sent, rp->status);
362	else if (!rp->status)
363		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
364
365	hci_dev_unlock(hdev);
366
367	return rp->status;
368}
369
370static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
371				 struct sk_buff *skb)
372{
373	struct hci_rp_read_local_name *rp = data;
374
375	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
376
377	if (rp->status)
378		return rp->status;
379
380	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
381	    hci_dev_test_flag(hdev, HCI_CONFIG))
382		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
383
384	return rp->status;
385}
386
387static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
388				   struct sk_buff *skb)
389{
390	struct hci_ev_status *rp = data;
391	void *sent;
392
393	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
394
395	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
396	if (!sent)
397		return rp->status;
398
399	hci_dev_lock(hdev);
400
401	if (!rp->status) {
402		__u8 param = *((__u8 *) sent);
403
404		if (param == AUTH_ENABLED)
405			set_bit(HCI_AUTH, &hdev->flags);
406		else
407			clear_bit(HCI_AUTH, &hdev->flags);
408	}
409
410	if (hci_dev_test_flag(hdev, HCI_MGMT))
411		mgmt_auth_enable_complete(hdev, rp->status);
412
413	hci_dev_unlock(hdev);
414
415	return rp->status;
416}
417
418static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
419				    struct sk_buff *skb)
420{
421	struct hci_ev_status *rp = data;
422	__u8 param;
423	void *sent;
424
425	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
426
427	if (rp->status)
428		return rp->status;
429
430	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
431	if (!sent)
432		return rp->status;
433
434	param = *((__u8 *) sent);
435
436	if (param)
437		set_bit(HCI_ENCRYPT, &hdev->flags);
438	else
439		clear_bit(HCI_ENCRYPT, &hdev->flags);
440
441	return rp->status;
442}
443
444static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
445				   struct sk_buff *skb)
446{
447	struct hci_ev_status *rp = data;
448	__u8 param;
449	void *sent;
450
451	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
452
453	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
454	if (!sent)
455		return rp->status;
456
457	param = *((__u8 *) sent);
458
459	hci_dev_lock(hdev);
460
461	if (rp->status) {
462		hdev->discov_timeout = 0;
463		goto done;
464	}
465
466	if (param & SCAN_INQUIRY)
467		set_bit(HCI_ISCAN, &hdev->flags);
468	else
469		clear_bit(HCI_ISCAN, &hdev->flags);
470
471	if (param & SCAN_PAGE)
472		set_bit(HCI_PSCAN, &hdev->flags);
473	else
474		clear_bit(HCI_PSCAN, &hdev->flags);
475
476done:
477	hci_dev_unlock(hdev);
478
479	return rp->status;
480}
481
482static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
483				  struct sk_buff *skb)
484{
485	struct hci_ev_status *rp = data;
486	struct hci_cp_set_event_filter *cp;
487	void *sent;
488
489	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
490
491	if (rp->status)
492		return rp->status;
493
494	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
495	if (!sent)
496		return rp->status;
497
498	cp = (struct hci_cp_set_event_filter *)sent;
499
500	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
501		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
502	else
503		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
504
505	return rp->status;
506}
507
508static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
509				   struct sk_buff *skb)
510{
511	struct hci_rp_read_class_of_dev *rp = data;
512
513	if (WARN_ON(!hdev))
514		return HCI_ERROR_UNSPECIFIED;
515
516	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
517
518	if (rp->status)
519		return rp->status;
520
521	memcpy(hdev->dev_class, rp->dev_class, 3);
522
523	bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
524		   hdev->dev_class[1], hdev->dev_class[0]);
525
526	return rp->status;
527}
528
529static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
530				    struct sk_buff *skb)
531{
532	struct hci_ev_status *rp = data;
533	void *sent;
534
535	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
536
537	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
538	if (!sent)
539		return rp->status;
540
541	hci_dev_lock(hdev);
542
543	if (!rp->status)
544		memcpy(hdev->dev_class, sent, 3);
545
546	if (hci_dev_test_flag(hdev, HCI_MGMT))
547		mgmt_set_class_of_dev_complete(hdev, sent, rp->status);
548
549	hci_dev_unlock(hdev);
550
551	return rp->status;
552}
553
554static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
555				    struct sk_buff *skb)
556{
557	struct hci_rp_read_voice_setting *rp = data;
558	__u16 setting;
559
560	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
561
562	if (rp->status)
563		return rp->status;
564
565	setting = __le16_to_cpu(rp->voice_setting);
566
567	if (hdev->voice_setting == setting)
568		return rp->status;
569
570	hdev->voice_setting = setting;
571
572	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
573
574	if (hdev->notify)
575		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
576
577	return rp->status;
578}
579
580static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
581				     struct sk_buff *skb)
582{
583	struct hci_ev_status *rp = data;
584	__u16 setting;
585	void *sent;
586
587	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
588
589	if (rp->status)
590		return rp->status;
591
592	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
593	if (!sent)
594		return rp->status;
595
596	setting = get_unaligned_le16(sent);
597
598	if (hdev->voice_setting == setting)
599		return rp->status;
600
601	hdev->voice_setting = setting;
602
603	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
604
605	if (hdev->notify)
606		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
607
608	return rp->status;
609}
610
611static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
612					struct sk_buff *skb)
613{
614	struct hci_rp_read_num_supported_iac *rp = data;
615
616	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
617
618	if (rp->status)
619		return rp->status;
620
621	hdev->num_iac = rp->num_iac;
622
623	bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
624
625	return rp->status;
626}
627
628static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
629				struct sk_buff *skb)
630{
631	struct hci_ev_status *rp = data;
632	struct hci_cp_write_ssp_mode *sent;
633
634	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
635
636	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
637	if (!sent)
638		return rp->status;
639
640	hci_dev_lock(hdev);
641
642	if (!rp->status) {
643		if (sent->mode)
644			hdev->features[1][0] |= LMP_HOST_SSP;
645		else
646			hdev->features[1][0] &= ~LMP_HOST_SSP;
647	}
648
649	if (!rp->status) {
650		if (sent->mode)
651			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
652		else
653			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
654	}
655
656	hci_dev_unlock(hdev);
657
658	return rp->status;
659}
660
661static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
662				  struct sk_buff *skb)
663{
664	struct hci_ev_status *rp = data;
665	struct hci_cp_write_sc_support *sent;
666
667	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
668
669	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
670	if (!sent)
671		return rp->status;
672
673	hci_dev_lock(hdev);
674
675	if (!rp->status) {
676		if (sent->support)
677			hdev->features[1][0] |= LMP_HOST_SC;
678		else
679			hdev->features[1][0] &= ~LMP_HOST_SC;
680	}
681
682	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
683		if (sent->support)
684			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
685		else
686			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
687	}
688
689	hci_dev_unlock(hdev);
690
691	return rp->status;
692}
693
694static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
695				    struct sk_buff *skb)
696{
697	struct hci_rp_read_local_version *rp = data;
698
699	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
700
701	if (rp->status)
702		return rp->status;
703
704	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
705	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
706		hdev->hci_ver = rp->hci_ver;
707		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
708		hdev->lmp_ver = rp->lmp_ver;
709		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
710		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
711	}
712
713	return rp->status;
714}
715
716static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
717				   struct sk_buff *skb)
718{
719	struct hci_rp_read_enc_key_size *rp = data;
720	struct hci_conn *conn;
721	u16 handle;
722	u8 status = rp->status;
723
724	bt_dev_dbg(hdev, "status 0x%2.2x", status);
725
726	handle = le16_to_cpu(rp->handle);
727
728	hci_dev_lock(hdev);
729
730	conn = hci_conn_hash_lookup_handle(hdev, handle);
731	if (!conn) {
732		status = 0xFF;
733		goto done;
734	}
735
736	/* While unexpected, the read_enc_key_size command may fail. The most
737	 * secure approach is to then assume the key size is 0 to force a
738	 * disconnection.
739	 */
740	if (status) {
741		bt_dev_err(hdev, "failed to read key size for handle %u",
742			   handle);
743		conn->enc_key_size = 0;
744	} else {
745		conn->enc_key_size = rp->key_size;
746		status = 0;
747
748		if (conn->enc_key_size < hdev->min_enc_key_size) {
749			/* As slave role, the conn->state has been set to
750			 * BT_CONNECTED and l2cap conn req might not be received
751			 * yet, at this moment the l2cap layer almost does
752			 * nothing with the non-zero status.
753			 * So we also clear encrypt related bits, and then the
754			 * handler of l2cap conn req will get the right secure
755			 * state at a later time.
756			 */
757			status = HCI_ERROR_AUTH_FAILURE;
758			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
759			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
760		}
761	}
762
763	hci_encrypt_cfm(conn, status);
764
765done:
766	hci_dev_unlock(hdev);
767
768	return status;
769}
770
771static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
772				     struct sk_buff *skb)
773{
774	struct hci_rp_read_local_commands *rp = data;
775
776	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
777
778	if (rp->status)
779		return rp->status;
780
781	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
782	    hci_dev_test_flag(hdev, HCI_CONFIG))
783		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
784
785	return rp->status;
786}
787
788static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
789					   struct sk_buff *skb)
790{
791	struct hci_rp_read_auth_payload_to *rp = data;
792	struct hci_conn *conn;
793
794	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
795
796	if (rp->status)
797		return rp->status;
798
799	hci_dev_lock(hdev);
800
801	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
802	if (conn)
803		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
804
805	hci_dev_unlock(hdev);
806
807	return rp->status;
808}
809
810static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
811					    struct sk_buff *skb)
812{
813	struct hci_rp_write_auth_payload_to *rp = data;
814	struct hci_conn *conn;
815	void *sent;
816
817	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
818
819	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
820	if (!sent)
821		return rp->status;
822
823	hci_dev_lock(hdev);
824
825	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
826	if (!conn) {
827		rp->status = 0xff;
828		goto unlock;
829	}
830
831	if (!rp->status)
832		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
833
834unlock:
835	hci_dev_unlock(hdev);
836
837	return rp->status;
838}
839
840static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
841				     struct sk_buff *skb)
842{
843	struct hci_rp_read_local_features *rp = data;
844
845	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
846
847	if (rp->status)
848		return rp->status;
849
850	memcpy(hdev->features, rp->features, 8);
851
852	/* Adjust default settings according to features
853	 * supported by device. */
854
855	if (hdev->features[0][0] & LMP_3SLOT)
856		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
857
858	if (hdev->features[0][0] & LMP_5SLOT)
859		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
860
861	if (hdev->features[0][1] & LMP_HV2) {
862		hdev->pkt_type  |= (HCI_HV2);
863		hdev->esco_type |= (ESCO_HV2);
864	}
865
866	if (hdev->features[0][1] & LMP_HV3) {
867		hdev->pkt_type  |= (HCI_HV3);
868		hdev->esco_type |= (ESCO_HV3);
869	}
870
871	if (lmp_esco_capable(hdev))
872		hdev->esco_type |= (ESCO_EV3);
873
874	if (hdev->features[0][4] & LMP_EV4)
875		hdev->esco_type |= (ESCO_EV4);
876
877	if (hdev->features[0][4] & LMP_EV5)
878		hdev->esco_type |= (ESCO_EV5);
879
880	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
881		hdev->esco_type |= (ESCO_2EV3);
882
883	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
884		hdev->esco_type |= (ESCO_3EV3);
885
886	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
887		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
888
889	return rp->status;
890}
891
892static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
893					 struct sk_buff *skb)
894{
895	struct hci_rp_read_local_ext_features *rp = data;
896
897	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
898
899	if (rp->status)
900		return rp->status;
901
902	if (hdev->max_page < rp->max_page) {
903		if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
904			     &hdev->quirks))
905			bt_dev_warn(hdev, "broken local ext features page 2");
906		else
907			hdev->max_page = rp->max_page;
908	}
909
910	if (rp->page < HCI_MAX_PAGES)
911		memcpy(hdev->features[rp->page], rp->features, 8);
912
913	return rp->status;
914}
915
916static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
917					struct sk_buff *skb)
918{
919	struct hci_rp_read_flow_control_mode *rp = data;
920
921	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
922
923	if (rp->status)
924		return rp->status;
925
926	hdev->flow_ctl_mode = rp->mode;
927
928	return rp->status;
929}
930
931static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
932				  struct sk_buff *skb)
933{
934	struct hci_rp_read_buffer_size *rp = data;
935
936	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
937
938	if (rp->status)
939		return rp->status;
940
941	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
942	hdev->sco_mtu  = rp->sco_mtu;
943	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
944	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
945
946	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
947		hdev->sco_mtu  = 64;
948		hdev->sco_pkts = 8;
949	}
950
951	hdev->acl_cnt = hdev->acl_pkts;
952	hdev->sco_cnt = hdev->sco_pkts;
953
954	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
955	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
956
957	return rp->status;
958}
959
960static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
961			      struct sk_buff *skb)
962{
963	struct hci_rp_read_bd_addr *rp = data;
964
965	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
966
967	if (rp->status)
968		return rp->status;
969
970	if (test_bit(HCI_INIT, &hdev->flags))
971		bacpy(&hdev->bdaddr, &rp->bdaddr);
972
973	if (hci_dev_test_flag(hdev, HCI_SETUP))
974		bacpy(&hdev->setup_addr, &rp->bdaddr);
975
976	return rp->status;
977}
978
979static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
980					 struct sk_buff *skb)
981{
982	struct hci_rp_read_local_pairing_opts *rp = data;
983
984	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
985
986	if (rp->status)
987		return rp->status;
988
989	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
990	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
991		hdev->pairing_opts = rp->pairing_opts;
992		hdev->max_enc_key_size = rp->max_key_size;
993	}
994
995	return rp->status;
996}
997
998static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
999					 struct sk_buff *skb)
1000{
1001	struct hci_rp_read_page_scan_activity *rp = data;
1002
1003	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1004
1005	if (rp->status)
1006		return rp->status;
1007
1008	if (test_bit(HCI_INIT, &hdev->flags)) {
1009		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
1010		hdev->page_scan_window = __le16_to_cpu(rp->window);
1011	}
1012
1013	return rp->status;
1014}
1015
1016static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
1017					  struct sk_buff *skb)
1018{
1019	struct hci_ev_status *rp = data;
1020	struct hci_cp_write_page_scan_activity *sent;
1021
1022	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1023
1024	if (rp->status)
1025		return rp->status;
1026
1027	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
1028	if (!sent)
1029		return rp->status;
1030
1031	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
1032	hdev->page_scan_window = __le16_to_cpu(sent->window);
1033
1034	return rp->status;
1035}
1036
1037static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
1038				     struct sk_buff *skb)
1039{
1040	struct hci_rp_read_page_scan_type *rp = data;
1041
1042	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1043
1044	if (rp->status)
1045		return rp->status;
1046
1047	if (test_bit(HCI_INIT, &hdev->flags))
1048		hdev->page_scan_type = rp->type;
1049
1050	return rp->status;
1051}
1052
1053static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
1054				      struct sk_buff *skb)
1055{
1056	struct hci_ev_status *rp = data;
1057	u8 *type;
1058
1059	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1060
1061	if (rp->status)
1062		return rp->status;
1063
1064	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
1065	if (type)
1066		hdev->page_scan_type = *type;
1067
1068	return rp->status;
1069}
1070
1071static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
1072				      struct sk_buff *skb)
1073{
1074	struct hci_rp_read_data_block_size *rp = data;
1075
1076	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1077
1078	if (rp->status)
1079		return rp->status;
1080
1081	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
1082	hdev->block_len = __le16_to_cpu(rp->block_len);
1083	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
1084
1085	hdev->block_cnt = hdev->num_blocks;
1086
1087	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
1088	       hdev->block_cnt, hdev->block_len);
1089
1090	return rp->status;
1091}
1092
1093static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
1094			    struct sk_buff *skb)
1095{
1096	struct hci_rp_read_clock *rp = data;
1097	struct hci_cp_read_clock *cp;
1098	struct hci_conn *conn;
1099
1100	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1101
1102	if (rp->status)
1103		return rp->status;
1104
1105	hci_dev_lock(hdev);
1106
1107	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
1108	if (!cp)
1109		goto unlock;
1110
1111	if (cp->which == 0x00) {
1112		hdev->clock = le32_to_cpu(rp->clock);
1113		goto unlock;
1114	}
1115
1116	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1117	if (conn) {
1118		conn->clock = le32_to_cpu(rp->clock);
1119		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
1120	}
1121
1122unlock:
1123	hci_dev_unlock(hdev);
1124	return rp->status;
1125}
1126
1127static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
1128				     struct sk_buff *skb)
1129{
1130	struct hci_rp_read_local_amp_info *rp = data;
1131
1132	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1133
1134	if (rp->status)
1135		return rp->status;
1136
1137	hdev->amp_status = rp->amp_status;
1138	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
1139	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
1140	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
1141	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
1142	hdev->amp_type = rp->amp_type;
1143	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
1144	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
1145	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
1146	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
1147
1148	return rp->status;
1149}
1150
1151static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
1152				       struct sk_buff *skb)
1153{
1154	struct hci_rp_read_inq_rsp_tx_power *rp = data;
1155
1156	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1157
1158	if (rp->status)
1159		return rp->status;
1160
1161	hdev->inq_tx_power = rp->tx_power;
1162
1163	return rp->status;
1164}
1165
1166static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
1167					     struct sk_buff *skb)
1168{
1169	struct hci_rp_read_def_err_data_reporting *rp = data;
1170
1171	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1172
1173	if (rp->status)
1174		return rp->status;
1175
1176	hdev->err_data_reporting = rp->err_data_reporting;
1177
1178	return rp->status;
1179}
1180
1181static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
1182					      struct sk_buff *skb)
1183{
1184	struct hci_ev_status *rp = data;
1185	struct hci_cp_write_def_err_data_reporting *cp;
1186
1187	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1188
1189	if (rp->status)
1190		return rp->status;
1191
1192	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
1193	if (!cp)
1194		return rp->status;
1195
1196	hdev->err_data_reporting = cp->err_data_reporting;
1197
1198	return rp->status;
1199}
1200
1201static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
1202				struct sk_buff *skb)
1203{
1204	struct hci_rp_pin_code_reply *rp = data;
1205	struct hci_cp_pin_code_reply *cp;
1206	struct hci_conn *conn;
1207
1208	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1209
1210	hci_dev_lock(hdev);
1211
1212	if (hci_dev_test_flag(hdev, HCI_MGMT))
1213		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1214
1215	if (rp->status)
1216		goto unlock;
1217
1218	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1219	if (!cp)
1220		goto unlock;
1221
1222	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1223	if (conn)
1224		conn->pin_length = cp->pin_len;
1225
1226unlock:
1227	hci_dev_unlock(hdev);
1228	return rp->status;
1229}
1230
1231static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
1232				    struct sk_buff *skb)
1233{
1234	struct hci_rp_pin_code_neg_reply *rp = data;
1235
1236	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1237
1238	hci_dev_lock(hdev);
1239
1240	if (hci_dev_test_flag(hdev, HCI_MGMT))
1241		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1242						 rp->status);
1243
1244	hci_dev_unlock(hdev);
1245
1246	return rp->status;
1247}
1248
1249static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
1250				     struct sk_buff *skb)
1251{
1252	struct hci_rp_le_read_buffer_size *rp = data;
1253
1254	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1255
1256	if (rp->status)
1257		return rp->status;
1258
1259	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1260	hdev->le_pkts = rp->le_max_pkt;
1261
1262	hdev->le_cnt = hdev->le_pkts;
1263
1264	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1265
1266	return rp->status;
1267}
1268
1269static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1270					struct sk_buff *skb)
1271{
1272	struct hci_rp_le_read_local_features *rp = data;
1273
1274	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1275
1276	if (rp->status)
1277		return rp->status;
1278
1279	memcpy(hdev->le_features, rp->features, 8);
1280
1281	return rp->status;
1282}
1283
1284static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
1285				      struct sk_buff *skb)
1286{
1287	struct hci_rp_le_read_adv_tx_power *rp = data;
1288
1289	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1290
1291	if (rp->status)
1292		return rp->status;
1293
1294	hdev->adv_tx_power = rp->tx_power;
1295
1296	return rp->status;
1297}
1298
1299static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
1300				    struct sk_buff *skb)
1301{
1302	struct hci_rp_user_confirm_reply *rp = data;
1303
1304	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1305
1306	hci_dev_lock(hdev);
1307
1308	if (hci_dev_test_flag(hdev, HCI_MGMT))
1309		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1310						 rp->status);
1311
1312	hci_dev_unlock(hdev);
1313
1314	return rp->status;
1315}
1316
1317static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
1318					struct sk_buff *skb)
1319{
1320	struct hci_rp_user_confirm_reply *rp = data;
1321
1322	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1323
1324	hci_dev_lock(hdev);
1325
1326	if (hci_dev_test_flag(hdev, HCI_MGMT))
1327		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1328						     ACL_LINK, 0, rp->status);
1329
1330	hci_dev_unlock(hdev);
1331
1332	return rp->status;
1333}
1334
1335static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
1336				    struct sk_buff *skb)
1337{
1338	struct hci_rp_user_confirm_reply *rp = data;
1339
1340	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1341
1342	hci_dev_lock(hdev);
1343
1344	if (hci_dev_test_flag(hdev, HCI_MGMT))
1345		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1346						 0, rp->status);
1347
1348	hci_dev_unlock(hdev);
1349
1350	return rp->status;
1351}
1352
1353static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
1354					struct sk_buff *skb)
1355{
1356	struct hci_rp_user_confirm_reply *rp = data;
1357
1358	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1359
1360	hci_dev_lock(hdev);
1361
1362	if (hci_dev_test_flag(hdev, HCI_MGMT))
1363		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1364						     ACL_LINK, 0, rp->status);
1365
1366	hci_dev_unlock(hdev);
1367
1368	return rp->status;
1369}
1370
1371static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
1372				     struct sk_buff *skb)
1373{
1374	struct hci_rp_read_local_oob_data *rp = data;
1375
1376	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1377
1378	return rp->status;
1379}
1380
1381static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
1382					 struct sk_buff *skb)
1383{
1384	struct hci_rp_read_local_oob_ext_data *rp = data;
1385
1386	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1387
1388	return rp->status;
1389}
1390
1391static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
1392				    struct sk_buff *skb)
1393{
1394	struct hci_ev_status *rp = data;
1395	bdaddr_t *sent;
1396
1397	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1398
1399	if (rp->status)
1400		return rp->status;
1401
1402	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1403	if (!sent)
1404		return rp->status;
1405
1406	hci_dev_lock(hdev);
1407
1408	bacpy(&hdev->random_addr, sent);
1409
1410	if (!bacmp(&hdev->rpa, sent)) {
1411		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
1412		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
1413				   secs_to_jiffies(hdev->rpa_timeout));
1414	}
1415
1416	hci_dev_unlock(hdev);
1417
1418	return rp->status;
1419}
1420
1421static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
1422				    struct sk_buff *skb)
1423{
1424	struct hci_ev_status *rp = data;
1425	struct hci_cp_le_set_default_phy *cp;
1426
1427	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1428
1429	if (rp->status)
1430		return rp->status;
1431
1432	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1433	if (!cp)
1434		return rp->status;
1435
1436	hci_dev_lock(hdev);
1437
1438	hdev->le_tx_def_phys = cp->tx_phys;
1439	hdev->le_rx_def_phys = cp->rx_phys;
1440
1441	hci_dev_unlock(hdev);
1442
1443	return rp->status;
1444}
1445
1446static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
1447					    struct sk_buff *skb)
1448{
1449	struct hci_ev_status *rp = data;
1450	struct hci_cp_le_set_adv_set_rand_addr *cp;
1451	struct adv_info *adv;
1452
1453	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1454
1455	if (rp->status)
1456		return rp->status;
1457
1458	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1459	/* Update only in case the adv instance since handle 0x00 shall be using
1460	 * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and
1461	 * non-extended adverting.
1462	 */
1463	if (!cp || !cp->handle)
1464		return rp->status;
1465
1466	hci_dev_lock(hdev);
1467
1468	adv = hci_find_adv_instance(hdev, cp->handle);
1469	if (adv) {
1470		bacpy(&adv->random_addr, &cp->bdaddr);
1471		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1472			adv->rpa_expired = false;
1473			queue_delayed_work(hdev->workqueue,
1474					   &adv->rpa_expired_cb,
1475					   secs_to_jiffies(hdev->rpa_timeout));
1476		}
1477	}
1478
1479	hci_dev_unlock(hdev);
1480
1481	return rp->status;
1482}
1483
1484static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
1485				   struct sk_buff *skb)
1486{
1487	struct hci_ev_status *rp = data;
1488	u8 *instance;
1489	int err;
1490
1491	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1492
1493	if (rp->status)
1494		return rp->status;
1495
1496	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
1497	if (!instance)
1498		return rp->status;
1499
1500	hci_dev_lock(hdev);
1501
1502	err = hci_remove_adv_instance(hdev, *instance);
1503	if (!err)
1504		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
1505					 *instance);
1506
1507	hci_dev_unlock(hdev);
1508
1509	return rp->status;
1510}
1511
1512static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
1513				   struct sk_buff *skb)
1514{
1515	struct hci_ev_status *rp = data;
1516	struct adv_info *adv, *n;
1517	int err;
1518
1519	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1520
1521	if (rp->status)
1522		return rp->status;
1523
1524	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
1525		return rp->status;
1526
1527	hci_dev_lock(hdev);
1528
1529	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
1530		u8 instance = adv->instance;
1531
1532		err = hci_remove_adv_instance(hdev, instance);
1533		if (!err)
1534			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
1535						 hdev, instance);
1536	}
1537
1538	hci_dev_unlock(hdev);
1539
1540	return rp->status;
1541}
1542
1543static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
1544					struct sk_buff *skb)
1545{
1546	struct hci_rp_le_read_transmit_power *rp = data;
1547
1548	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1549
1550	if (rp->status)
1551		return rp->status;
1552
1553	hdev->min_le_tx_power = rp->min_le_tx_power;
1554	hdev->max_le_tx_power = rp->max_le_tx_power;
1555
1556	return rp->status;
1557}
1558
1559static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
1560				     struct sk_buff *skb)
1561{
1562	struct hci_ev_status *rp = data;
1563	struct hci_cp_le_set_privacy_mode *cp;
1564	struct hci_conn_params *params;
1565
1566	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1567
1568	if (rp->status)
1569		return rp->status;
1570
1571	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
1572	if (!cp)
1573		return rp->status;
1574
1575	hci_dev_lock(hdev);
1576
1577	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
1578	if (params)
1579		WRITE_ONCE(params->privacy_mode, cp->mode);
1580
1581	hci_dev_unlock(hdev);
1582
1583	return rp->status;
1584}
1585
1586static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
1587				   struct sk_buff *skb)
1588{
1589	struct hci_ev_status *rp = data;
1590	__u8 *sent;
1591
1592	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1593
1594	if (rp->status)
1595		return rp->status;
1596
1597	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1598	if (!sent)
1599		return rp->status;
1600
1601	hci_dev_lock(hdev);
1602
1603	/* If we're doing connection initiation as peripheral. Set a
1604	 * timeout in case something goes wrong.
1605	 */
1606	if (*sent) {
1607		struct hci_conn *conn;
1608
1609		hci_dev_set_flag(hdev, HCI_LE_ADV);
1610
1611		conn = hci_lookup_le_connect(hdev);
1612		if (conn)
1613			queue_delayed_work(hdev->workqueue,
1614					   &conn->le_conn_timeout,
1615					   conn->conn_timeout);
1616	} else {
1617		hci_dev_clear_flag(hdev, HCI_LE_ADV);
1618	}
1619
1620	hci_dev_unlock(hdev);
1621
1622	return rp->status;
1623}
1624
1625static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
1626				       struct sk_buff *skb)
1627{
1628	struct hci_cp_le_set_ext_adv_enable *cp;
1629	struct hci_cp_ext_adv_set *set;
1630	struct adv_info *adv = NULL, *n;
1631	struct hci_ev_status *rp = data;
1632
1633	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1634
1635	if (rp->status)
1636		return rp->status;
1637
1638	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1639	if (!cp)
1640		return rp->status;
1641
1642	set = (void *)cp->data;
1643
1644	hci_dev_lock(hdev);
1645
1646	if (cp->num_of_sets)
1647		adv = hci_find_adv_instance(hdev, set->handle);
1648
1649	if (cp->enable) {
1650		struct hci_conn *conn;
1651
1652		hci_dev_set_flag(hdev, HCI_LE_ADV);
1653
1654		if (adv && !adv->periodic)
1655			adv->enabled = true;
1656
1657		conn = hci_lookup_le_connect(hdev);
1658		if (conn)
1659			queue_delayed_work(hdev->workqueue,
1660					   &conn->le_conn_timeout,
1661					   conn->conn_timeout);
1662	} else {
1663		if (cp->num_of_sets) {
1664			if (adv)
1665				adv->enabled = false;
1666
1667			/* If just one instance was disabled check if there are
1668			 * any other instance enabled before clearing HCI_LE_ADV
1669			 */
1670			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1671						 list) {
1672				if (adv->enabled)
1673					goto unlock;
1674			}
1675		} else {
1676			/* All instances shall be considered disabled */
1677			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1678						 list)
1679				adv->enabled = false;
1680		}
1681
1682		hci_dev_clear_flag(hdev, HCI_LE_ADV);
1683	}
1684
1685unlock:
1686	hci_dev_unlock(hdev);
1687	return rp->status;
1688}
1689
1690static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
1691				   struct sk_buff *skb)
1692{
1693	struct hci_cp_le_set_scan_param *cp;
1694	struct hci_ev_status *rp = data;
1695
1696	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1697
1698	if (rp->status)
1699		return rp->status;
1700
1701	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1702	if (!cp)
1703		return rp->status;
1704
1705	hci_dev_lock(hdev);
1706
1707	hdev->le_scan_type = cp->type;
1708
1709	hci_dev_unlock(hdev);
1710
1711	return rp->status;
1712}
1713
1714static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
1715				       struct sk_buff *skb)
1716{
1717	struct hci_cp_le_set_ext_scan_params *cp;
1718	struct hci_ev_status *rp = data;
1719	struct hci_cp_le_scan_phy_params *phy_param;
1720
1721	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1722
1723	if (rp->status)
1724		return rp->status;
1725
1726	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1727	if (!cp)
1728		return rp->status;
1729
1730	phy_param = (void *)cp->data;
1731
1732	hci_dev_lock(hdev);
1733
1734	hdev->le_scan_type = phy_param->type;
1735
1736	hci_dev_unlock(hdev);
1737
1738	return rp->status;
1739}
1740
1741static bool has_pending_adv_report(struct hci_dev *hdev)
1742{
1743	struct discovery_state *d = &hdev->discovery;
1744
1745	return bacmp(&d->last_adv_addr, BDADDR_ANY);
1746}
1747
1748static void clear_pending_adv_report(struct hci_dev *hdev)
1749{
1750	struct discovery_state *d = &hdev->discovery;
1751
1752	bacpy(&d->last_adv_addr, BDADDR_ANY);
1753	d->last_adv_data_len = 0;
1754}
1755
1756static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1757				     u8 bdaddr_type, s8 rssi, u32 flags,
1758				     u8 *data, u8 len)
1759{
1760	struct discovery_state *d = &hdev->discovery;
1761
1762	if (len > max_adv_len(hdev))
1763		return;
1764
1765	bacpy(&d->last_adv_addr, bdaddr);
1766	d->last_adv_addr_type = bdaddr_type;
1767	d->last_adv_rssi = rssi;
1768	d->last_adv_flags = flags;
1769	memcpy(d->last_adv_data, data, len);
1770	d->last_adv_data_len = len;
1771}
1772
1773static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1774{
1775	hci_dev_lock(hdev);
1776
1777	switch (enable) {
1778	case LE_SCAN_ENABLE:
1779		hci_dev_set_flag(hdev, HCI_LE_SCAN);
1780		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1781			clear_pending_adv_report(hdev);
1782		if (hci_dev_test_flag(hdev, HCI_MESH))
1783			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1784		break;
1785
1786	case LE_SCAN_DISABLE:
1787		/* We do this here instead of when setting DISCOVERY_STOPPED
1788		 * since the latter would potentially require waiting for
1789		 * inquiry to stop too.
1790		 */
1791		if (has_pending_adv_report(hdev)) {
1792			struct discovery_state *d = &hdev->discovery;
1793
1794			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1795					  d->last_adv_addr_type, NULL,
1796					  d->last_adv_rssi, d->last_adv_flags,
1797					  d->last_adv_data,
1798					  d->last_adv_data_len, NULL, 0, 0);
1799		}
1800
1801		/* Cancel this timer so that we don't try to disable scanning
1802		 * when it's already disabled.
1803		 */
1804		cancel_delayed_work(&hdev->le_scan_disable);
1805
1806		hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1807
1808		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1809		 * interrupted scanning due to a connect request. Mark
1810		 * therefore discovery as stopped.
1811		 */
1812		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1813			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1814		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1815			 hdev->discovery.state == DISCOVERY_FINDING)
1816			queue_work(hdev->workqueue, &hdev->reenable_adv_work);
1817
1818		break;
1819
1820	default:
1821		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1822			   enable);
1823		break;
1824	}
1825
1826	hci_dev_unlock(hdev);
1827}
1828
1829static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
1830				    struct sk_buff *skb)
1831{
1832	struct hci_cp_le_set_scan_enable *cp;
1833	struct hci_ev_status *rp = data;
1834
1835	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1836
1837	if (rp->status)
1838		return rp->status;
1839
1840	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1841	if (!cp)
1842		return rp->status;
1843
1844	le_set_scan_enable_complete(hdev, cp->enable);
1845
1846	return rp->status;
1847}
1848
1849static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
1850					struct sk_buff *skb)
1851{
1852	struct hci_cp_le_set_ext_scan_enable *cp;
1853	struct hci_ev_status *rp = data;
1854
1855	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1856
1857	if (rp->status)
1858		return rp->status;
1859
1860	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1861	if (!cp)
1862		return rp->status;
1863
1864	le_set_scan_enable_complete(hdev, cp->enable);
1865
1866	return rp->status;
1867}
1868
1869static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
1870				      struct sk_buff *skb)
1871{
1872	struct hci_rp_le_read_num_supported_adv_sets *rp = data;
1873
1874	bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
1875		   rp->num_of_sets);
1876
1877	if (rp->status)
1878		return rp->status;
1879
1880	hdev->le_num_of_adv_sets = rp->num_of_sets;
1881
1882	return rp->status;
1883}
1884
1885static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
1886					  struct sk_buff *skb)
1887{
1888	struct hci_rp_le_read_accept_list_size *rp = data;
1889
1890	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
1891
1892	if (rp->status)
1893		return rp->status;
1894
1895	hdev->le_accept_list_size = rp->size;
1896
1897	return rp->status;
1898}
1899
1900static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
1901				      struct sk_buff *skb)
1902{
1903	struct hci_ev_status *rp = data;
1904
1905	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1906
1907	if (rp->status)
1908		return rp->status;
1909
1910	hci_dev_lock(hdev);
1911	hci_bdaddr_list_clear(&hdev->le_accept_list);
1912	hci_dev_unlock(hdev);
1913
1914	return rp->status;
1915}
1916
1917static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
1918				       struct sk_buff *skb)
1919{
1920	struct hci_cp_le_add_to_accept_list *sent;
1921	struct hci_ev_status *rp = data;
1922
1923	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1924
1925	if (rp->status)
1926		return rp->status;
1927
1928	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1929	if (!sent)
1930		return rp->status;
1931
1932	hci_dev_lock(hdev);
1933	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1934			    sent->bdaddr_type);
1935	hci_dev_unlock(hdev);
1936
1937	return rp->status;
1938}
1939
1940static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
1941					 struct sk_buff *skb)
1942{
1943	struct hci_cp_le_del_from_accept_list *sent;
1944	struct hci_ev_status *rp = data;
1945
1946	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1947
1948	if (rp->status)
1949		return rp->status;
1950
1951	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1952	if (!sent)
1953		return rp->status;
1954
1955	hci_dev_lock(hdev);
1956	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1957			    sent->bdaddr_type);
1958	hci_dev_unlock(hdev);
1959
1960	return rp->status;
1961}
1962
1963static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1964					  struct sk_buff *skb)
1965{
1966	struct hci_rp_le_read_supported_states *rp = data;
1967
1968	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1969
1970	if (rp->status)
1971		return rp->status;
1972
1973	memcpy(hdev->le_states, rp->le_states, 8);
1974
1975	return rp->status;
1976}
1977
1978static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
1979				      struct sk_buff *skb)
1980{
1981	struct hci_rp_le_read_def_data_len *rp = data;
1982
1983	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1984
1985	if (rp->status)
1986		return rp->status;
1987
1988	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1989	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1990
1991	return rp->status;
1992}
1993
1994static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
1995				       struct sk_buff *skb)
1996{
1997	struct hci_cp_le_write_def_data_len *sent;
1998	struct hci_ev_status *rp = data;
1999
2000	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2001
2002	if (rp->status)
2003		return rp->status;
2004
2005	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
2006	if (!sent)
2007		return rp->status;
2008
2009	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
2010	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
2011
2012	return rp->status;
2013}
2014
2015static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
2016				       struct sk_buff *skb)
2017{
2018	struct hci_cp_le_add_to_resolv_list *sent;
2019	struct hci_ev_status *rp = data;
2020
2021	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2022
2023	if (rp->status)
2024		return rp->status;
2025
2026	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
2027	if (!sent)
2028		return rp->status;
2029
2030	hci_dev_lock(hdev);
2031	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2032				sent->bdaddr_type, sent->peer_irk,
2033				sent->local_irk);
2034	hci_dev_unlock(hdev);
2035
2036	return rp->status;
2037}
2038
2039static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
2040					 struct sk_buff *skb)
2041{
2042	struct hci_cp_le_del_from_resolv_list *sent;
2043	struct hci_ev_status *rp = data;
2044
2045	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2046
2047	if (rp->status)
2048		return rp->status;
2049
2050	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
2051	if (!sent)
2052		return rp->status;
2053
2054	hci_dev_lock(hdev);
2055	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2056			    sent->bdaddr_type);
2057	hci_dev_unlock(hdev);
2058
2059	return rp->status;
2060}
2061
2062static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
2063				      struct sk_buff *skb)
2064{
2065	struct hci_ev_status *rp = data;
2066
2067	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2068
2069	if (rp->status)
2070		return rp->status;
2071
2072	hci_dev_lock(hdev);
2073	hci_bdaddr_list_clear(&hdev->le_resolv_list);
2074	hci_dev_unlock(hdev);
2075
2076	return rp->status;
2077}
2078
2079static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2080					  struct sk_buff *skb)
2081{
2082	struct hci_rp_le_read_resolv_list_size *rp = data;
2083
2084	bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2085
2086	if (rp->status)
2087		return rp->status;
2088
2089	hdev->le_resolv_list_size = rp->size;
2090
2091	return rp->status;
2092}
2093
2094static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2095					       struct sk_buff *skb)
2096{
2097	struct hci_ev_status *rp = data;
2098	__u8 *sent;
2099
2100	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2101
2102	if (rp->status)
2103		return rp->status;
2104
2105	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
2106	if (!sent)
2107		return rp->status;
2108
2109	hci_dev_lock(hdev);
2110
2111	if (*sent)
2112		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2113	else
2114		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2115
2116	hci_dev_unlock(hdev);
2117
2118	return rp->status;
2119}
2120
2121static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
2122				      struct sk_buff *skb)
2123{
2124	struct hci_rp_le_read_max_data_len *rp = data;
2125
2126	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2127
2128	if (rp->status)
2129		return rp->status;
2130
2131	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
2132	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
2133	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
2134	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
2135
2136	return rp->status;
2137}
2138
2139static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
2140					 struct sk_buff *skb)
2141{
2142	struct hci_cp_write_le_host_supported *sent;
2143	struct hci_ev_status *rp = data;
2144
2145	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2146
2147	if (rp->status)
2148		return rp->status;
2149
2150	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
2151	if (!sent)
2152		return rp->status;
2153
2154	hci_dev_lock(hdev);
2155
2156	if (sent->le) {
2157		hdev->features[1][0] |= LMP_HOST_LE;
2158		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2159	} else {
2160		hdev->features[1][0] &= ~LMP_HOST_LE;
2161		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
2162		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2163	}
2164
2165	if (sent->simul)
2166		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
2167	else
2168		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
2169
2170	hci_dev_unlock(hdev);
2171
2172	return rp->status;
2173}
2174
2175static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2176			       struct sk_buff *skb)
2177{
2178	struct hci_cp_le_set_adv_param *cp;
2179	struct hci_ev_status *rp = data;
2180
2181	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2182
2183	if (rp->status)
2184		return rp->status;
2185
2186	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2187	if (!cp)
2188		return rp->status;
2189
2190	hci_dev_lock(hdev);
2191	hdev->adv_addr_type = cp->own_address_type;
2192	hci_dev_unlock(hdev);
2193
2194	return rp->status;
2195}
2196
2197static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
2198				   struct sk_buff *skb)
2199{
2200	struct hci_rp_le_set_ext_adv_params *rp = data;
2201	struct hci_cp_le_set_ext_adv_params *cp;
2202	struct adv_info *adv_instance;
2203
2204	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2205
2206	if (rp->status)
2207		return rp->status;
2208
2209	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
2210	if (!cp)
2211		return rp->status;
2212
2213	hci_dev_lock(hdev);
2214	hdev->adv_addr_type = cp->own_addr_type;
2215	if (!cp->handle) {
2216		/* Store in hdev for instance 0 */
2217		hdev->adv_tx_power = rp->tx_power;
2218	} else {
2219		adv_instance = hci_find_adv_instance(hdev, cp->handle);
2220		if (adv_instance)
2221			adv_instance->tx_power = rp->tx_power;
2222	}
2223	/* Update adv data as tx power is known now */
2224	hci_update_adv_data(hdev, cp->handle);
2225
2226	hci_dev_unlock(hdev);
2227
2228	return rp->status;
2229}
2230
2231static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2232			   struct sk_buff *skb)
2233{
2234	struct hci_rp_read_rssi *rp = data;
2235	struct hci_conn *conn;
2236
2237	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2238
2239	if (rp->status)
2240		return rp->status;
2241
2242	hci_dev_lock(hdev);
2243
2244	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2245	if (conn)
2246		conn->rssi = rp->rssi;
2247
2248	hci_dev_unlock(hdev);
2249
2250	return rp->status;
2251}
2252
2253static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2254			       struct sk_buff *skb)
2255{
2256	struct hci_cp_read_tx_power *sent;
2257	struct hci_rp_read_tx_power *rp = data;
2258	struct hci_conn *conn;
2259
2260	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2261
2262	if (rp->status)
2263		return rp->status;
2264
2265	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2266	if (!sent)
2267		return rp->status;
2268
2269	hci_dev_lock(hdev);
2270
2271	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2272	if (!conn)
2273		goto unlock;
2274
2275	switch (sent->type) {
2276	case 0x00:
2277		conn->tx_power = rp->tx_power;
2278		break;
2279	case 0x01:
2280		conn->max_tx_power = rp->tx_power;
2281		break;
2282	}
2283
2284unlock:
2285	hci_dev_unlock(hdev);
2286	return rp->status;
2287}
2288
2289static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2290				      struct sk_buff *skb)
2291{
2292	struct hci_ev_status *rp = data;
2293	u8 *mode;
2294
2295	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2296
2297	if (rp->status)
2298		return rp->status;
2299
2300	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2301	if (mode)
2302		hdev->ssp_debug_mode = *mode;
2303
2304	return rp->status;
2305}
2306
2307static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2308{
2309	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2310
2311	if (status)
2312		return;
2313
2314	if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY))
2315		set_bit(HCI_INQUIRY, &hdev->flags);
2316}
2317
2318static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
2319{
2320	struct hci_cp_create_conn *cp;
2321	struct hci_conn *conn;
2322
2323	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2324
2325	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
2326	if (!cp)
2327		return;
2328
2329	hci_dev_lock(hdev);
2330
2331	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2332
2333	bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);
2334
2335	if (status) {
2336		if (conn && conn->state == BT_CONNECT) {
2337			conn->state = BT_CLOSED;
2338			hci_connect_cfm(conn, status);
2339			hci_conn_del(conn);
2340		}
2341	} else {
2342		if (!conn) {
2343			conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr,
2344						  HCI_ROLE_MASTER);
2345			if (!conn)
2346				bt_dev_err(hdev, "no memory for new connection");
2347		}
2348	}
2349
2350	hci_dev_unlock(hdev);
2351}
2352
2353static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2354{
2355	struct hci_cp_add_sco *cp;
2356	struct hci_conn *acl;
2357	struct hci_link *link;
2358	__u16 handle;
2359
2360	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2361
2362	if (!status)
2363		return;
2364
2365	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2366	if (!cp)
2367		return;
2368
2369	handle = __le16_to_cpu(cp->handle);
2370
2371	bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2372
2373	hci_dev_lock(hdev);
2374
2375	acl = hci_conn_hash_lookup_handle(hdev, handle);
2376	if (acl) {
2377		link = list_first_entry_or_null(&acl->link_list,
2378						struct hci_link, list);
2379		if (link && link->conn) {
2380			link->conn->state = BT_CLOSED;
2381
2382			hci_connect_cfm(link->conn, status);
2383			hci_conn_del(link->conn);
2384		}
2385	}
2386
2387	hci_dev_unlock(hdev);
2388}
2389
2390static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2391{
2392	struct hci_cp_auth_requested *cp;
2393	struct hci_conn *conn;
2394
2395	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2396
2397	if (!status)
2398		return;
2399
2400	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2401	if (!cp)
2402		return;
2403
2404	hci_dev_lock(hdev);
2405
2406	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2407	if (conn) {
2408		if (conn->state == BT_CONFIG) {
2409			hci_connect_cfm(conn, status);
2410			hci_conn_drop(conn);
2411		}
2412	}
2413
2414	hci_dev_unlock(hdev);
2415}
2416
2417static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2418{
2419	struct hci_cp_set_conn_encrypt *cp;
2420	struct hci_conn *conn;
2421
2422	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2423
2424	if (!status)
2425		return;
2426
2427	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2428	if (!cp)
2429		return;
2430
2431	hci_dev_lock(hdev);
2432
2433	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2434	if (conn) {
2435		if (conn->state == BT_CONFIG) {
2436			hci_connect_cfm(conn, status);
2437			hci_conn_drop(conn);
2438		}
2439	}
2440
2441	hci_dev_unlock(hdev);
2442}
2443
2444static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2445				    struct hci_conn *conn)
2446{
2447	if (conn->state != BT_CONFIG || !conn->out)
2448		return 0;
2449
2450	if (conn->pending_sec_level == BT_SECURITY_SDP)
2451		return 0;
2452
2453	/* Only request authentication for SSP connections or non-SSP
2454	 * devices with sec_level MEDIUM or HIGH or if MITM protection
2455	 * is requested.
2456	 */
2457	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2458	    conn->pending_sec_level != BT_SECURITY_FIPS &&
2459	    conn->pending_sec_level != BT_SECURITY_HIGH &&
2460	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
2461		return 0;
2462
2463	return 1;
2464}
2465
2466static int hci_resolve_name(struct hci_dev *hdev,
2467				   struct inquiry_entry *e)
2468{
2469	struct hci_cp_remote_name_req cp;
2470
2471	memset(&cp, 0, sizeof(cp));
2472
2473	bacpy(&cp.bdaddr, &e->data.bdaddr);
2474	cp.pscan_rep_mode = e->data.pscan_rep_mode;
2475	cp.pscan_mode = e->data.pscan_mode;
2476	cp.clock_offset = e->data.clock_offset;
2477
2478	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2479}
2480
2481static bool hci_resolve_next_name(struct hci_dev *hdev)
2482{
2483	struct discovery_state *discov = &hdev->discovery;
2484	struct inquiry_entry *e;
2485
2486	if (list_empty(&discov->resolve))
2487		return false;
2488
2489	/* We should stop if we already spent too much time resolving names. */
2490	if (time_after(jiffies, discov->name_resolve_timeout)) {
2491		bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2492		return false;
2493	}
2494
2495	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2496	if (!e)
2497		return false;
2498
2499	if (hci_resolve_name(hdev, e) == 0) {
2500		e->name_state = NAME_PENDING;
2501		return true;
2502	}
2503
2504	return false;
2505}
2506
2507static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2508				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
2509{
2510	struct discovery_state *discov = &hdev->discovery;
2511	struct inquiry_entry *e;
2512
2513	/* Update the mgmt connected state if necessary. Be careful with
2514	 * conn objects that exist but are not (yet) connected however.
2515	 * Only those in BT_CONFIG or BT_CONNECTED states can be
2516	 * considered connected.
2517	 */
2518	if (conn && (conn->state == BT_CONFIG || conn->state == BT_CONNECTED))
2519		mgmt_device_connected(hdev, conn, name, name_len);
2520
2521	if (discov->state == DISCOVERY_STOPPED)
2522		return;
2523
2524	if (discov->state == DISCOVERY_STOPPING)
2525		goto discov_complete;
2526
2527	if (discov->state != DISCOVERY_RESOLVING)
2528		return;
2529
2530	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2531	/* If the device was not found in a list of found devices names of which
2532	 * are pending. there is no need to continue resolving a next name as it
2533	 * will be done upon receiving another Remote Name Request Complete
2534	 * Event */
2535	if (!e)
2536		return;
2537
2538	list_del(&e->list);
2539
2540	e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
2541	mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
2542			 name, name_len);
2543
2544	if (hci_resolve_next_name(hdev))
2545		return;
2546
2547discov_complete:
2548	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2549}
2550
2551static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2552{
2553	struct hci_cp_remote_name_req *cp;
2554	struct hci_conn *conn;
2555
2556	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2557
2558	/* If successful wait for the name req complete event before
2559	 * checking for the need to do authentication */
2560	if (!status)
2561		return;
2562
2563	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2564	if (!cp)
2565		return;
2566
2567	hci_dev_lock(hdev);
2568
2569	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2570
2571	if (hci_dev_test_flag(hdev, HCI_MGMT))
2572		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2573
2574	if (!conn)
2575		goto unlock;
2576
2577	if (!hci_outgoing_auth_needed(hdev, conn))
2578		goto unlock;
2579
2580	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2581		struct hci_cp_auth_requested auth_cp;
2582
2583		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2584
2585		auth_cp.handle = __cpu_to_le16(conn->handle);
2586		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2587			     sizeof(auth_cp), &auth_cp);
2588	}
2589
2590unlock:
2591	hci_dev_unlock(hdev);
2592}
2593
2594static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2595{
2596	struct hci_cp_read_remote_features *cp;
2597	struct hci_conn *conn;
2598
2599	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2600
2601	if (!status)
2602		return;
2603
2604	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2605	if (!cp)
2606		return;
2607
2608	hci_dev_lock(hdev);
2609
2610	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2611	if (conn) {
2612		if (conn->state == BT_CONFIG) {
2613			hci_connect_cfm(conn, status);
2614			hci_conn_drop(conn);
2615		}
2616	}
2617
2618	hci_dev_unlock(hdev);
2619}
2620
2621static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2622{
2623	struct hci_cp_read_remote_ext_features *cp;
2624	struct hci_conn *conn;
2625
2626	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2627
2628	if (!status)
2629		return;
2630
2631	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2632	if (!cp)
2633		return;
2634
2635	hci_dev_lock(hdev);
2636
2637	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2638	if (conn) {
2639		if (conn->state == BT_CONFIG) {
2640			hci_connect_cfm(conn, status);
2641			hci_conn_drop(conn);
2642		}
2643	}
2644
2645	hci_dev_unlock(hdev);
2646}
2647
2648static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle,
2649				       __u8 status)
2650{
2651	struct hci_conn *acl;
2652	struct hci_link *link;
2653
2654	bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status);
2655
2656	hci_dev_lock(hdev);
2657
2658	acl = hci_conn_hash_lookup_handle(hdev, handle);
2659	if (acl) {
2660		link = list_first_entry_or_null(&acl->link_list,
2661						struct hci_link, list);
2662		if (link && link->conn) {
2663			link->conn->state = BT_CLOSED;
2664
2665			hci_connect_cfm(link->conn, status);
2666			hci_conn_del(link->conn);
2667		}
2668	}
2669
2670	hci_dev_unlock(hdev);
2671}
2672
2673static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2674{
2675	struct hci_cp_setup_sync_conn *cp;
2676
2677	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2678
2679	if (!status)
2680		return;
2681
2682	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2683	if (!cp)
2684		return;
2685
2686	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2687}
2688
2689static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2690{
2691	struct hci_cp_enhanced_setup_sync_conn *cp;
2692
2693	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2694
2695	if (!status)
2696		return;
2697
2698	cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2699	if (!cp)
2700		return;
2701
2702	hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status);
2703}
2704
2705static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2706{
2707	struct hci_cp_sniff_mode *cp;
2708	struct hci_conn *conn;
2709
2710	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2711
2712	if (!status)
2713		return;
2714
2715	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2716	if (!cp)
2717		return;
2718
2719	hci_dev_lock(hdev);
2720
2721	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2722	if (conn) {
2723		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2724
2725		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2726			hci_sco_setup(conn, status);
2727	}
2728
2729	hci_dev_unlock(hdev);
2730}
2731
2732static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2733{
2734	struct hci_cp_exit_sniff_mode *cp;
2735	struct hci_conn *conn;
2736
2737	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2738
2739	if (!status)
2740		return;
2741
2742	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2743	if (!cp)
2744		return;
2745
2746	hci_dev_lock(hdev);
2747
2748	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2749	if (conn) {
2750		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2751
2752		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2753			hci_sco_setup(conn, status);
2754	}
2755
2756	hci_dev_unlock(hdev);
2757}
2758
2759static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2760{
2761	struct hci_cp_disconnect *cp;
2762	struct hci_conn_params *params;
2763	struct hci_conn *conn;
2764	bool mgmt_conn;
2765
2766	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2767
2768	/* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended
2769	 * otherwise cleanup the connection immediately.
2770	 */
2771	if (!status && !hdev->suspended)
2772		return;
2773
2774	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2775	if (!cp)
2776		return;
2777
2778	hci_dev_lock(hdev);
2779
2780	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2781	if (!conn)
2782		goto unlock;
2783
2784	if (status) {
2785		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2786				       conn->dst_type, status);
2787
2788		if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
2789			hdev->cur_adv_instance = conn->adv_instance;
2790			hci_enable_advertising(hdev);
2791		}
2792
2793		/* Inform sockets conn is gone before we delete it */
2794		hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED);
2795
2796		goto done;
2797	}
2798
2799	mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2800
2801	if (conn->type == ACL_LINK) {
2802		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2803			hci_remove_link_key(hdev, &conn->dst);
2804	}
2805
2806	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2807	if (params) {
2808		switch (params->auto_connect) {
2809		case HCI_AUTO_CONN_LINK_LOSS:
2810			if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2811				break;
2812			fallthrough;
2813
2814		case HCI_AUTO_CONN_DIRECT:
2815		case HCI_AUTO_CONN_ALWAYS:
2816			hci_pend_le_list_del_init(params);
2817			hci_pend_le_list_add(params, &hdev->pend_le_conns);
2818			break;
2819
2820		default:
2821			break;
2822		}
2823	}
2824
2825	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2826				 cp->reason, mgmt_conn);
2827
2828	hci_disconn_cfm(conn, cp->reason);
2829
2830done:
2831	/* If the disconnection failed for any reason, the upper layer
2832	 * does not retry to disconnect in current implementation.
2833	 * Hence, we need to do some basic cleanup here and re-enable
2834	 * advertising if necessary.
2835	 */
2836	hci_conn_del(conn);
2837unlock:
2838	hci_dev_unlock(hdev);
2839}
2840
2841static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2842{
2843	/* When using controller based address resolution, then the new
2844	 * address types 0x02 and 0x03 are used. These types need to be
2845	 * converted back into either public address or random address type
2846	 */
2847	switch (type) {
2848	case ADDR_LE_DEV_PUBLIC_RESOLVED:
2849		if (resolved)
2850			*resolved = true;
2851		return ADDR_LE_DEV_PUBLIC;
2852	case ADDR_LE_DEV_RANDOM_RESOLVED:
2853		if (resolved)
2854			*resolved = true;
2855		return ADDR_LE_DEV_RANDOM;
2856	}
2857
2858	if (resolved)
2859		*resolved = false;
2860	return type;
2861}
2862
2863static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2864			      u8 peer_addr_type, u8 own_address_type,
2865			      u8 filter_policy)
2866{
2867	struct hci_conn *conn;
2868
2869	conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2870				       peer_addr_type);
2871	if (!conn)
2872		return;
2873
2874	own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
2875
2876	/* Store the initiator and responder address information which
2877	 * is needed for SMP. These values will not change during the
2878	 * lifetime of the connection.
2879	 */
2880	conn->init_addr_type = own_address_type;
2881	if (own_address_type == ADDR_LE_DEV_RANDOM)
2882		bacpy(&conn->init_addr, &hdev->random_addr);
2883	else
2884		bacpy(&conn->init_addr, &hdev->bdaddr);
2885
2886	conn->resp_addr_type = peer_addr_type;
2887	bacpy(&conn->resp_addr, peer_addr);
2888}
2889
2890static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2891{
2892	struct hci_cp_le_create_conn *cp;
2893
2894	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2895
2896	/* All connection failure handling is taken care of by the
2897	 * hci_conn_failed function which is triggered by the HCI
2898	 * request completion callbacks used for connecting.
2899	 */
2900	if (status)
2901		return;
2902
2903	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2904	if (!cp)
2905		return;
2906
2907	hci_dev_lock(hdev);
2908
2909	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2910			  cp->own_address_type, cp->filter_policy);
2911
2912	hci_dev_unlock(hdev);
2913}
2914
2915static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2916{
2917	struct hci_cp_le_ext_create_conn *cp;
2918
2919	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2920
2921	/* All connection failure handling is taken care of by the
2922	 * hci_conn_failed function which is triggered by the HCI
2923	 * request completion callbacks used for connecting.
2924	 */
2925	if (status)
2926		return;
2927
2928	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2929	if (!cp)
2930		return;
2931
2932	hci_dev_lock(hdev);
2933
2934	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2935			  cp->own_addr_type, cp->filter_policy);
2936
2937	hci_dev_unlock(hdev);
2938}
2939
2940static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2941{
2942	struct hci_cp_le_read_remote_features *cp;
2943	struct hci_conn *conn;
2944
2945	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2946
2947	if (!status)
2948		return;
2949
2950	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2951	if (!cp)
2952		return;
2953
2954	hci_dev_lock(hdev);
2955
2956	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2957	if (conn) {
2958		if (conn->state == BT_CONFIG) {
2959			hci_connect_cfm(conn, status);
2960			hci_conn_drop(conn);
2961		}
2962	}
2963
2964	hci_dev_unlock(hdev);
2965}
2966
2967static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2968{
2969	struct hci_cp_le_start_enc *cp;
2970	struct hci_conn *conn;
2971
2972	bt_dev_dbg(hdev, "status 0x%2.2x", status);
2973
2974	if (!status)
2975		return;
2976
2977	hci_dev_lock(hdev);
2978
2979	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2980	if (!cp)
2981		goto unlock;
2982
2983	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2984	if (!conn)
2985		goto unlock;
2986
2987	if (conn->state != BT_CONNECTED)
2988		goto unlock;
2989
2990	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2991	hci_conn_drop(conn);
2992
2993unlock:
2994	hci_dev_unlock(hdev);
2995}
2996
2997static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2998{
2999	struct hci_cp_switch_role *cp;
3000	struct hci_conn *conn;
3001
3002	BT_DBG("%s status 0x%2.2x", hdev->name, status);
3003
3004	if (!status)
3005		return;
3006
3007	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
3008	if (!cp)
3009		return;
3010
3011	hci_dev_lock(hdev);
3012
3013	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
3014	if (conn)
3015		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3016
3017	hci_dev_unlock(hdev);
3018}
3019
3020static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
3021				     struct sk_buff *skb)
3022{
3023	struct hci_ev_status *ev = data;
3024	struct discovery_state *discov = &hdev->discovery;
3025	struct inquiry_entry *e;
3026
3027	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3028
3029	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
3030		return;
3031
3032	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
3033	wake_up_bit(&hdev->flags, HCI_INQUIRY);
3034
3035	if (!hci_dev_test_flag(hdev, HCI_MGMT))
3036		return;
3037
3038	hci_dev_lock(hdev);
3039
3040	if (discov->state != DISCOVERY_FINDING)
3041		goto unlock;
3042
3043	if (list_empty(&discov->resolve)) {
3044		/* When BR/EDR inquiry is active and no LE scanning is in
3045		 * progress, then change discovery state to indicate completion.
3046		 *
3047		 * When running LE scanning and BR/EDR inquiry simultaneously
3048		 * and the LE scan already finished, then change the discovery
3049		 * state to indicate completion.
3050		 */
3051		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3052		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3053			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3054		goto unlock;
3055	}
3056
3057	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
3058	if (e && hci_resolve_name(hdev, e) == 0) {
3059		e->name_state = NAME_PENDING;
3060		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
3061		discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
3062	} else {
3063		/* When BR/EDR inquiry is active and no LE scanning is in
3064		 * progress, then change discovery state to indicate completion.
3065		 *
3066		 * When running LE scanning and BR/EDR inquiry simultaneously
3067		 * and the LE scan already finished, then change the discovery
3068		 * state to indicate completion.
3069		 */
3070		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3071		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3072			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3073	}
3074
3075unlock:
3076	hci_dev_unlock(hdev);
3077}
3078
3079static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
3080				   struct sk_buff *skb)
3081{
3082	struct hci_ev_inquiry_result *ev = edata;
3083	struct inquiry_data data;
3084	int i;
3085
3086	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
3087			     flex_array_size(ev, info, ev->num)))
3088		return;
3089
3090	bt_dev_dbg(hdev, "num %d", ev->num);
3091
3092	if (!ev->num)
3093		return;
3094
3095	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3096		return;
3097
3098	hci_dev_lock(hdev);
3099
3100	for (i = 0; i < ev->num; i++) {
3101		struct inquiry_info *info = &ev->info[i];
3102		u32 flags;
3103
3104		bacpy(&data.bdaddr, &info->bdaddr);
3105		data.pscan_rep_mode	= info->pscan_rep_mode;
3106		data.pscan_period_mode	= info->pscan_period_mode;
3107		data.pscan_mode		= info->pscan_mode;
3108		memcpy(data.dev_class, info->dev_class, 3);
3109		data.clock_offset	= info->clock_offset;
3110		data.rssi		= HCI_RSSI_INVALID;
3111		data.ssp_mode		= 0x00;
3112
3113		flags = hci_inquiry_cache_update(hdev, &data, false);
3114
3115		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3116				  info->dev_class, HCI_RSSI_INVALID,
3117				  flags, NULL, 0, NULL, 0, 0);
3118	}
3119
3120	hci_dev_unlock(hdev);
3121}
3122
3123static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
3124				  struct sk_buff *skb)
3125{
3126	struct hci_ev_conn_complete *ev = data;
3127	struct hci_conn *conn;
3128	u8 status = ev->status;
3129
3130	bt_dev_dbg(hdev, "status 0x%2.2x", status);
3131
3132	hci_dev_lock(hdev);
3133
3134	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3135	if (!conn) {
3136		/* In case of error status and there is no connection pending
3137		 * just unlock as there is nothing to cleanup.
3138		 */
3139		if (ev->status)
3140			goto unlock;
3141
3142		/* Connection may not exist if auto-connected. Check the bredr
3143		 * allowlist to see if this device is allowed to auto connect.
3144		 * If link is an ACL type, create a connection class
3145		 * automatically.
3146		 *
3147		 * Auto-connect will only occur if the event filter is
3148		 * programmed with a given address. Right now, event filter is
3149		 * only used during suspend.
3150		 */
3151		if (ev->link_type == ACL_LINK &&
3152		    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
3153						      &ev->bdaddr,
3154						      BDADDR_BREDR)) {
3155			conn = hci_conn_add_unset(hdev, ev->link_type,
3156						  &ev->bdaddr, HCI_ROLE_SLAVE);
3157			if (!conn) {
3158				bt_dev_err(hdev, "no memory for new conn");
3159				goto unlock;
3160			}
3161		} else {
3162			if (ev->link_type != SCO_LINK)
3163				goto unlock;
3164
3165			conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
3166						       &ev->bdaddr);
3167			if (!conn)
3168				goto unlock;
3169
3170			conn->type = SCO_LINK;
3171		}
3172	}
3173
3174	/* The HCI_Connection_Complete event is only sent once per connection.
3175	 * Processing it more than once per connection can corrupt kernel memory.
3176	 *
3177	 * As the connection handle is set here for the first time, it indicates
3178	 * whether the connection is already set up.
3179	 */
3180	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
3181		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
3182		goto unlock;
3183	}
3184
3185	if (!status) {
3186		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
3187		if (status)
3188			goto done;
3189
3190		if (conn->type == ACL_LINK) {
3191			conn->state = BT_CONFIG;
3192			hci_conn_hold(conn);
3193
3194			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
3195			    !hci_find_link_key(hdev, &ev->bdaddr))
3196				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3197			else
3198				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3199		} else
3200			conn->state = BT_CONNECTED;
3201
3202		hci_debugfs_create_conn(conn);
3203		hci_conn_add_sysfs(conn);
3204
3205		if (test_bit(HCI_AUTH, &hdev->flags))
3206			set_bit(HCI_CONN_AUTH, &conn->flags);
3207
3208		if (test_bit(HCI_ENCRYPT, &hdev->flags))
3209			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3210
3211		/* "Link key request" completed ahead of "connect request" completes */
3212		if (ev->encr_mode == 1 && !test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3213		    ev->link_type == ACL_LINK) {
3214			struct link_key *key;
3215			struct hci_cp_read_enc_key_size cp;
3216
3217			key = hci_find_link_key(hdev, &ev->bdaddr);
3218			if (key) {
3219				set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3220
3221				if (!read_key_size_capable(hdev)) {
3222					conn->enc_key_size = HCI_LINK_KEY_SIZE;
3223				} else {
3224					cp.handle = cpu_to_le16(conn->handle);
3225					if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
3226							 sizeof(cp), &cp)) {
3227						bt_dev_err(hdev, "sending read key size failed");
3228						conn->enc_key_size = HCI_LINK_KEY_SIZE;
3229					}
3230				}
3231
3232				hci_encrypt_cfm(conn, ev->status);
3233			}
3234		}
3235
3236		/* Get remote features */
3237		if (conn->type == ACL_LINK) {
3238			struct hci_cp_read_remote_features cp;
3239			cp.handle = ev->handle;
3240			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
3241				     sizeof(cp), &cp);
3242
3243			hci_update_scan(hdev);
3244		}
3245
3246		/* Set packet type for incoming connection */
3247		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
3248			struct hci_cp_change_conn_ptype cp;
3249			cp.handle = ev->handle;
3250			cp.pkt_type = cpu_to_le16(conn->pkt_type);
3251			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
3252				     &cp);
3253		}
3254	}
3255
3256	if (conn->type == ACL_LINK)
3257		hci_sco_setup(conn, ev->status);
3258
3259done:
3260	if (status) {
3261		hci_conn_failed(conn, status);
3262	} else if (ev->link_type == SCO_LINK) {
3263		switch (conn->setting & SCO_AIRMODE_MASK) {
3264		case SCO_AIRMODE_CVSD:
3265			if (hdev->notify)
3266				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
3267			break;
3268		}
3269
3270		hci_connect_cfm(conn, status);
3271	}
3272
3273unlock:
3274	hci_dev_unlock(hdev);
3275}
3276
3277static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3278{
3279	struct hci_cp_reject_conn_req cp;
3280
3281	bacpy(&cp.bdaddr, bdaddr);
3282	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3283	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3284}
3285
3286static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
3287				 struct sk_buff *skb)
3288{
3289	struct hci_ev_conn_request *ev = data;
3290	int mask = hdev->link_mode;
3291	struct inquiry_entry *ie;
3292	struct hci_conn *conn;
3293	__u8 flags = 0;
3294
3295	bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
3296
3297	/* Reject incoming connection from device with same BD ADDR against
3298	 * CVE-2020-26555
3299	 */
3300	if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
3301		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
3302			   &ev->bdaddr);
3303		hci_reject_conn(hdev, &ev->bdaddr);
3304		return;
3305	}
3306
3307	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
3308				      &flags);
3309
3310	if (!(mask & HCI_LM_ACCEPT)) {
3311		hci_reject_conn(hdev, &ev->bdaddr);
3312		return;
3313	}
3314
3315	hci_dev_lock(hdev);
3316
3317	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
3318				   BDADDR_BREDR)) {
3319		hci_reject_conn(hdev, &ev->bdaddr);
3320		goto unlock;
3321	}
3322
3323	/* Require HCI_CONNECTABLE or an accept list entry to accept the
3324	 * connection. These features are only touched through mgmt so
3325	 * only do the checks if HCI_MGMT is set.
3326	 */
3327	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3328	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
3329	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
3330					       BDADDR_BREDR)) {
3331		hci_reject_conn(hdev, &ev->bdaddr);
3332		goto unlock;
3333	}
3334
3335	/* Connection accepted */
3336
3337	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3338	if (ie)
3339		memcpy(ie->data.dev_class, ev->dev_class, 3);
3340
3341	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
3342			&ev->bdaddr);
3343	if (!conn) {
3344		conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr,
3345					  HCI_ROLE_SLAVE);
3346		if (!conn) {
3347			bt_dev_err(hdev, "no memory for new connection");
3348			goto unlock;
3349		}
3350	}
3351
3352	memcpy(conn->dev_class, ev->dev_class, 3);
3353
3354	hci_dev_unlock(hdev);
3355
3356	if (ev->link_type == ACL_LINK ||
3357	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
3358		struct hci_cp_accept_conn_req cp;
3359		conn->state = BT_CONNECT;
3360
3361		bacpy(&cp.bdaddr, &ev->bdaddr);
3362
3363		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
3364			cp.role = 0x00; /* Become central */
3365		else
3366			cp.role = 0x01; /* Remain peripheral */
3367
3368		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
3369	} else if (!(flags & HCI_PROTO_DEFER)) {
3370		struct hci_cp_accept_sync_conn_req cp;
3371		conn->state = BT_CONNECT;
3372
3373		bacpy(&cp.bdaddr, &ev->bdaddr);
3374		cp.pkt_type = cpu_to_le16(conn->pkt_type);
3375
3376		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
3377		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
3378		cp.max_latency    = cpu_to_le16(0xffff);
3379		cp.content_format = cpu_to_le16(hdev->voice_setting);
3380		cp.retrans_effort = 0xff;
3381
3382		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
3383			     &cp);
3384	} else {
3385		conn->state = BT_CONNECT2;
3386		hci_connect_cfm(conn, 0);
3387	}
3388
3389	return;
3390unlock:
3391	hci_dev_unlock(hdev);
3392}
3393
3394static u8 hci_to_mgmt_reason(u8 err)
3395{
3396	switch (err) {
3397	case HCI_ERROR_CONNECTION_TIMEOUT:
3398		return MGMT_DEV_DISCONN_TIMEOUT;
3399	case HCI_ERROR_REMOTE_USER_TERM:
3400	case HCI_ERROR_REMOTE_LOW_RESOURCES:
3401	case HCI_ERROR_REMOTE_POWER_OFF:
3402		return MGMT_DEV_DISCONN_REMOTE;
3403	case HCI_ERROR_LOCAL_HOST_TERM:
3404		return MGMT_DEV_DISCONN_LOCAL_HOST;
3405	default:
3406		return MGMT_DEV_DISCONN_UNKNOWN;
3407	}
3408}
3409
3410static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
3411				     struct sk_buff *skb)
3412{
3413	struct hci_ev_disconn_complete *ev = data;
3414	u8 reason;
3415	struct hci_conn_params *params;
3416	struct hci_conn *conn;
3417	bool mgmt_connected;
3418
3419	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3420
3421	hci_dev_lock(hdev);
3422
3423	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3424	if (!conn)
3425		goto unlock;
3426
3427	if (ev->status) {
3428		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3429				       conn->dst_type, ev->status);
3430		goto unlock;
3431	}
3432
3433	conn->state = BT_CLOSED;
3434
3435	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
3436
3437	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3438		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3439	else
3440		reason = hci_to_mgmt_reason(ev->reason);
3441
3442	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3443				reason, mgmt_connected);
3444
3445	if (conn->type == ACL_LINK) {
3446		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3447			hci_remove_link_key(hdev, &conn->dst);
3448
3449		hci_update_scan(hdev);
3450	}
3451
3452	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
3453	if (params) {
3454		switch (params->auto_connect) {
3455		case HCI_AUTO_CONN_LINK_LOSS:
3456			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3457				break;
3458			fallthrough;
3459
3460		case HCI_AUTO_CONN_DIRECT:
3461		case HCI_AUTO_CONN_ALWAYS:
3462			hci_pend_le_list_del_init(params);
3463			hci_pend_le_list_add(params, &hdev->pend_le_conns);
3464			hci_update_passive_scan(hdev);
3465			break;
3466
3467		default:
3468			break;
3469		}
3470	}
3471
3472	hci_disconn_cfm(conn, ev->reason);
3473
3474	/* Re-enable advertising if necessary, since it might
3475	 * have been disabled by the connection. From the
3476	 * HCI_LE_Set_Advertise_Enable command description in
3477	 * the core specification (v4.0):
3478	 * "The Controller shall continue advertising until the Host
3479	 * issues an LE_Set_Advertise_Enable command with
3480	 * Advertising_Enable set to 0x00 (Advertising is disabled)
3481	 * or until a connection is created or until the Advertising
3482	 * is timed out due to Directed Advertising."
3483	 */
3484	if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
3485		hdev->cur_adv_instance = conn->adv_instance;
3486		hci_enable_advertising(hdev);
3487	}
3488
3489	hci_conn_del(conn);
3490
3491unlock:
3492	hci_dev_unlock(hdev);
3493}
3494
3495static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
3496				  struct sk_buff *skb)
3497{
3498	struct hci_ev_auth_complete *ev = data;
3499	struct hci_conn *conn;
3500
3501	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3502
3503	hci_dev_lock(hdev);
3504
3505	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3506	if (!conn)
3507		goto unlock;
3508
3509	if (!ev->status) {
3510		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3511		set_bit(HCI_CONN_AUTH, &conn->flags);
3512		conn->sec_level = conn->pending_sec_level;
3513	} else {
3514		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3515			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3516
3517		mgmt_auth_failed(conn, ev->status);
3518	}
3519
3520	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3521
3522	if (conn->state == BT_CONFIG) {
3523		if (!ev->status && hci_conn_ssp_enabled(conn)) {
3524			struct hci_cp_set_conn_encrypt cp;
3525			cp.handle  = ev->handle;
3526			cp.encrypt = 0x01;
3527			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3528				     &cp);
3529		} else {
3530			conn->state = BT_CONNECTED;
3531			hci_connect_cfm(conn, ev->status);
3532			hci_conn_drop(conn);
3533		}
3534	} else {
3535		hci_auth_cfm(conn, ev->status);
3536
3537		hci_conn_hold(conn);
3538		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3539		hci_conn_drop(conn);
3540	}
3541
3542	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3543		if (!ev->status) {
3544			struct hci_cp_set_conn_encrypt cp;
3545			cp.handle  = ev->handle;
3546			cp.encrypt = 0x01;
3547			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3548				     &cp);
3549		} else {
3550			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3551			hci_encrypt_cfm(conn, ev->status);
3552		}
3553	}
3554
3555unlock:
3556	hci_dev_unlock(hdev);
3557}
3558
3559static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
3560				struct sk_buff *skb)
3561{
3562	struct hci_ev_remote_name *ev = data;
3563	struct hci_conn *conn;
3564
3565	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3566
3567	hci_dev_lock(hdev);
3568
3569	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3570
3571	if (!hci_dev_test_flag(hdev, HCI_MGMT))
3572		goto check_auth;
3573
3574	if (ev->status == 0)
3575		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3576				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3577	else
3578		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3579
3580check_auth:
3581	if (!conn)
3582		goto unlock;
3583
3584	if (!hci_outgoing_auth_needed(hdev, conn))
3585		goto unlock;
3586
3587	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3588		struct hci_cp_auth_requested cp;
3589
3590		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3591
3592		cp.handle = __cpu_to_le16(conn->handle);
3593		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3594	}
3595
3596unlock:
3597	hci_dev_unlock(hdev);
3598}
3599
3600static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
3601				   struct sk_buff *skb)
3602{
3603	struct hci_ev_encrypt_change *ev = data;
3604	struct hci_conn *conn;
3605
3606	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3607
3608	hci_dev_lock(hdev);
3609
3610	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3611	if (!conn)
3612		goto unlock;
3613
3614	if (!ev->status) {
3615		if (ev->encrypt) {
3616			/* Encryption implies authentication */
3617			set_bit(HCI_CONN_AUTH, &conn->flags);
3618			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3619			conn->sec_level = conn->pending_sec_level;
3620
3621			/* P-256 authentication key implies FIPS */
3622			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3623				set_bit(HCI_CONN_FIPS, &conn->flags);
3624
3625			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3626			    conn->type == LE_LINK)
3627				set_bit(HCI_CONN_AES_CCM, &conn->flags);
3628		} else {
3629			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3630			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3631		}
3632	}
3633
3634	/* We should disregard the current RPA and generate a new one
3635	 * whenever the encryption procedure fails.
3636	 */
3637	if (ev->status && conn->type == LE_LINK) {
3638		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3639		hci_adv_instances_set_rpa_expired(hdev, true);
3640	}
3641
3642	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3643
3644	/* Check link security requirements are met */
3645	if (!hci_conn_check_link_mode(conn))
3646		ev->status = HCI_ERROR_AUTH_FAILURE;
3647
3648	if (ev->status && conn->state == BT_CONNECTED) {
3649		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3650			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3651
3652		/* Notify upper layers so they can cleanup before
3653		 * disconnecting.
3654		 */
3655		hci_encrypt_cfm(conn, ev->status);
3656		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3657		hci_conn_drop(conn);
3658		goto unlock;
3659	}
3660
3661	/* Try reading the encryption key size for encrypted ACL links */
3662	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3663		struct hci_cp_read_enc_key_size cp;
3664
3665		/* Only send HCI_Read_Encryption_Key_Size if the
3666		 * controller really supports it. If it doesn't, assume
3667		 * the default size (16).
3668		 */
3669		if (!read_key_size_capable(hdev)) {
3670			conn->enc_key_size = HCI_LINK_KEY_SIZE;
3671			goto notify;
3672		}
3673
3674		cp.handle = cpu_to_le16(conn->handle);
3675		if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
3676				 sizeof(cp), &cp)) {
3677			bt_dev_err(hdev, "sending read key size failed");
3678			conn->enc_key_size = HCI_LINK_KEY_SIZE;
3679			goto notify;
3680		}
3681
3682		goto unlock;
3683	}
3684
3685	/* Set the default Authenticated Payload Timeout after
3686	 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3687	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3688	 * sent when the link is active and Encryption is enabled, the conn
3689	 * type can be either LE or ACL and controller must support LMP Ping.
3690	 * Ensure for AES-CCM encryption as well.
3691	 */
3692	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3693	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3694	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3695	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3696		struct hci_cp_write_auth_payload_to cp;
3697
3698		cp.handle = cpu_to_le16(conn->handle);
3699		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3700		if (hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3701				 sizeof(cp), &cp))
3702			bt_dev_err(hdev, "write auth payload timeout failed");
3703	}
3704
3705notify:
3706	hci_encrypt_cfm(conn, ev->status);
3707
3708unlock:
3709	hci_dev_unlock(hdev);
3710}
3711
3712static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3713					     struct sk_buff *skb)
3714{
3715	struct hci_ev_change_link_key_complete *ev = data;
3716	struct hci_conn *conn;
3717
3718	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3719
3720	hci_dev_lock(hdev);
3721
3722	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3723	if (conn) {
3724		if (!ev->status)
3725			set_bit(HCI_CONN_SECURE, &conn->flags);
3726
3727		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3728
3729		hci_key_change_cfm(conn, ev->status);
3730	}
3731
3732	hci_dev_unlock(hdev);
3733}
3734
3735static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
3736				    struct sk_buff *skb)
3737{
3738	struct hci_ev_remote_features *ev = data;
3739	struct hci_conn *conn;
3740
3741	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3742
3743	hci_dev_lock(hdev);
3744
3745	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3746	if (!conn)
3747		goto unlock;
3748
3749	if (!ev->status)
3750		memcpy(conn->features[0], ev->features, 8);
3751
3752	if (conn->state != BT_CONFIG)
3753		goto unlock;
3754
3755	if (!ev->status && lmp_ext_feat_capable(hdev) &&
3756	    lmp_ext_feat_capable(conn)) {
3757		struct hci_cp_read_remote_ext_features cp;
3758		cp.handle = ev->handle;
3759		cp.page = 0x01;
3760		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3761			     sizeof(cp), &cp);
3762		goto unlock;
3763	}
3764
3765	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3766		struct hci_cp_remote_name_req cp;
3767		memset(&cp, 0, sizeof(cp));
3768		bacpy(&cp.bdaddr, &conn->dst);
3769		cp.pscan_rep_mode = 0x02;
3770		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3771	} else {
3772		mgmt_device_connected(hdev, conn, NULL, 0);
3773	}
3774
3775	if (!hci_outgoing_auth_needed(hdev, conn)) {
3776		conn->state = BT_CONNECTED;
3777		hci_connect_cfm(conn, ev->status);
3778		hci_conn_drop(conn);
3779	}
3780
3781unlock:
3782	hci_dev_unlock(hdev);
3783}
3784
3785static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3786{
3787	cancel_delayed_work(&hdev->cmd_timer);
3788
3789	rcu_read_lock();
3790	if (!test_bit(HCI_RESET, &hdev->flags)) {
3791		if (ncmd) {
3792			cancel_delayed_work(&hdev->ncmd_timer);
3793			atomic_set(&hdev->cmd_cnt, 1);
3794		} else {
3795			if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
3796				queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
3797						   HCI_NCMD_TIMEOUT);
3798		}
3799	}
3800	rcu_read_unlock();
3801}
3802
3803static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3804					struct sk_buff *skb)
3805{
3806	struct hci_rp_le_read_buffer_size_v2 *rp = data;
3807
3808	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3809
3810	if (rp->status)
3811		return rp->status;
3812
3813	hdev->le_mtu   = __le16_to_cpu(rp->acl_mtu);
3814	hdev->le_pkts  = rp->acl_max_pkt;
3815	hdev->iso_mtu  = __le16_to_cpu(rp->iso_mtu);
3816	hdev->iso_pkts = rp->iso_max_pkt;
3817
3818	hdev->le_cnt  = hdev->le_pkts;
3819	hdev->iso_cnt = hdev->iso_pkts;
3820
3821	BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu,
3822	       hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts);
3823
3824	return rp->status;
3825}
3826
3827static void hci_unbound_cis_failed(struct hci_dev *hdev, u8 cig, u8 status)
3828{
3829	struct hci_conn *conn, *tmp;
3830
3831	lockdep_assert_held(&hdev->lock);
3832
3833	list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
3834		if (conn->type != ISO_LINK || !bacmp(&conn->dst, BDADDR_ANY) ||
3835		    conn->state == BT_OPEN || conn->iso_qos.ucast.cig != cig)
3836			continue;
3837
3838		if (HCI_CONN_HANDLE_UNSET(conn->handle))
3839			hci_conn_failed(conn, status);
3840	}
3841}
3842
3843static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
3844				   struct sk_buff *skb)
3845{
3846	struct hci_rp_le_set_cig_params *rp = data;
3847	struct hci_cp_le_set_cig_params *cp;
3848	struct hci_conn *conn;
3849	u8 status = rp->status;
3850	bool pending = false;
3851	int i;
3852
3853	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3854
3855	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS);
3856	if (!rp->status && (!cp || rp->num_handles != cp->num_cis ||
3857			    rp->cig_id != cp->cig_id)) {
3858		bt_dev_err(hdev, "unexpected Set CIG Parameters response data");
3859		status = HCI_ERROR_UNSPECIFIED;
3860	}
3861
3862	hci_dev_lock(hdev);
3863
3864	/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E page 2554
3865	 *
3866	 * If the Status return parameter is non-zero, then the state of the CIG
3867	 * and its CIS configurations shall not be changed by the command. If
3868	 * the CIG did not already exist, it shall not be created.
3869	 */
3870	if (status) {
3871		/* Keep current configuration, fail only the unbound CIS */
3872		hci_unbound_cis_failed(hdev, rp->cig_id, status);
3873		goto unlock;
3874	}
3875
3876	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553
3877	 *
3878	 * If the Status return parameter is zero, then the Controller shall
3879	 * set the Connection_Handle arrayed return parameter to the connection
3880	 * handle(s) corresponding to the CIS configurations specified in
3881	 * the CIS_IDs command parameter, in the same order.
3882	 */
3883	for (i = 0; i < rp->num_handles; ++i) {
3884		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id,
3885						cp->cis[i].cis_id);
3886		if (!conn || !bacmp(&conn->dst, BDADDR_ANY))
3887			continue;
3888
3889		if (conn->state != BT_BOUND && conn->state != BT_CONNECT)
3890			continue;
3891
3892		if (hci_conn_set_handle(conn, __le16_to_cpu(rp->handle[i])))
3893			continue;
3894
3895		if (conn->state == BT_CONNECT)
3896			pending = true;
3897	}
3898
3899unlock:
3900	if (pending)
3901		hci_le_create_cis_pending(hdev);
3902
3903	hci_dev_unlock(hdev);
3904
3905	return rp->status;
3906}
3907
3908static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
3909				   struct sk_buff *skb)
3910{
3911	struct hci_rp_le_setup_iso_path *rp = data;
3912	struct hci_cp_le_setup_iso_path *cp;
3913	struct hci_conn *conn;
3914
3915	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3916
3917	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
3918	if (!cp)
3919		return rp->status;
3920
3921	hci_dev_lock(hdev);
3922
3923	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3924	if (!conn)
3925		goto unlock;
3926
3927	if (rp->status) {
3928		hci_connect_cfm(conn, rp->status);
3929		hci_conn_del(conn);
3930		goto unlock;
3931	}
3932
3933	switch (cp->direction) {
3934	/* Input (Host to Controller) */
3935	case 0x00:
3936		/* Only confirm connection if output only */
3937		if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu)
3938			hci_connect_cfm(conn, rp->status);
3939		break;
3940	/* Output (Controller to Host) */
3941	case 0x01:
3942		/* Confirm connection since conn->iso_qos is always configured
3943		 * last.
3944		 */
3945		hci_connect_cfm(conn, rp->status);
3946
3947		/* Notify device connected in case it is a BIG Sync */
3948		if (!rp->status && test_bit(HCI_CONN_BIG_SYNC, &conn->flags))
3949			mgmt_device_connected(hdev, conn, NULL, 0);
3950
3951		break;
3952	}
3953
3954unlock:
3955	hci_dev_unlock(hdev);
3956	return rp->status;
3957}
3958
3959static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
3960{
3961	bt_dev_dbg(hdev, "status 0x%2.2x", status);
3962}
3963
3964static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
3965				   struct sk_buff *skb)
3966{
3967	struct hci_ev_status *rp = data;
3968	struct hci_cp_le_set_per_adv_params *cp;
3969
3970	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3971
3972	if (rp->status)
3973		return rp->status;
3974
3975	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
3976	if (!cp)
3977		return rp->status;
3978
3979	/* TODO: set the conn state */
3980	return rp->status;
3981}
3982
3983static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
3984				       struct sk_buff *skb)
3985{
3986	struct hci_ev_status *rp = data;
3987	struct hci_cp_le_set_per_adv_enable *cp;
3988	struct adv_info *adv = NULL, *n;
3989	u8 per_adv_cnt = 0;
3990
3991	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3992
3993	if (rp->status)
3994		return rp->status;
3995
3996	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
3997	if (!cp)
3998		return rp->status;
3999
4000	hci_dev_lock(hdev);
4001
4002	adv = hci_find_adv_instance(hdev, cp->handle);
4003
4004	if (cp->enable) {
4005		hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
4006
4007		if (adv)
4008			adv->enabled = true;
4009	} else {
4010		/* If just one instance was disabled check if there are
4011		 * any other instance enabled before clearing HCI_LE_PER_ADV.
4012		 * The current periodic adv instance will be marked as
4013		 * disabled once extended advertising is also disabled.
4014		 */
4015		list_for_each_entry_safe(adv, n, &hdev->adv_instances,
4016					 list) {
4017			if (adv->periodic && adv->enabled)
4018				per_adv_cnt++;
4019		}
4020
4021		if (per_adv_cnt > 1)
4022			goto unlock;
4023
4024		hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
4025	}
4026
4027unlock:
4028	hci_dev_unlock(hdev);
4029
4030	return rp->status;
4031}
4032
4033#define HCI_CC_VL(_op, _func, _min, _max) \
4034{ \
4035	.op = _op, \
4036	.func = _func, \
4037	.min_len = _min, \
4038	.max_len = _max, \
4039}
4040
4041#define HCI_CC(_op, _func, _len) \
4042	HCI_CC_VL(_op, _func, _len, _len)
4043
4044#define HCI_CC_STATUS(_op, _func) \
4045	HCI_CC(_op, _func, sizeof(struct hci_ev_status))
4046
4047static const struct hci_cc {
4048	u16  op;
4049	u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
4050	u16  min_len;
4051	u16  max_len;
4052} hci_cc_table[] = {
4053	HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
4054	HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
4055	HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
4056	HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
4057		      hci_cc_remote_name_req_cancel),
4058	HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
4059	       sizeof(struct hci_rp_role_discovery)),
4060	HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
4061	       sizeof(struct hci_rp_read_link_policy)),
4062	HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
4063	       sizeof(struct hci_rp_write_link_policy)),
4064	HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
4065	       sizeof(struct hci_rp_read_def_link_policy)),
4066	HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
4067		      hci_cc_write_def_link_policy),
4068	HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
4069	HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
4070	       sizeof(struct hci_rp_read_stored_link_key)),
4071	HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
4072	       sizeof(struct hci_rp_delete_stored_link_key)),
4073	HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
4074	HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
4075	       sizeof(struct hci_rp_read_local_name)),
4076	HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
4077	HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
4078	HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
4079	HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
4080	HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
4081	       sizeof(struct hci_rp_read_class_of_dev)),
4082	HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
4083	HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
4084	       sizeof(struct hci_rp_read_voice_setting)),
4085	HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
4086	HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
4087	       sizeof(struct hci_rp_read_num_supported_iac)),
4088	HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
4089	HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
4090	HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
4091	       sizeof(struct hci_rp_read_auth_payload_to)),
4092	HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
4093	       sizeof(struct hci_rp_write_auth_payload_to)),
4094	HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
4095	       sizeof(struct hci_rp_read_local_version)),
4096	HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
4097	       sizeof(struct hci_rp_read_local_commands)),
4098	HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
4099	       sizeof(struct hci_rp_read_local_features)),
4100	HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
4101	       sizeof(struct hci_rp_read_local_ext_features)),
4102	HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
4103	       sizeof(struct hci_rp_read_buffer_size)),
4104	HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
4105	       sizeof(struct hci_rp_read_bd_addr)),
4106	HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
4107	       sizeof(struct hci_rp_read_local_pairing_opts)),
4108	HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
4109	       sizeof(struct hci_rp_read_page_scan_activity)),
4110	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
4111		      hci_cc_write_page_scan_activity),
4112	HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
4113	       sizeof(struct hci_rp_read_page_scan_type)),
4114	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
4115	HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
4116	       sizeof(struct hci_rp_read_data_block_size)),
4117	HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
4118	       sizeof(struct hci_rp_read_flow_control_mode)),
4119	HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
4120	       sizeof(struct hci_rp_read_local_amp_info)),
4121	HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
4122	       sizeof(struct hci_rp_read_clock)),
4123	HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
4124	       sizeof(struct hci_rp_read_enc_key_size)),
4125	HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
4126	       sizeof(struct hci_rp_read_inq_rsp_tx_power)),
4127	HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4128	       hci_cc_read_def_err_data_reporting,
4129	       sizeof(struct hci_rp_read_def_err_data_reporting)),
4130	HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4131		      hci_cc_write_def_err_data_reporting),
4132	HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
4133	       sizeof(struct hci_rp_pin_code_reply)),
4134	HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
4135	       sizeof(struct hci_rp_pin_code_neg_reply)),
4136	HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
4137	       sizeof(struct hci_rp_read_local_oob_data)),
4138	HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
4139	       sizeof(struct hci_rp_read_local_oob_ext_data)),
4140	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
4141	       sizeof(struct hci_rp_le_read_buffer_size)),
4142	HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
4143	       sizeof(struct hci_rp_le_read_local_features)),
4144	HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
4145	       sizeof(struct hci_rp_le_read_adv_tx_power)),
4146	HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
4147	       sizeof(struct hci_rp_user_confirm_reply)),
4148	HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
4149	       sizeof(struct hci_rp_user_confirm_reply)),
4150	HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
4151	       sizeof(struct hci_rp_user_confirm_reply)),
4152	HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
4153	       sizeof(struct hci_rp_user_confirm_reply)),
4154	HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
4155	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
4156	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
4157	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
4158	HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4159	       hci_cc_le_read_accept_list_size,
4160	       sizeof(struct hci_rp_le_read_accept_list_size)),
4161	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
4162	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
4163		      hci_cc_le_add_to_accept_list),
4164	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
4165		      hci_cc_le_del_from_accept_list),
4166	HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
4167	       sizeof(struct hci_rp_le_read_supported_states)),
4168	HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
4169	       sizeof(struct hci_rp_le_read_def_data_len)),
4170	HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
4171		      hci_cc_le_write_def_data_len),
4172	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
4173		      hci_cc_le_add_to_resolv_list),
4174	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
4175		      hci_cc_le_del_from_resolv_list),
4176	HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
4177		      hci_cc_le_clear_resolv_list),
4178	HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
4179	       sizeof(struct hci_rp_le_read_resolv_list_size)),
4180	HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
4181		      hci_cc_le_set_addr_resolution_enable),
4182	HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
4183	       sizeof(struct hci_rp_le_read_max_data_len)),
4184	HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
4185		      hci_cc_write_le_host_supported),
4186	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
4187	HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
4188	       sizeof(struct hci_rp_read_rssi)),
4189	HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
4190	       sizeof(struct hci_rp_read_tx_power)),
4191	HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
4192	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
4193		      hci_cc_le_set_ext_scan_param),
4194	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
4195		      hci_cc_le_set_ext_scan_enable),
4196	HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
4197	HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4198	       hci_cc_le_read_num_adv_sets,
4199	       sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
4200	HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
4201	       sizeof(struct hci_rp_le_set_ext_adv_params)),
4202	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
4203		      hci_cc_le_set_ext_adv_enable),
4204	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
4205		      hci_cc_le_set_adv_set_random_addr),
4206	HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
4207	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
4208	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
4209	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
4210		      hci_cc_le_set_per_adv_enable),
4211	HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
4212	       sizeof(struct hci_rp_le_read_transmit_power)),
4213	HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
4214	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
4215	       sizeof(struct hci_rp_le_read_buffer_size_v2)),
4216	HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
4217		  sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
4218	HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
4219	       sizeof(struct hci_rp_le_setup_iso_path)),
4220};
4221
4222static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4223		      struct sk_buff *skb)
4224{
4225	void *data;
4226
4227	if (skb->len < cc->min_len) {
4228		bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4229			   cc->op, skb->len, cc->min_len);
4230		return HCI_ERROR_UNSPECIFIED;
4231	}
4232
4233	/* Just warn if the length is over max_len size it still be possible to
4234	 * partially parse the cc so leave to callback to decide if that is
4235	 * acceptable.
4236	 */
4237	if (skb->len > cc->max_len)
4238		bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4239			    cc->op, skb->len, cc->max_len);
4240
4241	data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4242	if (!data)
4243		return HCI_ERROR_UNSPECIFIED;
4244
4245	return cc->func(hdev, data, skb);
4246}
4247
4248static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
4249				 struct sk_buff *skb, u16 *opcode, u8 *status,
4250				 hci_req_complete_t *req_complete,
4251				 hci_req_complete_skb_t *req_complete_skb)
4252{
4253	struct hci_ev_cmd_complete *ev = data;
4254	int i;
4255
4256	*opcode = __le16_to_cpu(ev->opcode);
4257
4258	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4259
4260	for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
4261		if (hci_cc_table[i].op == *opcode) {
4262			*status = hci_cc_func(hdev, &hci_cc_table[i], skb);
4263			break;
4264		}
4265	}
4266
4267	if (i == ARRAY_SIZE(hci_cc_table)) {
4268		/* Unknown opcode, assume byte 0 contains the status, so
4269		 * that e.g. __hci_cmd_sync() properly returns errors
4270		 * for vendor specific commands send by HCI drivers.
4271		 * If a vendor doesn't actually follow this convention we may
4272		 * need to introduce a vendor CC table in order to properly set
4273		 * the status.
4274		 */
4275		*status = skb->data[0];
4276	}
4277
4278	handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4279
4280	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
4281			     req_complete_skb);
4282
4283	if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4284		bt_dev_err(hdev,
4285			   "unexpected event for opcode 0x%4.4x", *opcode);
4286		return;
4287	}
4288
4289	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4290		queue_work(hdev->workqueue, &hdev->cmd_work);
4291}
4292
4293static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
4294{
4295	struct hci_cp_le_create_cis *cp;
4296	bool pending = false;
4297	int i;
4298
4299	bt_dev_dbg(hdev, "status 0x%2.2x", status);
4300
4301	if (!status)
4302		return;
4303
4304	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
4305	if (!cp)
4306		return;
4307
4308	hci_dev_lock(hdev);
4309
4310	/* Remove connection if command failed */
4311	for (i = 0; cp->num_cis; cp->num_cis--, i++) {
4312		struct hci_conn *conn;
4313		u16 handle;
4314
4315		handle = __le16_to_cpu(cp->cis[i].cis_handle);
4316
4317		conn = hci_conn_hash_lookup_handle(hdev, handle);
4318		if (conn) {
4319			if (test_and_clear_bit(HCI_CONN_CREATE_CIS,
4320					       &conn->flags))
4321				pending = true;
4322			conn->state = BT_CLOSED;
4323			hci_connect_cfm(conn, status);
4324			hci_conn_del(conn);
4325		}
4326	}
4327
4328	if (pending)
4329		hci_le_create_cis_pending(hdev);
4330
4331	hci_dev_unlock(hdev);
4332}
4333
4334#define HCI_CS(_op, _func) \
4335{ \
4336	.op = _op, \
4337	.func = _func, \
4338}
4339
4340static const struct hci_cs {
4341	u16  op;
4342	void (*func)(struct hci_dev *hdev, __u8 status);
4343} hci_cs_table[] = {
4344	HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
4345	HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
4346	HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
4347	HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
4348	HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
4349	HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
4350	HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
4351	HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
4352	HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
4353	       hci_cs_read_remote_ext_features),
4354	HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
4355	HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
4356	       hci_cs_enhanced_setup_sync_conn),
4357	HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
4358	HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
4359	HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
4360	HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
4361	HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
4362	HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
4363	HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
4364	HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
4365	HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
4366};
4367
4368static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
4369			       struct sk_buff *skb, u16 *opcode, u8 *status,
4370			       hci_req_complete_t *req_complete,
4371			       hci_req_complete_skb_t *req_complete_skb)
4372{
4373	struct hci_ev_cmd_status *ev = data;
4374	int i;
4375
4376	*opcode = __le16_to_cpu(ev->opcode);
4377	*status = ev->status;
4378
4379	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4380
4381	for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
4382		if (hci_cs_table[i].op == *opcode) {
4383			hci_cs_table[i].func(hdev, ev->status);
4384			break;
4385		}
4386	}
4387
4388	handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4389
4390	/* Indicate request completion if the command failed. Also, if
4391	 * we're not waiting for a special event and we get a success
4392	 * command status we should try to flag the request as completed
4393	 * (since for this kind of commands there will not be a command
4394	 * complete event).
4395	 */
4396	if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) {
4397		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
4398				     req_complete_skb);
4399		if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4400			bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
4401				   *opcode);
4402			return;
4403		}
4404	}
4405
4406	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4407		queue_work(hdev->workqueue, &hdev->cmd_work);
4408}
4409
4410static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
4411				   struct sk_buff *skb)
4412{
4413	struct hci_ev_hardware_error *ev = data;
4414
4415	bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
4416
4417	hdev->hw_error_code = ev->code;
4418
4419	queue_work(hdev->req_workqueue, &hdev->error_reset);
4420}
4421
4422static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4423				struct sk_buff *skb)
4424{
4425	struct hci_ev_role_change *ev = data;
4426	struct hci_conn *conn;
4427
4428	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4429
4430	hci_dev_lock(hdev);
4431
4432	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4433	if (conn) {
4434		if (!ev->status)
4435			conn->role = ev->role;
4436
4437		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4438
4439		hci_role_switch_cfm(conn, ev->status, ev->role);
4440	}
4441
4442	hci_dev_unlock(hdev);
4443}
4444
4445static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4446				  struct sk_buff *skb)
4447{
4448	struct hci_ev_num_comp_pkts *ev = data;
4449	int i;
4450
4451	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4452			     flex_array_size(ev, handles, ev->num)))
4453		return;
4454
4455	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
4456		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
4457		return;
4458	}
4459
4460	bt_dev_dbg(hdev, "num %d", ev->num);
4461
4462	for (i = 0; i < ev->num; i++) {
4463		struct hci_comp_pkts_info *info = &ev->handles[i];
4464		struct hci_conn *conn;
4465		__u16  handle, count;
4466
4467		handle = __le16_to_cpu(info->handle);
4468		count  = __le16_to_cpu(info->count);
4469
4470		conn = hci_conn_hash_lookup_handle(hdev, handle);
4471		if (!conn)
4472			continue;
4473
4474		conn->sent -= count;
4475
4476		switch (conn->type) {
4477		case ACL_LINK:
4478			hdev->acl_cnt += count;
4479			if (hdev->acl_cnt > hdev->acl_pkts)
4480				hdev->acl_cnt = hdev->acl_pkts;
4481			break;
4482
4483		case LE_LINK:
4484			if (hdev->le_pkts) {
4485				hdev->le_cnt += count;
4486				if (hdev->le_cnt > hdev->le_pkts)
4487					hdev->le_cnt = hdev->le_pkts;
4488			} else {
4489				hdev->acl_cnt += count;
4490				if (hdev->acl_cnt > hdev->acl_pkts)
4491					hdev->acl_cnt = hdev->acl_pkts;
4492			}
4493			break;
4494
4495		case SCO_LINK:
4496			hdev->sco_cnt += count;
4497			if (hdev->sco_cnt > hdev->sco_pkts)
4498				hdev->sco_cnt = hdev->sco_pkts;
4499			break;
4500
4501		case ISO_LINK:
4502			if (hdev->iso_pkts) {
4503				hdev->iso_cnt += count;
4504				if (hdev->iso_cnt > hdev->iso_pkts)
4505					hdev->iso_cnt = hdev->iso_pkts;
4506			} else if (hdev->le_pkts) {
4507				hdev->le_cnt += count;
4508				if (hdev->le_cnt > hdev->le_pkts)
4509					hdev->le_cnt = hdev->le_pkts;
4510			} else {
4511				hdev->acl_cnt += count;
4512				if (hdev->acl_cnt > hdev->acl_pkts)
4513					hdev->acl_cnt = hdev->acl_pkts;
4514			}
4515			break;
4516
4517		default:
4518			bt_dev_err(hdev, "unknown type %d conn %p",
4519				   conn->type, conn);
4520			break;
4521		}
4522	}
4523
4524	queue_work(hdev->workqueue, &hdev->tx_work);
4525}
4526
4527static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4528						 __u16 handle)
4529{
4530	struct hci_chan *chan;
4531
4532	switch (hdev->dev_type) {
4533	case HCI_PRIMARY:
4534		return hci_conn_hash_lookup_handle(hdev, handle);
4535	case HCI_AMP:
4536		chan = hci_chan_lookup_handle(hdev, handle);
4537		if (chan)
4538			return chan->conn;
4539		break;
4540	default:
4541		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4542		break;
4543	}
4544
4545	return NULL;
4546}
4547
4548static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
4549				    struct sk_buff *skb)
4550{
4551	struct hci_ev_num_comp_blocks *ev = data;
4552	int i;
4553
4554	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
4555			     flex_array_size(ev, handles, ev->num_hndl)))
4556		return;
4557
4558	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
4559		bt_dev_err(hdev, "wrong event for mode %d",
4560			   hdev->flow_ctl_mode);
4561		return;
4562	}
4563
4564	bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
4565		   ev->num_hndl);
4566
4567	for (i = 0; i < ev->num_hndl; i++) {
4568		struct hci_comp_blocks_info *info = &ev->handles[i];
4569		struct hci_conn *conn = NULL;
4570		__u16  handle, block_count;
4571
4572		handle = __le16_to_cpu(info->handle);
4573		block_count = __le16_to_cpu(info->blocks);
4574
4575		conn = __hci_conn_lookup_handle(hdev, handle);
4576		if (!conn)
4577			continue;
4578
4579		conn->sent -= block_count;
4580
4581		switch (conn->type) {
4582		case ACL_LINK:
4583		case AMP_LINK:
4584			hdev->block_cnt += block_count;
4585			if (hdev->block_cnt > hdev->num_blocks)
4586				hdev->block_cnt = hdev->num_blocks;
4587			break;
4588
4589		default:
4590			bt_dev_err(hdev, "unknown type %d conn %p",
4591				   conn->type, conn);
4592			break;
4593		}
4594	}
4595
4596	queue_work(hdev->workqueue, &hdev->tx_work);
4597}
4598
4599static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4600				struct sk_buff *skb)
4601{
4602	struct hci_ev_mode_change *ev = data;
4603	struct hci_conn *conn;
4604
4605	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4606
4607	hci_dev_lock(hdev);
4608
4609	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4610	if (conn) {
4611		conn->mode = ev->mode;
4612
4613		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4614					&conn->flags)) {
4615			if (conn->mode == HCI_CM_ACTIVE)
4616				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4617			else
4618				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4619		}
4620
4621		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4622			hci_sco_setup(conn, ev->status);
4623	}
4624
4625	hci_dev_unlock(hdev);
4626}
4627
4628static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4629				     struct sk_buff *skb)
4630{
4631	struct hci_ev_pin_code_req *ev = data;
4632	struct hci_conn *conn;
4633
4634	bt_dev_dbg(hdev, "");
4635
4636	hci_dev_lock(hdev);
4637
4638	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4639	if (!conn)
4640		goto unlock;
4641
4642	if (conn->state == BT_CONNECTED) {
4643		hci_conn_hold(conn);
4644		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4645		hci_conn_drop(conn);
4646	}
4647
4648	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4649	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4650		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4651			     sizeof(ev->bdaddr), &ev->bdaddr);
4652	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4653		u8 secure;
4654
4655		if (conn->pending_sec_level == BT_SECURITY_HIGH)
4656			secure = 1;
4657		else
4658			secure = 0;
4659
4660		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4661	}
4662
4663unlock:
4664	hci_dev_unlock(hdev);
4665}
4666
4667static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4668{
4669	if (key_type == HCI_LK_CHANGED_COMBINATION)
4670		return;
4671
4672	conn->pin_length = pin_len;
4673	conn->key_type = key_type;
4674
4675	switch (key_type) {
4676	case HCI_LK_LOCAL_UNIT:
4677	case HCI_LK_REMOTE_UNIT:
4678	case HCI_LK_DEBUG_COMBINATION:
4679		return;
4680	case HCI_LK_COMBINATION:
4681		if (pin_len == 16)
4682			conn->pending_sec_level = BT_SECURITY_HIGH;
4683		else
4684			conn->pending_sec_level = BT_SECURITY_MEDIUM;
4685		break;
4686	case HCI_LK_UNAUTH_COMBINATION_P192:
4687	case HCI_LK_UNAUTH_COMBINATION_P256:
4688		conn->pending_sec_level = BT_SECURITY_MEDIUM;
4689		break;
4690	case HCI_LK_AUTH_COMBINATION_P192:
4691		conn->pending_sec_level = BT_SECURITY_HIGH;
4692		break;
4693	case HCI_LK_AUTH_COMBINATION_P256:
4694		conn->pending_sec_level = BT_SECURITY_FIPS;
4695		break;
4696	}
4697}
4698
4699static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
4700				     struct sk_buff *skb)
4701{
4702	struct hci_ev_link_key_req *ev = data;
4703	struct hci_cp_link_key_reply cp;
4704	struct hci_conn *conn;
4705	struct link_key *key;
4706
4707	bt_dev_dbg(hdev, "");
4708
4709	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4710		return;
4711
4712	hci_dev_lock(hdev);
4713
4714	key = hci_find_link_key(hdev, &ev->bdaddr);
4715	if (!key) {
4716		bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
4717		goto not_found;
4718	}
4719
4720	bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);
4721
4722	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4723	if (conn) {
4724		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4725
4726		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4727		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4728		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4729			bt_dev_dbg(hdev, "ignoring unauthenticated key");
4730			goto not_found;
4731		}
4732
4733		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4734		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
4735		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
4736			bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
4737			goto not_found;
4738		}
4739
4740		conn_set_key(conn, key->type, key->pin_len);
4741	}
4742
4743	bacpy(&cp.bdaddr, &ev->bdaddr);
4744	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4745
4746	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4747
4748	hci_dev_unlock(hdev);
4749
4750	return;
4751
4752not_found:
4753	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4754	hci_dev_unlock(hdev);
4755}
4756
4757static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
4758				    struct sk_buff *skb)
4759{
4760	struct hci_ev_link_key_notify *ev = data;
4761	struct hci_conn *conn;
4762	struct link_key *key;
4763	bool persistent;
4764	u8 pin_len = 0;
4765
4766	bt_dev_dbg(hdev, "");
4767
4768	hci_dev_lock(hdev);
4769
4770	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4771	if (!conn)
4772		goto unlock;
4773
4774	/* Ignore NULL link key against CVE-2020-26555 */
4775	if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
4776		bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
4777			   &ev->bdaddr);
4778		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4779		hci_conn_drop(conn);
4780		goto unlock;
4781	}
4782
4783	hci_conn_hold(conn);
4784	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4785	hci_conn_drop(conn);
4786
4787	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4788	conn_set_key(conn, ev->key_type, conn->pin_length);
4789
4790	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4791		goto unlock;
4792
4793	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4794			        ev->key_type, pin_len, &persistent);
4795	if (!key)
4796		goto unlock;
4797
4798	/* Update connection information since adding the key will have
4799	 * fixed up the type in the case of changed combination keys.
4800	 */
4801	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4802		conn_set_key(conn, key->type, key->pin_len);
4803
4804	mgmt_new_link_key(hdev, key, persistent);
4805
4806	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4807	 * is set. If it's not set simply remove the key from the kernel
4808	 * list (we've still notified user space about it but with
4809	 * store_hint being 0).
4810	 */
4811	if (key->type == HCI_LK_DEBUG_COMBINATION &&
4812	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4813		list_del_rcu(&key->list);
4814		kfree_rcu(key, rcu);
4815		goto unlock;
4816	}
4817
4818	if (persistent)
4819		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4820	else
4821		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4822
4823unlock:
4824	hci_dev_unlock(hdev);
4825}
4826
4827static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4828				 struct sk_buff *skb)
4829{
4830	struct hci_ev_clock_offset *ev = data;
4831	struct hci_conn *conn;
4832
4833	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4834
4835	hci_dev_lock(hdev);
4836
4837	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4838	if (conn && !ev->status) {
4839		struct inquiry_entry *ie;
4840
4841		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4842		if (ie) {
4843			ie->data.clock_offset = ev->clock_offset;
4844			ie->timestamp = jiffies;
4845		}
4846	}
4847
4848	hci_dev_unlock(hdev);
4849}
4850
4851static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4852				    struct sk_buff *skb)
4853{
4854	struct hci_ev_pkt_type_change *ev = data;
4855	struct hci_conn *conn;
4856
4857	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4858
4859	hci_dev_lock(hdev);
4860
4861	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4862	if (conn && !ev->status)
4863		conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4864
4865	hci_dev_unlock(hdev);
4866}
4867
4868static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
4869				   struct sk_buff *skb)
4870{
4871	struct hci_ev_pscan_rep_mode *ev = data;
4872	struct inquiry_entry *ie;
4873
4874	bt_dev_dbg(hdev, "");
4875
4876	hci_dev_lock(hdev);
4877
4878	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4879	if (ie) {
4880		ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4881		ie->timestamp = jiffies;
4882	}
4883
4884	hci_dev_unlock(hdev);
4885}
4886
4887static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
4888					     struct sk_buff *skb)
4889{
4890	struct hci_ev_inquiry_result_rssi *ev = edata;
4891	struct inquiry_data data;
4892	int i;
4893
4894	bt_dev_dbg(hdev, "num_rsp %d", ev->num);
4895
4896	if (!ev->num)
4897		return;
4898
4899	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4900		return;
4901
4902	hci_dev_lock(hdev);
4903
4904	if (skb->len == array_size(ev->num,
4905				   sizeof(struct inquiry_info_rssi_pscan))) {
4906		struct inquiry_info_rssi_pscan *info;
4907
4908		for (i = 0; i < ev->num; i++) {
4909			u32 flags;
4910
4911			info = hci_ev_skb_pull(hdev, skb,
4912					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4913					       sizeof(*info));
4914			if (!info) {
4915				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4916					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4917				goto unlock;
4918			}
4919
4920			bacpy(&data.bdaddr, &info->bdaddr);
4921			data.pscan_rep_mode	= info->pscan_rep_mode;
4922			data.pscan_period_mode	= info->pscan_period_mode;
4923			data.pscan_mode		= info->pscan_mode;
4924			memcpy(data.dev_class, info->dev_class, 3);
4925			data.clock_offset	= info->clock_offset;
4926			data.rssi		= info->rssi;
4927			data.ssp_mode		= 0x00;
4928
4929			flags = hci_inquiry_cache_update(hdev, &data, false);
4930
4931			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4932					  info->dev_class, info->rssi,
4933					  flags, NULL, 0, NULL, 0, 0);
4934		}
4935	} else if (skb->len == array_size(ev->num,
4936					  sizeof(struct inquiry_info_rssi))) {
4937		struct inquiry_info_rssi *info;
4938
4939		for (i = 0; i < ev->num; i++) {
4940			u32 flags;
4941
4942			info = hci_ev_skb_pull(hdev, skb,
4943					       HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4944					       sizeof(*info));
4945			if (!info) {
4946				bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4947					   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4948				goto unlock;
4949			}
4950
4951			bacpy(&data.bdaddr, &info->bdaddr);
4952			data.pscan_rep_mode	= info->pscan_rep_mode;
4953			data.pscan_period_mode	= info->pscan_period_mode;
4954			data.pscan_mode		= 0x00;
4955			memcpy(data.dev_class, info->dev_class, 3);
4956			data.clock_offset	= info->clock_offset;
4957			data.rssi		= info->rssi;
4958			data.ssp_mode		= 0x00;
4959
4960			flags = hci_inquiry_cache_update(hdev, &data, false);
4961
4962			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4963					  info->dev_class, info->rssi,
4964					  flags, NULL, 0, NULL, 0, 0);
4965		}
4966	} else {
4967		bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4968			   HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4969	}
4970unlock:
4971	hci_dev_unlock(hdev);
4972}
4973
4974static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
4975					struct sk_buff *skb)
4976{
4977	struct hci_ev_remote_ext_features *ev = data;
4978	struct hci_conn *conn;
4979
4980	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4981
4982	hci_dev_lock(hdev);
4983
4984	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4985	if (!conn)
4986		goto unlock;
4987
4988	if (ev->page < HCI_MAX_PAGES)
4989		memcpy(conn->features[ev->page], ev->features, 8);
4990
4991	if (!ev->status && ev->page == 0x01) {
4992		struct inquiry_entry *ie;
4993
4994		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4995		if (ie)
4996			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4997
4998		if (ev->features[0] & LMP_HOST_SSP) {
4999			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
5000		} else {
5001			/* It is mandatory by the Bluetooth specification that
5002			 * Extended Inquiry Results are only used when Secure
5003			 * Simple Pairing is enabled, but some devices violate
5004			 * this.
5005			 *
5006			 * To make these devices work, the internal SSP
5007			 * enabled flag needs to be cleared if the remote host
5008			 * features do not indicate SSP support */
5009			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
5010		}
5011
5012		if (ev->features[0] & LMP_HOST_SC)
5013			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
5014	}
5015
5016	if (conn->state != BT_CONFIG)
5017		goto unlock;
5018
5019	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
5020		struct hci_cp_remote_name_req cp;
5021		memset(&cp, 0, sizeof(cp));
5022		bacpy(&cp.bdaddr, &conn->dst);
5023		cp.pscan_rep_mode = 0x02;
5024		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
5025	} else {
5026		mgmt_device_connected(hdev, conn, NULL, 0);
5027	}
5028
5029	if (!hci_outgoing_auth_needed(hdev, conn)) {
5030		conn->state = BT_CONNECTED;
5031		hci_connect_cfm(conn, ev->status);
5032		hci_conn_drop(conn);
5033	}
5034
5035unlock:
5036	hci_dev_unlock(hdev);
5037}
5038
5039static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
5040				       struct sk_buff *skb)
5041{
5042	struct hci_ev_sync_conn_complete *ev = data;
5043	struct hci_conn *conn;
5044	u8 status = ev->status;
5045
5046	switch (ev->link_type) {
5047	case SCO_LINK:
5048	case ESCO_LINK:
5049		break;
5050	default:
5051		/* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
5052		 * for HCI_Synchronous_Connection_Complete is limited to
5053		 * either SCO or eSCO
5054		 */
5055		bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
5056		return;
5057	}
5058
5059	bt_dev_dbg(hdev, "status 0x%2.2x", status);
5060
5061	hci_dev_lock(hdev);
5062
5063	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
5064	if (!conn) {
5065		if (ev->link_type == ESCO_LINK)
5066			goto unlock;
5067
5068		/* When the link type in the event indicates SCO connection
5069		 * and lookup of the connection object fails, then check
5070		 * if an eSCO connection object exists.
5071		 *
5072		 * The core limits the synchronous connections to either
5073		 * SCO or eSCO. The eSCO connection is preferred and tried
5074		 * to be setup first and until successfully established,
5075		 * the link type will be hinted as eSCO.
5076		 */
5077		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
5078		if (!conn)
5079			goto unlock;
5080	}
5081
5082	/* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
5083	 * Processing it more than once per connection can corrupt kernel memory.
5084	 *
5085	 * As the connection handle is set here for the first time, it indicates
5086	 * whether the connection is already set up.
5087	 */
5088	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
5089		bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
5090		goto unlock;
5091	}
5092
5093	switch (status) {
5094	case 0x00:
5095		status = hci_conn_set_handle(conn, __le16_to_cpu(ev->handle));
5096		if (status) {
5097			conn->state = BT_CLOSED;
5098			break;
5099		}
5100
5101		conn->state  = BT_CONNECTED;
5102		conn->type   = ev->link_type;
5103
5104		hci_debugfs_create_conn(conn);
5105		hci_conn_add_sysfs(conn);
5106		break;
5107
5108	case 0x10:	/* Connection Accept Timeout */
5109	case 0x0d:	/* Connection Rejected due to Limited Resources */
5110	case 0x11:	/* Unsupported Feature or Parameter Value */
5111	case 0x1c:	/* SCO interval rejected */
5112	case 0x1a:	/* Unsupported Remote Feature */
5113	case 0x1e:	/* Invalid LMP Parameters */
5114	case 0x1f:	/* Unspecified error */
5115	case 0x20:	/* Unsupported LMP Parameter value */
5116		if (conn->out) {
5117			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
5118					(hdev->esco_type & EDR_ESCO_MASK);
5119			if (hci_setup_sync(conn, conn->parent->handle))
5120				goto unlock;
5121		}
5122		fallthrough;
5123
5124	default:
5125		conn->state = BT_CLOSED;
5126		break;
5127	}
5128
5129	bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
5130	/* Notify only in case of SCO over HCI transport data path which
5131	 * is zero and non-zero value shall be non-HCI transport data path
5132	 */
5133	if (conn->codec.data_path == 0 && hdev->notify) {
5134		switch (ev->air_mode) {
5135		case 0x02:
5136			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
5137			break;
5138		case 0x03:
5139			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
5140			break;
5141		}
5142	}
5143
5144	hci_connect_cfm(conn, status);
5145	if (status)
5146		hci_conn_del(conn);
5147
5148unlock:
5149	hci_dev_unlock(hdev);
5150}
5151
5152static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5153{
5154	size_t parsed = 0;
5155
5156	while (parsed < eir_len) {
5157		u8 field_len = eir[0];
5158
5159		if (field_len == 0)
5160			return parsed;
5161
5162		parsed += field_len + 1;
5163		eir += field_len + 1;
5164	}
5165
5166	return eir_len;
5167}
5168
5169static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
5170					    struct sk_buff *skb)
5171{
5172	struct hci_ev_ext_inquiry_result *ev = edata;
5173	struct inquiry_data data;
5174	size_t eir_len;
5175	int i;
5176
5177	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
5178			     flex_array_size(ev, info, ev->num)))
5179		return;
5180
5181	bt_dev_dbg(hdev, "num %d", ev->num);
5182
5183	if (!ev->num)
5184		return;
5185
5186	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
5187		return;
5188
5189	hci_dev_lock(hdev);
5190
5191	for (i = 0; i < ev->num; i++) {
5192		struct extended_inquiry_info *info = &ev->info[i];
5193		u32 flags;
5194		bool name_known;
5195
5196		bacpy(&data.bdaddr, &info->bdaddr);
5197		data.pscan_rep_mode	= info->pscan_rep_mode;
5198		data.pscan_period_mode	= info->pscan_period_mode;
5199		data.pscan_mode		= 0x00;
5200		memcpy(data.dev_class, info->dev_class, 3);
5201		data.clock_offset	= info->clock_offset;
5202		data.rssi		= info->rssi;
5203		data.ssp_mode		= 0x01;
5204
5205		if (hci_dev_test_flag(hdev, HCI_MGMT))
5206			name_known = eir_get_data(info->data,
5207						  sizeof(info->data),
5208						  EIR_NAME_COMPLETE, NULL);
5209		else
5210			name_known = true;
5211
5212		flags = hci_inquiry_cache_update(hdev, &data, name_known);
5213
5214		eir_len = eir_get_length(info->data, sizeof(info->data));
5215
5216		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5217				  info->dev_class, info->rssi,
5218				  flags, info->data, eir_len, NULL, 0, 0);
5219	}
5220
5221	hci_dev_unlock(hdev);
5222}
5223
5224static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
5225					 struct sk_buff *skb)
5226{
5227	struct hci_ev_key_refresh_complete *ev = data;
5228	struct hci_conn *conn;
5229
5230	bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
5231		   __le16_to_cpu(ev->handle));
5232
5233	hci_dev_lock(hdev);
5234
5235	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5236	if (!conn)
5237		goto unlock;
5238
5239	/* For BR/EDR the necessary steps are taken through the
5240	 * auth_complete event.
5241	 */
5242	if (conn->type != LE_LINK)
5243		goto unlock;
5244
5245	if (!ev->status)
5246		conn->sec_level = conn->pending_sec_level;
5247
5248	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
5249
5250	if (ev->status && conn->state == BT_CONNECTED) {
5251		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
5252		hci_conn_drop(conn);
5253		goto unlock;
5254	}
5255
5256	if (conn->state == BT_CONFIG) {
5257		if (!ev->status)
5258			conn->state = BT_CONNECTED;
5259
5260		hci_connect_cfm(conn, ev->status);
5261		hci_conn_drop(conn);
5262	} else {
5263		hci_auth_cfm(conn, ev->status);
5264
5265		hci_conn_hold(conn);
5266		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
5267		hci_conn_drop(conn);
5268	}
5269
5270unlock:
5271	hci_dev_unlock(hdev);
5272}
5273
5274static u8 hci_get_auth_req(struct hci_conn *conn)
5275{
5276	/* If remote requests no-bonding follow that lead */
5277	if (conn->remote_auth == HCI_AT_NO_BONDING ||
5278	    conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5279		return conn->remote_auth | (conn->auth_type & 0x01);
5280
5281	/* If both remote and local have enough IO capabilities, require
5282	 * MITM protection
5283	 */
5284	if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5285	    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5286		return conn->remote_auth | 0x01;
5287
5288	/* No MITM protection possible so ignore remote requirement */
5289	return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5290}
5291
5292static u8 bredr_oob_data_present(struct hci_conn *conn)
5293{
5294	struct hci_dev *hdev = conn->hdev;
5295	struct oob_data *data;
5296
5297	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
5298	if (!data)
5299		return 0x00;
5300
5301	if (bredr_sc_enabled(hdev)) {
5302		/* When Secure Connections is enabled, then just
5303		 * return the present value stored with the OOB
5304		 * data. The stored value contains the right present
5305		 * information. However it can only be trusted when
5306		 * not in Secure Connection Only mode.
5307		 */
5308		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
5309			return data->present;
5310
5311		/* When Secure Connections Only mode is enabled, then
5312		 * the P-256 values are required. If they are not
5313		 * available, then do not declare that OOB data is
5314		 * present.
5315		 */
5316		if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
5317		    !crypto_memneq(data->hash256, ZERO_KEY, 16))
5318			return 0x00;
5319
5320		return 0x02;
5321	}
5322
5323	/* When Secure Connections is not enabled or actually
5324	 * not supported by the hardware, then check that if
5325	 * P-192 data values are present.
5326	 */
5327	if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
5328	    !crypto_memneq(data->hash192, ZERO_KEY, 16))
5329		return 0x00;
5330
5331	return 0x01;
5332}
5333
5334static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
5335				    struct sk_buff *skb)
5336{
5337	struct hci_ev_io_capa_request *ev = data;
5338	struct hci_conn *conn;
5339
5340	bt_dev_dbg(hdev, "");
5341
5342	hci_dev_lock(hdev);
5343
5344	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5345	if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
5346		goto unlock;
5347
5348	/* Assume remote supports SSP since it has triggered this event */
5349	set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
5350
5351	hci_conn_hold(conn);
5352
5353	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5354		goto unlock;
5355
5356	/* Allow pairing if we're pairable, the initiators of the
5357	 * pairing or if the remote is not requesting bonding.
5358	 */
5359	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
5360	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
5361	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
5362		struct hci_cp_io_capability_reply cp;
5363
5364		bacpy(&cp.bdaddr, &ev->bdaddr);
5365		/* Change the IO capability from KeyboardDisplay
5366		 * to DisplayYesNo as it is not supported by BT spec. */
5367		cp.capability = (conn->io_capability == 0x04) ?
5368				HCI_IO_DISPLAY_YESNO : conn->io_capability;
5369
5370		/* If we are initiators, there is no remote information yet */
5371		if (conn->remote_auth == 0xff) {
5372			/* Request MITM protection if our IO caps allow it
5373			 * except for the no-bonding case.
5374			 */
5375			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5376			    conn->auth_type != HCI_AT_NO_BONDING)
5377				conn->auth_type |= 0x01;
5378		} else {
5379			conn->auth_type = hci_get_auth_req(conn);
5380		}
5381
5382		/* If we're not bondable, force one of the non-bondable
5383		 * authentication requirement values.
5384		 */
5385		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
5386			conn->auth_type &= HCI_AT_NO_BONDING_MITM;
5387
5388		cp.authentication = conn->auth_type;
5389		cp.oob_data = bredr_oob_data_present(conn);
5390
5391		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
5392			     sizeof(cp), &cp);
5393	} else {
5394		struct hci_cp_io_capability_neg_reply cp;
5395
5396		bacpy(&cp.bdaddr, &ev->bdaddr);
5397		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
5398
5399		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
5400			     sizeof(cp), &cp);
5401	}
5402
5403unlock:
5404	hci_dev_unlock(hdev);
5405}
5406
5407static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5408				  struct sk_buff *skb)
5409{
5410	struct hci_ev_io_capa_reply *ev = data;
5411	struct hci_conn *conn;
5412
5413	bt_dev_dbg(hdev, "");
5414
5415	hci_dev_lock(hdev);
5416
5417	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5418	if (!conn)
5419		goto unlock;
5420
5421	conn->remote_cap = ev->capability;
5422	conn->remote_auth = ev->authentication;
5423
5424unlock:
5425	hci_dev_unlock(hdev);
5426}
5427
5428static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
5429					 struct sk_buff *skb)
5430{
5431	struct hci_ev_user_confirm_req *ev = data;
5432	int loc_mitm, rem_mitm, confirm_hint = 0;
5433	struct hci_conn *conn;
5434
5435	bt_dev_dbg(hdev, "");
5436
5437	hci_dev_lock(hdev);
5438
5439	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5440		goto unlock;
5441
5442	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5443	if (!conn)
5444		goto unlock;
5445
5446	loc_mitm = (conn->auth_type & 0x01);
5447	rem_mitm = (conn->remote_auth & 0x01);
5448
5449	/* If we require MITM but the remote device can't provide that
5450	 * (it has NoInputNoOutput) then reject the confirmation
5451	 * request. We check the security level here since it doesn't
5452	 * necessarily match conn->auth_type.
5453	 */
5454	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
5455	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
5456		bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
5457		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
5458			     sizeof(ev->bdaddr), &ev->bdaddr);
5459		goto unlock;
5460	}
5461
5462	/* If no side requires MITM protection; auto-accept */
5463	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
5464	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
5465
5466		/* If we're not the initiators request authorization to
5467		 * proceed from user space (mgmt_user_confirm with
5468		 * confirm_hint set to 1). The exception is if neither
5469		 * side had MITM or if the local IO capability is
5470		 * NoInputNoOutput, in which case we do auto-accept
5471		 */
5472		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
5473		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5474		    (loc_mitm || rem_mitm)) {
5475			bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
5476			confirm_hint = 1;
5477			goto confirm;
5478		}
5479
5480		/* If there already exists link key in local host, leave the
5481		 * decision to user space since the remote device could be
5482		 * legitimate or malicious.
5483		 */
5484		if (hci_find_link_key(hdev, &ev->bdaddr)) {
5485			bt_dev_dbg(hdev, "Local host already has link key");
5486			confirm_hint = 1;
5487			goto confirm;
5488		}
5489
5490		BT_DBG("Auto-accept of user confirmation with %ums delay",
5491		       hdev->auto_accept_delay);
5492
5493		if (hdev->auto_accept_delay > 0) {
5494			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
5495			queue_delayed_work(conn->hdev->workqueue,
5496					   &conn->auto_accept_work, delay);
5497			goto unlock;
5498		}
5499
5500		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
5501			     sizeof(ev->bdaddr), &ev->bdaddr);
5502		goto unlock;
5503	}
5504
5505confirm:
5506	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
5507				  le32_to_cpu(ev->passkey), confirm_hint);
5508
5509unlock:
5510	hci_dev_unlock(hdev);
5511}
5512
5513static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5514					 struct sk_buff *skb)
5515{
5516	struct hci_ev_user_passkey_req *ev = data;
5517
5518	bt_dev_dbg(hdev, "");
5519
5520	if (hci_dev_test_flag(hdev, HCI_MGMT))
5521		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5522}
5523
5524static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5525					struct sk_buff *skb)
5526{
5527	struct hci_ev_user_passkey_notify *ev = data;
5528	struct hci_conn *conn;
5529
5530	bt_dev_dbg(hdev, "");
5531
5532	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5533	if (!conn)
5534		return;
5535
5536	conn->passkey_notify = __le32_to_cpu(ev->passkey);
5537	conn->passkey_entered = 0;
5538
5539	if (hci_dev_test_flag(hdev, HCI_MGMT))
5540		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5541					 conn->dst_type, conn->passkey_notify,
5542					 conn->passkey_entered);
5543}
5544
5545static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5546				    struct sk_buff *skb)
5547{
5548	struct hci_ev_keypress_notify *ev = data;
5549	struct hci_conn *conn;
5550
5551	bt_dev_dbg(hdev, "");
5552
5553	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5554	if (!conn)
5555		return;
5556
5557	switch (ev->type) {
5558	case HCI_KEYPRESS_STARTED:
5559		conn->passkey_entered = 0;
5560		return;
5561
5562	case HCI_KEYPRESS_ENTERED:
5563		conn->passkey_entered++;
5564		break;
5565
5566	case HCI_KEYPRESS_ERASED:
5567		conn->passkey_entered--;
5568		break;
5569
5570	case HCI_KEYPRESS_CLEARED:
5571		conn->passkey_entered = 0;
5572		break;
5573
5574	case HCI_KEYPRESS_COMPLETED:
5575		return;
5576	}
5577
5578	if (hci_dev_test_flag(hdev, HCI_MGMT))
5579		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5580					 conn->dst_type, conn->passkey_notify,
5581					 conn->passkey_entered);
5582}
5583
5584static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5585					 struct sk_buff *skb)
5586{
5587	struct hci_ev_simple_pair_complete *ev = data;
5588	struct hci_conn *conn;
5589
5590	bt_dev_dbg(hdev, "");
5591
5592	hci_dev_lock(hdev);
5593
5594	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5595	if (!conn || !hci_conn_ssp_enabled(conn))
5596		goto unlock;
5597
5598	/* Reset the authentication requirement to unknown */
5599	conn->remote_auth = 0xff;
5600
5601	/* To avoid duplicate auth_failed events to user space we check
5602	 * the HCI_CONN_AUTH_PEND flag which will be set if we
5603	 * initiated the authentication. A traditional auth_complete
5604	 * event gets always produced as initiator and is also mapped to
5605	 * the mgmt_auth_failed event */
5606	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5607		mgmt_auth_failed(conn, ev->status);
5608
5609	hci_conn_drop(conn);
5610
5611unlock:
5612	hci_dev_unlock(hdev);
5613}
5614
5615static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5616					 struct sk_buff *skb)
5617{
5618	struct hci_ev_remote_host_features *ev = data;
5619	struct inquiry_entry *ie;
5620	struct hci_conn *conn;
5621
5622	bt_dev_dbg(hdev, "");
5623
5624	hci_dev_lock(hdev);
5625
5626	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5627	if (conn)
5628		memcpy(conn->features[1], ev->features, 8);
5629
5630	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5631	if (ie)
5632		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5633
5634	hci_dev_unlock(hdev);
5635}
5636
5637static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
5638					    struct sk_buff *skb)
5639{
5640	struct hci_ev_remote_oob_data_request *ev = edata;
5641	struct oob_data *data;
5642
5643	bt_dev_dbg(hdev, "");
5644
5645	hci_dev_lock(hdev);
5646
5647	if (!hci_dev_test_flag(hdev, HCI_MGMT))
5648		goto unlock;
5649
5650	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5651	if (!data) {
5652		struct hci_cp_remote_oob_data_neg_reply cp;
5653
5654		bacpy(&cp.bdaddr, &ev->bdaddr);
5655		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5656			     sizeof(cp), &cp);
5657		goto unlock;
5658	}
5659
5660	if (bredr_sc_enabled(hdev)) {
5661		struct hci_cp_remote_oob_ext_data_reply cp;
5662
5663		bacpy(&cp.bdaddr, &ev->bdaddr);
5664		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5665			memset(cp.hash192, 0, sizeof(cp.hash192));
5666			memset(cp.rand192, 0, sizeof(cp.rand192));
5667		} else {
5668			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5669			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5670		}
5671		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5672		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5673
5674		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5675			     sizeof(cp), &cp);
5676	} else {
5677		struct hci_cp_remote_oob_data_reply cp;
5678
5679		bacpy(&cp.bdaddr, &ev->bdaddr);
5680		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5681		memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5682
5683		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5684			     sizeof(cp), &cp);
5685	}
5686
5687unlock:
5688	hci_dev_unlock(hdev);
5689}
5690
5691static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5692				u8 bdaddr_type, bdaddr_t *local_rpa)
5693{
5694	if (conn->out) {
5695		conn->dst_type = bdaddr_type;
5696		conn->resp_addr_type = bdaddr_type;
5697		bacpy(&conn->resp_addr, bdaddr);
5698
5699		/* Check if the controller has set a Local RPA then it must be
5700		 * used instead or hdev->rpa.
5701		 */
5702		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5703			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5704			bacpy(&conn->init_addr, local_rpa);
5705		} else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5706			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5707			bacpy(&conn->init_addr, &conn->hdev->rpa);
5708		} else {
5709			hci_copy_identity_address(conn->hdev, &conn->init_addr,
5710						  &conn->init_addr_type);
5711		}
5712	} else {
5713		conn->resp_addr_type = conn->hdev->adv_addr_type;
5714		/* Check if the controller has set a Local RPA then it must be
5715		 * used instead or hdev->rpa.
5716		 */
5717		if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5718			conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5719			bacpy(&conn->resp_addr, local_rpa);
5720		} else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5721			/* In case of ext adv, resp_addr will be updated in
5722			 * Adv Terminated event.
5723			 */
5724			if (!ext_adv_capable(conn->hdev))
5725				bacpy(&conn->resp_addr,
5726				      &conn->hdev->random_addr);
5727		} else {
5728			bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5729		}
5730
5731		conn->init_addr_type = bdaddr_type;
5732		bacpy(&conn->init_addr, bdaddr);
5733
5734		/* For incoming connections, set the default minimum
5735		 * and maximum connection interval. They will be used
5736		 * to check if the parameters are in range and if not
5737		 * trigger the connection update procedure.
5738		 */
5739		conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5740		conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
5741	}
5742}
5743
5744static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5745				 bdaddr_t *bdaddr, u8 bdaddr_type,
5746				 bdaddr_t *local_rpa, u8 role, u16 handle,
5747				 u16 interval, u16 latency,
5748				 u16 supervision_timeout)
5749{
5750	struct hci_conn_params *params;
5751	struct hci_conn *conn;
5752	struct smp_irk *irk;
5753	u8 addr_type;
5754
5755	hci_dev_lock(hdev);
5756
5757	/* All controllers implicitly stop advertising in the event of a
5758	 * connection, so ensure that the state bit is cleared.
5759	 */
5760	hci_dev_clear_flag(hdev, HCI_LE_ADV);
5761
5762	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
5763	if (!conn) {
5764		/* In case of error status and there is no connection pending
5765		 * just unlock as there is nothing to cleanup.
5766		 */
5767		if (status)
5768			goto unlock;
5769
5770		conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role);
5771		if (!conn) {
5772			bt_dev_err(hdev, "no memory for new connection");
5773			goto unlock;
5774		}
5775
5776		conn->dst_type = bdaddr_type;
5777
5778		/* If we didn't have a hci_conn object previously
5779		 * but we're in central role this must be something
5780		 * initiated using an accept list. Since accept list based
5781		 * connections are not "first class citizens" we don't
5782		 * have full tracking of them. Therefore, we go ahead
5783		 * with a "best effort" approach of determining the
5784		 * initiator address based on the HCI_PRIVACY flag.
5785		 */
5786		if (conn->out) {
5787			conn->resp_addr_type = bdaddr_type;
5788			bacpy(&conn->resp_addr, bdaddr);
5789			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5790				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5791				bacpy(&conn->init_addr, &hdev->rpa);
5792			} else {
5793				hci_copy_identity_address(hdev,
5794							  &conn->init_addr,
5795							  &conn->init_addr_type);
5796			}
5797		}
5798	} else {
5799		cancel_delayed_work(&conn->le_conn_timeout);
5800	}
5801
5802	/* The HCI_LE_Connection_Complete event is only sent once per connection.
5803	 * Processing it more than once per connection can corrupt kernel memory.
5804	 *
5805	 * As the connection handle is set here for the first time, it indicates
5806	 * whether the connection is already set up.
5807	 */
5808	if (!HCI_CONN_HANDLE_UNSET(conn->handle)) {
5809		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
5810		goto unlock;
5811	}
5812
5813	le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5814
5815	/* Lookup the identity address from the stored connection
5816	 * address and address type.
5817	 *
5818	 * When establishing connections to an identity address, the
5819	 * connection procedure will store the resolvable random
5820	 * address first. Now if it can be converted back into the
5821	 * identity address, start using the identity address from
5822	 * now on.
5823	 */
5824	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5825	if (irk) {
5826		bacpy(&conn->dst, &irk->bdaddr);
5827		conn->dst_type = irk->addr_type;
5828	}
5829
5830	conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
5831
5832	/* All connection failure handling is taken care of by the
5833	 * hci_conn_failed function which is triggered by the HCI
5834	 * request completion callbacks used for connecting.
5835	 */
5836	if (status || hci_conn_set_handle(conn, handle))
5837		goto unlock;
5838
5839	/* Drop the connection if it has been aborted */
5840	if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
5841		hci_conn_drop(conn);
5842		goto unlock;
5843	}
5844
5845	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5846		addr_type = BDADDR_LE_PUBLIC;
5847	else
5848		addr_type = BDADDR_LE_RANDOM;
5849
5850	/* Drop the connection if the device is blocked */
5851	if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5852		hci_conn_drop(conn);
5853		goto unlock;
5854	}
5855
5856	mgmt_device_connected(hdev, conn, NULL, 0);
5857
5858	conn->sec_level = BT_SECURITY_LOW;
5859	conn->state = BT_CONFIG;
5860
5861	/* Store current advertising instance as connection advertising instance
5862	 * when sotfware rotation is in use so it can be re-enabled when
5863	 * disconnected.
5864	 */
5865	if (!ext_adv_capable(hdev))
5866		conn->adv_instance = hdev->cur_adv_instance;
5867
5868	conn->le_conn_interval = interval;
5869	conn->le_conn_latency = latency;
5870	conn->le_supv_timeout = supervision_timeout;
5871
5872	hci_debugfs_create_conn(conn);
5873	hci_conn_add_sysfs(conn);
5874
5875	/* The remote features procedure is defined for central
5876	 * role only. So only in case of an initiated connection
5877	 * request the remote features.
5878	 *
5879	 * If the local controller supports peripheral-initiated features
5880	 * exchange, then requesting the remote features in peripheral
5881	 * role is possible. Otherwise just transition into the
5882	 * connected state without requesting the remote features.
5883	 */
5884	if (conn->out ||
5885	    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5886		struct hci_cp_le_read_remote_features cp;
5887
5888		cp.handle = __cpu_to_le16(conn->handle);
5889
5890		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5891			     sizeof(cp), &cp);
5892
5893		hci_conn_hold(conn);
5894	} else {
5895		conn->state = BT_CONNECTED;
5896		hci_connect_cfm(conn, status);
5897	}
5898
5899	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5900					   conn->dst_type);
5901	if (params) {
5902		hci_pend_le_list_del_init(params);
5903		if (params->conn) {
5904			hci_conn_drop(params->conn);
5905			hci_conn_put(params->conn);
5906			params->conn = NULL;
5907		}
5908	}
5909
5910unlock:
5911	hci_update_passive_scan(hdev);
5912	hci_dev_unlock(hdev);
5913}
5914
5915static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
5916				     struct sk_buff *skb)
5917{
5918	struct hci_ev_le_conn_complete *ev = data;
5919
5920	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5921
5922	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5923			     NULL, ev->role, le16_to_cpu(ev->handle),
5924			     le16_to_cpu(ev->interval),
5925			     le16_to_cpu(ev->latency),
5926			     le16_to_cpu(ev->supervision_timeout));
5927}
5928
5929static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
5930					 struct sk_buff *skb)
5931{
5932	struct hci_ev_le_enh_conn_complete *ev = data;
5933
5934	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5935
5936	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5937			     &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5938			     le16_to_cpu(ev->interval),
5939			     le16_to_cpu(ev->latency),
5940			     le16_to_cpu(ev->supervision_timeout));
5941}
5942
5943static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
5944				    struct sk_buff *skb)
5945{
5946	struct hci_evt_le_ext_adv_set_term *ev = data;
5947	struct hci_conn *conn;
5948	struct adv_info *adv, *n;
5949
5950	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5951
5952	/* The Bluetooth Core 5.3 specification clearly states that this event
5953	 * shall not be sent when the Host disables the advertising set. So in
5954	 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
5955	 *
5956	 * When the Host disables an advertising set, all cleanup is done via
5957	 * its command callback and not needed to be duplicated here.
5958	 */
5959	if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
5960		bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
5961		return;
5962	}
5963
5964	hci_dev_lock(hdev);
5965
5966	adv = hci_find_adv_instance(hdev, ev->handle);
5967
5968	if (ev->status) {
5969		if (!adv)
5970			goto unlock;
5971
5972		/* Remove advertising as it has been terminated */
5973		hci_remove_adv_instance(hdev, ev->handle);
5974		mgmt_advertising_removed(NULL, hdev, ev->handle);
5975
5976		list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
5977			if (adv->enabled)
5978				goto unlock;
5979		}
5980
5981		/* We are no longer advertising, clear HCI_LE_ADV */
5982		hci_dev_clear_flag(hdev, HCI_LE_ADV);
5983		goto unlock;
5984	}
5985
5986	if (adv)
5987		adv->enabled = false;
5988
5989	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5990	if (conn) {
5991		/* Store handle in the connection so the correct advertising
5992		 * instance can be re-enabled when disconnected.
5993		 */
5994		conn->adv_instance = ev->handle;
5995
5996		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5997		    bacmp(&conn->resp_addr, BDADDR_ANY))
5998			goto unlock;
5999
6000		if (!ev->handle) {
6001			bacpy(&conn->resp_addr, &hdev->random_addr);
6002			goto unlock;
6003		}
6004
6005		if (adv)
6006			bacpy(&conn->resp_addr, &adv->random_addr);
6007	}
6008
6009unlock:
6010	hci_dev_unlock(hdev);
6011}
6012
6013static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
6014					    struct sk_buff *skb)
6015{
6016	struct hci_ev_le_conn_update_complete *ev = data;
6017	struct hci_conn *conn;
6018
6019	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6020
6021	if (ev->status)
6022		return;
6023
6024	hci_dev_lock(hdev);
6025
6026	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6027	if (conn) {
6028		conn->le_conn_interval = le16_to_cpu(ev->interval);
6029		conn->le_conn_latency = le16_to_cpu(ev->latency);
6030		conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
6031	}
6032
6033	hci_dev_unlock(hdev);
6034}
6035
6036/* This function requires the caller holds hdev->lock */
6037static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
6038					      bdaddr_t *addr,
6039					      u8 addr_type, bool addr_resolved,
6040					      u8 adv_type, u8 phy, u8 sec_phy)
6041{
6042	struct hci_conn *conn;
6043	struct hci_conn_params *params;
6044
6045	/* If the event is not connectable don't proceed further */
6046	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
6047		return NULL;
6048
6049	/* Ignore if the device is blocked or hdev is suspended */
6050	if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
6051	    hdev->suspended)
6052		return NULL;
6053
6054	/* Most controller will fail if we try to create new connections
6055	 * while we have an existing one in peripheral role.
6056	 */
6057	if (hdev->conn_hash.le_num_peripheral > 0 &&
6058	    (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
6059	     !(hdev->le_states[3] & 0x10)))
6060		return NULL;
6061
6062	/* If we're not connectable only connect devices that we have in
6063	 * our pend_le_conns list.
6064	 */
6065	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
6066					   addr_type);
6067	if (!params)
6068		return NULL;
6069
6070	if (!params->explicit_connect) {
6071		switch (params->auto_connect) {
6072		case HCI_AUTO_CONN_DIRECT:
6073			/* Only devices advertising with ADV_DIRECT_IND are
6074			 * triggering a connection attempt. This is allowing
6075			 * incoming connections from peripheral devices.
6076			 */
6077			if (adv_type != LE_ADV_DIRECT_IND)
6078				return NULL;
6079			break;
6080		case HCI_AUTO_CONN_ALWAYS:
6081			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
6082			 * are triggering a connection attempt. This means
6083			 * that incoming connections from peripheral device are
6084			 * accepted and also outgoing connections to peripheral
6085			 * devices are established when found.
6086			 */
6087			break;
6088		default:
6089			return NULL;
6090		}
6091	}
6092
6093	conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
6094			      BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
6095			      HCI_ROLE_MASTER, phy, sec_phy);
6096	if (!IS_ERR(conn)) {
6097		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
6098		 * by higher layer that tried to connect, if no then
6099		 * store the pointer since we don't really have any
6100		 * other owner of the object besides the params that
6101		 * triggered it. This way we can abort the connection if
6102		 * the parameters get removed and keep the reference
6103		 * count consistent once the connection is established.
6104		 */
6105
6106		if (!params->explicit_connect)
6107			params->conn = hci_conn_get(conn);
6108
6109		return conn;
6110	}
6111
6112	switch (PTR_ERR(conn)) {
6113	case -EBUSY:
6114		/* If hci_connect() returns -EBUSY it means there is already
6115		 * an LE connection attempt going on. Since controllers don't
6116		 * support more than one connection attempt at the time, we
6117		 * don't consider this an error case.
6118		 */
6119		break;
6120	default:
6121		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
6122		return NULL;
6123	}
6124
6125	return NULL;
6126}
6127
6128static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
6129			       u8 bdaddr_type, bdaddr_t *direct_addr,
6130			       u8 direct_addr_type, u8 phy, u8 sec_phy, s8 rssi,
6131			       u8 *data, u8 len, bool ext_adv, bool ctl_time,
6132			       u64 instant)
6133{
6134	struct discovery_state *d = &hdev->discovery;
6135	struct smp_irk *irk;
6136	struct hci_conn *conn;
6137	bool match, bdaddr_resolved;
6138	u32 flags;
6139	u8 *ptr;
6140
6141	switch (type) {
6142	case LE_ADV_IND:
6143	case LE_ADV_DIRECT_IND:
6144	case LE_ADV_SCAN_IND:
6145	case LE_ADV_NONCONN_IND:
6146	case LE_ADV_SCAN_RSP:
6147		break;
6148	default:
6149		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
6150				       "type: 0x%02x", type);
6151		return;
6152	}
6153
6154	if (len > max_adv_len(hdev)) {
6155		bt_dev_err_ratelimited(hdev,
6156				       "adv larger than maximum supported");
6157		return;
6158	}
6159
6160	/* Find the end of the data in case the report contains padded zero
6161	 * bytes at the end causing an invalid length value.
6162	 *
6163	 * When data is NULL, len is 0 so there is no need for extra ptr
6164	 * check as 'ptr < data + 0' is already false in such case.
6165	 */
6166	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
6167		if (ptr + 1 + *ptr > data + len)
6168			break;
6169	}
6170
6171	/* Adjust for actual length. This handles the case when remote
6172	 * device is advertising with incorrect data length.
6173	 */
6174	len = ptr - data;
6175
6176	/* If the direct address is present, then this report is from
6177	 * a LE Direct Advertising Report event. In that case it is
6178	 * important to see if the address is matching the local
6179	 * controller address.
6180	 */
6181	if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
6182		direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
6183						  &bdaddr_resolved);
6184
6185		/* Only resolvable random addresses are valid for these
6186		 * kind of reports and others can be ignored.
6187		 */
6188		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
6189			return;
6190
6191		/* If the controller is not using resolvable random
6192		 * addresses, then this report can be ignored.
6193		 */
6194		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
6195			return;
6196
6197		/* If the local IRK of the controller does not match
6198		 * with the resolvable random address provided, then
6199		 * this report can be ignored.
6200		 */
6201		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
6202			return;
6203	}
6204
6205	/* Check if we need to convert to identity address */
6206	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
6207	if (irk) {
6208		bdaddr = &irk->bdaddr;
6209		bdaddr_type = irk->addr_type;
6210	}
6211
6212	bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
6213
6214	/* Check if we have been requested to connect to this device.
6215	 *
6216	 * direct_addr is set only for directed advertising reports (it is NULL
6217	 * for advertising reports) and is already verified to be RPA above.
6218	 */
6219	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
6220				     type, phy, sec_phy);
6221	if (!ext_adv && conn && type == LE_ADV_IND &&
6222	    len <= max_adv_len(hdev)) {
6223		/* Store report for later inclusion by
6224		 * mgmt_device_connected
6225		 */
6226		memcpy(conn->le_adv_data, data, len);
6227		conn->le_adv_data_len = len;
6228	}
6229
6230	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
6231		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6232	else
6233		flags = 0;
6234
6235	/* All scan results should be sent up for Mesh systems */
6236	if (hci_dev_test_flag(hdev, HCI_MESH)) {
6237		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6238				  rssi, flags, data, len, NULL, 0, instant);
6239		return;
6240	}
6241
6242	/* Passive scanning shouldn't trigger any device found events,
6243	 * except for devices marked as CONN_REPORT for which we do send
6244	 * device found events, or advertisement monitoring requested.
6245	 */
6246	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
6247		if (type == LE_ADV_DIRECT_IND)
6248			return;
6249
6250		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
6251					       bdaddr, bdaddr_type) &&
6252		    idr_is_empty(&hdev->adv_monitors_idr))
6253			return;
6254
6255		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6256				  rssi, flags, data, len, NULL, 0, 0);
6257		return;
6258	}
6259
6260	/* When receiving a scan response, then there is no way to
6261	 * know if the remote device is connectable or not. However
6262	 * since scan responses are merged with a previously seen
6263	 * advertising report, the flags field from that report
6264	 * will be used.
6265	 *
6266	 * In the unlikely case that a controller just sends a scan
6267	 * response event that doesn't match the pending report, then
6268	 * it is marked as a standalone SCAN_RSP.
6269	 */
6270	if (type == LE_ADV_SCAN_RSP)
6271		flags = MGMT_DEV_FOUND_SCAN_RSP;
6272
6273	/* If there's nothing pending either store the data from this
6274	 * event or send an immediate device found event if the data
6275	 * should not be stored for later.
6276	 */
6277	if (!ext_adv &&	!has_pending_adv_report(hdev)) {
6278		/* If the report will trigger a SCAN_REQ store it for
6279		 * later merging.
6280		 */
6281		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
6282			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6283						 rssi, flags, data, len);
6284			return;
6285		}
6286
6287		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6288				  rssi, flags, data, len, NULL, 0, 0);
6289		return;
6290	}
6291
6292	/* Check if the pending report is for the same device as the new one */
6293	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
6294		 bdaddr_type == d->last_adv_addr_type);
6295
6296	/* If the pending data doesn't match this report or this isn't a
6297	 * scan response (e.g. we got a duplicate ADV_IND) then force
6298	 * sending of the pending data.
6299	 */
6300	if (type != LE_ADV_SCAN_RSP || !match) {
6301		/* Send out whatever is in the cache, but skip duplicates */
6302		if (!match)
6303			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6304					  d->last_adv_addr_type, NULL,
6305					  d->last_adv_rssi, d->last_adv_flags,
6306					  d->last_adv_data,
6307					  d->last_adv_data_len, NULL, 0, 0);
6308
6309		/* If the new report will trigger a SCAN_REQ store it for
6310		 * later merging.
6311		 */
6312		if (!ext_adv && (type == LE_ADV_IND ||
6313				 type == LE_ADV_SCAN_IND)) {
6314			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6315						 rssi, flags, data, len);
6316			return;
6317		}
6318
6319		/* The advertising reports cannot be merged, so clear
6320		 * the pending report and send out a device found event.
6321		 */
6322		clear_pending_adv_report(hdev);
6323		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6324				  rssi, flags, data, len, NULL, 0, 0);
6325		return;
6326	}
6327
6328	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
6329	 * the new event is a SCAN_RSP. We can therefore proceed with
6330	 * sending a merged device found event.
6331	 */
6332	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6333			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
6334			  d->last_adv_data, d->last_adv_data_len, data, len, 0);
6335	clear_pending_adv_report(hdev);
6336}
6337
6338static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
6339				  struct sk_buff *skb)
6340{
6341	struct hci_ev_le_advertising_report *ev = data;
6342	u64 instant = jiffies;
6343
6344	if (!ev->num)
6345		return;
6346
6347	hci_dev_lock(hdev);
6348
6349	while (ev->num--) {
6350		struct hci_ev_le_advertising_info *info;
6351		s8 rssi;
6352
6353		info = hci_le_ev_skb_pull(hdev, skb,
6354					  HCI_EV_LE_ADVERTISING_REPORT,
6355					  sizeof(*info));
6356		if (!info)
6357			break;
6358
6359		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
6360					info->length + 1))
6361			break;
6362
6363		if (info->length <= max_adv_len(hdev)) {
6364			rssi = info->data[info->length];
6365			process_adv_report(hdev, info->type, &info->bdaddr,
6366					   info->bdaddr_type, NULL, 0,
6367					   HCI_ADV_PHY_1M, 0, rssi,
6368					   info->data, info->length, false,
6369					   false, instant);
6370		} else {
6371			bt_dev_err(hdev, "Dropping invalid advertising data");
6372		}
6373	}
6374
6375	hci_dev_unlock(hdev);
6376}
6377
6378static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6379{
6380	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6381		switch (evt_type) {
6382		case LE_LEGACY_ADV_IND:
6383			return LE_ADV_IND;
6384		case LE_LEGACY_ADV_DIRECT_IND:
6385			return LE_ADV_DIRECT_IND;
6386		case LE_LEGACY_ADV_SCAN_IND:
6387			return LE_ADV_SCAN_IND;
6388		case LE_LEGACY_NONCONN_IND:
6389			return LE_ADV_NONCONN_IND;
6390		case LE_LEGACY_SCAN_RSP_ADV:
6391		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6392			return LE_ADV_SCAN_RSP;
6393		}
6394
6395		goto invalid;
6396	}
6397
6398	if (evt_type & LE_EXT_ADV_CONN_IND) {
6399		if (evt_type & LE_EXT_ADV_DIRECT_IND)
6400			return LE_ADV_DIRECT_IND;
6401
6402		return LE_ADV_IND;
6403	}
6404
6405	if (evt_type & LE_EXT_ADV_SCAN_RSP)
6406		return LE_ADV_SCAN_RSP;
6407
6408	if (evt_type & LE_EXT_ADV_SCAN_IND)
6409		return LE_ADV_SCAN_IND;
6410
6411	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6412	    evt_type & LE_EXT_ADV_DIRECT_IND)
6413		return LE_ADV_NONCONN_IND;
6414
6415invalid:
6416	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6417			       evt_type);
6418
6419	return LE_ADV_INVALID;
6420}
6421
6422static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
6423				      struct sk_buff *skb)
6424{
6425	struct hci_ev_le_ext_adv_report *ev = data;
6426	u64 instant = jiffies;
6427
6428	if (!ev->num)
6429		return;
6430
6431	hci_dev_lock(hdev);
6432
6433	while (ev->num--) {
6434		struct hci_ev_le_ext_adv_info *info;
6435		u8 legacy_evt_type;
6436		u16 evt_type;
6437
6438		info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6439					  sizeof(*info));
6440		if (!info)
6441			break;
6442
6443		if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6444					info->length))
6445			break;
6446
6447		evt_type = __le16_to_cpu(info->type) & LE_EXT_ADV_EVT_TYPE_MASK;
6448		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
6449		if (legacy_evt_type != LE_ADV_INVALID) {
6450			process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
6451					   info->bdaddr_type, NULL, 0,
6452					   info->primary_phy,
6453					   info->secondary_phy,
6454					   info->rssi, info->data, info->length,
6455					   !(evt_type & LE_EXT_ADV_LEGACY_PDU),
6456					   false, instant);
6457		}
6458	}
6459
6460	hci_dev_unlock(hdev);
6461}
6462
6463static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6464{
6465	struct hci_cp_le_pa_term_sync cp;
6466
6467	memset(&cp, 0, sizeof(cp));
6468	cp.handle = handle;
6469
6470	return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
6471}
6472
6473static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
6474					    struct sk_buff *skb)
6475{
6476	struct hci_ev_le_pa_sync_established *ev = data;
6477	int mask = hdev->link_mode;
6478	__u8 flags = 0;
6479	struct hci_conn *pa_sync;
6480
6481	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6482
6483	hci_dev_lock(hdev);
6484
6485	hci_dev_clear_flag(hdev, HCI_PA_SYNC);
6486
6487	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
6488	if (!(mask & HCI_LM_ACCEPT)) {
6489		hci_le_pa_term_sync(hdev, ev->handle);
6490		goto unlock;
6491	}
6492
6493	if (!(flags & HCI_PROTO_DEFER))
6494		goto unlock;
6495
6496	if (ev->status) {
6497		/* Add connection to indicate the failed PA sync event */
6498		pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
6499					     HCI_ROLE_SLAVE);
6500
6501		if (!pa_sync)
6502			goto unlock;
6503
6504		set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);
6505
6506		/* Notify iso layer */
6507		hci_connect_cfm(pa_sync, ev->status);
6508	}
6509
6510unlock:
6511	hci_dev_unlock(hdev);
6512}
6513
6514static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
6515				      struct sk_buff *skb)
6516{
6517	struct hci_ev_le_per_adv_report *ev = data;
6518	int mask = hdev->link_mode;
6519	__u8 flags = 0;
6520
6521	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
6522
6523	hci_dev_lock(hdev);
6524
6525	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
6526	if (!(mask & HCI_LM_ACCEPT))
6527		hci_le_pa_term_sync(hdev, ev->sync_handle);
6528
6529	hci_dev_unlock(hdev);
6530}
6531
6532static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
6533					    struct sk_buff *skb)
6534{
6535	struct hci_ev_le_remote_feat_complete *ev = data;
6536	struct hci_conn *conn;
6537
6538	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6539
6540	hci_dev_lock(hdev);
6541
6542	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6543	if (conn) {
6544		if (!ev->status)
6545			memcpy(conn->features[0], ev->features, 8);
6546
6547		if (conn->state == BT_CONFIG) {
6548			__u8 status;
6549
6550			/* If the local controller supports peripheral-initiated
6551			 * features exchange, but the remote controller does
6552			 * not, then it is possible that the error code 0x1a
6553			 * for unsupported remote feature gets returned.
6554			 *
6555			 * In this specific case, allow the connection to
6556			 * transition into connected state and mark it as
6557			 * successful.
6558			 */
6559			if (!conn->out && ev->status == HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE &&
6560			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6561				status = 0x00;
6562			else
6563				status = ev->status;
6564
6565			conn->state = BT_CONNECTED;
6566			hci_connect_cfm(conn, status);
6567			hci_conn_drop(conn);
6568		}
6569	}
6570
6571	hci_dev_unlock(hdev);
6572}
6573
6574static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
6575				   struct sk_buff *skb)
6576{
6577	struct hci_ev_le_ltk_req *ev = data;
6578	struct hci_cp_le_ltk_reply cp;
6579	struct hci_cp_le_ltk_neg_reply neg;
6580	struct hci_conn *conn;
6581	struct smp_ltk *ltk;
6582
6583	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6584
6585	hci_dev_lock(hdev);
6586
6587	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6588	if (conn == NULL)
6589		goto not_found;
6590
6591	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
6592	if (!ltk)
6593		goto not_found;
6594
6595	if (smp_ltk_is_sc(ltk)) {
6596		/* With SC both EDiv and Rand are set to zero */
6597		if (ev->ediv || ev->rand)
6598			goto not_found;
6599	} else {
6600		/* For non-SC keys check that EDiv and Rand match */
6601		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
6602			goto not_found;
6603	}
6604
6605	memcpy(cp.ltk, ltk->val, ltk->enc_size);
6606	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
6607	cp.handle = cpu_to_le16(conn->handle);
6608
6609	conn->pending_sec_level = smp_ltk_sec_level(ltk);
6610
6611	conn->enc_key_size = ltk->enc_size;
6612
6613	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
6614
6615	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
6616	 * temporary key used to encrypt a connection following
6617	 * pairing. It is used during the Encrypted Session Setup to
6618	 * distribute the keys. Later, security can be re-established
6619	 * using a distributed LTK.
6620	 */
6621	if (ltk->type == SMP_STK) {
6622		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6623		list_del_rcu(&ltk->list);
6624		kfree_rcu(ltk, rcu);
6625	} else {
6626		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6627	}
6628
6629	hci_dev_unlock(hdev);
6630
6631	return;
6632
6633not_found:
6634	neg.handle = ev->handle;
6635	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6636	hci_dev_unlock(hdev);
6637}
6638
6639static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6640				      u8 reason)
6641{
6642	struct hci_cp_le_conn_param_req_neg_reply cp;
6643
6644	cp.handle = cpu_to_le16(handle);
6645	cp.reason = reason;
6646
6647	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6648		     &cp);
6649}
6650
6651static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
6652					     struct sk_buff *skb)
6653{
6654	struct hci_ev_le_remote_conn_param_req *ev = data;
6655	struct hci_cp_le_conn_param_req_reply cp;
6656	struct hci_conn *hcon;
6657	u16 handle, min, max, latency, timeout;
6658
6659	bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6660
6661	handle = le16_to_cpu(ev->handle);
6662	min = le16_to_cpu(ev->interval_min);
6663	max = le16_to_cpu(ev->interval_max);
6664	latency = le16_to_cpu(ev->latency);
6665	timeout = le16_to_cpu(ev->timeout);
6666
6667	hcon = hci_conn_hash_lookup_handle(hdev, handle);
6668	if (!hcon || hcon->state != BT_CONNECTED)
6669		return send_conn_param_neg_reply(hdev, handle,
6670						 HCI_ERROR_UNKNOWN_CONN_ID);
6671
6672	if (max > hcon->le_conn_max_interval)
6673		return send_conn_param_neg_reply(hdev, handle,
6674						 HCI_ERROR_INVALID_LL_PARAMS);
6675
6676	if (hci_check_conn_params(min, max, latency, timeout))
6677		return send_conn_param_neg_reply(hdev, handle,
6678						 HCI_ERROR_INVALID_LL_PARAMS);
6679
6680	if (hcon->role == HCI_ROLE_MASTER) {
6681		struct hci_conn_params *params;
6682		u8 store_hint;
6683
6684		hci_dev_lock(hdev);
6685
6686		params = hci_conn_params_lookup(hdev, &hcon->dst,
6687						hcon->dst_type);
6688		if (params) {
6689			params->conn_min_interval = min;
6690			params->conn_max_interval = max;
6691			params->conn_latency = latency;
6692			params->supervision_timeout = timeout;
6693			store_hint = 0x01;
6694		} else {
6695			store_hint = 0x00;
6696		}
6697
6698		hci_dev_unlock(hdev);
6699
6700		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6701				    store_hint, min, max, latency, timeout);
6702	}
6703
6704	cp.handle = ev->handle;
6705	cp.interval_min = ev->interval_min;
6706	cp.interval_max = ev->interval_max;
6707	cp.latency = ev->latency;
6708	cp.timeout = ev->timeout;
6709	cp.min_ce_len = 0;
6710	cp.max_ce_len = 0;
6711
6712	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
6713}
6714
6715static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
6716					 struct sk_buff *skb)
6717{
6718	struct hci_ev_le_direct_adv_report *ev = data;
6719	u64 instant = jiffies;
6720	int i;
6721
6722	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
6723				flex_array_size(ev, info, ev->num)))
6724		return;
6725
6726	if (!ev->num)
6727		return;
6728
6729	hci_dev_lock(hdev);
6730
6731	for (i = 0; i < ev->num; i++) {
6732		struct hci_ev_le_direct_adv_info *info = &ev->info[i];
6733
6734		process_adv_report(hdev, info->type, &info->bdaddr,
6735				   info->bdaddr_type, &info->direct_addr,
6736				   info->direct_addr_type, HCI_ADV_PHY_1M, 0,
6737				   info->rssi, NULL, 0, false, false, instant);
6738	}
6739
6740	hci_dev_unlock(hdev);
6741}
6742
6743static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
6744				  struct sk_buff *skb)
6745{
6746	struct hci_ev_le_phy_update_complete *ev = data;
6747	struct hci_conn *conn;
6748
6749	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6750
6751	if (ev->status)
6752		return;
6753
6754	hci_dev_lock(hdev);
6755
6756	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6757	if (!conn)
6758		goto unlock;
6759
6760	conn->le_tx_phy = ev->tx_phy;
6761	conn->le_rx_phy = ev->rx_phy;
6762
6763unlock:
6764	hci_dev_unlock(hdev);
6765}
6766
6767static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
6768					struct sk_buff *skb)
6769{
6770	struct hci_evt_le_cis_established *ev = data;
6771	struct hci_conn *conn;
6772	struct bt_iso_qos *qos;
6773	bool pending = false;
6774	u16 handle = __le16_to_cpu(ev->handle);
6775
6776	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6777
6778	hci_dev_lock(hdev);
6779
6780	conn = hci_conn_hash_lookup_handle(hdev, handle);
6781	if (!conn) {
6782		bt_dev_err(hdev,
6783			   "Unable to find connection with handle 0x%4.4x",
6784			   handle);
6785		goto unlock;
6786	}
6787
6788	if (conn->type != ISO_LINK) {
6789		bt_dev_err(hdev,
6790			   "Invalid connection link type handle 0x%4.4x",
6791			   handle);
6792		goto unlock;
6793	}
6794
6795	qos = &conn->iso_qos;
6796
6797	pending = test_and_clear_bit(HCI_CONN_CREATE_CIS, &conn->flags);
6798
6799	/* Convert ISO Interval (1.25 ms slots) to SDU Interval (us) */
6800	qos->ucast.in.interval = le16_to_cpu(ev->interval) * 1250;
6801	qos->ucast.out.interval = qos->ucast.in.interval;
6802
6803	switch (conn->role) {
6804	case HCI_ROLE_SLAVE:
6805		/* Convert Transport Latency (us) to Latency (msec) */
6806		qos->ucast.in.latency =
6807			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
6808					  1000);
6809		qos->ucast.out.latency =
6810			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
6811					  1000);
6812		qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu);
6813		qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu);
6814		qos->ucast.in.phy = ev->c_phy;
6815		qos->ucast.out.phy = ev->p_phy;
6816		break;
6817	case HCI_ROLE_MASTER:
6818		/* Convert Transport Latency (us) to Latency (msec) */
6819		qos->ucast.out.latency =
6820			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->c_latency),
6821					  1000);
6822		qos->ucast.in.latency =
6823			DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
6824					  1000);
6825		qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu);
6826		qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu);
6827		qos->ucast.out.phy = ev->c_phy;
6828		qos->ucast.in.phy = ev->p_phy;
6829		break;
6830	}
6831
6832	if (!ev->status) {
6833		conn->state = BT_CONNECTED;
6834		hci_debugfs_create_conn(conn);
6835		hci_conn_add_sysfs(conn);
6836		hci_iso_setup_path(conn);
6837		goto unlock;
6838	}
6839
6840	conn->state = BT_CLOSED;
6841	hci_connect_cfm(conn, ev->status);
6842	hci_conn_del(conn);
6843
6844unlock:
6845	if (pending)
6846		hci_le_create_cis_pending(hdev);
6847
6848	hci_dev_unlock(hdev);
6849}
6850
6851static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
6852{
6853	struct hci_cp_le_reject_cis cp;
6854
6855	memset(&cp, 0, sizeof(cp));
6856	cp.handle = handle;
6857	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
6858	hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
6859}
6860
6861static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
6862{
6863	struct hci_cp_le_accept_cis cp;
6864
6865	memset(&cp, 0, sizeof(cp));
6866	cp.handle = handle;
6867	hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
6868}
6869
6870static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
6871			       struct sk_buff *skb)
6872{
6873	struct hci_evt_le_cis_req *ev = data;
6874	u16 acl_handle, cis_handle;
6875	struct hci_conn *acl, *cis;
6876	int mask;
6877	__u8 flags = 0;
6878
6879	acl_handle = __le16_to_cpu(ev->acl_handle);
6880	cis_handle = __le16_to_cpu(ev->cis_handle);
6881
6882	bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
6883		   acl_handle, cis_handle, ev->cig_id, ev->cis_id);
6884
6885	hci_dev_lock(hdev);
6886
6887	acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
6888	if (!acl)
6889		goto unlock;
6890
6891	mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
6892	if (!(mask & HCI_LM_ACCEPT)) {
6893		hci_le_reject_cis(hdev, ev->cis_handle);
6894		goto unlock;
6895	}
6896
6897	cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
6898	if (!cis) {
6899		cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE,
6900				   cis_handle);
6901		if (!cis) {
6902			hci_le_reject_cis(hdev, ev->cis_handle);
6903			goto unlock;
6904		}
6905	}
6906
6907	cis->iso_qos.ucast.cig = ev->cig_id;
6908	cis->iso_qos.ucast.cis = ev->cis_id;
6909
6910	if (!(flags & HCI_PROTO_DEFER)) {
6911		hci_le_accept_cis(hdev, ev->cis_handle);
6912	} else {
6913		cis->state = BT_CONNECT2;
6914		hci_connect_cfm(cis, 0);
6915	}
6916
6917unlock:
6918	hci_dev_unlock(hdev);
6919}
6920
6921static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data)
6922{
6923	u8 handle = PTR_UINT(data);
6924
6925	return hci_le_terminate_big_sync(hdev, handle,
6926					 HCI_ERROR_LOCAL_HOST_TERM);
6927}
6928
6929static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
6930					   struct sk_buff *skb)
6931{
6932	struct hci_evt_le_create_big_complete *ev = data;
6933	struct hci_conn *conn;
6934	__u8 i = 0;
6935
6936	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
6937
6938	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
6939				flex_array_size(ev, bis_handle, ev->num_bis)))
6940		return;
6941
6942	hci_dev_lock(hdev);
6943	rcu_read_lock();
6944
6945	/* Connect all BISes that are bound to the BIG */
6946	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6947		if (bacmp(&conn->dst, BDADDR_ANY) ||
6948		    conn->type != ISO_LINK ||
6949		    conn->iso_qos.bcast.big != ev->handle)
6950			continue;
6951
6952		if (hci_conn_set_handle(conn,
6953					__le16_to_cpu(ev->bis_handle[i++])))
6954			continue;
6955
6956		if (!ev->status) {
6957			conn->state = BT_CONNECTED;
6958			set_bit(HCI_CONN_BIG_CREATED, &conn->flags);
6959			rcu_read_unlock();
6960			hci_debugfs_create_conn(conn);
6961			hci_conn_add_sysfs(conn);
6962			hci_iso_setup_path(conn);
6963			rcu_read_lock();
6964			continue;
6965		}
6966
6967		hci_connect_cfm(conn, ev->status);
6968		rcu_read_unlock();
6969		hci_conn_del(conn);
6970		rcu_read_lock();
6971	}
6972
6973	rcu_read_unlock();
6974
6975	if (!ev->status && !i)
6976		/* If no BISes have been connected for the BIG,
6977		 * terminate. This is in case all bound connections
6978		 * have been closed before the BIG creation
6979		 * has completed.
6980		 */
6981		hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
6982				   UINT_PTR(ev->handle), NULL);
6983
6984	hci_dev_unlock(hdev);
6985}
6986
6987static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
6988					    struct sk_buff *skb)
6989{
6990	struct hci_evt_le_big_sync_estabilished *ev = data;
6991	struct hci_conn *bis;
6992	int i;
6993
6994	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6995
6996	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
6997				flex_array_size(ev, bis, ev->num_bis)))
6998		return;
6999
7000	hci_dev_lock(hdev);
7001
7002	for (i = 0; i < ev->num_bis; i++) {
7003		u16 handle = le16_to_cpu(ev->bis[i]);
7004		__le32 interval;
7005
7006		bis = hci_conn_hash_lookup_handle(hdev, handle);
7007		if (!bis) {
7008			bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
7009					   HCI_ROLE_SLAVE, handle);
7010			if (!bis)
7011				continue;
7012		}
7013
7014		if (ev->status != 0x42)
7015			/* Mark PA sync as established */
7016			set_bit(HCI_CONN_PA_SYNC, &bis->flags);
7017
7018		bis->iso_qos.bcast.big = ev->handle;
7019		memset(&interval, 0, sizeof(interval));
7020		memcpy(&interval, ev->latency, sizeof(ev->latency));
7021		bis->iso_qos.bcast.in.interval = le32_to_cpu(interval);
7022		/* Convert ISO Interval (1.25 ms slots) to latency (ms) */
7023		bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
7024		bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);
7025
7026		if (!ev->status) {
7027			set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
7028			hci_iso_setup_path(bis);
7029		}
7030	}
7031
7032	/* In case BIG sync failed, notify each failed connection to
7033	 * the user after all hci connections have been added
7034	 */
7035	if (ev->status)
7036		for (i = 0; i < ev->num_bis; i++) {
7037			u16 handle = le16_to_cpu(ev->bis[i]);
7038
7039			bis = hci_conn_hash_lookup_handle(hdev, handle);
7040
7041			set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags);
7042			hci_connect_cfm(bis, ev->status);
7043		}
7044
7045	hci_dev_unlock(hdev);
7046}
7047
7048static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
7049					   struct sk_buff *skb)
7050{
7051	struct hci_evt_le_big_info_adv_report *ev = data;
7052	int mask = hdev->link_mode;
7053	__u8 flags = 0;
7054	struct hci_conn *pa_sync;
7055
7056	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
7057
7058	hci_dev_lock(hdev);
7059
7060	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
7061	if (!(mask & HCI_LM_ACCEPT)) {
7062		hci_le_pa_term_sync(hdev, ev->sync_handle);
7063		goto unlock;
7064	}
7065
7066	if (!(flags & HCI_PROTO_DEFER))
7067		goto unlock;
7068
7069	pa_sync = hci_conn_hash_lookup_pa_sync_handle
7070			(hdev,
7071			le16_to_cpu(ev->sync_handle));
7072
7073	if (pa_sync)
7074		goto unlock;
7075
7076	/* Add connection to indicate the PA sync event */
7077	pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
7078				     HCI_ROLE_SLAVE);
7079
7080	if (!pa_sync)
7081		goto unlock;
7082
7083	pa_sync->sync_handle = le16_to_cpu(ev->sync_handle);
7084	set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags);
7085
7086	/* Notify iso layer */
7087	hci_connect_cfm(pa_sync, 0x00);
7088
7089	/* Notify MGMT layer */
7090	mgmt_device_connected(hdev, pa_sync, NULL, 0);
7091
7092unlock:
7093	hci_dev_unlock(hdev);
7094}
7095
7096#define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
7097[_op] = { \
7098	.func = _func, \
7099	.min_len = _min_len, \
7100	.max_len = _max_len, \
7101}
7102
7103#define HCI_LE_EV(_op, _func, _len) \
7104	HCI_LE_EV_VL(_op, _func, _len, _len)
7105
7106#define HCI_LE_EV_STATUS(_op, _func) \
7107	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
7108
7109/* Entries in this table shall have their position according to the subevent
7110 * opcode they handle so the use of the macros above is recommend since it does
7111 * attempt to initialize at its proper index using Designated Initializers that
7112 * way events without a callback function can be ommited.
7113 */
7114static const struct hci_le_ev {
7115	void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
7116	u16  min_len;
7117	u16  max_len;
7118} hci_le_ev_table[U8_MAX + 1] = {
7119	/* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
7120	HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
7121		  sizeof(struct hci_ev_le_conn_complete)),
7122	/* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
7123	HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
7124		     sizeof(struct hci_ev_le_advertising_report),
7125		     HCI_MAX_EVENT_SIZE),
7126	/* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
7127	HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
7128		  hci_le_conn_update_complete_evt,
7129		  sizeof(struct hci_ev_le_conn_update_complete)),
7130	/* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
7131	HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
7132		  hci_le_remote_feat_complete_evt,
7133		  sizeof(struct hci_ev_le_remote_feat_complete)),
7134	/* [0x05 = HCI_EV_LE_LTK_REQ] */
7135	HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
7136		  sizeof(struct hci_ev_le_ltk_req)),
7137	/* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
7138	HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
7139		  hci_le_remote_conn_param_req_evt,
7140		  sizeof(struct hci_ev_le_remote_conn_param_req)),
7141	/* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
7142	HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
7143		  hci_le_enh_conn_complete_evt,
7144		  sizeof(struct hci_ev_le_enh_conn_complete)),
7145	/* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
7146	HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
7147		     sizeof(struct hci_ev_le_direct_adv_report),
7148		     HCI_MAX_EVENT_SIZE),
7149	/* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
7150	HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
7151		  sizeof(struct hci_ev_le_phy_update_complete)),
7152	/* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
7153	HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
7154		     sizeof(struct hci_ev_le_ext_adv_report),
7155		     HCI_MAX_EVENT_SIZE),
7156	/* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
7157	HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
7158		  hci_le_pa_sync_estabilished_evt,
7159		  sizeof(struct hci_ev_le_pa_sync_established)),
7160	/* [0x0f = HCI_EV_LE_PER_ADV_REPORT] */
7161	HCI_LE_EV_VL(HCI_EV_LE_PER_ADV_REPORT,
7162				 hci_le_per_adv_report_evt,
7163				 sizeof(struct hci_ev_le_per_adv_report),
7164				 HCI_MAX_EVENT_SIZE),
7165	/* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
7166	HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
7167		  sizeof(struct hci_evt_le_ext_adv_set_term)),
7168	/* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
7169	HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
7170		  sizeof(struct hci_evt_le_cis_established)),
7171	/* [0x1a = HCI_EVT_LE_CIS_REQ] */
7172	HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
7173		  sizeof(struct hci_evt_le_cis_req)),
7174	/* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
7175	HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
7176		     hci_le_create_big_complete_evt,
7177		     sizeof(struct hci_evt_le_create_big_complete),
7178		     HCI_MAX_EVENT_SIZE),
7179	/* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABILISHED] */
7180	HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7181		     hci_le_big_sync_established_evt,
7182		     sizeof(struct hci_evt_le_big_sync_estabilished),
7183		     HCI_MAX_EVENT_SIZE),
7184	/* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
7185	HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
7186		     hci_le_big_info_adv_report_evt,
7187		     sizeof(struct hci_evt_le_big_info_adv_report),
7188		     HCI_MAX_EVENT_SIZE),
7189};
7190
7191static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
7192			    struct sk_buff *skb, u16 *opcode, u8 *status,
7193			    hci_req_complete_t *req_complete,
7194			    hci_req_complete_skb_t *req_complete_skb)
7195{
7196	struct hci_ev_le_meta *ev = data;
7197	const struct hci_le_ev *subev;
7198
7199	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
7200
7201	/* Only match event if command OGF is for LE */
7202	if (hdev->req_skb &&
7203	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 &&
7204	    hci_skb_event(hdev->req_skb) == ev->subevent) {
7205		*opcode = hci_skb_opcode(hdev->req_skb);
7206		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
7207				     req_complete_skb);
7208	}
7209
7210	subev = &hci_le_ev_table[ev->subevent];
7211	if (!subev->func)
7212		return;
7213
7214	if (skb->len < subev->min_len) {
7215		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
7216			   ev->subevent, skb->len, subev->min_len);
7217		return;
7218	}
7219
7220	/* Just warn if the length is over max_len size it still be
7221	 * possible to partially parse the event so leave to callback to
7222	 * decide if that is acceptable.
7223	 */
7224	if (skb->len > subev->max_len)
7225		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
7226			    ev->subevent, skb->len, subev->max_len);
7227	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
7228	if (!data)
7229		return;
7230
7231	subev->func(hdev, data, skb);
7232}
7233
7234static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
7235				 u8 event, struct sk_buff *skb)
7236{
7237	struct hci_ev_cmd_complete *ev;
7238	struct hci_event_hdr *hdr;
7239
7240	if (!skb)
7241		return false;
7242
7243	hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
7244	if (!hdr)
7245		return false;
7246
7247	if (event) {
7248		if (hdr->evt != event)
7249			return false;
7250		return true;
7251	}
7252
7253	/* Check if request ended in Command Status - no way to retrieve
7254	 * any extra parameters in this case.
7255	 */
7256	if (hdr->evt == HCI_EV_CMD_STATUS)
7257		return false;
7258
7259	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
7260		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
7261			   hdr->evt);
7262		return false;
7263	}
7264
7265	ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
7266	if (!ev)
7267		return false;
7268
7269	if (opcode != __le16_to_cpu(ev->opcode)) {
7270		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
7271		       __le16_to_cpu(ev->opcode));
7272		return false;
7273	}
7274
7275	return true;
7276}
7277
7278static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
7279				  struct sk_buff *skb)
7280{
7281	struct hci_ev_le_advertising_info *adv;
7282	struct hci_ev_le_direct_adv_info *direct_adv;
7283	struct hci_ev_le_ext_adv_info *ext_adv;
7284	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
7285	const struct hci_ev_conn_request *conn_request = (void *)skb->data;
7286
7287	hci_dev_lock(hdev);
7288
7289	/* If we are currently suspended and this is the first BT event seen,
7290	 * save the wake reason associated with the event.
7291	 */
7292	if (!hdev->suspended || hdev->wake_reason)
7293		goto unlock;
7294
7295	/* Default to remote wake. Values for wake_reason are documented in the
7296	 * Bluez mgmt api docs.
7297	 */
7298	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
7299
7300	/* Once configured for remote wakeup, we should only wake up for
7301	 * reconnections. It's useful to see which device is waking us up so
7302	 * keep track of the bdaddr of the connection event that woke us up.
7303	 */
7304	if (event == HCI_EV_CONN_REQUEST) {
7305		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
7306		hdev->wake_addr_type = BDADDR_BREDR;
7307	} else if (event == HCI_EV_CONN_COMPLETE) {
7308		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
7309		hdev->wake_addr_type = BDADDR_BREDR;
7310	} else if (event == HCI_EV_LE_META) {
7311		struct hci_ev_le_meta *le_ev = (void *)skb->data;
7312		u8 subevent = le_ev->subevent;
7313		u8 *ptr = &skb->data[sizeof(*le_ev)];
7314		u8 num_reports = *ptr;
7315
7316		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
7317		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
7318		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
7319		    num_reports) {
7320			adv = (void *)(ptr + 1);
7321			direct_adv = (void *)(ptr + 1);
7322			ext_adv = (void *)(ptr + 1);
7323
7324			switch (subevent) {
7325			case HCI_EV_LE_ADVERTISING_REPORT:
7326				bacpy(&hdev->wake_addr, &adv->bdaddr);
7327				hdev->wake_addr_type = adv->bdaddr_type;
7328				break;
7329			case HCI_EV_LE_DIRECT_ADV_REPORT:
7330				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
7331				hdev->wake_addr_type = direct_adv->bdaddr_type;
7332				break;
7333			case HCI_EV_LE_EXT_ADV_REPORT:
7334				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
7335				hdev->wake_addr_type = ext_adv->bdaddr_type;
7336				break;
7337			}
7338		}
7339	} else {
7340		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
7341	}
7342
7343unlock:
7344	hci_dev_unlock(hdev);
7345}
7346
7347#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
7348[_op] = { \
7349	.req = false, \
7350	.func = _func, \
7351	.min_len = _min_len, \
7352	.max_len = _max_len, \
7353}
7354
7355#define HCI_EV(_op, _func, _len) \
7356	HCI_EV_VL(_op, _func, _len, _len)
7357
7358#define HCI_EV_STATUS(_op, _func) \
7359	HCI_EV(_op, _func, sizeof(struct hci_ev_status))
7360
7361#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
7362[_op] = { \
7363	.req = true, \
7364	.func_req = _func, \
7365	.min_len = _min_len, \
7366	.max_len = _max_len, \
7367}
7368
7369#define HCI_EV_REQ(_op, _func, _len) \
7370	HCI_EV_REQ_VL(_op, _func, _len, _len)
7371
7372/* Entries in this table shall have their position according to the event opcode
7373 * they handle so the use of the macros above is recommend since it does attempt
7374 * to initialize at its proper index using Designated Initializers that way
7375 * events without a callback function don't have entered.
7376 */
7377static const struct hci_ev {
7378	bool req;
7379	union {
7380		void (*func)(struct hci_dev *hdev, void *data,
7381			     struct sk_buff *skb);
7382		void (*func_req)(struct hci_dev *hdev, void *data,
7383				 struct sk_buff *skb, u16 *opcode, u8 *status,
7384				 hci_req_complete_t *req_complete,
7385				 hci_req_complete_skb_t *req_complete_skb);
7386	};
7387	u16  min_len;
7388	u16  max_len;
7389} hci_ev_table[U8_MAX + 1] = {
7390	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
7391	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
7392	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
7393	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
7394		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
7395	/* [0x03 = HCI_EV_CONN_COMPLETE] */
7396	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
7397	       sizeof(struct hci_ev_conn_complete)),
7398	/* [0x04 = HCI_EV_CONN_REQUEST] */
7399	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
7400	       sizeof(struct hci_ev_conn_request)),
7401	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
7402	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
7403	       sizeof(struct hci_ev_disconn_complete)),
7404	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
7405	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
7406	       sizeof(struct hci_ev_auth_complete)),
7407	/* [0x07 = HCI_EV_REMOTE_NAME] */
7408	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
7409	       sizeof(struct hci_ev_remote_name)),
7410	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
7411	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
7412	       sizeof(struct hci_ev_encrypt_change)),
7413	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
7414	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
7415	       hci_change_link_key_complete_evt,
7416	       sizeof(struct hci_ev_change_link_key_complete)),
7417	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
7418	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
7419	       sizeof(struct hci_ev_remote_features)),
7420	/* [0x0e = HCI_EV_CMD_COMPLETE] */
7421	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
7422		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
7423	/* [0x0f = HCI_EV_CMD_STATUS] */
7424	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
7425		   sizeof(struct hci_ev_cmd_status)),
7426	/* [0x10 = HCI_EV_CMD_STATUS] */
7427	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
7428	       sizeof(struct hci_ev_hardware_error)),
7429	/* [0x12 = HCI_EV_ROLE_CHANGE] */
7430	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
7431	       sizeof(struct hci_ev_role_change)),
7432	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
7433	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
7434		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
7435	/* [0x14 = HCI_EV_MODE_CHANGE] */
7436	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
7437	       sizeof(struct hci_ev_mode_change)),
7438	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
7439	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
7440	       sizeof(struct hci_ev_pin_code_req)),
7441	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
7442	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
7443	       sizeof(struct hci_ev_link_key_req)),
7444	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
7445	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
7446	       sizeof(struct hci_ev_link_key_notify)),
7447	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
7448	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
7449	       sizeof(struct hci_ev_clock_offset)),
7450	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
7451	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
7452	       sizeof(struct hci_ev_pkt_type_change)),
7453	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
7454	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
7455	       sizeof(struct hci_ev_pscan_rep_mode)),
7456	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
7457	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
7458		  hci_inquiry_result_with_rssi_evt,
7459		  sizeof(struct hci_ev_inquiry_result_rssi),
7460		  HCI_MAX_EVENT_SIZE),
7461	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
7462	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
7463	       sizeof(struct hci_ev_remote_ext_features)),
7464	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
7465	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
7466	       sizeof(struct hci_ev_sync_conn_complete)),
7467	/* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
7468	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
7469		  hci_extended_inquiry_result_evt,
7470		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
7471	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
7472	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
7473	       sizeof(struct hci_ev_key_refresh_complete)),
7474	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
7475	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
7476	       sizeof(struct hci_ev_io_capa_request)),
7477	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
7478	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
7479	       sizeof(struct hci_ev_io_capa_reply)),
7480	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
7481	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
7482	       sizeof(struct hci_ev_user_confirm_req)),
7483	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
7484	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
7485	       sizeof(struct hci_ev_user_passkey_req)),
7486	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
7487	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
7488	       sizeof(struct hci_ev_remote_oob_data_request)),
7489	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
7490	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
7491	       sizeof(struct hci_ev_simple_pair_complete)),
7492	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
7493	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
7494	       sizeof(struct hci_ev_user_passkey_notify)),
7495	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
7496	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
7497	       sizeof(struct hci_ev_keypress_notify)),
7498	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
7499	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
7500	       sizeof(struct hci_ev_remote_host_features)),
7501	/* [0x3e = HCI_EV_LE_META] */
7502	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
7503		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
7504	/* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
7505	HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
7506	       sizeof(struct hci_ev_num_comp_blocks)),
7507	/* [0xff = HCI_EV_VENDOR] */
7508	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
7509};
7510
7511static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
7512			   u16 *opcode, u8 *status,
7513			   hci_req_complete_t *req_complete,
7514			   hci_req_complete_skb_t *req_complete_skb)
7515{
7516	const struct hci_ev *ev = &hci_ev_table[event];
7517	void *data;
7518
7519	if (!ev->func)
7520		return;
7521
7522	if (skb->len < ev->min_len) {
7523		bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
7524			   event, skb->len, ev->min_len);
7525		return;
7526	}
7527
7528	/* Just warn if the length is over max_len size it still be
7529	 * possible to partially parse the event so leave to callback to
7530	 * decide if that is acceptable.
7531	 */
7532	if (skb->len > ev->max_len)
7533		bt_dev_warn_ratelimited(hdev,
7534					"unexpected event 0x%2.2x length: %u > %u",
7535					event, skb->len, ev->max_len);
7536
7537	data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
7538	if (!data)
7539		return;
7540
7541	if (ev->req)
7542		ev->func_req(hdev, data, skb, opcode, status, req_complete,
7543			     req_complete_skb);
7544	else
7545		ev->func(hdev, data, skb);
7546}
7547
7548void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
7549{
7550	struct hci_event_hdr *hdr = (void *) skb->data;
7551	hci_req_complete_t req_complete = NULL;
7552	hci_req_complete_skb_t req_complete_skb = NULL;
7553	struct sk_buff *orig_skb = NULL;
7554	u8 status = 0, event, req_evt = 0;
7555	u16 opcode = HCI_OP_NOP;
7556
7557	if (skb->len < sizeof(*hdr)) {
7558		bt_dev_err(hdev, "Malformed HCI Event");
7559		goto done;
7560	}
7561
7562	kfree_skb(hdev->recv_event);
7563	hdev->recv_event = skb_clone(skb, GFP_KERNEL);
7564
7565	event = hdr->evt;
7566	if (!event) {
7567		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
7568			    event);
7569		goto done;
7570	}
7571
7572	/* Only match event if command OGF is not for LE */
7573	if (hdev->req_skb &&
7574	    hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 &&
7575	    hci_skb_event(hdev->req_skb) == event) {
7576		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb),
7577				     status, &req_complete, &req_complete_skb);
7578		req_evt = event;
7579	}
7580
7581	/* If it looks like we might end up having to call
7582	 * req_complete_skb, store a pristine copy of the skb since the
7583	 * various handlers may modify the original one through
7584	 * skb_pull() calls, etc.
7585	 */
7586	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
7587	    event == HCI_EV_CMD_COMPLETE)
7588		orig_skb = skb_clone(skb, GFP_KERNEL);
7589
7590	skb_pull(skb, HCI_EVENT_HDR_SIZE);
7591
7592	/* Store wake reason if we're suspended */
7593	hci_store_wake_reason(hdev, event, skb);
7594
7595	bt_dev_dbg(hdev, "event 0x%2.2x", event);
7596
7597	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
7598		       &req_complete_skb);
7599
7600	if (req_complete) {
7601		req_complete(hdev, status, opcode);
7602	} else if (req_complete_skb) {
7603		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
7604			kfree_skb(orig_skb);
7605			orig_skb = NULL;
7606		}
7607		req_complete_skb(hdev, status, opcode, orig_skb);
7608	}
7609
7610done:
7611	kfree_skb(orig_skb);
7612	kfree_skb(skb);
7613	hdev->stat.evt_rx++;
7614}
7615