1/*
2   BlueZ - Bluetooth protocol stack for Linux
3
4   Copyright (C) 2010  Nokia Corporation
5   Copyright (C) 2011-2012 Intel Corporation
6
7   This program is free software; you can redistribute it and/or modify
8   it under the terms of the GNU General Public License version 2 as
9   published by the Free Software Foundation;
10
11   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22   SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI Management interface */
26
27#include <linux/module.h>
28#include <asm/unaligned.h>
29
30#include <net/bluetooth/bluetooth.h>
31#include <net/bluetooth/hci_core.h>
32#include <net/bluetooth/hci_sock.h>
33#include <net/bluetooth/l2cap.h>
34#include <net/bluetooth/mgmt.h>
35
36#include "hci_request.h"
37#include "smp.h"
38#include "mgmt_util.h"
39#include "mgmt_config.h"
40#include "msft.h"
41#include "eir.h"
42#include "aosp.h"
43
44#define MGMT_VERSION	1
45#define MGMT_REVISION	22
46
/* Management opcodes accepted from trusted sockets; reported verbatim,
 * in this order, by read_commands() for MGMT_OP_READ_COMMANDS.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};
137
/* Management events delivered to trusted sockets; reported verbatim,
 * in this order, by read_commands() for MGMT_OP_READ_COMMANDS.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
184
/* Read-only subset of opcodes accepted from untrusted (non-privileged)
 * sockets; used by read_commands() when HCI_SOCK_TRUSTED is not set.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
197
/* Subset of events delivered to untrusted sockets; used by
 * read_commands() when HCI_SOCK_TRUSTED is not set.
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
212
213#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
214
215#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
216		 "\x00\x00\x00\x00\x00\x00\x00\x00"
217
/* HCI to MGMT error code conversion table, indexed directly by the HCI
 * status code (see mgmt_status()). Entries past the end of the table
 * fall back to MGMT_STATUS_FAILED.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
285
286static u8 mgmt_errno_status(int err)
287{
288	switch (err) {
289	case 0:
290		return MGMT_STATUS_SUCCESS;
291	case -EPERM:
292		return MGMT_STATUS_REJECTED;
293	case -EINVAL:
294		return MGMT_STATUS_INVALID_PARAMS;
295	case -EOPNOTSUPP:
296		return MGMT_STATUS_NOT_SUPPORTED;
297	case -EBUSY:
298		return MGMT_STATUS_BUSY;
299	case -ETIMEDOUT:
300		return MGMT_STATUS_AUTH_FAILED;
301	case -ENOMEM:
302		return MGMT_STATUS_NO_RESOURCES;
303	case -EISCONN:
304		return MGMT_STATUS_ALREADY_CONNECTED;
305	case -ENOTCONN:
306		return MGMT_STATUS_DISCONNECTED;
307	}
308
309	return MGMT_STATUS_FAILED;
310}
311
312static u8 mgmt_status(int err)
313{
314	if (err < 0)
315		return mgmt_errno_status(err);
316
317	if (err < ARRAY_SIZE(mgmt_status_table))
318		return mgmt_status_table[err];
319
320	return MGMT_STATUS_FAILED;
321}
322
/* Send an index-related event on the control channel to all sockets
 * matching @flag; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
329
/* Send an event on the control channel limited to sockets matching
 * @flag, optionally skipping @skip_sk (typically the originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
336
/* Send an event on the control channel to trusted sockets only,
 * optionally skipping @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
343
/* Send a pre-built event skb on the control channel to trusted sockets,
 * optionally skipping @skip_sk.
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
349
350static u8 le_addr_type(u8 mgmt_addr_type)
351{
352	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
353		return ADDR_LE_DEV_PUBLIC;
354	else
355		return ADDR_LE_DEV_RANDOM;
356}
357
/* Fill a mgmt_rp_read_version structure with the interface version and
 * revision implemented by this file.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
365
/* Handler for MGMT_OP_READ_VERSION: reply with the management interface
 * version/revision. Not tied to any controller (MGMT_INDEX_NONE).
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
378
379static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
380			 u16 data_len)
381{
382	struct mgmt_rp_read_commands *rp;
383	u16 num_commands, num_events;
384	size_t rp_size;
385	int i, err;
386
387	bt_dev_dbg(hdev, "sock %p", sk);
388
389	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
390		num_commands = ARRAY_SIZE(mgmt_commands);
391		num_events = ARRAY_SIZE(mgmt_events);
392	} else {
393		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
394		num_events = ARRAY_SIZE(mgmt_untrusted_events);
395	}
396
397	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
398
399	rp = kmalloc(rp_size, GFP_KERNEL);
400	if (!rp)
401		return -ENOMEM;
402
403	rp->num_commands = cpu_to_le16(num_commands);
404	rp->num_events = cpu_to_le16(num_events);
405
406	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
407		__le16 *opcode = rp->opcodes;
408
409		for (i = 0; i < num_commands; i++, opcode++)
410			put_unaligned_le16(mgmt_commands[i], opcode);
411
412		for (i = 0; i < num_events; i++, opcode++)
413			put_unaligned_le16(mgmt_events[i], opcode);
414	} else {
415		__le16 *opcode = rp->opcodes;
416
417		for (i = 0; i < num_commands; i++, opcode++)
418			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
419
420		for (i = 0; i < num_events; i++, opcode++)
421			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
422	}
423
424	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
425				rp, rp_size);
426	kfree(rp);
427
428	return err;
429}
430
/* Handler for MGMT_OP_READ_INDEX_LIST: reply with the ids of all
 * configured primary controllers.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count matching controllers to size the reply. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the ids. This pass applies extra filters
	 * (setup/config in progress, user channel, raw-only), so the
	 * final count can be smaller than the first pass's count.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	/* Recompute the reply length from the final count. */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
490
/* Handler for MGMT_OP_READ_UNCONF_INDEX_LIST: like read_index_list()
 * but returns only primary controllers still flagged HCI_UNCONFIGURED.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count matching controllers to size the reply. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the ids, with the same extra filtering
	 * as read_index_list(); count may shrink.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
550
/* Handler for MGMT_OP_READ_EXT_INDEX_LIST: reply with all primary and
 * AMP controllers. Each entry carries a type (0x00 configured primary,
 * 0x01 unconfigured primary, 0x02 AMP) plus the bus and index.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count matching controllers to size the reply. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock. */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the entries, skipping controllers that
	 * are setting up, reconfiguring or owned by a user channel;
	 * count may shrink.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
624
625static bool is_configured(struct hci_dev *hdev)
626{
627	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
628	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
629		return false;
630
631	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
632	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
633	    !bacmp(&hdev->public_addr, BDADDR_ANY))
634		return false;
635
636	return true;
637}
638
/* Build the bitmask of configuration steps still outstanding; the same
 * two conditions as is_configured(), expressed as option bits.
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
654
/* Broadcast the current missing-options mask to sockets that subscribed
 * to option events, skipping @skip (typically the originator).
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
662
/* Complete @opcode on @sk with the current missing-options mask as the
 * response payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
670
/* Handler for MGMT_OP_READ_CONFIG_INFO: reply with the manufacturer id
 * plus which configuration options the controller supports and which
 * are still missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* A public address can only be configured if the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
698
/* Build the bitmask of PHYs this controller supports, derived from the
 * BR/EDR LMP feature bits and the LE feature bits.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* BR 1M 1-slot is always available on BR/EDR. */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* EDR 2M/3M PHYs and their multi-slot variants each
		 * require the corresponding EDR feature bits.
		 */
		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M TX/RX is always available on LE. */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
750
/* Build the bitmask of currently selected PHYs. Note the inverted
 * tests on the EDR packet types: a set HCI_2DHx/HCI_3DHx bit in
 * hdev->pkt_type apparently marks that EDR packet type as excluded,
 * so the PHY is selected only when the bit is clear.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	/* LE selection comes straight from the default TX/RX PHY
	 * preferences.
	 */
	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
813
814static u32 get_configurable_phys(struct hci_dev *hdev)
815{
816	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
817		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
818}
819
820static u32 get_supported_settings(struct hci_dev *hdev)
821{
822	u32 settings = 0;
823
824	settings |= MGMT_SETTING_POWERED;
825	settings |= MGMT_SETTING_BONDABLE;
826	settings |= MGMT_SETTING_DEBUG_KEYS;
827	settings |= MGMT_SETTING_CONNECTABLE;
828	settings |= MGMT_SETTING_DISCOVERABLE;
829
830	if (lmp_bredr_capable(hdev)) {
831		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
832			settings |= MGMT_SETTING_FAST_CONNECTABLE;
833		settings |= MGMT_SETTING_BREDR;
834		settings |= MGMT_SETTING_LINK_SECURITY;
835
836		if (lmp_ssp_capable(hdev)) {
837			settings |= MGMT_SETTING_SSP;
838		}
839
840		if (lmp_sc_capable(hdev))
841			settings |= MGMT_SETTING_SECURE_CONN;
842
843		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
844			     &hdev->quirks))
845			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
846	}
847
848	if (lmp_le_capable(hdev)) {
849		settings |= MGMT_SETTING_LE;
850		settings |= MGMT_SETTING_SECURE_CONN;
851		settings |= MGMT_SETTING_PRIVACY;
852		settings |= MGMT_SETTING_STATIC_ADDRESS;
853		settings |= MGMT_SETTING_ADVERTISING;
854	}
855
856	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
857	    hdev->set_bdaddr)
858		settings |= MGMT_SETTING_CONFIGURATION;
859
860	if (cis_central_capable(hdev))
861		settings |= MGMT_SETTING_CIS_CENTRAL;
862
863	if (cis_peripheral_capable(hdev))
864		settings |= MGMT_SETTING_CIS_PERIPHERAL;
865
866	settings |= MGMT_SETTING_PHY_CONFIGURATION;
867
868	return settings;
869}
870
/* Build the bitmask of settings currently in effect by mapping the
 * controller's runtime dev-flags (and capabilities) onto MGMT setting
 * bits.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	/* ISO related settings are reported from capability, not from a
	 * runtime flag.
	 */
	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	return settings;
}
950
/* Look up a pending management command for @opcode on the control
 * channel; returns NULL when none is pending.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
955
956u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
957{
958	struct mgmt_pending_cmd *cmd;
959
960	/* If there's a pending mgmt command the flags will not yet have
961	 * their final values, so check for this first.
962	 */
963	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
964	if (cmd) {
965		struct mgmt_mode *cp = cmd->param;
966		if (cp->val == 0x01)
967			return LE_AD_GENERAL;
968		else if (cp->val == 0x02)
969			return LE_AD_LIMITED;
970	} else {
971		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
972			return LE_AD_LIMITED;
973		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
974			return LE_AD_GENERAL;
975	}
976
977	return 0;
978}
979
980bool mgmt_get_connectable(struct hci_dev *hdev)
981{
982	struct mgmt_pending_cmd *cmd;
983
984	/* If there's a pending mgmt command the flag will not yet have
985	 * it's final value, so check for this first.
986	 */
987	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
988	if (cmd) {
989		struct mgmt_mode *cp = cmd->param;
990
991		return cp->val;
992	}
993
994	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
995}
996
/* hci_cmd_sync callback: refresh the EIR data and device class after
 * the service cache has been invalidated.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
1004
/* Delayed-work handler for hdev->service_cache: when the cache flag is
 * still set, clear it and queue the EIR/class refresh.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	/* test-and-clear: only one invocation does the refresh. */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
1015
/* hci_cmd_sync callback: restart advertising so a fresh RPA gets
 * programmed.
 */
static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}
1027
/* Delayed-work handler for hdev->rpa_expired: mark the RPA as expired
 * and, if advertising is active, queue a restart to rotate it.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing to rotate when advertising is off. */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1042
1043static int set_discoverable_sync(struct hci_dev *hdev, void *data);
1044
/* Delayed-work handler for hdev->discov_off: discoverable timeout has
 * fired, so drop both discoverable flags, queue the state update and
 * notify listeners of the new settings.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
1069
1070static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1071
/* Finish one mesh transmit: emit the Mesh Packet Complete event for its
 * handle (unless @silent) and free the pending TX entry.
 */
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	/* Copy the handle before mesh_tx is freed below */
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}
1083
/* hci_cmd_sync callback: stop mesh sending, disable advertising and
 * complete the first pending mesh TX entry, if any.
 */
static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	/* NULL address: pick the first pending entry */
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}
1097
1098static int mesh_send_sync(struct hci_dev *hdev, void *data);
1099static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
/* Completion hook run after mesh_send_done_sync: start the next queued
 * mesh transmit if one exists. On queue failure the entry is completed
 * (with event) right away; on success mark the device as sending.
 */
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}
1115
/* Delayed work: the mesh TX interval elapsed; queue teardown of the
 * current transmit, chaining into mesh_next for the next one.
 */
static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}
1126
/* One-time switch of a controller into mgmt mode: initialize the
 * delayed work items and set HCI_MGMT. Subsequent calls are no-ops.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}
1148
/* MGMT_OP_READ_INFO handler: snapshot the controller's address,
 * version, settings, class and names under hdev->lock and reply with a
 * Command Complete.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1178
/* Build the EIR blob (class of device, appearance, complete and short
 * name) used by the extended controller info reply and event. Returns
 * the number of bytes written to @eir.
 *
 * NOTE(review): no buffer bound is passed in; callers must size @eir
 * for the worst case — verify at the call sites.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1202
/* MGMT_OP_READ_EXT_INFO handler: like read_controller_info but the
 * class/appearance/names are packed as EIR data after the fixed reply.
 * Also switches this socket over to extended-info events.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* Fixed reply header plus EIR payload share this buffer */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1242
/* Broadcast the Extended Info Changed event (with freshly built EIR
 * payload) to sockets that opted into extended-info events, except
 * @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1258
/* Reply to @opcode with a Command Complete carrying the current
 * settings bitmask.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1266
1267void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1268{
1269	struct mgmt_ev_advertising_added ev;
1270
1271	ev.instance = instance;
1272
1273	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1274}
1275
1276void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1277			      u8 instance)
1278{
1279	struct mgmt_ev_advertising_removed ev;
1280
1281	ev.instance = instance;
1282
1283	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1284}
1285
1286static void cancel_adv_timeout(struct hci_dev *hdev)
1287{
1288	if (hdev->adv_instance_timeout) {
1289		hdev->adv_instance_timeout = 0;
1290		cancel_delayed_work(&hdev->adv_instance_expire);
1291	}
1292}
1293
/* This function requires the caller holds hdev->lock
 *
 * Re-sort all LE connection parameters onto the pending-connection or
 * pending-report lists according to their auto_connect policy, e.g.
 * after a power cycle.
 */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}
1318
/* Broadcast the New Settings event with the current settings bitmask
 * to all sockets registered for setting events, except @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1326
/* Completion handler for Set Powered: reply to the originating socket
 * and, on power on, restore LE actions and emit New Settings.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1362
/* hci_cmd_sync callback: apply the requested power state */
static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}
1372
/* MGMT_OP_SET_POWERED handler: validate the request, reject if a power
 * change is already in flight, and queue the actual power transition.
 * The reply is sent from mgmt_set_powered_complete().
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Reject power off while a power down is already in progress */
	if (!cp->val) {
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
					      MGMT_STATUS_BUSY);
			goto failed;
		}
	}

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just report current settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1431
/* Public wrapper: broadcast New Settings to everyone (no socket
 * skipped).
 */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1436
/* Shared context for mgmt_pending_foreach() callbacks: remembers the
 * first responded-to socket (so the follow-up New Settings event can
 * skip it) along with the device and a mgmt status code.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1442
/* foreach callback: answer one pending command with the current
 * settings, record the first socket in @data (taking a reference) and
 * free the command.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	/* Unlink before freeing; foreach iteration tolerates this */
	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1458
/* foreach callback: fail one pending command with the status pointed
 * to by @data and remove it.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1466
1467static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1468{
1469	if (cmd->cmd_complete) {
1470		u8 *status = data;
1471
1472		cmd->cmd_complete(cmd, *status);
1473		mgmt_pending_remove(cmd);
1474
1475		return;
1476	}
1477
1478	cmd_status_rsp(cmd, data);
1479}
1480
/* cmd_complete handler: echo the full original parameters back in the
 * Command Complete.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1486
/* cmd_complete handler: echo only the leading mgmt_addr_info portion
 * of the parameters back in the Command Complete.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1492
1493static u8 mgmt_bredr_support(struct hci_dev *hdev)
1494{
1495	if (!lmp_bredr_capable(hdev))
1496		return MGMT_STATUS_NOT_SUPPORTED;
1497	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1498		return MGMT_STATUS_REJECTED;
1499	else
1500		return MGMT_STATUS_SUCCESS;
1501}
1502
1503static u8 mgmt_le_support(struct hci_dev *hdev)
1504{
1505	if (!lmp_le_capable(hdev))
1506		return MGMT_STATUS_NOT_SUPPORTED;
1507	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1508		return MGMT_STATUS_REJECTED;
1509	else
1510		return MGMT_STATUS_SUCCESS;
1511}
1512
/* Completion handler for Set Discoverable: on success (re)arm the
 * discoverable timeout and send the settings reply; on failure send a
 * status and roll back the limited-discoverable flag.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arm the timeout only now that discoverable took effect */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1546
/* hci_cmd_sync callback: push the current discoverable state to the
 * controller.
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1553
/* MGMT_OP_SET_DISCOVERABLE handler. val: 0x00 off, 0x01 general,
 * 0x02 limited. Validates the val/timeout combination, handles the
 * powered-off and no-op cases inline, otherwise updates the flags and
 * queues the HCI update (reply sent from the completion handler).
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Discoverable and connectable changes must not race each other */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires the device to be connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Powered off: only the flag can be toggled, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1686
/* Completion handler for Set Connectable: send the settings reply (or
 * a status on failure) and emit New Settings.
 */
static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}
1714
1715static int set_connectable_update_settings(struct hci_dev *hdev,
1716					   struct sock *sk, u8 val)
1717{
1718	bool changed = false;
1719	int err;
1720
1721	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1722		changed = true;
1723
1724	if (val) {
1725		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1726	} else {
1727		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1728		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1729	}
1730
1731	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1732	if (err < 0)
1733		return err;
1734
1735	if (changed) {
1736		hci_update_scan(hdev);
1737		hci_update_passive_scan(hdev);
1738		return new_settings(hdev, sk);
1739	}
1740
1741	return 0;
1742}
1743
/* hci_cmd_sync callback: push the current connectable state to the
 * controller.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1750
/* MGMT_OP_SET_CONNECTABLE handler: validate, handle powered-off and
 * busy cases, otherwise update the flags and queue the HCI update
 * (reply sent from the completion handler).
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the stored flags change, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning off connectable also cancels discoverable */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1810
/* MGMT_OP_SET_BONDABLE handler: pure flag change — no HCI command is
 * queued, but a changed bondable mode may require a discoverable
 * refresh (limited privacy).
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1848
/* MGMT_OP_SET_LINK_SECURITY handler: when powered, sends
 * HCI_OP_WRITE_AUTH_ENABLE (reply comes later via the pending command);
 * when powered off only toggles the stored flag.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag change only, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: just reply */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1917
/* Completion handler for Set SSP: on failure roll back the flag and
 * fail all pending SET_SSP commands; on success answer them with the
 * settings, emit New Settings if the flag changed, and refresh EIR.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Undo the optimistic flag set done in set_ssp_sync */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1959
/* hci_cmd_sync callback for Set SSP: optimistically set the flag when
 * enabling (so the write sees the new state), issue the mode write,
 * and undo the flag on failure.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	/* Roll back the flag if the write failed and we had set it */
	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1977
/* MGMT_OP_SET_SSP handler: validate capability and parameters, handle
 * powered-off/busy/no-op cases inline, otherwise queue the mode write
 * (reply sent from set_ssp_complete).
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag change only, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just reply */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2052
2053static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2054{
2055	bt_dev_dbg(hdev, "sock %p", sk);
2056
2057	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2058				       MGMT_STATUS_NOT_SUPPORTED);
2059}
2060
/* Completion handler for Set LE: fail or answer all pending SET_LE
 * commands and broadcast the new settings.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
							&status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
2081
/* hci_cmd_sync callback for Set LE: when disabling, tear down all
 * advertising first; when enabling, set the flag, write the LE host
 * support setting and refresh advertising data and passive scanning.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2125
/* Completion handler for Set Mesh Receiver: on failure fail all
 * pending commands with the status; on success remove the pending
 * entry and send an empty Command Complete.
 */
static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	/* Keep the socket pointer; mgmt_pending_remove frees cmd */
	struct sock *sk = cmd->sk;

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	mgmt_pending_remove(cmd);
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
}
2141
/* hci_cmd_sync callback for Set Mesh Receiver: store the AD-type
 * filter list from the command parameters, toggle HCI_MESH and refresh
 * passive scanning.
 */
static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_mesh *cp = cmd->param;
	size_t len = cmd->param_len;

	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

	if (cp->enable)
		hci_dev_set_flag(hdev, HCI_MESH);
	else
		hci_dev_clear_flag(hdev, HCI_MESH);

	/* Remaining bytes after the fixed header are the AD-type list */
	len -= sizeof(*cp);

	/* If filters don't fit, forward all adv pkts */
	if (len <= sizeof(hdev->mesh_ad_types))
		memcpy(hdev->mesh_ad_types, cp->ad_types, len);

	hci_update_passive_scan_sync(hdev);
	return 0;
}
2164
/* MGMT_OP_SET_MESH_RECEIVER handler: requires LE plus the mesh
 * experimental flag; queues set_mesh_sync, with the reply sent from
 * set_mesh_complete.
 */
static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2202
/* Completion callback for mesh_send_sync: on failure, finish the
 * transmission with an error; on success, arm the send-done timer.
 */
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	unsigned long mesh_send_interval;
	u8 mgmt_err = mgmt_status(err);

	/* Report any errors here, but don't report completion */

	if (mgmt_err) {
		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
		/* Send Complete Error Code for handle */
		mesh_send_complete(hdev, mesh_tx, false);
		return;
	}

	/* Allow 25 ms per requested transmission before declaring the
	 * send done.
	 */
	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
			   mesh_send_interval);
}
2223
/* hci_cmd_sync work for MGMT_OP_MESH_SEND: program a short-lived
 * advertising instance carrying the mesh payload and schedule it if no
 * other instance currently has priority.
 */
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	/* Use an instance number one past the controller's advertised
	 * set count so mesh TX does not clash with regular instances.
	 */
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	/* NOTE(review): this path returns a positive MGMT status while
	 * the other exits return 0/-errno; confirm callers treat any
	 * non-zero value as failure.
	 */
	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	/* Advertise long enough for cnt events at the max interval */
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	/* instance != 0 means we picked something to advertise now */
	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
2277
2278static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2279{
2280	struct mgmt_rp_mesh_read_features *rp = data;
2281
2282	if (rp->used_handles >= rp->max_handles)
2283		return;
2284
2285	rp->handles[rp->used_handles++] = mesh_tx->handle;
2286}
2287
/* MGMT_OP_MESH_READ_FEATURES handler: report the maximum number of
 * concurrent mesh transmissions and the handles currently in use by
 * the requesting socket.
 */
static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	/* Mesh requires LE and the experimental mesh feature flag */
	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	/* Handles are only offered while LE is enabled */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	/* Collect the handles of this socket's outstanding sends */
	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	/* Trim the reply to the used portion of the handle array */
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}
2314
/* hci_cmd_sync work for MGMT_OP_MESH_SEND_CANCEL: cancel one (by
 * handle) or all of the requesting socket's mesh transmissions, then
 * reply and free the pending command.
 */
static int send_cancel(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
	struct mgmt_mesh_tx *mesh_tx;

	if (!cancel->handle) {
		/* Handle 0: drain every transmission owned by this
		 * socket.
		 */
		do {
			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, false);
		} while (mesh_tx);
	} else {
		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);

		/* Only the owning socket may cancel a transmission */
		if (mesh_tx && mesh_tx->sk == cmd->sk)
			mesh_send_complete(hdev, mesh_tx, false);
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
			  0, NULL, 0);
	mgmt_pending_free(cmd);

	return 0;
}
2341
/* MGMT_OP_MESH_SEND_CANCEL handler: validate and queue send_cancel,
 * which sends the reply and frees the pending command itself.
 */
static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	/* Mesh requires LE and the experimental mesh feature flag */
	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Nothing can be in flight while LE is disabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);
	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);

	if (err < 0) {
		/* Queueing failed: fail synchronously and free the
		 * pending entry if it was created.
		 */
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2375
/* MGMT_OP_MESH_SEND handler: validate the payload, make sure a handle
 * slot is free, queue the transmission and, if none is in progress,
 * start it via mesh_send_sync.
 */
static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct mgmt_cp_mesh_send *send = data;
	struct mgmt_rp_mesh_read_features rp;
	bool sending;
	int err = 0;

	/* Mesh requires LE and the experimental mesh feature flag */
	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_NOT_SUPPORTED);
	/* Payload must be non-empty and at most 31 bytes beyond the
	 * fixed header.
	 */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    len <= MGMT_MESH_SEND_SIZE ||
	    len > (MGMT_MESH_SEND_SIZE + 31))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Count this socket's outstanding sends to check for a free
	 * handle slot.
	 */
	memset(&rp, 0, sizeof(rp));
	rp.max_handles = MESH_HANDLES_MAX;

	mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	if (rp.max_handles <= rp.used_handles) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);

	/* If a send is already in progress the new TX stays queued;
	 * otherwise start it now.
	 */
	if (!mesh_tx)
		err = -ENOMEM;
	else if (!sending)
		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
					 mesh_send_start_complete);

	if (err < 0) {
		bt_dev_err(hdev, "Send Mesh Failed %d", err);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_FAILED);

		/* NOTE(review): when !sending and queueing fails, the
		 * freshly added mesh_tx is not removed here — confirm
		 * it is cleaned up elsewhere.
		 */
		if (mesh_tx) {
			if (sending)
				mgmt_mesh_remove(mesh_tx);
		}
	} else {
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);

		/* Reply immediately with the assigned handle */
		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
				  &mesh_tx->handle, 1);
	}

done:
	hci_dev_unlock(hdev);
	return err;
}
2436
/* MGMT_OP_SET_LE handler: enable or disable LE support on the
 * adapter, updating flags directly when powered off or when no actual
 * change is needed, otherwise queueing set_le_sync.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* val is strictly boolean */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	/* Current host-side LE setting as seen by the controller */
	enabled = lmp_host_le_capable(hdev);

	/* When powered off, or when the requested value matches the
	 * controller state, only the flags need updating — no HCI
	 * traffic is required.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE implies disabling LE advertising */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one LE/advertising state change may be in flight */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2525
2526/* This is a helper function to test for pending mgmt commands that can
2527 * cause CoD or EIR HCI commands. We can only allow one such pending
2528 * mgmt command at a time since otherwise we cannot easily track what
2529 * the current values are, will be, and based on that calculate if a new
2530 * HCI command needs to be sent and if yes with what value.
2531 */
2532static bool pending_eir_or_class(struct hci_dev *hdev)
2533{
2534	struct mgmt_pending_cmd *cmd;
2535
2536	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2537		switch (cmd->opcode) {
2538		case MGMT_OP_ADD_UUID:
2539		case MGMT_OP_REMOVE_UUID:
2540		case MGMT_OP_SET_DEV_CLASS:
2541		case MGMT_OP_SET_POWERED:
2542			return true;
2543		}
2544	}
2545
2546	return false;
2547}
2548
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16- and 32-bit UUIDs are shorthands whose
 * value occupies the last four bytes on top of this base.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2553
2554static u8 get_uuid_size(const u8 *uuid)
2555{
2556	u32 val;
2557
2558	if (memcmp(uuid, bluetooth_base_uuid, 12))
2559		return 128;
2560
2561	val = get_unaligned_le32(&uuid[12]);
2562	if (val > 0xffff)
2563		return 32;
2564
2565	return 16;
2566}
2567
/* Common completion for UUID/class commands: reply with the current
 * Class of Device (3 bytes) and free the pending command.
 */
static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	mgmt_pending_free(cmd);
}
2579
/* hci_cmd_sync work for MGMT_OP_ADD_UUID: refresh the Class of Device
 * and, if that succeeded, the EIR data.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2590
/* MGMT_OP_ADD_UUID handler: record the new service UUID and schedule
 * the Class of Device / EIR refresh; mgmt_class_complete replies.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class-affecting command may be in flight */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	/* 16, 32 or 128 depending on whether it aliases the Base UUID */
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* MGMT_OP_ADD_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2640
2641static bool enable_service_cache(struct hci_dev *hdev)
2642{
2643	if (!hdev_is_powered(hdev))
2644		return false;
2645
2646	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2647		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2648				   CACHE_TIMEOUT);
2649		return true;
2650	}
2651
2652	return false;
2653}
2654
/* hci_cmd_sync work for MGMT_OP_REMOVE_UUID: refresh the Class of
 * Device first; only refresh EIR when that succeeded.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2665
/* MGMT_OP_REMOVE_UUID handler: drop one UUID (or all, when the UUID is
 * all-zero) and schedule the Class of Device / EIR refresh.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a wildcard meaning "remove everything" */
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class-affecting command may be in flight */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service-cache worker was armed, it will handle
		 * the update later — reply right away.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	/* Remove every entry matching the given UUID */
	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_REMOVE_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2736
2737static int set_class_sync(struct hci_dev *hdev, void *data)
2738{
2739	int err = 0;
2740
2741	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2742		cancel_delayed_work_sync(&hdev->service_cache);
2743		err = hci_update_eir_sync(hdev);
2744	}
2745
2746	if (err)
2747		return err;
2748
2749	return hci_update_class_sync(hdev);
2750}
2751
/* MGMT_OP_SET_DEV_CLASS handler: validate and store the major/minor
 * device class and, when powered, schedule the HCI update.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Class of Device only exists for BR/EDR */
	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class-affecting command may be in flight */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two bits of minor and high three bits of major are
	 * reserved and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* While powered off only the stored values change; the HCI
	 * write happens on power-on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_SET_DEV_CLASS don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2806
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the adapter's stored BR/EDR
 * link keys with the supplied list and update the debug-keys policy.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps struct_size() below U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload must be exactly header + key_count entries */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* debug_keys is strictly boolean */
	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every key type before touching the key store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Considering SMP over BREDR/LE, there is no need to check addr_type */
		if (key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* The supplied list fully replaces the existing keys */
	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2896
2897static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2898			   u8 addr_type, struct sock *skip_sk)
2899{
2900	struct mgmt_ev_device_unpaired ev;
2901
2902	bacpy(&ev.addr.bdaddr, bdaddr);
2903	ev.addr.type = addr_type;
2904
2905	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2906			  skip_sk);
2907}
2908
/* Completion for unpair_device_sync: emit DEVICE_UNPAIRED on success,
 * then finish and free the pending command.
 */
static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	if (!err)
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	/* addr_cmd_complete, set by unpair_device() */
	cmd->cmd_complete(cmd, err);
	mgmt_pending_free(cmd);
}
2920
2921static int unpair_device_sync(struct hci_dev *hdev, void *data)
2922{
2923	struct mgmt_pending_cmd *cmd = data;
2924	struct mgmt_cp_unpair_device *cp = cmd->param;
2925	struct hci_conn *conn;
2926
2927	if (cp->addr.type == BDADDR_BREDR)
2928		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2929					       &cp->addr.bdaddr);
2930	else
2931		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2932					       le_addr_type(cp->addr.type));
2933
2934	if (!conn)
2935		return 0;
2936
2937	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2938}
2939
/* MGMT_OP_UNPAIR_DEVICE handler: remove all pairing material for the
 * address (link key for BR/EDR, LTK/IRK via SMP for LE) and optionally
 * terminate an existing connection.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect is strictly boolean */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			/* No stored link key means the device was never
			 * paired in the first place.
			 */
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: connection parameters can be removed
		 * immediately.
		 */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* unpair_device_complete replies once the link is torn down */
	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3068
/* MGMT_OP_DISCONNECT handler: terminate the connection to the given
 * address; the final reply is sent when the disconnection completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be in flight at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED states mean there is no established link */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3134
3135static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3136{
3137	switch (link_type) {
3138	case ISO_LINK:
3139	case LE_LINK:
3140		switch (addr_type) {
3141		case ADDR_LE_DEV_PUBLIC:
3142			return BDADDR_LE_PUBLIC;
3143
3144		default:
3145			/* Fallback to LE Random address type */
3146			return BDADDR_LE_RANDOM;
3147		}
3148
3149	default:
3150		/* Fallback to BR/EDR type */
3151		return BDADDR_BREDR;
3152	}
3153}
3154
/* MGMT_OP_GET_CONNECTIONS handler: report all mgmt-visible connections
 * except SCO/eSCO audio links.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count connections to size the reply allocation */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill entries; a SCO/eSCO link is written into the
	 * slot but i is not advanced, so the next entry overwrites it.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3208
/* Issue an HCI PIN Code Negative Reply for the given address and track
 * it as a pending MGMT_OP_PIN_CODE_NEG_REPLY command.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	/* The HCI command takes only the bdaddr, not the full mgmt
	 * address structure.
	 */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
3229
/* MGMT_OP_PIN_CODE_REPLY handler: forward the user-supplied PIN code to
 * the controller; high-security connections require a full 16-byte PIN
 * and are otherwise answered with an automatic negative reply.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* A PIN reply only makes sense with an ACL link in progress */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		/* Reject the pairing on the HCI level, then fail the
		 * mgmt command with INVALID_PARAMS.
		 */
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3291
/* MGMT_OP_SET_IO_CAPABILITY handler: store the IO capability used for
 * subsequent pairing attempts.  Takes effect immediately; no HCI
 * traffic is needed.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SMP_IO_KEYBOARD_DISPLAY is the highest defined capability */
	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
3314
3315static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3316{
3317	struct hci_dev *hdev = conn->hdev;
3318	struct mgmt_pending_cmd *cmd;
3319
3320	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3321		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3322			continue;
3323
3324		if (cmd->user_data != conn)
3325			continue;
3326
3327		return cmd;
3328	}
3329
3330	return NULL;
3331}
3332
/* Complete a pending Pair Device command: send the mgmt response to the
 * requesting socket and release the references the command held on the
 * connection.  @status is a MGMT_STATUS_* code.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Drop the reference taken with hci_conn_get() in pair_device();
	 * @conn must not be used by the caller after this point.
	 */
	hci_conn_put(conn);

	return err;
}
3361
3362void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3363{
3364	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3365	struct mgmt_pending_cmd *cmd;
3366
3367	cmd = find_pairing(conn);
3368	if (cmd) {
3369		cmd->cmd_complete(cmd, status);
3370		mgmt_pending_remove(cmd);
3371	}
3372}
3373
3374static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3375{
3376	struct mgmt_pending_cmd *cmd;
3377
3378	BT_DBG("status %u", status);
3379
3380	cmd = find_pairing(conn);
3381	if (!cmd) {
3382		BT_DBG("Unable to find a pending command");
3383		return;
3384	}
3385
3386	cmd->cmd_complete(cmd, mgmt_status(status));
3387	mgmt_pending_remove(cmd);
3388}
3389
3390static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3391{
3392	struct mgmt_pending_cmd *cmd;
3393
3394	BT_DBG("status %u", status);
3395
3396	if (!status)
3397		return;
3398
3399	cmd = find_pairing(conn);
3400	if (!cmd) {
3401		BT_DBG("Unable to find a pending command");
3402		return;
3403	}
3404
3405	cmd->cmd_complete(cmd, mgmt_status(status));
3406	mgmt_pending_remove(cmd);
3407}
3408
3409static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3410		       u16 len)
3411{
3412	struct mgmt_cp_pair_device *cp = data;
3413	struct mgmt_rp_pair_device rp;
3414	struct mgmt_pending_cmd *cmd;
3415	u8 sec_level, auth_type;
3416	struct hci_conn *conn;
3417	int err;
3418
3419	bt_dev_dbg(hdev, "sock %p", sk);
3420
3421	memset(&rp, 0, sizeof(rp));
3422	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3423	rp.addr.type = cp->addr.type;
3424
3425	if (!bdaddr_type_is_valid(cp->addr.type))
3426		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3427					 MGMT_STATUS_INVALID_PARAMS,
3428					 &rp, sizeof(rp));
3429
3430	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3431		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3432					 MGMT_STATUS_INVALID_PARAMS,
3433					 &rp, sizeof(rp));
3434
3435	hci_dev_lock(hdev);
3436
3437	if (!hdev_is_powered(hdev)) {
3438		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3439					MGMT_STATUS_NOT_POWERED, &rp,
3440					sizeof(rp));
3441		goto unlock;
3442	}
3443
3444	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3445		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3446					MGMT_STATUS_ALREADY_PAIRED, &rp,
3447					sizeof(rp));
3448		goto unlock;
3449	}
3450
3451	sec_level = BT_SECURITY_MEDIUM;
3452	auth_type = HCI_AT_DEDICATED_BONDING;
3453
3454	if (cp->addr.type == BDADDR_BREDR) {
3455		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3456				       auth_type, CONN_REASON_PAIR_DEVICE,
3457				       HCI_ACL_CONN_TIMEOUT);
3458	} else {
3459		u8 addr_type = le_addr_type(cp->addr.type);
3460		struct hci_conn_params *p;
3461
3462		/* When pairing a new device, it is expected to remember
3463		 * this device for future connections. Adding the connection
3464		 * parameter information ahead of time allows tracking
3465		 * of the peripheral preferred values and will speed up any
3466		 * further connection establishment.
3467		 *
3468		 * If connection parameters already exist, then they
3469		 * will be kept and this function does nothing.
3470		 */
3471		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3472
3473		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3474			p->auto_connect = HCI_AUTO_CONN_DISABLED;
3475
3476		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3477					   sec_level, HCI_LE_CONN_TIMEOUT,
3478					   CONN_REASON_PAIR_DEVICE);
3479	}
3480
3481	if (IS_ERR(conn)) {
3482		int status;
3483
3484		if (PTR_ERR(conn) == -EBUSY)
3485			status = MGMT_STATUS_BUSY;
3486		else if (PTR_ERR(conn) == -EOPNOTSUPP)
3487			status = MGMT_STATUS_NOT_SUPPORTED;
3488		else if (PTR_ERR(conn) == -ECONNREFUSED)
3489			status = MGMT_STATUS_REJECTED;
3490		else
3491			status = MGMT_STATUS_CONNECT_FAILED;
3492
3493		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3494					status, &rp, sizeof(rp));
3495		goto unlock;
3496	}
3497
3498	if (conn->connect_cfm_cb) {
3499		hci_conn_drop(conn);
3500		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3501					MGMT_STATUS_BUSY, &rp, sizeof(rp));
3502		goto unlock;
3503	}
3504
3505	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3506	if (!cmd) {
3507		err = -ENOMEM;
3508		hci_conn_drop(conn);
3509		goto unlock;
3510	}
3511
3512	cmd->cmd_complete = pairing_complete;
3513
3514	/* For LE, just connecting isn't a proof that the pairing finished */
3515	if (cp->addr.type == BDADDR_BREDR) {
3516		conn->connect_cfm_cb = pairing_complete_cb;
3517		conn->security_cfm_cb = pairing_complete_cb;
3518		conn->disconn_cfm_cb = pairing_complete_cb;
3519	} else {
3520		conn->connect_cfm_cb = le_pairing_complete_cb;
3521		conn->security_cfm_cb = le_pairing_complete_cb;
3522		conn->disconn_cfm_cb = le_pairing_complete_cb;
3523	}
3524
3525	conn->io_capability = cp->io_cap;
3526	cmd->user_data = hci_conn_get(conn);
3527
3528	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3529	    hci_conn_security(conn, sec_level, auth_type, true)) {
3530		cmd->cmd_complete(cmd, 0);
3531		mgmt_pending_remove(cmd);
3532	}
3533
3534	err = 0;
3535
3536unlock:
3537	hci_dev_unlock(hdev);
3538	return err;
3539}
3540
/* Handle the Cancel Pair Device mgmt command: terminate an ongoing Pair
 * Device operation for the given address, answering both the original
 * pairing socket (MGMT_STATUS_CANCELLED) and the caller.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* There must be a pending Pair Device command to cancel */
	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the connection owned by the pending cmd */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* This runs pairing_complete(), which drops the references the
	 * command held on @conn.
	 * NOTE(review): @conn is still dereferenced below; this relies
	 * on the connection object outliving those references - confirm
	 * another reference (e.g. the conn hash) keeps it alive here.
	 */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3597
/* Common helper for the user-interaction pairing replies (PIN code,
 * user confirm, passkey - positive and negative).  For LE the reply is
 * routed to SMP; for BR/EDR the corresponding HCI command @hci_op is
 * sent and a pending mgmt command @mgmt_op is created that is completed
 * when the HCI command resolves.  @passkey is only meaningful for
 * HCI_OP_USER_PASSKEY_REPLY and is 0 otherwise.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	/* A reply only makes sense for an existing connection */
	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE replies are handled entirely by SMP, no HCI command needed */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3668
3669static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3670			      void *data, u16 len)
3671{
3672	struct mgmt_cp_pin_code_neg_reply *cp = data;
3673
3674	bt_dev_dbg(hdev, "sock %p", sk);
3675
3676	return user_pairing_resp(sk, hdev, &cp->addr,
3677				MGMT_OP_PIN_CODE_NEG_REPLY,
3678				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3679}
3680
3681static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3682			      u16 len)
3683{
3684	struct mgmt_cp_user_confirm_reply *cp = data;
3685
3686	bt_dev_dbg(hdev, "sock %p", sk);
3687
3688	if (len != sizeof(*cp))
3689		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3690				       MGMT_STATUS_INVALID_PARAMS);
3691
3692	return user_pairing_resp(sk, hdev, &cp->addr,
3693				 MGMT_OP_USER_CONFIRM_REPLY,
3694				 HCI_OP_USER_CONFIRM_REPLY, 0);
3695}
3696
3697static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3698				  void *data, u16 len)
3699{
3700	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3701
3702	bt_dev_dbg(hdev, "sock %p", sk);
3703
3704	return user_pairing_resp(sk, hdev, &cp->addr,
3705				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3706				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3707}
3708
3709static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3710			      u16 len)
3711{
3712	struct mgmt_cp_user_passkey_reply *cp = data;
3713
3714	bt_dev_dbg(hdev, "sock %p", sk);
3715
3716	return user_pairing_resp(sk, hdev, &cp->addr,
3717				 MGMT_OP_USER_PASSKEY_REPLY,
3718				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3719}
3720
3721static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3722				  void *data, u16 len)
3723{
3724	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3725
3726	bt_dev_dbg(hdev, "sock %p", sk);
3727
3728	return user_pairing_resp(sk, hdev, &cp->addr,
3729				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3730				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3731}
3732
3733static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3734{
3735	struct adv_info *adv_instance;
3736
3737	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3738	if (!adv_instance)
3739		return 0;
3740
3741	/* stop if current instance doesn't need to be changed */
3742	if (!(adv_instance->flags & flags))
3743		return 0;
3744
3745	cancel_adv_timeout(hdev);
3746
3747	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3748	if (!adv_instance)
3749		return 0;
3750
3751	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3752
3753	return 0;
3754}
3755
/* hci_cmd_sync callback: expire the current advertising instance if it
 * includes the local name, since the name just changed.
 */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3760
/* Completion handler for the Set Local Name work queued through
 * hci_cmd_sync_queue() in set_local_name().
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Ignore stale completions: only act if this is still the
	 * pending Set Local Name command.
	 */
	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* The name may be part of advertising data, so expire
		 * the current instance while advertising is active.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3785
/* hci_cmd_sync work: push the updated local name to the controller
 * (BR/EDR name and EIR, plus LE scan response data when advertising).
 */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
3801
/* Handle the Set Local Name mgmt command.  The short name takes effect
 * immediately; the full name is committed right away when the device is
 * powered off, or only after the controller update work has been queued
 * successfully when powered on.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name is never sent to the controller, so it can be
	 * stored unconditionally.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		/* Notify other mgmt sockets of the name change */
		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* Only commit the new name once the update work is queued */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3864
/* hci_cmd_sync callback: expire the current advertising instance if it
 * includes the appearance, since the appearance just changed.
 */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
3869
3870static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3871			  u16 len)
3872{
3873	struct mgmt_cp_set_appearance *cp = data;
3874	u16 appearance;
3875	int err;
3876
3877	bt_dev_dbg(hdev, "sock %p", sk);
3878
3879	if (!lmp_le_capable(hdev))
3880		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3881				       MGMT_STATUS_NOT_SUPPORTED);
3882
3883	appearance = le16_to_cpu(cp->appearance);
3884
3885	hci_dev_lock(hdev);
3886
3887	if (hdev->appearance != appearance) {
3888		hdev->appearance = appearance;
3889
3890		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3891			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3892					   NULL);
3893
3894		ext_info_changed(hdev, sk);
3895	}
3896
3897	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3898				0);
3899
3900	hci_dev_unlock(hdev);
3901
3902	return err;
3903}
3904
3905static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3906				 void *data, u16 len)
3907{
3908	struct mgmt_rp_get_phy_configuration rp;
3909
3910	bt_dev_dbg(hdev, "sock %p", sk);
3911
3912	hci_dev_lock(hdev);
3913
3914	memset(&rp, 0, sizeof(rp));
3915
3916	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3917	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3918	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3919
3920	hci_dev_unlock(hdev);
3921
3922	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3923				 &rp, sizeof(rp));
3924}
3925
/* Broadcast a PHY Configuration Changed event with the currently
 * selected PHYs to all mgmt sockets except @skip.
 */
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}
3937
/* Completion handler for the Set PHY Configuration work.  cmd->skb is
 * the response produced by __hci_cmd_sync() in set_default_phy_sync()
 * and may be NULL or an ERR_PTR.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Ignore stale completions */
	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			/* First byte of the response is the HCI status */
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	/* Only free a real skb, never an ERR_PTR */
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
3974
3975static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3976{
3977	struct mgmt_pending_cmd *cmd = data;
3978	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3979	struct hci_cp_le_set_default_phy cp_phy;
3980	u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3981
3982	memset(&cp_phy, 0, sizeof(cp_phy));
3983
3984	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3985		cp_phy.all_phys |= 0x01;
3986
3987	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3988		cp_phy.all_phys |= 0x02;
3989
3990	if (selected_phys & MGMT_PHY_LE_1M_TX)
3991		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3992
3993	if (selected_phys & MGMT_PHY_LE_2M_TX)
3994		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3995
3996	if (selected_phys & MGMT_PHY_LE_CODED_TX)
3997		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3998
3999	if (selected_phys & MGMT_PHY_LE_1M_RX)
4000		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4001
4002	if (selected_phys & MGMT_PHY_LE_2M_RX)
4003		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4004
4005	if (selected_phys & MGMT_PHY_LE_CODED_RX)
4006		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4007
4008	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4009				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4010
4011	return 0;
4012}
4013
/* Handle the Set PHY Configuration mgmt command.  BR/EDR PHY selection
 * is applied immediately by adjusting hdev->pkt_type; LE PHY selection
 * requires sending HCI LE Set Default PHY via set_default_phy_sync().
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Selecting a PHY the controller doesn't support is invalid */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Non-configurable PHYs must always remain selected */
	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Nothing to do if the selection is unchanged */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map the BR/EDR PHY selection onto the ACL packet-type mask.
	 * Note the EDR bits are inverted relative to the basic-rate
	 * bits: setting an HCI_2DHx/HCI_3DHx bit disables that packet
	 * type, hence they are cleared when the PHY is selected.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If only the BR/EDR selection changed, no HCI command is
	 * needed; notify and complete immediately.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4142
/* Handle the Set Blocked Keys mgmt command: replace the device's list
 * of blocked keys with the one supplied by userspace.  Note that @err
 * holds a MGMT_STATUS_* code (not an errno) that is passed as the
 * status of the command-complete response.
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	/* Largest key_count whose total payload still fits in a u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload size must match the declared key count exactly */
	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		/* On allocation failure the keys added so far stay in
		 * the list and the command fails with NO_RESOURCES.
		 */
		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				err, NULL, 0);
}
4191
/* Handle the Set Wideband Speech mgmt command: toggle the
 * HCI_WIDEBAND_SPEECH_ENABLED flag for controllers that declare the
 * quirk, emitting New Settings when the value actually changed.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Only boolean values are accepted */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Changing the value while powered is rejected (presumably it
	 * can only be applied during controller setup - confirm).
	 */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* test-and-set/clear report whether the flag really changed */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4240
/* Handle the Read Controller Capabilities mgmt command: build a list of
 * capability TLVs (security flags, key sizes, LE TX power range) in a
 * fixed-size buffer and return it.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	/* NOTE(review): buf must be large enough for the response header
	 * plus every TLV appended below - re-check when adding entries.
	 */
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
4307
/* Experimental feature UUIDs.  The byte arrays store the UUID shown in
 * the preceding comment in reversed (little-endian, over-the-wire)
 * byte order.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};
4351
4352static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4353				  void *data, u16 data_len)
4354{
4355	struct mgmt_rp_read_exp_features_info *rp;
4356	size_t len;
4357	u16 idx = 0;
4358	u32 flags;
4359	int status;
4360
4361	bt_dev_dbg(hdev, "sock %p", sk);
4362
4363	/* Enough space for 7 features */
4364	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4365	rp = kzalloc(len, GFP_KERNEL);
4366	if (!rp)
4367		return -ENOMEM;
4368
4369#ifdef CONFIG_BT_FEATURE_DEBUG
4370	if (!hdev) {
4371		flags = bt_dbg_get() ? BIT(0) : 0;
4372
4373		memcpy(rp->features[idx].uuid, debug_uuid, 16);
4374		rp->features[idx].flags = cpu_to_le32(flags);
4375		idx++;
4376	}
4377#endif
4378
4379	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4380		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4381			flags = BIT(0);
4382		else
4383			flags = 0;
4384
4385		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4386		rp->features[idx].flags = cpu_to_le32(flags);
4387		idx++;
4388	}
4389
4390	if (hdev && ll_privacy_capable(hdev)) {
4391		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4392			flags = BIT(0) | BIT(1);
4393		else
4394			flags = BIT(1);
4395
4396		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4397		rp->features[idx].flags = cpu_to_le32(flags);
4398		idx++;
4399	}
4400
4401	if (hdev && (aosp_has_quality_report(hdev) ||
4402		     hdev->set_quality_report)) {
4403		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4404			flags = BIT(0);
4405		else
4406			flags = 0;
4407
4408		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4409		rp->features[idx].flags = cpu_to_le32(flags);
4410		idx++;
4411	}
4412
4413	if (hdev && hdev->get_data_path_id) {
4414		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4415			flags = BIT(0);
4416		else
4417			flags = 0;
4418
4419		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4420		rp->features[idx].flags = cpu_to_le32(flags);
4421		idx++;
4422	}
4423
4424	if (IS_ENABLED(CONFIG_BT_LE)) {
4425		flags = iso_enabled() ? BIT(0) : 0;
4426		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4427		rp->features[idx].flags = cpu_to_le32(flags);
4428		idx++;
4429	}
4430
4431	if (hdev && lmp_le_capable(hdev)) {
4432		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4433			flags = BIT(0);
4434		else
4435			flags = 0;
4436
4437		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4438		rp->features[idx].flags = cpu_to_le32(flags);
4439		idx++;
4440	}
4441
4442	rp->feature_count = cpu_to_le16(idx);
4443
4444	/* After reading the experimental features information, enable
4445	 * the events to update client on any future change.
4446	 */
4447	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4448
4449	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4450				   MGMT_OP_READ_EXP_FEATURES_INFO,
4451				   0, rp, sizeof(*rp) + (20 * idx));
4452
4453	kfree(rp);
4454	return status;
4455}
4456
4457static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4458					  struct sock *skip)
4459{
4460	struct mgmt_ev_exp_feature_changed ev;
4461
4462	memset(&ev, 0, sizeof(ev));
4463	memcpy(ev.uuid, rpa_resolution_uuid, 16);
4464	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4465
4466	// Do we need to be atomic with the conn_flags?
4467	if (enabled && privacy_mode_capable(hdev))
4468		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4469	else
4470		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4471
4472	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4473				  &ev, sizeof(ev),
4474				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4475
4476}
4477
4478static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4479			       bool enabled, struct sock *skip)
4480{
4481	struct mgmt_ev_exp_feature_changed ev;
4482
4483	memset(&ev, 0, sizeof(ev));
4484	memcpy(ev.uuid, uuid, 16);
4485	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4486
4487	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4488				  &ev, sizeof(ev),
4489				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4490}
4491
/* Table-entry initializer for exp_features[]: binds an experimental-feature
 * UUID to the handler invoked for MGMT_OP_SET_EXP_FEATURE on that UUID.
 */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4497
/* The zero key uuid is special. Multiple exp features are set through it.
 * Setting the zero key acts as a "disable all" for the features handled
 * below; the reply carries the all-zero UUID with no flags set.
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Non-controller index: turn off the global debug feature */
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	/* Controller index: disable LL privacy, but only while powered off */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	/* Opt this socket into future experimental feature events */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4534
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Toggle the global bt_dbg debug feature. Only valid on the non-controller
 * index; the single parameter octet is a boolean on/off value.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	/* Changed only when the requested value differs from current state */
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Notify other sockets that opted into exp feature events */
	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif
4581
/* Toggle the experimental mesh feature (HCI_MESH_EXPERIMENTAL) on a
 * controller. Disabling also clears the active HCI_MESH flag.
 */
static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	} else {
		/* Turning the experimental feature off also disables any
		 * currently active mesh mode.
		 */
		hci_dev_clear_flag(hdev, HCI_MESH);
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);

	return err;
}
4632
/* Toggle the experimental LL privacy (RPA resolution) feature
 * (HCI_ENABLE_LL_PRIVACY). Only allowed while the controller is powered
 * down, since it changes the supported settings.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		/* Advertising is cleared; the two are mutually exclusive
		 * in this experimental mode.
		 */
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4697
/* Toggle the quality report feature (HCI_QUALITY_REPORT). The actual
 * enable/disable is delegated to either the driver's set_quality_report
 * callback or the AOSP vendor extension. Runs under the request sync lock
 * because the callbacks issue HCI traffic.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	/* Need either a driver callback or the AOSP vendor extension */
	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* Driver callback takes precedence over the AOSP extension */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4771
/* Toggle the codec offload feature (HCI_OFFLOAD_CODECS_ENABLED). Requires
 * a driver that provides the get_data_path_id callback.
 */
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	/* Offload is only possible when the driver exposes a data path */
	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);

	return err;
}
4829
/* Toggle the LE simultaneous central/peripheral roles feature
 * (HCI_LE_SIMULTANEOUS_ROLES). Requires controller support for
 * simultaneous LE states.
 */
static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	if (!hci_dev_le_state_simultaneous(hdev)) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);

	return err;
}
4887
#ifdef CONFIG_BT_LE
/* Toggle the ISO socket support by registering/unregistering the ISO
 * socket protocol. Only valid on the non-controller index.
 */
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed = false;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = cp->param[0] ? true : false;
	if (val)
		err = iso_init();
	else
		err = iso_exit();

	/* Only report a change when init/exit actually succeeded */
	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, val, sk);

	return err;
}
#endif
4938
/* Registry mapping experimental-feature UUIDs to their set handlers;
 * scanned linearly by set_exp_feature() and terminated by a NULL entry.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4960
4961static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4962			   void *data, u16 data_len)
4963{
4964	struct mgmt_cp_set_exp_feature *cp = data;
4965	size_t i = 0;
4966
4967	bt_dev_dbg(hdev, "sock %p", sk);
4968
4969	for (i = 0; exp_features[i].uuid; i++) {
4970		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4971			return exp_features[i].set_func(sk, hdev, cp, data_len);
4972	}
4973
4974	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4975			       MGMT_OP_SET_EXP_FEATURE,
4976			       MGMT_STATUS_NOT_SUPPORTED);
4977}
4978
4979static u32 get_params_flags(struct hci_dev *hdev,
4980			    struct hci_conn_params *params)
4981{
4982	u32 flags = hdev->conn_flags;
4983
4984	/* Devices using RPAs can only be programmed in the acceptlist if
4985	 * LL Privacy has been enable otherwise they cannot mark
4986	 * HCI_CONN_FLAG_REMOTE_WAKEUP.
4987	 */
4988	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
4989	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
4990		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
4991
4992	return flags;
4993}
4994
4995static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4996			    u16 data_len)
4997{
4998	struct mgmt_cp_get_device_flags *cp = data;
4999	struct mgmt_rp_get_device_flags rp;
5000	struct bdaddr_list_with_flags *br_params;
5001	struct hci_conn_params *params;
5002	u32 supported_flags;
5003	u32 current_flags = 0;
5004	u8 status = MGMT_STATUS_INVALID_PARAMS;
5005
5006	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
5007		   &cp->addr.bdaddr, cp->addr.type);
5008
5009	hci_dev_lock(hdev);
5010
5011	supported_flags = hdev->conn_flags;
5012
5013	memset(&rp, 0, sizeof(rp));
5014
5015	if (cp->addr.type == BDADDR_BREDR) {
5016		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5017							      &cp->addr.bdaddr,
5018							      cp->addr.type);
5019		if (!br_params)
5020			goto done;
5021
5022		current_flags = br_params->flags;
5023	} else {
5024		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5025						le_addr_type(cp->addr.type));
5026		if (!params)
5027			goto done;
5028
5029		supported_flags = get_params_flags(hdev, params);
5030		current_flags = params->flags;
5031	}
5032
5033	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5034	rp.addr.type = cp->addr.type;
5035	rp.supported_flags = cpu_to_le32(supported_flags);
5036	rp.current_flags = cpu_to_le32(current_flags);
5037
5038	status = MGMT_STATUS_SUCCESS;
5039
5040done:
5041	hci_dev_unlock(hdev);
5042
5043	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5044				&rp, sizeof(rp));
5045}
5046
5047static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5048				 bdaddr_t *bdaddr, u8 bdaddr_type,
5049				 u32 supported_flags, u32 current_flags)
5050{
5051	struct mgmt_ev_device_flags_changed ev;
5052
5053	bacpy(&ev.addr.bdaddr, bdaddr);
5054	ev.addr.type = bdaddr_type;
5055	ev.supported_flags = cpu_to_le32(supported_flags);
5056	ev.current_flags = cpu_to_le32(current_flags);
5057
5058	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5059}
5060
5061static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5062			    u16 len)
5063{
5064	struct mgmt_cp_set_device_flags *cp = data;
5065	struct bdaddr_list_with_flags *br_params;
5066	struct hci_conn_params *params;
5067	u8 status = MGMT_STATUS_INVALID_PARAMS;
5068	u32 supported_flags;
5069	u32 current_flags = __le32_to_cpu(cp->current_flags);
5070
5071	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5072		   &cp->addr.bdaddr, cp->addr.type, current_flags);
5073
5074	// We should take hci_dev_lock() early, I think.. conn_flags can change
5075	supported_flags = hdev->conn_flags;
5076
5077	if ((supported_flags | current_flags) != supported_flags) {
5078		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5079			    current_flags, supported_flags);
5080		goto done;
5081	}
5082
5083	hci_dev_lock(hdev);
5084
5085	if (cp->addr.type == BDADDR_BREDR) {
5086		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5087							      &cp->addr.bdaddr,
5088							      cp->addr.type);
5089
5090		if (br_params) {
5091			br_params->flags = current_flags;
5092			status = MGMT_STATUS_SUCCESS;
5093		} else {
5094			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5095				    &cp->addr.bdaddr, cp->addr.type);
5096		}
5097
5098		goto unlock;
5099	}
5100
5101	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5102					le_addr_type(cp->addr.type));
5103	if (!params) {
5104		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5105			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5106		goto unlock;
5107	}
5108
5109	supported_flags = get_params_flags(hdev, params);
5110
5111	if ((supported_flags | current_flags) != supported_flags) {
5112		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5113			    current_flags, supported_flags);
5114		goto unlock;
5115	}
5116
5117	WRITE_ONCE(params->flags, current_flags);
5118	status = MGMT_STATUS_SUCCESS;
5119
5120	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5121	 * has been set.
5122	 */
5123	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5124		hci_update_passive_scan(hdev);
5125
5126unlock:
5127	hci_dev_unlock(hdev);
5128
5129done:
5130	if (status == MGMT_STATUS_SUCCESS)
5131		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5132				     supported_flags, current_flags);
5133
5134	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5135				 &cp->addr, sizeof(cp->addr));
5136}
5137
5138static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5139				   u16 handle)
5140{
5141	struct mgmt_ev_adv_monitor_added ev;
5142
5143	ev.monitor_handle = cpu_to_le16(handle);
5144
5145	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5146}
5147
/* Emit MGMT_EV_ADV_MONITOR_REMOVED for @handle. If a Remove Adv Monitor
 * command targeting a specific (non-zero) handle is pending, its issuing
 * socket is skipped since it receives a command-complete reply instead.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		/* Non-zero handle: single-monitor remove is in flight */
		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
5167
/* Handler for MGMT_OP_READ_ADV_MONITOR_FEATURES: report supported/enabled
 * monitor features, capacity limits, and the handles of all registered
 * monitors.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	/* Monitor count is bounded by this, so a stack array suffices */
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* Snapshot the registered handles while holding the lock */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
5216
/* Completion callback for the queued add-monitor work: on success, emit
 * the monitor-added event, account the new monitor and refresh passive
 * scanning; always reply to the pending command and release it.
 */
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5244
5245static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5246{
5247	struct mgmt_pending_cmd *cmd = data;
5248	struct adv_monitor *monitor = cmd->user_data;
5249
5250	return hci_add_adv_monitor(hdev, monitor);
5251}
5252
/* Common tail of the add-monitor commands: create the pending command and
 * queue the registration work. Takes ownership of @m: on any failure path
 * (including a non-zero incoming @status) the monitor is freed and a
 * command status reply is sent.
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	/* Caller already failed (parse/alloc error); just report it */
	if (status)
		goto unlock;

	/* Only one monitor operation may be outstanding at a time */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	/* Failure: release the monitor and report the status */
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
5300
5301static void parse_adv_monitor_rssi(struct adv_monitor *m,
5302				   struct mgmt_adv_rssi_thresholds *rssi)
5303{
5304	if (rssi) {
5305		m->rssi.low_threshold = rssi->low_threshold;
5306		m->rssi.low_threshold_timeout =
5307		    __le16_to_cpu(rssi->low_threshold_timeout);
5308		m->rssi.high_threshold = rssi->high_threshold;
5309		m->rssi.high_threshold_timeout =
5310		    __le16_to_cpu(rssi->high_threshold_timeout);
5311		m->rssi.sampling_period = rssi->sampling_period;
5312	} else {
5313		/* Default values. These numbers are the least constricting
5314		 * parameters for MSFT API to work, so it behaves as if there
5315		 * are no rssi parameter to consider. May need to be changed
5316		 * if other API are to be supported.
5317		 */
5318		m->rssi.low_threshold = -127;
5319		m->rssi.low_threshold_timeout = 60;
5320		m->rssi.high_threshold = -127;
5321		m->rssi.high_threshold_timeout = 0;
5322		m->rssi.sampling_period = 0;
5323	}
5324}
5325
5326static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5327				    struct mgmt_adv_pattern *patterns)
5328{
5329	u8 offset = 0, length = 0;
5330	struct adv_pattern *p = NULL;
5331	int i;
5332
5333	for (i = 0; i < pattern_count; i++) {
5334		offset = patterns[i].offset;
5335		length = patterns[i].length;
5336		if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5337		    length > HCI_MAX_EXT_AD_LENGTH ||
5338		    (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5339			return MGMT_STATUS_INVALID_PARAMS;
5340
5341		p = kmalloc(sizeof(*p), GFP_KERNEL);
5342		if (!p)
5343			return MGMT_STATUS_NO_RESOURCES;
5344
5345		p->ad_type = patterns[i].ad_type;
5346		p->offset = patterns[i].offset;
5347		p->length = patterns[i].length;
5348		memcpy(p->value, patterns[i].value, p->length);
5349
5350		INIT_LIST_HEAD(&p->list);
5351		list_add(&p->list, &m->patterns);
5352	}
5353
5354	return MGMT_STATUS_SUCCESS;
5355}
5356
/* Handler for MGMT_OP_ADD_ADV_PATTERNS_MONITOR: allocate a monitor with
 * default RSSI thresholds and the supplied patterns, then hand it off to
 * __add_adv_patterns_monitor() (which also handles the error cases).
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* Must carry at least one pattern beyond the fixed header */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	/* NULL rssi selects the default thresholds */
	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}
5393
/* Handler for MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI: like
 * add_adv_patterns_monitor() but with caller-supplied RSSI thresholds.
 */
static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* Must carry at least one pattern beyond the fixed header */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}
5430
/* Completion callback for the queued remove-monitor work: refresh passive
 * scanning on success, reply to the pending command and release it.
 */
static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;

	hci_dev_lock(hdev);

	/* Echo the requested handle (both sides little-endian) */
	rp.monitor_handle = cp->monitor_handle;

	if (!status)
		hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5453
5454static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5455{
5456	struct mgmt_pending_cmd *cmd = data;
5457	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5458	u16 handle = __le16_to_cpu(cp->monitor_handle);
5459
5460	if (!handle)
5461		return hci_remove_all_adv_monitor(hdev);
5462
5463	return hci_remove_single_adv_monitor(hdev, handle);
5464}
5465
/* Handler for MGMT_OP_REMOVE_ADV_MONITOR: queue the removal work unless
 * another monitor operation is already outstanding.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	/* Only one monitor operation may be outstanding at a time */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
				  mgmt_remove_adv_monitor_complete);

	if (err) {
		/* Queueing failed: drop the pending command we just added */
		mgmt_pending_remove(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
5511
/* Completion callback for MGMT_OP_READ_LOCAL_OOB_DATA: translates the HCI
 * reply skb stashed in cmd->skb into a mgmt response. When BR/EDR Secure
 * Connections is disabled only the P-192 hash/randomizer are returned and
 * the reply is truncated accordingly.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* cmd->skb may be NULL, an ERR_PTR, or a valid reply whose first
	 * byte is the HCI status; derive a single mgmt status from that.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy reply: only P-192 values are present. */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Omit the (zeroed) P-256 fields from the reply. */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply: both P-192 and P-256 values. */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	/* Only free a real skb; NULL and ERR_PTR must not reach kfree_skb. */
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5578
5579static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5580{
5581	struct mgmt_pending_cmd *cmd = data;
5582
5583	if (bredr_sc_enabled(hdev))
5584		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5585	else
5586		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5587
5588	if (IS_ERR(cmd->skb))
5589		return PTR_ERR(cmd->skb);
5590	else
5591		return 0;
5592}
5593
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: validate powered/SSP preconditions
 * and queue the HCI read on the cmd_sync workqueue; the reply to userspace
 * is sent from read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data only exists for SSP-capable controllers. */
	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5635
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA: store remote out-of-band pairing
 * data for a peer. Two request sizes are accepted: the legacy form with
 * only P-192 hash/randomizer (BR/EDR only) and the extended form that
 * additionally carries P-256 values. All-zero values disable the
 * corresponding key set.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy request: P-192 data only, BR/EDR addresses only. */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended request: may carry both P-192 and P-256 data. */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Neither known request size: malformed command. */
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5743
/* Handle MGMT_OP_REMOVE_REMOTE_OOB_DATA: delete stored remote OOB data
 * for one BR/EDR address, or all entries when BDADDR_ANY is given.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	/* BDADDR_ANY wipes the whole remote OOB data store. */
	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
5780
/* Notify userspace that a start-discovery request finished: completes
 * whichever discovery variant (plain, service, or limited) is pending.
 */
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	/* At most one of the three discovery opcodes can be pending. */
	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (!cmd)
		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
5803
/* Check whether a requested discovery type is usable on this controller.
 * Returns true if valid; otherwise false with *mgmt_status set to the
 * error to report. INTERLEAVED requires both LE and BR/EDR support,
 * hence the fallthrough into the BR/EDR check.
 */
static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
				    uint8_t *mgmt_status)
{
	switch (type) {
	case DISCOV_TYPE_LE:
		*mgmt_status = mgmt_le_support(hdev);
		if (*mgmt_status)
			return false;
		break;
	case DISCOV_TYPE_INTERLEAVED:
		*mgmt_status = mgmt_le_support(hdev);
		if (*mgmt_status)
			return false;
		/* Interleaved also needs BR/EDR; fall into the next check. */
		fallthrough;
	case DISCOV_TYPE_BREDR:
		*mgmt_status = mgmt_bredr_support(hdev);
		if (*mgmt_status)
			return false;
		break;
	default:
		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}
5830
/* cmd_sync completion for start-discovery: replies to userspace and moves
 * the discovery state machine to FINDING on success or back to STOPPED on
 * failure. Bails out if the pending command was already canceled.
 */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Guard against the command having been completed/removed already. */
	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* cmd->param starts with the discovery type byte; echo it back. */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
				DISCOVERY_FINDING);
}
5849
/* hci_cmd_sync work: run the actual HCI discovery start sequence. */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5854
/* Common implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY: validates state and discovery type,
 * records the request as pending and queues the HCI work. The final
 * reply is sent from start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time, and not during periodic
	 * inquiry.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5925
/* Handle MGMT_OP_START_DISCOVERY (general discovery). */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5932
/* Handle MGMT_OP_START_LIMITED_DISCOVERY (limited discovery mode). */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5940
/* Handle MGMT_OP_START_SERVICE_DISCOVERY: like start_discovery but with
 * result filtering by RSSI threshold and an optional list of 128-bit
 * service UUIDs appended to the request. Validates the variable-length
 * payload before copying the UUID list into hdev->discovery.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound on uuid_count such that sizeof(*cp) + count * 16
	 * cannot overflow the u16 length.
	 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The payload length must exactly match the advertised UUID count. */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
6052
6053void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6054{
6055	struct mgmt_pending_cmd *cmd;
6056
6057	bt_dev_dbg(hdev, "status %u", status);
6058
6059	hci_dev_lock(hdev);
6060
6061	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6062	if (cmd) {
6063		cmd->cmd_complete(cmd, mgmt_status(status));
6064		mgmt_pending_remove(cmd);
6065	}
6066
6067	hci_dev_unlock(hdev);
6068}
6069
/* cmd_sync completion for stop-discovery: replies to userspace and, on
 * success, marks discovery as stopped. Bails out if the pending command
 * was already completed elsewhere.
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* cmd->param starts with the discovery type byte; echo it back. */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
6086
/* hci_cmd_sync work: run the actual HCI discovery stop sequence. */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
6091
/* Handle MGMT_OP_STOP_DISCOVERY: validate that a discovery of the given
 * type is actually running, then queue the HCI stop sequence. The final
 * reply is sent from stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the discovery that is running. */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6136
/* Handle MGMT_OP_CONFIRM_NAME: during discovery, userspace tells us
 * whether it already knows the remote name for an inquiry-cache entry.
 * Known names drop the entry from the resolve list; unknown names queue
 * it for name resolution.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* Name already known: no resolution needed for this entry. */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		/* Queue the entry for remote name resolution. */
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
6178
/* Handle MGMT_OP_BLOCK_DEVICE: add an address to the reject list and
 * broadcast MGMT_EV_DEVICE_BLOCKED to other mgmt sockets on success.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		/* Includes -EEXIST when the address is already blocked. */
		status = MGMT_STATUS_FAILED;
		goto done;
	}

	/* Notify other mgmt sockets; the requester is skipped (sk). */
	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
6214
/* Handle MGMT_OP_UNBLOCK_DEVICE: remove an address from the reject list
 * and broadcast MGMT_EV_DEVICE_UNBLOCKED to other mgmt sockets on success.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		/* Address was not on the reject list. */
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* Notify other mgmt sockets; the requester is skipped (sk). */
	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
6250
/* hci_cmd_sync work: refresh the EIR data so it carries the new DID. */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
6255
/* Handle MGMT_OP_SET_DEVICE_ID: store the Device ID (DID) record and
 * queue an EIR update so remote devices see the new values. Source 0x0000
 * disables DID; 0x0001 = Bluetooth SIG, 0x0002 = USB Forum.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	int err;
	__u16 source;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Best-effort EIR refresh; the queue return value is intentionally
	 * not propagated since the DID values are already stored.
	 */
	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}
6287
/* Log the outcome of re-enabling an advertising instance. */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err) {
		bt_dev_dbg(hdev, "status %d", err);
		return;
	}

	bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
6295
/* Completion for MGMT_OP_SET_ADVERTISING: sync the HCI_ADVERTISING flag
 * with the controller state, answer all pending commands, emit New
 * Settings, and re-enable instance advertising if Set Advertising was
 * just turned off while instances are configured.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* Fail every pending Set Advertising command with status. */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Mirror the actual LE advertising state into the mgmt flag. */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
6343
/* hci_cmd_sync work for MGMT_OP_SET_ADVERTISING: apply the requested
 * advertising mode. cp->val: 0x00 = off, 0x01 = on, 0x02 = on and
 * connectable.
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	/* Stop any pending instance-advertising timeout first. */
	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
6377
/* Handle MGMT_OP_SET_ADVERTISING: toggle LE advertising. When no HCI
 * traffic is needed (powered off, no change, mesh active, LE link up, or
 * active scanning) only the flags are updated and a reply is sent
 * directly; otherwise the change is queued on the cmd_sync workqueue.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	/* 0x00 = off, 0x01 = on, 0x02 = on + connectable. */
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against other in-flight advertising/LE changes. */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6462
/* Handle MGMT_OP_SET_STATIC_ADDRESS: configure the LE static random
 * address. Only allowed while powered off. BDADDR_ANY clears the address;
 * any other value must be a valid static address (top two bits set and
 * not BDADDR_NONE, per the Core specification).
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6506
/* Handle MGMT_OP_SET_SCAN_PARAMS: set LE scan interval and window.
 * Both are validated against the HCI-allowed range 0x0004-0x4000 and the
 * window must not exceed the interval. An active background scan is
 * restarted so the new parameters take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);

	return err;
}
6555
/* Completion for MGMT_OP_SET_FAST_CONNECTABLE: on success sync the flag
 * with the requested value and emit New Settings; on failure report the
 * error status to the requester.
 */
static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6579
/* hci_cmd_sync work: write the requested fast-connectable page scan
 * parameters to the controller.
 */
static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	return hci_write_fast_connectable_sync(hdev, cp->val);
}
6587
/* Handle MGMT_OP_SET_FAST_CONNECTABLE: toggle interlaced page scan for
 * faster incoming BR/EDR connections. Requires BR/EDR enabled and a
 * controller of at least Bluetooth 1.2. When powered off only the flag is
 * toggled; otherwise the change is queued on the cmd_sync workqueue.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just echo the current settings back. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* Powered off: flip the flag only; HCI is updated on power on. */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6643
6644static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6645{
6646	struct mgmt_pending_cmd *cmd = data;
6647
6648	bt_dev_dbg(hdev, "err %d", err);
6649
6650	if (err) {
6651		u8 mgmt_err = mgmt_status(err);
6652
6653		/* We need to restore the flag if related HCI commands
6654		 * failed.
6655		 */
6656		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6657
6658		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6659	} else {
6660		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6661		new_settings(hdev, cmd->sk);
6662	}
6663
6664	mgmt_pending_free(cmd);
6665}
6666
6667static int set_bredr_sync(struct hci_dev *hdev, void *data)
6668{
6669	int status;
6670
6671	status = hci_write_fast_connectable_sync(hdev, false);
6672
6673	if (!status)
6674		status = hci_update_scan_sync(hdev);
6675
6676	/* Since only the advertising data flags will change, there
6677	 * is no need to update the scan response data.
6678	 */
6679	if (!status)
6680		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6681
6682	return status;
6683}
6684
/* Handler for MGMT_OP_SET_BREDR: enable/disable BR/EDR on a dual-mode
 * (BR/EDR + LE capable) controller.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only meaningful on controllers supporting both transports */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Toggling BR/EDR is rejected while LE is disabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	/* Mode parameter is a strict boolean */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Requested state already in effect: just acknowledge */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	/* Powered off: update flags only. Disabling BR/EDR also clears
	 * the BR/EDR-specific settings that depend on it.
	 */
	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	/* Queueing failed: the completion callback will never run, so
	 * report the failure and free the pending command here.
	 */
	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6784
6785static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6786{
6787	struct mgmt_pending_cmd *cmd = data;
6788	struct mgmt_mode *cp;
6789
6790	bt_dev_dbg(hdev, "err %d", err);
6791
6792	if (err) {
6793		u8 mgmt_err = mgmt_status(err);
6794
6795		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6796		goto done;
6797	}
6798
6799	cp = cmd->param;
6800
6801	switch (cp->val) {
6802	case 0x00:
6803		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6804		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6805		break;
6806	case 0x01:
6807		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6808		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6809		break;
6810	case 0x02:
6811		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6812		hci_dev_set_flag(hdev, HCI_SC_ONLY);
6813		break;
6814	}
6815
6816	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6817	new_settings(hdev, cmd->sk);
6818
6819done:
6820	mgmt_pending_free(cmd);
6821}
6822
6823static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6824{
6825	struct mgmt_pending_cmd *cmd = data;
6826	struct mgmt_mode *cp = cmd->param;
6827	u8 val = !!cp->val;
6828
6829	/* Force write of val */
6830	hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6831
6832	return hci_write_sc_support_sync(hdev, val);
6833}
6834
/* Handler for MGMT_OP_SET_SECURE_CONN.
 *
 * Mode values: 0x00 = disable, 0x01 = enable Secure Connections,
 * 0x02 = Secure Connections Only mode.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Needs either controller SC support or LE enabled (SC for LE is
	 * a host stack feature).
	 */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On an SC-capable BR/EDR controller, SSP must be enabled first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No HCI command needed when powered off, when the controller
	 * lacks SC support, or when BR/EDR is disabled: only the host
	 * flags are updated.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* Both the enabled state and the SC-only state already match */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	/* Queueing failed: the completion callback will never run, so
	 * report the failure and free the pending command here.
	 */
	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6915
/* Handler for MGMT_OP_SET_DEBUG_KEYS.
 *
 * Mode values: 0x00 = discard debug keys, 0x01 = keep debug keys,
 * 0x02 = keep debug keys and enable controller SSP debug mode.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Any non-zero value means debug keys are retained */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Only 0x02 additionally turns on active use of debug keys */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Reprogram the controller only when the "use" state actually
	 * changed and SSP is active on a powered controller.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6962
/* Handler for MGMT_OP_SET_PRIVACY.
 *
 * Privacy values: 0x00 = off, 0x01 = privacy on, 0x02 = limited privacy.
 * Only accepted while the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Changing the IRK/privacy setup of a running controller is not
	 * allowed; the command is rejected while powered.
	 */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		/* Store the new local IRK and force a fresh RPA to be
		 * generated (mark current ones expired).
		 */
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		/* Privacy off: wipe the IRK and clear all related flags */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7019
7020static bool irk_is_valid(struct mgmt_irk_info *irk)
7021{
7022	switch (irk->addr.type) {
7023	case BDADDR_LE_PUBLIC:
7024		return true;
7025
7026	case BDADDR_LE_RANDOM:
7027		/* Two most significant bits shall be set */
7028		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7029			return false;
7030		return true;
7031	}
7032
7033	return false;
7034}
7035
/* Handler for MGMT_OP_LOAD_IRKS: replace the kernel's IRK store with the
 * list supplied by user space.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps the total message length within U16_MAX */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must exactly match the received length */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate every entry before touching the existing IRK store */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type = le_addr_type(irk->addr.type);

		/* Administratively blocked keys are skipped, not stored */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		if (irk->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    addr_type, irk->val,
			    BDADDR_ANY);
	}

	/* A user space that loads IRKs is expected to resolve RPAs */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7111
7112static bool ltk_is_valid(struct mgmt_ltk_info *key)
7113{
7114	if (key->initiator != 0x00 && key->initiator != 0x01)
7115		return false;
7116
7117	switch (key->addr.type) {
7118	case BDADDR_LE_PUBLIC:
7119		return true;
7120
7121	case BDADDR_LE_RANDOM:
7122		/* Two most significant bits shall be set */
7123		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7124			return false;
7125		return true;
7126	}
7127
7128	return false;
7129}
7130
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS: replace the kernel's LTK
 * store with the list supplied by user space.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps the total message length within U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must exactly match the received length */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate every entry before touching the existing LTK store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;
		u8 addr_type = le_addr_type(key->addr.type);

		/* Administratively blocked keys are skipped, not stored */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* The fallthrough into the default case is
			 * deliberate: debug keys are never stored, so
			 * this entry is skipped like an unknown type.
			 */
			fallthrough;
		default:
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		if (key->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    addr_type, type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7231
7232static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7233{
7234	struct mgmt_pending_cmd *cmd = data;
7235	struct hci_conn *conn = cmd->user_data;
7236	struct mgmt_cp_get_conn_info *cp = cmd->param;
7237	struct mgmt_rp_get_conn_info rp;
7238	u8 status;
7239
7240	bt_dev_dbg(hdev, "err %d", err);
7241
7242	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7243
7244	status = mgmt_status(err);
7245	if (status == MGMT_STATUS_SUCCESS) {
7246		rp.rssi = conn->rssi;
7247		rp.tx_power = conn->tx_power;
7248		rp.max_tx_power = conn->max_tx_power;
7249	} else {
7250		rp.rssi = HCI_RSSI_INVALID;
7251		rp.tx_power = HCI_TX_POWER_INVALID;
7252		rp.max_tx_power = HCI_TX_POWER_INVALID;
7253	}
7254
7255	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7256			  &rp, sizeof(rp));
7257
7258	mgmt_pending_free(cmd);
7259}
7260
/* cmd_sync callback for MGMT_OP_GET_CONN_INFO: query the controller for
 * fresh RSSI and (when needed) TX power values for the connection.
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* NOTE(review): this path returns a positive MGMT status while
	 * the calls below return a negative errno on failure — confirm
	 * that the completion handler's mgmt_status() mapping handles
	 * both conventions.
	 */
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection for the completion handler to read the
	 * refreshed values from.
	 */
	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
7298
/* Handler for MGMT_OP_GET_CONN_INFO: return RSSI/TX power for an active
 * connection, refreshing the cached values when they are too old.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-fill the response with the requested address so error
	 * replies carry it too.
	 */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Look up the connection on the transport matching the address */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		/* Queueing failed: the completion callback will never
		 * run, so reply and free the pending command here.
		 */
		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7389
7390static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
7391{
7392	struct mgmt_pending_cmd *cmd = data;
7393	struct mgmt_cp_get_clock_info *cp = cmd->param;
7394	struct mgmt_rp_get_clock_info rp;
7395	struct hci_conn *conn = cmd->user_data;
7396	u8 status = mgmt_status(err);
7397
7398	bt_dev_dbg(hdev, "err %d", err);
7399
7400	memset(&rp, 0, sizeof(rp));
7401	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7402	rp.addr.type = cp->addr.type;
7403
7404	if (err)
7405		goto complete;
7406
7407	rp.local_clock = cpu_to_le32(hdev->clock);
7408
7409	if (conn) {
7410		rp.piconet_clock = cpu_to_le32(conn->clock);
7411		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7412	}
7413
7414complete:
7415	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7416			  sizeof(rp));
7417
7418	mgmt_pending_free(cmd);
7419}
7420
/* cmd_sync callback for MGMT_OP_GET_CLOCK_INFO: read the local clock
 * and, if the connection is still up, the piconet clock.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	/* With handle and which zeroed this first read fetches the
	 * local clock (which = 0x00).
	 */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_read_clock_sync(hdev, &hci_cp);

	/* Make sure connection still exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection so the completion handler can report the
	 * piconet clock values from it.
	 */
	cmd->user_data = conn;
	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */

	return hci_read_clock_sync(hdev, &hci_cp);
}
7442
/* Handler for MGMT_OP_GET_CLOCK_INFO: queue a clock read for the local
 * controller and optionally for a connected BR/EDR peer.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-fill the response with the requested address so error
	 * replies carry it too.
	 */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only available for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-zero address asks for a peer's piconet clock as well,
	 * which requires an active connection to that peer.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	/* Queueing failed: the completion callback will never run, so
	 * reply and free the pending command here.
	 */
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
7506
7507static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7508{
7509	struct hci_conn *conn;
7510
7511	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7512	if (!conn)
7513		return false;
7514
7515	if (conn->dst_type != type)
7516		return false;
7517
7518	if (conn->state != BT_CONNECTED)
7519		return false;
7520
7521	return true;
7522}
7523
/* This function requires the caller holds hdev->lock */
/* Create (or reuse) connection parameters for the address and move them
 * onto the pending-connection or pending-report list matching the new
 * auto_connect policy. Returns 0 on success, -EIO if the parameter
 * entry could not be allocated.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Policy unchanged: nothing to re-list */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Unlink from whichever pending list the params are on before
	 * re-adding them below according to the new policy.
	 */
	hci_pend_le_list_del_init(params);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect takes precedence over
		 * report-only handling.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		else
			hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connection attempt if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
7568
7569static void device_added(struct sock *sk, struct hci_dev *hdev,
7570			 bdaddr_t *bdaddr, u8 type, u8 action)
7571{
7572	struct mgmt_ev_device_added ev;
7573
7574	bacpy(&ev.addr.bdaddr, bdaddr);
7575	ev.addr.type = type;
7576	ev.action = action;
7577
7578	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7579}
7580
/* cmd_sync callback for MGMT_OP_ADD_DEVICE: re-program passive scanning
 * so the newly added device is taken into account.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7585
/* Handler for MGMT_OP_ADD_DEVICE.
 *
 * Actions: 0x00 = background scan for device (report only),
 * 0x01 = allow incoming connection, 0x02 = auto-connect.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Address must be a valid, non-zero BR/EDR or LE address */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* BR/EDR devices go on the accept list; page scanning is
		 * refreshed so the device can actually connect.
		 */
		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the kernel's auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	/* Refresh passive scanning so the new entry takes effect */
	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7687
7688static void device_removed(struct sock *sk, struct hci_dev *hdev,
7689			   bdaddr_t *bdaddr, u8 type)
7690{
7691	struct mgmt_ev_device_removed ev;
7692
7693	bacpy(&ev.addr.bdaddr, bdaddr);
7694	ev.addr.type = type;
7695
7696	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7697}
7698
/* cmd_sync work: refresh passive scanning after list changes (data unused). */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7703
/* Remove Device (MGMT_OP_REMOVE_DEVICE)
 *
 * With a specific address: drop the device from the BR/EDR accept list
 * or free its LE connection parameters. With BDADDR_ANY: flush the
 * whole accept list and all LE connection parameters, except entries
 * kept alive by an explicit connect attempt.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			/* Re-evaluate scan settings after the list change */
			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries in these states are not removable via mgmt */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_free(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* For the wildcard address the type must be 0 as well */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Flush the whole BR/EDR accept list */
		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		/* Drop all LE conn params; disabled entries are skipped and
		 * explicit-connect entries are kept, downgraded back to
		 * HCI_AUTO_CONN_EXPLICIT.
		 */
		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			hci_conn_params_free(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7827
/* Load Connection Parameters (MGMT_OP_LOAD_CONN_PARAM)
 *
 * Imports a list of LE connection parameters supplied by userspace.
 * Entries with an invalid address type or out-of-range values are
 * skipped individually rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that keeps the total command size within U16_MAX */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must exactly match the declared entry count */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	/* Remove previously disabled entries before importing the new set */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		/* Only LE address types are meaningful here */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		/* hci_conn_params_add() returns the entry to update */
		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7912
/* Set External Configuration (MGMT_OP_SET_EXTERNAL_CONFIG)
 *
 * Toggles HCI_EXT_CONFIGURED on controllers declaring
 * HCI_QUIRK_EXTERNAL_CONFIG. If the change flips the controller's
 * configured state, the mgmt index is removed and re-announced on the
 * appropriate (configured/unconfigured) list.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only allowed while the controller is powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* changed is true only when the flag actually flipped */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* Equal means the flag no longer matches reality: move the index
	 * between the configured and unconfigured lists.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7968
/* Set Public Address (MGMT_OP_SET_PUBLIC_ADDRESS)
 *
 * Stores a public address to be programmed through the driver's
 * set_bdaddr hook. If the address completes the configuration of an
 * unconfigured controller, power-on is scheduled so it re-registers
 * as a configured index.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only allowed while the controller is powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Requires driver support for programming the address */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* Address made the controller configured: re-announce the index */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8020
/* Completion handler for the HCI OOB data read issued for
 * MGMT_OP_READ_LOCAL_OOB_EXT_DATA. Builds the EIR-encoded reply
 * (P-192 and/or P-256 hash + randomizer) and also broadcasts it to
 * sockets subscribed to OOB data events.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Bail out if this is no longer the pending command */
	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	/* Derive the status from the controller reply when err was 0 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		/* NOTE(review): status already holds a mgmt status code here,
		 * so this second mgmt_status() mapping looks redundant —
		 * confirm it is a no-op for already-converted values.
		 */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy SSP: only the P-192 hash and randomizer exist */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* 5 byte class-of-dev field + two 18 byte OOB fields */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Secure Connections: P-256 always, P-192 unless SC-only */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On failure send an empty EIR payload with the error status */
	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* The requester implicitly subscribes to OOB data updates */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	/* kfree(NULL) is a no-op, so the early-exit paths are safe */
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
8143
8144static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8145				  struct mgmt_cp_read_local_oob_ext_data *cp)
8146{
8147	struct mgmt_pending_cmd *cmd;
8148	int err;
8149
8150	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8151			       cp, sizeof(*cp));
8152	if (!cmd)
8153		return -ENOMEM;
8154
8155	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8156				 read_local_oob_ext_data_complete);
8157
8158	if (err < 0) {
8159		mgmt_pending_remove(cmd);
8160		return err;
8161	}
8162
8163	return 0;
8164}
8165
/* Read Local OOB Extended Data (MGMT_OP_READ_LOCAL_OOB_EXT_DATA)
 *
 * For BR/EDR with SSP enabled the OOB data is read from the controller
 * asynchronously (read_local_ssp_oob_req); otherwise the EIR payload is
 * assembled synchronously from local state (address, role, SC confirm/
 * random values, flags).
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: determine status and a worst-case eir_len so the
	 * response buffer can be sized before building the payload.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: build the actual EIR payload */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Async path: reply is sent from the completion
			 * handler, so return without responding here.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* addr[6] encodes the address type: 0x01 random (static),
		 * 0x00 public
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		/* SC confirm/random values only when SC is enabled */
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* The requester implicitly subscribes to OOB data updates */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
8326
8327static u32 get_supported_adv_flags(struct hci_dev *hdev)
8328{
8329	u32 flags = 0;
8330
8331	flags |= MGMT_ADV_FLAG_CONNECTABLE;
8332	flags |= MGMT_ADV_FLAG_DISCOV;
8333	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8334	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8335	flags |= MGMT_ADV_FLAG_APPEARANCE;
8336	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8337	flags |= MGMT_ADV_PARAM_DURATION;
8338	flags |= MGMT_ADV_PARAM_TIMEOUT;
8339	flags |= MGMT_ADV_PARAM_INTERVALS;
8340	flags |= MGMT_ADV_PARAM_TX_POWER;
8341	flags |= MGMT_ADV_PARAM_SCAN_RSP;
8342
8343	/* In extended adv TX_POWER returned from Set Adv Param
8344	 * will be always valid.
8345	 */
8346	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8347		flags |= MGMT_ADV_FLAG_TX_POWER;
8348
8349	if (ext_adv_capable(hdev)) {
8350		flags |= MGMT_ADV_FLAG_SEC_1M;
8351		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8352		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8353
8354		if (le_2m_capable(hdev))
8355			flags |= MGMT_ADV_FLAG_SEC_2M;
8356
8357		if (le_coded_capable(hdev))
8358			flags |= MGMT_ADV_FLAG_SEC_CODED;
8359	}
8360
8361	return flags;
8362}
8363
/* Read Advertising Features (MGMT_OP_READ_ADV_FEATURES)
 *
 * Reports the supported advertising flags, data-size limits and the
 * list of currently registered advertising instance numbers.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One trailing byte per currently registered instance */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = max_adv_len(hdev);
	rp->max_scan_rsp_len = max_adv_len(hdev);
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		/* NOTE(review): the comparison below uses adv_instance_cnt
		 * while the comment above says le_num_of_adv_sets — confirm
		 * which bound is intended. The buffer is sized by
		 * adv_instance_cnt, so this condition cannot overflow rp.
		 */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			/* Hidden instance: shrink the reply accordingly */
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
8418
8419static u8 calculate_name_len(struct hci_dev *hdev)
8420{
8421	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
8422
8423	return eir_append_local_name(hdev, buf, 0);
8424}
8425
8426static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8427			   bool is_adv_data)
8428{
8429	u8 max_len = max_adv_len(hdev);
8430
8431	if (is_adv_data) {
8432		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8433				 MGMT_ADV_FLAG_LIMITED_DISCOV |
8434				 MGMT_ADV_FLAG_MANAGED_FLAGS))
8435			max_len -= 3;
8436
8437		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8438			max_len -= 3;
8439	} else {
8440		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8441			max_len -= calculate_name_len(hdev);
8442
8443		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8444			max_len -= 4;
8445	}
8446
8447	return max_len;
8448}
8449
8450static bool flags_managed(u32 adv_flags)
8451{
8452	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8453			    MGMT_ADV_FLAG_LIMITED_DISCOV |
8454			    MGMT_ADV_FLAG_MANAGED_FLAGS);
8455}
8456
8457static bool tx_power_managed(u32 adv_flags)
8458{
8459	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8460}
8461
8462static bool name_managed(u32 adv_flags)
8463{
8464	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8465}
8466
8467static bool appearance_managed(u32 adv_flags)
8468{
8469	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8470}
8471
8472static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8473			      u8 len, bool is_adv_data)
8474{
8475	int i, cur_len;
8476	u8 max_len;
8477
8478	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8479
8480	if (len > max_len)
8481		return false;
8482
8483	/* Make sure that the data is correctly formatted. */
8484	for (i = 0; i < len; i += (cur_len + 1)) {
8485		cur_len = data[i];
8486
8487		if (!cur_len)
8488			continue;
8489
8490		if (data[i + 1] == EIR_FLAGS &&
8491		    (!is_adv_data || flags_managed(adv_flags)))
8492			return false;
8493
8494		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8495			return false;
8496
8497		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8498			return false;
8499
8500		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8501			return false;
8502
8503		if (data[i + 1] == EIR_APPEARANCE &&
8504		    appearance_managed(adv_flags))
8505			return false;
8506
8507		/* If the current field length would exceed the total data
8508		 * length, then it's invalid.
8509		 */
8510		if (i + cur_len >= len)
8511			return false;
8512	}
8513
8514	return true;
8515}
8516
8517static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8518{
8519	u32 supported_flags, phy_flags;
8520
8521	/* The current implementation only supports a subset of the specified
8522	 * flags. Also need to check mutual exclusiveness of sec flags.
8523	 */
8524	supported_flags = get_supported_adv_flags(hdev);
8525	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8526	if (adv_flags & ~supported_flags ||
8527	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8528		return false;
8529
8530	return true;
8531}
8532
8533static bool adv_busy(struct hci_dev *hdev)
8534{
8535	return pending_find(MGMT_OP_SET_LE, hdev);
8536}
8537
8538static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8539			     int err)
8540{
8541	struct adv_info *adv, *n;
8542
8543	bt_dev_dbg(hdev, "err %d", err);
8544
8545	hci_dev_lock(hdev);
8546
8547	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8548		u8 instance;
8549
8550		if (!adv->pending)
8551			continue;
8552
8553		if (!err) {
8554			adv->pending = false;
8555			continue;
8556		}
8557
8558		instance = adv->instance;
8559
8560		if (hdev->cur_adv_instance == instance)
8561			cancel_adv_timeout(hdev);
8562
8563		hci_remove_adv_instance(hdev, instance);
8564		mgmt_advertising_removed(sk, hdev, instance);
8565	}
8566
8567	hci_dev_unlock(hdev);
8568}
8569
8570static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8571{
8572	struct mgmt_pending_cmd *cmd = data;
8573	struct mgmt_cp_add_advertising *cp = cmd->param;
8574	struct mgmt_rp_add_advertising rp;
8575
8576	memset(&rp, 0, sizeof(rp));
8577
8578	rp.instance = cp->instance;
8579
8580	if (err)
8581		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8582				mgmt_status(err));
8583	else
8584		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8585				  mgmt_status(err), &rp, sizeof(rp));
8586
8587	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8588
8589	mgmt_pending_free(cmd);
8590}
8591
8592static int add_advertising_sync(struct hci_dev *hdev, void *data)
8593{
8594	struct mgmt_pending_cmd *cmd = data;
8595	struct mgmt_cp_add_advertising *cp = cmd->param;
8596
8597	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8598}
8599
/* Add Advertising (MGMT_OP_ADD_ADVERTISING)
 *
 * Registers (or replaces) an advertising instance with the given
 * flags, data and scan response, then schedules it for transmission
 * when the controller state allows.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance numbers are 1-based and bounded by the adv set count */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Total length must match the two variable-length payloads */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running clock, i.e. a powered controller */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* adv data and scan rsp are stored back to back in cp->data */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8734
/* Completion callback for add_ext_adv_params_sync: report the assigned
 * TX power and remaining data space, or tear the instance down on error.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Instance may already be gone; nothing to report then */
	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
8784
/* hci_sync work: program the controller with the extended advertising
 * parameters of the instance created by add_ext_adv_params().
 */
static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;

	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
}
8792
/* MGMT_OP_ADD_EXT_ADV_PARAMS handler: create an advertising instance with
 * the requested parameters but no advertising/scan-response data yet; the
 * data is supplied separately via MGMT_OP_ADD_EXT_ADV_DATA.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	/* Instance numbers are 1-based and bounded by the controller limit */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		/* Legacy advertising: nothing to program at the controller
		 * yet, so answer immediately with no TX power preference.
		 */
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8908
/* Completion callback for MGMT_OP_ADD_EXT_ADV_DATA: run the shared
 * add-advertising bookkeeping first, then reply to the pending command and
 * free it.
 */
static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
	struct mgmt_rp_add_advertising rp;

	add_adv_complete(hdev, cmd->sk, cp->instance, err);

	memset(&rp, 0, sizeof(rp));

	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}
8930
8931static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8932{
8933	struct mgmt_pending_cmd *cmd = data;
8934	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8935	int err;
8936
8937	if (ext_adv_capable(hdev)) {
8938		err = hci_update_adv_data_sync(hdev, cp->instance);
8939		if (err)
8940			return err;
8941
8942		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8943		if (err)
8944			return err;
8945
8946		return hci_enable_ext_advertising_sync(hdev, cp->instance);
8947	}
8948
8949	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8950}
8951
/* MGMT_OP_ADD_EXT_ADV_DATA handler: attach advertising and scan-response
 * data to an instance previously created via MGMT_OP_ADD_EXT_ADV_PARAMS and
 * schedule it for advertising. On any failure after lookup the (new)
 * instance is torn down again via the clear_new_instance label.
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data: adv data is at cp->data[0..adv_data_len), the
	 * scan response follows immediately after it.
	 */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9070
9071static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9072					int err)
9073{
9074	struct mgmt_pending_cmd *cmd = data;
9075	struct mgmt_cp_remove_advertising *cp = cmd->param;
9076	struct mgmt_rp_remove_advertising rp;
9077
9078	bt_dev_dbg(hdev, "err %d", err);
9079
9080	memset(&rp, 0, sizeof(rp));
9081	rp.instance = cp->instance;
9082
9083	if (err)
9084		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9085				mgmt_status(err));
9086	else
9087		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9088				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9089
9090	mgmt_pending_free(cmd);
9091}
9092
9093static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9094{
9095	struct mgmt_pending_cmd *cmd = data;
9096	struct mgmt_cp_remove_advertising *cp = cmd->param;
9097	int err;
9098
9099	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9100	if (err)
9101		return err;
9102
9103	if (list_empty(&hdev->adv_instances))
9104		err = hci_disable_advertising_sync(hdev);
9105
9106	return err;
9107}
9108
/* MGMT_OP_REMOVE_ADVERTISING handler: validate the request (instance 0
 * means "all instances") and queue the synchronous removal work.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* A non-zero instance must refer to an existing advertising instance;
	 * instance 0 is accepted as long as any instance exists (see the
	 * list_empty() check below).
	 */
	hci_dev_lock(hdev);

	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9156
9157static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9158			     void *data, u16 data_len)
9159{
9160	struct mgmt_cp_get_adv_size_info *cp = data;
9161	struct mgmt_rp_get_adv_size_info rp;
9162	u32 flags, supported_flags;
9163
9164	bt_dev_dbg(hdev, "sock %p", sk);
9165
9166	if (!lmp_le_capable(hdev))
9167		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9168				       MGMT_STATUS_REJECTED);
9169
9170	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9171		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9172				       MGMT_STATUS_INVALID_PARAMS);
9173
9174	flags = __le32_to_cpu(cp->flags);
9175
9176	/* The current implementation only supports a subset of the specified
9177	 * flags.
9178	 */
9179	supported_flags = get_supported_adv_flags(hdev);
9180	if (flags & ~supported_flags)
9181		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9182				       MGMT_STATUS_INVALID_PARAMS);
9183
9184	rp.instance = cp->instance;
9185	rp.flags = cp->flags;
9186	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9187	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9188
9189	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9190				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9191}
9192
/* MGMT command dispatch table. The array position of each entry corresponds
 * to the command opcode (entry 0 is the unused opcode 0x0000), so entries
 * must never be reordered or removed independently of the opcode definitions.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
};
9326
/* Announce a newly registered controller to userspace: send the legacy
 * (un)configured index-added event plus the extended index event carrying
 * the controller type and bus.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	/* Raw devices are invisible to the management interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;	/* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;	/* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;	/* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
9358
/* Announce controller removal to userspace, fail all still-pending commands
 * with INVALID_INDEX, and cancel outstanding delayed work for managed
 * controllers.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw devices are invisible to the management interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Complete every pending command (opcode 0 = all) with
		 * INVALID_INDEX before the index disappears.
		 */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;	/* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;	/* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;	/* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}
9400
/* Called when a power-on attempt finishes: on success re-arm LE actions and
 * passive scanning, then answer all pending SET_POWERED commands and emit a
 * New Settings event.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	/* settings_rsp() records one requester socket in match.sk so the
	 * New Settings event can be skipped for it.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
9423
/* Power-off bookkeeping (caller holds the required locks): answer pending
 * SET_POWERED commands, fail all other pending commands, clear the class of
 * device and emit a New Settings event.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Only announce the zeroed class of device if it was non-zero */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9457
9458void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9459{
9460	struct mgmt_pending_cmd *cmd;
9461	u8 status;
9462
9463	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9464	if (!cmd)
9465		return;
9466
9467	if (err == -ERFKILL)
9468		status = MGMT_STATUS_RFKILLED;
9469	else
9470		status = MGMT_STATUS_FAILED;
9471
9472	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9473
9474	mgmt_pending_remove(cmd);
9475}
9476
/* Emit a New Link Key event for a freshly created BR/EDR link key.
 * @persistent: store hint telling userspace whether to keep the key.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	/* Zero the whole event first so no stack data leaks to userspace */
	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
9493
9494static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9495{
9496	switch (ltk->type) {
9497	case SMP_LTK:
9498	case SMP_LTK_RESPONDER:
9499		if (ltk->authenticated)
9500			return MGMT_LTK_AUTHENTICATED;
9501		return MGMT_LTK_UNAUTHENTICATED;
9502	case SMP_LTK_P256:
9503		if (ltk->authenticated)
9504			return MGMT_LTK_P256_AUTH;
9505		return MGMT_LTK_P256_UNAUTH;
9506	case SMP_LTK_P256_DEBUG:
9507		return MGMT_LTK_P256_DEBUG;
9508	}
9509
9510	return MGMT_LTK_UNAUTHENTICATED;
9511}
9512
/* Emit a New Long Term Key event. The store hint is suppressed for devices
 * using non-identity random addresses since their keys are useless after
 * the address changes.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	/* Zero the whole event first so no stack data leaks to userspace */
	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK marks the key created by the local initiator */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9555
/* Emit a New Identity Resolving Key event for a freshly distributed IRK. */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	/* Zero the whole event first so no stack data leaks to userspace */
	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
9571
/* Emit a New Signature Resolving Key event. As with LTKs, the store hint is
 * suppressed for devices using non-identity random addresses.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	/* Zero the whole event first so no stack data leaks to userspace */
	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
9601
/* Emit a New Connection Parameter event so userspace can persist updated LE
 * connection parameters. Only identity addresses are reported since
 * parameters for changing addresses cannot be stored meaningfully.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	/* Wire format is little endian */
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
9622
9623void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9624			   u8 *name, u8 name_len)
9625{
9626	struct sk_buff *skb;
9627	struct mgmt_ev_device_connected *ev;
9628	u16 eir_len = 0;
9629	u32 flags = 0;
9630
9631	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
9632		return;
9633
9634	/* allocate buff for LE or BR/EDR adv */
9635	if (conn->le_adv_data_len > 0)
9636		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9637				     sizeof(*ev) + conn->le_adv_data_len);
9638	else
9639		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9640				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9641				     eir_precalc_len(sizeof(conn->dev_class)));
9642
9643	ev = skb_put(skb, sizeof(*ev));
9644	bacpy(&ev->addr.bdaddr, &conn->dst);
9645	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9646
9647	if (conn->out)
9648		flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9649
9650	ev->flags = __cpu_to_le32(flags);
9651
9652	/* We must ensure that the EIR Data fields are ordered and
9653	 * unique. Keep it simple for now and avoid the problem by not
9654	 * adding any BR/EDR data to the LE adv.
9655	 */
9656	if (conn->le_adv_data_len > 0) {
9657		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9658		eir_len = conn->le_adv_data_len;
9659	} else {
9660		if (name)
9661			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9662
9663		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9664			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9665						    conn->dev_class, sizeof(conn->dev_class));
9666	}
9667
9668	ev->eir_len = cpu_to_le16(eir_len);
9669
9670	mgmt_event_skb(skb, NULL);
9671}
9672
/* mgmt_pending_foreach() callback: complete a pending DISCONNECT command and
 * hand the requester's socket (with a reference taken) back to the caller
 * via @data so the disconnect event can be skipped for it.
 */
static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct sock **sk = data;

	cmd->cmd_complete(cmd, 0);

	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
9684
/* mgmt_pending_foreach() callback: notify userspace about the unpaired
 * device and complete the pending UNPAIR_DEVICE command with success.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
9695
9696bool mgmt_powering_down(struct hci_dev *hdev)
9697{
9698	struct mgmt_pending_cmd *cmd;
9699	struct mgmt_mode *cp;
9700
9701	if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
9702		return true;
9703
9704	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9705	if (!cmd)
9706		return false;
9707
9708	cp = cmd->param;
9709	if (!cp->val)
9710		return true;
9711
9712	return false;
9713}
9714
/* Emit a Device Disconnected event for an ACL or LE link, completing any
 * pending DISCONNECT and UNPAIR_DEVICE commands along the way.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* Only report connections that were announced via
	 * mgmt_device_connected() in the first place.
	 */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp() stores the requester's socket in sk so the event
	 * below can be skipped for it.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
9746
9747void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9748			    u8 link_type, u8 addr_type, u8 status)
9749{
9750	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9751	struct mgmt_cp_disconnect *cp;
9752	struct mgmt_pending_cmd *cmd;
9753
9754	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9755			     hdev);
9756
9757	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9758	if (!cmd)
9759		return;
9760
9761	cp = cmd->param;
9762
9763	if (bacmp(bdaddr, &cp->addr.bdaddr))
9764		return;
9765
9766	if (cp->addr.type != bdaddr_type)
9767		return;
9768
9769	cmd->cmd_complete(cmd, mgmt_status(status));
9770	mgmt_pending_remove(cmd);
9771}
9772
9773void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9774			 u8 addr_type, u8 status)
9775{
9776	struct mgmt_ev_connect_failed ev;
9777
9778	bacpy(&ev.addr.bdaddr, bdaddr);
9779	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9780	ev.status = mgmt_status(status);
9781
9782	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9783}
9784
9785void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9786{
9787	struct mgmt_ev_pin_code_request ev;
9788
9789	bacpy(&ev.addr.bdaddr, bdaddr);
9790	ev.addr.type = BDADDR_BREDR;
9791	ev.secure = secure;
9792
9793	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9794}
9795
9796void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9797				  u8 status)
9798{
9799	struct mgmt_pending_cmd *cmd;
9800
9801	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9802	if (!cmd)
9803		return;
9804
9805	cmd->cmd_complete(cmd, mgmt_status(status));
9806	mgmt_pending_remove(cmd);
9807}
9808
9809void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9810				      u8 status)
9811{
9812	struct mgmt_pending_cmd *cmd;
9813
9814	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9815	if (!cmd)
9816		return;
9817
9818	cmd->cmd_complete(cmd, mgmt_status(status));
9819	mgmt_pending_remove(cmd);
9820}
9821
9822int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9823			      u8 link_type, u8 addr_type, u32 value,
9824			      u8 confirm_hint)
9825{
9826	struct mgmt_ev_user_confirm_request ev;
9827
9828	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9829
9830	bacpy(&ev.addr.bdaddr, bdaddr);
9831	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9832	ev.confirm_hint = confirm_hint;
9833	ev.value = cpu_to_le32(value);
9834
9835	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9836			  NULL);
9837}
9838
9839int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9840			      u8 link_type, u8 addr_type)
9841{
9842	struct mgmt_ev_user_passkey_request ev;
9843
9844	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9845
9846	bacpy(&ev.addr.bdaddr, bdaddr);
9847	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9848
9849	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9850			  NULL);
9851}
9852
9853static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9854				      u8 link_type, u8 addr_type, u8 status,
9855				      u8 opcode)
9856{
9857	struct mgmt_pending_cmd *cmd;
9858
9859	cmd = pending_find(opcode, hdev);
9860	if (!cmd)
9861		return -ENOENT;
9862
9863	cmd->cmd_complete(cmd, mgmt_status(status));
9864	mgmt_pending_remove(cmd);
9865
9866	return 0;
9867}
9868
9869int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9870				     u8 link_type, u8 addr_type, u8 status)
9871{
9872	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9873					  status, MGMT_OP_USER_CONFIRM_REPLY);
9874}
9875
9876int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9877					 u8 link_type, u8 addr_type, u8 status)
9878{
9879	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9880					  status,
9881					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
9882}
9883
9884int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9885				     u8 link_type, u8 addr_type, u8 status)
9886{
9887	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9888					  status, MGMT_OP_USER_PASSKEY_REPLY);
9889}
9890
9891int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9892					 u8 link_type, u8 addr_type, u8 status)
9893{
9894	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9895					  status,
9896					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
9897}
9898
9899int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9900			     u8 link_type, u8 addr_type, u32 passkey,
9901			     u8 entered)
9902{
9903	struct mgmt_ev_passkey_notify ev;
9904
9905	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9906
9907	bacpy(&ev.addr.bdaddr, bdaddr);
9908	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9909	ev.passkey = __cpu_to_le32(passkey);
9910	ev.entered = entered;
9911
9912	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9913}
9914
/* Report an authentication failure for @conn and complete any pending
 * pairing command associated with it.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);	/* translate HCI -> mgmt status */

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Skip the socket that issued the pairing command; it gets the
	 * command completion below instead of the broadcast event.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9935
/* Handle completion of enabling/disabling HCI authentication: mirror the
 * controller's HCI_AUTH state into the HCI_LINK_SECURITY mgmt flag and
 * respond to pending Set Link Security commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		/* Fail all pending Set Link Security commands */
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* 'changed' is true only if the mgmt flag actually flipped */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* Drop the socket reference presumably taken by settings_rsp()
	 * (same pattern as sk_lookup() below) — confirm against mgmt_util.
	 */
	if (match.sk)
		sock_put(match.sk);
}
9962
9963static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9964{
9965	struct cmd_lookup *match = data;
9966
9967	if (match->sk == NULL) {
9968		match->sk = cmd->sk;
9969		sock_hold(match->sk);
9970	}
9971}
9972
/* Handle completion of a class-of-device update: on success broadcast
 * MGMT_EV_CLASS_OF_DEV_CHANGED and notify extended-info listeners.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Collect (and reference, via sk_lookup()) the socket of the first
	 * pending command that could have triggered this change.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		/* dev_class is the 3-byte Class of Device value */
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	/* Drop the reference taken in sk_lookup() */
	if (match.sk)
		sock_put(match.sk);
}
9991
/* Handle completion of a local-name update and broadcast
 * MGMT_EV_LOCAL_NAME_CHANGED, unless the write was part of a power
 * transition (in which case no mgmt signal is sent).
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* Name change did not come via mgmt; cache it locally */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on or off
		 * the HCI dev don't send any mgmt signals.
		 */
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
			return;

		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the socket that issued Set Local Name, if any */
	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
10022
10023static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10024{
10025	int i;
10026
10027	for (i = 0; i < uuid_count; i++) {
10028		if (!memcmp(uuid, uuids[i], 16))
10029			return true;
10030	}
10031
10032	return false;
10033}
10034
/* Scan an EIR/advertising data blob for any service UUID (16-, 32- or
 * 128-bit) that matches one of the @uuid_count entries in @uuids. Shorter
 * UUIDs are expanded to 128-bit form using the Bluetooth base UUID before
 * comparison. Returns true on the first match.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];	/* length excludes the length byte itself */
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the data */
		if (field_len == 0)
			break;

		/* Stop on a truncated final field */
		if (eir_len - parsed < field_len + 1)
			break;

		/* eir[1] is the AD type; UUID bytes start at eir[2],
		 * little-endian within each UUID.
		 */
		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + payload) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
10089
/* Apply the active service-discovery filter (RSSI threshold and/or UUID
 * list) to a discovery result. Returns false if the result must be
 * dropped.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the EIR/advertising
		 * data and the scan response are searched.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
10132
10133void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
10134				  bdaddr_t *bdaddr, u8 addr_type)
10135{
10136	struct mgmt_ev_adv_monitor_device_lost ev;
10137
10138	ev.monitor_handle = cpu_to_le16(handle);
10139	bacpy(&ev.addr.bdaddr, bdaddr);
10140	ev.addr.type = addr_type;
10141
10142	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
10143		   NULL);
10144}
10145
/* Emit MGMT_EV_ADV_MONITOR_DEVICE_FOUND built from an existing
 * DEVICE_FOUND event skb plus the matched monitor @handle. @skb is only
 * read; the caller keeps ownership of it.
 */
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	/* Event size is the DEVICE_FOUND payload plus the extra fields of
	 * mgmt_ev_adv_monitor_device_found (the monitor handle).
	 */
	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	/* mgmt_event_skb() consumes advmon_skb */
	mgmt_event_skb(advmon_skb, skip_sk);
}
10175
/* Route a discovery result to DEVICE_FOUND and/or ADV_MONITOR_DEVICE_FOUND
 * events depending on why the advertisement was received. Always consumes
 * @skb (either via mgmt_event_skb() or kfree_skb()).
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		/* Fast path: no monitor notification pending */
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	/* Recomputed below: stays false only if every monitored device has
	 * already been notified.
	 */
	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			/* First report for this device: send the event with
			 * the real monitor handle, exactly once.
			 */
			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	/* Either hand the skb to mgmt or free it; never leak it */
	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
10239
/* Emit MGMT_EV_MESH_DEVICE_FOUND for an LE advertisement, but only if the
 * advertising or scan-response data contains at least one of the AD types
 * listed in hdev->mesh_ad_types (an empty list accepts everything).
 */
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	/* No filter configured: accept everything */
	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		/* eir[i] is each field's length byte, eir[i + 1] its AD type */
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				/* The list is zero-terminated */
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	/* No requested AD type present: drop silently */
	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
10305
/* Build and dispatch a MGMT_EV_DEVICE_FOUND event for a discovery result,
 * after applying mesh forwarding, discovery filters (RSSI/UUID), and the
 * limited-discovery check.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* LE advertisements are additionally forwarded to mesh listeners */
	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			/* BR/EDR: bit 5 of the second CoD byte */
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			/* LE: check the Flags AD field instead */
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Synthesize an EIR Class of Device field if the peer did not
	 * include one but we know its CoD from inquiry.
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	/* Consumes skb */
	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
10397
10398void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10399		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10400{
10401	struct sk_buff *skb;
10402	struct mgmt_ev_device_found *ev;
10403	u16 eir_len = 0;
10404	u32 flags = 0;
10405
10406	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10407			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
10408
10409	ev = skb_put(skb, sizeof(*ev));
10410	bacpy(&ev->addr.bdaddr, bdaddr);
10411	ev->addr.type = link_to_bdaddr(link_type, addr_type);
10412	ev->rssi = rssi;
10413
10414	if (name)
10415		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10416	else
10417		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
10418
10419	ev->eir_len = cpu_to_le16(eir_len);
10420	ev->flags = cpu_to_le32(flags);
10421
10422	mgmt_event_skb(skb, NULL);
10423}
10424
10425void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10426{
10427	struct mgmt_ev_discovering ev;
10428
10429	bt_dev_dbg(hdev, "discovering %u", discovering);
10430
10431	memset(&ev, 0, sizeof(ev));
10432	ev.type = hdev->discovery.type;
10433	ev.discovering = discovering;
10434
10435	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
10436}
10437
10438void mgmt_suspending(struct hci_dev *hdev, u8 state)
10439{
10440	struct mgmt_ev_controller_suspend ev;
10441
10442	ev.suspend_state = state;
10443	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
10444}
10445
10446void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10447		   u8 addr_type)
10448{
10449	struct mgmt_ev_controller_resume ev;
10450
10451	ev.wake_reason = reason;
10452	if (bdaddr) {
10453		bacpy(&ev.addr.bdaddr, bdaddr);
10454		ev.addr.type = addr_type;
10455	} else {
10456		memset(&ev.addr, 0, sizeof(ev.addr));
10457	}
10458
10459	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
10460}
10461
/* Registration descriptor for the mgmt control channel: dispatch table
 * plus per-hdev initialization hook.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
10468
/* Register the mgmt control channel. Returns 0 or a negative errno. */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
10473
/* Unregister the mgmt control channel registered by mgmt_init(). */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
10478
10479void mgmt_cleanup(struct sock *sk)
10480{
10481	struct mgmt_mesh_tx *mesh_tx;
10482	struct hci_dev *hdev;
10483
10484	read_lock(&hci_dev_list_lock);
10485
10486	list_for_each_entry(hdev, &hci_dev_list, list) {
10487		do {
10488			mesh_tx = mgmt_mesh_next(hdev, sk);
10489
10490			if (mesh_tx)
10491				mesh_send_complete(hdev, mesh_tx, true);
10492		} while (mesh_tx);
10493	}
10494
10495	read_unlock(&hci_dev_list_lock);
10496}
10497