/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010 Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	22

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};

static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};

#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

static u8 mgmt_errno_status(int err)
{
	switch (err) {
	case 0:
		return MGMT_STATUS_SUCCESS;
	case -EPERM:
		return MGMT_STATUS_REJECTED;
	case -EINVAL:
		return MGMT_STATUS_INVALID_PARAMS;
	case -EOPNOTSUPP:
		return MGMT_STATUS_NOT_SUPPORTED;
	case -EBUSY:
		return MGMT_STATUS_BUSY;
	case -ETIMEDOUT:
		return MGMT_STATUS_AUTH_FAILED;
	case -ENOMEM:
		return MGMT_STATUS_NO_RESOURCES;
	case -EISCONN:
		return MGMT_STATUS_ALREADY_CONNECTED;
	case -ENOTCONN:
		return MGMT_STATUS_DISCONNECTED;
	}

	return MGMT_STATUS_FAILED;
}

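/* Convert an error into a MGMT status code. Callers pass either a
 * negative errno (as produced by the hci_sync machinery) or a
 * non-negative HCI status code from the controller: negative values go
 * through the errno switch above, while in-range positive values index
 * the HCI-to-MGMT conversion table. Anything else falls back to
 * MGMT_STATUS_FAILED; e.g. both mgmt_status(-EBUSY) and
 * mgmt_status(0x0c) ("Command Disallowed") map to MGMT_STATUS_BUSY.
 */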
static u8 mgmt_status(int err)
{
	if (err < 0)
		return mgmt_errno_status(err);

	if (err < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[err];

	return MGMT_STATUS_FAILED;
}

static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}

static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}

void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}

static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}

static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}

static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

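/* Report which configuration options are still missing before the
 * controller counts as configured: MGMT_OPTION_EXTERNAL_CONFIG if
 * external configuration is required but not yet done, and
 * MGMT_OPTION_PUBLIC_ADDRESS if a valid public address still has to be
 * set. These are the same two checks as in is_configured() above, so a
 * return value of zero is equivalent to is_configured() being true.
 */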
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}

static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}

static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}

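/* Only the non-mandatory PHYs may be reconfigured by user space:
 * BR 1M 1-slot is mandatory for any BR/EDR controller and LE 1M TX/RX
 * for any LE controller, so those bits are masked out of the supported
 * set.
 */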
static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}

static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then whether
	 * the address is actually in use decides if the flag is set.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	return settings;
}

static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

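/* Return the discoverability flags for the Flags AD type of the
 * advertising data: LE_AD_GENERAL for general discoverable mode,
 * LE_AD_LIMITED for limited discoverable mode, or 0 when
 * non-discoverable.
 */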
u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}

bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}

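/* Flush the cached service state to the controller by regenerating
 * both the EIR data and the class of device. Queued via
 * hci_cmd_sync_queue() from service_cache_off() below once the
 * HCI_SERVICE_CACHE flag has been cleared.
 */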
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}

static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_update_discoverable(hdev);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);

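/* Finish one outstanding mesh transmission: unless asked to stay
 * silent, notify user space with a MGMT_EV_MESH_PACKET_CMPLT event
 * carrying the packet handle, then drop the tracking entry.
 */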
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}

static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}

static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}

static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}

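/* One-time switch of a controller into mgmt mode, done when the first
 * mgmt command addresses it. Sets up the delayed work items used in
 * this file and marks the device with HCI_MGMT so the setup is not
 * repeated.
 */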
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them. For mgmt,
	 * however, we require user-space to explicitly enable
	 * it.
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}

static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}

static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}

static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where the device might not
		 * "really" have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}

static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}

static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}

static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		__hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

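/* Iterator callback for mgmt_pending_foreach(): answer each pending
 * command with the current settings and remember the first socket seen
 * in the cmd_lookup match, so that the caller can skip it when
 * broadcasting the subsequent New Settings event.
 */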
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	if (cmd->cmd_complete) {
		u8 *status = data;

		cmd->cmd_complete(cmd, *status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}

static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}

static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);

		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}

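/* Set Discoverable handler: cp->val is 0x00 (off), 0x01 (general
 * discoverable) or 0x02 (limited discoverable). A timeout must not be
 * given when disabling and is mandatory for limited mode; the checks
 * below enforce this before any state is touched.
 */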
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);

			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might still
	 * be active and store the new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
1657 cancel_delayed_work(dwork: &hdev->discov_off);
1658 hdev->discov_timeout = timeout;
1659
1660 if (cp->val)
1661 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1662 else
1663 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1664
1665 /* Limited discoverable mode */
1666 if (cp->val == 0x02)
1667 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1668 else
1669 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1670
1671 err = hci_cmd_sync_queue(hdev, func: set_discoverable_sync, data: cmd,
1672 destroy: mgmt_set_discoverable_complete);
1673
1674 if (err < 0)
1675 mgmt_pending_remove(cmd);
1676
1677failed:
1678 hci_dev_unlock(hdev);
1679 return err;
1680}
1681
1682static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1683 int err)
1684{
1685 struct mgmt_pending_cmd *cmd = data;
1686
1687 bt_dev_dbg(hdev, "err %d", err);
1688
1689 /* Make sure cmd still outstanding. */
1690 if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1691 return;
1692
1693 hci_dev_lock(hdev);
1694
1695 if (err) {
1696 u8 mgmt_err = mgmt_status(err);
1697 mgmt_cmd_status(sk: cmd->sk, index: cmd->index, cmd: cmd->opcode, status: mgmt_err);
1698 goto done;
1699 }
1700
1701 send_settings_rsp(sk: cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1702 new_settings(hdev, skip: cmd->sk);
1703
1704done:
1705 if (cmd)
1706 mgmt_pending_remove(cmd);
1707
1708 hci_dev_unlock(hdev);
1709}
1710
1711static int set_connectable_update_settings(struct hci_dev *hdev,
1712 struct sock *sk, u8 val)
1713{
1714 bool changed = false;
1715 int err;
1716
1717 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1718 changed = true;
1719
1720 if (val) {
1721 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1722 } else {
1723 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1724 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1725 }
1726
1727 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1728 if (err < 0)
1729 return err;
1730
1731 if (changed) {
1732 hci_update_scan(hdev);
1733 hci_update_passive_scan(hdev);
1734 return new_settings(hdev, skip: sk);
1735 }
1736
1737 return 0;
1738}
1739
1740static int set_connectable_sync(struct hci_dev *hdev, void *data)
1741{
1742 BT_DBG("%s", hdev->name);
1743
1744 return hci_update_connectable_sync(hdev);
1745}
1746
1747static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1748 u16 len)
1749{
1750 struct mgmt_mode *cp = data;
1751 struct mgmt_pending_cmd *cmd;
1752 int err;
1753
1754 bt_dev_dbg(hdev, "sock %p", sk);
1755
1756 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1757 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1758 return mgmt_cmd_status(sk, index: hdev->id, MGMT_OP_SET_CONNECTABLE,
1759 MGMT_STATUS_REJECTED);
1760
1761 if (cp->val != 0x00 && cp->val != 0x01)
1762 return mgmt_cmd_status(sk, index: hdev->id, MGMT_OP_SET_CONNECTABLE,
1763 MGMT_STATUS_INVALID_PARAMS);
1764
1765 hci_dev_lock(hdev);
1766
1767 if (!hdev_is_powered(hdev)) {
1768 err = set_connectable_update_settings(hdev, sk, val: cp->val);
1769 goto failed;
1770 }
1771
1772 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1773 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1774 err = mgmt_cmd_status(sk, index: hdev->id, MGMT_OP_SET_CONNECTABLE,
1775 MGMT_STATUS_BUSY);
1776 goto failed;
1777 }
1778
1779 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1780 if (!cmd) {
1781 err = -ENOMEM;
1782 goto failed;
1783 }
1784
1785 if (cp->val) {
1786 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1787 } else {
1788 if (hdev->discov_timeout > 0)
1789 cancel_delayed_work(dwork: &hdev->discov_off);
1790
1791 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1792 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1793 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1794 }
1795
1796 err = hci_cmd_sync_queue(hdev, func: set_connectable_sync, data: cmd,
1797 destroy: mgmt_set_connectable_complete);
1798
1799 if (err < 0)
1800 mgmt_pending_remove(cmd);
1801
1802failed:
1803 hci_dev_unlock(hdev);
1804 return err;
1805}
1806
1807static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1808 u16 len)
1809{
1810 struct mgmt_mode *cp = data;
1811 bool changed;
1812 int err;
1813
1814 bt_dev_dbg(hdev, "sock %p", sk);
1815
1816 if (cp->val != 0x00 && cp->val != 0x01)
1817 return mgmt_cmd_status(sk, index: hdev->id, MGMT_OP_SET_BONDABLE,
1818 MGMT_STATUS_INVALID_PARAMS);
1819
1820 hci_dev_lock(hdev);
1821
1822 if (cp->val)
1823 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1824 else
1825 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1826
1827 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1828 if (err < 0)
1829 goto unlock;
1830
1831 if (changed) {
1832 /* In limited privacy mode the change of bondable mode
1833 * may affect the local advertising address.
1834 */
1835 hci_update_discoverable(hdev);
1836
1837 err = new_settings(hdev, skip: sk);
1838 }
1839
1840unlock:
1841 hci_dev_unlock(hdev);
1842 return err;
1843}
1844
1845static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1846 u16 len)
1847{
1848 struct mgmt_mode *cp = data;
1849 struct mgmt_pending_cmd *cmd;
1850 u8 val, status;
1851 int err;
1852
1853 bt_dev_dbg(hdev, "sock %p", sk);
1854
1855 status = mgmt_bredr_support(hdev);
1856 if (status)
1857 return mgmt_cmd_status(sk, index: hdev->id, MGMT_OP_SET_LINK_SECURITY,
1858 status);
1859
1860 if (cp->val != 0x00 && cp->val != 0x01)
1861 return mgmt_cmd_status(sk, index: hdev->id, MGMT_OP_SET_LINK_SECURITY,
1862 MGMT_STATUS_INVALID_PARAMS);
1863
1864 hci_dev_lock(hdev);
1865
1866 if (!hdev_is_powered(hdev)) {
1867 bool changed = false;
1868
1869 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1870 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1871 changed = true;
1872 }
1873
1874 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1875 if (err < 0)
1876 goto failed;
1877
1878 if (changed)
1879 err = new_settings(hdev, skip: sk);
1880
1881 goto failed;
1882 }
1883
1884 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1885 err = mgmt_cmd_status(sk, index: hdev->id, MGMT_OP_SET_LINK_SECURITY,
1886 MGMT_STATUS_BUSY);
1887 goto failed;
1888 }
1889
1890 val = !!cp->val;
1891
1892 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1893 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1894 goto failed;
1895 }
1896
1897 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1898 if (!cmd) {
1899 err = -ENOMEM;
1900 goto failed;
1901 }
1902
1903 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, plen: sizeof(val), param: &val);
1904 if (err < 0) {
1905 mgmt_pending_remove(cmd);
1906 goto failed;
1907 }
1908
1909failed:
1910 hci_dev_unlock(hdev);
1911 return err;
1912}
1913
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);

		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}

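/* HCI_SSP_ENABLED is set before the mode write and reverted again on
 * success: set_ssp_complete() re-applies the flag itself and needs to
 * observe the old state so it can detect the change and emit New
 * Settings exactly once.
 */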
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}

static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

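/* When LE is being disabled, all advertising is torn down first; the
 * actual host feature change is then written with
 * hci_write_le_host_supported_sync().
 */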
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}

static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	struct sock *sk = cmd->sk;

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	mgmt_pending_remove(cmd);
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
}

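/* Store the AD type filter list supplied with the command and toggle
 * the HCI_MESH flag; passive scanning is refreshed so that the new
 * filter takes effect immediately.
 */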
static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_mesh *cp = cmd->param;
	size_t len = cmd->param_len;

	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

	if (cp->enable)
		hci_dev_set_flag(hdev, HCI_MESH);
	else
		hci_dev_clear_flag(hdev, HCI_MESH);

	len -= sizeof(*cp);

	/* If filters don't fit, forward all adv pkts */
	if (len <= sizeof(hdev->mesh_ad_types))
		memcpy(hdev->mesh_ad_types, cp->ad_types, len);

	hci_update_passive_scan_sync(hdev);
	return 0;
}

static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}

static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	unsigned long mesh_send_interval;
	u8 mgmt_err = mgmt_status(err);

	/* Report any errors here, but don't report completion */

	if (mgmt_err) {
		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
		/* Send Complete Error Code for handle */
		mesh_send_complete(hdev, mesh_tx, false);
		return;
	}

	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
			   mesh_send_interval);
}

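/* Mesh packets are transmitted by programming them as a short-lived
 * advertising instance one slot past the controller's advertising
 * sets, so a free advertising set must be available.
 */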
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other is
		 * pending, or let it go out naturally from the queue if
		 * advertising is already happening.
		 */
		instance = 0;
	}

	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}

static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
{
	struct mgmt_rp_mesh_read_features *rp = data;

	if (rp->used_handles >= rp->max_handles)
		return;

	rp->handles[rp->used_handles++] = mesh_tx->handle;
}

static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}

static int send_cancel(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
	struct mgmt_mesh_tx *mesh_tx;

	if (!cancel->handle) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, false);
		} while (mesh_tx);
	} else {
		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);

		if (mesh_tx && mesh_tx->sk == cmd->sk)
			mesh_send_complete(hdev, mesh_tx, false);
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
			  0, NULL, 0);
	mgmt_pending_free(cmd);

	return 0;
}

static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);
	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}

static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct mgmt_cp_mesh_send *send = data;
	struct mgmt_rp_mesh_read_features rp;
	bool sending;
	int err = 0;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_NOT_SUPPORTED);
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    len <= MGMT_MESH_SEND_SIZE ||
	    len > (MGMT_MESH_SEND_SIZE + 31))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.max_handles = MESH_HANDLES_MAX;

	mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	if (rp.max_handles <= rp.used_handles) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);

	if (!mesh_tx)
		err = -ENOMEM;
	else if (!sending)
		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
					 mesh_send_start_complete);

	if (err < 0) {
		bt_dev_err(hdev, "Send Mesh Failed %d", err);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_FAILED);

		if (mesh_tx && sending)
			mgmt_mesh_remove(mesh_tx);
	} else {
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);

		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
				  &mesh_tx->handle, 1);
	}

done:
	hci_dev_unlock(hdev);
	return err;
}

static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result in rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, what they will be, and based on that
 * calculate whether a new HCI command needs to be sent and, if so,
 * with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}

static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

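/* UUIDs are stored in little-endian byte order. If the first 12 bytes
 * match the tail of the Bluetooth Base UUID, the value is a 16-bit or
 * 32-bit alias, depending on how much of the remaining 4 bytes is used.
 */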
static u8 get_uuid_size(const u8 *uuid)
{
	u32 val;

	if (memcmp(uuid, bluetooth_base_uuid, 12))
		return 128;

	val = get_unaligned_le32(&uuid[12]);
	if (val > 0xffff)
		return 32;

	return 16;
}

static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	mgmt_pending_free(cmd);
}

static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);
	if (err)
		return err;

	return hci_update_eir_sync(hdev);
}

static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
		return true;
	}

	return false;
}

static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);
	if (err)
		return err;

	return hci_update_eir_sync(hdev);
}

static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_class_sync(struct hci_dev *hdev, void *data)
{
	int err = 0;

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		cancel_delayed_work_sync(&hdev->service_cache);
		err = hci_update_eir_sync(hdev);
	}

	if (err)
		return err;

	return hci_update_class_sync(hdev);
}

static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

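/* Load Link Keys replaces the entire key list: any stored keys are
 * cleared first, blocked and debug combination keys are skipped, and
 * the remaining keys are added back one by one.
 */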
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}

static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}

static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	if (!err)
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, err);
	mgmt_pending_free(cmd);
}

static int unpair_device_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct hci_conn *conn;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn)
		return 0;

	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
}

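/* Unpair Device removes the stored keys for the given address and, if
 * requested, also terminates an existing connection to the device.
 */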
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}

	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a re-pairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

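/* Map an HCI link type and address type to the corresponding BDADDR_*
 * address type exposed over the management interface.
 */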
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	switch (link_type) {
	case LE_LINK:
		switch (addr_type) {
		case ADDR_LE_DEV_PUBLIC:
			return BDADDR_LE_PUBLIC;

		default:
			/* Fallback to LE Random address type */
			return BDADDR_LE_RANDOM;
		}

	default:
		/* Fallback to BR/EDR type */
		return BDADDR_BREDR;
	}
}

static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}

static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}

static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
			continue;

		if (cmd->user_data != conn)
			continue;

		return cmd;
	}

	return NULL;
}

static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}

void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
	struct mgmt_pending_cmd *cmd;

	cmd = find_pairing(conn);
	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		/* hci_conn_params_add() can return NULL on allocation
		 * failure, so bail out instead of dereferencing it.
		 */
		if (!p) {
			err = -EIO;
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = hci_conn_get(conn);

	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

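/* Common handler for all user pairing responses: LE replies are routed
 * through SMP, while BR/EDR replies are sent to the controller as the
 * given HCI command.
 */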
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}

static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}

static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}

static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}

static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}

static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}

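/* If the current advertising instance carries data affected by one of
 * the given flags (e.g. local name or appearance), expire it and
 * schedule the next instance so that stale data is not advertised.
 */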
static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return 0;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return 0;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return 0;

	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);

	return 0;
}

static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}

static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}

static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data, so there is no
	 * need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}

static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}

static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != appearance) {
		hdev->appearance = appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}

3953static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3954 void *data, u16 len)
3955{
3956 struct mgmt_rp_get_phy_configuration rp;
3957
3958 bt_dev_dbg(hdev, "sock %p", sk);
3959
3960 hci_dev_lock(hdev);
3961
3962 memset(&rp, 0, sizeof(rp));
3963
3964 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3965 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3966 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3967
3968 hci_dev_unlock(hdev);
3969
3970 return mgmt_cmd_complete(sk, index: hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, status: 0,
3971 rp: &rp, rp_len: sizeof(rp));
3972}
3973
3974int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3975{
3976 struct mgmt_ev_phy_configuration_changed ev;
3977
3978 memset(&ev, 0, sizeof(ev));
3979
3980 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3981
3982 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, data: &ev,
3983 len: sizeof(ev), skip_sk: skip);
3984}
3985
3986static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3987{
3988 struct mgmt_pending_cmd *cmd = data;
3989 struct sk_buff *skb = cmd->skb;
3990 u8 status = mgmt_status(err);
3991
3992 if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
3993 return;
3994
3995 if (!status) {
3996 if (!skb)
3997 status = MGMT_STATUS_FAILED;
3998 else if (IS_ERR(ptr: skb))
3999 status = mgmt_status(err: PTR_ERR(ptr: skb));
4000 else
4001 status = mgmt_status(err: skb->data[0]);
4002 }
4003
4004 bt_dev_dbg(hdev, "status %d", status);
4005
4006 if (status) {
4007 mgmt_cmd_status(sk: cmd->sk, index: hdev->id,
4008 MGMT_OP_SET_PHY_CONFIGURATION, status);
4009 } else {
4010 mgmt_cmd_complete(sk: cmd->sk, index: hdev->id,
4011 MGMT_OP_SET_PHY_CONFIGURATION, status: 0,
4012 NULL, rp_len: 0);
4013
4014 mgmt_phy_configuration_changed(hdev, skip: cmd->sk);
4015 }
4016
4017 if (skb && !IS_ERR(ptr: skb))
4018 kfree_skb(skb);
4019
4020 mgmt_pending_remove(cmd);
4021}
4022
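/* Build the HCI LE Set Default PHY command from the mgmt PHY selection.
 * Per the Core spec, all_phys bit 0 means the host has no TX PHY
 * preference and bit 1 means no RX preference, which is why those bits
 * are set whenever the respective TX/RX selection masks are empty.
 */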
static int set_default_phy_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
	struct hci_cp_le_set_default_phy cp_phy;
	u32 selected_phys = __le32_to_cpu(cp->selected_phys);

	memset(&cp_phy, 0, sizeof(cp_phy));

	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
				  sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);

	return 0;
}

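/* MGMT_OP_SET_PHY_CONFIGURATION handler. The BR/EDR selection is applied
 * by rewriting hdev->pkt_type; note that the EDR (2M/3M) packet-type
 * bits are "shall not use" bits at the HCI level, hence the inverted
 * logic compared to the basic-rate DM/DH bits. The LE selection is
 * applied asynchronously through set_default_phy_sync().
 */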
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

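/* MGMT_OP_SET_BLOCKED_KEYS handler: replaces the whole blocked-key list
 * under hci_dev_lock after validating that the declared key_count
 * matches the actual payload length.
 */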
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				 err, NULL, 0);
}

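/* MGMT_OP_SET_WIDEBAND_SPEECH handler: pure flag handling, gated on the
 * HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED quirk; changing the value is
 * rejected while the controller is powered.
 */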
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

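/* MGMT_OP_READ_CONTROLLER_CAP handler: builds an EIR-encoded capability
 * blob (security flags, max encryption key sizes and, when available,
 * the LE TX power range) into a fixed 20-byte buffer.
 */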
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to
	 * fetch them from the controller.
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}

#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};

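/* MGMT_OP_READ_EXP_FEATURES_INFO handler: works both with and without a
 * controller index (hdev may be NULL) and reports one UUID/flags pair
 * per experimental feature; BIT(0) of the flags means "enabled".
 */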
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_exp_features_info *rp;
	size_t len;
	u16 idx = 0;
	u32 flags;
	int status;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enough space for 7 features */
	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
	rp = kzalloc(len, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_enabled() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && lmp_le_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update the client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				   MGMT_OP_READ_EXP_FEATURES_INFO,
				   0, rp, sizeof(*rp) + (20 * idx));

	kfree(rp);
	return status;
}

static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));

	/* Note: should this update be atomic with respect to conn_flags? */
	if (enabled && privacy_mode_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
	else
		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}

static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
			       bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, uuid, 16);
	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}

#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}

/* The zero key uuid is special. Multiple exp features are set through it. */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}

#ifdef CONFIG_BT_FEATURE_DEBUG
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command must be sent without a controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif

static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command must be sent with a controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	} else {
		hci_dev_clear_flag(hdev, HCI_MESH);
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);

	return err;
}

static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command must be sent with a controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when the controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}

static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command must be sent with a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}

static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command must be sent with a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);

	return err;
}

static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command must be sent with a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	if (!hci_dev_le_state_simultaneous(hdev)) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);

	return err;
}

#ifdef CONFIG_BT_LE
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed = false;
	int err;

	/* Command must be sent without a controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = cp->param[0] ? true : false;
	if (val)
		err = iso_init();
	else
		err = iso_exit();

	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, val, sk);

	return err;
}
#endif

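/* Dispatch table mapping experimental-feature UUIDs to their setters;
 * set_exp_feature() walks it until the NULL sentinel entry.
 */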
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};

static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	size_t i = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	for (i = 0; exp_features[i].uuid; i++) {
		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
			return exp_features[i].set_func(sk, hdev, cp, data_len);
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}

static u32 get_params_flags(struct hci_dev *hdev,
			    struct hci_conn_params *params)
{
	u32 flags = hdev->conn_flags;

	/* Devices using RPAs can only be programmed into the accept list if
	 * LL Privacy has been enabled; otherwise they cannot set
	 * HCI_CONN_FLAG_REMOTE_WAKEUP.
	 */
	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;

	return flags;
}

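/* MGMT_OP_GET_DEVICE_FLAGS handler: BR/EDR devices are looked up in the
 * accept list, LE devices in the connection parameters, and for LE the
 * supported flags are narrowed by get_params_flags().
 */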
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		supported_flags = get_params_flags(hdev, params);
		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				 &rp, sizeof(rp));
}

static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = bdaddr_type;
	ev.supported_flags = cpu_to_le32(supported_flags);
	ev.current_flags = cpu_to_le32(current_flags);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}

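/* MGMT_OP_SET_DEVICE_FLAGS handler: validates the requested flags
 * against the supported set before storing them, and re-triggers
 * passive scanning when HCI_CONN_FLAG_DEVICE_PRIVACY is set.
 */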
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* Note: hdev->conn_flags is read here before hci_dev_lock() is
	 * taken; conn_flags may change in between, so taking the lock
	 * earlier would arguably be safer.
	 */
	supported_flags = hdev->conn_flags;

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	supported_flags = get_params_flags(hdev, params);

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}

static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added ev;

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}

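/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler: reports the supported
 * monitor features (currently OR-patterns when the MSFT extension is
 * present) plus the handles of all registered monitors.
 */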
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}

static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}

static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	return hci_add_adv_monitor(hdev, monitor);
}

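/* Common tail for both Add Adv Patterns Monitor variants: takes
 * ownership of the monitor and frees it on every failure path before
 * returning a command status.
 */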
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	if (status)
		goto unlock;

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}

static void parse_adv_monitor_rssi(struct adv_monitor *m,
				   struct mgmt_adv_rssi_thresholds *rssi)
{
	if (rssi) {
		m->rssi.low_threshold = rssi->low_threshold;
		m->rssi.low_threshold_timeout =
		    __le16_to_cpu(rssi->low_threshold_timeout);
		m->rssi.high_threshold = rssi->high_threshold;
		m->rssi.high_threshold_timeout =
		    __le16_to_cpu(rssi->high_threshold_timeout);
		m->rssi.sampling_period = rssi->sampling_period;
	} else {
		/* Default values. These are the least constraining
		 * parameters for the MSFT API to work, so it behaves as if
		 * there were no RSSI parameters to consider. They may need
		 * to change if other APIs are to be supported.
		 */
		m->rssi.low_threshold = -127;
		m->rssi.low_threshold_timeout = 60;
		m->rssi.high_threshold = -127;
		m->rssi.high_threshold_timeout = 0;
		m->rssi.sampling_period = 0;
	}
}

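/* Validates each pattern against HCI_MAX_EXT_AD_LENGTH and copies it
 * onto the monitor's pattern list; returns an MGMT status code.
 */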
static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
				    struct mgmt_adv_pattern *patterns)
{
	u8 offset = 0, length = 0;
	struct adv_pattern *p = NULL;
	int i;

	for (i = 0; i < pattern_count; i++) {
		offset = patterns[i].offset;
		length = patterns[i].length;
		if (offset >= HCI_MAX_EXT_AD_LENGTH ||
		    length > HCI_MAX_EXT_AD_LENGTH ||
		    (offset + length) > HCI_MAX_EXT_AD_LENGTH)
			return MGMT_STATUS_INVALID_PARAMS;

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return MGMT_STATUS_NO_RESOURCES;

		p->ad_type = patterns[i].ad_type;
		p->offset = patterns[i].offset;
		p->length = patterns[i].length;
		memcpy(p->value, patterns[i].value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	return MGMT_STATUS_SUCCESS;
}

static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}

static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}

static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;

	hci_dev_lock(hdev);

	rp.monitor_handle = cp->monitor_handle;

	if (!status)
		hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}

static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
	u16 handle = __le16_to_cpu(cp->monitor_handle);

	if (!handle)
		return hci_remove_all_adv_monitor(hdev);

	return hci_remove_single_adv_monitor(hdev, handle);
}

static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
				 mgmt_remove_adv_monitor_complete);

	if (err) {
		mgmt_pending_remove(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}

static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}

static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;

	if (bredr_sc_enabled(hdev))
		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
	else
		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);

	if (IS_ERR(cmd->skb))
		return PTR_ERR(cmd->skb);
	else
		return 0;
}

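/* MGMT_OP_READ_LOCAL_OOB_DATA handler: requires a powered, SSP-capable
 * controller; the extended (P-256) variant is requested when BR/EDR
 * Secure Connections is enabled.
 */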
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

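/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: accepts the legacy (P-192 only)
 * and the extended (P-192 + P-256) command sizes; zero-valued hash/rand
 * pairs disable OOB data for the corresponding curve.
 */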
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5791
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}

void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (!cmd)
		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}

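/* Validate a discovery type against what the controller supports: LE-only
 * and interleaved discovery require LE support, while interleaved and BR/EDR
 * discovery additionally require BR/EDR support. On failure the mgmt status
 * code to return is written to *mgmt_status.
 */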
static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
				    uint8_t *mgmt_status)
{
	switch (type) {
	case DISCOV_TYPE_LE:
		*mgmt_status = mgmt_le_support(hdev);
		if (*mgmt_status)
			return false;
		break;
	case DISCOV_TYPE_INTERLEAVED:
		*mgmt_status = mgmt_le_support(hdev);
		if (*mgmt_status)
			return false;
		fallthrough;
	case DISCOV_TYPE_BREDR:
		*mgmt_status = mgmt_bredr_support(hdev);
		if (*mgmt_status)
			return false;
		break;
	default:
		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}

static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
				DISCOVERY_FINDING);
}

static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}

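/* Common handler behind Start Discovery and Start Limited Discovery: reject
 * the request if the adapter is powered off, busy or paused, validate the
 * discovery type, then queue the discovery start on the hci_sync machinery
 * and move the state to DISCOVERY_STARTING.
 */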
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}

static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}

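/* Start Service Discovery additionally carries an RSSI threshold and a
 * variable-length list of 128-bit service UUIDs used to filter results, so
 * the command length has to be validated against the advertised uuid_count
 * before the filter is copied into hdev->discovery.
 */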
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}

void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}

static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}

static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}

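/* Stop Discovery is only accepted while discovery is active and only for the
 * same discovery type that was started; the actual stop is queued on the
 * hci_sync command queue and the state moves to DISCOVERY_STOPPING.
 */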
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

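/* Confirm Name tells the kernel whether the name of a discovered device is
 * already known to user space: if it is, the inquiry cache entry no longer
 * needs name resolution; if not, the entry is queued for remote name
 * resolution.
 */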
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_FAILED;
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

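/* Set Device ID stores the DI record (source, vendor, product, version) in
 * hdev and refreshes the EIR data so the record becomes visible to remote
 * devices. Only source values 0x0000-0x0002 are accepted; per the Device ID
 * profile, 0x0001 denotes a Bluetooth SIG assigned vendor ID and 0x0002 a
 * USB Implementer's Forum one.
 */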
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}

static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	int err;
	__u16 source;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}

static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising %d", err);
	else
		bt_dev_dbg(hdev, "status %d", err);
}

static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}

static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}

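/* Set Advertising accepts 0x00 (off), 0x01 (on) and 0x02 (on and
 * connectable). When the adapter is powered off, in mesh mode, has LE
 * connections or an active LE scan running, only the flags are toggled and a
 * settings response is sent; otherwise the change goes through the hci_sync
 * command queue.
 */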
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

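/* Set Static Address is only allowed while the adapter is powered off. A
 * valid static random address must have its two most significant bits set
 * and BDADDR_NONE is explicitly rejected; passing BDADDR_ANY clears the
 * configured static address.
 */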
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

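/* Set Scan Parameters validates the LE scan interval and window against the
 * range 0x0004-0x4000 (2.5 ms to 10.24 s in 0.625 ms units) and requires the
 * window to be no larger than the interval; a running background scan is
 * restarted so the new values take effect.
 */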
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);

	return err;
}

static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}

static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	return hci_write_fast_connectable_sync(hdev, cp->val);
}

static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}

static int set_bredr_sync(struct hci_dev *hdev, void *data)
{
	int status;

	status = hci_write_fast_connectable_sync(hdev, false);

	if (!status)
		status = hci_update_scan_sync(hdev);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	if (!status)
		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);

	return status;
}

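/* Set BR/EDR toggles the BR/EDR transport on a dual-mode controller. While
 * powered on only enabling is allowed, and re-enabling is rejected when the
 * controller is configured for LE-only operation with a static address or
 * with secure connections enabled, since that would create an invalid
 * identity-address configuration.
 */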
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as their identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restriction applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
}

static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Force write of val */
	hci_dev_set_flag(hdev, HCI_SC_ENABLED);

	return hci_write_sc_support_sync(hdev, val);
}

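/* Set Secure Connections accepts 0x00 (off), 0x01 (on) and 0x02 (SC-only
 * mode). When the controller lacks BR/EDR SC support or is powered off, only
 * the HCI_SC_ENABLED/HCI_SC_ONLY flags are updated; otherwise the change is
 * written to the controller via the hci_sync command queue.
 */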
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;

		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

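/* Set Privacy can only be changed while the adapter is powered off. Enabling
 * privacy stores the identity resolving key (IRK) and marks the resolvable
 * private address as expired so a fresh RPA gets generated; 0x02 selects
 * limited privacy.
 */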
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

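/* An IRK entry is valid only for LE identity addresses: public addresses are
 * always accepted, while random addresses must be static, i.e. have the two
 * most significant bits set.
 */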
static bool irk_is_valid(struct mgmt_irk_info *irk)
{
	switch (irk->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
}

static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
	if (key->initiator != 0x00 && key->initiator != 0x01)
		return false;

	switch (key->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
}

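/* Load Long Term Keys replaces the kernel's LTK store with the list supplied
 * by user space. Each entry is validated first, blocked keys are skipped,
 * and the mgmt key type is translated into the matching SMP LTK type; P-256
 * debug keys are deliberately never loaded.
 */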
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
				NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}

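/* Runs on the hci_sync queue: re-validate that the connection still exists,
 * then refresh the RSSI and, where still unknown, the current and maximum TX
 * power values cached in the hci_conn.
 */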
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16 handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change, thus we don't need to
	 * query for it once the value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}

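/* Get Connection Information returns RSSI and TX power for an active
 * connection. Cached values are returned while they are younger than a
 * randomized age between hdev->conn_info_min_age and conn_info_max_age;
 * otherwise a refresh is queued on the hci_sync command queue.
 */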
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid clients trying to guess when to poll again for
	 * information, we calculate conn info age as a random value between
	 * the min/max set in hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query controller to refresh cached values if they are too old or
	 * were never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}

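/* Runs on the hci_sync queue: read the local clock first (hci_cp is zeroed,
 * so "which" selects the local clock) and then, if the connection is still
 * up, the piconet clock for that connection handle.
 */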
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_read_clock_sync(hdev, &hci_cp);

	/* Make sure connection still exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	cmd->user_data = conn;
	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */

	return hci_read_clock_sync(hdev, &hci_cp);
}

static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

/* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	hci_pend_le_list_del_init(params);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		else
			hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}

static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;
	ev.action = action;

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}

static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}

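/* Add Device action values: 0x00 report on discovery, 0x01 allow incoming
 * connections, 0x02 auto-connect. For BR/EDR only action 0x01 is supported
 * and the address goes on the accept list; for LE the action is mapped to an
 * auto-connect policy on the connection parameters and the passive scan is
 * refreshed.
 */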
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}

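/* Remove Device undoes Add Device for a single address, or for all devices
 * when BDADDR_ANY is given. A single remove is rejected for entries whose
 * auto-connect policy is disabled or explicit-only; the wildcard variant
 * skips disabled entries and demotes pending explicit connects to
 * HCI_AUTO_CONN_EXPLICIT before freeing the rest.
 */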
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_free(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			hci_conn_params_free(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}

7867static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7868 u16 len)
7869{
7870 struct mgmt_cp_load_conn_param *cp = data;
7871 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7872 sizeof(struct mgmt_conn_param));
7873 u16 param_count, expected_len;
7874 int i;
7875
7876 if (!lmp_le_capable(hdev))
7877 return mgmt_cmd_status(sk, index: hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7878 MGMT_STATUS_NOT_SUPPORTED);
7879
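	/* Reject a param_count so large that the expected command length
	 * computed below would overflow the u16 it is stored in.
	 */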
	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}

static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

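			/* Class of Device (3 bytes plus 2-byte EIR header)
			 * and the C-192 hash and R-192 randomizer, 16 bytes
			 * plus a 2-byte EIR header each.
			 */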
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}

static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
				  struct mgmt_cp_read_local_oob_ext_data *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
			       cp, sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
				 read_local_oob_ext_data_complete);

	if (err < 0) {
		mgmt_pending_remove(cmd);
		return err;
	}

	return 0;
}

static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
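			/* Worst case: LE address (9) + role (3) + SC
			 * confirmation (18) + SC random (18) + flags (3),
			 * each including the 2-byte EIR header.
			 */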
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}

static u32 get_supported_adv_flags(struct hci_dev *hdev)
{
	u32 flags = 0;

	flags |= MGMT_ADV_FLAG_CONNECTABLE;
	flags |= MGMT_ADV_FLAG_DISCOV;
	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
	flags |= MGMT_ADV_FLAG_APPEARANCE;
	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
	flags |= MGMT_ADV_PARAM_DURATION;
	flags |= MGMT_ADV_PARAM_TIMEOUT;
	flags |= MGMT_ADV_PARAM_INTERVALS;
	flags |= MGMT_ADV_PARAM_TX_POWER;
	flags |= MGMT_ADV_PARAM_SCAN_RSP;

	/* In extended adv the TX_POWER returned from Set Adv Param
	 * will always be valid.
	 */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
		flags |= MGMT_ADV_FLAG_TX_POWER;

	if (ext_adv_capable(hdev)) {
		flags |= MGMT_ADV_FLAG_SEC_1M;
		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;

		if (le_2m_capable(hdev))
			flags |= MGMT_ADV_FLAG_SEC_2M;

		if (le_coded_capable(hdev))
			flags |= MGMT_ADV_FLAG_SEC_CODED;
	}

	return flags;
}

static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

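	/* The reply carries one byte of instance identifier per configured
	 * advertising instance after the fixed-size header.
	 */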
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = max_adv_len(hdev);
	rp->max_scan_rsp_len = max_adv_len(hdev);
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}

static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];

	return eir_append_local_name(hdev, buf, 0);
}

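/* Compute how much TLV payload remains once the fields managed by the
 * kernel are reserved: flags and TX power take 3 bytes each (2-byte EIR
 * header plus one data byte), appearance takes 4, and the local name takes
 * whatever eir_append_local_name() would emit.
 */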
static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
			   bool is_adv_data)
{
	u8 max_len = max_adv_len(hdev);

	if (is_adv_data) {
		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
				 MGMT_ADV_FLAG_LIMITED_DISCOV |
				 MGMT_ADV_FLAG_MANAGED_FLAGS))
			max_len -= 3;

		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
			max_len -= 3;
	} else {
		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
			max_len -= calculate_name_len(hdev);

		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
			max_len -= 4;
	}

	return max_len;
}

static bool flags_managed(u32 adv_flags)
{
	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
			    MGMT_ADV_FLAG_LIMITED_DISCOV |
			    MGMT_ADV_FLAG_MANAGED_FLAGS);
}

static bool tx_power_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
}

static bool name_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
}

static bool appearance_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
}

static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		if (!cur_len)
			continue;

		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;

		/* If the current field length would exceed the total data
		 * length, then it's invalid.
		 */
		if (i + cur_len >= len)
			return false;
	}

	return true;
}

static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
{
	u32 supported_flags, phy_flags;

	/* The current implementation only supports a subset of the specified
	 * flags. Also need to check mutual exclusiveness of sec flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
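	/* phy_flags & -phy_flags isolates the lowest set bit, so the XOR
	 * below is non-zero exactly when more than one secondary PHY flag
	 * was requested.
	 */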
	if (adv_flags & ~supported_flags ||
	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
		return false;

	return true;
}

static bool adv_busy(struct hci_dev *hdev)
{
	return pending_find(MGMT_OP_SET_LE, hdev);
}

static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
			     int err)
{
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance;

		if (!adv->pending)
			continue;

		if (!err) {
			adv->pending = false;
			continue;
		}

		instance = adv->instance;

		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(sk, hdev, instance);
	}

	hci_dev_unlock(hdev);
}

static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;
	struct mgmt_rp_add_advertising rp;

	memset(&rp, 0, sizeof(rp));

	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));

	add_adv_complete(hdev, cmd->sk, cp->instance, err);

	mgmt_pending_free(cmd);
}

static int add_advertising_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}

static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure.
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	if (cmd)
		mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}

static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;

	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
}

static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In the new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
	struct mgmt_rp_add_advertising rp;

	add_adv_complete(hdev, cmd->sk, cp->instance, err);

	memset(&rp, 0, sizeof(rp));

	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}

static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
	int err;

	if (ext_adv_capable(hdev)) {
		err = hci_update_adv_data_sync(hdev, cp->instance);
		if (err)
			return err;

		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
		if (err)
			return err;

		return hci_enable_ext_advertising_sync(hdev, cp->instance);
	}

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}

static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In the new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete.
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void remove_advertising_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_advertising *cp = cmd->param;
	struct mgmt_rp_remove_advertising rp;

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}

static int remove_advertising_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_advertising *cp = cmd->param;
	int err;

	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
	if (err)
		return err;

	if (list_empty(&hdev->adv_instances))
		err = hci_disable_advertising_sync(hdev);

	return err;
}

static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 flags, supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	/* The current implementation only supports a subset of the specified
	 * flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (flags & ~supported_flags)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}

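/* The handler table is indexed by MGMT opcode, so each entry's position
 * must match its opcode value; index 0x0000 is a NULL placeholder because
 * there is no command zero.
 */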
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,         MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,      MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info, MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,          MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,        MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,     MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor, MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
};

void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}

void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}

void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}

void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let us
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct mgmt_pending_cmd *cmd;
	u8 status;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

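/* Translate the internal SMP long term key type into the MGMT key type
 * that is exposed to user space.
 */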
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_RESPONDER:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	return MGMT_LTK_UNAUTHENTICATED;
}

void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need to
	 * store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
9570 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9571 (key->bdaddr.b[5] & 0xc0) != 0xc0)
9572 ev.store_hint = 0x00;
9573 else
9574 ev.store_hint = persistent;
9575
9576 bacpy(dst: &ev.key.addr.bdaddr, src: &key->bdaddr);
9577 ev.key.addr.type = link_to_bdaddr(LE_LINK, addr_type: key->bdaddr_type);
9578 ev.key.type = mgmt_ltk_type(ltk: key);
9579 ev.key.enc_size = key->enc_size;
9580 ev.key.ediv = key->ediv;
9581 ev.key.rand = key->rand;
9582
9583 if (key->type == SMP_LTK)
9584 ev.key.initiator = 1;
9585
9586 /* Make sure we copy only the significant bytes based on the
9587 * encryption key size, and set the rest of the value to zeroes.
9588 */
9589 memcpy(ev.key.val, key->val, key->enc_size);
9590 memset(ev.key.val + key->enc_size, 0,
9591 sizeof(ev.key.val) - key->enc_size);
9592
9593 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, data: &ev, len: sizeof(ev), NULL);
9594}
9595
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need their
	 * signature resolving keys stored. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address should
	 * the signature resolving key be stored. So allow static random
	 * and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

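/* Connection parameters are only tracked for identity addresses, so
 * updates for unresolved resolvable private addresses are ignored.
 */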
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

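/* Send the Device Connected event, attaching either the advertising
 * data received over LE or, for BR/EDR, the remote name and class of
 * device encoded as EIR fields.
 */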
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	/* allocate buffer for LE or BR/EDR adv */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class, sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}

static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct sock **sk = data;

	cmd->cmd_complete(cmd, 0);

	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}

static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}

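/* Return true if a pending Set Powered command is in the process of
 * turning the adapter off.
 */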
bool mgmt_powering_down(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

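/* Common completion handler for the four user confirm/passkey reply
 * variants below: complete and remove the matching pending command.
 */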
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

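/* Completion handler for HCI_Write_Auth_Enable: update the Link
 * Security setting flag and answer any pending Set Link Security
 * commands.
 */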
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is an HCI command triggered as part of powering
		 * on the HCI dev, don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}

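/* Return true if the given 128-bit UUID appears in the filter list. */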
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}

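/* Walk the EIR structures looking for a UUID that matches the filter
 * list. 16-bit and 32-bit UUIDs are expanded to 128 bits against the
 * Bluetooth Base UUID (kept in the same little-endian byte order as
 * the UUIDs in the EIR data) before comparison.
 */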
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}

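/* Restart the LE scan to work around controllers whose duplicate
 * filtering suppresses RSSI updates. Only reschedule when the delayed
 * restart would still fall within the current scan window.
 */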
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}

static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If an RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * an RSSI smaller than the RSSI threshold will be dropped. If the
	 * quirk is set, let it through for further processing, as we might
	 * need to restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}

void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}

static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}

static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}

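/* Forward an advertising report to the mesh interface, but only if it
 * carries at least one of the AD types registered in mesh_ad_types; an
 * empty filter list accepts every report.
 */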
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}

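/* Entry point for inquiry results and advertising reports: apply mesh,
 * discovery and advertisement-monitor filtering, then build and emit
 * the Device Found event.
 */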
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. One
	 * exception for LE is when we have pend_le_reports > 0, in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);

			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}

void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		memset(&ev.addr, 0, sizeof(ev.addr));
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}

static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};

int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}

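/* Called when a mgmt socket is closed: silently complete any mesh
 * transmissions still owned by that socket.
 */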
void mgmt_cleanup(struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, true);
		} while (mesh_tx);
	}

	read_unlock(&hci_dev_list_lock);
}
