/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25
26/* Bluetooth HCI core. */
27
28#include <linux/export.h>
29#include <linux/rfkill.h>
30#include <linux/debugfs.h>
31#include <linux/crypto.h>
32#include <linux/kcov.h>
33#include <linux/property.h>
34#include <linux/suspend.h>
35#include <linux/wait.h>
36#include <asm/unaligned.h>
37
38#include <net/bluetooth/bluetooth.h>
39#include <net/bluetooth/hci_core.h>
40#include <net/bluetooth/l2cap.h>
41#include <net/bluetooth/mgmt.h>
42
43#include "hci_request.h"
44#include "hci_debugfs.h"
45#include "smp.h"
46#include "leds.h"
47#include "msft.h"
48#include "aosp.h"
49#include "hci_codec.h"
50
51static void hci_rx_work(struct work_struct *work);
52static void hci_cmd_work(struct work_struct *work);
53static void hci_tx_work(struct work_struct *work);
54
55/* HCI device list */
56LIST_HEAD(hci_dev_list);
57DEFINE_RWLOCK(hci_dev_list_lock);
58
59/* HCI callback list */
60LIST_HEAD(hci_cb_list);
61DEFINE_MUTEX(hci_cb_list_lock);
62
63/* HCI ID Numbering */
64static DEFINE_IDA(hci_index_ida);
65
66static int hci_scan_req(struct hci_request *req, unsigned long opt)
67{
68 __u8 scan = opt;
69
70 BT_DBG("%s %x", req->hdev->name, scan);
71
72 /* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
74 return 0;
75}
76
77static int hci_auth_req(struct hci_request *req, unsigned long opt)
78{
79 __u8 auth = opt;
80
81 BT_DBG("%s %x", req->hdev->name, auth);
82
83 /* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
85 return 0;
86}
87
88static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
89{
90 __u8 encrypt = opt;
91
92 BT_DBG("%s %x", req->hdev->name, encrypt);
93
94 /* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
96 return 0;
97}
98
99static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
100{
101 __le16 policy = cpu_to_le16(opt);
102
103 BT_DBG("%s %x", req->hdev->name, policy);
104
105 /* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
107 return 0;
108}
109
110/* Get HCI device by index.
111 * Device is held on return. */
112struct hci_dev *hci_dev_get(int index)
113{
114 struct hci_dev *hdev = NULL, *d;
115
116 BT_DBG("%d", index);
117
118 if (index < 0)
119 return NULL;
120
121 read_lock(&hci_dev_list_lock);
122 list_for_each_entry(d, &hci_dev_list, list) {
123 if (d->id == index) {
124 hdev = hci_dev_hold(d);
125 break;
126 }
127 }
128 read_unlock(&hci_dev_list_lock);
129 return hdev;
130}
131
132/* ---- Inquiry support ---- */
133
134bool hci_discovery_active(struct hci_dev *hdev)
135{
136 struct discovery_state *discov = &hdev->discovery;
137
138 switch (discov->state) {
139 case DISCOVERY_FINDING:
140 case DISCOVERY_RESOLVING:
141 return true;
142
143 default:
144 return false;
145 }
146}
147
148void hci_discovery_set_state(struct hci_dev *hdev, int state)
149{
150 int old_state = hdev->discovery.state;
151
152 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
153
154 if (old_state == state)
155 return;
156
157 hdev->discovery.state = state;
158
159 switch (state) {
160 case DISCOVERY_STOPPED:
161 hci_update_passive_scan(hdev);
162
163 if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
165 break;
166 case DISCOVERY_STARTING:
167 break;
168 case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
170 break;
171 case DISCOVERY_RESOLVING:
172 break;
173 case DISCOVERY_STOPPING:
174 break;
175 }
176}
177
178void hci_inquiry_cache_flush(struct hci_dev *hdev)
179{
180 struct discovery_state *cache = &hdev->discovery;
181 struct inquiry_entry *p, *n;
182
183 list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
190}
191
192struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
193 bdaddr_t *bdaddr)
194{
195 struct discovery_state *cache = &hdev->discovery;
196 struct inquiry_entry *e;
197
198 BT_DBG("cache %p, %pMR", cache, bdaddr);
199
200 list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
202 return e;
203 }
204
205 return NULL;
206}
207
208struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
209 bdaddr_t *bdaddr)
210{
211 struct discovery_state *cache = &hdev->discovery;
212 struct inquiry_entry *e;
213
214 BT_DBG("cache %p, %pMR", cache, bdaddr);
215
216 list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
218 return e;
219 }
220
221 return NULL;
222}
223
224struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
225 bdaddr_t *bdaddr,
226 int state)
227{
228 struct discovery_state *cache = &hdev->discovery;
229 struct inquiry_entry *e;
230
231 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
232
233 list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
237 return e;
238 }
239
240 return NULL;
241}
242
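/* Re-insert the entry into the resolve list ordered by RSSI (strongest
 * first), without displacing entries whose name resolution is already
 * pending.
 */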
243void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
244 struct inquiry_entry *ie)
245{
246 struct discovery_state *cache = &hdev->discovery;
247 struct list_head *pos = &cache->resolve;
248 struct inquiry_entry *p;
249
	list_del(&ie->list);
251
252 list_for_each_entry(p, &cache->resolve, list) {
253 if (p->name_state != NAME_PENDING &&
254 abs(p->data.rssi) >= abs(ie->data.rssi))
255 break;
256 pos = &p->list;
257 }
258
	list_add(&ie->list, pos);
260}
261
262u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
263 bool name_known)
264{
265 struct discovery_state *cache = &hdev->discovery;
266 struct inquiry_entry *ie;
267 u32 flags = 0;
268
269 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
270
	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
272
273 if (!data->ssp_mode)
274 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
275
	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
277 if (ie) {
278 if (!ie->data.ssp_mode)
279 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
280
281 if (ie->name_state == NAME_NEEDED &&
282 data->rssi != ie->data.rssi) {
283 ie->data.rssi = data->rssi;
284 hci_inquiry_cache_update_resolve(hdev, ie);
285 }
286
287 goto update;
288 }
289
290 /* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
292 if (!ie) {
293 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
294 goto done;
295 }
296
	list_add(&ie->all, &cache->all);
298
299 if (name_known) {
300 ie->name_state = NAME_KNOWN;
301 } else {
302 ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
304 }
305
306update:
307 if (name_known && ie->name_state != NAME_KNOWN &&
308 ie->name_state != NAME_PENDING) {
309 ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
311 }
312
313 memcpy(&ie->data, data, sizeof(*data));
314 ie->timestamp = jiffies;
315 cache->timestamp = jiffies;
316
317 if (ie->name_state == NAME_NOT_KNOWN)
318 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
319
320done:
321 return flags;
322}
323
324static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
325{
326 struct discovery_state *cache = &hdev->discovery;
327 struct inquiry_info *info = (struct inquiry_info *) buf;
328 struct inquiry_entry *e;
329 int copied = 0;
330
331 list_for_each_entry(e, &cache->all, all) {
332 struct inquiry_data *data = &e->data;
333
334 if (copied >= num)
335 break;
336
		bacpy(&info->bdaddr, &data->bdaddr);
338 info->pscan_rep_mode = data->pscan_rep_mode;
339 info->pscan_period_mode = data->pscan_period_mode;
340 info->pscan_mode = data->pscan_mode;
341 memcpy(info->dev_class, data->dev_class, 3);
342 info->clock_offset = data->clock_offset;
343
344 info++;
345 copied++;
346 }
347
348 BT_DBG("cache %p, copied %d", cache, copied);
349 return copied;
350}
351
352static int hci_inq_req(struct hci_request *req, unsigned long opt)
353{
354 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
355 struct hci_dev *hdev = req->hdev;
356 struct hci_cp_inquiry cp;
357
358 BT_DBG("%s", hdev->name);
359
360 if (test_bit(HCI_INQUIRY, &hdev->flags))
361 return 0;
362
363 /* Start Inquiry */
364 memcpy(&cp.lap, &ir->lap, 3);
365 cp.length = ir->length;
366 cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
368
369 return 0;
370}
371
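/* Handle the HCIINQUIRY ioctl: run an inquiry if the cache is stale (or a
 * flush was requested) and copy the cached results back to user space.
 */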
372int hci_inquiry(void __user *arg)
373{
374 __u8 __user *ptr = arg;
375 struct hci_inquiry_req ir;
376 struct hci_dev *hdev;
377 int err = 0, do_inquiry = 0, max_rsp;
378 long timeo;
379 __u8 *buf;
380
	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
385 if (!hdev)
386 return -ENODEV;
387
388 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
389 err = -EBUSY;
390 goto done;
391 }
392
393 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
394 err = -EOPNOTSUPP;
395 goto done;
396 }
397
398 if (hdev->dev_type != HCI_PRIMARY) {
399 err = -EOPNOTSUPP;
400 goto done;
401 }
402
403 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
404 err = -EOPNOTSUPP;
405 goto done;
406 }
407
408 /* Restrict maximum inquiry length to 60 seconds */
409 if (ir.length > 60) {
410 err = -EINVAL;
411 goto done;
412 }
413
414 hci_dev_lock(hdev);
415 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
416 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
417 hci_inquiry_cache_flush(hdev);
418 do_inquiry = 1;
419 }
420 hci_dev_unlock(hdev);
421
	timeo = ir.length * msecs_to_jiffies(2000);
423
424 if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo, NULL);
427 if (err < 0)
428 goto done;
429
430 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
431 * cleared). If it is interrupted by a signal, return -EINTR.
432 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE)) {
435 err = -EINTR;
436 goto done;
437 }
438 }
439
	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries
442 */
443 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
444
	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
	 * and then copy it to user space.
447 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
449 if (!buf) {
450 err = -ENOMEM;
451 goto done;
452 }
453
454 hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
456 hci_dev_unlock(hdev);
457
458 BT_DBG("num_rsp %d", ir.num_rsp);
459
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);
469
470done:
	hci_dev_put(hdev);
472 return err;
473}
474
475static int hci_dev_do_open(struct hci_dev *hdev)
476{
477 int ret = 0;
478
479 BT_DBG("%s %p", hdev->name, hdev);
480
481 hci_req_sync_lock(hdev);
482
483 ret = hci_dev_open_sync(hdev);
484
485 hci_req_sync_unlock(hdev);
486 return ret;
487}
488
489/* ---- HCI ioctl helpers ---- */
490
491int hci_dev_open(__u16 dev)
492{
493 struct hci_dev *hdev;
494 int err;
495
	hdev = hci_dev_get(dev);
497 if (!hdev)
498 return -ENODEV;
499
500 /* Devices that are marked as unconfigured can only be powered
501 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
503 * possible.
504 *
505 * When this function is called for a user channel, the flag
506 * HCI_USER_CHANNEL will be set first before attempting to
507 * open the device.
508 */
509 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
510 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
511 err = -EOPNOTSUPP;
512 goto done;
513 }
514
515 /* We need to ensure that no other power on/off work is pending
516 * before proceeding to call hci_dev_do_open. This is
517 * particularly important if the setup procedure has not yet
518 * completed.
519 */
520 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);
522
523 /* After this call it is guaranteed that the setup procedure
524 * has finished. This means that error conditions like RFKILL
525 * or no valid public or static random address apply.
526 */
527 flush_workqueue(hdev->req_workqueue);
528
529 /* For controllers not using the management interface and that
530 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
531 * so that pairing works for them. Once the management interface
532 * is in use this bit will be cleared again and userspace has
533 * to explicitly enable it.
534 */
535 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
536 !hci_dev_test_flag(hdev, HCI_MGMT))
537 hci_dev_set_flag(hdev, HCI_BONDABLE);
538
539 err = hci_dev_do_open(hdev);
540
541done:
	hci_dev_put(hdev);
543 return err;
544}
545
546int hci_dev_do_close(struct hci_dev *hdev)
547{
548 int err;
549
550 BT_DBG("%s %p", hdev->name, hdev);
551
552 hci_req_sync_lock(hdev);
553
554 err = hci_dev_close_sync(hdev);
555
556 hci_req_sync_unlock(hdev);
557
558 return err;
559}
560
561int hci_dev_close(__u16 dev)
562{
563 struct hci_dev *hdev;
564 int err;
565
	hdev = hci_dev_get(dev);
567 if (!hdev)
568 return -ENODEV;
569
570 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
571 err = -EBUSY;
572 goto done;
573 }
574
	cancel_work_sync(&hdev->power_on);
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);
578
579 err = hci_dev_do_close(hdev);
580
581done:
	hci_dev_put(hdev);
583 return err;
584}
585
586static int hci_dev_do_reset(struct hci_dev *hdev)
587{
588 int ret;
589
590 BT_DBG("%s %p", hdev->name, hdev);
591
592 hci_req_sync_lock(hdev);
593
594 /* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
597
598 /* Cancel these to avoid queueing non-chained pending work */
599 hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
600 /* Wait for
601 *
602 * if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
603 * queue_delayed_work(&hdev->{cmd,ncmd}_timer)
604 *
605 * inside RCU section to see the flag or complete scheduling.
606 */
607 synchronize_rcu();
608 /* Explicitly cancel works in case scheduled after setting the flag. */
	cancel_delayed_work(&hdev->cmd_timer);
	cancel_delayed_work(&hdev->ncmd_timer);
611
612 /* Avoid potential lockdep warnings from the *_flush() calls by
613 * ensuring the workqueue is empty up front.
614 */
	drain_workqueue(hdev->workqueue);
616
617 hci_dev_lock(hdev);
618 hci_inquiry_cache_flush(hdev);
619 hci_conn_hash_flush(hdev);
620 hci_dev_unlock(hdev);
621
622 if (hdev->flush)
623 hdev->flush(hdev);
624
625 hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
626
	atomic_set(&hdev->cmd_cnt, 1);
628 hdev->acl_cnt = 0;
629 hdev->sco_cnt = 0;
630 hdev->le_cnt = 0;
631 hdev->iso_cnt = 0;
632
633 ret = hci_reset_sync(hdev);
634
635 hci_req_sync_unlock(hdev);
636 return ret;
637}
638
639int hci_dev_reset(__u16 dev)
640{
641 struct hci_dev *hdev;
642 int err;
643
	hdev = hci_dev_get(dev);
645 if (!hdev)
646 return -ENODEV;
647
648 if (!test_bit(HCI_UP, &hdev->flags)) {
649 err = -ENETDOWN;
650 goto done;
651 }
652
653 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
654 err = -EBUSY;
655 goto done;
656 }
657
658 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
659 err = -EOPNOTSUPP;
660 goto done;
661 }
662
663 err = hci_dev_do_reset(hdev);
664
665done:
	hci_dev_put(hdev);
667 return err;
668}
669
670int hci_dev_reset_stat(__u16 dev)
671{
672 struct hci_dev *hdev;
673 int ret = 0;
674
	hdev = hci_dev_get(dev);
676 if (!hdev)
677 return -ENODEV;
678
679 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
680 ret = -EBUSY;
681 goto done;
682 }
683
684 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
685 ret = -EOPNOTSUPP;
686 goto done;
687 }
688
689 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
690
691done:
	hci_dev_put(hdev);
693 return ret;
694}
695
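/* Bring the HCI_CONNECTABLE and HCI_DISCOVERABLE flags in line with a scan
 * mode change made through the legacy HCISETSCAN ioctl and, if the device is
 * managed through mgmt, notify user space about the new settings.
 */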
696static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
697{
698 bool conn_changed, discov_changed;
699
700 BT_DBG("%s scan 0x%02x", hdev->name, scan);
701
702 if ((scan & SCAN_PAGE))
703 conn_changed = !hci_dev_test_and_set_flag(hdev,
704 HCI_CONNECTABLE);
705 else
706 conn_changed = hci_dev_test_and_clear_flag(hdev,
707 HCI_CONNECTABLE);
708
709 if ((scan & SCAN_INQUIRY)) {
710 discov_changed = !hci_dev_test_and_set_flag(hdev,
711 HCI_DISCOVERABLE);
712 } else {
713 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
714 discov_changed = hci_dev_test_and_clear_flag(hdev,
715 HCI_DISCOVERABLE);
716 }
717
718 if (!hci_dev_test_flag(hdev, HCI_MGMT))
719 return;
720
721 if (conn_changed || discov_changed) {
722 /* In case this was disabled through mgmt */
723 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
724
725 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_update_adv_data(hdev, hdev->cur_adv_instance);
727
728 mgmt_new_settings(hdev);
729 }
730}
731
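/* Handle the legacy HCISET* device control ioctls (HCISETAUTH, HCISETENCRYPT,
 * HCISETSCAN, HCISETLINKPOL, HCISETLINKMODE, HCISETPTYPE, HCISETACLMTU and
 * HCISETSCOMTU).
 */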
732int hci_dev_cmd(unsigned int cmd, void __user *arg)
733{
734 struct hci_dev *hdev;
735 struct hci_dev_req dr;
736 int err = 0;
737
	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
742 if (!hdev)
743 return -ENODEV;
744
745 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
746 err = -EBUSY;
747 goto done;
748 }
749
750 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
751 err = -EOPNOTSUPP;
752 goto done;
753 }
754
755 if (hdev->dev_type != HCI_PRIMARY) {
756 err = -EOPNOTSUPP;
757 goto done;
758 }
759
760 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
761 err = -EOPNOTSUPP;
762 goto done;
763 }
764
765 switch (cmd) {
766 case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
769 break;
770
771 case HCISETENCRYPT:
772 if (!lmp_encrypt_capable(hdev)) {
773 err = -EOPNOTSUPP;
774 break;
775 }
776
777 if (!test_bit(HCI_AUTH, &hdev->flags)) {
778 /* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
781 if (err)
782 break;
783 }
784
		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
787 break;
788
789 case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
792
793 /* Ensure that the connectable and discoverable states
794 * get correctly modified as this was a non-mgmt change.
795 */
796 if (!err)
			hci_update_passive_scan_state(hdev, dr.dev_opt);
798 break;
799
800 case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
803 break;
804
805 case HCISETLINKMODE:
806 hdev->link_mode = ((__u16) dr.dev_opt) &
807 (HCI_LM_MASTER | HCI_LM_ACCEPT);
808 break;
809
810 case HCISETPTYPE:
811 if (hdev->pkt_type == (__u16) dr.dev_opt)
812 break;
813
814 hdev->pkt_type = (__u16) dr.dev_opt;
815 mgmt_phy_configuration_changed(hdev, NULL);
816 break;
817
818 case HCISETACLMTU:
819 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
820 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
821 break;
822
823 case HCISETSCOMTU:
824 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
825 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
826 break;
827
828 default:
829 err = -EINVAL;
830 break;
831 }
832
833done:
	hci_dev_put(hdev);
835 return err;
836}
837
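/* Handle the HCIGETDEVLIST ioctl: copy the ids and flags of all registered
 * controllers to user space.
 */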
838int hci_get_dev_list(void __user *arg)
839{
840 struct hci_dev *hdev;
841 struct hci_dev_list_req *dl;
842 struct hci_dev_req *dr;
843 int n = 0, size, err;
844 __u16 dev_num;
845
846 if (get_user(dev_num, (__u16 __user *) arg))
847 return -EFAULT;
848
849 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
850 return -EINVAL;
851
852 size = sizeof(*dl) + dev_num * sizeof(*dr);
853
854 dl = kzalloc(size, GFP_KERNEL);
855 if (!dl)
856 return -ENOMEM;
857
858 dr = dl->dev_req;
859
860 read_lock(&hci_dev_list_lock);
861 list_for_each_entry(hdev, &hci_dev_list, list) {
862 unsigned long flags = hdev->flags;
863
864 /* When the auto-off is configured it means the transport
865 * is running, but in that case still indicate that the
866 * device is actually down.
867 */
868 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
869 flags &= ~BIT(HCI_UP);
870
871 (dr + n)->dev_id = hdev->id;
872 (dr + n)->dev_opt = flags;
873
874 if (++n >= dev_num)
875 break;
876 }
877 read_unlock(&hci_dev_list_lock);
878
879 dl->dev_num = n;
880 size = sizeof(*dl) + n * sizeof(*dr);
881
	err = copy_to_user(arg, dl, size);
	kfree(dl);
884
885 return err ? -EFAULT : 0;
886}
887
888int hci_get_dev_info(void __user *arg)
889{
890 struct hci_dev *hdev;
891 struct hci_dev_info di;
892 unsigned long flags;
893 int err = 0;
894
	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
899 if (!hdev)
900 return -ENODEV;
901
902 /* When the auto-off is configured it means the transport
903 * is running, but in that case still indicate that the
904 * device is actually down.
905 */
906 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
907 flags = hdev->flags & ~BIT(HCI_UP);
908 else
909 flags = hdev->flags;
910
	strcpy(di.name, hdev->name);
912 di.bdaddr = hdev->bdaddr;
913 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
914 di.flags = flags;
915 di.pkt_type = hdev->pkt_type;
916 if (lmp_bredr_capable(hdev)) {
917 di.acl_mtu = hdev->acl_mtu;
918 di.acl_pkts = hdev->acl_pkts;
919 di.sco_mtu = hdev->sco_mtu;
920 di.sco_pkts = hdev->sco_pkts;
921 } else {
922 di.acl_mtu = hdev->le_mtu;
923 di.acl_pkts = hdev->le_pkts;
924 di.sco_mtu = 0;
925 di.sco_pkts = 0;
926 }
927 di.link_policy = hdev->link_policy;
928 di.link_mode = hdev->link_mode;
929
930 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
931 memcpy(&di.features, &hdev->features, sizeof(di.features));
932
	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);
937
938 return err;
939}
940
941/* ---- Interface to HCI drivers ---- */
942
943static int hci_rfkill_set_block(void *data, bool blocked)
944{
945 struct hci_dev *hdev = data;
946
947 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
948
949 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
950 return -EBUSY;
951
952 if (blocked) {
953 hci_dev_set_flag(hdev, HCI_RFKILLED);
954 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
955 !hci_dev_test_flag(hdev, HCI_CONFIG))
956 hci_dev_do_close(hdev);
957 } else {
958 hci_dev_clear_flag(hdev, HCI_RFKILLED);
959 }
960
961 return 0;
962}
963
964static const struct rfkill_ops hci_rfkill_ops = {
965 .set_block = hci_rfkill_set_block,
966};
967
968static void hci_power_on(struct work_struct *work)
969{
970 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
971 int err;
972
973 BT_DBG("%s", hdev->name);
974
975 if (test_bit(HCI_UP, &hdev->flags) &&
976 hci_dev_test_flag(hdev, HCI_MGMT) &&
977 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
979 err = hci_powered_update_sync(hdev);
980 mgmt_power_on(hdev, err);
981 return;
982 }
983
984 err = hci_dev_do_open(hdev);
985 if (err < 0) {
986 hci_dev_lock(hdev);
987 mgmt_set_powered_failed(hdev, err);
988 hci_dev_unlock(hdev);
989 return;
990 }
991
992 /* During the HCI setup phase, a few error conditions are
993 * ignored and they need to be checked now. If they are still
994 * valid, it is important to turn the device back off.
995 */
996 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
997 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
998 (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
1001 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
1002 hci_dev_do_close(hdev);
1003 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
1006 }
1007
1008 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
1009 /* For unconfigured devices, set the HCI_RAW flag
1010 * so that userspace can easily identify them.
1011 */
1012 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);
1014
1015 /* For fully configured devices, this will send
1016 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
1021 */
1022 mgmt_index_added(hdev);
1023 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
1024 /* When the controller is now configured, then it
1025 * is important to clear the HCI_RAW flag.
1026 */
1027 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);
1029
1030 /* Powering on the controller with HCI_CONFIG set only
1031 * happens with the transition from unconfigured to
1032 * configured. This will send the Index Added event.
1033 */
1034 mgmt_index_added(hdev);
1035 }
1036}
1037
1038static void hci_power_off(struct work_struct *work)
1039{
1040 struct hci_dev *hdev = container_of(work, struct hci_dev,
1041 power_off.work);
1042
1043 BT_DBG("%s", hdev->name);
1044
1045 hci_dev_do_close(hdev);
1046}
1047
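/* Worker scheduled when the controller signals a hardware error: run the
 * driver's hw_error hook if present (otherwise just log the error code),
 * then cycle the device (close and re-open) to recover.
 */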
1048static void hci_error_reset(struct work_struct *work)
1049{
1050 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1051
1052 BT_DBG("%s", hdev->name);
1053
1054 if (hdev->hw_error)
1055 hdev->hw_error(hdev, hdev->hw_error_code);
1056 else
1057 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1058
1059 if (hci_dev_do_close(hdev))
1060 return;
1061
1062 hci_dev_do_open(hdev);
1063}
1064
1065void hci_uuids_clear(struct hci_dev *hdev)
1066{
1067 struct bt_uuid *uuid, *tmp;
1068
1069 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
1072 }
1073}
1074
1075void hci_link_keys_clear(struct hci_dev *hdev)
1076{
1077 struct link_key *key, *tmp;
1078
1079 list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
1081 kfree_rcu(key, rcu);
1082 }
1083}
1084
1085void hci_smp_ltks_clear(struct hci_dev *hdev)
1086{
1087 struct smp_ltk *k, *tmp;
1088
1089 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
1091 kfree_rcu(k, rcu);
1092 }
1093}
1094
1095void hci_smp_irks_clear(struct hci_dev *hdev)
1096{
1097 struct smp_irk *k, *tmp;
1098
1099 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
1101 kfree_rcu(k, rcu);
1102 }
1103}
1104
1105void hci_blocked_keys_clear(struct hci_dev *hdev)
1106{
1107 struct blocked_key *b, *tmp;
1108
1109 list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
		list_del_rcu(&b->list);
1111 kfree_rcu(b, rcu);
1112 }
1113}
1114
1115bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1116{
1117 bool blocked = false;
1118 struct blocked_key *b;
1119
1120 rcu_read_lock();
1121 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1123 blocked = true;
1124 break;
1125 }
1126 }
1127
1128 rcu_read_unlock();
1129 return blocked;
1130}
1131
1132struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1133{
1134 struct link_key *k;
1135
1136 rcu_read_lock();
1137 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
1139 rcu_read_unlock();
1140
			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
1144 bt_dev_warn_ratelimited(hdev,
1145 "Link key blocked for %pMR",
1146 &k->bdaddr);
1147 return NULL;
1148 }
1149
1150 return k;
1151 }
1152 }
1153 rcu_read_unlock();
1154
1155 return NULL;
1156}
1157
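/* Decide whether a newly created BR/EDR link key should be stored
 * persistently, based on the key type and on the bonding requirements of the
 * local and remote side.
 */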
1158static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1159 u8 key_type, u8 old_key_type)
1160{
1161 /* Legacy key */
1162 if (key_type < 0x03)
1163 return true;
1164
1165 /* Debug keys are insecure so don't store them persistently */
1166 if (key_type == HCI_LK_DEBUG_COMBINATION)
1167 return false;
1168
1169 /* Changed combination key and there's no previous one */
1170 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1171 return false;
1172
1173 /* Security mode 3 case */
1174 if (!conn)
1175 return true;
1176
1177 /* BR/EDR key derived using SC from an LE link */
1178 if (conn->type == LE_LINK)
1179 return true;
1180
1181 /* Neither local nor remote side had no-bonding as requirement */
1182 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1183 return true;
1184
1185 /* Local side had dedicated bonding as requirement */
1186 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1187 return true;
1188
1189 /* Remote side had dedicated bonding as requirement */
1190 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1191 return true;
1192
1193 /* If none of the above criteria match, then don't store the key
1194 * persistently */
1195 return false;
1196}
1197
1198static u8 ltk_role(u8 type)
1199{
1200 if (type == SMP_LTK)
1201 return HCI_ROLE_MASTER;
1202
1203 return HCI_ROLE_SLAVE;
1204}
1205
1206struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1207 u8 addr_type, u8 role)
1208{
1209 struct smp_ltk *k;
1210
1211 rcu_read_lock();
1212 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1214 continue;
1215
		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1217 rcu_read_unlock();
1218
			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
1221 bt_dev_warn_ratelimited(hdev,
1222 "LTK blocked for %pMR",
1223 &k->bdaddr);
1224 return NULL;
1225 }
1226
1227 return k;
1228 }
1229 }
1230 rcu_read_unlock();
1231
1232 return NULL;
1233}
1234
1235struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1236{
1237 struct smp_irk *irk_to_return = NULL;
1238 struct smp_irk *irk;
1239
1240 rcu_read_lock();
1241 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
1243 irk_to_return = irk;
1244 goto done;
1245 }
1246 }
1247
1248 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
1251 irk_to_return = irk;
1252 goto done;
1253 }
1254 }
1255
1256done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
1259 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1260 &irk_to_return->bdaddr);
1261 irk_to_return = NULL;
1262 }
1263
1264 rcu_read_unlock();
1265
1266 return irk_to_return;
1267}
1268
1269struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1270 u8 addr_type)
1271{
1272 struct smp_irk *irk_to_return = NULL;
1273 struct smp_irk *irk;
1274
1275 /* Identity Address must be public or static random */
1276 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1277 return NULL;
1278
1279 rcu_read_lock();
1280 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1281 if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
1283 irk_to_return = irk;
1284 goto done;
1285 }
1286 }
1287
1288done:
1289
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
1292 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1293 &irk_to_return->bdaddr);
1294 irk_to_return = NULL;
1295 }
1296
1297 rcu_read_unlock();
1298
1299 return irk_to_return;
1300}
1301
1302struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1303 bdaddr_t *bdaddr, u8 *val, u8 type,
1304 u8 pin_len, bool *persistent)
1305{
1306 struct link_key *key, *old_key;
1307 u8 old_key_type;
1308
1309 old_key = hci_find_link_key(hdev, bdaddr);
1310 if (old_key) {
1311 old_key_type = old_key->type;
1312 key = old_key;
1313 } else {
1314 old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
1319 }
1320
1321 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1322
1323 /* Some buggy controller combinations generate a changed
1324 * combination key for legacy pairing even when there's no
1325 * previous key */
1326 if (type == HCI_LK_CHANGED_COMBINATION &&
1327 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1328 type = HCI_LK_COMBINATION;
1329 if (conn)
1330 conn->key_type = type;
1331 }
1332
	bacpy(&key->bdaddr, bdaddr);
1334 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1335 key->pin_len = pin_len;
1336
1337 if (type == HCI_LK_CHANGED_COMBINATION)
1338 key->type = old_key_type;
1339 else
1340 key->type = type;
1341
1342 if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);
1345
1346 return key;
1347}
1348
1349struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1350 u8 addr_type, u8 type, u8 authenticated,
1351 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1352{
1353 struct smp_ltk *key, *old_key;
1354 u8 role = ltk_role(type);
1355
1356 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1357 if (old_key)
1358 key = old_key;
1359 else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
1364 }
1365
	bacpy(&key->bdaddr, bdaddr);
1367 key->bdaddr_type = addr_type;
1368 memcpy(key->val, tk, sizeof(key->val));
1369 key->authenticated = authenticated;
1370 key->ediv = ediv;
1371 key->rand = rand;
1372 key->enc_size = enc_size;
1373 key->type = type;
1374
1375 return key;
1376}
1377
1378struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1379 u8 addr_type, u8 val[16], bdaddr_t *rpa)
1380{
1381 struct smp_irk *irk;
1382
1383 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1384 if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);
1397
1398 return irk;
1399}
1400
1401int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1402{
1403 struct link_key *key;
1404
1405 key = hci_find_link_key(hdev, bdaddr);
1406 if (!key)
1407 return -ENOENT;
1408
1409 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1410
	list_del_rcu(&key->list);
1412 kfree_rcu(key, rcu);
1413
1414 return 0;
1415}
1416
1417int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1418{
1419 struct smp_ltk *k, *tmp;
1420 int removed = 0;
1421
1422 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1424 continue;
1425
1426 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1427
		list_del_rcu(&k->list);
1429 kfree_rcu(k, rcu);
1430 removed++;
1431 }
1432
1433 return removed ? 0 : -ENOENT;
1434}
1435
1436void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1437{
1438 struct smp_irk *k, *tmp;
1439
1440 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1442 continue;
1443
1444 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1445
		list_del_rcu(&k->list);
1447 kfree_rcu(k, rcu);
1448 }
1449}
1450
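/* Check whether a bond (link key or LTK) exists for the given device,
 * resolving an RPA to its identity address first in the LE case.
 */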
1451bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1452{
1453 struct smp_ltk *k;
1454 struct smp_irk *irk;
1455 u8 addr_type;
1456
1457 if (type == BDADDR_BREDR) {
1458 if (hci_find_link_key(hdev, bdaddr))
1459 return true;
1460 return false;
1461 }
1462
1463 /* Convert to HCI addr type which struct smp_ltk uses */
1464 if (type == BDADDR_LE_PUBLIC)
1465 addr_type = ADDR_LE_DEV_PUBLIC;
1466 else
1467 addr_type = ADDR_LE_DEV_RANDOM;
1468
1469 irk = hci_get_irk(hdev, bdaddr, addr_type);
1470 if (irk) {
1471 bdaddr = &irk->bdaddr;
1472 addr_type = irk->addr_type;
1473 }
1474
1475 rcu_read_lock();
1476 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1478 rcu_read_unlock();
1479 return true;
1480 }
1481 }
1482 rcu_read_unlock();
1483
1484 return false;
1485}
1486
1487/* HCI command timer function */
1488static void hci_cmd_timeout(struct work_struct *work)
1489{
1490 struct hci_dev *hdev = container_of(work, struct hci_dev,
1491 cmd_timer.work);
1492
1493 if (hdev->sent_cmd) {
1494 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1495 u16 opcode = __le16_to_cpu(sent->opcode);
1496
1497 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1498 } else {
1499 bt_dev_err(hdev, "command tx timeout");
1500 }
1501
1502 if (hdev->cmd_timeout)
1503 hdev->cmd_timeout(hdev);
1504
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
1507}
1508
1509/* HCI ncmd timer function */
1510static void hci_ncmd_timeout(struct work_struct *work)
1511{
1512 struct hci_dev *hdev = container_of(work, struct hci_dev,
1513 ncmd_timer.work);
1514
1515 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1516
1517 /* During HCI_INIT phase no events can be injected if the ncmd timer
1518 * triggers since the procedure has its own timeout handling.
1519 */
1520 if (test_bit(HCI_INIT, &hdev->flags))
1521 return;
1522
1523 /* This is an irrecoverable state, inject hardware error event */
1524 hci_reset_dev(hdev);
1525}
1526
1527struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1528 bdaddr_t *bdaddr, u8 bdaddr_type)
1529{
1530 struct oob_data *data;
1531
1532 list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
1534 continue;
1535 if (data->bdaddr_type != bdaddr_type)
1536 continue;
1537 return data;
1538 }
1539
1540 return NULL;
1541}
1542
1543int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1544 u8 bdaddr_type)
1545{
1546 struct oob_data *data;
1547
1548 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1549 if (!data)
1550 return -ENOENT;
1551
1552 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1553
	list_del(&data->list);
	kfree(data);
1556
1557 return 0;
1558}
1559
1560void hci_remote_oob_data_clear(struct hci_dev *hdev)
1561{
1562 struct oob_data *data, *n;
1563
1564 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
1567 }
1568}
1569
1570int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1571 u8 bdaddr_type, u8 *hash192, u8 *rand192,
1572 u8 *hash256, u8 *rand256)
1573{
1574 struct oob_data *data;
1575
1576 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1577 if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
1585 }
1586
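	/* The present field is a bitmask: bit 0 indicates valid P-192 data
	 * (hash192/rand192) and bit 1 valid P-256 data (hash256/rand256).
	 */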
1587 if (hash192 && rand192) {
1588 memcpy(data->hash192, hash192, sizeof(data->hash192));
1589 memcpy(data->rand192, rand192, sizeof(data->rand192));
1590 if (hash256 && rand256)
1591 data->present = 0x03;
1592 } else {
1593 memset(data->hash192, 0, sizeof(data->hash192));
1594 memset(data->rand192, 0, sizeof(data->rand192));
1595 if (hash256 && rand256)
1596 data->present = 0x02;
1597 else
1598 data->present = 0x00;
1599 }
1600
1601 if (hash256 && rand256) {
1602 memcpy(data->hash256, hash256, sizeof(data->hash256));
1603 memcpy(data->rand256, rand256, sizeof(data->rand256));
1604 } else {
1605 memset(data->hash256, 0, sizeof(data->hash256));
1606 memset(data->rand256, 0, sizeof(data->rand256));
1607 if (hash192 && rand192)
1608 data->present = 0x01;
1609 }
1610
1611 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1612
1613 return 0;
1614}
1615
1616/* This function requires the caller holds hdev->lock */
1617struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1618{
1619 struct adv_info *adv_instance;
1620
1621 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1622 if (adv_instance->instance == instance)
1623 return adv_instance;
1624 }
1625
1626 return NULL;
1627}
1628
1629/* This function requires the caller holds hdev->lock */
1630struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1631{
1632 struct adv_info *cur_instance;
1633
1634 cur_instance = hci_find_adv_instance(hdev, instance);
1635 if (!cur_instance)
1636 return NULL;
1637
1638 if (cur_instance == list_last_entry(&hdev->adv_instances,
1639 struct adv_info, list))
1640 return list_first_entry(&hdev->adv_instances,
1641 struct adv_info, list);
1642 else
1643 return list_next_entry(cur_instance, list);
1644}
1645
1646/* This function requires the caller holds hdev->lock */
1647int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1648{
1649 struct adv_info *adv_instance;
1650
1651 adv_instance = hci_find_adv_instance(hdev, instance);
1652 if (!adv_instance)
1653 return -ENOENT;
1654
1655 BT_DBG("%s removing %dMR", hdev->name, instance);
1656
1657 if (hdev->cur_adv_instance == instance) {
1658 if (hdev->adv_instance_timeout) {
			cancel_delayed_work(&hdev->adv_instance_expire);
1660 hdev->adv_instance_timeout = 0;
1661 }
1662 hdev->cur_adv_instance = 0x00;
1663 }
1664
	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);

	list_del(&adv_instance->list);
	kfree(adv_instance);
1669
1670 hdev->adv_instance_cnt--;
1671
1672 return 0;
1673}
1674
1675void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1676{
1677 struct adv_info *adv_instance, *n;
1678
1679 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1680 adv_instance->rpa_expired = rpa_expired;
1681}
1682
1683/* This function requires the caller holds hdev->lock */
1684void hci_adv_instances_clear(struct hci_dev *hdev)
1685{
1686 struct adv_info *adv_instance, *n;
1687
1688 if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
1690 hdev->adv_instance_timeout = 0;
1691 }
1692
1693 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
		list_del(&adv_instance->list);
		kfree(adv_instance);
1697 }
1698
1699 hdev->adv_instance_cnt = 0;
1700 hdev->cur_adv_instance = 0x00;
1701}
1702
1703static void adv_instance_rpa_expired(struct work_struct *work)
1704{
1705 struct adv_info *adv_instance = container_of(work, struct adv_info,
1706 rpa_expired_cb.work);
1707
1708 BT_DBG("");
1709
1710 adv_instance->rpa_expired = true;
1711}
1712
1713/* This function requires the caller holds hdev->lock */
1714struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1715 u32 flags, u16 adv_data_len, u8 *adv_data,
1716 u16 scan_rsp_len, u8 *scan_rsp_data,
1717 u16 timeout, u16 duration, s8 tx_power,
1718 u32 min_interval, u32 max_interval,
1719 u8 mesh_handle)
1720{
1721 struct adv_info *adv;
1722
1723 adv = hci_find_adv_instance(hdev, instance);
1724 if (adv) {
1725 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1726 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1727 memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1728 } else {
1729 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1730 instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
			return ERR_PTR(-EOVERFLOW);

		adv = kzalloc(sizeof(*adv), GFP_KERNEL);
		if (!adv)
			return ERR_PTR(-ENOMEM);

		adv->pending = true;
		adv->instance = instance;
		list_add(&adv->list, &hdev->adv_instances);
1740 hdev->adv_instance_cnt++;
1741 }
1742
1743 adv->flags = flags;
1744 adv->min_interval = min_interval;
1745 adv->max_interval = max_interval;
1746 adv->tx_power = tx_power;
1747 /* Defining a mesh_handle changes the timing units to ms,
1748 * rather than seconds, and ties the instance to the requested
1749 * mesh_tx queue.
1750 */
1751 adv->mesh = mesh_handle;
1752
1753 hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1754 scan_rsp_len, scan_rsp_data);
1755
1756 adv->timeout = timeout;
1757 adv->remaining_time = timeout;
1758
1759 if (duration == 0)
1760 adv->duration = hdev->def_multi_adv_rotation_duration;
1761 else
1762 adv->duration = duration;
1763
1764 INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1765
1766 BT_DBG("%s for %dMR", hdev->name, instance);
1767
1768 return adv;
1769}
1770
1771/* This function requires the caller holds hdev->lock */
1772struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1773 u32 flags, u8 data_len, u8 *data,
1774 u32 min_interval, u32 max_interval)
1775{
1776 struct adv_info *adv;
1777
	adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
				   min_interval, max_interval, 0);
	if (IS_ERR(adv))
1782 return adv;
1783
1784 adv->periodic = true;
1785 adv->per_adv_data_len = data_len;
1786
1787 if (data)
1788 memcpy(adv->per_adv_data, data, data_len);
1789
1790 return adv;
1791}
1792
1793/* This function requires the caller holds hdev->lock */
1794int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1795 u16 adv_data_len, u8 *adv_data,
1796 u16 scan_rsp_len, u8 *scan_rsp_data)
1797{
1798 struct adv_info *adv;
1799
1800 adv = hci_find_adv_instance(hdev, instance);
1801
1802 /* If advertisement doesn't exist, we can't modify its data */
1803 if (!adv)
1804 return -ENOENT;
1805
1806 if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1807 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1808 memcpy(adv->adv_data, adv_data, adv_data_len);
1809 adv->adv_data_len = adv_data_len;
1810 adv->adv_data_changed = true;
1811 }
1812
1813 if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1814 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1815 memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1816 adv->scan_rsp_len = scan_rsp_len;
1817 adv->scan_rsp_changed = true;
1818 }
1819
1820 /* Mark as changed if there are flags which would affect it */
1821 if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1822 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1823 adv->scan_rsp_changed = true;
1824
1825 return 0;
1826}
1827
1828/* This function requires the caller holds hdev->lock */
1829u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1830{
1831 u32 flags;
1832 struct adv_info *adv;
1833
1834 if (instance == 0x00) {
1835 /* Instance 0 always manages the "Tx Power" and "Flags"
1836 * fields
1837 */
1838 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1839
1840 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1841 * corresponds to the "connectable" instance flag.
1842 */
1843 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1844 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1845
1846 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1847 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1848 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1849 flags |= MGMT_ADV_FLAG_DISCOV;
1850
1851 return flags;
1852 }
1853
1854 adv = hci_find_adv_instance(hdev, instance);
1855
1856 /* Return 0 when we got an invalid instance identifier. */
1857 if (!adv)
1858 return 0;
1859
1860 return adv->flags;
1861}
1862
1863bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1864{
1865 struct adv_info *adv;
1866
1867 /* Instance 0x00 always set local name */
1868 if (instance == 0x00)
1869 return true;
1870
1871 adv = hci_find_adv_instance(hdev, instance);
1872 if (!adv)
1873 return false;
1874
1875 if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1876 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1877 return true;
1878
1879 return adv->scan_rsp_len ? true : false;
1880}
1881
1882/* This function requires the caller holds hdev->lock */
1883void hci_adv_monitors_clear(struct hci_dev *hdev)
1884{
1885 struct adv_monitor *monitor;
1886 int handle;
1887
1888 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1889 hci_free_adv_monitor(hdev, monitor);
1890
1891 idr_destroy(&hdev->adv_monitors_idr);
1892}
1893
/* Frees the monitor structure and does some bookkeeping.
1895 * This function requires the caller holds hdev->lock.
1896 */
1897void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1898{
1899 struct adv_pattern *pattern;
1900 struct adv_pattern *tmp;
1901
1902 if (!monitor)
1903 return;
1904
1905 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
		list_del(&pattern->list);
		kfree(pattern);
1908 }
1909
1910 if (monitor->handle)
		idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1912
1913 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1914 hdev->adv_monitors_cnt--;
		mgmt_adv_monitor_removed(hdev, monitor->handle);
1916 }
1917
	kfree(monitor);
1919}
1920
1921/* Assigns handle to a monitor, and if offloading is supported and power is on,
1922 * also attempts to forward the request to the controller.
1923 * This function requires the caller holds hci_req_sync_lock.
1924 */
1925int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1926{
1927 int min, max, handle;
1928 int status = 0;
1929
1930 if (!monitor)
1931 return -EINVAL;
1932
1933 hci_dev_lock(hdev);
1934
1935 min = HCI_MIN_ADV_MONITOR_HANDLE;
1936 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
	handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
			   GFP_KERNEL);
1939
1940 hci_dev_unlock(hdev);
1941
1942 if (handle < 0)
1943 return handle;
1944
1945 monitor->handle = handle;
1946
1947 if (!hdev_is_powered(hdev))
1948 return status;
1949
1950 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1951 case HCI_ADV_MONITOR_EXT_NONE:
1952 bt_dev_dbg(hdev, "add monitor %d status %d",
1953 monitor->handle, status);
1954 /* Message was not forwarded to controller - not an error */
1955 break;
1956
1957 case HCI_ADV_MONITOR_EXT_MSFT:
1958 status = msft_add_monitor_pattern(hdev, monitor);
1959 bt_dev_dbg(hdev, "add monitor %d msft status %d",
1960 handle, status);
1961 break;
1962 }
1963
1964 return status;
1965}
1966
1967/* Attempts to tell the controller and free the monitor. If somehow the
1968 * controller doesn't have a corresponding handle, remove anyway.
1969 * This function requires the caller holds hci_req_sync_lock.
1970 */
1971static int hci_remove_adv_monitor(struct hci_dev *hdev,
1972 struct adv_monitor *monitor)
1973{
1974 int status = 0;
1975 int handle;
1976
1977 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1978 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1979 bt_dev_dbg(hdev, "remove monitor %d status %d",
1980 monitor->handle, status);
1981 goto free_monitor;
1982
1983 case HCI_ADV_MONITOR_EXT_MSFT:
1984 handle = monitor->handle;
1985 status = msft_remove_monitor(hdev, monitor);
1986 bt_dev_dbg(hdev, "remove monitor %d msft status %d",
1987 handle, status);
1988 break;
1989 }
1990
1991 /* In case no matching handle registered, just free the monitor */
1992 if (status == -ENOENT)
1993 goto free_monitor;
1994
1995 return status;
1996
1997free_monitor:
1998 if (status == -ENOENT)
1999 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
2000 monitor->handle);
2001 hci_free_adv_monitor(hdev, monitor);
2002
2003 return status;
2004}
2005
2006/* This function requires the caller holds hci_req_sync_lock */
2007int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
2008{
	struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
2010
2011 if (!monitor)
2012 return -EINVAL;
2013
2014 return hci_remove_adv_monitor(hdev, monitor);
2015}
2016
2017/* This function requires the caller holds hci_req_sync_lock */
2018int hci_remove_all_adv_monitor(struct hci_dev *hdev)
2019{
2020 struct adv_monitor *monitor;
2021 int idr_next_id = 0;
2022 int status = 0;
2023
2024 while (1) {
		monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2026 if (!monitor)
2027 break;
2028
2029 status = hci_remove_adv_monitor(hdev, monitor);
2030 if (status)
2031 return status;
2032
2033 idr_next_id++;
2034 }
2035
2036 return status;
2037}
2038
2039/* This function requires the caller holds hdev->lock */
2040bool hci_is_adv_monitoring(struct hci_dev *hdev)
2041{
	return !idr_is_empty(&hdev->adv_monitors_idr);
2043}
2044
2045int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2046{
2047 if (msft_monitor_supported(hdev))
2048 return HCI_ADV_MONITOR_EXT_MSFT;
2049
2050 return HCI_ADV_MONITOR_EXT_NONE;
2051}
2052
2053struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2054 bdaddr_t *bdaddr, u8 type)
2055{
2056 struct bdaddr_list *b;
2057
2058 list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2060 return b;
2061 }
2062
2063 return NULL;
2064}
2065
2066struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2067 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2068 u8 type)
2069{
2070 struct bdaddr_list_with_irk *b;
2071
2072 list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2074 return b;
2075 }
2076
2077 return NULL;
2078}
2079
2080struct bdaddr_list_with_flags *
2081hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2082 bdaddr_t *bdaddr, u8 type)
2083{
2084 struct bdaddr_list_with_flags *b;
2085
2086 list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2088 return b;
2089 }
2090
2091 return NULL;
2092}
2093
2094void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2095{
2096 struct bdaddr_list *b, *n;
2097
2098 list_for_each_entry_safe(b, n, bdaddr_list, list) {
		list_del(&b->list);
		kfree(b);
2101 }
2102}
2103
2104int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2105{
2106 struct bdaddr_list *entry;
2107
	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);
2122
2123 return 0;
2124}
2125
2126int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2127 u8 type, u8 *peer_irk, u8 *local_irk)
2128{
2129 struct bdaddr_list_with_irk *entry;
2130
	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	if (peer_irk)
		memcpy(entry->peer_irk, peer_irk, 16);

	if (local_irk)
		memcpy(entry->local_irk, local_irk, 16);

	list_add(&entry->list, list);
2151
2152 return 0;
2153}
2154
2155int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2156 u8 type, u32 flags)
2157{
2158 struct bdaddr_list_with_flags *entry;
2159
2160 if (!bacmp(ba1: bdaddr, BDADDR_ANY))
2161 return -EBADF;
2162
2163 if (hci_bdaddr_list_lookup(bdaddr_list: list, bdaddr, type))
2164 return -EEXIST;
2165
2166 entry = kzalloc(size: sizeof(*entry), GFP_KERNEL);
2167 if (!entry)
2168 return -ENOMEM;
2169
2170 bacpy(dst: &entry->bdaddr, src: bdaddr);
2171 entry->bdaddr_type = type;
2172 entry->flags = flags;
2173
2174 list_add(new: &entry->list, head: list);
2175
2176 return 0;
2177}
2178
2179int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2180{
2181 struct bdaddr_list *entry;
2182
2183 if (!bacmp(ba1: bdaddr, BDADDR_ANY)) {
2184 hci_bdaddr_list_clear(bdaddr_list: list);
2185 return 0;
2186 }
2187
2188 entry = hci_bdaddr_list_lookup(bdaddr_list: list, bdaddr, type);
2189 if (!entry)
2190 return -ENOENT;
2191
2192 list_del(entry: &entry->list);
2193 kfree(objp: entry);
2194
2195 return 0;
2196}
2197
2198int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2199 u8 type)
2200{
2201 struct bdaddr_list_with_irk *entry;
2202
2203 if (!bacmp(ba1: bdaddr, BDADDR_ANY)) {
2204 hci_bdaddr_list_clear(bdaddr_list: list);
2205 return 0;
2206 }
2207
2208 entry = hci_bdaddr_list_lookup_with_irk(bdaddr_list: list, bdaddr, type);
2209 if (!entry)
2210 return -ENOENT;
2211
2212 list_del(entry: &entry->list);
2213 kfree(objp: entry);
2214
2215 return 0;
2216}
2217
2218int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2219 u8 type)
2220{
2221 struct bdaddr_list_with_flags *entry;
2222
2223 if (!bacmp(ba1: bdaddr, BDADDR_ANY)) {
2224 hci_bdaddr_list_clear(bdaddr_list: list);
2225 return 0;
2226 }
2227
2228 entry = hci_bdaddr_list_lookup_with_flags(bdaddr_list: list, bdaddr, type);
2229 if (!entry)
2230 return -ENOENT;
2231
2232 list_del(entry: &entry->list);
2233 kfree(objp: entry);
2234
2235 return 0;
2236}
2237
2238/* This function requires the caller holds hdev->lock */
2239struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2240 bdaddr_t *addr, u8 addr_type)
2241{
2242 struct hci_conn_params *params;
2243
2244 list_for_each_entry(params, &hdev->le_conn_params, list) {
2245 if (bacmp(ba1: &params->addr, ba2: addr) == 0 &&
2246 params->addr_type == addr_type) {
2247 return params;
2248 }
2249 }
2250
2251 return NULL;
2252}
2253
2254/* This function requires the caller holds hdev->lock or rcu_read_lock */
2255struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2256 bdaddr_t *addr, u8 addr_type)
2257{
2258 struct hci_conn_params *param;
2259
2260 rcu_read_lock();
2261
2262 list_for_each_entry_rcu(param, list, action) {
2263 if (bacmp(ba1: &param->addr, ba2: addr) == 0 &&
2264 param->addr_type == addr_type) {
2265 rcu_read_unlock();
2266 return param;
2267 }
2268 }
2269
2270 rcu_read_unlock();
2271
2272 return NULL;
2273}
2274
2275/* This function requires the caller holds hdev->lock */
2276void hci_pend_le_list_del_init(struct hci_conn_params *param)
2277{
2278 if (list_empty(head: &param->action))
2279 return;
2280
2281 list_del_rcu(entry: &param->action);
2282 synchronize_rcu();
2283 INIT_LIST_HEAD(list: &param->action);
2284}
2285
2286/* This function requires the caller holds hdev->lock */
2287void hci_pend_le_list_add(struct hci_conn_params *param,
2288 struct list_head *list)
2289{
2290 list_add_rcu(new: &param->action, head: list);
2291}
2292
2293/* This function requires the caller holds hdev->lock */
2294struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2295 bdaddr_t *addr, u8 addr_type)
2296{
2297 struct hci_conn_params *params;
2298
2299 params = hci_conn_params_lookup(hdev, addr, addr_type);
2300 if (params)
2301 return params;
2302
2303 params = kzalloc(size: sizeof(*params), GFP_KERNEL);
2304 if (!params) {
2305 bt_dev_err(hdev, "out of memory");
2306 return NULL;
2307 }
2308
2309 bacpy(dst: &params->addr, src: addr);
2310 params->addr_type = addr_type;
2311
2312 list_add(new: &params->list, head: &hdev->le_conn_params);
2313 INIT_LIST_HEAD(list: &params->action);
2314
2315 params->conn_min_interval = hdev->le_conn_min_interval;
2316 params->conn_max_interval = hdev->le_conn_max_interval;
2317 params->conn_latency = hdev->le_conn_latency;
2318 params->supervision_timeout = hdev->le_supv_timeout;
2319 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2320
2321 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2322
2323 return params;
2324}
2325
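/* Free a connection parameter entry: remove it from any pending action
 * list, drop its connection reference and release the memory.
 */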
2326void hci_conn_params_free(struct hci_conn_params *params)
2327{
2328 hci_pend_le_list_del_init(param: params);
2329
2330 if (params->conn) {
2331 hci_conn_drop(conn: params->conn);
2332 hci_conn_put(conn: params->conn);
2333 }
2334
2335 list_del(entry: &params->list);
2336 kfree(objp: params);
2337}
2338
2339/* This function requires the caller holds hdev->lock */
2340void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2341{
2342 struct hci_conn_params *params;
2343
2344 params = hci_conn_params_lookup(hdev, addr, addr_type);
2345 if (!params)
2346 return;
2347
2348 hci_conn_params_free(params);
2349
2350 hci_update_passive_scan(hdev);
2351
2352 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2353}
2354
2355/* This function requires the caller holds hdev->lock */
2356void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2357{
2358 struct hci_conn_params *params, *tmp;
2359
2360 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2361 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2362 continue;
2363
		/* If trying to establish a one-time connection to a disabled
		 * device, leave the params, but mark them for explicit
		 * connect use only.
		 */
2367 if (params->explicit_connect) {
2368 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2369 continue;
2370 }
2371
2372 hci_conn_params_free(params);
2373 }
2374
2375 BT_DBG("All LE disabled connection parameters were removed");
2376}
2377
2378/* This function requires the caller holds hdev->lock */
2379static void hci_conn_params_clear_all(struct hci_dev *hdev)
2380{
2381 struct hci_conn_params *params, *tmp;
2382
2383 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2384 hci_conn_params_free(params);
2385
2386 BT_DBG("All LE connection parameters were removed");
2387}
2388
2389/* Copy the Identity Address of the controller.
2390 *
2391 * If the controller has a public BD_ADDR, then by default use that one.
2392 * If this is a LE only controller without a public address, default to
2393 * the static random address.
2394 *
2395 * For debugging purposes it is possible to force controllers with a
2396 * public address to use the static random address instead.
2397 *
2398 * In case BR/EDR has been disabled on a dual-mode controller and
2399 * userspace has configured a static address, then that address
2400 * becomes the identity address instead of the public BR/EDR address.
2401 */
2402void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2403 u8 *bdaddr_type)
2404{
2405 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2406 !bacmp(ba1: &hdev->bdaddr, BDADDR_ANY) ||
2407 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2408 bacmp(ba1: &hdev->static_addr, BDADDR_ANY))) {
2409 bacpy(dst: bdaddr, src: &hdev->static_addr);
2410 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2411 } else {
2412 bacpy(dst: bdaddr, src: &hdev->bdaddr);
2413 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2414 }
2415}
2416
2417static void hci_clear_wake_reason(struct hci_dev *hdev)
2418{
2419 hci_dev_lock(hdev);
2420
2421 hdev->wake_reason = 0;
2422 bacpy(dst: &hdev->wake_addr, BDADDR_ANY);
2423 hdev->wake_addr_type = 0;
2424
2425 hci_dev_unlock(hdev);
2426}
2427
2428static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2429 void *data)
2430{
2431 struct hci_dev *hdev =
2432 container_of(nb, struct hci_dev, suspend_notifier);
2433 int ret = 0;
2434
2435 /* Userspace has full control of this device. Do nothing. */
2436 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2437 return NOTIFY_DONE;
2438
2439 /* To avoid a potential race with hci_unregister_dev. */
2440 hci_dev_hold(d: hdev);
2441
2442 if (action == PM_SUSPEND_PREPARE)
2443 ret = hci_suspend_dev(hdev);
2444 else if (action == PM_POST_SUSPEND)
2445 ret = hci_resume_dev(hdev);
2446
2447 if (ret)
2448 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2449 action, ret);
2450
2451 hci_dev_put(d: hdev);
2452 return NOTIFY_DONE;
2453}
2454
2455/* Alloc HCI device */
2456struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2457{
2458 struct hci_dev *hdev;
2459 unsigned int alloc_size;
2460
2461 alloc_size = sizeof(*hdev);
2462 if (sizeof_priv) {
2463 /* Fixme: May need ALIGN-ment? */
2464 alloc_size += sizeof_priv;
2465 }
2466
2467 hdev = kzalloc(size: alloc_size, GFP_KERNEL);
2468 if (!hdev)
2469 return NULL;
2470
2471 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2472 hdev->esco_type = (ESCO_HV1);
2473 hdev->link_mode = (HCI_LM_ACCEPT);
2474 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2475 hdev->io_capability = 0x03; /* No Input No Output */
2476 hdev->manufacturer = 0xffff; /* Default to internal use */
2477 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2478 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2479 hdev->adv_instance_cnt = 0;
2480 hdev->cur_adv_instance = 0x00;
2481 hdev->adv_instance_timeout = 0;
2482
2483 hdev->advmon_allowlist_duration = 300;
2484 hdev->advmon_no_filter_duration = 500;
2485 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
2486
2487 hdev->sniff_max_interval = 800;
2488 hdev->sniff_min_interval = 80;
2489
2490 hdev->le_adv_channel_map = 0x07;
2491 hdev->le_adv_min_interval = 0x0800;
2492 hdev->le_adv_max_interval = 0x0800;
2493 hdev->le_scan_interval = 0x0060;
2494 hdev->le_scan_window = 0x0030;
2495 hdev->le_scan_int_suspend = 0x0400;
2496 hdev->le_scan_window_suspend = 0x0012;
2497 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2498 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2499 hdev->le_scan_int_adv_monitor = 0x0060;
2500 hdev->le_scan_window_adv_monitor = 0x0030;
2501 hdev->le_scan_int_connect = 0x0060;
2502 hdev->le_scan_window_connect = 0x0060;
2503 hdev->le_conn_min_interval = 0x0018;
2504 hdev->le_conn_max_interval = 0x0028;
2505 hdev->le_conn_latency = 0x0000;
2506 hdev->le_supv_timeout = 0x002a;
2507 hdev->le_def_tx_len = 0x001b;
2508 hdev->le_def_tx_time = 0x0148;
2509 hdev->le_max_tx_len = 0x001b;
2510 hdev->le_max_tx_time = 0x0148;
2511 hdev->le_max_rx_len = 0x001b;
2512 hdev->le_max_rx_time = 0x0148;
2513 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2514 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2515 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2516 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2517 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2518 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2519 hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
2520 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2521 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2522
2523 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2524 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2525 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2526 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2527 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2528 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2529
2530 /* default 1.28 sec page scan */
2531 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2532 hdev->def_page_scan_int = 0x0800;
2533 hdev->def_page_scan_window = 0x0012;
2534
2535 mutex_init(&hdev->lock);
2536 mutex_init(&hdev->req_lock);
2537
2538 ida_init(ida: &hdev->unset_handle_ida);
2539
2540 INIT_LIST_HEAD(list: &hdev->mesh_pending);
2541 INIT_LIST_HEAD(list: &hdev->mgmt_pending);
2542 INIT_LIST_HEAD(list: &hdev->reject_list);
2543 INIT_LIST_HEAD(list: &hdev->accept_list);
2544 INIT_LIST_HEAD(list: &hdev->uuids);
2545 INIT_LIST_HEAD(list: &hdev->link_keys);
2546 INIT_LIST_HEAD(list: &hdev->long_term_keys);
2547 INIT_LIST_HEAD(list: &hdev->identity_resolving_keys);
2548 INIT_LIST_HEAD(list: &hdev->remote_oob_data);
2549 INIT_LIST_HEAD(list: &hdev->le_accept_list);
2550 INIT_LIST_HEAD(list: &hdev->le_resolv_list);
2551 INIT_LIST_HEAD(list: &hdev->le_conn_params);
2552 INIT_LIST_HEAD(list: &hdev->pend_le_conns);
2553 INIT_LIST_HEAD(list: &hdev->pend_le_reports);
2554 INIT_LIST_HEAD(list: &hdev->conn_hash.list);
2555 INIT_LIST_HEAD(list: &hdev->adv_instances);
2556 INIT_LIST_HEAD(list: &hdev->blocked_keys);
2557 INIT_LIST_HEAD(list: &hdev->monitored_devices);
2558
2559 INIT_LIST_HEAD(list: &hdev->local_codecs);
2560 INIT_WORK(&hdev->rx_work, hci_rx_work);
2561 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2562 INIT_WORK(&hdev->tx_work, hci_tx_work);
2563 INIT_WORK(&hdev->power_on, hci_power_on);
2564 INIT_WORK(&hdev->error_reset, hci_error_reset);
2565
2566 hci_cmd_sync_init(hdev);
2567
2568 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2569
2570 skb_queue_head_init(list: &hdev->rx_q);
2571 skb_queue_head_init(list: &hdev->cmd_q);
2572 skb_queue_head_init(list: &hdev->raw_q);
2573
2574 init_waitqueue_head(&hdev->req_wait_q);
2575
2576 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2577 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2578
2579 hci_devcd_setup(hdev);
2580 hci_request_setup(hdev);
2581
2582 hci_init_sysfs(hdev);
2583 discovery_init(hdev);
2584
2585 return hdev;
2586}
2587EXPORT_SYMBOL(hci_alloc_dev_priv);
2588
2589/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* The actual free is deferred to the device release callback */
	put_device(&hdev->dev);
}
2595EXPORT_SYMBOL(hci_free_dev);
2596
2597/* Register HCI device */
2598int hci_register_dev(struct hci_dev *hdev)
2599{
2600 int id, error;
2601
2602 if (!hdev->open || !hdev->close || !hdev->send)
2603 return -EINVAL;
2604
2605 /* Do not allow HCI_AMP devices to register at index 0,
2606 * so the index can be used as the AMP controller ID.
2607 */
2608 switch (hdev->dev_type) {
2609 case HCI_PRIMARY:
2610 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
2611 break;
2612 case HCI_AMP:
2613 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
2614 break;
2615 default:
2616 return -EINVAL;
2617 }
2618
2619 if (id < 0)
2620 return id;
2621
2622 error = dev_set_name(dev: &hdev->dev, name: "hci%u", id);
2623 if (error)
2624 return error;
2625
2626 hdev->name = dev_name(dev: &hdev->dev);
2627 hdev->id = id;
2628
2629 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2630
2631 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2632 if (!hdev->workqueue) {
2633 error = -ENOMEM;
2634 goto err;
2635 }
2636
2637 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2638 hdev->name);
2639 if (!hdev->req_workqueue) {
2640 destroy_workqueue(wq: hdev->workqueue);
2641 error = -ENOMEM;
2642 goto err;
2643 }
2644
2645 if (!IS_ERR_OR_NULL(ptr: bt_debugfs))
2646 hdev->debugfs = debugfs_create_dir(name: hdev->name, parent: bt_debugfs);
2647
2648 error = device_add(dev: &hdev->dev);
2649 if (error < 0)
2650 goto err_wqueue;
2651
2652 hci_leds_init(hdev);
2653
2654 hdev->rfkill = rfkill_alloc(name: hdev->name, parent: &hdev->dev,
2655 type: RFKILL_TYPE_BLUETOOTH, ops: &hci_rfkill_ops,
2656 ops_data: hdev);
2657 if (hdev->rfkill) {
2658 if (rfkill_register(rfkill: hdev->rfkill) < 0) {
2659 rfkill_destroy(rfkill: hdev->rfkill);
2660 hdev->rfkill = NULL;
2661 }
2662 }
2663
2664 if (hdev->rfkill && rfkill_blocked(rfkill: hdev->rfkill))
2665 hci_dev_set_flag(hdev, HCI_RFKILLED);
2666
2667 hci_dev_set_flag(hdev, HCI_SETUP);
2668 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2669
2670 if (hdev->dev_type == HCI_PRIMARY) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
2674 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2675 }
2676
2677 write_lock(&hci_dev_list_lock);
2678 list_add(new: &hdev->list, head: &hci_dev_list);
2679 write_unlock(&hci_dev_list_lock);
2680
2681 /* Devices that are marked for raw-only usage are unconfigured
2682 * and should not be included in normal operation.
2683 */
2684 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2685 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2686
2687 /* Mark Remote Wakeup connection flag as supported if driver has wakeup
2688 * callback.
2689 */
2690 if (hdev->wakeup)
2691 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2692
2693 hci_sock_dev_event(hdev, HCI_DEV_REG);
2694 hci_dev_hold(d: hdev);
2695
2696 error = hci_register_suspend_notifier(hdev);
2697 if (error)
2698 BT_WARN("register suspend notifier failed error:%d\n", error);
2699
2700 queue_work(wq: hdev->req_workqueue, work: &hdev->power_on);
2701
2702 idr_init(idr: &hdev->adv_monitors_idr);
2703 msft_register(hdev);
2704
2705 return id;
2706
2707err_wqueue:
2708 debugfs_remove_recursive(dentry: hdev->debugfs);
2709 destroy_workqueue(wq: hdev->workqueue);
2710 destroy_workqueue(wq: hdev->req_workqueue);
2711err:
2712 ida_simple_remove(&hci_index_ida, hdev->id);
2713
2714 return error;
2715}
2716EXPORT_SYMBOL(hci_register_dev);
2717
2718/* Unregister HCI device */
2719void hci_unregister_dev(struct hci_dev *hdev)
2720{
2721 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2722
2723 mutex_lock(&hdev->unregister_lock);
2724 hci_dev_set_flag(hdev, HCI_UNREGISTER);
2725 mutex_unlock(lock: &hdev->unregister_lock);
2726
2727 write_lock(&hci_dev_list_lock);
2728 list_del(entry: &hdev->list);
2729 write_unlock(&hci_dev_list_lock);
2730
2731 cancel_work_sync(work: &hdev->power_on);
2732
2733 hci_cmd_sync_clear(hdev);
2734
2735 hci_unregister_suspend_notifier(hdev);
2736
2737 msft_unregister(hdev);
2738
2739 hci_dev_do_close(hdev);
2740
2741 if (!test_bit(HCI_INIT, &hdev->flags) &&
2742 !hci_dev_test_flag(hdev, HCI_SETUP) &&
2743 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2744 hci_dev_lock(hdev);
2745 mgmt_index_removed(hdev);
2746 hci_dev_unlock(hdev);
2747 }
2748
2749 /* mgmt_index_removed should take care of emptying the
2750 * pending list */
2751 BUG_ON(!list_empty(&hdev->mgmt_pending));
2752
2753 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2754
2755 if (hdev->rfkill) {
2756 rfkill_unregister(rfkill: hdev->rfkill);
2757 rfkill_destroy(rfkill: hdev->rfkill);
2758 }
2759
2760 device_del(dev: &hdev->dev);
2761 /* Actual cleanup is deferred until hci_release_dev(). */
2762 hci_dev_put(d: hdev);
2763}
2764EXPORT_SYMBOL(hci_unregister_dev);
2765
2766/* Release HCI device */
2767void hci_release_dev(struct hci_dev *hdev)
2768{
2769 debugfs_remove_recursive(dentry: hdev->debugfs);
2770 kfree_const(x: hdev->hw_info);
2771 kfree_const(x: hdev->fw_info);
2772
2773 destroy_workqueue(wq: hdev->workqueue);
2774 destroy_workqueue(wq: hdev->req_workqueue);
2775
2776 hci_dev_lock(hdev);
2777 hci_bdaddr_list_clear(bdaddr_list: &hdev->reject_list);
2778 hci_bdaddr_list_clear(bdaddr_list: &hdev->accept_list);
2779 hci_uuids_clear(hdev);
2780 hci_link_keys_clear(hdev);
2781 hci_smp_ltks_clear(hdev);
2782 hci_smp_irks_clear(hdev);
2783 hci_remote_oob_data_clear(hdev);
2784 hci_adv_instances_clear(hdev);
2785 hci_adv_monitors_clear(hdev);
2786 hci_bdaddr_list_clear(bdaddr_list: &hdev->le_accept_list);
2787 hci_bdaddr_list_clear(bdaddr_list: &hdev->le_resolv_list);
2788 hci_conn_params_clear_all(hdev);
2789 hci_discovery_filter_clear(hdev);
2790 hci_blocked_keys_clear(hdev);
2791 hci_codec_list_clear(codec_list: &hdev->local_codecs);
2792 hci_dev_unlock(hdev);
2793
2794 ida_destroy(ida: &hdev->unset_handle_ida);
2795 ida_simple_remove(&hci_index_ida, hdev->id);
2796 kfree_skb(skb: hdev->sent_cmd);
2797 kfree_skb(skb: hdev->recv_event);
2798 kfree(objp: hdev);
2799}
2800EXPORT_SYMBOL(hci_release_dev);
2801
2802int hci_register_suspend_notifier(struct hci_dev *hdev)
2803{
2804 int ret = 0;
2805
2806 if (!hdev->suspend_notifier.notifier_call &&
2807 !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2808 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2809 ret = register_pm_notifier(nb: &hdev->suspend_notifier);
2810 }
2811
2812 return ret;
2813}
2814
2815int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2816{
2817 int ret = 0;
2818
2819 if (hdev->suspend_notifier.notifier_call) {
2820 ret = unregister_pm_notifier(nb: &hdev->suspend_notifier);
2821 if (!ret)
2822 hdev->suspend_notifier.notifier_call = NULL;
2823 }
2824
2825 return ret;
2826}
2827
2828/* Suspend HCI device */
2829int hci_suspend_dev(struct hci_dev *hdev)
2830{
2831 int ret;
2832
2833 bt_dev_dbg(hdev, "");
2834
	/* Suspend should only act when the device is powered. */
2836 if (!hdev_is_powered(hdev) ||
2837 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2838 return 0;
2839
2840 /* If powering down don't attempt to suspend */
2841 if (mgmt_powering_down(hdev))
2842 return 0;
2843
2844 /* Cancel potentially blocking sync operation before suspend */
2845 __hci_cmd_sync_cancel(hdev, err: -EHOSTDOWN);
2846
2847 hci_req_sync_lock(hdev);
2848 ret = hci_suspend_sync(hdev);
2849 hci_req_sync_unlock(hdev);
2850
2851 hci_clear_wake_reason(hdev);
2852 mgmt_suspending(hdev, state: hdev->suspend_state);
2853
2854 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2855 return ret;
2856}
2857EXPORT_SYMBOL(hci_suspend_dev);
2858
2859/* Resume HCI device */
2860int hci_resume_dev(struct hci_dev *hdev)
2861{
2862 int ret;
2863
2864 bt_dev_dbg(hdev, "");
2865
	/* Resume should only act when the device is powered. */
2867 if (!hdev_is_powered(hdev) ||
2868 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2869 return 0;
2870
2871 /* If powering down don't attempt to resume */
2872 if (mgmt_powering_down(hdev))
2873 return 0;
2874
2875 hci_req_sync_lock(hdev);
2876 ret = hci_resume_sync(hdev);
2877 hci_req_sync_unlock(hdev);
2878
2879 mgmt_resuming(hdev, reason: hdev->wake_reason, bdaddr: &hdev->wake_addr,
2880 addr_type: hdev->wake_addr_type);
2881
2882 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2883 return ret;
2884}
2885EXPORT_SYMBOL(hci_resume_dev);
2886
2887/* Reset HCI device */
2888int hci_reset_dev(struct hci_dev *hdev)
2889{
2890 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2891 struct sk_buff *skb;
2892
2893 skb = bt_skb_alloc(len: 3, GFP_ATOMIC);
2894 if (!skb)
2895 return -ENOMEM;
2896
2897 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2898 skb_put_data(skb, data: hw_err, len: 3);
2899
2900 bt_dev_err(hdev, "Injecting HCI hardware error event");
2901
2902 /* Send Hardware Error to upper stack */
2903 return hci_recv_frame(hdev, skb);
2904}
2905EXPORT_SYMBOL(hci_reset_dev);
2906
2907/* Receive frame from HCI drivers */
2908int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2909{
2910 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2911 && !test_bit(HCI_INIT, &hdev->flags))) {
2912 kfree_skb(skb);
2913 return -ENXIO;
2914 }
2915
2916 switch (hci_skb_pkt_type(skb)) {
2917 case HCI_EVENT_PKT:
2918 break;
2919 case HCI_ACLDATA_PKT:
2920 /* Detect if ISO packet has been sent as ACL */
2921 if (hci_conn_num(hdev, ISO_LINK)) {
2922 __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2923 __u8 type;
2924
2925 type = hci_conn_lookup_type(hdev, hci_handle(handle));
2926 if (type == ISO_LINK)
2927 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2928 }
2929 break;
2930 case HCI_SCODATA_PKT:
2931 break;
2932 case HCI_ISODATA_PKT:
2933 break;
2934 default:
2935 kfree_skb(skb);
2936 return -EINVAL;
2937 }
2938
2939 /* Incoming skb */
2940 bt_cb(skb)->incoming = 1;
2941
2942 /* Time stamp */
2943 __net_timestamp(skb);
2944
2945 skb_queue_tail(list: &hdev->rx_q, newsk: skb);
2946 queue_work(wq: hdev->workqueue, work: &hdev->rx_work);
2947
2948 return 0;
2949}
2950EXPORT_SYMBOL(hci_recv_frame);
2951
/* Receive diagnostic message from HCI drivers */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
2966EXPORT_SYMBOL(hci_recv_diag);
2967
2968void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2969{
2970 va_list vargs;
2971
2972 va_start(vargs, fmt);
2973 kfree_const(x: hdev->hw_info);
2974 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, args: vargs);
2975 va_end(vargs);
2976}
2977EXPORT_SYMBOL(hci_set_hw_info);
2978
2979void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2980{
2981 va_list vargs;
2982
2983 va_start(vargs, fmt);
2984 kfree_const(x: hdev->fw_info);
2985 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, args: vargs);
2986 va_end(vargs);
2987}
2988EXPORT_SYMBOL(hci_set_fw_info);
2989
2990/* ---- Interface to upper protocols ---- */
2991
2992int hci_register_cb(struct hci_cb *cb)
2993{
2994 BT_DBG("%p name %s", cb, cb->name);
2995
2996 mutex_lock(&hci_cb_list_lock);
2997 list_add_tail(new: &cb->list, head: &hci_cb_list);
2998 mutex_unlock(lock: &hci_cb_list_lock);
2999
3000 return 0;
3001}
3002EXPORT_SYMBOL(hci_register_cb);
3003
3004int hci_unregister_cb(struct hci_cb *cb)
3005{
3006 BT_DBG("%p name %s", cb, cb->name);
3007
3008 mutex_lock(&hci_cb_list_lock);
3009 list_del(entry: &cb->list);
3010 mutex_unlock(lock: &hci_cb_list_lock);
3011
3012 return 0;
3013}
3014EXPORT_SYMBOL(hci_unregister_cb);
3015
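/* Hand a single frame to the driver. A copy goes to the monitor socket
 * and, in promiscuous mode, to the HCI sockets before sending.
 */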
3016static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3017{
3018 int err;
3019
3020 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3021 skb->len);
3022
3023 /* Time stamp */
3024 __net_timestamp(skb);
3025
3026 /* Send copy to monitor */
3027 hci_send_to_monitor(hdev, skb);
3028
3029 if (atomic_read(v: &hdev->promisc)) {
3030 /* Send copy to the sockets */
3031 hci_send_to_sock(hdev, skb);
3032 }
3033
3034 /* Get rid of skb owner, prior to sending to the driver. */
3035 skb_orphan(skb);
3036
3037 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3038 kfree_skb(skb);
3039 return -EINVAL;
3040 }
3041
3042 err = hdev->send(hdev, skb);
3043 if (err < 0) {
3044 bt_dev_err(hdev, "sending frame failed (%d)", err);
3045 kfree_skb(skb);
3046 return err;
3047 }
3048
3049 return 0;
3050}
3051
3052/* Send HCI command */
3053int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3054 const void *param)
3055{
3056 struct sk_buff *skb;
3057
3058 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3059
3060 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3061 if (!skb) {
3062 bt_dev_err(hdev, "no memory for command");
3063 return -ENOMEM;
3064 }
3065
3066 /* Stand-alone HCI commands must be flagged as
3067 * single-command requests.
3068 */
3069 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3070
3071 skb_queue_tail(list: &hdev->cmd_q, newsk: skb);
3072 queue_work(wq: hdev->workqueue, work: &hdev->cmd_work);
3073
3074 return 0;
3075}
3076
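/* Send a vendor-specific HCI command directly to the driver without
 * waiting for a Command Status or Command Complete event.
 */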
3077int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3078 const void *param)
3079{
3080 struct sk_buff *skb;
3081
3082 if (hci_opcode_ogf(opcode) != 0x3f) {
3083 /* A controller receiving a command shall respond with either
3084 * a Command Status Event or a Command Complete Event.
3085 * Therefore, all standard HCI commands must be sent via the
3086 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3087 * Some vendors do not comply with this rule for vendor-specific
3088 * commands and do not return any event. We want to support
3089 * unresponded commands for such cases only.
3090 */
3091 bt_dev_err(hdev, "unresponded command not supported");
3092 return -EINVAL;
3093 }
3094
3095 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3096 if (!skb) {
3097 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3098 opcode);
3099 return -ENOMEM;
3100 }
3101
3102 hci_send_frame(hdev, skb);
3103
3104 return 0;
3105}
3106EXPORT_SYMBOL(__hci_cmd_send);
3107
3108/* Get data from the previously sent command */
3109void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3110{
3111 struct hci_command_hdr *hdr;
3112
3113 if (!hdev->sent_cmd)
3114 return NULL;
3115
3116 hdr = (void *) hdev->sent_cmd->data;
3117
3118 if (hdr->opcode != cpu_to_le16(opcode))
3119 return NULL;
3120
3121 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3122
3123 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3124}
3125
3126/* Get data from last received event */
3127void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3128{
3129 struct hci_event_hdr *hdr;
3130 int offset;
3131
3132 if (!hdev->recv_event)
3133 return NULL;
3134
3135 hdr = (void *)hdev->recv_event->data;
3136 offset = sizeof(*hdr);
3137
3138 if (hdr->evt != event) {
		/* For an LE meta event, check whether the subevent matches */
3140 if (hdr->evt == HCI_EV_LE_META) {
3141 struct hci_ev_le_meta *ev;
3142
3143 ev = (void *)hdev->recv_event->data + offset;
3144 offset += sizeof(*ev);
3145 if (ev->subevent == event)
3146 goto found;
3147 }
3148 return NULL;
3149 }
3150
3151found:
3152 bt_dev_dbg(hdev, "event 0x%2.2x", event);
3153
3154 return hdev->recv_event->data + offset;
3155}
3156
3157/* Send ACL data */
3158static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3159{
3160 struct hci_acl_hdr *hdr;
3161 int len = skb->len;
3162
3163 skb_push(skb, HCI_ACL_HDR_SIZE);
3164 skb_reset_transport_header(skb);
3165 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3166 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3167 hdr->dlen = cpu_to_le16(len);
3168}
3169
3170static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3171 struct sk_buff *skb, __u16 flags)
3172{
3173 struct hci_conn *conn = chan->conn;
3174 struct hci_dev *hdev = conn->hdev;
3175 struct sk_buff *list;
3176
3177 skb->len = skb_headlen(skb);
3178 skb->data_len = 0;
3179
3180 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3181
3182 switch (hdev->dev_type) {
3183 case HCI_PRIMARY:
3184 hci_add_acl_hdr(skb, handle: conn->handle, flags);
3185 break;
3186 case HCI_AMP:
3187 hci_add_acl_hdr(skb, handle: chan->handle, flags);
3188 break;
3189 default:
3190 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3191 return;
3192 }
3193
3194 list = skb_shinfo(skb)->frag_list;
3195 if (!list) {
3196 /* Non fragmented */
3197 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3198
3199 skb_queue_tail(list: queue, newsk: skb);
3200 } else {
3201 /* Fragmented */
3202 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3203
3204 skb_shinfo(skb)->frag_list = NULL;
3205
3206 /* Queue all fragments atomically. We need to use spin_lock_bh
3207 * here because of 6LoWPAN links, as there this function is
3208 * called from softirq and using normal spin lock could cause
3209 * deadlocks.
3210 */
3211 spin_lock_bh(lock: &queue->lock);
3212
3213 __skb_queue_tail(list: queue, newsk: skb);
3214
3215 flags &= ~ACL_START;
3216 flags |= ACL_CONT;
3217 do {
3218 skb = list; list = list->next;
3219
3220 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3221 hci_add_acl_hdr(skb, handle: conn->handle, flags);
3222
3223 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3224
3225 __skb_queue_tail(list: queue, newsk: skb);
3226 } while (list);
3227
3228 spin_unlock_bh(lock: &queue->lock);
3229 }
3230}
3231
3232void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3233{
3234 struct hci_dev *hdev = chan->conn->hdev;
3235
3236 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3237
3238 hci_queue_acl(chan, queue: &chan->data_q, skb, flags);
3239
3240 queue_work(wq: hdev->workqueue, work: &hdev->tx_work);
3241}
3242
3243/* Send SCO data */
3244void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3245{
3246 struct hci_dev *hdev = conn->hdev;
3247 struct hci_sco_hdr hdr;
3248
3249 BT_DBG("%s len %d", hdev->name, skb->len);
3250
3251 hdr.handle = cpu_to_le16(conn->handle);
3252 hdr.dlen = skb->len;
3253
3254 skb_push(skb, HCI_SCO_HDR_SIZE);
3255 skb_reset_transport_header(skb);
3256 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3257
3258 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3259
3260 skb_queue_tail(list: &conn->data_q, newsk: skb);
3261 queue_work(wq: hdev->workqueue, work: &hdev->tx_work);
3262}
3263
3264/* Send ISO data */
3265static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3266{
3267 struct hci_iso_hdr *hdr;
3268 int len = skb->len;
3269
3270 skb_push(skb, HCI_ISO_HDR_SIZE);
3271 skb_reset_transport_header(skb);
3272 hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3273 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3274 hdr->dlen = cpu_to_le16(len);
3275}
3276
3277static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3278 struct sk_buff *skb)
3279{
3280 struct hci_dev *hdev = conn->hdev;
3281 struct sk_buff *list;
3282 __u16 flags;
3283
3284 skb->len = skb_headlen(skb);
3285 skb->data_len = 0;
3286
3287 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3288
3289 list = skb_shinfo(skb)->frag_list;
3290
3291 flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3292 hci_add_iso_hdr(skb, handle: conn->handle, flags);
3293
3294 if (!list) {
3295 /* Non fragmented */
3296 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3297
3298 skb_queue_tail(list: queue, newsk: skb);
3299 } else {
3300 /* Fragmented */
3301 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3302
3303 skb_shinfo(skb)->frag_list = NULL;
3304
3305 __skb_queue_tail(list: queue, newsk: skb);
3306
3307 do {
3308 skb = list; list = list->next;
3309
3310 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3311 flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3312 0x00);
3313 hci_add_iso_hdr(skb, handle: conn->handle, flags);
3314
3315 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3316
3317 __skb_queue_tail(list: queue, newsk: skb);
3318 } while (list);
3319 }
3320}
3321
3322void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3323{
3324 struct hci_dev *hdev = conn->hdev;
3325
3326 BT_DBG("%s len %d", hdev->name, skb->len);
3327
3328 hci_queue_iso(conn, queue: &conn->data_q, skb);
3329
3330 queue_work(wq: hdev->workqueue, work: &hdev->tx_work);
3331}
3332
3333/* ---- HCI TX task (outgoing data) ---- */
3334
3335/* HCI Connection scheduler */
3336static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3337{
3338 struct hci_dev *hdev;
3339 int cnt, q;
3340
3341 if (!conn) {
3342 *quote = 0;
3343 return;
3344 }
3345
3346 hdev = conn->hdev;
3347
3348 switch (conn->type) {
3349 case ACL_LINK:
3350 cnt = hdev->acl_cnt;
3351 break;
3352 case AMP_LINK:
3353 cnt = hdev->block_cnt;
3354 break;
3355 case SCO_LINK:
3356 case ESCO_LINK:
3357 cnt = hdev->sco_cnt;
3358 break;
3359 case LE_LINK:
3360 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3361 break;
3362 case ISO_LINK:
3363 cnt = hdev->iso_mtu ? hdev->iso_cnt :
3364 hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3365 break;
3366 default:
3367 cnt = 0;
3368 bt_dev_err(hdev, "unknown link type %d", conn->type);
3369 }
3370
3371 q = cnt / num;
3372 *quote = q ? q : 1;
3373}
3374
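/* Pick the connection of the given link type with the fewest packets in
 * flight and calculate its TX quote.
 */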
3375static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3376 int *quote)
3377{
3378 struct hci_conn_hash *h = &hdev->conn_hash;
3379 struct hci_conn *conn = NULL, *c;
3380 unsigned int num = 0, min = ~0;
3381
3382 /* We don't have to lock device here. Connections are always
3383 * added and removed with TX task disabled. */
3384
3385 rcu_read_lock();
3386
3387 list_for_each_entry_rcu(c, &h->list, list) {
3388 if (c->type != type || skb_queue_empty(list: &c->data_q))
3389 continue;
3390
3391 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3392 continue;
3393
3394 num++;
3395
3396 if (c->sent < min) {
3397 min = c->sent;
3398 conn = c;
3399 }
3400
3401 if (hci_conn_num(hdev, type) == num)
3402 break;
3403 }
3404
3405 rcu_read_unlock();
3406
3407 hci_quote_sent(conn, num, quote);
3408
3409 BT_DBG("conn %p quote %d", conn, *quote);
3410 return conn;
3411}
3412
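/* Handle a link TX timeout by disconnecting all stalled connections of
 * the given link type.
 */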
3413static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3414{
3415 struct hci_conn_hash *h = &hdev->conn_hash;
3416 struct hci_conn *c;
3417
3418 bt_dev_err(hdev, "link tx timeout");
3419
3420 rcu_read_lock();
3421
3422 /* Kill stalled connections */
3423 list_for_each_entry_rcu(c, &h->list, list) {
3424 if (c->type == type && c->sent) {
3425 bt_dev_err(hdev, "killing stalled connection %pMR",
3426 &c->dst);
3427 /* hci_disconnect might sleep, so, we have to release
3428 * the RCU read lock before calling it.
3429 */
3430 rcu_read_unlock();
3431 hci_disconnect(conn: c, HCI_ERROR_REMOTE_USER_TERM);
3432 rcu_read_lock();
3433 }
3434 }
3435
3436 rcu_read_unlock();
3437}
3438
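/* Pick the next channel to service for the given link type: among the
 * channels with the highest queued priority, the one whose connection
 * has sent the least wins.
 */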
3439static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3440 int *quote)
3441{
3442 struct hci_conn_hash *h = &hdev->conn_hash;
3443 struct hci_chan *chan = NULL;
3444 unsigned int num = 0, min = ~0, cur_prio = 0;
3445 struct hci_conn *conn;
3446 int conn_num = 0;
3447
3448 BT_DBG("%s", hdev->name);
3449
3450 rcu_read_lock();
3451
3452 list_for_each_entry_rcu(conn, &h->list, list) {
3453 struct hci_chan *tmp;
3454
3455 if (conn->type != type)
3456 continue;
3457
3458 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3459 continue;
3460
3461 conn_num++;
3462
3463 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3464 struct sk_buff *skb;
3465
3466 if (skb_queue_empty(list: &tmp->data_q))
3467 continue;
3468
3469 skb = skb_peek(list_: &tmp->data_q);
3470 if (skb->priority < cur_prio)
3471 continue;
3472
3473 if (skb->priority > cur_prio) {
3474 num = 0;
3475 min = ~0;
3476 cur_prio = skb->priority;
3477 }
3478
3479 num++;
3480
3481 if (conn->sent < min) {
3482 min = conn->sent;
3483 chan = tmp;
3484 }
3485 }
3486
3487 if (hci_conn_num(hdev, type) == conn_num)
3488 break;
3489 }
3490
3491 rcu_read_unlock();
3492
3493 if (!chan)
3494 return NULL;
3495
3496 hci_quote_sent(conn: chan->conn, num, quote);
3497
3498 BT_DBG("chan %p quote %d", chan, *quote);
3499 return chan;
3500}
3501
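/* Promote the head packet of channels that were not serviced in the
 * last round so they do not starve behind higher priority traffic.
 */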
3502static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3503{
3504 struct hci_conn_hash *h = &hdev->conn_hash;
3505 struct hci_conn *conn;
3506 int num = 0;
3507
3508 BT_DBG("%s", hdev->name);
3509
3510 rcu_read_lock();
3511
3512 list_for_each_entry_rcu(conn, &h->list, list) {
3513 struct hci_chan *chan;
3514
3515 if (conn->type != type)
3516 continue;
3517
3518 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3519 continue;
3520
3521 num++;
3522
3523 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3524 struct sk_buff *skb;
3525
3526 if (chan->sent) {
3527 chan->sent = 0;
3528 continue;
3529 }
3530
3531 if (skb_queue_empty(list: &chan->data_q))
3532 continue;
3533
3534 skb = skb_peek(list_: &chan->data_q);
3535 if (skb->priority >= HCI_PRIO_MAX - 1)
3536 continue;
3537
3538 skb->priority = HCI_PRIO_MAX - 1;
3539
3540 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3541 skb->priority);
3542 }
3543
3544 if (hci_conn_num(hdev, type) == num)
3545 break;
3546 }
3547
3548 rcu_read_unlock();
3549
3550}
3551
3552static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3553{
3554 /* Calculate count of blocks used by this packet */
3555 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3556}
3557
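/* Kick the link TX timeout handling when the controller has returned no
 * credits for longer than HCI_ACL_TX_TIMEOUT.
 */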
3558static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3559{
3560 unsigned long last_tx;
3561
3562 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3563 return;
3564
3565 switch (type) {
3566 case LE_LINK:
3567 last_tx = hdev->le_last_tx;
3568 break;
3569 default:
3570 last_tx = hdev->acl_last_tx;
3571 break;
3572 }
3573
3574 /* tx timeout must be longer than maximum link supervision timeout
3575 * (40.9 seconds)
3576 */
3577 if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3578 hci_link_tx_to(hdev, type);
3579}
3580
3581/* Schedule SCO */
3582static void hci_sched_sco(struct hci_dev *hdev)
3583{
3584 struct hci_conn *conn;
3585 struct sk_buff *skb;
3586 int quote;
3587
3588 BT_DBG("%s", hdev->name);
3589
3590 if (!hci_conn_num(hdev, SCO_LINK))
3591 return;
3592
3593 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, quote: &quote))) {
3594 while (quote-- && (skb = skb_dequeue(list: &conn->data_q))) {
3595 BT_DBG("skb %p len %d", skb, skb->len);
3596 hci_send_frame(hdev, skb);
3597
3598 conn->sent++;
3599 if (conn->sent == ~0)
3600 conn->sent = 0;
3601 }
3602 }
3603}
3604
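/* Schedule eSCO */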
3605static void hci_sched_esco(struct hci_dev *hdev)
3606{
3607 struct hci_conn *conn;
3608 struct sk_buff *skb;
3609 int quote;
3610
3611 BT_DBG("%s", hdev->name);
3612
3613 if (!hci_conn_num(hdev, ESCO_LINK))
3614 return;
3615
3616 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3617 quote: &quote))) {
3618 while (quote-- && (skb = skb_dequeue(list: &conn->data_q))) {
3619 BT_DBG("skb %p len %d", skb, skb->len);
3620 hci_send_frame(hdev, skb);
3621
3622 conn->sent++;
3623 if (conn->sent == ~0)
3624 conn->sent = 0;
3625 }
3626 }
3627}
3628
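/* Schedule ACL using packet-based flow control */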
3629static void hci_sched_acl_pkt(struct hci_dev *hdev)
3630{
3631 unsigned int cnt = hdev->acl_cnt;
3632 struct hci_chan *chan;
3633 struct sk_buff *skb;
3634 int quote;
3635
3636 __check_timeout(hdev, cnt, ACL_LINK);
3637
3638 while (hdev->acl_cnt &&
3639 (chan = hci_chan_sent(hdev, ACL_LINK, quote: &quote))) {
3640 u32 priority = (skb_peek(list_: &chan->data_q))->priority;
3641 while (quote-- && (skb = skb_peek(list_: &chan->data_q))) {
3642 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3643 skb->len, skb->priority);
3644
3645 /* Stop if priority has changed */
3646 if (skb->priority < priority)
3647 break;
3648
3649 skb = skb_dequeue(list: &chan->data_q);
3650
3651 hci_conn_enter_active_mode(conn: chan->conn,
3652 bt_cb(skb)->force_active);
3653
3654 hci_send_frame(hdev, skb);
3655 hdev->acl_last_tx = jiffies;
3656
3657 hdev->acl_cnt--;
3658 chan->sent++;
3659 chan->conn->sent++;
3660
3661 /* Send pending SCO packets right away */
3662 hci_sched_sco(hdev);
3663 hci_sched_esco(hdev);
3664 }
3665 }
3666
3667 if (cnt != hdev->acl_cnt)
3668 hci_prio_recalculate(hdev, ACL_LINK);
3669}
3670
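/* Schedule ACL using data-block-based flow control */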
3671static void hci_sched_acl_blk(struct hci_dev *hdev)
3672{
3673 unsigned int cnt = hdev->block_cnt;
3674 struct hci_chan *chan;
3675 struct sk_buff *skb;
3676 int quote;
3677 u8 type;
3678
3679 BT_DBG("%s", hdev->name);
3680
3681 if (hdev->dev_type == HCI_AMP)
3682 type = AMP_LINK;
3683 else
3684 type = ACL_LINK;
3685
3686 __check_timeout(hdev, cnt, type);
3687
3688 while (hdev->block_cnt > 0 &&
3689 (chan = hci_chan_sent(hdev, type, quote: &quote))) {
3690 u32 priority = (skb_peek(list_: &chan->data_q))->priority;
3691 while (quote > 0 && (skb = skb_peek(list_: &chan->data_q))) {
3692 int blocks;
3693
3694 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3695 skb->len, skb->priority);
3696
3697 /* Stop if priority has changed */
3698 if (skb->priority < priority)
3699 break;
3700
3701 skb = skb_dequeue(list: &chan->data_q);
3702
3703 blocks = __get_blocks(hdev, skb);
3704 if (blocks > hdev->block_cnt)
3705 return;
3706
3707 hci_conn_enter_active_mode(conn: chan->conn,
3708 bt_cb(skb)->force_active);
3709
3710 hci_send_frame(hdev, skb);
3711 hdev->acl_last_tx = jiffies;
3712
3713 hdev->block_cnt -= blocks;
3714 quote -= blocks;
3715
3716 chan->sent += blocks;
3717 chan->conn->sent += blocks;
3718 }
3719 }
3720
3721 if (cnt != hdev->block_cnt)
3722 hci_prio_recalculate(hdev, type);
3723}
3724
3725static void hci_sched_acl(struct hci_dev *hdev)
3726{
3727 BT_DBG("%s", hdev->name);
3728
	/* Nothing to schedule without ACL links on a BR/EDR controller */
3730 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3731 return;
3732
	/* Nothing to schedule without AMP links on an AMP controller */
3734 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3735 return;
3736
3737 switch (hdev->flow_ctl_mode) {
3738 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3739 hci_sched_acl_pkt(hdev);
3740 break;
3741
3742 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3743 hci_sched_acl_blk(hdev);
3744 break;
3745 }
3746}
3747
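/* Schedule LE */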
3748static void hci_sched_le(struct hci_dev *hdev)
3749{
3750 struct hci_chan *chan;
3751 struct sk_buff *skb;
3752 int quote, cnt, tmp;
3753
3754 BT_DBG("%s", hdev->name);
3755
3756 if (!hci_conn_num(hdev, LE_LINK))
3757 return;
3758
3759 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3760
3761 __check_timeout(hdev, cnt, LE_LINK);
3762
3763 tmp = cnt;
3764 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, quote: &quote))) {
3765 u32 priority = (skb_peek(list_: &chan->data_q))->priority;
3766 while (quote-- && (skb = skb_peek(list_: &chan->data_q))) {
3767 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3768 skb->len, skb->priority);
3769
3770 /* Stop if priority has changed */
3771 if (skb->priority < priority)
3772 break;
3773
3774 skb = skb_dequeue(list: &chan->data_q);
3775
3776 hci_send_frame(hdev, skb);
3777 hdev->le_last_tx = jiffies;
3778
3779 cnt--;
3780 chan->sent++;
3781 chan->conn->sent++;
3782
3783 /* Send pending SCO packets right away */
3784 hci_sched_sco(hdev);
3785 hci_sched_esco(hdev);
3786 }
3787 }
3788
3789 if (hdev->le_pkts)
3790 hdev->le_cnt = cnt;
3791 else
3792 hdev->acl_cnt = cnt;
3793
3794 if (cnt != tmp)
3795 hci_prio_recalculate(hdev, LE_LINK);
3796}
3797
3798/* Schedule CIS */
3799static void hci_sched_iso(struct hci_dev *hdev)
3800{
3801 struct hci_conn *conn;
3802 struct sk_buff *skb;
3803 int quote, *cnt;
3804
3805 BT_DBG("%s", hdev->name);
3806
3807 if (!hci_conn_num(hdev, ISO_LINK))
3808 return;
3809
3810 cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3811 hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3812 while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, quote: &quote))) {
3813 while (quote-- && (skb = skb_dequeue(list: &conn->data_q))) {
3814 BT_DBG("skb %p len %d", skb, skb->len);
3815 hci_send_frame(hdev, skb);
3816
3817 conn->sent++;
3818 if (conn->sent == ~0)
3819 conn->sent = 0;
3820 (*cnt)--;
3821 }
3822 }
3823}
3824
3825static void hci_tx_work(struct work_struct *work)
3826{
3827 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3828 struct sk_buff *skb;
3829
3830 BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3831 hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3832
3833 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3834 /* Schedule queues and send stuff to HCI driver */
3835 hci_sched_sco(hdev);
3836 hci_sched_esco(hdev);
3837 hci_sched_iso(hdev);
3838 hci_sched_acl(hdev);
3839 hci_sched_le(hdev);
3840 }
3841
3842 /* Send next queued raw (unknown type) packet */
3843 while ((skb = skb_dequeue(list: &hdev->raw_q)))
3844 hci_send_frame(hdev, skb);
3845}
3846
3847/* ----- HCI RX task (incoming data processing) ----- */
3848
3849/* ACL data packet */
3850static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3851{
3852 struct hci_acl_hdr *hdr = (void *) skb->data;
3853 struct hci_conn *conn;
3854 __u16 handle, flags;
3855
3856 skb_pull(skb, HCI_ACL_HDR_SIZE);
3857
3858 handle = __le16_to_cpu(hdr->handle);
3859 flags = hci_flags(handle);
3860 handle = hci_handle(handle);
3861
3862 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3863 handle, flags);
3864
3865 hdev->stat.acl_rx++;
3866
3867 hci_dev_lock(hdev);
3868 conn = hci_conn_hash_lookup_handle(hdev, handle);
3869 hci_dev_unlock(hdev);
3870
3871 if (conn) {
3872 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3873
3874 /* Send to upper protocol */
3875 l2cap_recv_acldata(hcon: conn, skb, flags);
3876 return;
3877 } else {
3878 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3879 handle);
3880 }
3881
3882 kfree_skb(skb);
3883}
3884
3885/* SCO data packet */
3886static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3887{
3888 struct hci_sco_hdr *hdr = (void *) skb->data;
3889 struct hci_conn *conn;
3890 __u16 handle, flags;
3891
3892 skb_pull(skb, HCI_SCO_HDR_SIZE);
3893
3894 handle = __le16_to_cpu(hdr->handle);
3895 flags = hci_flags(handle);
3896 handle = hci_handle(handle);
3897
3898 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3899 handle, flags);
3900
3901 hdev->stat.sco_rx++;
3902
3903 hci_dev_lock(hdev);
3904 conn = hci_conn_hash_lookup_handle(hdev, handle);
3905 hci_dev_unlock(hdev);
3906
3907 if (conn) {
3908 /* Send to upper protocol */
3909 hci_skb_pkt_status(skb) = flags & 0x03;
3910 sco_recv_scodata(hcon: conn, skb);
3911 return;
3912 } else {
3913 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3914 handle);
3915 }
3916
3917 kfree_skb(skb);
3918}
3919
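/* ISO data packet */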
3920static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3921{
3922 struct hci_iso_hdr *hdr;
3923 struct hci_conn *conn;
3924 __u16 handle, flags;
3925
3926 hdr = skb_pull_data(skb, len: sizeof(*hdr));
3927 if (!hdr) {
3928 bt_dev_err(hdev, "ISO packet too small");
3929 goto drop;
3930 }
3931
3932 handle = __le16_to_cpu(hdr->handle);
3933 flags = hci_flags(handle);
3934 handle = hci_handle(handle);
3935
3936 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3937 handle, flags);
3938
3939 hci_dev_lock(hdev);
3940 conn = hci_conn_hash_lookup_handle(hdev, handle);
3941 hci_dev_unlock(hdev);
3942
3943 if (!conn) {
3944 bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3945 handle);
3946 goto drop;
3947 }
3948
3949 /* Send to upper protocol */
3950 iso_recv(hcon: conn, skb, flags);
3951 return;
3952
3953drop:
3954 kfree_skb(skb);
3955}
3956
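/* A request is complete when the command queue is empty or when the
 * next queued command starts a new request.
 */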
3957static bool hci_req_is_complete(struct hci_dev *hdev)
3958{
3959 struct sk_buff *skb;
3960
3961 skb = skb_peek(list_: &hdev->cmd_q);
3962 if (!skb)
3963 return true;
3964
3965 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3966}
3967
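/* Re-queue a copy of the last sent command, except for HCI_Reset, so
 * that the command work sends it again.
 */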
3968static void hci_resend_last(struct hci_dev *hdev)
3969{
3970 struct hci_command_hdr *sent;
3971 struct sk_buff *skb;
3972 u16 opcode;
3973
3974 if (!hdev->sent_cmd)
3975 return;
3976
3977 sent = (void *) hdev->sent_cmd->data;
3978 opcode = __le16_to_cpu(sent->opcode);
3979 if (opcode == HCI_OP_RESET)
3980 return;
3981
3982 skb = skb_clone(skb: hdev->sent_cmd, GFP_KERNEL);
3983 if (!skb)
3984 return;
3985
3986 skb_queue_head(list: &hdev->cmd_q, newsk: skb);
3987 queue_work(wq: hdev->workqueue, work: &hdev->cmd_work);
3988}
3989
3990void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3991 hci_req_complete_t *req_complete,
3992 hci_req_complete_skb_t *req_complete_skb)
3993{
3994 struct sk_buff *skb;
3995 unsigned long flags;
3996
3997 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3998
3999 /* If the completed command doesn't match the last one that was
4000 * sent we need to do special handling of it.
4001 */
4002 if (!hci_sent_cmd_data(hdev, opcode)) {
4003 /* Some CSR based controllers generate a spontaneous
4004 * reset complete event during init and any pending
4005 * command will never be completed. In such a case we
4006 * need to resend whatever was the last sent
4007 * command.
4008 */
4009 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4010 hci_resend_last(hdev);
4011
4012 return;
4013 }
4014
4015 /* If we reach this point this event matches the last command sent */
4016 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
4017
4018 /* If the command succeeded and there's still more commands in
4019 * this request the request is not yet complete.
4020 */
4021 if (!status && !hci_req_is_complete(hdev))
4022 return;
4023
4024 /* If this was the last command in a request the complete
4025 * callback would be found in hdev->sent_cmd instead of the
4026 * command queue (hdev->cmd_q).
4027 */
4028 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4029 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4030 return;
4031 }
4032
4033 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4034 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4035 return;
4036 }
4037
4038 /* Remove all pending commands belonging to this request */
4039 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4040 while ((skb = __skb_dequeue(list: &hdev->cmd_q))) {
4041 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4042 __skb_queue_head(list: &hdev->cmd_q, newsk: skb);
4043 break;
4044 }
4045
4046 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4047 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4048 else
4049 *req_complete = bt_cb(skb)->hci.req_complete;
4050 dev_kfree_skb_irq(skb);
4051 }
4052 spin_unlock_irqrestore(lock: &hdev->cmd_q.lock, flags);
4053}
4054
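/* RX work: dispatch every queued packet from the driver to the
 * matching event or data handler.
 */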
4055static void hci_rx_work(struct work_struct *work)
4056{
4057 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4058 struct sk_buff *skb;
4059
4060 BT_DBG("%s", hdev->name);
4061
	/* The kcov_remote functions are used to collect packet parsing
	 * coverage from this background thread and to associate that
	 * coverage with the syscall thread that originally injected the
	 * packet. This helps with fuzzing the kernel.
	 */
4067 for (; (skb = skb_dequeue(list: &hdev->rx_q)); kcov_remote_stop()) {
4068 kcov_remote_start_common(id: skb_get_kcov_handle(skb));
4069
4070 /* Send copy to monitor */
4071 hci_send_to_monitor(hdev, skb);
4072
4073 if (atomic_read(v: &hdev->promisc)) {
4074 /* Send copy to the sockets */
4075 hci_send_to_sock(hdev, skb);
4076 }
4077
		/* If the device has been opened in HCI_USER_CHANNEL,
		 * userspace has exclusive access to the device.
		 * While the device is in HCI_INIT we still need to
		 * process packets so that the driver can complete
		 * its setup().
		 */
4084 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4085 !test_bit(HCI_INIT, &hdev->flags)) {
4086 kfree_skb(skb);
4087 continue;
4088 }
4089
4090 if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
4092 switch (hci_skb_pkt_type(skb)) {
4093 case HCI_ACLDATA_PKT:
4094 case HCI_SCODATA_PKT:
4095 case HCI_ISODATA_PKT:
4096 kfree_skb(skb);
4097 continue;
4098 }
4099 }
4100
4101 /* Process frame */
4102 switch (hci_skb_pkt_type(skb)) {
4103 case HCI_EVENT_PKT:
4104 BT_DBG("%s Event packet", hdev->name);
4105 hci_event_packet(hdev, skb);
4106 break;
4107
4108 case HCI_ACLDATA_PKT:
4109 BT_DBG("%s ACL data packet", hdev->name);
4110 hci_acldata_packet(hdev, skb);
4111 break;
4112
4113 case HCI_SCODATA_PKT:
4114 BT_DBG("%s SCO data packet", hdev->name);
4115 hci_scodata_packet(hdev, skb);
4116 break;
4117
4118 case HCI_ISODATA_PKT:
4119 BT_DBG("%s ISO data packet", hdev->name);
4120 hci_isodata_packet(hdev, skb);
4121 break;
4122
4123 default:
4124 kfree_skb(skb);
4125 break;
4126 }
4127 }
4128}
4129
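/* Command work: send the next queued command to the driver whenever
 * the controller has command credits available.
 */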
4130static void hci_cmd_work(struct work_struct *work)
4131{
4132 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4133 struct sk_buff *skb;
4134
4135 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4136 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4137
4138 /* Send queued commands */
4139 if (atomic_read(v: &hdev->cmd_cnt)) {
4140 skb = skb_dequeue(list: &hdev->cmd_q);
4141 if (!skb)
4142 return;
4143
4144 kfree_skb(skb: hdev->sent_cmd);
4145
4146 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4147 if (hdev->sent_cmd) {
4148 int res;
4149 if (hci_req_status_pend(hdev))
4150 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
4151 atomic_dec(v: &hdev->cmd_cnt);
4152
4153 res = hci_send_frame(hdev, skb);
4154 if (res < 0)
4155 __hci_cmd_sync_cancel(hdev, err: -res);
4156
4157 rcu_read_lock();
4158 if (test_bit(HCI_RESET, &hdev->flags) ||
4159 hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4160 cancel_delayed_work(dwork: &hdev->cmd_timer);
4161 else
4162 queue_delayed_work(wq: hdev->workqueue, dwork: &hdev->cmd_timer,
4163 HCI_CMD_TIMEOUT);
4164 rcu_read_unlock();
4165 } else {
4166 skb_queue_head(list: &hdev->cmd_q, newsk: skb);
4167 queue_work(wq: hdev->workqueue, work: &hdev->cmd_work);
4168 }
4169 }
4170}
4171
