1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
28#include <linux/export.h>
29#include <linux/rfkill.h>
30#include <linux/debugfs.h>
31#include <linux/crypto.h>
32#include <linux/kcov.h>
33#include <linux/property.h>
34#include <linux/suspend.h>
35#include <linux/wait.h>
36#include <linux/unaligned.h>
37
38#include <net/bluetooth/bluetooth.h>
39#include <net/bluetooth/hci_core.h>
40#include <net/bluetooth/l2cap.h>
41#include <net/bluetooth/mgmt.h>
42
43#include "hci_debugfs.h"
44#include "smp.h"
45#include "leds.h"
46#include "msft.h"
47#include "aosp.h"
48#include "hci_codec.h"
49
50static void hci_rx_work(struct work_struct *work);
51static void hci_cmd_work(struct work_struct *work);
52static void hci_tx_work(struct work_struct *work);
53
54/* HCI device list */
55LIST_HEAD(hci_dev_list);
56DEFINE_RWLOCK(hci_dev_list_lock);
57
58/* HCI callback list */
59LIST_HEAD(hci_cb_list);
60DEFINE_MUTEX(hci_cb_list_lock);
61
62/* HCI ID Numbering */
63static DEFINE_IDA(hci_index_ida);
64
65/* Get HCI device by index.
66 * Device is held on return. */
67struct hci_dev *hci_dev_get(int index)
68{
69 struct hci_dev *hdev = NULL, *d;
70
71 BT_DBG("%d", index);
72
73 if (index < 0)
74 return NULL;
75
76 read_lock(&hci_dev_list_lock);
77 list_for_each_entry(d, &hci_dev_list, list) {
78 if (d->id == index) {
79 hdev = hci_dev_hold(d);
80 break;
81 }
82 }
83 read_unlock(&hci_dev_list_lock);
84 return hdev;
85}
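
/* Illustrative caller sketch (not part of the API): a reference taken with
 * hci_dev_get() must always be dropped with hci_dev_put() once the caller
 * is done with it, typically:
 *
 *	struct hci_dev *hdev = hci_dev_get(dev_id);
 *
 *	if (!hdev)
 *		return -ENODEV;
 *	... use hdev ...
 *	hci_dev_put(hdev);
 *
 * where dev_id is whatever index the caller received (e.g. from an ioctl).
 */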
86
87/* ---- Inquiry support ---- */
88
89bool hci_discovery_active(struct hci_dev *hdev)
90{
91 struct discovery_state *discov = &hdev->discovery;
92
93 switch (discov->state) {
94 case DISCOVERY_FINDING:
95 case DISCOVERY_RESOLVING:
96 return true;
97
98 default:
99 return false;
100 }
101}
102
103void hci_discovery_set_state(struct hci_dev *hdev, int state)
104{
105 int old_state = hdev->discovery.state;
106
107 if (old_state == state)
108 return;
109
110 hdev->discovery.state = state;
111
112 switch (state) {
113 case DISCOVERY_STOPPED:
114 hci_update_passive_scan(hdev);
115
116 if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
118 break;
119 case DISCOVERY_STARTING:
120 break;
121 case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
123 break;
124 case DISCOVERY_RESOLVING:
125 break;
126 case DISCOVERY_STOPPING:
127 break;
128 }
129
130 bt_dev_dbg(hdev, "state %u -> %u", old_state, state);
131}
132
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
146
147struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
148 bdaddr_t *bdaddr)
149{
150 struct discovery_state *cache = &hdev->discovery;
151 struct inquiry_entry *e;
152
153 BT_DBG("cache %p, %pMR", cache, bdaddr);
154
155 list_for_each_entry(e, &cache->all, all) {
156 if (!bacmp(ba1: &e->data.bdaddr, ba2: bdaddr))
157 return e;
158 }
159
160 return NULL;
161}
162
163struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
164 bdaddr_t *bdaddr)
165{
166 struct discovery_state *cache = &hdev->discovery;
167 struct inquiry_entry *e;
168
169 BT_DBG("cache %p, %pMR", cache, bdaddr);
170
171 list_for_each_entry(e, &cache->unknown, list) {
172 if (!bacmp(ba1: &e->data.bdaddr, ba2: bdaddr))
173 return e;
174 }
175
176 return NULL;
177}
178
179struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
180 bdaddr_t *bdaddr,
181 int state)
182{
183 struct discovery_state *cache = &hdev->discovery;
184 struct inquiry_entry *e;
185
186 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
187
188 list_for_each_entry(e, &cache->resolve, list) {
189 if (!bacmp(ba1: bdaddr, BDADDR_ANY) && e->name_state == state)
190 return e;
191 if (!bacmp(ba1: &e->data.bdaddr, ba2: bdaddr))
192 return e;
193 }
194
195 return NULL;
196}
197
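/* Keep the resolve list ordered by ascending |RSSI| (i.e. strongest signal
 * first), presumably so that name resolution is attempted for the closest
 * devices first; entries whose name request is already NAME_PENDING are
 * skipped over and keep their current position.
 */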
198void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
199 struct inquiry_entry *ie)
200{
201 struct discovery_state *cache = &hdev->discovery;
202 struct list_head *pos = &cache->resolve;
203 struct inquiry_entry *p;
204
205 list_del(entry: &ie->list);
206
207 list_for_each_entry(p, &cache->resolve, list) {
208 if (p->name_state != NAME_PENDING &&
209 abs(p->data.rssi) >= abs(ie->data.rssi))
210 break;
211 pos = &p->list;
212 }
213
214 list_add(new: &ie->list, head: pos);
215}
216
217u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
218 bool name_known)
219{
220 struct discovery_state *cache = &hdev->discovery;
221 struct inquiry_entry *ie;
222 u32 flags = 0;
223
224 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
225
226 hci_remove_remote_oob_data(hdev, bdaddr: &data->bdaddr, BDADDR_BREDR);
227
228 if (!data->ssp_mode)
229 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
230
231 ie = hci_inquiry_cache_lookup(hdev, bdaddr: &data->bdaddr);
232 if (ie) {
233 if (!ie->data.ssp_mode)
234 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
235
236 if (ie->name_state == NAME_NEEDED &&
237 data->rssi != ie->data.rssi) {
238 ie->data.rssi = data->rssi;
239 hci_inquiry_cache_update_resolve(hdev, ie);
240 }
241
242 goto update;
243 }
244
245 /* Entry not in the cache. Add new one. */
246 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
247 if (!ie) {
248 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
249 goto done;
250 }
251
252 list_add(new: &ie->all, head: &cache->all);
253
254 if (name_known) {
255 ie->name_state = NAME_KNOWN;
256 } else {
257 ie->name_state = NAME_NOT_KNOWN;
258 list_add(new: &ie->list, head: &cache->unknown);
259 }
260
261update:
262 if (name_known && ie->name_state != NAME_KNOWN &&
263 ie->name_state != NAME_PENDING) {
264 ie->name_state = NAME_KNOWN;
265 list_del(entry: &ie->list);
266 }
267
268 memcpy(&ie->data, data, sizeof(*data));
269 ie->timestamp = jiffies;
270 cache->timestamp = jiffies;
271
272 if (ie->name_state == NAME_NOT_KNOWN)
273 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
274
275done:
276 return flags;
277}
278
279static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
280{
281 struct discovery_state *cache = &hdev->discovery;
282 struct inquiry_info *info = (struct inquiry_info *) buf;
283 struct inquiry_entry *e;
284 int copied = 0;
285
286 list_for_each_entry(e, &cache->all, all) {
287 struct inquiry_data *data = &e->data;
288
289 if (copied >= num)
290 break;
291
292 bacpy(dst: &info->bdaddr, src: &data->bdaddr);
293 info->pscan_rep_mode = data->pscan_rep_mode;
294 info->pscan_period_mode = data->pscan_period_mode;
295 info->pscan_mode = data->pscan_mode;
296 memcpy(info->dev_class, data->dev_class, 3);
297 info->clock_offset = data->clock_offset;
298
299 info++;
300 copied++;
301 }
302
303 BT_DBG("cache %p, copied %d", cache, copied);
304 return copied;
305}
306
307int hci_inquiry(void __user *arg)
308{
309 __u8 __user *ptr = arg;
310 struct hci_inquiry_req ir;
311 struct hci_dev *hdev;
312 int err = 0, do_inquiry = 0, max_rsp;
313 __u8 *buf;
314
315 if (copy_from_user(to: &ir, from: ptr, n: sizeof(ir)))
316 return -EFAULT;
317
318 hdev = hci_dev_get(index: ir.dev_id);
319 if (!hdev)
320 return -ENODEV;
321
322 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
323 err = -EBUSY;
324 goto done;
325 }
326
327 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
328 err = -EOPNOTSUPP;
329 goto done;
330 }
331
332 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
333 err = -EOPNOTSUPP;
334 goto done;
335 }
336
337 /* Restrict maximum inquiry length to 60 seconds */
338 if (ir.length > 60) {
339 err = -EINVAL;
340 goto done;
341 }
342
343 hci_dev_lock(hdev);
344 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
345 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
346 hci_inquiry_cache_flush(hdev);
347 do_inquiry = 1;
348 }
349 hci_dev_unlock(hdev);
350
351 if (do_inquiry) {
352 hci_req_sync_lock(hdev);
353 err = hci_inquiry_sync(hdev, length: ir.length, num_rsp: ir.num_rsp);
354 hci_req_sync_unlock(hdev);
355
356 if (err < 0)
357 goto done;
358
359 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
360 * cleared). If it is interrupted by a signal, return -EINTR.
361 */
362 if (wait_on_bit(word: &hdev->flags, bit: HCI_INQUIRY,
363 TASK_INTERRUPTIBLE)) {
364 err = -EINTR;
365 goto done;
366 }
367 }
368
	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temporary
	 * buffer and then copy it to user space.
	 */
377 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
378 if (!buf) {
379 err = -ENOMEM;
380 goto done;
381 }
382
383 hci_dev_lock(hdev);
384 ir.num_rsp = inquiry_cache_dump(hdev, num: max_rsp, buf);
385 hci_dev_unlock(hdev);
386
387 BT_DBG("num_rsp %d", ir.num_rsp);
388
389 if (!copy_to_user(to: ptr, from: &ir, n: sizeof(ir))) {
390 ptr += sizeof(ir);
391 if (copy_to_user(to: ptr, from: buf, n: sizeof(struct inquiry_info) *
392 ir.num_rsp))
393 err = -EFAULT;
394 } else
395 err = -EFAULT;
396
397 kfree(objp: buf);
398
399done:
400 hci_dev_put(d: hdev);
401 return err;
402}
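
/* Hedged userspace sketch (roughly what BlueZ's hci_inquiry() helper does;
 * variable names below are illustrative): the ioctl expects a
 * struct hci_inquiry_req immediately followed by room for the requested
 * inquiry_info entries, all in one buffer:
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	int max_rsp = 255;
 *	void *buf = malloc(sizeof(struct hci_inquiry_req) +
 *			   max_rsp * sizeof(struct inquiry_info));
 *	struct hci_inquiry_req *ir = buf;
 *
 *	ir->dev_id  = 0;			// hci0, assumed present
 *	ir->flags   = IREQ_CACHE_FLUSH;		// force a fresh inquiry
 *	ir->lap[0]  = 0x33;			// GIAC (0x9e8b33)
 *	ir->lap[1]  = 0x8b;
 *	ir->lap[2]  = 0x9e;
 *	ir->length  = 8;			// 8 * 1.28 s
 *	ir->num_rsp = max_rsp;
 *
 *	if (ioctl(dd, HCIINQUIRY, (unsigned long) buf) < 0)
 *		perror("HCIINQUIRY");
 *
 * On success ir->num_rsp holds the number of inquiry_info entries copied
 * right after the request structure.
 */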
403
404static int hci_dev_do_open(struct hci_dev *hdev)
405{
406 int ret = 0;
407
408 BT_DBG("%s %p", hdev->name, hdev);
409
410 hci_req_sync_lock(hdev);
411
412 ret = hci_dev_open_sync(hdev);
413
414 hci_req_sync_unlock(hdev);
415 return ret;
416}
417
418/* ---- HCI ioctl helpers ---- */
419
420int hci_dev_open(__u16 dev)
421{
422 struct hci_dev *hdev;
423 int err;
424
425 hdev = hci_dev_get(index: dev);
426 if (!hdev)
427 return -ENODEV;
428
429 /* Devices that are marked as unconfigured can only be powered
430 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
432 * possible.
433 *
434 * When this function is called for a user channel, the flag
435 * HCI_USER_CHANNEL will be set first before attempting to
436 * open the device.
437 */
438 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
439 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
440 err = -EOPNOTSUPP;
441 goto done;
442 }
443
444 /* We need to ensure that no other power on/off work is pending
445 * before proceeding to call hci_dev_do_open. This is
446 * particularly important if the setup procedure has not yet
447 * completed.
448 */
449 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
450 cancel_delayed_work(dwork: &hdev->power_off);
451
452 /* After this call it is guaranteed that the setup procedure
453 * has finished. This means that error conditions like RFKILL
454 * or no valid public or static random address apply.
455 */
456 flush_workqueue(hdev->req_workqueue);
457
458 /* For controllers not using the management interface and that
459 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
460 * so that pairing works for them. Once the management interface
461 * is in use this bit will be cleared again and userspace has
462 * to explicitly enable it.
463 */
464 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
465 !hci_dev_test_flag(hdev, HCI_MGMT))
466 hci_dev_set_flag(hdev, HCI_BONDABLE);
467
468 err = hci_dev_do_open(hdev);
469
470done:
471 hci_dev_put(d: hdev);
472 return err;
473}
474
475int hci_dev_do_close(struct hci_dev *hdev)
476{
477 int err;
478
479 BT_DBG("%s %p", hdev->name, hdev);
480
481 hci_req_sync_lock(hdev);
482
483 err = hci_dev_close_sync(hdev);
484
485 hci_req_sync_unlock(hdev);
486
487 return err;
488}
489
490int hci_dev_close(__u16 dev)
491{
492 struct hci_dev *hdev;
493 int err;
494
495 hdev = hci_dev_get(index: dev);
496 if (!hdev)
497 return -ENODEV;
498
499 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
500 err = -EBUSY;
501 goto done;
502 }
503
504 cancel_work_sync(work: &hdev->power_on);
505 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
506 cancel_delayed_work(dwork: &hdev->power_off);
507
508 err = hci_dev_do_close(hdev);
509
510done:
511 hci_dev_put(d: hdev);
512 return err;
513}
514
515static int hci_dev_do_reset(struct hci_dev *hdev)
516{
517 int ret;
518
519 BT_DBG("%s %p", hdev->name, hdev);
520
521 hci_req_sync_lock(hdev);
522
523 /* Drop queues */
524 skb_queue_purge(list: &hdev->rx_q);
525 skb_queue_purge(list: &hdev->cmd_q);
526
527 /* Cancel these to avoid queueing non-chained pending work */
528 hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
529 /* Wait for
530 *
531 * if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
532 * queue_delayed_work(&hdev->{cmd,ncmd}_timer)
533 *
534 * inside RCU section to see the flag or complete scheduling.
535 */
536 synchronize_rcu();
537 /* Explicitly cancel works in case scheduled after setting the flag. */
538 cancel_delayed_work(dwork: &hdev->cmd_timer);
539 cancel_delayed_work(dwork: &hdev->ncmd_timer);
540
541 /* Avoid potential lockdep warnings from the *_flush() calls by
542 * ensuring the workqueue is empty up front.
543 */
544 drain_workqueue(wq: hdev->workqueue);
545
546 hci_dev_lock(hdev);
547 hci_inquiry_cache_flush(hdev);
548 hci_conn_hash_flush(hdev);
549 hci_dev_unlock(hdev);
550
551 if (hdev->flush)
552 hdev->flush(hdev);
553
554 hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
555
556 atomic_set(v: &hdev->cmd_cnt, i: 1);
557 hdev->acl_cnt = 0;
558 hdev->sco_cnt = 0;
559 hdev->le_cnt = 0;
560 hdev->iso_cnt = 0;
561
562 ret = hci_reset_sync(hdev);
563
564 hci_req_sync_unlock(hdev);
565 return ret;
566}
567
568int hci_dev_reset(__u16 dev)
569{
570 struct hci_dev *hdev;
571 int err;
572
573 hdev = hci_dev_get(index: dev);
574 if (!hdev)
575 return -ENODEV;
576
577 if (!test_bit(HCI_UP, &hdev->flags)) {
578 err = -ENETDOWN;
579 goto done;
580 }
581
582 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
583 err = -EBUSY;
584 goto done;
585 }
586
587 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
588 err = -EOPNOTSUPP;
589 goto done;
590 }
591
592 err = hci_dev_do_reset(hdev);
593
594done:
595 hci_dev_put(d: hdev);
596 return err;
597}
598
599int hci_dev_reset_stat(__u16 dev)
600{
601 struct hci_dev *hdev;
602 int ret = 0;
603
604 hdev = hci_dev_get(index: dev);
605 if (!hdev)
606 return -ENODEV;
607
608 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
609 ret = -EBUSY;
610 goto done;
611 }
612
613 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
614 ret = -EOPNOTSUPP;
615 goto done;
616 }
617
618 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
619
620done:
621 hci_dev_put(d: hdev);
622 return ret;
623}
624
625static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
626{
627 bool conn_changed, discov_changed;
628
629 BT_DBG("%s scan 0x%02x", hdev->name, scan);
630
631 if ((scan & SCAN_PAGE))
632 conn_changed = !hci_dev_test_and_set_flag(hdev,
633 HCI_CONNECTABLE);
634 else
635 conn_changed = hci_dev_test_and_clear_flag(hdev,
636 HCI_CONNECTABLE);
637
638 if ((scan & SCAN_INQUIRY)) {
639 discov_changed = !hci_dev_test_and_set_flag(hdev,
640 HCI_DISCOVERABLE);
641 } else {
642 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
643 discov_changed = hci_dev_test_and_clear_flag(hdev,
644 HCI_DISCOVERABLE);
645 }
646
647 if (!hci_dev_test_flag(hdev, HCI_MGMT))
648 return;
649
650 if (conn_changed || discov_changed) {
651 /* In case this was disabled through mgmt */
652 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
653
654 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
655 hci_update_adv_data(hdev, instance: hdev->cur_adv_instance);
656
657 mgmt_new_settings(hdev);
658 }
659}
660
661int hci_dev_cmd(unsigned int cmd, void __user *arg)
662{
663 struct hci_dev *hdev;
664 struct hci_dev_req dr;
665 __le16 policy;
666 int err = 0;
667
668 if (copy_from_user(to: &dr, from: arg, n: sizeof(dr)))
669 return -EFAULT;
670
671 hdev = hci_dev_get(index: dr.dev_id);
672 if (!hdev)
673 return -ENODEV;
674
675 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
676 err = -EBUSY;
677 goto done;
678 }
679
680 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
681 err = -EOPNOTSUPP;
682 goto done;
683 }
684
685 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
686 err = -EOPNOTSUPP;
687 goto done;
688 }
689
690 switch (cmd) {
691 case HCISETAUTH:
692 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
693 plen: 1, param: &dr.dev_opt, HCI_CMD_TIMEOUT);
694 break;
695
696 case HCISETENCRYPT:
697 if (!lmp_encrypt_capable(hdev)) {
698 err = -EOPNOTSUPP;
699 break;
700 }
701
702 if (!test_bit(HCI_AUTH, &hdev->flags)) {
703 /* Auth must be enabled first */
704 err = hci_cmd_sync_status(hdev,
705 HCI_OP_WRITE_AUTH_ENABLE,
706 plen: 1, param: &dr.dev_opt,
707 HCI_CMD_TIMEOUT);
708 if (err)
709 break;
710 }
711
712 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE,
713 plen: 1, param: &dr.dev_opt, HCI_CMD_TIMEOUT);
714 break;
715
716 case HCISETSCAN:
717 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
718 plen: 1, param: &dr.dev_opt, HCI_CMD_TIMEOUT);
719
720 /* Ensure that the connectable and discoverable states
721 * get correctly modified as this was a non-mgmt change.
722 */
723 if (!err)
724 hci_update_passive_scan_state(hdev, scan: dr.dev_opt);
725 break;
726
727 case HCISETLINKPOL:
728 policy = cpu_to_le16(dr.dev_opt);
729
730 err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
731 plen: 2, param: &policy, HCI_CMD_TIMEOUT);
732 break;
733
734 case HCISETLINKMODE:
735 hdev->link_mode = ((__u16) dr.dev_opt) &
736 (HCI_LM_MASTER | HCI_LM_ACCEPT);
737 break;
738
739 case HCISETPTYPE:
740 if (hdev->pkt_type == (__u16) dr.dev_opt)
741 break;
742
743 hdev->pkt_type = (__u16) dr.dev_opt;
744 mgmt_phy_configuration_changed(hdev, NULL);
745 break;
746
747 case HCISETACLMTU:
748 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
749 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
750 break;
751
752 case HCISETSCOMTU:
753 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
754 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
755 break;
756
757 default:
758 err = -EINVAL;
759 break;
760 }
761
762done:
763 hci_dev_put(d: hdev);
764 return err;
765}
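
/* Note on HCISETACLMTU/HCISETSCOMTU above: dev_opt carries both values in
 * one 32-bit word, with the packet count in the low 16 bits and the MTU in
 * the high 16 bits (as decoded by the __u16 pointer arithmetic above on a
 * little-endian host). An illustrative userspace encoding would be:
 *
 *	dr.dev_opt = (mtu << 16) | pkts;	// hypothetical variable names
 */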
766
767int hci_get_dev_list(void __user *arg)
768{
769 struct hci_dev *hdev;
770 struct hci_dev_list_req *dl;
771 struct hci_dev_req *dr;
772 int n = 0, err;
773 __u16 dev_num;
774
775 if (get_user(dev_num, (__u16 __user *) arg))
776 return -EFAULT;
777
778 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
779 return -EINVAL;
780
781 dl = kzalloc(struct_size(dl, dev_req, dev_num), GFP_KERNEL);
782 if (!dl)
783 return -ENOMEM;
784
785 dl->dev_num = dev_num;
786 dr = dl->dev_req;
787
788 read_lock(&hci_dev_list_lock);
789 list_for_each_entry(hdev, &hci_dev_list, list) {
790 unsigned long flags = hdev->flags;
791
792 /* When the auto-off is configured it means the transport
793 * is running, but in that case still indicate that the
794 * device is actually down.
795 */
796 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
797 flags &= ~BIT(HCI_UP);
798
799 dr[n].dev_id = hdev->id;
800 dr[n].dev_opt = flags;
801
802 if (++n >= dev_num)
803 break;
804 }
805 read_unlock(&hci_dev_list_lock);
806
807 dl->dev_num = n;
808 err = copy_to_user(to: arg, from: dl, struct_size(dl, dev_req, n));
809 kfree(objp: dl);
810
811 return err ? -EFAULT : 0;
812}
813
814int hci_get_dev_info(void __user *arg)
815{
816 struct hci_dev *hdev;
817 struct hci_dev_info di;
818 unsigned long flags;
819 int err = 0;
820
821 if (copy_from_user(to: &di, from: arg, n: sizeof(di)))
822 return -EFAULT;
823
824 hdev = hci_dev_get(index: di.dev_id);
825 if (!hdev)
826 return -ENODEV;
827
828 /* When the auto-off is configured it means the transport
829 * is running, but in that case still indicate that the
830 * device is actually down.
831 */
832 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
833 flags = hdev->flags & ~BIT(HCI_UP);
834 else
835 flags = hdev->flags;
836
837 strscpy(di.name, hdev->name, sizeof(di.name));
838 di.bdaddr = hdev->bdaddr;
839 di.type = (hdev->bus & 0x0f);
840 di.flags = flags;
841 di.pkt_type = hdev->pkt_type;
842 if (lmp_bredr_capable(hdev)) {
843 di.acl_mtu = hdev->acl_mtu;
844 di.acl_pkts = hdev->acl_pkts;
845 di.sco_mtu = hdev->sco_mtu;
846 di.sco_pkts = hdev->sco_pkts;
847 } else {
848 di.acl_mtu = hdev->le_mtu;
849 di.acl_pkts = hdev->le_pkts;
850 di.sco_mtu = 0;
851 di.sco_pkts = 0;
852 }
853 di.link_policy = hdev->link_policy;
854 di.link_mode = hdev->link_mode;
855
856 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
857 memcpy(&di.features, &hdev->features, sizeof(di.features));
858
859 if (copy_to_user(to: arg, from: &di, n: sizeof(di)))
860 err = -EFAULT;
861
862 hci_dev_put(d: hdev);
863
864 return err;
865}
866
867/* ---- Interface to HCI drivers ---- */
868
869static int hci_dev_do_poweroff(struct hci_dev *hdev)
870{
871 int err;
872
873 BT_DBG("%s %p", hdev->name, hdev);
874
875 hci_req_sync_lock(hdev);
876
877 err = hci_set_powered_sync(hdev, val: false);
878
879 hci_req_sync_unlock(hdev);
880
881 return err;
882}
883
884static int hci_rfkill_set_block(void *data, bool blocked)
885{
886 struct hci_dev *hdev = data;
887 int err;
888
889 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
890
891 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
892 return -EBUSY;
893
894 if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED))
895 return 0;
896
897 if (blocked) {
898 hci_dev_set_flag(hdev, HCI_RFKILLED);
899
900 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
901 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
902 err = hci_dev_do_poweroff(hdev);
903 if (err) {
904 bt_dev_err(hdev, "Error when powering off device on rfkill (%d)",
905 err);
906
907 /* Make sure the device is still closed even if
908 * anything during power off sequence (eg.
909 * disconnecting devices) failed.
910 */
911 hci_dev_do_close(hdev);
912 }
913 }
914 } else {
915 hci_dev_clear_flag(hdev, HCI_RFKILLED);
916 }
917
918 return 0;
919}
920
921static const struct rfkill_ops hci_rfkill_ops = {
922 .set_block = hci_rfkill_set_block,
923};
924
925static void hci_power_on(struct work_struct *work)
926{
927 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
928 int err;
929
930 BT_DBG("%s", hdev->name);
931
932 if (test_bit(HCI_UP, &hdev->flags) &&
933 hci_dev_test_flag(hdev, HCI_MGMT) &&
934 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
935 cancel_delayed_work(dwork: &hdev->power_off);
936 err = hci_powered_update_sync(hdev);
937 mgmt_power_on(hdev, err);
938 return;
939 }
940
941 err = hci_dev_do_open(hdev);
942 if (err < 0) {
943 hci_dev_lock(hdev);
944 mgmt_set_powered_failed(hdev, err);
945 hci_dev_unlock(hdev);
946 return;
947 }
948
949 /* During the HCI setup phase, a few error conditions are
950 * ignored and they need to be checked now. If they are still
951 * valid, it is important to turn the device back off.
952 */
953 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
954 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
955 (!bacmp(ba1: &hdev->bdaddr, BDADDR_ANY) &&
956 !bacmp(ba1: &hdev->static_addr, BDADDR_ANY))) {
957 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
958 hci_dev_do_close(hdev);
959 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
960 queue_delayed_work(wq: hdev->req_workqueue, dwork: &hdev->power_off,
961 HCI_AUTO_OFF_TIMEOUT);
962 }
963
964 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
965 /* For unconfigured devices, set the HCI_RAW flag
966 * so that userspace can easily identify them.
967 */
968 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
969 set_bit(nr: HCI_RAW, addr: &hdev->flags);
970
		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
978 mgmt_index_added(hdev);
979 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
980 /* When the controller is now configured, then it
981 * is important to clear the HCI_RAW flag.
982 */
983 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
984 clear_bit(nr: HCI_RAW, addr: &hdev->flags);
985
986 /* Powering on the controller with HCI_CONFIG set only
987 * happens with the transition from unconfigured to
988 * configured. This will send the Index Added event.
989 */
990 mgmt_index_added(hdev);
991 }
992}
993
994static void hci_power_off(struct work_struct *work)
995{
996 struct hci_dev *hdev = container_of(work, struct hci_dev,
997 power_off.work);
998
999 BT_DBG("%s", hdev->name);
1000
1001 hci_dev_do_close(hdev);
1002}
1003
1004static void hci_error_reset(struct work_struct *work)
1005{
1006 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1007
1008 hci_dev_hold(d: hdev);
1009 BT_DBG("%s", hdev->name);
1010
1011 if (hdev->hw_error)
1012 hdev->hw_error(hdev, hdev->hw_error_code);
1013 else
1014 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1015
1016 if (!hci_dev_do_close(hdev))
1017 hci_dev_do_open(hdev);
1018
1019 hci_dev_put(d: hdev);
1020}
1021
1022void hci_uuids_clear(struct hci_dev *hdev)
1023{
1024 struct bt_uuid *uuid, *tmp;
1025
1026 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1027 list_del(entry: &uuid->list);
1028 kfree(objp: uuid);
1029 }
1030}
1031
1032void hci_link_keys_clear(struct hci_dev *hdev)
1033{
1034 struct link_key *key, *tmp;
1035
1036 list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
1037 list_del_rcu(entry: &key->list);
1038 kfree_rcu(key, rcu);
1039 }
1040}
1041
1042void hci_smp_ltks_clear(struct hci_dev *hdev)
1043{
1044 struct smp_ltk *k, *tmp;
1045
1046 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1047 list_del_rcu(entry: &k->list);
1048 kfree_rcu(k, rcu);
1049 }
1050}
1051
1052void hci_smp_irks_clear(struct hci_dev *hdev)
1053{
1054 struct smp_irk *k, *tmp;
1055
1056 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1057 list_del_rcu(entry: &k->list);
1058 kfree_rcu(k, rcu);
1059 }
1060}
1061
1062void hci_blocked_keys_clear(struct hci_dev *hdev)
1063{
1064 struct blocked_key *b, *tmp;
1065
1066 list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
1067 list_del_rcu(entry: &b->list);
1068 kfree_rcu(b, rcu);
1069 }
1070}
1071
1072bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1073{
1074 bool blocked = false;
1075 struct blocked_key *b;
1076
1077 rcu_read_lock();
1078 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1079 if (b->type == type && !memcmp(p: b->val, q: val, size: sizeof(b->val))) {
1080 blocked = true;
1081 break;
1082 }
1083 }
1084
1085 rcu_read_unlock();
1086 return blocked;
1087}
1088
1089struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1090{
1091 struct link_key *k;
1092
1093 rcu_read_lock();
1094 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1095 if (bacmp(ba1: bdaddr, ba2: &k->bdaddr) == 0) {
1096 rcu_read_unlock();
1097
1098 if (hci_is_blocked_key(hdev,
1099 HCI_BLOCKED_KEY_TYPE_LINKKEY,
1100 val: k->val)) {
1101 bt_dev_warn_ratelimited(hdev,
1102 "Link key blocked for %pMR",
1103 &k->bdaddr);
1104 return NULL;
1105 }
1106
1107 return k;
1108 }
1109 }
1110 rcu_read_unlock();
1111
1112 return NULL;
1113}
1114
1115static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1116 u8 key_type, u8 old_key_type)
1117{
1118 /* Legacy key */
1119 if (key_type < 0x03)
1120 return true;
1121
1122 /* Debug keys are insecure so don't store them persistently */
1123 if (key_type == HCI_LK_DEBUG_COMBINATION)
1124 return false;
1125
1126 /* Changed combination key and there's no previous one */
1127 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1128 return false;
1129
1130 /* Security mode 3 case */
1131 if (!conn)
1132 return true;
1133
1134 /* BR/EDR key derived using SC from an LE link */
1135 if (conn->type == LE_LINK)
1136 return true;
1137
1138 /* Neither local nor remote side had no-bonding as requirement */
1139 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1140 return true;
1141
1142 /* Local side had dedicated bonding as requirement */
1143 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1144 return true;
1145
1146 /* Remote side had dedicated bonding as requirement */
1147 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1148 return true;
1149
1150 /* If none of the above criteria match, then don't store the key
1151 * persistently */
1152 return false;
1153}
1154
1155static u8 ltk_role(u8 type)
1156{
1157 if (type == SMP_LTK)
1158 return HCI_ROLE_MASTER;
1159
1160 return HCI_ROLE_SLAVE;
1161}
1162
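/* Look up an LTK usable for @bdaddr/@addr_type in the given @role: keys
 * generated with Secure Connections (smp_ltk_is_sc()) work for either role,
 * while legacy keys only match the role derived from their type via
 * ltk_role(). Blocked keys are filtered out.
 */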
1163struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1164 u8 addr_type, u8 role)
1165{
1166 struct smp_ltk *k;
1167
1168 rcu_read_lock();
1169 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1170 if (addr_type != k->bdaddr_type || bacmp(ba1: bdaddr, ba2: &k->bdaddr))
1171 continue;
1172
1173 if (smp_ltk_is_sc(key: k) || ltk_role(type: k->type) == role) {
1174 rcu_read_unlock();
1175
1176 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1177 val: k->val)) {
1178 bt_dev_warn_ratelimited(hdev,
1179 "LTK blocked for %pMR",
1180 &k->bdaddr);
1181 return NULL;
1182 }
1183
1184 return k;
1185 }
1186 }
1187 rcu_read_unlock();
1188
1189 return NULL;
1190}
1191
1192struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1193{
1194 struct smp_irk *irk_to_return = NULL;
1195 struct smp_irk *irk;
1196
1197 rcu_read_lock();
1198 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1199 if (!bacmp(ba1: &irk->rpa, ba2: rpa)) {
1200 irk_to_return = irk;
1201 goto done;
1202 }
1203 }
1204
1205 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1206 if (smp_irk_matches(hdev, irk: irk->val, bdaddr: rpa)) {
1207 bacpy(dst: &irk->rpa, src: rpa);
1208 irk_to_return = irk;
1209 goto done;
1210 }
1211 }
1212
1213done:
1214 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1215 val: irk_to_return->val)) {
1216 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1217 &irk_to_return->bdaddr);
1218 irk_to_return = NULL;
1219 }
1220
1221 rcu_read_unlock();
1222
1223 return irk_to_return;
1224}
1225
1226struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1227 u8 addr_type)
1228{
1229 struct smp_irk *irk_to_return = NULL;
1230 struct smp_irk *irk;
1231
1232 /* Identity Address must be public or static random */
1233 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1234 return NULL;
1235
1236 rcu_read_lock();
1237 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1238 if (addr_type == irk->addr_type &&
1239 bacmp(ba1: bdaddr, ba2: &irk->bdaddr) == 0) {
1240 irk_to_return = irk;
1241 goto done;
1242 }
1243 }
1244
1245done:
1246
1247 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1248 val: irk_to_return->val)) {
1249 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1250 &irk_to_return->bdaddr);
1251 irk_to_return = NULL;
1252 }
1253
1254 rcu_read_unlock();
1255
1256 return irk_to_return;
1257}
1258
1259struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1260 bdaddr_t *bdaddr, u8 *val, u8 type,
1261 u8 pin_len, bool *persistent)
1262{
1263 struct link_key *key, *old_key;
1264 u8 old_key_type;
1265
1266 old_key = hci_find_link_key(hdev, bdaddr);
1267 if (old_key) {
1268 old_key_type = old_key->type;
1269 key = old_key;
1270 } else {
1271 old_key_type = conn ? conn->key_type : 0xff;
1272 key = kzalloc(sizeof(*key), GFP_KERNEL);
1273 if (!key)
1274 return NULL;
1275 list_add_rcu(new: &key->list, head: &hdev->link_keys);
1276 }
1277
1278 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1279
1280 /* Some buggy controller combinations generate a changed
1281 * combination key for legacy pairing even when there's no
1282 * previous key */
1283 if (type == HCI_LK_CHANGED_COMBINATION &&
1284 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1285 type = HCI_LK_COMBINATION;
1286 if (conn)
1287 conn->key_type = type;
1288 }
1289
1290 bacpy(dst: &key->bdaddr, src: bdaddr);
1291 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1292 key->pin_len = pin_len;
1293
1294 if (type == HCI_LK_CHANGED_COMBINATION)
1295 key->type = old_key_type;
1296 else
1297 key->type = type;
1298
1299 if (persistent)
1300 *persistent = hci_persistent_key(hdev, conn, key_type: type,
1301 old_key_type);
1302
1303 return key;
1304}
1305
1306struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1307 u8 addr_type, u8 type, u8 authenticated,
1308 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1309{
1310 struct smp_ltk *key, *old_key;
1311 u8 role = ltk_role(type);
1312
1313 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1314 if (old_key)
1315 key = old_key;
1316 else {
1317 key = kzalloc(sizeof(*key), GFP_KERNEL);
1318 if (!key)
1319 return NULL;
1320 list_add_rcu(new: &key->list, head: &hdev->long_term_keys);
1321 }
1322
1323 bacpy(dst: &key->bdaddr, src: bdaddr);
1324 key->bdaddr_type = addr_type;
1325 memcpy(key->val, tk, sizeof(key->val));
1326 key->authenticated = authenticated;
1327 key->ediv = ediv;
1328 key->rand = rand;
1329 key->enc_size = enc_size;
1330 key->type = type;
1331
1332 return key;
1333}
1334
1335struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1336 u8 addr_type, u8 val[16], bdaddr_t *rpa)
1337{
1338 struct smp_irk *irk;
1339
1340 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1341 if (!irk) {
1342 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1343 if (!irk)
1344 return NULL;
1345
1346 bacpy(dst: &irk->bdaddr, src: bdaddr);
1347 irk->addr_type = addr_type;
1348
1349 list_add_rcu(new: &irk->list, head: &hdev->identity_resolving_keys);
1350 }
1351
1352 memcpy(irk->val, val, 16);
1353 bacpy(dst: &irk->rpa, src: rpa);
1354
1355 return irk;
1356}
1357
1358int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1359{
1360 struct link_key *key;
1361
1362 key = hci_find_link_key(hdev, bdaddr);
1363 if (!key)
1364 return -ENOENT;
1365
1366 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1367
1368 list_del_rcu(entry: &key->list);
1369 kfree_rcu(key, rcu);
1370
1371 return 0;
1372}
1373
1374int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1375{
1376 struct smp_ltk *k, *tmp;
1377 int removed = 0;
1378
1379 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1380 if (bacmp(ba1: bdaddr, ba2: &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1381 continue;
1382
1383 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1384
1385 list_del_rcu(entry: &k->list);
1386 kfree_rcu(k, rcu);
1387 removed++;
1388 }
1389
1390 return removed ? 0 : -ENOENT;
1391}
1392
1393void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1394{
1395 struct smp_irk *k, *tmp;
1396
1397 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1398 if (bacmp(ba1: bdaddr, ba2: &k->bdaddr) || k->addr_type != addr_type)
1399 continue;
1400
1401 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1402
1403 list_del_rcu(entry: &k->list);
1404 kfree_rcu(k, rcu);
1405 }
1406}
1407
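/* Check whether persistent pairing material exists for @bdaddr: a stored
 * link key for BR/EDR addresses, or a stored LTK for LE addresses. For LE,
 * a resolvable private address is first mapped to its identity address via
 * a matching IRK (if any) before the LTK lookup.
 */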
1408bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1409{
1410 struct smp_ltk *k;
1411 struct smp_irk *irk;
1412 u8 addr_type;
1413
1414 if (type == BDADDR_BREDR) {
1415 if (hci_find_link_key(hdev, bdaddr))
1416 return true;
1417 return false;
1418 }
1419
1420 /* Convert to HCI addr type which struct smp_ltk uses */
1421 if (type == BDADDR_LE_PUBLIC)
1422 addr_type = ADDR_LE_DEV_PUBLIC;
1423 else
1424 addr_type = ADDR_LE_DEV_RANDOM;
1425
1426 irk = hci_get_irk(hdev, bdaddr, addr_type);
1427 if (irk) {
1428 bdaddr = &irk->bdaddr;
1429 addr_type = irk->addr_type;
1430 }
1431
1432 rcu_read_lock();
1433 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1434 if (k->bdaddr_type == addr_type && !bacmp(ba1: bdaddr, ba2: &k->bdaddr)) {
1435 rcu_read_unlock();
1436 return true;
1437 }
1438 }
1439 rcu_read_unlock();
1440
1441 return false;
1442}
1443
1444/* HCI command timer function */
1445static void hci_cmd_timeout(struct work_struct *work)
1446{
1447 struct hci_dev *hdev = container_of(work, struct hci_dev,
1448 cmd_timer.work);
1449
1450 if (hdev->req_skb) {
1451 u16 opcode = hci_skb_opcode(hdev->req_skb);
1452
1453 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1454
1455 hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
1456 } else {
1457 bt_dev_err(hdev, "command tx timeout");
1458 }
1459
1460 if (hdev->reset)
1461 hdev->reset(hdev);
1462
1463 atomic_set(v: &hdev->cmd_cnt, i: 1);
1464 queue_work(wq: hdev->workqueue, work: &hdev->cmd_work);
1465}
1466
1467/* HCI ncmd timer function */
1468static void hci_ncmd_timeout(struct work_struct *work)
1469{
1470 struct hci_dev *hdev = container_of(work, struct hci_dev,
1471 ncmd_timer.work);
1472
1473 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1474
1475 /* During HCI_INIT phase no events can be injected if the ncmd timer
1476 * triggers since the procedure has its own timeout handling.
1477 */
1478 if (test_bit(HCI_INIT, &hdev->flags))
1479 return;
1480
1481 /* This is an irrecoverable state, inject hardware error event */
1482 hci_reset_dev(hdev);
1483}
1484
1485struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1486 bdaddr_t *bdaddr, u8 bdaddr_type)
1487{
1488 struct oob_data *data;
1489
1490 list_for_each_entry(data, &hdev->remote_oob_data, list) {
1491 if (bacmp(ba1: bdaddr, ba2: &data->bdaddr) != 0)
1492 continue;
1493 if (data->bdaddr_type != bdaddr_type)
1494 continue;
1495 return data;
1496 }
1497
1498 return NULL;
1499}
1500
1501int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1502 u8 bdaddr_type)
1503{
1504 struct oob_data *data;
1505
1506 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1507 if (!data)
1508 return -ENOENT;
1509
1510 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1511
1512 list_del(entry: &data->list);
1513 kfree(objp: data);
1514
1515 return 0;
1516}
1517
1518void hci_remote_oob_data_clear(struct hci_dev *hdev)
1519{
1520 struct oob_data *data, *n;
1521
1522 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1523 list_del(entry: &data->list);
1524 kfree(objp: data);
1525 }
1526}
1527
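/* data->present encodes which OOB values are stored as a bitmask: 0x01 for
 * the P-192 hash/randomizer pair, 0x02 for the P-256 pair, 0x03 for both
 * and 0x00 for none, matching the assignments below.
 */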
1528int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1529 u8 bdaddr_type, u8 *hash192, u8 *rand192,
1530 u8 *hash256, u8 *rand256)
1531{
1532 struct oob_data *data;
1533
1534 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1535 if (!data) {
1536 data = kmalloc(sizeof(*data), GFP_KERNEL);
1537 if (!data)
1538 return -ENOMEM;
1539
1540 bacpy(dst: &data->bdaddr, src: bdaddr);
1541 data->bdaddr_type = bdaddr_type;
1542 list_add(new: &data->list, head: &hdev->remote_oob_data);
1543 }
1544
1545 if (hash192 && rand192) {
1546 memcpy(data->hash192, hash192, sizeof(data->hash192));
1547 memcpy(data->rand192, rand192, sizeof(data->rand192));
1548 if (hash256 && rand256)
1549 data->present = 0x03;
1550 } else {
1551 memset(data->hash192, 0, sizeof(data->hash192));
1552 memset(data->rand192, 0, sizeof(data->rand192));
1553 if (hash256 && rand256)
1554 data->present = 0x02;
1555 else
1556 data->present = 0x00;
1557 }
1558
1559 if (hash256 && rand256) {
1560 memcpy(data->hash256, hash256, sizeof(data->hash256));
1561 memcpy(data->rand256, rand256, sizeof(data->rand256));
1562 } else {
1563 memset(data->hash256, 0, sizeof(data->hash256));
1564 memset(data->rand256, 0, sizeof(data->rand256));
1565 if (hash192 && rand192)
1566 data->present = 0x01;
1567 }
1568
1569 BT_DBG("%s for %pMR", hdev->name, bdaddr);
1570
1571 return 0;
1572}
1573
1574/* This function requires the caller holds hdev->lock */
1575struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1576{
1577 struct adv_info *adv_instance;
1578
1579 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1580 if (adv_instance->instance == instance)
1581 return adv_instance;
1582 }
1583
1584 return NULL;
1585}
1586
1587/* This function requires the caller holds hdev->lock */
1588struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1589{
1590 struct adv_info *cur_instance;
1591
1592 cur_instance = hci_find_adv_instance(hdev, instance);
1593 if (!cur_instance)
1594 return NULL;
1595
1596 if (cur_instance == list_last_entry(&hdev->adv_instances,
1597 struct adv_info, list))
1598 return list_first_entry(&hdev->adv_instances,
1599 struct adv_info, list);
1600 else
1601 return list_next_entry(cur_instance, list);
1602}
1603
1604/* This function requires the caller holds hdev->lock */
1605int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1606{
1607 struct adv_info *adv_instance;
1608
1609 adv_instance = hci_find_adv_instance(hdev, instance);
1610 if (!adv_instance)
1611 return -ENOENT;
1612
	BT_DBG("%s removing instance %d", hdev->name, instance);
1614
1615 if (hdev->cur_adv_instance == instance) {
1616 if (hdev->adv_instance_timeout) {
1617 cancel_delayed_work(dwork: &hdev->adv_instance_expire);
1618 hdev->adv_instance_timeout = 0;
1619 }
1620 hdev->cur_adv_instance = 0x00;
1621 }
1622
1623 cancel_delayed_work_sync(dwork: &adv_instance->rpa_expired_cb);
1624
1625 list_del(entry: &adv_instance->list);
1626 kfree(objp: adv_instance);
1627
1628 hdev->adv_instance_cnt--;
1629
1630 return 0;
1631}
1632
1633void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1634{
1635 struct adv_info *adv_instance, *n;
1636
1637 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1638 adv_instance->rpa_expired = rpa_expired;
1639}
1640
1641/* This function requires the caller holds hdev->lock */
1642void hci_adv_instances_clear(struct hci_dev *hdev)
1643{
1644 struct adv_info *adv_instance, *n;
1645
1646 if (hdev->adv_instance_timeout) {
1647 disable_delayed_work(dwork: &hdev->adv_instance_expire);
1648 hdev->adv_instance_timeout = 0;
1649 }
1650
1651 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1652 disable_delayed_work_sync(dwork: &adv_instance->rpa_expired_cb);
1653 list_del(entry: &adv_instance->list);
1654 kfree(objp: adv_instance);
1655 }
1656
1657 hdev->adv_instance_cnt = 0;
1658 hdev->cur_adv_instance = 0x00;
1659}
1660
1661static void adv_instance_rpa_expired(struct work_struct *work)
1662{
1663 struct adv_info *adv_instance = container_of(work, struct adv_info,
1664 rpa_expired_cb.work);
1665
1666 BT_DBG("");
1667
1668 adv_instance->rpa_expired = true;
1669}
1670
1671/* This function requires the caller holds hdev->lock */
1672struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1673 u32 flags, u16 adv_data_len, u8 *adv_data,
1674 u16 scan_rsp_len, u8 *scan_rsp_data,
1675 u16 timeout, u16 duration, s8 tx_power,
1676 u32 min_interval, u32 max_interval,
1677 u8 mesh_handle)
1678{
1679 struct adv_info *adv;
1680
1681 adv = hci_find_adv_instance(hdev, instance);
1682 if (adv) {
1683 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1684 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1685 memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1686 } else {
1687 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1688 instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1689 return ERR_PTR(error: -EOVERFLOW);
1690
1691 adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1692 if (!adv)
1693 return ERR_PTR(error: -ENOMEM);
1694
1695 adv->pending = true;
1696 adv->instance = instance;
1697
		/* If the controller supports only one set and the instance is
		 * set to 1 then there is no option other than using handle 0x00.
		 */
1701 if (hdev->le_num_of_adv_sets == 1 && instance == 1)
1702 adv->handle = 0x00;
1703 else
1704 adv->handle = instance;
1705
1706 list_add(new: &adv->list, head: &hdev->adv_instances);
1707 hdev->adv_instance_cnt++;
1708 }
1709
1710 adv->flags = flags;
1711 adv->min_interval = min_interval;
1712 adv->max_interval = max_interval;
1713 adv->tx_power = tx_power;
1714 /* Defining a mesh_handle changes the timing units to ms,
1715 * rather than seconds, and ties the instance to the requested
1716 * mesh_tx queue.
1717 */
1718 adv->mesh = mesh_handle;
1719
1720 hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1721 scan_rsp_len, scan_rsp_data);
1722
1723 adv->timeout = timeout;
1724 adv->remaining_time = timeout;
1725
1726 if (duration == 0)
1727 adv->duration = hdev->def_multi_adv_rotation_duration;
1728 else
1729 adv->duration = duration;
1730
1731 INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1732
	BT_DBG("%s for instance %d", hdev->name, instance);
1734
1735 return adv;
1736}
1737
1738/* This function requires the caller holds hdev->lock */
1739struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1740 u32 flags, u8 data_len, u8 *data,
1741 u32 min_interval, u32 max_interval)
1742{
1743 struct adv_info *adv;
1744
1745 adv = hci_add_adv_instance(hdev, instance, flags, adv_data_len: 0, NULL, scan_rsp_len: 0, NULL,
1746 timeout: 0, duration: 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1747 min_interval, max_interval, mesh_handle: 0);
1748 if (IS_ERR(ptr: adv))
1749 return adv;
1750
1751 adv->periodic = true;
1752 adv->per_adv_data_len = data_len;
1753
1754 if (data)
1755 memcpy(adv->per_adv_data, data, data_len);
1756
1757 return adv;
1758}
1759
1760/* This function requires the caller holds hdev->lock */
1761int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1762 u16 adv_data_len, u8 *adv_data,
1763 u16 scan_rsp_len, u8 *scan_rsp_data)
1764{
1765 struct adv_info *adv;
1766
1767 adv = hci_find_adv_instance(hdev, instance);
1768
1769 /* If advertisement doesn't exist, we can't modify its data */
1770 if (!adv)
1771 return -ENOENT;
1772
1773 if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1774 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1775 memcpy(adv->adv_data, adv_data, adv_data_len);
1776 adv->adv_data_len = adv_data_len;
1777 adv->adv_data_changed = true;
1778 }
1779
1780 if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1781 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1782 memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1783 adv->scan_rsp_len = scan_rsp_len;
1784 adv->scan_rsp_changed = true;
1785 }
1786
1787 /* Mark as changed if there are flags which would affect it */
1788 if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1789 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1790 adv->scan_rsp_changed = true;
1791
1792 return 0;
1793}
1794
1795/* This function requires the caller holds hdev->lock */
1796u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1797{
1798 u32 flags;
1799 struct adv_info *adv;
1800
1801 if (instance == 0x00) {
1802 /* Instance 0 always manages the "Tx Power" and "Flags"
1803 * fields
1804 */
1805 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1806
1807 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1808 * corresponds to the "connectable" instance flag.
1809 */
1810 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1811 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1812
1813 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1814 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1815 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1816 flags |= MGMT_ADV_FLAG_DISCOV;
1817
1818 return flags;
1819 }
1820
1821 adv = hci_find_adv_instance(hdev, instance);
1822
1823 /* Return 0 when we got an invalid instance identifier. */
1824 if (!adv)
1825 return 0;
1826
1827 return adv->flags;
1828}
1829
1830bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1831{
1832 struct adv_info *adv;
1833
	/* Instance 0x00 always sets the local name */
1835 if (instance == 0x00)
1836 return true;
1837
1838 adv = hci_find_adv_instance(hdev, instance);
1839 if (!adv)
1840 return false;
1841
1842 if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1843 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1844 return true;
1845
1846 return adv->scan_rsp_len ? true : false;
1847}
1848
1849/* This function requires the caller holds hdev->lock */
1850void hci_adv_monitors_clear(struct hci_dev *hdev)
1851{
1852 struct adv_monitor *monitor;
1853 int handle;
1854
1855 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1856 hci_free_adv_monitor(hdev, monitor);
1857
1858 idr_destroy(&hdev->adv_monitors_idr);
1859}
1860
/* Frees the monitor structure and does some bookkeeping.
1862 * This function requires the caller holds hdev->lock.
1863 */
1864void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1865{
1866 struct adv_pattern *pattern;
1867 struct adv_pattern *tmp;
1868
1869 if (!monitor)
1870 return;
1871
1872 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1873 list_del(entry: &pattern->list);
1874 kfree(objp: pattern);
1875 }
1876
1877 if (monitor->handle)
1878 idr_remove(&hdev->adv_monitors_idr, id: monitor->handle);
1879
1880 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1881 hdev->adv_monitors_cnt--;
1882 mgmt_adv_monitor_removed(hdev, handle: monitor->handle);
1883 }
1884
1885 kfree(objp: monitor);
1886}
1887
1888/* Assigns handle to a monitor, and if offloading is supported and power is on,
1889 * also attempts to forward the request to the controller.
1890 * This function requires the caller holds hci_req_sync_lock.
1891 */
1892int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1893{
1894 int min, max, handle;
1895 int status = 0;
1896
1897 if (!monitor)
1898 return -EINVAL;
1899
1900 hci_dev_lock(hdev);
1901
1902 min = HCI_MIN_ADV_MONITOR_HANDLE;
1903 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1904 handle = idr_alloc(&hdev->adv_monitors_idr, ptr: monitor, start: min, end: max,
1905 GFP_KERNEL);
1906
1907 hci_dev_unlock(hdev);
1908
1909 if (handle < 0)
1910 return handle;
1911
1912 monitor->handle = handle;
1913
1914 if (!hdev_is_powered(hdev))
1915 return status;
1916
1917 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1918 case HCI_ADV_MONITOR_EXT_NONE:
1919 bt_dev_dbg(hdev, "add monitor %d status %d",
1920 monitor->handle, status);
1921 /* Message was not forwarded to controller - not an error */
1922 break;
1923
1924 case HCI_ADV_MONITOR_EXT_MSFT:
1925 status = msft_add_monitor_pattern(hdev, monitor);
1926 bt_dev_dbg(hdev, "add monitor %d msft status %d",
1927 handle, status);
1928 break;
1929 }
1930
1931 return status;
1932}
1933
1934/* Attempts to tell the controller and free the monitor. If somehow the
1935 * controller doesn't have a corresponding handle, remove anyway.
1936 * This function requires the caller holds hci_req_sync_lock.
1937 */
1938static int hci_remove_adv_monitor(struct hci_dev *hdev,
1939 struct adv_monitor *monitor)
1940{
1941 int status = 0;
1942 int handle;
1943
1944 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1945 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
1946 bt_dev_dbg(hdev, "remove monitor %d status %d",
1947 monitor->handle, status);
1948 goto free_monitor;
1949
1950 case HCI_ADV_MONITOR_EXT_MSFT:
1951 handle = monitor->handle;
1952 status = msft_remove_monitor(hdev, monitor);
1953 bt_dev_dbg(hdev, "remove monitor %d msft status %d",
1954 handle, status);
1955 break;
1956 }
1957
1958 /* In case no matching handle registered, just free the monitor */
1959 if (status == -ENOENT)
1960 goto free_monitor;
1961
1962 return status;
1963
1964free_monitor:
1965 if (status == -ENOENT)
1966 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
1967 monitor->handle);
1968 hci_free_adv_monitor(hdev, monitor);
1969
1970 return status;
1971}
1972
1973/* This function requires the caller holds hci_req_sync_lock */
1974int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
1975{
1976 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, id: handle);
1977
1978 if (!monitor)
1979 return -EINVAL;
1980
1981 return hci_remove_adv_monitor(hdev, monitor);
1982}
1983
1984/* This function requires the caller holds hci_req_sync_lock */
1985int hci_remove_all_adv_monitor(struct hci_dev *hdev)
1986{
1987 struct adv_monitor *monitor;
1988 int idr_next_id = 0;
1989 int status = 0;
1990
1991 while (1) {
1992 monitor = idr_get_next(&hdev->adv_monitors_idr, nextid: &idr_next_id);
1993 if (!monitor)
1994 break;
1995
1996 status = hci_remove_adv_monitor(hdev, monitor);
1997 if (status)
1998 return status;
1999
2000 idr_next_id++;
2001 }
2002
2003 return status;
2004}
2005
2006/* This function requires the caller holds hdev->lock */
2007bool hci_is_adv_monitoring(struct hci_dev *hdev)
2008{
2009 return !idr_is_empty(idr: &hdev->adv_monitors_idr);
2010}
2011
2012int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2013{
2014 if (msft_monitor_supported(hdev))
2015 return HCI_ADV_MONITOR_EXT_MSFT;
2016
2017 return HCI_ADV_MONITOR_EXT_NONE;
2018}
2019
2020struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2021 bdaddr_t *bdaddr, u8 type)
2022{
2023 struct bdaddr_list *b;
2024
2025 list_for_each_entry(b, bdaddr_list, list) {
2026 if (!bacmp(ba1: &b->bdaddr, ba2: bdaddr) && b->bdaddr_type == type)
2027 return b;
2028 }
2029
2030 return NULL;
2031}
2032
2033struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2034 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2035 u8 type)
2036{
2037 struct bdaddr_list_with_irk *b;
2038
2039 list_for_each_entry(b, bdaddr_list, list) {
2040 if (!bacmp(ba1: &b->bdaddr, ba2: bdaddr) && b->bdaddr_type == type)
2041 return b;
2042 }
2043
2044 return NULL;
2045}
2046
2047struct bdaddr_list_with_flags *
2048hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2049 bdaddr_t *bdaddr, u8 type)
2050{
2051 struct bdaddr_list_with_flags *b;
2052
2053 list_for_each_entry(b, bdaddr_list, list) {
2054 if (!bacmp(ba1: &b->bdaddr, ba2: bdaddr) && b->bdaddr_type == type)
2055 return b;
2056 }
2057
2058 return NULL;
2059}
2060
2061void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2062{
2063 struct bdaddr_list *b, *n;
2064
2065 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2066 list_del(entry: &b->list);
2067 kfree(objp: b);
2068 }
2069}
2070
2071int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2072{
2073 struct bdaddr_list *entry;
2074
2075 if (!bacmp(ba1: bdaddr, BDADDR_ANY))
2076 return -EBADF;
2077
2078 if (hci_bdaddr_list_lookup(bdaddr_list: list, bdaddr, type))
2079 return -EEXIST;
2080
2081 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2082 if (!entry)
2083 return -ENOMEM;
2084
2085 bacpy(dst: &entry->bdaddr, src: bdaddr);
2086 entry->bdaddr_type = type;
2087
2088 list_add(new: &entry->list, head: list);
2089
2090 return 0;
2091}
2092
2093int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2094 u8 type, u8 *peer_irk, u8 *local_irk)
2095{
2096 struct bdaddr_list_with_irk *entry;
2097
2098 if (!bacmp(ba1: bdaddr, BDADDR_ANY))
2099 return -EBADF;
2100
2101 if (hci_bdaddr_list_lookup(bdaddr_list: list, bdaddr, type))
2102 return -EEXIST;
2103
2104 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2105 if (!entry)
2106 return -ENOMEM;
2107
2108 bacpy(dst: &entry->bdaddr, src: bdaddr);
2109 entry->bdaddr_type = type;
2110
2111 if (peer_irk)
2112 memcpy(entry->peer_irk, peer_irk, 16);
2113
2114 if (local_irk)
2115 memcpy(entry->local_irk, local_irk, 16);
2116
2117 list_add(new: &entry->list, head: list);
2118
2119 return 0;
2120}
2121
2122int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2123 u8 type, u32 flags)
2124{
2125 struct bdaddr_list_with_flags *entry;
2126
2127 if (!bacmp(ba1: bdaddr, BDADDR_ANY))
2128 return -EBADF;
2129
2130 if (hci_bdaddr_list_lookup(bdaddr_list: list, bdaddr, type))
2131 return -EEXIST;
2132
2133 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2134 if (!entry)
2135 return -ENOMEM;
2136
2137 bacpy(dst: &entry->bdaddr, src: bdaddr);
2138 entry->bdaddr_type = type;
2139 entry->flags = flags;
2140
2141 list_add(new: &entry->list, head: list);
2142
2143 return 0;
2144}
2145
2146int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2147{
2148 struct bdaddr_list *entry;
2149
2150 if (!bacmp(ba1: bdaddr, BDADDR_ANY)) {
2151 hci_bdaddr_list_clear(bdaddr_list: list);
2152 return 0;
2153 }
2154
2155 entry = hci_bdaddr_list_lookup(bdaddr_list: list, bdaddr, type);
2156 if (!entry)
2157 return -ENOENT;
2158
2159 list_del(entry: &entry->list);
2160 kfree(objp: entry);
2161
2162 return 0;
2163}
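
/* Illustrative sketch, not part of the original file: typical use of the
 * bdaddr_list helpers above. The helper name and the choice of
 * hdev->reject_list are example assumptions; callers normally hold
 * hdev->lock while manipulating these lists.
 */
static int example_reject_device(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        int err;

        hci_dev_lock(hdev);

        /* Adding the same address twice yields -EEXIST */
        err = hci_bdaddr_list_add(&hdev->reject_list, bdaddr, BDADDR_BREDR);
        if (err == -EEXIST)
                err = 0;

        hci_dev_unlock(hdev);

        return err;
}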
2164
2165int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2166 u8 type)
2167{
2168 struct bdaddr_list_with_irk *entry;
2169
2170 if (!bacmp(ba1: bdaddr, BDADDR_ANY)) {
2171 hci_bdaddr_list_clear(bdaddr_list: list);
2172 return 0;
2173 }
2174
2175 entry = hci_bdaddr_list_lookup_with_irk(bdaddr_list: list, bdaddr, type);
2176 if (!entry)
2177 return -ENOENT;
2178
2179 list_del(entry: &entry->list);
2180 kfree(objp: entry);
2181
2182 return 0;
2183}
2184
2185/* This function requires the caller holds hdev->lock */
2186struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2187 bdaddr_t *addr, u8 addr_type)
2188{
2189 struct hci_conn_params *params;
2190
2191 list_for_each_entry(params, &hdev->le_conn_params, list) {
2192 if (bacmp(ba1: &params->addr, ba2: addr) == 0 &&
2193 params->addr_type == addr_type) {
2194 return params;
2195 }
2196 }
2197
2198 return NULL;
2199}
2200
2201/* This function requires the caller holds hdev->lock or rcu_read_lock */
2202struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2203 bdaddr_t *addr, u8 addr_type)
2204{
2205 struct hci_conn_params *param;
2206
2207 rcu_read_lock();
2208
2209 list_for_each_entry_rcu(param, list, action) {
2210 if (bacmp(ba1: &param->addr, ba2: addr) == 0 &&
2211 param->addr_type == addr_type) {
2212 rcu_read_unlock();
2213 return param;
2214 }
2215 }
2216
2217 rcu_read_unlock();
2218
2219 return NULL;
2220}
2221
2222/* This function requires the caller holds hdev->lock */
2223void hci_pend_le_list_del_init(struct hci_conn_params *param)
2224{
2225 if (list_empty(head: &param->action))
2226 return;
2227
2228 list_del_rcu(entry: &param->action);
2229 synchronize_rcu();
2230 INIT_LIST_HEAD(list: &param->action);
2231}
2232
2233/* This function requires the caller holds hdev->lock */
2234void hci_pend_le_list_add(struct hci_conn_params *param,
2235 struct list_head *list)
2236{
2237 list_add_rcu(new: &param->action, head: list);
2238}
2239
2240/* This function requires the caller holds hdev->lock */
2241struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2242 bdaddr_t *addr, u8 addr_type)
2243{
2244 struct hci_conn_params *params;
2245
2246 params = hci_conn_params_lookup(hdev, addr, addr_type);
2247 if (params)
2248 return params;
2249
2250 params = kzalloc(sizeof(*params), GFP_KERNEL);
2251 if (!params) {
2252 bt_dev_err(hdev, "out of memory");
2253 return NULL;
2254 }
2255
2256 bacpy(dst: &params->addr, src: addr);
2257 params->addr_type = addr_type;
2258
2259 list_add(new: &params->list, head: &hdev->le_conn_params);
2260 INIT_LIST_HEAD(list: &params->action);
2261
2262 params->conn_min_interval = hdev->le_conn_min_interval;
2263 params->conn_max_interval = hdev->le_conn_max_interval;
2264 params->conn_latency = hdev->le_conn_latency;
2265 params->supervision_timeout = hdev->le_supv_timeout;
2266 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2267
2268 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2269
2270 return params;
2271}
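
/* Illustrative sketch, not part of the original file: a caller adding LE
 * connection parameters for a peer and enabling auto-connection. The
 * function name is hypothetical; hdev->lock is taken here because
 * hci_conn_params_add() above requires the caller to hold it.
 */
static int example_enable_autoconnect(struct hci_dev *hdev, bdaddr_t *addr,
                                      u8 addr_type)
{
        struct hci_conn_params *params;

        hci_dev_lock(hdev);

        params = hci_conn_params_add(hdev, addr, addr_type);
        if (params)
                params->auto_connect = HCI_AUTO_CONN_ALWAYS;

        hci_dev_unlock(hdev);

        return params ? 0 : -ENOMEM;
}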
2272
2273void hci_conn_params_free(struct hci_conn_params *params)
2274{
2275 hci_pend_le_list_del_init(param: params);
2276
2277 if (params->conn) {
2278 hci_conn_drop(conn: params->conn);
2279 hci_conn_put(conn: params->conn);
2280 }
2281
2282 list_del(entry: &params->list);
2283 kfree(objp: params);
2284}
2285
2286/* This function requires the caller holds hdev->lock */
2287void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2288{
2289 struct hci_conn_params *params;
2290
2291 params = hci_conn_params_lookup(hdev, addr, addr_type);
2292 if (!params)
2293 return;
2294
2295 hci_conn_params_free(params);
2296
2297 hci_update_passive_scan(hdev);
2298
2299 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2300}
2301
2302/* This function requires the caller holds hdev->lock */
2303void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2304{
2305 struct hci_conn_params *params, *tmp;
2306
2307 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2308 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2309 continue;
2310
2311 /* If we are trying to establish a one-time connection to a disabled
2312 * device, leave the params but mark them for one-time (explicit) use.
2313 */
2314 if (params->explicit_connect) {
2315 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2316 continue;
2317 }
2318
2319 hci_conn_params_free(params);
2320 }
2321
2322 BT_DBG("All LE disabled connection parameters were removed");
2323}
2324
2325/* This function requires the caller holds hdev->lock */
2326static void hci_conn_params_clear_all(struct hci_dev *hdev)
2327{
2328 struct hci_conn_params *params, *tmp;
2329
2330 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2331 hci_conn_params_free(params);
2332
2333 BT_DBG("All LE connection parameters were removed");
2334}
2335
2336/* Copy the Identity Address of the controller.
2337 *
2338 * If the controller has a public BD_ADDR, then by default use that one.
2339 * If this is a LE only controller without a public address, default to
2340 * the static random address.
2341 *
2342 * For debugging purposes it is possible to force controllers with a
2343 * public address to use the static random address instead.
2344 *
2345 * In case BR/EDR has been disabled on a dual-mode controller and
2346 * userspace has configured a static address, then that address
2347 * becomes the identity address instead of the public BR/EDR address.
2348 */
2349void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2350 u8 *bdaddr_type)
2351{
2352 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2353 !bacmp(ba1: &hdev->bdaddr, BDADDR_ANY) ||
2354 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2355 bacmp(ba1: &hdev->static_addr, BDADDR_ANY))) {
2356 bacpy(dst: bdaddr, src: &hdev->static_addr);
2357 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2358 } else {
2359 bacpy(dst: bdaddr, src: &hdev->bdaddr);
2360 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2361 }
2362}
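
/* Illustrative sketch, not part of the original file: checking whether the
 * identity address resolved by hci_copy_identity_address() above is the
 * static random address. The helper name is hypothetical.
 */
static bool example_uses_static_identity(struct hci_dev *hdev)
{
        bdaddr_t id_addr;
        u8 id_addr_type;

        hci_copy_identity_address(hdev, &id_addr, &id_addr_type);

        return id_addr_type == ADDR_LE_DEV_RANDOM;
}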
2363
2364static void hci_clear_wake_reason(struct hci_dev *hdev)
2365{
2366 hci_dev_lock(hdev);
2367
2368 hdev->wake_reason = 0;
2369 bacpy(dst: &hdev->wake_addr, BDADDR_ANY);
2370 hdev->wake_addr_type = 0;
2371
2372 hci_dev_unlock(hdev);
2373}
2374
2375static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2376 void *data)
2377{
2378 struct hci_dev *hdev =
2379 container_of(nb, struct hci_dev, suspend_notifier);
2380 int ret = 0;
2381
2382 /* Userspace has full control of this device. Do nothing. */
2383 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2384 return NOTIFY_DONE;
2385
2386 /* To avoid a potential race with hci_unregister_dev. */
2387 hci_dev_hold(d: hdev);
2388
2389 switch (action) {
2390 case PM_HIBERNATION_PREPARE:
2391 case PM_SUSPEND_PREPARE:
2392 ret = hci_suspend_dev(hdev);
2393 break;
2394 case PM_POST_HIBERNATION:
2395 case PM_POST_SUSPEND:
2396 ret = hci_resume_dev(hdev);
2397 break;
2398 }
2399
2400 if (ret)
2401 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2402 action, ret);
2403
2404 hci_dev_put(d: hdev);
2405 return NOTIFY_DONE;
2406}
2407
2408/* Alloc HCI device */
2409struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2410{
2411 struct hci_dev *hdev;
2412 unsigned int alloc_size;
2413
2414 alloc_size = sizeof(*hdev);
2415 if (sizeof_priv) {
2416 /* Fixme: May need ALIGN-ment? */
2417 alloc_size += sizeof_priv;
2418 }
2419
2420 hdev = kzalloc(alloc_size, GFP_KERNEL);
2421 if (!hdev)
2422 return NULL;
2423
2424 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2425 hdev->esco_type = (ESCO_HV1);
2426 hdev->link_mode = (HCI_LM_ACCEPT);
2427 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2428 hdev->io_capability = 0x03; /* No Input No Output */
2429 hdev->manufacturer = 0xffff; /* Default to internal use */
2430 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2431 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2432 hdev->adv_instance_cnt = 0;
2433 hdev->cur_adv_instance = 0x00;
2434 hdev->adv_instance_timeout = 0;
2435
2436 hdev->advmon_allowlist_duration = 300;
2437 hdev->advmon_no_filter_duration = 500;
2438 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
2439
2440 hdev->sniff_max_interval = 800;
2441 hdev->sniff_min_interval = 80;
2442
2443 hdev->le_adv_channel_map = 0x07;
2444 hdev->le_adv_min_interval = 0x0800;
2445 hdev->le_adv_max_interval = 0x0800;
2446 hdev->le_scan_interval = DISCOV_LE_SCAN_INT_FAST;
2447 hdev->le_scan_window = DISCOV_LE_SCAN_WIN_FAST;
2448 hdev->le_scan_int_suspend = DISCOV_LE_SCAN_INT_SLOW1;
2449 hdev->le_scan_window_suspend = DISCOV_LE_SCAN_WIN_SLOW1;
2450 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2451 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2452 hdev->le_scan_int_adv_monitor = DISCOV_LE_SCAN_INT_FAST;
2453 hdev->le_scan_window_adv_monitor = DISCOV_LE_SCAN_WIN_FAST;
2454 hdev->le_scan_int_connect = DISCOV_LE_SCAN_INT_CONN;
2455 hdev->le_scan_window_connect = DISCOV_LE_SCAN_WIN_CONN;
2456 hdev->le_conn_min_interval = 0x0018;
2457 hdev->le_conn_max_interval = 0x0028;
2458 hdev->le_conn_latency = 0x0000;
2459 hdev->le_supv_timeout = 0x002a;
2460 hdev->le_def_tx_len = 0x001b;
2461 hdev->le_def_tx_time = 0x0148;
2462 hdev->le_max_tx_len = 0x001b;
2463 hdev->le_max_tx_time = 0x0148;
2464 hdev->le_max_rx_len = 0x001b;
2465 hdev->le_max_rx_time = 0x0148;
2466 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2467 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2468 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2469 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2470 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2471 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2472 hdev->def_le_autoconnect_timeout = HCI_LE_CONN_TIMEOUT;
2473 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2474 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2475
2476 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2477 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2478 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2479 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2480 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2481 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2482
2483 /* default 1.28 sec page scan */
2484 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2485 hdev->def_page_scan_int = 0x0800;
2486 hdev->def_page_scan_window = 0x0012;
2487
2488 mutex_init(&hdev->lock);
2489 mutex_init(&hdev->req_lock);
2490
2491 ida_init(ida: &hdev->unset_handle_ida);
2492
2493 INIT_LIST_HEAD(list: &hdev->mesh_pending);
2494 INIT_LIST_HEAD(list: &hdev->mgmt_pending);
2495 INIT_LIST_HEAD(list: &hdev->reject_list);
2496 INIT_LIST_HEAD(list: &hdev->accept_list);
2497 INIT_LIST_HEAD(list: &hdev->uuids);
2498 INIT_LIST_HEAD(list: &hdev->link_keys);
2499 INIT_LIST_HEAD(list: &hdev->long_term_keys);
2500 INIT_LIST_HEAD(list: &hdev->identity_resolving_keys);
2501 INIT_LIST_HEAD(list: &hdev->remote_oob_data);
2502 INIT_LIST_HEAD(list: &hdev->le_accept_list);
2503 INIT_LIST_HEAD(list: &hdev->le_resolv_list);
2504 INIT_LIST_HEAD(list: &hdev->le_conn_params);
2505 INIT_LIST_HEAD(list: &hdev->pend_le_conns);
2506 INIT_LIST_HEAD(list: &hdev->pend_le_reports);
2507 INIT_LIST_HEAD(list: &hdev->conn_hash.list);
2508 INIT_LIST_HEAD(list: &hdev->adv_instances);
2509 INIT_LIST_HEAD(list: &hdev->blocked_keys);
2510 INIT_LIST_HEAD(list: &hdev->monitored_devices);
2511
2512 INIT_LIST_HEAD(list: &hdev->local_codecs);
2513 INIT_WORK(&hdev->rx_work, hci_rx_work);
2514 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2515 INIT_WORK(&hdev->tx_work, hci_tx_work);
2516 INIT_WORK(&hdev->power_on, hci_power_on);
2517 INIT_WORK(&hdev->error_reset, hci_error_reset);
2518
2519 hci_cmd_sync_init(hdev);
2520
2521 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2522
2523 skb_queue_head_init(list: &hdev->rx_q);
2524 skb_queue_head_init(list: &hdev->cmd_q);
2525 skb_queue_head_init(list: &hdev->raw_q);
2526
2527 init_waitqueue_head(&hdev->req_wait_q);
2528
2529 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2530 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2531
2532 hci_devcd_setup(hdev);
2533
2534 hci_init_sysfs(hdev);
2535 discovery_init(hdev);
2536
2537 return hdev;
2538}
2539EXPORT_SYMBOL(hci_alloc_dev_priv);
2540
2541/* Free HCI device */
2542void hci_free_dev(struct hci_dev *hdev)
2543{
2544 /* actual freeing happens via the device release callback */
2545 put_device(dev: &hdev->dev);
2546}
2547EXPORT_SYMBOL(hci_free_dev);
2548
2549/* Register HCI device */
2550int hci_register_dev(struct hci_dev *hdev)
2551{
2552 int id, error;
2553
2554 if (!hdev->open || !hdev->close || !hdev->send)
2555 return -EINVAL;
2556
2557 id = ida_alloc_max(ida: &hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
2558 if (id < 0)
2559 return id;
2560
2561 error = dev_set_name(dev: &hdev->dev, name: "hci%u", id);
2562 if (error)
2563 return error;
2564
2565 hdev->name = dev_name(dev: &hdev->dev);
2566 hdev->id = id;
2567
2568 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2569
2570 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2571 if (!hdev->workqueue) {
2572 error = -ENOMEM;
2573 goto err;
2574 }
2575
2576 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2577 hdev->name);
2578 if (!hdev->req_workqueue) {
2579 destroy_workqueue(wq: hdev->workqueue);
2580 error = -ENOMEM;
2581 goto err;
2582 }
2583
2584 if (!IS_ERR_OR_NULL(ptr: bt_debugfs))
2585 hdev->debugfs = debugfs_create_dir(name: hdev->name, parent: bt_debugfs);
2586
2587 error = device_add(dev: &hdev->dev);
2588 if (error < 0)
2589 goto err_wqueue;
2590
2591 hci_leds_init(hdev);
2592
2593 hdev->rfkill = rfkill_alloc(name: hdev->name, parent: &hdev->dev,
2594 type: RFKILL_TYPE_BLUETOOTH, ops: &hci_rfkill_ops,
2595 ops_data: hdev);
2596 if (hdev->rfkill) {
2597 if (rfkill_register(rfkill: hdev->rfkill) < 0) {
2598 rfkill_destroy(rfkill: hdev->rfkill);
2599 hdev->rfkill = NULL;
2600 }
2601 }
2602
2603 if (hdev->rfkill && rfkill_blocked(rfkill: hdev->rfkill))
2604 hci_dev_set_flag(hdev, HCI_RFKILLED);
2605
2606 hci_dev_set_flag(hdev, HCI_SETUP);
2607 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2608
2609 /* Assume BR/EDR support until proven otherwise (such as
2610 * through reading the supported features during init).
2611 */
2612 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2613
2614 write_lock(&hci_dev_list_lock);
2615 list_add(new: &hdev->list, head: &hci_dev_list);
2616 write_unlock(&hci_dev_list_lock);
2617
2618 /* Devices that are marked for raw-only usage are unconfigured
2619 * and should not be included in normal operation.
2620 */
2621 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2622 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2623
2624 /* Mark the Remote Wakeup connection flag as supported if the driver
2625 * has a wakeup callback.
2626 */
2627 if (hdev->wakeup)
2628 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2629
2630 hci_sock_dev_event(hdev, HCI_DEV_REG);
2631 hci_dev_hold(d: hdev);
2632
2633 error = hci_register_suspend_notifier(hdev);
2634 if (error)
2635 BT_WARN("register suspend notifier failed error:%d\n", error);
2636
2637 queue_work(wq: hdev->req_workqueue, work: &hdev->power_on);
2638
2639 idr_init(idr: &hdev->adv_monitors_idr);
2640 msft_register(hdev);
2641
2642 return id;
2643
2644err_wqueue:
2645 debugfs_remove_recursive(dentry: hdev->debugfs);
2646 destroy_workqueue(wq: hdev->workqueue);
2647 destroy_workqueue(wq: hdev->req_workqueue);
2648err:
2649 ida_free(&hci_index_ida, id: hdev->id);
2650
2651 return error;
2652}
2653EXPORT_SYMBOL(hci_register_dev);
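
/* Illustrative sketch, not part of the original file: the minimal driver
 * pattern expected by hci_register_dev() above, which insists on open,
 * close and send callbacks. All names here are hypothetical and the send
 * callback simply discards frames instead of driving real hardware.
 */
static int example_drv_open(struct hci_dev *hdev)
{
        return 0;
}

static int example_drv_close(struct hci_dev *hdev)
{
        return 0;
}

static int example_drv_send(struct hci_dev *hdev, struct sk_buff *skb)
{
        /* A real driver would hand the frame to its transport here */
        kfree_skb(skb);
        return 0;
}

static int example_drv_probe(void)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_alloc_dev();
        if (!hdev)
                return -ENOMEM;

        hdev->bus = HCI_VIRTUAL;
        hdev->open = example_drv_open;
        hdev->close = example_drv_close;
        hdev->send = example_drv_send;

        err = hci_register_dev(hdev);
        if (err < 0) {
                hci_free_dev(hdev);
                return err;
        }

        return 0;
}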
2654
2655/* Unregister HCI device */
2656void hci_unregister_dev(struct hci_dev *hdev)
2657{
2658 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2659
2660 mutex_lock(&hdev->unregister_lock);
2661 hci_dev_set_flag(hdev, HCI_UNREGISTER);
2662 mutex_unlock(lock: &hdev->unregister_lock);
2663
2664 write_lock(&hci_dev_list_lock);
2665 list_del(entry: &hdev->list);
2666 write_unlock(&hci_dev_list_lock);
2667
2668 disable_work_sync(work: &hdev->rx_work);
2669 disable_work_sync(work: &hdev->cmd_work);
2670 disable_work_sync(work: &hdev->tx_work);
2671 disable_work_sync(work: &hdev->power_on);
2672 disable_work_sync(work: &hdev->error_reset);
2673
2674 hci_cmd_sync_clear(hdev);
2675
2676 hci_unregister_suspend_notifier(hdev);
2677
2678 hci_dev_do_close(hdev);
2679
2680 if (!test_bit(HCI_INIT, &hdev->flags) &&
2681 !hci_dev_test_flag(hdev, HCI_SETUP) &&
2682 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2683 hci_dev_lock(hdev);
2684 mgmt_index_removed(hdev);
2685 hci_dev_unlock(hdev);
2686 }
2687
2688 /* mgmt_index_removed should take care of emptying the
2689 * pending list */
2690 BUG_ON(!list_empty(&hdev->mgmt_pending));
2691
2692 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2693
2694 if (hdev->rfkill) {
2695 rfkill_unregister(rfkill: hdev->rfkill);
2696 rfkill_destroy(rfkill: hdev->rfkill);
2697 }
2698
2699 device_del(dev: &hdev->dev);
2700 /* Actual cleanup is deferred until hci_release_dev(). */
2701 hci_dev_put(d: hdev);
2702}
2703EXPORT_SYMBOL(hci_unregister_dev);
2704
2705/* Release HCI device */
2706void hci_release_dev(struct hci_dev *hdev)
2707{
2708 debugfs_remove_recursive(dentry: hdev->debugfs);
2709 kfree_const(x: hdev->hw_info);
2710 kfree_const(x: hdev->fw_info);
2711
2712 destroy_workqueue(wq: hdev->workqueue);
2713 destroy_workqueue(wq: hdev->req_workqueue);
2714
2715 hci_dev_lock(hdev);
2716 hci_bdaddr_list_clear(bdaddr_list: &hdev->reject_list);
2717 hci_bdaddr_list_clear(bdaddr_list: &hdev->accept_list);
2718 hci_uuids_clear(hdev);
2719 hci_link_keys_clear(hdev);
2720 hci_smp_ltks_clear(hdev);
2721 hci_smp_irks_clear(hdev);
2722 hci_remote_oob_data_clear(hdev);
2723 hci_adv_instances_clear(hdev);
2724 hci_adv_monitors_clear(hdev);
2725 hci_bdaddr_list_clear(bdaddr_list: &hdev->le_accept_list);
2726 hci_bdaddr_list_clear(bdaddr_list: &hdev->le_resolv_list);
2727 hci_conn_params_clear_all(hdev);
2728 hci_discovery_filter_clear(hdev);
2729 hci_blocked_keys_clear(hdev);
2730 hci_codec_list_clear(codec_list: &hdev->local_codecs);
2731 msft_release(hdev);
2732 hci_dev_unlock(hdev);
2733
2734 ida_destroy(ida: &hdev->unset_handle_ida);
2735 ida_free(&hci_index_ida, id: hdev->id);
2736 kfree_skb(skb: hdev->sent_cmd);
2737 kfree_skb(skb: hdev->req_skb);
2738 kfree_skb(skb: hdev->recv_event);
2739 kfree(objp: hdev);
2740}
2741EXPORT_SYMBOL(hci_release_dev);
2742
2743int hci_register_suspend_notifier(struct hci_dev *hdev)
2744{
2745 int ret = 0;
2746
2747 if (!hdev->suspend_notifier.notifier_call &&
2748 !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2749 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2750 ret = register_pm_notifier(nb: &hdev->suspend_notifier);
2751 }
2752
2753 return ret;
2754}
2755
2756int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2757{
2758 int ret = 0;
2759
2760 if (hdev->suspend_notifier.notifier_call) {
2761 ret = unregister_pm_notifier(nb: &hdev->suspend_notifier);
2762 if (!ret)
2763 hdev->suspend_notifier.notifier_call = NULL;
2764 }
2765
2766 return ret;
2767}
2768
2769/* Cancel ongoing command synchronously:
2770 *
2771 * - Cancel command timer
2772 * - Reset command counter
2773 * - Cancel command request
2774 */
2775static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
2776{
2777 bt_dev_dbg(hdev, "err 0x%2.2x", err);
2778
2779 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
2780 disable_delayed_work_sync(dwork: &hdev->cmd_timer);
2781 disable_delayed_work_sync(dwork: &hdev->ncmd_timer);
2782 } else {
2783 cancel_delayed_work_sync(dwork: &hdev->cmd_timer);
2784 cancel_delayed_work_sync(dwork: &hdev->ncmd_timer);
2785 }
2786
2787 atomic_set(v: &hdev->cmd_cnt, i: 1);
2788
2789 hci_cmd_sync_cancel_sync(hdev, err);
2790}
2791
2792/* Suspend HCI device */
2793int hci_suspend_dev(struct hci_dev *hdev)
2794{
2795 int ret;
2796
2797 bt_dev_dbg(hdev, "");
2798
2799 /* Suspend should only be handled when the device is powered. */
2800 if (!hdev_is_powered(hdev) ||
2801 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2802 return 0;
2803
2804 /* If powering down don't attempt to suspend */
2805 if (mgmt_powering_down(hdev))
2806 return 0;
2807
2808 /* Cancel potentially blocking sync operation before suspend */
2809 hci_cancel_cmd_sync(hdev, EHOSTDOWN);
2810
2811 hci_req_sync_lock(hdev);
2812 ret = hci_suspend_sync(hdev);
2813 hci_req_sync_unlock(hdev);
2814
2815 hci_clear_wake_reason(hdev);
2816 mgmt_suspending(hdev, state: hdev->suspend_state);
2817
2818 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2819 return ret;
2820}
2821EXPORT_SYMBOL(hci_suspend_dev);
2822
2823/* Resume HCI device */
2824int hci_resume_dev(struct hci_dev *hdev)
2825{
2826 int ret;
2827
2828 bt_dev_dbg(hdev, "");
2829
2830 /* Resume should only be handled when the device is powered. */
2831 if (!hdev_is_powered(hdev) ||
2832 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2833 return 0;
2834
2835 /* If powering down don't attempt to resume */
2836 if (mgmt_powering_down(hdev))
2837 return 0;
2838
2839 hci_req_sync_lock(hdev);
2840 ret = hci_resume_sync(hdev);
2841 hci_req_sync_unlock(hdev);
2842
2843 mgmt_resuming(hdev, reason: hdev->wake_reason, bdaddr: &hdev->wake_addr,
2844 addr_type: hdev->wake_addr_type);
2845
2846 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2847 return ret;
2848}
2849EXPORT_SYMBOL(hci_resume_dev);
2850
2851/* Reset HCI device */
2852int hci_reset_dev(struct hci_dev *hdev)
2853{
2854 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2855 struct sk_buff *skb;
2856
2857 skb = bt_skb_alloc(len: 3, GFP_ATOMIC);
2858 if (!skb)
2859 return -ENOMEM;
2860
2861 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2862 skb_put_data(skb, data: hw_err, len: 3);
2863
2864 bt_dev_err(hdev, "Injecting HCI hardware error event");
2865
2866 /* Send Hardware Error to upper stack */
2867 return hci_recv_frame(hdev, skb);
2868}
2869EXPORT_SYMBOL(hci_reset_dev);
2870
2871static u8 hci_dev_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
2872{
2873 if (hdev->classify_pkt_type)
2874 return hdev->classify_pkt_type(hdev, skb);
2875
2876 return hci_skb_pkt_type(skb);
2877}
2878
2879/* Receive frame from HCI drivers */
2880int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2881{
2882 u8 dev_pkt_type;
2883
2884 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2885 && !test_bit(HCI_INIT, &hdev->flags))) {
2886 kfree_skb(skb);
2887 return -ENXIO;
2888 }
2889
2890 /* Check if the driver agrees with the packet type classification */
2891 dev_pkt_type = hci_dev_classify_pkt_type(hdev, skb);
2892 if (hci_skb_pkt_type(skb) != dev_pkt_type) {
2893 hci_skb_pkt_type(skb) = dev_pkt_type;
2894 }
2895
2896 switch (hci_skb_pkt_type(skb)) {
2897 case HCI_EVENT_PKT:
2898 break;
2899 case HCI_ACLDATA_PKT:
2900 /* Detect if ISO packet has been sent as ACL */
2901 if (hci_conn_num(hdev, CIS_LINK) ||
2902 hci_conn_num(hdev, BIS_LINK)) {
2903 __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2904 __u8 type;
2905
2906 type = hci_conn_lookup_type(hdev, hci_handle(handle));
2907 if (type == CIS_LINK || type == BIS_LINK)
2908 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2909 }
2910 break;
2911 case HCI_SCODATA_PKT:
2912 break;
2913 case HCI_ISODATA_PKT:
2914 break;
2915 case HCI_DRV_PKT:
2916 break;
2917 default:
2918 kfree_skb(skb);
2919 return -EINVAL;
2920 }
2921
2922 /* Incoming skb */
2923 bt_cb(skb)->incoming = 1;
2924
2925 /* Time stamp */
2926 __net_timestamp(skb);
2927
2928 skb_queue_tail(list: &hdev->rx_q, newsk: skb);
2929 queue_work(wq: hdev->workqueue, work: &hdev->rx_work);
2930
2931 return 0;
2932}
2933EXPORT_SYMBOL(hci_recv_frame);
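
/* Illustrative sketch, not part of the original file: how a transport
 * driver might feed a received HCI event into hci_recv_frame() above. The
 * function name and buffer handling are hypothetical.
 */
static int example_drv_receive_event(struct hci_dev *hdev, const void *buf,
                                     unsigned int count)
{
        struct sk_buff *skb;

        skb = bt_skb_alloc(count, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
        skb_put_data(skb, buf, count);

        /* hci_recv_frame() consumes the skb on success and on error */
        return hci_recv_frame(hdev, skb);
}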
2934
2935/* Receive diagnostic message from HCI drivers */
2936int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2937{
2938 /* Mark as diagnostic packet */
2939 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2940
2941 /* Time stamp */
2942 __net_timestamp(skb);
2943
2944 skb_queue_tail(list: &hdev->rx_q, newsk: skb);
2945 queue_work(wq: hdev->workqueue, work: &hdev->rx_work);
2946
2947 return 0;
2948}
2949EXPORT_SYMBOL(hci_recv_diag);
2950
2951void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2952{
2953 va_list vargs;
2954
2955 va_start(vargs, fmt);
2956 kfree_const(x: hdev->hw_info);
2957 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, args: vargs);
2958 va_end(vargs);
2959}
2960EXPORT_SYMBOL(hci_set_hw_info);
2961
2962void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2963{
2964 va_list vargs;
2965
2966 va_start(vargs, fmt);
2967 kfree_const(x: hdev->fw_info);
2968 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, args: vargs);
2969 va_end(vargs);
2970}
2971EXPORT_SYMBOL(hci_set_fw_info);
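
/* Illustrative sketch, not part of the original file: drivers typically
 * record hardware and firmware revision strings once they are known. The
 * values and helper name here are made up.
 */
static void example_record_versions(struct hci_dev *hdev)
{
        hci_set_hw_info(hdev, "example-soc rev %u", 2);
        hci_set_fw_info(hdev, "firmware build %s", "1.0.3");
}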
2972
2973/* ---- Interface to upper protocols ---- */
2974
2975int hci_register_cb(struct hci_cb *cb)
2976{
2977 BT_DBG("%p name %s", cb, cb->name);
2978
2979 mutex_lock(&hci_cb_list_lock);
2980 list_add_tail(new: &cb->list, head: &hci_cb_list);
2981 mutex_unlock(lock: &hci_cb_list_lock);
2982
2983 return 0;
2984}
2985EXPORT_SYMBOL(hci_register_cb);
2986
2987int hci_unregister_cb(struct hci_cb *cb)
2988{
2989 BT_DBG("%p name %s", cb, cb->name);
2990
2991 mutex_lock(&hci_cb_list_lock);
2992 list_del(entry: &cb->list);
2993 mutex_unlock(lock: &hci_cb_list_lock);
2994
2995 return 0;
2996}
2997EXPORT_SYMBOL(hci_unregister_cb);
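
/* Illustrative sketch, not part of the original file: how an upper
 * protocol might hook into connection events via the callback list above.
 * The names are hypothetical, and the connect_cfm signature is assumed to
 * be the (conn, status) form used by struct hci_cb in this stack.
 */
static void example_connect_cfm(struct hci_conn *conn, __u8 status)
{
        BT_DBG("conn %p status 0x%2.2x", conn, status);
}

static struct hci_cb example_cb = {
        .name           = "example_proto",
        .connect_cfm    = example_connect_cfm,
};

static int example_proto_init(void)
{
        return hci_register_cb(&example_cb);
}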
2998
2999static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3000{
3001 int err;
3002
3003 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3004 skb->len);
3005
3006 /* Time stamp */
3007 __net_timestamp(skb);
3008
3009 /* Send copy to monitor */
3010 hci_send_to_monitor(hdev, skb);
3011
3012 if (atomic_read(v: &hdev->promisc)) {
3013 /* Send copy to the sockets */
3014 hci_send_to_sock(hdev, skb);
3015 }
3016
3017 /* Get rid of skb owner, prior to sending to the driver. */
3018 skb_orphan(skb);
3019
3020 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3021 kfree_skb(skb);
3022 return -EINVAL;
3023 }
3024
3025 if (hci_skb_pkt_type(skb) == HCI_DRV_PKT) {
3026 /* Intercept the HCI Drv packet here instead of passing it to the
3027 * hdev->send callback.
3028 */
3029 err = hci_drv_process_cmd(hdev, cmd_skb: skb);
3030 kfree_skb(skb);
3031 return err;
3032 }
3033
3034 err = hdev->send(hdev, skb);
3035 if (err < 0) {
3036 bt_dev_err(hdev, "sending frame failed (%d)", err);
3037 kfree_skb(skb);
3038 return err;
3039 }
3040
3041 return 0;
3042}
3043
3044static int hci_send_conn_frame(struct hci_dev *hdev, struct hci_conn *conn,
3045 struct sk_buff *skb)
3046{
3047 hci_conn_tx_queue(conn, skb);
3048 return hci_send_frame(hdev, skb);
3049}
3050
3051/* Send HCI command */
3052int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3053 const void *param)
3054{
3055 struct sk_buff *skb;
3056
3057 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3058
3059 skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
3060 if (!skb) {
3061 bt_dev_err(hdev, "no memory for command");
3062 return -ENOMEM;
3063 }
3064
3065 /* Stand-alone HCI commands must be flagged as
3066 * single-command requests.
3067 */
3068 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3069
3070 skb_queue_tail(list: &hdev->cmd_q, newsk: skb);
3071 queue_work(wq: hdev->workqueue, work: &hdev->cmd_work);
3072
3073 return 0;
3074}
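
/* Illustrative sketch, not part of the original file: queueing stand-alone
 * commands through hci_send_cmd() above. HCI_OP_RESET carries no
 * parameters; HCI_OP_WRITE_SCAN_ENABLE takes a single byte. The helper
 * name is hypothetical.
 */
static int example_queue_commands(struct hci_dev *hdev)
{
        u8 scan = SCAN_PAGE | SCAN_INQUIRY;
        int err;

        err = hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
        if (err)
                return err;

        return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}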
3075
3076int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3077 const void *param)
3078{
3079 struct sk_buff *skb;
3080
3081 if (hci_opcode_ogf(opcode) != 0x3f) {
3082 /* A controller receiving a command shall respond with either
3083 * a Command Status Event or a Command Complete Event.
3084 * Therefore, all standard HCI commands must be sent via the
3085 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3086 * Some vendors do not comply with this rule for vendor-specific
3087 * commands and do not return any event. We want to support
3088 * unresponded commands for such cases only.
3089 */
3090 bt_dev_err(hdev, "unresponded command not supported");
3091 return -EINVAL;
3092 }
3093
3094 skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL);
3095 if (!skb) {
3096 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3097 opcode);
3098 return -ENOMEM;
3099 }
3100
3101 hci_send_frame(hdev, skb);
3102
3103 return 0;
3104}
3105EXPORT_SYMBOL(__hci_cmd_send);
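
/* Illustrative sketch, not part of the original file: __hci_cmd_send()
 * above only accepts vendor-specific (OGF 0x3f) commands, for controllers
 * that never answer them with an event. The OCF and payload below are
 * hypothetical.
 */
static int example_send_vendor_cmd(struct hci_dev *hdev)
{
        u8 param[] = { 0x01 };

        return __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
                              sizeof(param), param);
}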
3106
3107/* Get data from the previously sent command */
3108static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
3109{
3110 struct hci_command_hdr *hdr;
3111
3112 if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
3113 return NULL;
3114
3115 hdr = (void *)skb->data;
3116
3117 if (hdr->opcode != cpu_to_le16(opcode))
3118 return NULL;
3119
3120 return skb->data + HCI_COMMAND_HDR_SIZE;
3121}
3122
3123/* Get data from the previously sent command */
3124void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3125{
3126 void *data;
3127
3128 /* Check if opcode matches last sent command */
3129 data = hci_cmd_data(skb: hdev->sent_cmd, opcode);
3130 if (!data)
3131 /* Check if opcode matches last request */
3132 data = hci_cmd_data(skb: hdev->req_skb, opcode);
3133
3134 return data;
3135}
3136
3137/* Get data from last received event */
3138void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3139{
3140 struct hci_event_hdr *hdr;
3141 int offset;
3142
3143 if (!hdev->recv_event)
3144 return NULL;
3145
3146 hdr = (void *)hdev->recv_event->data;
3147 offset = sizeof(*hdr);
3148
3149 if (hdr->evt != event) {
3150 /* In the case of an LE meta event, check whether the subevent matches */
3151 if (hdr->evt == HCI_EV_LE_META) {
3152 struct hci_ev_le_meta *ev;
3153
3154 ev = (void *)hdev->recv_event->data + offset;
3155 offset += sizeof(*ev);
3156 if (ev->subevent == event)
3157 goto found;
3158 }
3159 return NULL;
3160 }
3161
3162found:
3163 bt_dev_dbg(hdev, "event 0x%2.2x", event);
3164
3165 return hdev->recv_event->data + offset;
3166}
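
/* Illustrative sketch, not part of the original file: checking whether the
 * most recently received event (or LE meta subevent) matches a given code.
 * The helper name is hypothetical.
 */
static bool example_last_event_matches(struct hci_dev *hdev, __u8 event)
{
        return hci_recv_event_data(hdev, event) != NULL;
}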
3167
3168/* Send ACL data */
3169static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3170{
3171 struct hci_acl_hdr *hdr;
3172 int len = skb->len;
3173
3174 skb_push(skb, HCI_ACL_HDR_SIZE);
3175 skb_reset_transport_header(skb);
3176 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3177 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3178 hdr->dlen = cpu_to_le16(len);
3179}
3180
3181static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3182 struct sk_buff *skb, __u16 flags)
3183{
3184 struct hci_conn *conn = chan->conn;
3185 struct hci_dev *hdev = conn->hdev;
3186 struct sk_buff *list;
3187
3188 skb->len = skb_headlen(skb);
3189 skb->data_len = 0;
3190
3191 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3192
3193 hci_add_acl_hdr(skb, handle: conn->handle, flags);
3194
3195 list = skb_shinfo(skb)->frag_list;
3196 if (!list) {
3197 /* Non fragmented */
3198 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3199
3200 skb_queue_tail(list: queue, newsk: skb);
3201 } else {
3202 /* Fragmented */
3203 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3204
3205 skb_shinfo(skb)->frag_list = NULL;
3206
3207 /* Queue all fragments atomically. We need to use spin_lock_bh
3208 * here because of 6LoWPAN links, where this function is called
3209 * from softirq context and using a normal spin lock could cause
3210 * deadlocks.
3211 */
3212 spin_lock_bh(lock: &queue->lock);
3213
3214 __skb_queue_tail(list: queue, newsk: skb);
3215
3216 flags &= ~ACL_START;
3217 flags |= ACL_CONT;
3218 do {
3219 skb = list; list = list->next;
3220
3221 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3222 hci_add_acl_hdr(skb, handle: conn->handle, flags);
3223
3224 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3225
3226 __skb_queue_tail(list: queue, newsk: skb);
3227 } while (list);
3228
3229 spin_unlock_bh(lock: &queue->lock);
3230 }
3231}
3232
3233void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3234{
3235 struct hci_dev *hdev = chan->conn->hdev;
3236
3237 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3238
3239 hci_queue_acl(chan, queue: &chan->data_q, skb, flags);
3240
3241 queue_work(wq: hdev->workqueue, work: &hdev->tx_work);
3242}
3243
3244/* Send SCO data */
3245void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3246{
3247 struct hci_dev *hdev = conn->hdev;
3248 struct hci_sco_hdr hdr;
3249
3250 BT_DBG("%s len %d", hdev->name, skb->len);
3251
3252 hdr.handle = cpu_to_le16(conn->handle);
3253 hdr.dlen = skb->len;
3254
3255 skb_push(skb, HCI_SCO_HDR_SIZE);
3256 skb_reset_transport_header(skb);
3257 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3258
3259 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3260
3261 skb_queue_tail(list: &conn->data_q, newsk: skb);
3262 queue_work(wq: hdev->workqueue, work: &hdev->tx_work);
3263}
3264
3265/* Send ISO data */
3266static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3267{
3268 struct hci_iso_hdr *hdr;
3269 int len = skb->len;
3270
3271 skb_push(skb, HCI_ISO_HDR_SIZE);
3272 skb_reset_transport_header(skb);
3273 hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3274 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3275 hdr->dlen = cpu_to_le16(len);
3276}
3277
3278static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3279 struct sk_buff *skb)
3280{
3281 struct hci_dev *hdev = conn->hdev;
3282 struct sk_buff *list;
3283 __u16 flags;
3284
3285 skb->len = skb_headlen(skb);
3286 skb->data_len = 0;
3287
3288 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3289
3290 list = skb_shinfo(skb)->frag_list;
3291
3292 flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3293 hci_add_iso_hdr(skb, handle: conn->handle, flags);
3294
3295 if (!list) {
3296 /* Non fragmented */
3297 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3298
3299 skb_queue_tail(list: queue, newsk: skb);
3300 } else {
3301 /* Fragmented */
3302 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3303
3304 skb_shinfo(skb)->frag_list = NULL;
3305
3306 __skb_queue_tail(list: queue, newsk: skb);
3307
3308 do {
3309 skb = list; list = list->next;
3310
3311 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3312 flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3313 0x00);
3314 hci_add_iso_hdr(skb, handle: conn->handle, flags);
3315
3316 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3317
3318 __skb_queue_tail(list: queue, newsk: skb);
3319 } while (list);
3320 }
3321}
3322
3323void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3324{
3325 struct hci_dev *hdev = conn->hdev;
3326
3327 BT_DBG("%s len %d", hdev->name, skb->len);
3328
3329 hci_queue_iso(conn, queue: &conn->data_q, skb);
3330
3331 queue_work(wq: hdev->workqueue, work: &hdev->tx_work);
3332}
3333
3334/* ---- HCI TX task (outgoing data) ---- */
3335
3336/* HCI Connection scheduler */
3337static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3338{
3339 struct hci_dev *hdev;
3340 int cnt, q;
3341
3342 if (!conn) {
3343 *quote = 0;
3344 return;
3345 }
3346
3347 hdev = conn->hdev;
3348
3349 switch (conn->type) {
3350 case ACL_LINK:
3351 cnt = hdev->acl_cnt;
3352 break;
3353 case SCO_LINK:
3354 case ESCO_LINK:
3355 cnt = hdev->sco_cnt;
3356 break;
3357 case LE_LINK:
3358 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3359 break;
3360 case CIS_LINK:
3361 case BIS_LINK:
3362 cnt = hdev->iso_mtu ? hdev->iso_cnt :
3363 hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3364 break;
3365 default:
3366 cnt = 0;
3367 bt_dev_err(hdev, "unknown link type %d", conn->type);
3368 }
3369
3370 q = cnt / num;
3371 *quote = q ? q : 1;
3372}
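
/* Worked example (illustrative, not part of the original file): with
 * cnt == 8 free ACL buffers shared by num == 3 busy ACL connections, each
 * connection gets a quote of 8 / 3 == 2 packets per scheduling round; if
 * cnt were smaller than num, the quote would be clamped to 1 so every
 * connection still makes progress.
 */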
3373
3374static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3375 __u8 type2, int *quote)
3376{
3377 struct hci_conn_hash *h = &hdev->conn_hash;
3378 struct hci_conn *conn = NULL, *c;
3379 unsigned int num = 0, min = ~0;
3380
3381 /* We don't have to lock the device here. Connections are always
3382 * added and removed with the TX task disabled. */
3383
3384 rcu_read_lock();
3385
3386 list_for_each_entry_rcu(c, &h->list, list) {
3387 if ((c->type != type && c->type != type2) ||
3388 skb_queue_empty(list: &c->data_q))
3389 continue;
3390
3391 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3392 continue;
3393
3394 num++;
3395
3396 if (c->sent < min) {
3397 min = c->sent;
3398 conn = c;
3399 }
3400
3401 if (hci_conn_num(hdev, type) == num)
3402 break;
3403 }
3404
3405 rcu_read_unlock();
3406
3407 hci_quote_sent(conn, num, quote);
3408
3409 BT_DBG("conn %p quote %d", conn, *quote);
3410 return conn;
3411}
3412
3413static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3414{
3415 struct hci_conn_hash *h = &hdev->conn_hash;
3416 struct hci_conn *c;
3417
3418 bt_dev_err(hdev, "link tx timeout");
3419
3420 rcu_read_lock();
3421
3422 /* Kill stalled connections */
3423 list_for_each_entry_rcu(c, &h->list, list) {
3424 if (c->type == type && c->sent) {
3425 bt_dev_err(hdev, "killing stalled connection %pMR",
3426 &c->dst);
3427 /* hci_disconnect might sleep, so, we have to release
3428 * the RCU read lock before calling it.
3429 */
3430 rcu_read_unlock();
3431 hci_disconnect(conn: c, HCI_ERROR_REMOTE_USER_TERM);
3432 rcu_read_lock();
3433 }
3434 }
3435
3436 rcu_read_unlock();
3437}
3438
3439static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3440 int *quote)
3441{
3442 struct hci_conn_hash *h = &hdev->conn_hash;
3443 struct hci_chan *chan = NULL;
3444 unsigned int num = 0, min = ~0, cur_prio = 0;
3445 struct hci_conn *conn;
3446 int conn_num = 0;
3447
3448 BT_DBG("%s", hdev->name);
3449
3450 rcu_read_lock();
3451
3452 list_for_each_entry_rcu(conn, &h->list, list) {
3453 struct hci_chan *tmp;
3454
3455 if (conn->type != type)
3456 continue;
3457
3458 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3459 continue;
3460
3461 conn_num++;
3462
3463 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3464 struct sk_buff *skb;
3465
3466 if (skb_queue_empty(list: &tmp->data_q))
3467 continue;
3468
3469 skb = skb_peek(list_: &tmp->data_q);
3470 if (skb->priority < cur_prio)
3471 continue;
3472
3473 if (skb->priority > cur_prio) {
3474 num = 0;
3475 min = ~0;
3476 cur_prio = skb->priority;
3477 }
3478
3479 num++;
3480
3481 if (conn->sent < min) {
3482 min = conn->sent;
3483 chan = tmp;
3484 }
3485 }
3486
3487 if (hci_conn_num(hdev, type) == conn_num)
3488 break;
3489 }
3490
3491 rcu_read_unlock();
3492
3493 if (!chan)
3494 return NULL;
3495
3496 hci_quote_sent(conn: chan->conn, num, quote);
3497
3498 BT_DBG("chan %p quote %d", chan, *quote);
3499 return chan;
3500}
3501
3502static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3503{
3504 struct hci_conn_hash *h = &hdev->conn_hash;
3505 struct hci_conn *conn;
3506 int num = 0;
3507
3508 BT_DBG("%s", hdev->name);
3509
3510 rcu_read_lock();
3511
3512 list_for_each_entry_rcu(conn, &h->list, list) {
3513 struct hci_chan *chan;
3514
3515 if (conn->type != type)
3516 continue;
3517
3518 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3519 continue;
3520
3521 num++;
3522
3523 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3524 struct sk_buff *skb;
3525
3526 if (chan->sent) {
3527 chan->sent = 0;
3528 continue;
3529 }
3530
3531 if (skb_queue_empty(list: &chan->data_q))
3532 continue;
3533
3534 skb = skb_peek(list_: &chan->data_q);
3535 if (skb->priority >= HCI_PRIO_MAX - 1)
3536 continue;
3537
3538 skb->priority = HCI_PRIO_MAX - 1;
3539
3540 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3541 skb->priority);
3542 }
3543
3544 if (hci_conn_num(hdev, type) == num)
3545 break;
3546 }
3547
3548 rcu_read_unlock();
3549
3550}
3551
3552static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3553{
3554 unsigned long last_tx;
3555
3556 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3557 return;
3558
3559 switch (type) {
3560 case LE_LINK:
3561 last_tx = hdev->le_last_tx;
3562 break;
3563 default:
3564 last_tx = hdev->acl_last_tx;
3565 break;
3566 }
3567
3568 /* tx timeout must be longer than maximum link supervision timeout
3569 * (40.9 seconds)
3570 */
3571 if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3572 hci_link_tx_to(hdev, type);
3573}
3574
3575/* Schedule SCO */
3576static void hci_sched_sco(struct hci_dev *hdev, __u8 type)
3577{
3578 struct hci_conn *conn;
3579 struct sk_buff *skb;
3580 int quote, *cnt;
3581 unsigned int pkts = hdev->sco_pkts;
3582
3583 bt_dev_dbg(hdev, "type %u", type);
3584
3585 if (!hci_conn_num(hdev, type) || !pkts)
3586 return;
3587
3588 /* If flow control has not been enabled, use sco_pkts, which limits
3589 * the number of buffers sent in a row.
3590 */
3591 if (!hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL))
3592 cnt = &pkts;
3593 else
3594 cnt = &hdev->sco_cnt;
3595
3596 while (*cnt && (conn = hci_low_sent(hdev, type, type2: type, quote: &quote))) {
3597 while (quote-- && (skb = skb_dequeue(list: &conn->data_q))) {
3598 BT_DBG("skb %p len %d", skb, skb->len);
3599 hci_send_conn_frame(hdev, conn, skb);
3600
3601 conn->sent++;
3602 if (conn->sent == ~0)
3603 conn->sent = 0;
3604 (*cnt)--;
3605 }
3606 }
3607
3608 /* Reschedule if all packets were sent and flow control is not enabled,
3609 * as there could be more packets queued that could not be sent; since
3610 * no HCI_EV_NUM_COMP_PKTS event will be generated, the reschedule has
3611 * to be forced.
3612 */
3613 if (!pkts && !hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL))
3614 queue_work(wq: hdev->workqueue, work: &hdev->tx_work);
3615}
3616
3617static void hci_sched_acl_pkt(struct hci_dev *hdev)
3618{
3619 unsigned int cnt = hdev->acl_cnt;
3620 struct hci_chan *chan;
3621 struct sk_buff *skb;
3622 int quote;
3623
3624 __check_timeout(hdev, cnt, ACL_LINK);
3625
3626 while (hdev->acl_cnt &&
3627 (chan = hci_chan_sent(hdev, ACL_LINK, quote: &quote))) {
3628 u32 priority = (skb_peek(list_: &chan->data_q))->priority;
3629 while (quote-- && (skb = skb_peek(list_: &chan->data_q))) {
3630 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3631 skb->len, skb->priority);
3632
3633 /* Stop if priority has changed */
3634 if (skb->priority < priority)
3635 break;
3636
3637 skb = skb_dequeue(list: &chan->data_q);
3638
3639 hci_conn_enter_active_mode(conn: chan->conn,
3640 bt_cb(skb)->force_active);
3641
3642 hci_send_conn_frame(hdev, conn: chan->conn, skb);
3643 hdev->acl_last_tx = jiffies;
3644
3645 hdev->acl_cnt--;
3646 chan->sent++;
3647 chan->conn->sent++;
3648
3649 /* Send pending SCO packets right away */
3650 hci_sched_sco(hdev, SCO_LINK);
3651 hci_sched_sco(hdev, ESCO_LINK);
3652 }
3653 }
3654
3655 if (cnt != hdev->acl_cnt)
3656 hci_prio_recalculate(hdev, ACL_LINK);
3657}
3658
3659static void hci_sched_acl(struct hci_dev *hdev)
3660{
3661 BT_DBG("%s", hdev->name);
3662
3663 /* No ACL link over BR/EDR controller */
3664 if (!hci_conn_num(hdev, ACL_LINK))
3665 return;
3666
3667 hci_sched_acl_pkt(hdev);
3668}
3669
3670static void hci_sched_le(struct hci_dev *hdev)
3671{
3672 struct hci_chan *chan;
3673 struct sk_buff *skb;
3674 int quote, *cnt, tmp;
3675
3676 BT_DBG("%s", hdev->name);
3677
3678 if (!hci_conn_num(hdev, LE_LINK))
3679 return;
3680
3681 cnt = hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3682
3683 __check_timeout(hdev, cnt: *cnt, LE_LINK);
3684
3685 tmp = *cnt;
3686 while (*cnt && (chan = hci_chan_sent(hdev, LE_LINK, quote: &quote))) {
3687 u32 priority = (skb_peek(list_: &chan->data_q))->priority;
3688 while (quote-- && (skb = skb_peek(list_: &chan->data_q))) {
3689 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3690 skb->len, skb->priority);
3691
3692 /* Stop if priority has changed */
3693 if (skb->priority < priority)
3694 break;
3695
3696 skb = skb_dequeue(list: &chan->data_q);
3697
3698 hci_send_conn_frame(hdev, conn: chan->conn, skb);
3699 hdev->le_last_tx = jiffies;
3700
3701 (*cnt)--;
3702 chan->sent++;
3703 chan->conn->sent++;
3704
3705 /* Send pending SCO packets right away */
3706 hci_sched_sco(hdev, SCO_LINK);
3707 hci_sched_sco(hdev, ESCO_LINK);
3708 }
3709 }
3710
3711 if (*cnt != tmp)
3712 hci_prio_recalculate(hdev, LE_LINK);
3713}
3714
3715/* Schedule CIS */
3716static void hci_sched_iso(struct hci_dev *hdev)
3717{
3718 struct hci_conn *conn;
3719 struct sk_buff *skb;
3720 int quote, *cnt;
3721
3722 BT_DBG("%s", hdev->name);
3723
3724 if (!hci_conn_num(hdev, CIS_LINK) &&
3725 !hci_conn_num(hdev, BIS_LINK))
3726 return;
3727
3728 cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3729 hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3730 while (*cnt && (conn = hci_low_sent(hdev, CIS_LINK, BIS_LINK,
3731 quote: &quote))) {
3732 while (quote-- && (skb = skb_dequeue(list: &conn->data_q))) {
3733 BT_DBG("skb %p len %d", skb, skb->len);
3734 hci_send_conn_frame(hdev, conn, skb);
3735
3736 conn->sent++;
3737 if (conn->sent == ~0)
3738 conn->sent = 0;
3739 (*cnt)--;
3740 }
3741 }
3742}
3743
3744static void hci_tx_work(struct work_struct *work)
3745{
3746 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3747 struct sk_buff *skb;
3748
3749 BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3750 hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3751
3752 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3753 /* Schedule queues and send stuff to HCI driver */
3754 hci_sched_sco(hdev, SCO_LINK);
3755 hci_sched_sco(hdev, ESCO_LINK);
3756 hci_sched_iso(hdev);
3757 hci_sched_acl(hdev);
3758 hci_sched_le(hdev);
3759 }
3760
3761 /* Send next queued raw (unknown type) packet */
3762 while ((skb = skb_dequeue(list: &hdev->raw_q)))
3763 hci_send_frame(hdev, skb);
3764}
3765
3766/* ----- HCI RX task (incoming data processing) ----- */
3767
3768/* ACL data packet */
3769static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3770{
3771 struct hci_acl_hdr *hdr;
3772 struct hci_conn *conn;
3773 __u16 handle, flags;
3774
3775 hdr = skb_pull_data(skb, len: sizeof(*hdr));
3776 if (!hdr) {
3777 bt_dev_err(hdev, "ACL packet too small");
3778 goto drop;
3779 }
3780
3781 handle = __le16_to_cpu(hdr->handle);
3782 flags = hci_flags(handle);
3783 handle = hci_handle(handle);
3784
3785 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3786 handle, flags);
3787
3788 hdev->stat.acl_rx++;
3789
3790 hci_dev_lock(hdev);
3791 conn = hci_conn_hash_lookup_handle(hdev, handle);
3792 hci_dev_unlock(hdev);
3793
3794 if (conn) {
3795 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3796
3797 /* Send to upper protocol */
3798 l2cap_recv_acldata(hcon: conn, skb, flags);
3799 return;
3800 } else {
3801 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3802 handle);
3803 }
3804
3805drop:
3806 kfree_skb(skb);
3807}
3808
3809/* SCO data packet */
3810static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3811{
3812 struct hci_sco_hdr *hdr;
3813 struct hci_conn *conn;
3814 __u16 handle, flags;
3815
3816 hdr = skb_pull_data(skb, len: sizeof(*hdr));
3817 if (!hdr) {
3818 bt_dev_err(hdev, "SCO packet too small");
3819 goto drop;
3820 }
3821
3822 handle = __le16_to_cpu(hdr->handle);
3823 flags = hci_flags(handle);
3824 handle = hci_handle(handle);
3825
3826 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3827 handle, flags);
3828
3829 hdev->stat.sco_rx++;
3830
3831 hci_dev_lock(hdev);
3832 conn = hci_conn_hash_lookup_handle(hdev, handle);
3833 hci_dev_unlock(hdev);
3834
3835 if (conn) {
3836 /* Send to upper protocol */
3837 hci_skb_pkt_status(skb) = flags & 0x03;
3838 sco_recv_scodata(hcon: conn, skb);
3839 return;
3840 } else {
3841 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3842 handle);
3843 }
3844
3845drop:
3846 kfree_skb(skb);
3847}
3848
3849static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3850{
3851 struct hci_iso_hdr *hdr;
3852 struct hci_conn *conn;
3853 __u16 handle, flags;
3854
3855 hdr = skb_pull_data(skb, len: sizeof(*hdr));
3856 if (!hdr) {
3857 bt_dev_err(hdev, "ISO packet too small");
3858 goto drop;
3859 }
3860
3861 handle = __le16_to_cpu(hdr->handle);
3862 flags = hci_flags(handle);
3863 handle = hci_handle(handle);
3864
3865 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3866 handle, flags);
3867
3868 hci_dev_lock(hdev);
3869 conn = hci_conn_hash_lookup_handle(hdev, handle);
3870 hci_dev_unlock(hdev);
3871
3872 if (!conn) {
3873 bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3874 handle);
3875 goto drop;
3876 }
3877
3878 /* Send to upper protocol */
3879 iso_recv(hcon: conn, skb, flags);
3880 return;
3881
3882drop:
3883 kfree_skb(skb);
3884}
3885
3886static bool hci_req_is_complete(struct hci_dev *hdev)
3887{
3888 struct sk_buff *skb;
3889
3890 skb = skb_peek(list_: &hdev->cmd_q);
3891 if (!skb)
3892 return true;
3893
3894 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3895}
3896
3897static void hci_resend_last(struct hci_dev *hdev)
3898{
3899 struct hci_command_hdr *sent;
3900 struct sk_buff *skb;
3901 u16 opcode;
3902
3903 if (!hdev->sent_cmd)
3904 return;
3905
3906 sent = (void *) hdev->sent_cmd->data;
3907 opcode = __le16_to_cpu(sent->opcode);
3908 if (opcode == HCI_OP_RESET)
3909 return;
3910
3911 skb = skb_clone(skb: hdev->sent_cmd, GFP_KERNEL);
3912 if (!skb)
3913 return;
3914
3915 skb_queue_head(list: &hdev->cmd_q, newsk: skb);
3916 queue_work(wq: hdev->workqueue, work: &hdev->cmd_work);
3917}
3918
3919void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3920 hci_req_complete_t *req_complete,
3921 hci_req_complete_skb_t *req_complete_skb)
3922{
3923 struct sk_buff *skb;
3924 unsigned long flags;
3925
3926 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3927
3928 /* If the completed command doesn't match the last one that was
3929 * sent we need to do special handling of it.
3930 */
3931 if (!hci_sent_cmd_data(hdev, opcode)) {
3932 /* Some CSR based controllers generate a spontaneous
3933 * reset complete event during init and any pending
3934 * command will never be completed. In such a case we
3935 * need to resend whatever was the last sent
3936 * command.
3937 */
3938 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3939 hci_resend_last(hdev);
3940
3941 return;
3942 }
3943
3944 /* If we reach this point this event matches the last command sent */
3945 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
3946
3947 /* If the command succeeded and there are still more commands in
3948 * this request, the request is not yet complete.
3949 */
3950 if (!status && !hci_req_is_complete(hdev))
3951 return;
3952
3953 skb = hdev->req_skb;
3954
3955 /* If this was the last command in a request the complete
3956 * callback would be found in hdev->req_skb instead of the
3957 * command queue (hdev->cmd_q).
3958 */
3959 if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
3960 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3961 return;
3962 }
3963
3964 if (skb && bt_cb(skb)->hci.req_complete) {
3965 *req_complete = bt_cb(skb)->hci.req_complete;
3966 return;
3967 }
3968
3969 /* Remove all pending commands belonging to this request */
3970 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3971 while ((skb = __skb_dequeue(list: &hdev->cmd_q))) {
3972 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
3973 __skb_queue_head(list: &hdev->cmd_q, newsk: skb);
3974 break;
3975 }
3976
3977 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
3978 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
3979 else
3980 *req_complete = bt_cb(skb)->hci.req_complete;
3981 dev_kfree_skb_irq(skb);
3982 }
3983 spin_unlock_irqrestore(lock: &hdev->cmd_q.lock, flags);
3984}
3985
3986static void hci_rx_work(struct work_struct *work)
3987{
3988 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3989 struct sk_buff *skb;
3990
3991 BT_DBG("%s", hdev->name);
3992
3993 /* The kcov_remote functions are used to collect packet parsing
3994 * coverage information from this background thread and to associate
3995 * the coverage with the thread of the syscall that originally
3996 * injected the packet. This helps with fuzzing the kernel.
3997 */
3998 for (; (skb = skb_dequeue(list: &hdev->rx_q)); kcov_remote_stop()) {
3999 kcov_remote_start_common(id: skb_get_kcov_handle(skb));
4000
4001 /* Send copy to monitor */
4002 hci_send_to_monitor(hdev, skb);
4003
4004 if (atomic_read(v: &hdev->promisc)) {
4005 /* Send copy to the sockets */
4006 hci_send_to_sock(hdev, skb);
4007 }
4008
4009 /* If the device has been opened in HCI_USER_CHANNEL,
4010 * userspace has exclusive access to the device.
4011 * While the device is in HCI_INIT, we still need to pass
4012 * the data packets on to the driver so that it can
4013 * complete its setup().
4014 */
4015 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4016 !test_bit(HCI_INIT, &hdev->flags)) {
4017 kfree_skb(skb);
4018 continue;
4019 }
4020
4021 if (test_bit(HCI_INIT, &hdev->flags)) {
4022 /* Don't process data packets in this state. */
4023 switch (hci_skb_pkt_type(skb)) {
4024 case HCI_ACLDATA_PKT:
4025 case HCI_SCODATA_PKT:
4026 case HCI_ISODATA_PKT:
4027 kfree_skb(skb);
4028 continue;
4029 }
4030 }
4031
4032 /* Process frame */
4033 switch (hci_skb_pkt_type(skb)) {
4034 case HCI_EVENT_PKT:
4035 BT_DBG("%s Event packet", hdev->name);
4036 hci_event_packet(hdev, skb);
4037 break;
4038
4039 case HCI_ACLDATA_PKT:
4040 BT_DBG("%s ACL data packet", hdev->name);
4041 hci_acldata_packet(hdev, skb);
4042 break;
4043
4044 case HCI_SCODATA_PKT:
4045 BT_DBG("%s SCO data packet", hdev->name);
4046 hci_scodata_packet(hdev, skb);
4047 break;
4048
4049 case HCI_ISODATA_PKT:
4050 BT_DBG("%s ISO data packet", hdev->name);
4051 hci_isodata_packet(hdev, skb);
4052 break;
4053
4054 default:
4055 kfree_skb(skb);
4056 break;
4057 }
4058 }
4059}
4060
4061static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
4062{
4063 int err;
4064
4065 bt_dev_dbg(hdev, "skb %p", skb);
4066
4067 kfree_skb(skb: hdev->sent_cmd);
4068
4069 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4070 if (!hdev->sent_cmd) {
4071 skb_queue_head(list: &hdev->cmd_q, newsk: skb);
4072 queue_work(wq: hdev->workqueue, work: &hdev->cmd_work);
4073 return;
4074 }
4075
4076 if (hci_skb_opcode(skb) != HCI_OP_NOP) {
4077 err = hci_send_frame(hdev, skb);
4078 if (err < 0) {
4079 hci_cmd_sync_cancel_sync(hdev, err: -err);
4080 return;
4081 }
4082 atomic_dec(v: &hdev->cmd_cnt);
4083 }
4084
4085 if (hdev->req_status == HCI_REQ_PEND &&
4086 !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
4087 kfree_skb(skb: hdev->req_skb);
4088 hdev->req_skb = skb_clone(skb: hdev->sent_cmd, GFP_KERNEL);
4089 }
4090}
4091
4092static void hci_cmd_work(struct work_struct *work)
4093{
4094 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4095 struct sk_buff *skb;
4096
4097 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4098 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4099
4100 /* Send queued commands */
4101 if (atomic_read(v: &hdev->cmd_cnt)) {
4102 skb = skb_dequeue(list: &hdev->cmd_q);
4103 if (!skb)
4104 return;
4105
4106 hci_send_cmd_sync(hdev, skb);
4107
4108 rcu_read_lock();
4109 if (test_bit(HCI_RESET, &hdev->flags) ||
4110 hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4111 cancel_delayed_work(dwork: &hdev->cmd_timer);
4112 else
4113 queue_delayed_work(wq: hdev->workqueue, dwork: &hdev->cmd_timer,
4114 HCI_CMD_TIMEOUT);
4115 rcu_read_unlock();
4116 }
4117}
4118
