// SPDX-License-Identifier: GPL-2.0
/*
 * BlueZ - Bluetooth protocol stack for Linux
 *
 * Copyright (C) 2021 Intel Corporation
 * Copyright 2023 NXP
 */

#include <linux/property.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "hci_codec.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "eir.h"
#include "msft.h"
#include "aosp.h"
#include "leds.h"

static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	bt_dev_dbg(hdev, "result 0x%2.2x", result);

	if (hdev->req_status != HCI_REQ_PEND)
		return;

	hdev->req_result = result;
	hdev->req_status = HCI_REQ_DONE;

	/* Free the request command so it is not used as the response */
	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;

	if (skb) {
		struct sock *sk = hci_skb_sk(skb);

		/* Drop sk reference if set */
		if (sk)
			sock_put(sk);

		hdev->req_rsp = skb_get(skb);
	}

	wake_up_interruptible(&hdev->req_wait_q);
}

struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, u32 plen,
				   const void *param, struct sock *sk)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	bt_dev_dbg(hdev, "skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	/* Grab a reference if command needs to be associated with a sock (e.g.
	 * likely mgmt socket that initiated the command).
	 */
	if (sk) {
		hci_skb_sk(skb) = sk;
		sock_hold(sk);
	}

	return skb;
}

static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
			     const void *param, u8 event, struct sock *sk)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	hci_skb_event(skb) = event;

	skb_queue_tail(&req->cmd_q, skb);
}

static int hci_req_sync_run(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete;
	bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

static void hci_request_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout,
				  struct sock *sk)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode);

	hci_request_init(&req, hdev);

	hci_cmd_sync_add(&req, opcode, plen, param, event, sk);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_sync_run(&req);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = 0;
	hdev->req_result = 0;
	skb = hdev->req_rsp;
	hdev->req_rsp = NULL;

	bt_dev_dbg(hdev, "end: err %d", err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* If the command returns a status event, skb will be set to NULL as
	 * there are no parameters.
	 */
	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_sk);

/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Send HCI command and wait for command complete event */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	hci_req_sync_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
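
/* Illustrative usage sketch (added for explanation, not part of the original
 * file): sending a command with hci_cmd_sync() and consuming the Command
 * Complete parameters. The caller owns the returned skb and must free it.
 * The opcode shown (Read BD_ADDR) is just an example choice.
 */
#if 0
static int example_read_bd_addr(struct hci_dev *hdev, bdaddr_t *addr)
{
	struct hci_rp_read_bd_addr *rp;
	struct sk_buff *skb;

	skb = hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
			   HCI_CMD_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* skb->data holds the return parameters, status byte first */
	rp = (struct hci_rp_read_bd_addr *)skb->data;
	if (skb->len < sizeof(*rp) || rp->status) {
		kfree_skb(skb);
		return -EIO;
	}

	bacpy(addr, &rp->bdaddr);
	kfree_skb(skb);

	return 0;
}
#endif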

/* This function requires the caller holds hdev->req_lock. */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout,
				 NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

/* This function requires the caller holds hdev->req_lock. */
int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u8 event, u32 timeout,
			     struct sock *sk)
{
	struct sk_buff *skb;
	u8 status;

	skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);

	/* If the command returns a status event, skb will be set to
	 * ERR_PTR(-ENODATA).
	 */
	if (skb == ERR_PTR(-ENODATA))
		return 0;

	if (IS_ERR(skb)) {
		if (!event)
			bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode,
				   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	status = skb->data[0];

	kfree_skb(skb);

	return status;
}
EXPORT_SYMBOL(__hci_cmd_sync_status_sk);

int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
			  const void *param, u32 timeout)
{
	return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout,
					NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync_status);

int hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
			const void *param, u32 timeout)
{
	int err;

	hci_req_sync_lock(hdev);
	err = __hci_cmd_sync_status(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return err;
}
EXPORT_SYMBOL(hci_cmd_sync_status);
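
/* Illustrative sketch (added for explanation, not part of the original file):
 * for commands where only the outcome matters, hci_cmd_sync_status() avoids
 * the skb handling entirely. A nonzero return indicates failure, either as a
 * positive status byte from the response or a negative kernel errno.
 */
#if 0
static int example_write_scan_enable(struct hci_dev *hdev, u8 scan)
{
	int err;

	err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
				  sizeof(scan), &scan, HCI_CMD_TIMEOUT);
	if (err)
		bt_dev_err(hdev, "scan enable failed: %d", err);

	return err;
}
#endif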

static void hci_cmd_sync_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);

	bt_dev_dbg(hdev, "");

	/* Dequeue all entries and run them */
	while (1) {
		struct hci_cmd_sync_work_entry *entry;

		mutex_lock(&hdev->cmd_sync_work_lock);
		entry = list_first_entry_or_null(&hdev->cmd_sync_work_list,
						 struct hci_cmd_sync_work_entry,
						 list);
		if (entry)
			list_del(&entry->list);
		mutex_unlock(&hdev->cmd_sync_work_lock);

		if (!entry)
			break;

		bt_dev_dbg(hdev, "entry %p", entry);

		if (entry->func) {
			int err;

			hci_req_sync_lock(hdev);
			err = entry->func(hdev, entry->data);
			if (entry->destroy)
				entry->destroy(hdev, entry->data, err);
			hci_req_sync_unlock(hdev);
		}

		kfree(entry);
	}
}

static void hci_cmd_sync_cancel_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_cancel_work);

	cancel_delayed_work_sync(&hdev->cmd_timer);
	cancel_delayed_work_sync(&hdev->ncmd_timer);
	atomic_set(&hdev->cmd_cnt, 1);

	wake_up_interruptible(&hdev->req_wait_q);
}

static int hci_scan_disable_sync(struct hci_dev *hdev);
static int scan_disable_sync(struct hci_dev *hdev, void *data)
{
	return hci_scan_disable_sync(hdev);
}

static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data)
{
	return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN, 0);
}

static void le_scan_disable(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	int status;

	bt_dev_dbg(hdev, "");
	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		goto _return;

	status = hci_cmd_sync_queue(hdev, scan_disable_sync, NULL, NULL);
	if (status) {
		bt_dev_err(hdev, "failed to disable LE scan: %d", status);
		goto _return;
	}

	/* If we were running LE only scan, change discovery state. If
	 * we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery,
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If we will resolve remote device name, do not change
	 * discovery state.
	 */

	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		goto _return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		goto _return;
	}

	status = hci_cmd_sync_queue(hdev, interleaved_inquiry_sync, NULL, NULL);
	if (status) {
		bt_dev_err(hdev, "inquiry failed: status %d", status);
		goto discov_stopped;
	}

	goto _return;

discov_stopped:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

_return:
	hci_dev_unlock(hdev);
}

static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
				       u8 filter_dup);

static int reenable_adv_sync(struct hci_dev *hdev, void *data)
{
	bt_dev_dbg(hdev, "");

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return 0;

	if (hdev->cur_adv_instance) {
		return hci_schedule_adv_instance_sync(hdev,
						      hdev->cur_adv_instance,
						      true);
	} else {
		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	}

	return 0;
}

static void reenable_adv(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    reenable_adv_work);
	int status;

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	status = hci_cmd_sync_queue(hdev, reenable_adv_sync, NULL, NULL);
	if (status)
		bt_dev_err(hdev, "failed to reenable ADV: %d", status);

	hci_dev_unlock(hdev);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk,
				u8 instance, bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return 0;

	if (next_instance && !ext_adv_capable(hdev))
		return hci_schedule_adv_instance_sync(hdev,
						      next_instance->instance,
						      false);

	return 0;
}

static int adv_timeout_expire_sync(struct hci_dev *hdev, void *data)
{
	u8 instance = *(u8 *)data;

	kfree(data);

	hci_clear_adv_instance_sync(hdev, NULL, instance, false);

	if (list_empty(&hdev->adv_instances))
		return hci_disable_advertising_sync(hdev);

	return 0;
}

static void adv_timeout_expire(struct work_struct *work)
{
	u8 *inst_ptr;
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	if (hdev->cur_adv_instance == 0x00)
		goto unlock;

	inst_ptr = kmalloc(1, GFP_KERNEL);
	if (!inst_ptr)
		goto unlock;

	*inst_ptr = hdev->cur_adv_instance;
	hci_cmd_sync_queue(hdev, adv_timeout_expire_sync, inst_ptr, NULL);

unlock:
	hci_dev_unlock(hdev);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static int hci_passive_scan_sync(struct hci_dev *hdev);

static void interleave_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    interleave_scan.work);
	unsigned long timeout;

	if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
		timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
	} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
		timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
	} else {
		bt_dev_err(hdev, "unexpected error");
		return;
	}

	hci_passive_scan_sync(hdev);

	hci_dev_lock(hdev);

	switch (hdev->interleave_scan_state) {
	case INTERLEAVE_SCAN_ALLOWLIST:
		bt_dev_dbg(hdev, "next state: allowlist");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
		break;
	case INTERLEAVE_SCAN_NO_FILTER:
		bt_dev_dbg(hdev, "next state: no filter");
		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
		break;
	case INTERLEAVE_SCAN_NONE:
		bt_dev_err(hdev, "unexpected error");
	}

	hci_dev_unlock(hdev);

	/* Don't continue interleaving if it was canceled */
	if (is_interleave_scanning(hdev))
		queue_delayed_work(hdev->req_workqueue,
				   &hdev->interleave_scan, timeout);
}

void hci_cmd_sync_init(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
	INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
	mutex_init(&hdev->cmd_sync_work_lock);
	mutex_init(&hdev->unregister_lock);

	INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work);
	INIT_WORK(&hdev->reenable_adv_work, reenable_adv);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
	INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
}

static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
				       struct hci_cmd_sync_work_entry *entry,
				       int err)
{
	if (entry->destroy)
		entry->destroy(hdev, entry->data, err);

	list_del(&entry->list);
	kfree(entry);
}

void hci_cmd_sync_clear(struct hci_dev *hdev)
{
	struct hci_cmd_sync_work_entry *entry, *tmp;

	cancel_work_sync(&hdev->cmd_sync_work);
	cancel_work_sync(&hdev->reenable_adv_work);

	mutex_lock(&hdev->cmd_sync_work_lock);
	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list)
		_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
	mutex_unlock(&hdev->cmd_sync_work_lock);
}

void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;

		queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
	}
}
EXPORT_SYMBOL(hci_cmd_sync_cancel);

/* Cancel ongoing command request synchronously:
 *
 * - Set result and mark status to HCI_REQ_CANCELED
 * - Wakeup command sync thread
 */
void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err)
{
	bt_dev_dbg(hdev, "err 0x%2.2x", err);

	if (hdev->req_status == HCI_REQ_PEND) {
		/* req_result is __u32 so error must be positive to be properly
		 * propagated.
		 */
		hdev->req_result = err < 0 ? -err : err;
		hdev->req_status = HCI_REQ_CANCELED;

		wake_up_interruptible(&hdev->req_wait_q);
	}
}
EXPORT_SYMBOL(hci_cmd_sync_cancel_sync);

/* Submit HCI command to be run as cmd_sync_work:
 *
 * - hdev must _not_ be unregistered
 */
int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			void *data, hci_cmd_sync_work_destroy_t destroy)
{
	struct hci_cmd_sync_work_entry *entry;
	int err = 0;

	mutex_lock(&hdev->unregister_lock);
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		err = -ENODEV;
		goto unlock;
	}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto unlock;
	}
	entry->func = func;
	entry->data = data;
	entry->destroy = destroy;

	mutex_lock(&hdev->cmd_sync_work_lock);
	list_add_tail(&entry->list, &hdev->cmd_sync_work_list);
	mutex_unlock(&hdev->cmd_sync_work_lock);

	queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);

unlock:
	mutex_unlock(&hdev->unregister_lock);
	return err;
}
EXPORT_SYMBOL(hci_cmd_sync_submit);

/* Queue HCI command:
 *
 * - hdev must be running
 */
int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
		       void *data, hci_cmd_sync_work_destroy_t destroy)
{
	/* Only queue command if hdev is running which means it had been opened
	 * and is either on init phase or is already up.
	 */
	if (!test_bit(HCI_RUNNING, &hdev->flags))
		return -ENETDOWN;

	return hci_cmd_sync_submit(hdev, func, data, destroy);
}
EXPORT_SYMBOL(hci_cmd_sync_queue);
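
/* Illustrative sketch (hypothetical names, not part of the original file):
 * deferring work onto the cmd_sync machinery. The func callback runs from
 * hci_cmd_sync_work() with hdev->req_lock held, so it may call the *_sync
 * helpers directly; destroy runs afterwards (or on cancel) to release data.
 */
#if 0
struct example_ctx {
	u8 instance;
};

static int example_update_adv_sync(struct hci_dev *hdev, void *data)
{
	struct example_ctx *ctx = data;

	return hci_update_adv_data_sync(hdev, ctx->instance);
}

static void example_destroy(struct hci_dev *hdev, void *data, int err)
{
	kfree(data);
}

static int example_queue_update(struct hci_dev *hdev, u8 instance)
{
	struct example_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;

	ctx->instance = instance;

	return hci_cmd_sync_queue(hdev, example_update_adv_sync, ctx,
				  example_destroy);
}
#endif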

static struct hci_cmd_sync_work_entry *
_hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			   void *data, hci_cmd_sync_work_destroy_t destroy)
{
	struct hci_cmd_sync_work_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
		if (func && entry->func != func)
			continue;

		if (data && entry->data != data)
			continue;

		if (destroy && entry->destroy != destroy)
			continue;

		return entry;
	}

	return NULL;
}

/* Queue HCI command entry once:
 *
 * - Look up if an entry already exists and, only if it doesn't, create a new
 *   entry and queue it.
 */
int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			    void *data, hci_cmd_sync_work_destroy_t destroy)
{
	if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
		return 0;

	return hci_cmd_sync_queue(hdev, func, data, destroy);
}
EXPORT_SYMBOL(hci_cmd_sync_queue_once);

/* Run HCI command:
 *
 * - hdev must be running
 * - if on cmd_sync_work then run immediately otherwise queue
 */
int hci_cmd_sync_run(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
		     void *data, hci_cmd_sync_work_destroy_t destroy)
{
	/* Only queue command if hdev is running which means it had been opened
	 * and is either on init phase or is already up.
	 */
	if (!test_bit(HCI_RUNNING, &hdev->flags))
		return -ENETDOWN;

	/* If on cmd_sync_work then run immediately otherwise queue */
	if (current_work() == &hdev->cmd_sync_work)
		return func(hdev, data);

	return hci_cmd_sync_submit(hdev, func, data, destroy);
}
EXPORT_SYMBOL(hci_cmd_sync_run);

/* Run HCI command entry once:
 *
 * - Look up if an entry already exists and, only if it doesn't, create a new
 *   entry and run it.
 * - if on cmd_sync_work then run immediately otherwise queue
 */
int hci_cmd_sync_run_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			  void *data, hci_cmd_sync_work_destroy_t destroy)
{
	if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
		return 0;

	return hci_cmd_sync_run(hdev, func, data, destroy);
}
EXPORT_SYMBOL(hci_cmd_sync_run_once);

/* Lookup HCI command entry:
 *
 * - Return first entry that matches by function callback or data or
 *   destroy callback.
 */
struct hci_cmd_sync_work_entry *
hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			  void *data, hci_cmd_sync_work_destroy_t destroy)
{
	struct hci_cmd_sync_work_entry *entry;

	mutex_lock(&hdev->cmd_sync_work_lock);
	entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
	mutex_unlock(&hdev->cmd_sync_work_lock);

	return entry;
}
EXPORT_SYMBOL(hci_cmd_sync_lookup_entry);

/* Cancel HCI command entry */
void hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
			       struct hci_cmd_sync_work_entry *entry)
{
	mutex_lock(&hdev->cmd_sync_work_lock);
	_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
	mutex_unlock(&hdev->cmd_sync_work_lock);
}
EXPORT_SYMBOL(hci_cmd_sync_cancel_entry);

/* Dequeue one HCI command entry:
 *
 * - Lookup and cancel first entry that matches.
 */
bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
			       hci_cmd_sync_work_func_t func,
			       void *data, hci_cmd_sync_work_destroy_t destroy)
{
	struct hci_cmd_sync_work_entry *entry;

	entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
	if (!entry)
		return false;

	hci_cmd_sync_cancel_entry(hdev, entry);

	return true;
}
EXPORT_SYMBOL(hci_cmd_sync_dequeue_once);

/* Dequeue HCI command entry:
 *
 * - Lookup and cancel any entry that matches by function callback or data or
 *   destroy callback.
 */
bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
			  void *data, hci_cmd_sync_work_destroy_t destroy)
{
	struct hci_cmd_sync_work_entry *entry;
	bool ret = false;

	mutex_lock(&hdev->cmd_sync_work_lock);
	while ((entry = _hci_cmd_sync_lookup_entry(hdev, func, data,
						   destroy))) {
		_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
		ret = true;
	}
	mutex_unlock(&hdev->cmd_sync_work_lock);

	return ret;
}
EXPORT_SYMBOL(hci_cmd_sync_dequeue);
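
/* Illustrative sketch (hypothetical, not part of the original file): undoing
 * queued entries before they run. Passing NULL for a field acts as a wildcard
 * in the lookup, so this cancels every pending example_update_adv_sync entry
 * regardless of its data or destroy callback.
 */
#if 0
static void example_cancel_updates(struct hci_dev *hdev)
{
	if (hci_cmd_sync_dequeue(hdev, example_update_adv_sync, NULL, NULL))
		bt_dev_dbg(hdev, "cancelled pending adv updates");
}
#endif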

int hci_update_eir_sync(struct hci_dev *hdev)
{
	struct hci_cp_write_eir cp;

	bt_dev_dbg(hdev, "");

	if (!hdev_is_powered(hdev))
		return 0;

	if (!lmp_ext_inq_capable(hdev))
		return 0;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return 0;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return 0;

	memset(&cp, 0, sizeof(cp));

	eir_create(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return 0;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
				     HCI_CMD_TIMEOUT);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

int hci_update_class_sync(struct hci_dev *hdev)
{
	u8 cod[3];

	bt_dev_dbg(hdev, "");

	if (!hdev_is_powered(hdev))
		return 0;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return 0;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return 0;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CLASS_OF_DEV,
				     sizeof(cod), cod, HCI_CMD_TIMEOUT);
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in peripheral role. */
	if (hdev->conn_hash.le_num_peripheral > 0) {
		/* Peripheral connection state and non connectable mode
		 * bit 20.
		 */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Peripheral connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in central role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
		/* Central connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Central connection state and connectable mode bit 35 and
		 * scannable 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
{
	/* If a random_addr has been set and we're advertising or initiating
	 * an LE connection, we can't go ahead and change the random address
	 * at this time. This is because the eventual initiator address used
	 * for the subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (bacmp(&hdev->random_addr, BDADDR_ANY) &&
	    (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	     hci_lookup_le_connect(hdev))) {
		bt_dev_dbg(hdev, "Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return 0;
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RANDOM_ADDR,
				     6, rpa, HCI_CMD_TIMEOUT);
}

int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy,
				   bool rpa, u8 *own_addr_type)
{
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (rpa) {
		/* If Controller supports LL Privacy use own address type
		 * 0x03
		 */
		if (ll_privacy_capable(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		/* Check if RPA is valid */
		if (rpa_valid(hdev))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		err = hci_set_random_addr_sync(hdev, &hdev->rpa);
		if (err)
			return err;

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		return hci_set_random_addr_sync(hdev, &nrpa);
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			return hci_set_random_addr_sync(hdev,
							&hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	u8 data[sizeof(*cp) + sizeof(*set) * 1];
	u8 size;
	struct adv_info *adv = NULL;

	/* If request specifies an instance that doesn't exist, fail */
	if (instance > 0) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv)
			return -EINVAL;

		/* If not enabled there is nothing to do */
		if (!adv->enabled)
			return 0;
	}

	memset(data, 0, sizeof(data));

	cp = (void *)data;
	set = (void *)cp->data;

	/* Instance 0x00 indicates all advertising instances will be disabled */
	cp->num_of_sets = !!instance;
	cp->enable = 0x00;

	set->handle = adv ? adv->handle : instance;

	size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
				     size, data, HCI_CMD_TIMEOUT);
}

static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance,
					    bdaddr_t *random_addr)
{
	struct hci_cp_le_set_adv_set_rand_addr cp;
	int err;

	if (!instance) {
		/* Instance 0x00 doesn't have an adv_info, instead it uses
		 * hdev->random_addr to track its address, so whenever it needs
		 * to be updated this also sets the random address, since
		 * hdev->random_addr is shared with the scan state machine.
		 */
		err = hci_set_random_addr_sync(hdev, random_addr);
		if (err)
			return err;
	}

	memset(&cp, 0, sizeof(cp));

	cp.handle = instance;
	bacpy(&cp.bdaddr, random_addr);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_ext_adv_params cp;
	bool connectable;
	u32 flags;
	bdaddr_t random_addr;
	u8 own_addr_type;
	int err;
	struct adv_info *adv;
	bool secondary_adv;

	if (instance > 0) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv)
			return -EINVAL;
	} else {
		adv = NULL;
	}

	/* Updating parameters of an active instance will return a
	 * Command Disallowed error, so we must first disable the
	 * instance if it is active.
	 */
	if (adv && !adv->pending) {
		err = hci_disable_ext_adv_instance_sync(hdev, instance);
		if (err)
			return err;
	}

	flags = hci_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return -EPERM;

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	err = hci_get_random_address(hdev, !connectable,
				     adv_use_rpa(hdev, flags), adv,
				     &own_addr_type, &random_addr);
	if (err < 0)
		return err;

	memset(&cp, 0, sizeof(cp));

	if (adv) {
		hci_cpu_to_le24(adv->min_interval, cp.min_interval);
		hci_cpu_to_le24(adv->max_interval, cp.max_interval);
		cp.tx_power = adv->tx_power;
	} else {
		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
	}

	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);

	if (connectable) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
	} else if (hci_adv_instance_is_scannable(hdev, instance) ||
		   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
	} else {
		if (secondary_adv)
			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
		else
			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
	}

	/* If Own_Address_Type equals 0x02 or 0x03, the Peer_Address parameter
	 * contains the peer’s Identity Address and the Peer_Address_Type
	 * parameter contains the peer’s Identity Type (i.e., 0x00 or 0x01).
	 * These parameters are used to locate the corresponding local IRK in
	 * the resolving list; this IRK is used to generate their own address
	 * used in the advertisement.
	 */
	if (own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED)
		hci_copy_identity_address(hdev, &cp.peer_addr,
					  &cp.peer_addr_type);

	cp.own_addr_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;
	cp.handle = adv ? adv->handle : instance;

	if (flags & MGMT_ADV_FLAG_SEC_2M) {
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_2M;
	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
		cp.primary_phy = HCI_ADV_PHY_CODED;
		cp.secondary_phy = HCI_ADV_PHY_CODED;
	} else {
		/* In all other cases use 1M */
		cp.primary_phy = HCI_ADV_PHY_1M;
		cp.secondary_phy = HCI_ADV_PHY_1M;
	}

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (err)
		return err;

	if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
	     own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
	    bacmp(&random_addr, BDADDR_ANY)) {
		/* Check if random address need to be updated */
		if (adv) {
			if (!bacmp(&random_addr, &adv->random_addr))
				return 0;
		} else {
			if (!bacmp(&random_addr, &hdev->random_addr))
				return 0;
		}

		return hci_set_adv_set_random_addr_sync(hdev, instance,
							&random_addr);
	}

	return 0;
}

static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
{
	DEFINE_FLEX(struct hci_cp_le_set_ext_scan_rsp_data, pdu, data, length,
		    HCI_MAX_EXT_AD_LENGTH);
	u8 len;
	struct adv_info *adv = NULL;
	int err;

	if (instance) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv || !adv->scan_rsp_changed)
			return 0;
	}

	len = eir_create_scan_rsp(hdev, instance, pdu->data);

	pdu->handle = adv ? adv->handle : instance;
	pdu->length = len;
	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
	pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;

	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
				    struct_size(pdu, data, len), pdu,
				    HCI_CMD_TIMEOUT);
	if (err)
		return err;

	if (adv) {
		adv->scan_rsp_changed = false;
	} else {
		memcpy(hdev->scan_rsp_data, pdu->data, len);
		hdev->scan_rsp_data_len = len;
	}

	return 0;
}

static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	memset(&cp, 0, sizeof(cp));

	len = eir_create_scan_rsp(hdev, instance, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return 0;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_RSP_DATA,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
{
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return 0;

	if (ext_adv_capable(hdev))
		return hci_set_ext_scan_rsp_data_sync(hdev, instance);

	return __hci_set_scan_rsp_data_sync(hdev, instance);
}

int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *set;
	u8 data[sizeof(*cp) + sizeof(*set) * 1];
	struct adv_info *adv;

	if (instance > 0) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv)
			return -EINVAL;
		/* If already enabled there is nothing to do */
		if (adv->enabled)
			return 0;
	} else {
		adv = NULL;
	}

	cp = (void *)data;
	set = (void *)cp->data;

	memset(cp, 0, sizeof(*cp));

	cp->enable = 0x01;
	cp->num_of_sets = 0x01;

	memset(set, 0, sizeof(*set));

	set->handle = adv ? adv->handle : instance;

	/* Set duration per instance since controller is responsible for
	 * scheduling it.
	 */
	if (adv && adv->timeout) {
		u16 duration = adv->timeout * MSEC_PER_SEC;

		/* Time = N * 10 ms */
		set->duration = cpu_to_le16(duration / 10);
	}

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
				     sizeof(*cp) +
				     sizeof(*set) * cp->num_of_sets,
				     data, HCI_CMD_TIMEOUT);
}

int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance)
{
	int err;

	err = hci_setup_ext_adv_instance_sync(hdev, instance);
	if (err)
		return err;

	err = hci_set_ext_scan_rsp_data_sync(hdev, instance);
	if (err)
		return err;

	return hci_enable_ext_advertising_sync(hdev, instance);
}

int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_per_adv_enable cp;
	struct adv_info *adv = NULL;

	/* If periodic advertising already disabled there is nothing to do. */
	adv = hci_find_adv_instance(hdev, instance);
	if (!adv || !adv->periodic || !adv->enabled)
		return 0;

	memset(&cp, 0, sizeof(cp));

	cp.enable = 0x00;
	cp.handle = instance;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_set_per_adv_params_sync(struct hci_dev *hdev, u8 instance,
				       u16 min_interval, u16 max_interval)
{
	struct hci_cp_le_set_per_adv_params cp;

	memset(&cp, 0, sizeof(cp));

	if (!min_interval)
		min_interval = DISCOV_LE_PER_ADV_INT_MIN;

	if (!max_interval)
		max_interval = DISCOV_LE_PER_ADV_INT_MAX;

	cp.handle = instance;
	cp.min_interval = cpu_to_le16(min_interval);
	cp.max_interval = cpu_to_le16(max_interval);
	cp.periodic_properties = 0x0000;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_set_per_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
	DEFINE_FLEX(struct hci_cp_le_set_per_adv_data, pdu, data, length,
		    HCI_MAX_PER_AD_LENGTH);
	u8 len;
	struct adv_info *adv = NULL;

	if (instance) {
		adv = hci_find_adv_instance(hdev, instance);
		if (!adv || !adv->periodic)
			return 0;
	}

	len = eir_create_per_adv_data(hdev, instance, pdu->data);

	pdu->length = len;
	pdu->handle = adv ? adv->handle : instance;
	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_DATA,
				     struct_size(pdu, data, len), pdu,
				     HCI_CMD_TIMEOUT);
}

static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_per_adv_enable cp;
	struct adv_info *adv = NULL;

	/* If periodic advertising already enabled there is nothing to do. */
	adv = hci_find_adv_instance(hdev, instance);
	if (adv && adv->periodic && adv->enabled)
		return 0;

	memset(&cp, 0, sizeof(cp));

	cp.enable = 0x01;
	cp.handle = instance;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

/* Checks if periodic advertising data contains a Basic Announcement and, if
 * it does, generates a Broadcast ID and adds a Broadcast Announcement.
 */
static int hci_adv_bcast_annoucement(struct hci_dev *hdev, struct adv_info *adv)
{
	u8 bid[3];
	u8 ad[4 + 3];

	/* Skip if NULL adv as instance 0x00 is used for general purpose
	 * advertising so it cannot be used for the likes of Broadcast
	 * Announcement as it can be overwritten at any point.
	 */
	if (!adv)
		return 0;

	/* If the PA data doesn't contain a Basic Audio Announcement there
	 * is nothing to do.
	 */
	if (!eir_get_service_data(adv->per_adv_data, adv->per_adv_data_len,
				  0x1851, NULL))
		return 0;

	/* Check if advertising data already has a Broadcast Announcement since
	 * the process may want to control the Broadcast ID directly and in
	 * that case the kernel shall not interfere.
	 */
	if (eir_get_service_data(adv->adv_data, adv->adv_data_len, 0x1852,
				 NULL))
		return 0;

	/* Generate Broadcast ID */
	get_random_bytes(bid, sizeof(bid));
	eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid));
	hci_set_adv_instance_data(hdev, adv->instance, sizeof(ad), ad, 0, NULL);

	return hci_update_adv_data_sync(hdev, adv->instance);
}

int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len,
			   u8 *data, u32 flags, u16 min_interval,
			   u16 max_interval, u16 sync_interval)
{
	struct adv_info *adv = NULL;
	int err;
	bool added = false;

	hci_disable_per_advertising_sync(hdev, instance);

	if (instance) {
		adv = hci_find_adv_instance(hdev, instance);
		/* Create an instance if that could not be found */
		if (!adv) {
			adv = hci_add_per_instance(hdev, instance, flags,
						   data_len, data,
						   sync_interval,
						   sync_interval);
			if (IS_ERR(adv))
				return PTR_ERR(adv);
			adv->pending = false;
			added = true;
		}
	}

	/* Start advertising */
	err = hci_start_ext_adv_sync(hdev, instance);
	if (err < 0)
		goto fail;

	err = hci_adv_bcast_annoucement(hdev, adv);
	if (err < 0)
		goto fail;

	err = hci_set_per_adv_params_sync(hdev, instance, min_interval,
					  max_interval);
	if (err < 0)
		goto fail;

	err = hci_set_per_adv_data_sync(hdev, instance);
	if (err < 0)
		goto fail;

	err = hci_enable_per_advertising_sync(hdev, instance);
	if (err < 0)
		goto fail;

	return 0;

fail:
	if (added)
		hci_remove_adv_instance(hdev, instance);

	return err;
}

static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance)
{
	int err;

	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, instance);

	err = hci_update_adv_data_sync(hdev, instance);
	if (err)
		return err;

	err = hci_update_scan_rsp_data_sync(hdev, instance);
	if (err)
		return err;

	return hci_enable_advertising_sync(hdev);
}
1667 | |
1668 | int hci_enable_advertising_sync(struct hci_dev *hdev) |
1669 | { |
1670 | struct adv_info *adv_instance; |
1671 | struct hci_cp_le_set_adv_param cp; |
1672 | u8 own_addr_type, enable = 0x01; |
1673 | bool connectable; |
1674 | u16 adv_min_interval, adv_max_interval; |
1675 | u32 flags; |
1676 | u8 status; |
1677 | |
1678 | if (ext_adv_capable(hdev)) |
1679 | return hci_enable_ext_advertising_sync(hdev, |
1680 | instance: hdev->cur_adv_instance); |
1681 | |
1682 | flags = hci_adv_instance_flags(hdev, instance: hdev->cur_adv_instance); |
1683 | adv_instance = hci_find_adv_instance(hdev, instance: hdev->cur_adv_instance); |
1684 | |
1685 | /* If the "connectable" instance flag was not set, then choose between |
1686 | * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. |
1687 | */ |
1688 | connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) || |
1689 | mgmt_get_connectable(hdev); |
1690 | |
1691 | if (!is_advertising_allowed(hdev, connectable)) |
1692 | return -EINVAL; |
1693 | |
1694 | status = hci_disable_advertising_sync(hdev); |
1695 | if (status) |
1696 | return status; |
1697 | |
1698 | /* Clear the HCI_LE_ADV bit temporarily so that the |
1699 | * hci_update_random_address knows that it's safe to go ahead |
1700 | * and write a new random address. The flag will be set back on |
1701 | * as soon as the SET_ADV_ENABLE HCI command completes. |
1702 | */ |
1703 | hci_dev_clear_flag(hdev, HCI_LE_ADV); |
1704 | |
1705 | /* Set require_privacy to true only when non-connectable |
1706 | * advertising is used. In that case it is fine to use a |
1707 | * non-resolvable private address. |
1708 | */ |
1709 | status = hci_update_random_address_sync(hdev, require_privacy: !connectable, |
1710 | rpa: adv_use_rpa(hdev, flags), |
1711 | own_addr_type: &own_addr_type); |
1712 | if (status) |
1713 | return status; |
1714 | |
1715 | memset(&cp, 0, sizeof(cp)); |
1716 | |
1717 | if (adv_instance) { |
1718 | adv_min_interval = adv_instance->min_interval; |
1719 | adv_max_interval = adv_instance->max_interval; |
1720 | } else { |
1721 | adv_min_interval = hdev->le_adv_min_interval; |
1722 | adv_max_interval = hdev->le_adv_max_interval; |
1723 | } |
1724 | |
1725 | if (connectable) { |
1726 | cp.type = LE_ADV_IND; |
1727 | } else { |
1728 | if (hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance))
1729 | cp.type = LE_ADV_SCAN_IND; |
1730 | else |
1731 | cp.type = LE_ADV_NONCONN_IND; |
1732 | |
1733 | if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) || |
1734 | hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { |
1735 | adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN; |
1736 | adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX; |
1737 | } |
1738 | } |
1739 | |
1740 | cp.min_interval = cpu_to_le16(adv_min_interval); |
1741 | cp.max_interval = cpu_to_le16(adv_max_interval); |
1742 | cp.own_address_type = own_addr_type; |
1743 | cp.channel_map = hdev->le_adv_channel_map; |
1744 | |
1745 | status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM, |
1746 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
1747 | if (status) |
1748 | return status; |
1749 | |
1750 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE, |
1751 | sizeof(enable), &enable, HCI_CMD_TIMEOUT); |
1752 | } |
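/* Summary of the legacy PDU type selection above (derived from the
 * branches in this function):
 *
 *	connectable			-> LE_ADV_IND
 *	!connectable && scannable	-> LE_ADV_SCAN_IND
 *	!connectable && !scannable	-> LE_ADV_NONCONN_IND
 *
 * For non-connectable advertising that is not general discoverable, the
 * fast intervals DISCOV_LE_FAST_ADV_INT_MIN/MAX override the instance
 * or hdev defaults.
 */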
1753 | |
1754 | static int enable_advertising_sync(struct hci_dev *hdev, void *data) |
1755 | { |
1756 | return hci_enable_advertising_sync(hdev); |
1757 | } |
1758 | |
1759 | int hci_enable_advertising(struct hci_dev *hdev) |
1760 | { |
1761 | if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) && |
1762 | list_empty(&hdev->adv_instances))
1763 | return 0; |
1764 | |
1765 | return hci_cmd_sync_queue(hdev, enable_advertising_sync, NULL, NULL); |
1766 | } |
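/* The pair above follows the usual hci_sync idiom: a static thunk with
 * the queued-callback signature plus a public entry point that queues it.
 * A hypothetical new operation would be wired up the same way (sketch;
 * do_foo_sync/hci_foo/hci_foo_sync are invented names):
 *
 *	static int do_foo_sync(struct hci_dev *hdev, void *data)
 *	{
 *		return hci_foo_sync(hdev);
 *	}
 *
 *	int hci_foo(struct hci_dev *hdev)
 *	{
 *		return hci_cmd_sync_queue(hdev, do_foo_sync, NULL, NULL);
 *	}
 */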
1767 | |
1768 | int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance, |
1769 | struct sock *sk) |
1770 | { |
1771 | int err; |
1772 | |
1773 | if (!ext_adv_capable(hdev)) |
1774 | return 0; |
1775 | |
1776 | err = hci_disable_ext_adv_instance_sync(hdev, instance); |
1777 | if (err) |
1778 | return err; |
1779 | |
1780 | /* If request specifies an instance that doesn't exist, fail */ |
1781 | if (instance > 0 && !hci_find_adv_instance(hdev, instance)) |
1782 | return -EINVAL; |
1783 | |
1784 | return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_REMOVE_ADV_SET, |
1785 | sizeof(instance), &instance, 0, |
1786 | HCI_CMD_TIMEOUT, sk); |
1787 | } |
1788 | |
1789 | int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason) |
1790 | { |
1791 | struct hci_cp_le_term_big cp; |
1792 | |
1793 | memset(&cp, 0, sizeof(cp)); |
1794 | cp.handle = handle; |
1795 | cp.reason = reason; |
1796 | |
1797 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_TERM_BIG, |
1798 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
1799 | } |
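/* Example use (sketch): terminating BIG handle 0x01 with the standard
 * "remote user terminated connection" reason:
 *
 *	err = hci_le_terminate_big_sync(hdev, 0x01,
 *					HCI_ERROR_REMOTE_USER_TERM);
 */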
1800 | |
1801 | static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance) |
1802 | { |
1803 | DEFINE_FLEX(struct hci_cp_le_set_ext_adv_data, pdu, data, length, |
1804 | HCI_MAX_EXT_AD_LENGTH); |
1805 | u8 len; |
1806 | struct adv_info *adv = NULL; |
1807 | int err; |
1808 | |
1809 | if (instance) { |
1810 | adv = hci_find_adv_instance(hdev, instance); |
1811 | if (!adv || !adv->adv_data_changed) |
1812 | return 0; |
1813 | } |
1814 | |
1815 | len = eir_create_adv_data(hdev, instance, pdu->data);
1816 | |
1817 | pdu->length = len; |
1818 | pdu->handle = adv ? adv->handle : instance; |
1819 | pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE; |
1820 | pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG; |
1821 | |
1822 | err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA, |
1823 | struct_size(pdu, data, len), pdu, |
1824 | HCI_CMD_TIMEOUT); |
1825 | if (err) |
1826 | return err; |
1827 | |
1828 | /* Update data if the command succeeded */
1829 | if (adv) { |
1830 | adv->adv_data_changed = false; |
1831 | } else { |
1832 | memcpy(hdev->adv_data, pdu->data, len); |
1833 | hdev->adv_data_len = len; |
1834 | } |
1835 | |
1836 | return 0; |
1837 | } |
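/* Note on the flexible PDU above: DEFINE_FLEX() declares an on-stack
 * hci_cp_le_set_ext_adv_data with room for HCI_MAX_EXT_AD_LENGTH data
 * bytes, and struct_size(pdu, data, len) sizes the command to the fixed
 * header plus only the bytes actually used. For a u8 flexible array the
 * equivalent open-coded length would be (sketch):
 *
 *	offsetof(struct hci_cp_le_set_ext_adv_data, data) + len
 */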
1838 | |
1839 | static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance) |
1840 | { |
1841 | struct hci_cp_le_set_adv_data cp; |
1842 | u8 len; |
1843 | |
1844 | memset(&cp, 0, sizeof(cp)); |
1845 | |
1846 | len = eir_create_adv_data(hdev, instance, cp.data);
1847 | |
1848 | /* There's nothing to do if the data hasn't changed */ |
1849 | if (hdev->adv_data_len == len && |
1850 | memcmp(cp.data, hdev->adv_data, len) == 0)
1851 | return 0; |
1852 | |
1853 | memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); |
1854 | hdev->adv_data_len = len; |
1855 | |
1856 | cp.length = len; |
1857 | |
1858 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA, |
1859 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
1860 | } |
1861 | |
1862 | int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance) |
1863 | { |
1864 | if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) |
1865 | return 0; |
1866 | |
1867 | if (ext_adv_capable(hdev)) |
1868 | return hci_set_ext_adv_data_sync(hdev, instance); |
1869 | |
1870 | return hci_set_adv_data_sync(hdev, instance); |
1871 | } |
1872 | |
1873 | int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance, |
1874 | bool force) |
1875 | { |
1876 | struct adv_info *adv = NULL; |
1877 | u16 timeout; |
1878 | |
1879 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && !ext_adv_capable(hdev)) |
1880 | return -EPERM; |
1881 | |
1882 | if (hdev->adv_instance_timeout) |
1883 | return -EBUSY; |
1884 | |
1885 | adv = hci_find_adv_instance(hdev, instance); |
1886 | if (!adv) |
1887 | return -ENOENT; |
1888 | |
1889 | /* A zero timeout means unlimited advertising. As long as there is |
1890 | * only one instance, duration should be ignored. We still set a timeout |
1891 | * in case further instances are being added later on. |
1892 | * |
1893 | * If the remaining lifetime of the instance is more than the duration |
1894 | * then the timeout corresponds to the duration, otherwise it will be |
1895 | * reduced to the remaining instance lifetime. |
1896 | */ |
1897 | if (adv->timeout == 0 || adv->duration <= adv->remaining_time) |
1898 | timeout = adv->duration; |
1899 | else |
1900 | timeout = adv->remaining_time; |
1901 | |
1902 | /* The remaining time is being reduced unless the instance is being |
1903 | * advertised without time limit. |
1904 | */ |
1905 | if (adv->timeout) |
1906 | adv->remaining_time = adv->remaining_time - timeout; |
1907 | |
1908 | /* Only use work for scheduling instances with legacy advertising */ |
1909 | if (!ext_adv_capable(hdev)) { |
1910 | hdev->adv_instance_timeout = timeout; |
1911 | queue_delayed_work(hdev->req_workqueue,
1912 | &hdev->adv_instance_expire,
1913 | secs_to_jiffies(timeout));
1914 | } |
1915 | |
1916 | /* If we're just re-scheduling the same instance again then do not |
1917 | * execute any HCI commands. This happens when a single instance is |
1918 | * being advertised. |
1919 | */ |
1920 | if (!force && hdev->cur_adv_instance == instance && |
1921 | hci_dev_test_flag(hdev, HCI_LE_ADV)) |
1922 | return 0; |
1923 | |
1924 | hdev->cur_adv_instance = instance; |
1925 | |
1926 | return hci_start_adv_sync(hdev, instance); |
1927 | } |
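/* Worked example for the timeout logic above (illustrative numbers):
 * with adv->timeout = 60, adv->duration = 2 and adv->remaining_time = 1,
 * duration exceeds remaining_time, so timeout becomes 1 and
 * remaining_time drops to 0; the instance is then eligible for removal
 * once this last rotation expires. With adv->timeout == 0 the remaining
 * time is never decremented and the instance rotates indefinitely.
 */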
1928 | |
1929 | static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk) |
1930 | { |
1931 | int err; |
1932 | |
1933 | if (!ext_adv_capable(hdev)) |
1934 | return 0; |
1935 | |
1936 | /* Disable instance 0x00 to disable all instances */ |
1937 | err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
1938 | if (err) |
1939 | return err; |
1940 | |
1941 | return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CLEAR_ADV_SETS, |
1942 | 0, NULL, 0, HCI_CMD_TIMEOUT, sk); |
1943 | } |
1944 | |
1945 | static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force) |
1946 | { |
1947 | struct adv_info *adv, *n; |
1948 | int err = 0; |
1949 | |
1950 | if (ext_adv_capable(hdev)) |
1951 | /* Remove all existing sets */ |
1952 | err = hci_clear_adv_sets_sync(hdev, sk); |
1953 | if (ext_adv_capable(hdev)) |
1954 | return err; |
1955 | |
1956 | /* This is safe as long as there is no command sent while the lock is
1957 | * held. |
1958 | */ |
1959 | hci_dev_lock(hdev); |
1960 | |
1961 | /* Cleanup non-ext instances */ |
1962 | list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { |
1963 | u8 instance = adv->instance; |
1964 | int err; |
1965 | |
1966 | if (!(force || adv->timeout)) |
1967 | continue; |
1968 | |
1969 | err = hci_remove_adv_instance(hdev, instance); |
1970 | if (!err) |
1971 | mgmt_advertising_removed(sk, hdev, instance); |
1972 | } |
1973 | |
1974 | hci_dev_unlock(hdev); |
1975 | |
1976 | return 0; |
1977 | } |
1978 | |
1979 | static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance, |
1980 | struct sock *sk) |
1981 | { |
1982 | int err = 0; |
1983 | |
1984 | /* If we use extended advertising, instance has to be removed first. */ |
1985 | if (ext_adv_capable(hdev)) |
1986 | err = hci_remove_ext_adv_instance_sync(hdev, instance, sk); |
1987 | if (ext_adv_capable(hdev)) |
1988 | return err; |
1989 | |
1990 | /* This is safe as long as there is no command sent while the lock is
1991 | * held. |
1992 | */ |
1993 | hci_dev_lock(hdev); |
1994 | |
1995 | err = hci_remove_adv_instance(hdev, instance); |
1996 | if (!err) |
1997 | mgmt_advertising_removed(sk, hdev, instance); |
1998 | |
1999 | hci_dev_unlock(hdev); |
2000 | |
2001 | return err; |
2002 | } |
2003 | |
2004 | /* For a single instance: |
2005 | * - force == true: The instance will be removed even when its remaining |
2006 | * lifetime is not zero. |
2007 | * - force == false: the instance will be deactivated but kept stored unless |
2008 | * the remaining lifetime is zero. |
2009 | * |
2010 | * For instance == 0x00: |
2011 | * - force == true: All instances will be removed regardless of their timeout |
2012 | * setting. |
2013 | * - force == false: Only instances that have a timeout will be removed. |
2014 | */ |
2015 | int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk, |
2016 | u8 instance, bool force) |
2017 | { |
2018 | struct adv_info *next = NULL; |
2019 | int err; |
2020 | |
2021 | /* Cancel any timeout concerning the removed instance(s). */ |
2022 | if (!instance || hdev->cur_adv_instance == instance) |
2023 | cancel_adv_timeout(hdev); |
2024 | |
2025 | /* Get the next instance to advertise BEFORE we remove |
2026 | * the current one. This can be the same instance again |
2027 | * if there is only one instance. |
2028 | */ |
2029 | if (hdev->cur_adv_instance == instance) |
2030 | next = hci_get_next_instance(hdev, instance); |
2031 | |
2032 | if (!instance) { |
2033 | err = hci_clear_adv_sync(hdev, sk, force); |
2034 | if (err) |
2035 | return err; |
2036 | } else { |
2037 | struct adv_info *adv = hci_find_adv_instance(hdev, instance); |
2038 | |
2039 | if (force || (adv && adv->timeout && !adv->remaining_time)) { |
2040 | /* Don't advertise a removed instance. */ |
2041 | if (next && next->instance == instance) |
2042 | next = NULL; |
2043 | |
2044 | err = hci_remove_adv_sync(hdev, instance, sk); |
2045 | if (err) |
2046 | return err; |
2047 | } |
2048 | } |
2049 | |
2050 | if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING)) |
2051 | return 0; |
2052 | |
2053 | if (next && !ext_adv_capable(hdev)) |
2054 | hci_schedule_adv_instance_sync(hdev, next->instance, false);
2055 | |
2056 | return 0; |
2057 | } |
2058 | |
2059 | int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle) |
2060 | { |
2061 | struct hci_cp_read_rssi cp; |
2062 | |
2063 | cp.handle = handle; |
2064 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_RSSI, |
2065 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
2066 | } |
2067 | |
2068 | int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp) |
2069 | { |
2070 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLOCK, |
2071 | sizeof(*cp), cp, HCI_CMD_TIMEOUT); |
2072 | } |
2073 | |
2074 | int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type) |
2075 | { |
2076 | struct hci_cp_read_tx_power cp; |
2077 | |
2078 | cp.handle = handle; |
2079 | cp.type = type; |
2080 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_TX_POWER, |
2081 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
2082 | } |
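/* Example use (sketch): both helpers expect the connection handle in
 * little-endian order, e.g. for a given conn:
 *
 *	err = hci_read_rssi_sync(hdev, cpu_to_le16(conn->handle));
 *	err = hci_read_tx_power_sync(hdev, cpu_to_le16(conn->handle), 0x00);
 *
 * where type 0x00 reads the current TX power level and 0x01 the maximum.
 */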
2083 | |
2084 | int hci_disable_advertising_sync(struct hci_dev *hdev) |
2085 | { |
2086 | u8 enable = 0x00; |
2087 | int err = 0; |
2088 | |
2089 | /* If controller is not advertising we are done. */ |
2090 | if (!hci_dev_test_flag(hdev, HCI_LE_ADV)) |
2091 | return 0; |
2092 | |
2093 | if (ext_adv_capable(hdev)) |
2094 | err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
2095 | if (ext_adv_capable(hdev)) |
2096 | return err; |
2097 | |
2098 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE, |
2099 | sizeof(enable), &enable, HCI_CMD_TIMEOUT); |
2100 | } |
2101 | |
2102 | static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val, |
2103 | u8 filter_dup) |
2104 | { |
2105 | struct hci_cp_le_set_ext_scan_enable cp; |
2106 | |
2107 | memset(&cp, 0, sizeof(cp)); |
2108 | cp.enable = val; |
2109 | |
2110 | if (hci_dev_test_flag(hdev, HCI_MESH)) |
2111 | cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE; |
2112 | else |
2113 | cp.filter_dup = filter_dup; |
2114 | |
2115 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE, |
2116 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
2117 | } |
2118 | |
2119 | static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val, |
2120 | u8 filter_dup) |
2121 | { |
2122 | struct hci_cp_le_set_scan_enable cp; |
2123 | |
2124 | if (use_ext_scan(hdev)) |
2125 | return hci_le_set_ext_scan_enable_sync(hdev, val, filter_dup); |
2126 | |
2127 | memset(&cp, 0, sizeof(cp)); |
2128 | cp.enable = val; |
2129 | |
2130 | if (val && hci_dev_test_flag(hdev, HCI_MESH)) |
2131 | cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE; |
2132 | else |
2133 | cp.filter_dup = filter_dup; |
2134 | |
2135 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE, |
2136 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
2137 | } |
2138 | |
2139 | static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val) |
2140 | { |
2141 | if (!ll_privacy_capable(hdev)) |
2142 | return 0; |
2143 | |
2144 | /* If controller is not/already resolving we are done. */ |
2145 | if (val == hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) |
2146 | return 0; |
2147 | |
2148 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, |
2149 | sizeof(val), &val, HCI_CMD_TIMEOUT); |
2150 | } |
2151 | |
2152 | static int hci_scan_disable_sync(struct hci_dev *hdev) |
2153 | { |
2154 | int err; |
2155 | |
2156 | /* If controller is not scanning we are done. */ |
2157 | if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) |
2158 | return 0; |
2159 | |
2160 | if (hdev->scanning_paused) { |
2161 | bt_dev_dbg(hdev, "Scanning is paused for suspend"); |
2162 | return 0; |
2163 | } |
2164 | |
2165 | err = hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00);
2166 | if (err) { |
2167 | bt_dev_err(hdev, "Unable to disable scanning: %d", err); |
2168 | return err; |
2169 | } |
2170 | |
2171 | return err; |
2172 | } |
2173 | |
2174 | static bool scan_use_rpa(struct hci_dev *hdev) |
2175 | { |
2176 | return hci_dev_test_flag(hdev, HCI_PRIVACY); |
2177 | } |
2178 | |
2179 | static void hci_start_interleave_scan(struct hci_dev *hdev) |
2180 | { |
2181 | hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; |
2182 | queue_delayed_work(hdev->req_workqueue,
2183 | &hdev->interleave_scan, 0);
2184 | } |
2185 | |
2186 | static void cancel_interleave_scan(struct hci_dev *hdev) |
2187 | { |
2188 | bt_dev_dbg(hdev, "cancelling interleave scan"); |
2189 | |
2190 | cancel_delayed_work_sync(&hdev->interleave_scan);
2191 | |
2192 | hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE; |
2193 | } |
2194 | |
2195 | /* Return true if an interleave scan was started as a result of this
2196 | * call; otherwise return false.
2197 | */
2198 | static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev) |
2199 | { |
2200 | /* Do interleaved scan only if all of the following are true: |
2201 | * - There is at least one ADV monitor |
2202 | * - At least one pending LE connection or one device to be scanned for |
2203 | * - Monitor offloading is not supported |
2204 | * If so, we should alternate between allowlist scan and one without |
2205 | * any filters to save power. |
2206 | */ |
2207 | bool use_interleaving = hci_is_adv_monitoring(hdev) && |
2208 | !(list_empty(&hdev->pend_le_conns) &&
2209 | list_empty(&hdev->pend_le_reports)) &&
2210 | hci_get_adv_monitor_offload_ext(hdev) == |
2211 | HCI_ADV_MONITOR_EXT_NONE; |
2212 | bool is_interleaving = is_interleave_scanning(hdev); |
2213 | |
2214 | if (use_interleaving && !is_interleaving) { |
2215 | hci_start_interleave_scan(hdev); |
2216 | bt_dev_dbg(hdev, "starting interleave scan"); |
2217 | return true; |
2218 | } |
2219 | |
2220 | if (!use_interleaving && is_interleaving) |
2221 | cancel_interleave_scan(hdev); |
2222 | |
2223 | return false; |
2224 | } |
2225 | |
2226 | /* Removes connection from the resolving list if needed. */
2227 | static int hci_le_del_resolve_list_sync(struct hci_dev *hdev, |
2228 | bdaddr_t *bdaddr, u8 bdaddr_type) |
2229 | { |
2230 | struct hci_cp_le_del_from_resolv_list cp; |
2231 | struct bdaddr_list_with_irk *entry; |
2232 | |
2233 | if (!ll_privacy_capable(hdev)) |
2234 | return 0; |
2235 | |
2236 | /* Check if the IRK has been programmed */ |
2237 | entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, bdaddr,
2238 | bdaddr_type);
2239 | if (!entry) |
2240 | return 0; |
2241 | |
2242 | cp.bdaddr_type = bdaddr_type; |
2243 | bacpy(&cp.bdaddr, bdaddr);
2244 | |
2245 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST, |
2246 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
2247 | } |
2248 | |
2249 | static int hci_le_del_accept_list_sync(struct hci_dev *hdev, |
2250 | bdaddr_t *bdaddr, u8 bdaddr_type) |
2251 | { |
2252 | struct hci_cp_le_del_from_accept_list cp; |
2253 | int err; |
2254 | |
2255 | /* Check if device is on accept list before removing it */ |
2256 | if (!hci_bdaddr_list_lookup(&hdev->le_accept_list, bdaddr, bdaddr_type))
2257 | return 0; |
2258 | |
2259 | cp.bdaddr_type = bdaddr_type; |
2260 | bacpy(&cp.bdaddr, bdaddr);
2261 | |
2262 | /* Ignore errors when removing from the resolving list, as it is
2263 | * likely the device was never added.
2264 | */
2265 | hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
2266 | |
2267 | err = __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, |
2268 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
2269 | if (err) { |
2270 | bt_dev_err(hdev, "Unable to remove from allow list: %d", err); |
2271 | return err; |
2272 | } |
2273 | |
2274 | bt_dev_dbg(hdev, "Remove %pMR (0x%x) from allow list", &cp.bdaddr, |
2275 | cp.bdaddr_type); |
2276 | |
2277 | return 0; |
2278 | } |
2279 | |
2280 | struct conn_params { |
2281 | bdaddr_t addr; |
2282 | u8 addr_type; |
2283 | hci_conn_flags_t flags; |
2284 | u8 privacy_mode; |
2285 | }; |
2286 | |
2287 | /* Adds connection to resolve list if needed. |
2288 | * Setting params to NULL programs local hdev->irk |
2289 | */ |
2290 | static int hci_le_add_resolve_list_sync(struct hci_dev *hdev, |
2291 | struct conn_params *params) |
2292 | { |
2293 | struct hci_cp_le_add_to_resolv_list cp; |
2294 | struct smp_irk *irk; |
2295 | struct bdaddr_list_with_irk *entry; |
2296 | struct hci_conn_params *p; |
2297 | |
2298 | if (!ll_privacy_capable(hdev)) |
2299 | return 0; |
2300 | |
2301 | /* Attempt to program local identity address, type and irk if params is |
2302 | * NULL. |
2303 | */ |
2304 | if (!params) { |
2305 | if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) |
2306 | return 0; |
2307 | |
2308 | hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type);
2309 | memcpy(cp.peer_irk, hdev->irk, 16); |
2310 | goto done; |
2311 | } else if (!(params->flags & HCI_CONN_FLAG_ADDRESS_RESOLUTION)) |
2312 | return 0; |
2313 | |
2314 | irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
2315 | if (!irk) |
2316 | return 0; |
2317 | |
2318 | /* Check if the IRK has _not_ been programmed yet. */
2319 | entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list,
2320 | &params->addr,
2321 | params->addr_type);
2322 | if (entry) |
2323 | return 0; |
2324 | |
2325 | cp.bdaddr_type = params->addr_type; |
2326 | bacpy(&cp.bdaddr, &params->addr);
2327 | memcpy(cp.peer_irk, irk->val, 16); |
2328 | |
2329 | /* Default privacy mode is always Network */ |
2330 | params->privacy_mode = HCI_NETWORK_PRIVACY; |
2331 | |
2332 | rcu_read_lock(); |
2333 | p = hci_pend_le_action_lookup(&hdev->pend_le_conns,
2334 | &params->addr, params->addr_type);
2335 | if (!p)
2336 | p = hci_pend_le_action_lookup(&hdev->pend_le_reports,
2337 | &params->addr, params->addr_type);
2338 | if (p) |
2339 | WRITE_ONCE(p->privacy_mode, HCI_NETWORK_PRIVACY); |
2340 | rcu_read_unlock(); |
2341 | |
2342 | done: |
2343 | if (hci_dev_test_flag(hdev, HCI_PRIVACY)) |
2344 | memcpy(cp.local_irk, hdev->irk, 16); |
2345 | else |
2346 | memset(cp.local_irk, 0, 16); |
2347 | |
2348 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST, |
2349 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
2350 | } |
2351 | |
2352 | /* Set Device Privacy Mode. */ |
2353 | static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev, |
2354 | struct conn_params *params) |
2355 | { |
2356 | struct hci_cp_le_set_privacy_mode cp; |
2357 | struct smp_irk *irk; |
2358 | |
2359 | if (!ll_privacy_capable(hdev) || |
2360 | !(params->flags & HCI_CONN_FLAG_ADDRESS_RESOLUTION)) |
2361 | return 0; |
2362 | |
2363 | /* If device privacy mode has already been set there is nothing to do */ |
2364 | if (params->privacy_mode == HCI_DEVICE_PRIVACY) |
2365 | return 0; |
2366 | |
2367 | /* Check if HCI_CONN_FLAG_DEVICE_PRIVACY has been set as it also |
2368 | * indicates that LL Privacy has been enabled and |
2369 | * HCI_OP_LE_SET_PRIVACY_MODE is supported. |
2370 | */ |
2371 | if (!(params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)) |
2372 | return 0; |
2373 | |
2374 | irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
2375 | if (!irk) |
2376 | return 0; |
2377 | |
2378 | memset(&cp, 0, sizeof(cp)); |
2379 | cp.bdaddr_type = irk->addr_type; |
2380 | bacpy(&cp.bdaddr, &irk->bdaddr);
2381 | cp.mode = HCI_DEVICE_PRIVACY; |
2382 | |
2383 | /* Note: params->privacy_mode is not updated since it is a copy */ |
2384 | |
2385 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE, |
2386 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
2387 | } |
2388 | |
2389 | /* Adds connection to allow list if needed, if the device uses RPA (has IRK) |
2390 | * this attempts to program the device in the resolving list as well and |
2391 | * properly set the privacy mode. |
2392 | */ |
2393 | static int hci_le_add_accept_list_sync(struct hci_dev *hdev, |
2394 | struct conn_params *params, |
2395 | u8 *num_entries) |
2396 | { |
2397 | struct hci_cp_le_add_to_accept_list cp; |
2398 | int err; |
2399 | |
2400 | /* During suspend, only wakeable devices can be in acceptlist */ |
2401 | if (hdev->suspended && |
2402 | !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) { |
2403 | hci_le_del_accept_list_sync(hdev, &params->addr,
2404 | params->addr_type);
2405 | return 0; |
2406 | } |
2407 | |
2408 | /* Select filter policy to accept all advertising */ |
2409 | if (*num_entries >= hdev->le_accept_list_size) |
2410 | return -ENOSPC; |
2411 | |
2412 | /* Attempt to program the device in the resolving list first to avoid |
2413 | * having to roll back in case it fails; since the resolving list is
2414 | * dynamic, it can probably be smaller than the accept list.
2415 | */ |
2416 | err = hci_le_add_resolve_list_sync(hdev, params); |
2417 | if (err) { |
2418 | bt_dev_err(hdev, "Unable to add to resolve list: %d", err); |
2419 | return err; |
2420 | } |
2421 | |
2422 | /* Set Privacy Mode */ |
2423 | err = hci_le_set_privacy_mode_sync(hdev, params); |
2424 | if (err) { |
2425 | bt_dev_err(hdev, "Unable to set privacy mode: %d", err); |
2426 | return err; |
2427 | } |
2428 | |
2429 | /* Check if already in accept list */ |
2430 | if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
2431 | params->addr_type))
2432 | return 0; |
2433 | |
2434 | *num_entries += 1; |
2435 | cp.bdaddr_type = params->addr_type; |
2436 | bacpy(&cp.bdaddr, &params->addr);
2437 | |
2438 | err = __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST, |
2439 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
2440 | if (err) { |
2441 | bt_dev_err(hdev, "Unable to add to allow list: %d", err); |
2442 | /* Rollback the device from the resolving list */ |
2443 | hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
2444 | return err; |
2445 | } |
2446 | |
2447 | bt_dev_dbg(hdev, "Add %pMR (0x%x) to allow list", &cp.bdaddr, |
2448 | cp.bdaddr_type); |
2449 | |
2450 | return 0; |
2451 | } |
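/* Hypothetical caller sketch (peer_bdaddr is an invented variable):
 * programming one wakeable peer and tracking consumed slots:
 *
 *	u8 num = 0;
 *	struct conn_params p = {
 *		.addr_type = ADDR_LE_DEV_PUBLIC,
 *		.flags = HCI_CONN_FLAG_REMOTE_WAKEUP,
 *	};
 *
 *	bacpy(&p.addr, &peer_bdaddr);
 *	err = hci_le_add_accept_list_sync(hdev, &p, &num);
 *
 * Note the ordering rationale: the resolving list is programmed first so
 * only the accept-list failure path needs an explicit rollback.
 */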
2452 | |
2453 | /* This function disables/pauses all advertising instances */
2454 | static int hci_pause_advertising_sync(struct hci_dev *hdev) |
2455 | { |
2456 | int err; |
2457 | int old_state; |
2458 | |
2459 | /* If advertising has already been paused there is nothing to do. */
2460 | if (hdev->advertising_paused) |
2461 | return 0; |
2462 | |
2463 | bt_dev_dbg(hdev, "Pausing directed advertising"); |
2464 | |
2465 | /* Stop directed advertising */ |
2466 | old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING); |
2467 | if (old_state) { |
2468 | /* When discoverable timeout triggers, then just make sure |
2469 | * the limited discoverable flag is cleared. Even in the case |
2470 | * of a timeout triggered from general discoverable, it is |
2471 | * safe to unconditionally clear the flag. |
2472 | */ |
2473 | hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); |
2474 | hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); |
2475 | hdev->discov_timeout = 0; |
2476 | } |
2477 | |
2478 | bt_dev_dbg(hdev, "Pausing advertising instances"); |
2479 | |
2480 | /* Call to disable any advertisements active on the controller. |
2481 | * This will succeed even if no advertisements are configured. |
2482 | */ |
2483 | err = hci_disable_advertising_sync(hdev); |
2484 | if (err) |
2485 | return err; |
2486 | |
2487 | /* If we are using software rotation, pause the loop */ |
2488 | if (!ext_adv_capable(hdev)) |
2489 | cancel_adv_timeout(hdev); |
2490 | |
2491 | hdev->advertising_paused = true; |
2492 | hdev->advertising_old_state = old_state; |
2493 | |
2494 | return 0; |
2495 | } |
2496 | |
2497 | /* This function enables all user advertising instances */ |
2498 | static int hci_resume_advertising_sync(struct hci_dev *hdev) |
2499 | { |
2500 | struct adv_info *adv, *tmp; |
2501 | int err; |
2502 | |
2503 | /* If advertising has not been paused there is nothing to do. */ |
2504 | if (!hdev->advertising_paused) |
2505 | return 0; |
2506 | |
2507 | /* Resume directed advertising */ |
2508 | hdev->advertising_paused = false; |
2509 | if (hdev->advertising_old_state) { |
2510 | hci_dev_set_flag(hdev, HCI_ADVERTISING); |
2511 | hdev->advertising_old_state = 0; |
2512 | } |
2513 | |
2514 | bt_dev_dbg(hdev, "Resuming advertising instances"); |
2515 | |
2516 | if (ext_adv_capable(hdev)) { |
2517 | /* Call for each tracked instance to be re-enabled */ |
2518 | list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) { |
2519 | err = hci_enable_ext_advertising_sync(hdev,
2520 | adv->instance);
2521 | if (!err) |
2522 | continue; |
2523 | |
2524 | /* If the instance cannot be resumed remove it */ |
2525 | hci_remove_ext_adv_instance_sync(hdev, adv->instance,
2526 | NULL);
2527 | } |
2528 | } else { |
2529 | /* Schedule for most recent instance to be restarted and begin |
2530 | * the software rotation loop |
2531 | */ |
2532 | err = hci_schedule_adv_instance_sync(hdev,
2533 | hdev->cur_adv_instance,
2534 | true);
2535 | } |
2536 | |
2537 | hdev->advertising_paused = false; |
2538 | |
2539 | return err; |
2540 | } |
2541 | |
2542 | static int hci_pause_addr_resolution(struct hci_dev *hdev) |
2543 | { |
2544 | int err; |
2545 | |
2546 | if (!ll_privacy_capable(hdev)) |
2547 | return 0; |
2548 | |
2549 | if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) |
2550 | return 0; |
2551 | |
2552 | /* Cannot disable addr resolution if scanning is enabled or |
2553 | * when initiating an LE connection. |
2554 | */ |
2555 | if (hci_dev_test_flag(hdev, HCI_LE_SCAN) || |
2556 | hci_lookup_le_connect(hdev)) { |
2557 | bt_dev_err(hdev, "Command not allowed when scan/LE connect"); |
2558 | return -EPERM; |
2559 | } |
2560 | |
2561 | /* Cannot disable addr resolution if advertising is enabled. */ |
2562 | err = hci_pause_advertising_sync(hdev); |
2563 | if (err) { |
2564 | bt_dev_err(hdev, "Pause advertising failed: %d", err); |
2565 | return err; |
2566 | } |
2567 | |
2568 | err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
2569 | if (err) |
2570 | bt_dev_err(hdev, "Unable to disable Address Resolution: %d", |
2571 | err); |
2572 | |
2573 | /* Return if address resolution is disabled and RPA is not used. */ |
2574 | if (!err && scan_use_rpa(hdev)) |
2575 | return 0; |
2576 | |
2577 | hci_resume_advertising_sync(hdev); |
2578 | return err; |
2579 | } |
2580 | |
2581 | struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev, |
2582 | bool extended, struct sock *sk) |
2583 | { |
2584 | u16 opcode = extended ? HCI_OP_READ_LOCAL_OOB_EXT_DATA : |
2585 | HCI_OP_READ_LOCAL_OOB_DATA; |
2586 | |
2587 | return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk); |
2588 | } |
2589 | |
2590 | static struct conn_params *conn_params_copy(struct list_head *list, size_t *n) |
2591 | { |
2592 | struct hci_conn_params *params; |
2593 | struct conn_params *p; |
2594 | size_t i; |
2595 | |
2596 | rcu_read_lock(); |
2597 | |
2598 | i = 0; |
2599 | list_for_each_entry_rcu(params, list, action) |
2600 | ++i; |
2601 | *n = i; |
2602 | |
2603 | rcu_read_unlock(); |
2604 | |
2605 | p = kvcalloc(*n, sizeof(struct conn_params), GFP_KERNEL); |
2606 | if (!p) |
2607 | return NULL; |
2608 | |
2609 | rcu_read_lock(); |
2610 | |
2611 | i = 0; |
2612 | list_for_each_entry_rcu(params, list, action) { |
2613 | /* Racing adds are handled in next scan update */ |
2614 | if (i >= *n) |
2615 | break; |
2616 | |
2617 | /* No hdev->lock, but: addr, addr_type are immutable. |
2618 | * privacy_mode is only written by us or in |
2619 | * hci_cc_le_set_privacy_mode that we wait for. |
2620 | * We should be idempotent so MGMT updating flags |
2621 | * while we are processing is OK. |
2622 | */ |
2623 | bacpy(&p[i].addr, &params->addr);
2624 | p[i].addr_type = params->addr_type; |
2625 | p[i].flags = READ_ONCE(params->flags); |
2626 | p[i].privacy_mode = READ_ONCE(params->privacy_mode); |
2627 | ++i; |
2628 | } |
2629 | |
2630 | rcu_read_unlock(); |
2631 | |
2632 | *n = i; |
2633 | return p; |
2634 | } |
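/* The snapshot returned above must be released with kvfree() once the
 * caller is done iterating, e.g. (sketch; process() is hypothetical):
 *
 *	size_t i, n;
 *	struct conn_params *p = conn_params_copy(&hdev->pend_le_conns, &n);
 *
 *	if (!p)
 *		return -ENOMEM;
 *	for (i = 0; i < n; i++)
 *		process(&p[i]);
 *	kvfree(p);
 */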
2635 | |
2636 | /* Clear LE Accept List */ |
2637 | static int hci_le_clear_accept_list_sync(struct hci_dev *hdev) |
2638 | { |
2639 | if (!(hdev->commands[26] & 0x80)) |
2640 | return 0; |
2641 | |
2642 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL, |
2643 | HCI_CMD_TIMEOUT); |
2644 | } |
2645 | |
2646 | /* Device must not be scanning when updating the accept list. |
2647 | * |
2648 | * Update is done using the following sequence: |
2649 | * |
2650 | * ll_privacy_capable((Disable Advertising) -> Disable Resolving List) -> |
2651 | * Remove Devices From Accept List -> |
2652 | * (has IRK && ll_privacy_capable(Remove Devices From Resolving List))-> |
2653 | * Add Devices to Accept List -> |
2654 | * (has IRK && ll_privacy_capable(Remove Devices From Resolving List)) -> |
2655 | * ll_privacy_capable(Enable Resolving List -> (Enable Advertising)) -> |
2656 | * Enable Scanning |
2657 | * |
2658 | * In case of failure, advertising is restored to its original state and the
2659 | * returned filter policy disables use of the accept list, since either the
2660 | * accept list or the resolving list could not be programmed.
2661 | * |
2662 | */ |
2663 | static u8 hci_update_accept_list_sync(struct hci_dev *hdev) |
2664 | { |
2665 | struct conn_params *params; |
2666 | struct bdaddr_list *b, *t; |
2667 | u8 num_entries = 0; |
2668 | bool pend_conn, pend_report; |
2669 | u8 filter_policy; |
2670 | size_t i, n; |
2671 | int err; |
2672 | |
2673 | /* Pause advertising if resolving list can be used as controllers |
2674 | * cannot accept resolving list modifications while advertising. |
2675 | */ |
2676 | if (ll_privacy_capable(hdev)) { |
2677 | err = hci_pause_advertising_sync(hdev); |
2678 | if (err) { |
2679 | bt_dev_err(hdev, "pause advertising failed: %d", err); |
2680 | return 0x00; |
2681 | } |
2682 | } |
2683 | |
2684 | /* Disable address resolution while reprogramming accept list since |
2685 | * devices that do have an IRK will be programmed in the resolving list |
2686 | * when LL Privacy is enabled. |
2687 | */ |
2688 | err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
2689 | if (err) { |
2690 | bt_dev_err(hdev, "Unable to disable LL privacy: %d", err); |
2691 | goto done; |
2692 | } |
2693 | |
2694 | /* Force address filtering if PA Sync is in progress */ |
2695 | if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) { |
2696 | struct hci_conn *conn; |
2697 | |
2698 | conn = hci_conn_hash_lookup_create_pa_sync(hdev); |
2699 | if (conn) { |
2700 | struct conn_params pa; |
2701 | |
2702 | memset(&pa, 0, sizeof(pa)); |
2703 | |
2704 | bacpy(&pa.addr, &conn->dst);
2705 | pa.addr_type = conn->dst_type; |
2706 | |
2707 | /* Clear first since there could be addresses left |
2708 | * behind. |
2709 | */ |
2710 | hci_le_clear_accept_list_sync(hdev); |
2711 | |
2712 | num_entries = 1; |
2713 | err = hci_le_add_accept_list_sync(hdev, &pa,
2714 | &num_entries);
2715 | goto done; |
2716 | } |
2717 | } |
2718 | |
2719 | /* Go through the current accept list programmed into the |
2720 | * controller one by one and check if that address is connected or is |
2721 | * still in the list of pending connections or list of devices to |
2722 | * report. If not present in either list, then remove it from |
2723 | * the controller. |
2724 | */ |
2725 | list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) { |
2726 | if (hci_conn_hash_lookup_le(hdev, &b->bdaddr, b->bdaddr_type))
2727 | continue; |
2728 | |
2729 | /* Pointers not dereferenced, no locks needed */ |
2730 | pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
2731 | &b->bdaddr,
2732 | b->bdaddr_type);
2733 | pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
2734 | &b->bdaddr,
2735 | b->bdaddr_type);
2736 | |
2737 | /* If the device is not likely to connect or report, |
2738 | * remove it from the acceptlist. |
2739 | */ |
2740 | if (!pend_conn && !pend_report) { |
2741 | hci_le_del_accept_list_sync(hdev, &b->bdaddr,
2742 | b->bdaddr_type);
2743 | continue; |
2744 | } |
2745 | |
2746 | num_entries++; |
2747 | } |
2748 | |
2749 | /* Since all no longer valid accept list entries have been |
2750 | * removed, walk through the list of pending connections |
2751 | * and ensure that any new device gets programmed into |
2752 | * the controller. |
2753 | * |
2754 | * If the list of the devices is larger than the list of |
2755 | * available accept list entries in the controller, then |
2756 | * just abort and return filter policy value to not use the
2757 | * accept list. |
2758 | * |
2759 | * The list and params may be mutated while we wait for events, |
2760 | * so make a copy and iterate it. |
2761 | */ |
2762 | |
2763 | params = conn_params_copy(&hdev->pend_le_conns, &n);
2764 | if (!params) { |
2765 | err = -ENOMEM; |
2766 | goto done; |
2767 | } |
2768 | |
2769 | for (i = 0; i < n; ++i) { |
2770 | err = hci_le_add_accept_list_sync(hdev, &params[i],
2771 | &num_entries);
2772 | if (err) { |
2773 | kvfree(params);
2774 | goto done; |
2775 | } |
2776 | } |
2777 | |
2778 | kvfree(params);
2779 | |
2780 | /* After adding all new pending connections, walk through |
2781 | * the list of pending reports and also add these to the |
2782 | * accept list if there is still space. Abort if space runs out. |
2783 | */ |
2784 | |
2785 | params = conn_params_copy(&hdev->pend_le_reports, &n);
2786 | if (!params) { |
2787 | err = -ENOMEM; |
2788 | goto done; |
2789 | } |
2790 | |
2791 | for (i = 0; i < n; ++i) { |
2792 | err = hci_le_add_accept_list_sync(hdev, &params[i],
2793 | &num_entries);
2794 | if (err) { |
2795 | kvfree(params);
2796 | goto done; |
2797 | } |
2798 | } |
2799 | |
2800 | kvfree(params);
2801 | |
2802 | /* Use the allowlist unless the following conditions are all true: |
2803 | * - We are not currently suspending |
2804 | * - There are 1 or more ADV monitors registered and it's not offloaded |
2805 | * - Interleaved scanning is not currently using the allowlist |
2806 | */ |
2807 | if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
2808 | hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE && |
2809 | hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST) |
2810 | err = -EINVAL; |
2811 | |
2812 | done: |
2813 | filter_policy = err ? 0x00 : 0x01; |
2814 | |
2815 | /* Enable address resolution when LL Privacy is enabled. */ |
2816 | err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
2817 | if (err) |
2818 | bt_dev_err(hdev, "Unable to enable LL privacy: %d", err); |
2819 | |
2820 | /* Resume advertising if it was paused */ |
2821 | if (ll_privacy_capable(hdev)) |
2822 | hci_resume_advertising_sync(hdev); |
2823 | |
2824 | /* Select filter policy to use accept list */ |
2825 | return filter_policy; |
2826 | } |
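/* Return value recap (sketch): 0x01 means scanning may use the accept
 * list as its filter policy; 0x00 means the lists could not be fully
 * programmed and the host must accept all advertisements and filter
 * itself. hci_passive_scan_sync() may further OR in 0x02 for the
 * extended scanner filter policies:
 *
 *	filter_policy = hci_update_accept_list_sync(hdev);
 *	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
 *	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
 *		filter_policy |= 0x02;
 */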
2827 | |
2828 | static void hci_le_scan_phy_params(struct hci_cp_le_scan_phy_params *cp, |
2829 | u8 type, u16 interval, u16 window) |
2830 | { |
2831 | cp->type = type; |
2832 | cp->interval = cpu_to_le16(interval); |
2833 | cp->window = cpu_to_le16(window); |
2834 | } |
2835 | |
2836 | static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type, |
2837 | u16 interval, u16 window, |
2838 | u8 own_addr_type, u8 filter_policy) |
2839 | { |
2840 | struct hci_cp_le_set_ext_scan_params *cp; |
2841 | struct hci_cp_le_scan_phy_params *phy; |
2842 | u8 data[sizeof(*cp) + sizeof(*phy) * 2]; |
2843 | u8 num_phy = 0x00; |
2844 | |
2845 | cp = (void *)data; |
2846 | phy = (void *)cp->data; |
2847 | |
2848 | memset(data, 0, sizeof(data)); |
2849 | |
2850 | cp->own_addr_type = own_addr_type; |
2851 | cp->filter_policy = filter_policy; |
2852 | |
2853 | /* If PA Sync is in progress, select the PHY based on the
2854 | * hci_conn.iso_qos.
2855 | */
2856 | if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) { |
2857 | struct hci_cp_le_add_to_accept_list *sent; |
2858 | |
2859 | sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST); |
2860 | if (sent) { |
2861 | struct hci_conn *conn; |
2862 | |
2863 | conn = hci_conn_hash_lookup_ba(hdev, BIS_LINK,
2864 | &sent->bdaddr);
2865 | if (conn) { |
2866 | struct bt_iso_qos *qos = &conn->iso_qos; |
2867 | |
2868 | if (qos->bcast.in.phy & BT_ISO_PHY_1M || |
2869 | qos->bcast.in.phy & BT_ISO_PHY_2M) { |
2870 | cp->scanning_phys |= LE_SCAN_PHY_1M; |
2871 | hci_le_scan_phy_params(phy, type,
2872 | interval,
2873 | window);
2874 | num_phy++; |
2875 | phy++; |
2876 | } |
2877 | |
2878 | if (qos->bcast.in.phy & BT_ISO_PHY_CODED) { |
2879 | cp->scanning_phys |= LE_SCAN_PHY_CODED; |
2880 | hci_le_scan_phy_params(phy, type,
2881 | interval * 3,
2882 | window * 3);
2883 | num_phy++; |
2884 | phy++; |
2885 | } |
2886 | |
2887 | if (num_phy) |
2888 | goto done; |
2889 | } |
2890 | } |
2891 | } |
2892 | |
2893 | if (scan_1m(hdev) || scan_2m(hdev)) { |
2894 | cp->scanning_phys |= LE_SCAN_PHY_1M; |
2895 | hci_le_scan_phy_params(phy, type, interval, window);
2896 | num_phy++; |
2897 | phy++; |
2898 | } |
2899 | |
2900 | if (scan_coded(hdev)) { |
2901 | cp->scanning_phys |= LE_SCAN_PHY_CODED; |
2902 | hci_le_scan_phy_params(phy, type, interval * 3, window * 3);
2903 | num_phy++; |
2904 | phy++; |
2905 | } |
2906 | |
2907 | done: |
2908 | if (!num_phy) |
2909 | return -EINVAL; |
2910 | |
2911 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS, |
2912 | sizeof(*cp) + sizeof(*phy) * num_phy, |
2913 | data, HCI_CMD_TIMEOUT); |
2914 | } |
2915 | |
2916 | static int hci_le_set_scan_param_sync(struct hci_dev *hdev, u8 type, |
2917 | u16 interval, u16 window, |
2918 | u8 own_addr_type, u8 filter_policy) |
2919 | { |
2920 | struct hci_cp_le_set_scan_param cp; |
2921 | |
2922 | if (use_ext_scan(hdev)) |
2923 | return hci_le_set_ext_scan_param_sync(hdev, type, interval, |
2924 | window, own_addr_type, |
2925 | filter_policy); |
2926 | |
2927 | memset(&cp, 0, sizeof(cp)); |
2928 | cp.type = type; |
2929 | cp.interval = cpu_to_le16(interval); |
2930 | cp.window = cpu_to_le16(window); |
2931 | cp.own_address_type = own_addr_type; |
2932 | cp.filter_policy = filter_policy; |
2933 | |
2934 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_PARAM, |
2935 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
2936 | } |
2937 | |
2938 | static int hci_start_scan_sync(struct hci_dev *hdev, u8 type, u16 interval, |
2939 | u16 window, u8 own_addr_type, u8 filter_policy, |
2940 | u8 filter_dup) |
2941 | { |
2942 | int err; |
2943 | |
2944 | if (hdev->scanning_paused) { |
2945 | bt_dev_dbg(hdev, "Scanning is paused for suspend"); |
2946 | return 0; |
2947 | } |
2948 | |
2949 | err = hci_le_set_scan_param_sync(hdev, type, interval, window, |
2950 | own_addr_type, filter_policy); |
2951 | if (err) |
2952 | return err; |
2953 | |
2954 | return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, filter_dup); |
2955 | } |
2956 | |
2957 | static int hci_passive_scan_sync(struct hci_dev *hdev) |
2958 | { |
2959 | u8 own_addr_type; |
2960 | u8 filter_policy; |
2961 | u16 window, interval; |
2962 | u8 filter_dups = LE_SCAN_FILTER_DUP_ENABLE; |
2963 | int err; |
2964 | |
2965 | if (hdev->scanning_paused) { |
2966 | bt_dev_dbg(hdev, "Scanning is paused for suspend"); |
2967 | return 0; |
2968 | } |
2969 | |
2970 | err = hci_scan_disable_sync(hdev); |
2971 | if (err) { |
2972 | bt_dev_err(hdev, "disable scanning failed: %d", err); |
2973 | return err; |
2974 | } |
2975 | |
2976 | /* Set require_privacy to false since no SCAN_REQ are sent
2977 | * during passive scanning. Not using a non-resolvable address
2978 | * here is important so that peer devices using direct
2979 | * advertising with our address will be correctly reported
2980 | * by the controller.
2981 | */
2982 | if (hci_update_random_address_sync(hdev, false, scan_use_rpa(hdev),
2983 | &own_addr_type))
2984 | return 0; |
2985 | |
2986 | if (hdev->enable_advmon_interleave_scan && |
2987 | hci_update_interleaved_scan_sync(hdev)) |
2988 | return 0; |
2989 | |
2990 | bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state); |
2991 | |
2992 | /* Adding or removing entries from the accept list must |
2993 | * happen before enabling scanning. The controller does |
2994 | * not allow accept list modification while scanning. |
2995 | */ |
2996 | filter_policy = hci_update_accept_list_sync(hdev); |
2997 | |
2998 | /* If suspended and filter_policy set to 0x00 (no acceptlist) then |
2999 | * passive scanning cannot be started since that would require the host |
3000 | * to be woken up to process the reports. |
3001 | */ |
3002 | if (hdev->suspended && !filter_policy) { |
3003 | /* Check if accept list is empty then there is no need to scan |
3004 | * while suspended. |
3005 | */ |
3006 | if (list_empty(&hdev->le_accept_list))
3007 | return 0; |
3008 | |
3009 | /* If there are devices in the accept_list, it means some
3010 | * devices could not be programmed, which in the non-suspended
3011 | * case means filter_policy needs to be set to 0x00 so the host
3012 | * filters instead. But since this is the suspended case, we can
3013 | * ignore devices that need host filtering and still allow the
3014 | * devices in the acceptlist to wake up the system.
3015 | */
3016 | filter_policy = 0x01; |
3017 | } |
3018 | |
3019 | /* When the controller is using random resolvable addresses and |
3020 | * with that having LE privacy enabled, then controllers with |
3021 | * Extended Scanner Filter Policies support can now enable support |
3022 | * for handling directed advertising. |
3023 | * |
3024 | * So instead of using filter polices 0x00 (no acceptlist) |
3025 | * and 0x01 (acceptlist enabled) use the new filter policies |
3026 | * 0x02 (no acceptlist) and 0x03 (acceptlist enabled). |
3027 | */ |
3028 | if (hci_dev_test_flag(hdev, HCI_PRIVACY) && |
3029 | (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)) |
3030 | filter_policy |= 0x02; |
3031 | |
3032 | if (hdev->suspended) { |
3033 | window = hdev->le_scan_window_suspend; |
3034 | interval = hdev->le_scan_int_suspend; |
3035 | } else if (hci_is_le_conn_scanning(hdev)) { |
3036 | window = hdev->le_scan_window_connect; |
3037 | interval = hdev->le_scan_int_connect; |
3038 | } else if (hci_is_adv_monitoring(hdev)) { |
3039 | window = hdev->le_scan_window_adv_monitor; |
3040 | interval = hdev->le_scan_int_adv_monitor; |
3041 | |
3042 | /* Disable duplicates filter when scanning for advertisement |
3043 | * monitor for the following reasons. |
3044 | * |
3045 | * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm |
3046 | * controllers ignore RSSI_Sampling_Period when the duplicates |
3047 | * filter is enabled. |
3048 | * |
3049 | * For SW pattern filtering, when we're not doing interleaved |
3050 | * scanning, it is necessary to disable duplicates filter, |
3051 | * otherwise hosts can only receive one advertisement and it's |
3052 | * impossible to know if a peer is still in range. |
3053 | */ |
3054 | filter_dups = LE_SCAN_FILTER_DUP_DISABLE; |
3055 | } else { |
3056 | window = hdev->le_scan_window; |
3057 | interval = hdev->le_scan_interval; |
3058 | } |
3059 | |
3060 | /* Disable all filtering for Mesh */ |
3061 | if (hci_dev_test_flag(hdev, HCI_MESH)) { |
3062 | filter_policy = 0; |
3063 | filter_dups = LE_SCAN_FILTER_DUP_DISABLE; |
3064 | } |
3065 | |
3066 | bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy); |
3067 | |
3068 | return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window,
3069 | own_addr_type, filter_policy, filter_dups);
3070 | } |
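/* Scan parameter selection above, summarized (derived from the branches
 * in this function):
 *
 *	suspended		-> le_scan_window/int_suspend
 *	LE connect scanning	-> le_scan_window/int_connect
 *	ADV monitoring		-> le_scan_window/int_adv_monitor,
 *				   duplicates filter disabled
 *	otherwise		-> le_scan_window/le_scan_interval
 *
 * HCI_MESH additionally forces filter_policy to 0x00 and disables the
 * duplicates filter regardless of the branch taken.
 */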
3071 | |
3072 | /* This function controls the passive scanning based on hdev->pend_le_conns |
3073 | * list. If there are pending LE connections we start the background scanning,
3074 | * otherwise we stop it in the following sequence: |
3075 | * |
3076 | * If there are devices to scan: |
3077 | * |
3078 | * Disable Scanning -> Update Accept List -> |
3079 | * ll_privacy_capable((Disable Advertising) -> Disable Resolving List -> |
3080 | * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) -> |
3081 | * Enable Scanning |
3082 | * |
3083 | * Otherwise: |
3084 | * |
3085 | * Disable Scanning |
3086 | */ |
3087 | int hci_update_passive_scan_sync(struct hci_dev *hdev) |
3088 | { |
3089 | int err; |
3090 | |
3091 | if (!test_bit(HCI_UP, &hdev->flags) || |
3092 | test_bit(HCI_INIT, &hdev->flags) || |
3093 | hci_dev_test_flag(hdev, HCI_SETUP) || |
3094 | hci_dev_test_flag(hdev, HCI_CONFIG) || |
3095 | hci_dev_test_flag(hdev, HCI_AUTO_OFF) || |
3096 | hci_dev_test_flag(hdev, HCI_UNREGISTER)) |
3097 | return 0; |
3098 | |
3099 | /* No point in doing scanning if LE support hasn't been enabled */ |
3100 | if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) |
3101 | return 0; |
3102 | |
3103 | /* If discovery is active don't interfere with it */ |
3104 | if (hdev->discovery.state != DISCOVERY_STOPPED) |
3105 | return 0; |
3106 | |
3107 | /* Reset RSSI and UUID filters when starting background scanning |
3108 | * since these filters are meant for service discovery only. |
3109 | * |
3110 | * The Start Discovery and Start Service Discovery operations |
3111 | * ensure to set proper values for RSSI threshold and UUID |
3112 | * filter list. So it is safe to just reset them here. |
3113 | */ |
3114 | hci_discovery_filter_clear(hdev); |
3115 | |
3116 | bt_dev_dbg(hdev, "ADV monitoring is %s", |
3117 | hci_is_adv_monitoring(hdev) ? "on": "off"); |
3118 | |
3119 | if (!hci_dev_test_flag(hdev, HCI_MESH) && |
3120 | list_empty(&hdev->pend_le_conns) &&
3121 | list_empty(&hdev->pend_le_reports) &&
3122 | !hci_is_adv_monitoring(hdev) && |
3123 | !hci_dev_test_flag(hdev, HCI_PA_SYNC)) { |
3124 | /* If there is no pending LE connections or devices |
3125 | * to be scanned for or no ADV monitors, we should stop the |
3126 | * background scanning. |
3127 | */ |
3128 | |
3129 | bt_dev_dbg(hdev, "stopping background scanning"); |
3130 | |
3131 | err = hci_scan_disable_sync(hdev); |
3132 | if (err) |
3133 | bt_dev_err(hdev, "stop background scanning failed: %d", |
3134 | err); |
3135 | } else { |
3136 | /* If there is at least one pending LE connection, we should |
3137 | * keep the background scan running. |
3138 | */ |
3139 | |
3140 | /* If controller is connecting, we should not start scanning |
3141 | * since some controllers are not able to scan and connect at |
3142 | * the same time. |
3143 | */ |
3144 | if (hci_lookup_le_connect(hdev)) |
3145 | return 0; |
3146 | |
3147 | bt_dev_dbg(hdev, "start background scanning"); |
3148 | |
3149 | err = hci_passive_scan_sync(hdev); |
3150 | if (err) |
3151 | bt_dev_err(hdev, "start background scanning failed: %d", |
3152 | err); |
3153 | } |
3154 | |
3155 | return err; |
3156 | } |
3157 | |
3158 | static int update_scan_sync(struct hci_dev *hdev, void *data) |
3159 | { |
3160 | return hci_update_scan_sync(hdev); |
3161 | } |
3162 | |
3163 | int hci_update_scan(struct hci_dev *hdev) |
3164 | { |
3165 | return hci_cmd_sync_queue(hdev, update_scan_sync, NULL, NULL); |
3166 | } |
3167 | |
3168 | static int update_passive_scan_sync(struct hci_dev *hdev, void *data) |
3169 | { |
3170 | return hci_update_passive_scan_sync(hdev); |
3171 | } |
3172 | |
3173 | int hci_update_passive_scan(struct hci_dev *hdev) |
3174 | { |
3175 | /* Only queue if it would have any effect */ |
3176 | if (!test_bit(HCI_UP, &hdev->flags) || |
3177 | test_bit(HCI_INIT, &hdev->flags) || |
3178 | hci_dev_test_flag(hdev, HCI_SETUP) || |
3179 | hci_dev_test_flag(hdev, HCI_CONFIG) || |
3180 | hci_dev_test_flag(hdev, HCI_AUTO_OFF) || |
3181 | hci_dev_test_flag(hdev, HCI_UNREGISTER)) |
3182 | return 0; |
3183 | |
3184 | return hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL, |
3185 | NULL); |
3186 | } |
3187 | |
3188 | int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val) |
3189 | { |
3190 | int err; |
3191 | |
3192 | if (!bredr_sc_enabled(hdev) || lmp_host_sc_capable(hdev)) |
3193 | return 0; |
3194 | |
3195 | err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT, |
3196 | sizeof(val), &val, HCI_CMD_TIMEOUT); |
3197 | |
3198 | if (!err) { |
3199 | if (val) { |
3200 | hdev->features[1][0] |= LMP_HOST_SC; |
3201 | hci_dev_set_flag(hdev, HCI_SC_ENABLED); |
3202 | } else { |
3203 | hdev->features[1][0] &= ~LMP_HOST_SC; |
3204 | hci_dev_clear_flag(hdev, HCI_SC_ENABLED); |
3205 | } |
3206 | } |
3207 | |
3208 | return err; |
3209 | } |
3210 | |
3211 | int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode) |
3212 | { |
3213 | int err; |
3214 | |
3215 | if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) || |
3216 | lmp_host_ssp_capable(hdev)) |
3217 | return 0; |
3218 | |
3219 | if (!mode && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) { |
3220 | __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, |
3221 | sizeof(mode), &mode, HCI_CMD_TIMEOUT); |
3222 | } |
3223 | |
3224 | err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE, |
3225 | sizeof(mode), &mode, HCI_CMD_TIMEOUT); |
3226 | if (err) |
3227 | return err; |
3228 | |
3229 | return hci_write_sc_support_sync(hdev, 0x01);
3230 | } |
3231 | |
3232 | int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul) |
3233 | { |
3234 | struct hci_cp_write_le_host_supported cp; |
3235 | |
3236 | if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) || |
3237 | !lmp_bredr_capable(hdev)) |
3238 | return 0; |
3239 | |
3240 | /* Check first if we already have the right host state |
3241 | * (host features set) |
3242 | */ |
3243 | if (le == lmp_host_le_capable(hdev) && |
3244 | simul == lmp_host_le_br_capable(hdev)) |
3245 | return 0; |
3246 | |
3247 | memset(&cp, 0, sizeof(cp)); |
3248 | |
3249 | cp.le = le; |
3250 | cp.simul = simul; |
3251 | |
3252 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, |
3253 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
3254 | } |
3255 | |
3256 | static int hci_powered_update_adv_sync(struct hci_dev *hdev) |
3257 | { |
3258 | struct adv_info *adv, *tmp; |
3259 | int err; |
3260 | |
3261 | if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) |
3262 | return 0; |
3263 | |
3264 | /* If RPA Resolution has not been enabled yet it means the
3265 | * resolving list is empty and we should attempt to program the |
3266 | * local IRK in order to support using own_addr_type |
3267 | * ADDR_LE_DEV_RANDOM_RESOLVED (0x03). |
3268 | */ |
3269 | if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) { |
3270 | hci_le_add_resolve_list_sync(hdev, NULL);
3271 | hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
3272 | } |
3273 | |
3274 | /* Make sure the controller has a good default for |
3275 | * advertising data. This also applies to the case |
3276 | * where BR/EDR was toggled during the AUTO_OFF phase. |
3277 | */ |
3278 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || |
3279 | list_empty(&hdev->adv_instances)) {
3280 | if (ext_adv_capable(hdev)) { |
3281 | err = hci_setup_ext_adv_instance_sync(hdev, 0x00);
3282 | if (!err)
3283 | hci_update_scan_rsp_data_sync(hdev, 0x00);
3284 | } else {
3285 | err = hci_update_adv_data_sync(hdev, 0x00);
3286 | if (!err)
3287 | hci_update_scan_rsp_data_sync(hdev, 0x00);
3288 | } |
3289 | |
3290 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) |
3291 | hci_enable_advertising_sync(hdev); |
3292 | } |
3293 | |
3294 | /* Call for each tracked instance to be scheduled */ |
3295 | list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) |
3296 | hci_schedule_adv_instance_sync(hdev, adv->instance, true);
3297 | |
3298 | return 0; |
3299 | } |
3300 | |
3301 | static int hci_write_auth_enable_sync(struct hci_dev *hdev) |
3302 | { |
3303 | u8 link_sec; |
3304 | |
3305 | link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY); |
3306 | if (link_sec == test_bit(HCI_AUTH, &hdev->flags)) |
3307 | return 0; |
3308 | |
3309 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE, |
3310 | sizeof(link_sec), &link_sec, |
3311 | HCI_CMD_TIMEOUT); |
3312 | } |
3313 | |
3314 | int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable) |
3315 | { |
3316 | struct hci_cp_write_page_scan_activity cp; |
3317 | u8 type; |
3318 | int err = 0; |
3319 | |
3320 | if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) |
3321 | return 0; |
3322 | |
3323 | if (hdev->hci_ver < BLUETOOTH_VER_1_2) |
3324 | return 0; |
3325 | |
3326 | memset(&cp, 0, sizeof(cp)); |
3327 | |
3328 | if (enable) { |
3329 | type = PAGE_SCAN_TYPE_INTERLACED; |
3330 | |
3331 | /* 160 msec page scan interval */ |
3332 | cp.interval = cpu_to_le16(0x0100); |
3333 | } else { |
3334 | type = hdev->def_page_scan_type; |
3335 | cp.interval = cpu_to_le16(hdev->def_page_scan_int); |
3336 | } |
3337 | |
3338 | cp.window = cpu_to_le16(hdev->def_page_scan_window); |
3339 | |
3340 | if (__cpu_to_le16(hdev->page_scan_interval) != cp.interval || |
3341 | __cpu_to_le16(hdev->page_scan_window) != cp.window) { |
3342 | err = __hci_cmd_sync_status(hdev, |
3343 | HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, |
3344 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
3345 | if (err) |
3346 | return err; |
3347 | } |
3348 | |
3349 | if (hdev->page_scan_type != type) |
3350 | err = __hci_cmd_sync_status(hdev, |
3351 | HCI_OP_WRITE_PAGE_SCAN_TYPE, |
3352 | sizeof(type), &type, |
3353 | HCI_CMD_TIMEOUT); |
3354 | |
3355 | return err; |
3356 | } |
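/* Unit check for the fast-connectable interval above (sketch): page
 * scan timing is expressed in 0.625 ms baseband slots, so
 *
 *	0x0100 slots * 0.625 ms = 256 * 0.625 ms = 160 ms
 *
 * which matches the "160 msec page scan interval" comment.
 */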
3357 | |
3358 | static bool disconnected_accept_list_entries(struct hci_dev *hdev) |
3359 | { |
3360 | struct bdaddr_list *b; |
3361 | |
3362 | list_for_each_entry(b, &hdev->accept_list, list) { |
3363 | struct hci_conn *conn; |
3364 | |
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
3366 | if (!conn) |
3367 | return true; |
3368 | |
3369 | if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) |
3370 | return true; |
3371 | } |
3372 | |
3373 | return false; |
3374 | } |
3375 | |
3376 | static int hci_write_scan_enable_sync(struct hci_dev *hdev, u8 val) |
3377 | { |
3378 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE, |
3379 | sizeof(val), &val, |
3380 | HCI_CMD_TIMEOUT); |
3381 | } |
3382 | |
3383 | int hci_update_scan_sync(struct hci_dev *hdev) |
3384 | { |
3385 | u8 scan; |
3386 | |
3387 | if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) |
3388 | return 0; |
3389 | |
3390 | if (!hdev_is_powered(hdev)) |
3391 | return 0; |
3392 | |
3393 | if (mgmt_powering_down(hdev)) |
3394 | return 0; |
3395 | |
3396 | if (hdev->scanning_paused) |
3397 | return 0; |
3398 | |
3399 | if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) || |
3400 | disconnected_accept_list_entries(hdev)) |
3401 | scan = SCAN_PAGE; |
3402 | else |
3403 | scan = SCAN_DISABLED; |
3404 | |
3405 | if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) |
3406 | scan |= SCAN_INQUIRY; |
3407 | |
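	/* scan is now a Write Scan Enable parameter value: 0x00 no scans,
	 * 0x01 inquiry scan only, 0x02 page scan only, 0x03 both.
	 */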
3408 | if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) && |
3409 | test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY)) |
3410 | return 0; |
3411 | |
	return hci_write_scan_enable_sync(hdev, scan);
3413 | } |
3414 | |
3415 | int hci_update_name_sync(struct hci_dev *hdev) |
3416 | { |
3417 | struct hci_cp_write_local_name cp; |
3418 | |
3419 | memset(&cp, 0, sizeof(cp)); |
3420 | |
3421 | memcpy(cp.name, hdev->dev_name, sizeof(cp.name)); |
3422 | |
3423 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME, |
3424 | sizeof(cp), &cp, |
3425 | HCI_CMD_TIMEOUT); |
3426 | } |
3427 | |
/* This function performs the powered update HCI command sequence after the
 * HCI init sequence, which ends up resetting all states; the sequence is as
 * follows:
3430 | * |
3431 | * HCI_SSP_ENABLED(Enable SSP) |
3432 | * HCI_LE_ENABLED(Enable LE) |
3433 | * HCI_LE_ENABLED(ll_privacy_capable(Add local IRK to Resolving List) -> |
3434 | * Update adv data) |
3435 | * Enable Authentication |
3436 | * lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class -> |
3437 | * Set Name -> Set EIR) |
3438 | * HCI_FORCE_STATIC_ADDR | BDADDR_ANY && !HCI_BREDR_ENABLED (Set Static Address) |
3439 | */ |
3440 | int hci_powered_update_sync(struct hci_dev *hdev) |
3441 | { |
3442 | int err; |
3443 | |
3444 | /* Register the available SMP channels (BR/EDR and LE) only when |
3445 | * successfully powering on the controller. This late |
3446 | * registration is required so that LE SMP can clearly decide if |
3447 | * the public address or static address is used. |
3448 | */ |
3449 | smp_register(hdev); |
3450 | |
	err = hci_write_ssp_mode_sync(hdev, 0x01);
	if (err)
		return err;

	err = hci_write_le_host_supported_sync(hdev, 0x01, 0x00);
3456 | if (err) |
3457 | return err; |
3458 | |
3459 | err = hci_powered_update_adv_sync(hdev); |
3460 | if (err) |
3461 | return err; |
3462 | |
3463 | err = hci_write_auth_enable_sync(hdev); |
3464 | if (err) |
3465 | return err; |
3466 | |
3467 | if (lmp_bredr_capable(hdev)) { |
3468 | if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) |
			hci_write_fast_connectable_sync(hdev, true);
		else
			hci_write_fast_connectable_sync(hdev, false);
3472 | hci_update_scan_sync(hdev); |
3473 | hci_update_class_sync(hdev); |
3474 | hci_update_name_sync(hdev); |
3475 | hci_update_eir_sync(hdev); |
3476 | } |
3477 | |
3478 | /* If forcing static address is in use or there is no public |
3479 | * address use the static address as random address (but skip |
3480 | * the HCI command if the current random address is already the |
	 * static one).
3482 | * |
3483 | * In case BR/EDR has been disabled on a dual-mode controller |
3484 | * and a static address has been configured, then use that |
3485 | * address instead of the public BR/EDR address. |
3486 | */ |
3487 | if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || |
	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			return hci_set_random_addr_sync(hdev,
							&hdev->static_addr);
3493 | } |
3494 | |
3495 | return 0; |
3496 | } |
3497 | |
3498 | /** |
3499 | * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address |
3500 | * (BD_ADDR) for a HCI device from |
3501 | * a firmware node property. |
3502 | * @hdev: The HCI device |
3503 | * |
3504 | * Search the firmware node for 'local-bd-address'. |
3505 | * |
3506 | * All-zero BD addresses are rejected, because those could be properties |
3507 | * that exist in the firmware tables, but were not updated by the firmware. For |
 * example, the DTS could define 'local-bd-address' with an all-zero BD
 * address.
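 *
 * Example devicetree snippet (illustrative node and value; per the DT
 * binding the bytes are stored least significant byte first, matching
 * bdaddr_t):
 *
 *   bluetooth {
 *           local-bd-address = [ 55 44 33 22 11 00 ];
 *   };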
3509 | */ |
3510 | static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev) |
3511 | { |
3512 | struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent); |
3513 | bdaddr_t ba; |
3514 | int ret; |
3515 | |
	ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
					    (u8 *)&ba, sizeof(ba));
	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
3519 | return; |
3520 | |
3521 | if (test_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks)) |
		baswap(&hdev->public_addr, &ba);
	else
		bacpy(&hdev->public_addr, &ba);
3525 | } |
3526 | |
3527 | struct hci_init_stage { |
3528 | int (*func)(struct hci_dev *hdev); |
3529 | }; |
3530 | |
/* Run a NULL-terminated table of init stage functions */
3532 | static int hci_init_stage_sync(struct hci_dev *hdev, |
3533 | const struct hci_init_stage *stage) |
3534 | { |
3535 | size_t i; |
3536 | |
3537 | for (i = 0; stage[i].func; i++) { |
3538 | int err; |
3539 | |
3540 | err = stage[i].func(hdev); |
3541 | if (err) |
3542 | return err; |
3543 | } |
3544 | |
3545 | return 0; |
3546 | } |
3547 | |
3548 | /* Read Local Version */ |
3549 | static int hci_read_local_version_sync(struct hci_dev *hdev) |
3550 | { |
3551 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_VERSION, |
3552 | 0, NULL, HCI_CMD_TIMEOUT); |
3553 | } |
3554 | |
3555 | /* Read BD Address */ |
3556 | static int hci_read_bd_addr_sync(struct hci_dev *hdev) |
3557 | { |
3558 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_BD_ADDR, |
3559 | 0, NULL, HCI_CMD_TIMEOUT); |
3560 | } |
3561 | |
3562 | #define HCI_INIT(_func) \ |
3563 | { \ |
3564 | .func = _func, \ |
3565 | } |
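
/* Stage tables are NULL-terminated arrays of HCI_INIT() entries consumed by
 * hci_init_stage_sync(); the trailing {} sentinel zeroes .func and stops the
 * loop. An illustrative (hypothetical) table and call:
 *
 *   static const struct hci_init_stage example_stage[] = {
 *           HCI_INIT(hci_read_local_version_sync),
 *           {}
 *   };
 *
 *   err = hci_init_stage_sync(hdev, example_stage);
 */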
3566 | |
3567 | static const struct hci_init_stage hci_init0[] = { |
3568 | /* HCI_OP_READ_LOCAL_VERSION */ |
3569 | HCI_INIT(hci_read_local_version_sync), |
3570 | /* HCI_OP_READ_BD_ADDR */ |
3571 | HCI_INIT(hci_read_bd_addr_sync), |
3572 | {} |
3573 | }; |
3574 | |
3575 | int hci_reset_sync(struct hci_dev *hdev) |
3576 | { |
3577 | int err; |
3578 | |
	set_bit(HCI_RESET, &hdev->flags);
3580 | |
3581 | err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL, |
3582 | HCI_CMD_TIMEOUT); |
3583 | if (err) |
3584 | return err; |
3585 | |
3586 | return 0; |
3587 | } |
3588 | |
3589 | static int hci_init0_sync(struct hci_dev *hdev) |
3590 | { |
3591 | int err; |
3592 | |
3593 | bt_dev_dbg(hdev, ""); |
3594 | |
3595 | /* Reset */ |
3596 | if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { |
3597 | err = hci_reset_sync(hdev); |
3598 | if (err) |
3599 | return err; |
3600 | } |
3601 | |
	return hci_init_stage_sync(hdev, hci_init0);
3603 | } |
3604 | |
3605 | static int hci_unconf_init_sync(struct hci_dev *hdev) |
3606 | { |
3607 | int err; |
3608 | |
3609 | if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) |
3610 | return 0; |
3611 | |
3612 | err = hci_init0_sync(hdev); |
3613 | if (err < 0) |
3614 | return err; |
3615 | |
3616 | if (hci_dev_test_flag(hdev, HCI_SETUP)) |
3617 | hci_debugfs_create_basic(hdev); |
3618 | |
3619 | return 0; |
3620 | } |
3621 | |
3622 | /* Read Local Supported Features. */ |
3623 | static int hci_read_local_features_sync(struct hci_dev *hdev) |
3624 | { |
3625 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES, |
3626 | 0, NULL, HCI_CMD_TIMEOUT); |
3627 | } |
3628 | |
3629 | /* BR Controller init stage 1 command sequence */ |
3630 | static const struct hci_init_stage br_init1[] = { |
3631 | /* HCI_OP_READ_LOCAL_FEATURES */ |
3632 | HCI_INIT(hci_read_local_features_sync), |
3633 | /* HCI_OP_READ_LOCAL_VERSION */ |
3634 | HCI_INIT(hci_read_local_version_sync), |
3635 | /* HCI_OP_READ_BD_ADDR */ |
3636 | HCI_INIT(hci_read_bd_addr_sync), |
3637 | {} |
3638 | }; |
3639 | |
3640 | /* Read Local Commands */ |
3641 | static int hci_read_local_cmds_sync(struct hci_dev *hdev) |
3642 | { |
3643 | /* All Bluetooth 1.2 and later controllers should support the |
3644 | * HCI command for reading the local supported commands. |
3645 | * |
3646 | * Unfortunately some controllers indicate Bluetooth 1.2 support, |
3647 | * but do not have support for this command. If that is the case, |
3648 | * the driver can quirk the behavior and skip reading the local |
3649 | * supported commands. |
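	 *
	 * The reply populates hdev->commands, the Supported Commands
	 * bitmap from the Core spec; later code tests it one octet and
	 * one bit at a time, e.g. the hdev->commands[41] & 0x20 check in
	 * hci_le_read_buffer_size_sync() below selects the v2 variant of
	 * LE Read Buffer Size (octet 41, bit 5).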
3650 | */ |
3651 | if (hdev->hci_ver > BLUETOOTH_VER_1_1 && |
3652 | !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks)) |
3653 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_COMMANDS, |
3654 | 0, NULL, HCI_CMD_TIMEOUT); |
3655 | |
3656 | return 0; |
3657 | } |
3658 | |
3659 | static int hci_init1_sync(struct hci_dev *hdev) |
3660 | { |
3661 | int err; |
3662 | |
3663 | bt_dev_dbg(hdev, ""); |
3664 | |
3665 | /* Reset */ |
3666 | if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { |
3667 | err = hci_reset_sync(hdev); |
3668 | if (err) |
3669 | return err; |
3670 | } |
3671 | |
	return hci_init_stage_sync(hdev, br_init1);
3673 | } |
3674 | |
3675 | /* Read Buffer Size (ACL mtu, max pkt, etc.) */ |
3676 | static int hci_read_buffer_size_sync(struct hci_dev *hdev) |
3677 | { |
3678 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE, |
3679 | 0, NULL, HCI_CMD_TIMEOUT); |
3680 | } |
3681 | |
3682 | /* Read Class of Device */ |
3683 | static int hci_read_dev_class_sync(struct hci_dev *hdev) |
3684 | { |
3685 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLASS_OF_DEV, |
3686 | 0, NULL, HCI_CMD_TIMEOUT); |
3687 | } |
3688 | |
3689 | /* Read Local Name */ |
3690 | static int hci_read_local_name_sync(struct hci_dev *hdev) |
3691 | { |
3692 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_NAME, |
3693 | 0, NULL, HCI_CMD_TIMEOUT); |
3694 | } |
3695 | |
3696 | /* Read Voice Setting */ |
3697 | static int hci_read_voice_setting_sync(struct hci_dev *hdev) |
3698 | { |
3699 | if (!read_voice_setting_capable(hdev)) |
3700 | return 0; |
3701 | |
3702 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING, |
3703 | 0, NULL, HCI_CMD_TIMEOUT); |
3704 | } |
3705 | |
3706 | /* Read Number of Supported IAC */ |
3707 | static int hci_read_num_supported_iac_sync(struct hci_dev *hdev) |
3708 | { |
3709 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_NUM_SUPPORTED_IAC, |
3710 | 0, NULL, HCI_CMD_TIMEOUT); |
3711 | } |
3712 | |
3713 | /* Read Current IAC LAP */ |
3714 | static int hci_read_current_iac_lap_sync(struct hci_dev *hdev) |
3715 | { |
3716 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_CURRENT_IAC_LAP, |
3717 | 0, NULL, HCI_CMD_TIMEOUT); |
3718 | } |
3719 | |
3720 | static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type, |
3721 | u8 cond_type, bdaddr_t *bdaddr, |
3722 | u8 auto_accept) |
3723 | { |
3724 | struct hci_cp_set_event_filter cp; |
3725 | |
3726 | if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) |
3727 | return 0; |
3728 | |
3729 | if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks)) |
3730 | return 0; |
3731 | |
3732 | memset(&cp, 0, sizeof(cp)); |
3733 | cp.flt_type = flt_type; |
3734 | |
3735 | if (flt_type != HCI_FLT_CLEAR_ALL) { |
3736 | cp.cond_type = cond_type; |
		bacpy(&cp.addr_conn_flt.bdaddr, bdaddr);
3738 | cp.addr_conn_flt.auto_accept = auto_accept; |
3739 | } |
3740 | |
3741 | return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_FLT, |
3742 | flt_type == HCI_FLT_CLEAR_ALL ? |
3743 | sizeof(cp.flt_type) : sizeof(cp), &cp, |
3744 | HCI_CMD_TIMEOUT); |
3745 | } |
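
/* A sketch of programming (rather than clearing) a filter — a hypothetical
 * call using the Connection Setup filter constants from hci.h — would be:
 *
 *   hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP,
 *                             HCI_CONN_SETUP_ALLOW_BDADDR, &bdaddr,
 *                             HCI_CONN_SETUP_AUTO_ON);
 *
 * HCI_FLT_CLEAR_ALL instead sends only the single filter type byte, as the
 * variable-length computation above shows.
 */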
3746 | |
3747 | static int hci_clear_event_filter_sync(struct hci_dev *hdev) |
3748 | { |
3749 | if (!hci_dev_test_flag(hdev, HCI_EVENT_FILTER_CONFIGURED)) |
3750 | return 0; |
3751 | |
3752 | /* In theory the state machine should not reach here unless |
3753 | * a hci_set_event_filter_sync() call succeeds, but we do |
3754 | * the check both for parity and as a future reminder. |
3755 | */ |
3756 | if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks)) |
3757 | return 0; |
3758 | |
	return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00,
					 BDADDR_ANY, 0x00);
3761 | } |
3762 | |
/* Connection accept timeout: 0x7d00 slots * 0.625 ms/slot = 20 seconds */
3764 | static int hci_write_ca_timeout_sync(struct hci_dev *hdev) |
3765 | { |
3766 | __le16 param = cpu_to_le16(0x7d00); |
3767 | |
3768 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CA_TIMEOUT, |
3769 | sizeof(param), ¶m, HCI_CMD_TIMEOUT); |
3770 | } |
3771 | |
3772 | /* Enable SCO flow control if supported */ |
3773 | static int hci_write_sync_flowctl_sync(struct hci_dev *hdev) |
3774 | { |
3775 | struct hci_cp_write_sync_flowctl cp; |
3776 | int err; |
3777 | |
3778 | /* Check if the controller supports SCO and HCI_OP_WRITE_SYNC_FLOWCTL */ |
3779 | if (!lmp_sco_capable(hdev) || !(hdev->commands[10] & BIT(4)) || |
3780 | !test_bit(HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED, &hdev->quirks)) |
3781 | return 0; |
3782 | |
3783 | memset(&cp, 0, sizeof(cp)); |
3784 | cp.enable = 0x01; |
3785 | |
3786 | err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SYNC_FLOWCTL, |
3787 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
3788 | if (!err) |
3789 | hci_dev_set_flag(hdev, HCI_SCO_FLOWCTL); |
3790 | |
3791 | return err; |
3792 | } |
3793 | |
3794 | /* BR Controller init stage 2 command sequence */ |
3795 | static const struct hci_init_stage br_init2[] = { |
3796 | /* HCI_OP_READ_BUFFER_SIZE */ |
3797 | HCI_INIT(hci_read_buffer_size_sync), |
3798 | /* HCI_OP_READ_CLASS_OF_DEV */ |
3799 | HCI_INIT(hci_read_dev_class_sync), |
3800 | /* HCI_OP_READ_LOCAL_NAME */ |
3801 | HCI_INIT(hci_read_local_name_sync), |
3802 | /* HCI_OP_READ_VOICE_SETTING */ |
3803 | HCI_INIT(hci_read_voice_setting_sync), |
3804 | /* HCI_OP_READ_NUM_SUPPORTED_IAC */ |
3805 | HCI_INIT(hci_read_num_supported_iac_sync), |
3806 | /* HCI_OP_READ_CURRENT_IAC_LAP */ |
3807 | HCI_INIT(hci_read_current_iac_lap_sync), |
3808 | /* HCI_OP_SET_EVENT_FLT */ |
3809 | HCI_INIT(hci_clear_event_filter_sync), |
3810 | /* HCI_OP_WRITE_CA_TIMEOUT */ |
3811 | HCI_INIT(hci_write_ca_timeout_sync), |
3812 | /* HCI_OP_WRITE_SYNC_FLOWCTL */ |
3813 | HCI_INIT(hci_write_sync_flowctl_sync), |
3814 | {} |
3815 | }; |
3816 | |
3817 | static int hci_write_ssp_mode_1_sync(struct hci_dev *hdev) |
3818 | { |
3819 | u8 mode = 0x01; |
3820 | |
3821 | if (!lmp_ssp_capable(hdev) || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) |
3822 | return 0; |
3823 | |
3824 | /* When SSP is available, then the host features page |
	 * should also be available. However some
3826 | * controllers list the max_page as 0 as long as SSP |
3827 | * has not been enabled. To achieve proper debugging |
3828 | * output, force the minimum max_page to 1 at least. |
3829 | */ |
3830 | hdev->max_page = 0x01; |
3831 | |
3832 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE, |
3833 | sizeof(mode), &mode, HCI_CMD_TIMEOUT); |
3834 | } |
3835 | |
3836 | static int hci_write_eir_sync(struct hci_dev *hdev) |
3837 | { |
3838 | struct hci_cp_write_eir cp; |
3839 | |
3840 | if (!lmp_ssp_capable(hdev) || hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) |
3841 | return 0; |
3842 | |
3843 | memset(hdev->eir, 0, sizeof(hdev->eir)); |
3844 | memset(&cp, 0, sizeof(cp)); |
3845 | |
3846 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp, |
3847 | HCI_CMD_TIMEOUT); |
3848 | } |
3849 | |
3850 | static int hci_write_inquiry_mode_sync(struct hci_dev *hdev) |
3851 | { |
3852 | u8 mode; |
3853 | |
3854 | if (!lmp_inq_rssi_capable(hdev) && |
3855 | !test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) |
3856 | return 0; |
3857 | |
3858 | /* If Extended Inquiry Result events are supported, then |
3859 | * they are clearly preferred over Inquiry Result with RSSI |
3860 | * events. |
3861 | */ |
3862 | mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01; |
3863 | |
3864 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_INQUIRY_MODE, |
3865 | sizeof(mode), &mode, HCI_CMD_TIMEOUT); |
3866 | } |
3867 | |
3868 | static int hci_read_inq_rsp_tx_power_sync(struct hci_dev *hdev) |
3869 | { |
3870 | if (!lmp_inq_tx_pwr_capable(hdev)) |
3871 | return 0; |
3872 | |
3873 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, |
3874 | 0, NULL, HCI_CMD_TIMEOUT); |
3875 | } |
3876 | |
3877 | static int hci_read_local_ext_features_sync(struct hci_dev *hdev, u8 page) |
3878 | { |
3879 | struct hci_cp_read_local_ext_features cp; |
3880 | |
3881 | if (!lmp_ext_feat_capable(hdev)) |
3882 | return 0; |
3883 | |
3884 | memset(&cp, 0, sizeof(cp)); |
3885 | cp.page = page; |
3886 | |
3887 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, |
3888 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
3889 | } |
3890 | |
3891 | static int hci_read_local_ext_features_1_sync(struct hci_dev *hdev) |
3892 | { |
	return hci_read_local_ext_features_sync(hdev, 0x01);
3894 | } |
3895 | |
3896 | /* HCI Controller init stage 2 command sequence */ |
3897 | static const struct hci_init_stage hci_init2[] = { |
3898 | /* HCI_OP_READ_LOCAL_COMMANDS */ |
3899 | HCI_INIT(hci_read_local_cmds_sync), |
3900 | /* HCI_OP_WRITE_SSP_MODE */ |
3901 | HCI_INIT(hci_write_ssp_mode_1_sync), |
3902 | /* HCI_OP_WRITE_EIR */ |
3903 | HCI_INIT(hci_write_eir_sync), |
3904 | /* HCI_OP_WRITE_INQUIRY_MODE */ |
3905 | HCI_INIT(hci_write_inquiry_mode_sync), |
3906 | /* HCI_OP_READ_INQ_RSP_TX_POWER */ |
3907 | HCI_INIT(hci_read_inq_rsp_tx_power_sync), |
3908 | /* HCI_OP_READ_LOCAL_EXT_FEATURES */ |
3909 | HCI_INIT(hci_read_local_ext_features_1_sync), |
3910 | /* HCI_OP_WRITE_AUTH_ENABLE */ |
3911 | HCI_INIT(hci_write_auth_enable_sync), |
3912 | {} |
3913 | }; |
3914 | |
3915 | /* Read LE Buffer Size */ |
3916 | static int hci_le_read_buffer_size_sync(struct hci_dev *hdev) |
3917 | { |
3918 | /* Use Read LE Buffer Size V2 if supported */ |
3919 | if (iso_capable(hdev) && hdev->commands[41] & 0x20) |
3920 | return __hci_cmd_sync_status(hdev, |
3921 | HCI_OP_LE_READ_BUFFER_SIZE_V2, |
3922 | 0, NULL, HCI_CMD_TIMEOUT); |
3923 | |
3924 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE, |
3925 | 0, NULL, HCI_CMD_TIMEOUT); |
3926 | } |
3927 | |
3928 | /* Read LE Local Supported Features */ |
3929 | static int hci_le_read_local_features_sync(struct hci_dev *hdev) |
3930 | { |
3931 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, |
3932 | 0, NULL, HCI_CMD_TIMEOUT); |
3933 | } |
3934 | |
3935 | /* Read LE Supported States */ |
3936 | static int hci_le_read_supported_states_sync(struct hci_dev *hdev) |
3937 | { |
3938 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, |
3939 | 0, NULL, HCI_CMD_TIMEOUT); |
3940 | } |
3941 | |
3942 | /* LE Controller init stage 2 command sequence */ |
3943 | static const struct hci_init_stage le_init2[] = { |
3944 | /* HCI_OP_LE_READ_LOCAL_FEATURES */ |
3945 | HCI_INIT(hci_le_read_local_features_sync), |
3946 | /* HCI_OP_LE_READ_BUFFER_SIZE */ |
3947 | HCI_INIT(hci_le_read_buffer_size_sync), |
3948 | /* HCI_OP_LE_READ_SUPPORTED_STATES */ |
3949 | HCI_INIT(hci_le_read_supported_states_sync), |
3950 | {} |
3951 | }; |
3952 | |
3953 | static int hci_init2_sync(struct hci_dev *hdev) |
3954 | { |
3955 | int err; |
3956 | |
3957 | bt_dev_dbg(hdev, ""); |
3958 | |
	err = hci_init_stage_sync(hdev, hci_init2);
3960 | if (err) |
3961 | return err; |
3962 | |
3963 | if (lmp_bredr_capable(hdev)) { |
		err = hci_init_stage_sync(hdev, br_init2);
3965 | if (err) |
3966 | return err; |
3967 | } else { |
3968 | hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED); |
3969 | } |
3970 | |
3971 | if (lmp_le_capable(hdev)) { |
		err = hci_init_stage_sync(hdev, le_init2);
3973 | if (err) |
3974 | return err; |
3975 | /* LE-only controllers have LE implicitly enabled */ |
3976 | if (!lmp_bredr_capable(hdev)) |
3977 | hci_dev_set_flag(hdev, HCI_LE_ENABLED); |
3978 | } |
3979 | |
3980 | return 0; |
3981 | } |
3982 | |
3983 | static int hci_set_event_mask_sync(struct hci_dev *hdev) |
3984 | { |
3985 | /* The second byte is 0xff instead of 0x9f (two reserved bits |
3986 | * disabled) since a Broadcom 1.2 dongle doesn't respond to the |
3987 | * command otherwise. |
3988 | */ |
3989 | u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 }; |
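
	/* Byte/bit layout note: events[n] bit b is event mask bit
	 * n * 8 + b of the Set Event Mask command, e.g. events[7] |= 0x20
	 * below enables bit 61 (LE Meta Event).
	 */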
3990 | |
	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
3993 | */ |
3994 | if (hdev->hci_ver < BLUETOOTH_VER_1_2) |
3995 | return 0; |
3996 | |
3997 | if (lmp_bredr_capable(hdev)) { |
3998 | events[4] |= 0x01; /* Flow Specification Complete */ |
3999 | |
4000 | /* Don't set Disconnect Complete and mode change when |
		 * suspended as that would wake up the host when disconnecting
4002 | * due to suspend. |
4003 | */ |
4004 | if (hdev->suspended) { |
4005 | events[0] &= 0xef; |
4006 | events[2] &= 0xf7; |
4007 | } |
4008 | } else { |
4009 | /* Use a different default for LE-only devices */ |
4010 | memset(events, 0, sizeof(events)); |
4011 | events[1] |= 0x20; /* Command Complete */ |
4012 | events[1] |= 0x40; /* Command Status */ |
4013 | events[1] |= 0x80; /* Hardware Error */ |
4014 | |
4015 | /* If the controller supports the Disconnect command, enable |
4016 | * the corresponding event. In addition enable packet flow |
4017 | * control related events. |
4018 | */ |
4019 | if (hdev->commands[0] & 0x20) { |
4020 | /* Don't set Disconnect Complete when suspended as that |
			 * would wake up the host when disconnecting due to
4022 | * suspend. |
4023 | */ |
4024 | if (!hdev->suspended) |
4025 | events[0] |= 0x10; /* Disconnection Complete */ |
4026 | events[2] |= 0x04; /* Number of Completed Packets */ |
4027 | events[3] |= 0x02; /* Data Buffer Overflow */ |
4028 | } |
4029 | |
4030 | /* If the controller supports the Read Remote Version |
4031 | * Information command, enable the corresponding event. |
4032 | */ |
4033 | if (hdev->commands[2] & 0x80) |
4034 | events[1] |= 0x08; /* Read Remote Version Information |
4035 | * Complete |
4036 | */ |
4037 | |
4038 | if (hdev->le_features[0] & HCI_LE_ENCRYPTION) { |
4039 | events[0] |= 0x80; /* Encryption Change */ |
4040 | events[5] |= 0x80; /* Encryption Key Refresh Complete */ |
4041 | } |
4042 | } |
4043 | |
4044 | if (lmp_inq_rssi_capable(hdev) || |
4045 | test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) |
4046 | events[4] |= 0x02; /* Inquiry Result with RSSI */ |
4047 | |
4048 | if (lmp_ext_feat_capable(hdev)) |
4049 | events[4] |= 0x04; /* Read Remote Extended Features Complete */ |
4050 | |
4051 | if (lmp_esco_capable(hdev)) { |
4052 | events[5] |= 0x08; /* Synchronous Connection Complete */ |
4053 | events[5] |= 0x10; /* Synchronous Connection Changed */ |
4054 | } |
4055 | |
4056 | if (lmp_sniffsubr_capable(hdev)) |
4057 | events[5] |= 0x20; /* Sniff Subrating */ |
4058 | |
4059 | if (lmp_pause_enc_capable(hdev)) |
4060 | events[5] |= 0x80; /* Encryption Key Refresh Complete */ |
4061 | |
4062 | if (lmp_ext_inq_capable(hdev)) |
4063 | events[5] |= 0x40; /* Extended Inquiry Result */ |
4064 | |
4065 | if (lmp_no_flush_capable(hdev)) |
4066 | events[7] |= 0x01; /* Enhanced Flush Complete */ |
4067 | |
4068 | if (lmp_lsto_capable(hdev)) |
4069 | events[6] |= 0x80; /* Link Supervision Timeout Changed */ |
4070 | |
4071 | if (lmp_ssp_capable(hdev)) { |
4072 | events[6] |= 0x01; /* IO Capability Request */ |
4073 | events[6] |= 0x02; /* IO Capability Response */ |
4074 | events[6] |= 0x04; /* User Confirmation Request */ |
4075 | events[6] |= 0x08; /* User Passkey Request */ |
4076 | events[6] |= 0x10; /* Remote OOB Data Request */ |
4077 | events[6] |= 0x20; /* Simple Pairing Complete */ |
4078 | events[7] |= 0x04; /* User Passkey Notification */ |
4079 | events[7] |= 0x08; /* Keypress Notification */ |
4080 | events[7] |= 0x10; /* Remote Host Supported |
4081 | * Features Notification |
4082 | */ |
4083 | } |
4084 | |
4085 | if (lmp_le_capable(hdev)) |
4086 | events[7] |= 0x20; /* LE Meta-Event */ |
4087 | |
4088 | return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK, |
4089 | sizeof(events), events, HCI_CMD_TIMEOUT); |
4090 | } |
4091 | |
4092 | static int hci_read_stored_link_key_sync(struct hci_dev *hdev) |
4093 | { |
4094 | struct hci_cp_read_stored_link_key cp; |
4095 | |
4096 | if (!(hdev->commands[6] & 0x20) || |
4097 | test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) |
4098 | return 0; |
4099 | |
4100 | memset(&cp, 0, sizeof(cp)); |
	bacpy(&cp.bdaddr, BDADDR_ANY);
4102 | cp.read_all = 0x01; |
4103 | |
4104 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_STORED_LINK_KEY, |
4105 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
4106 | } |
4107 | |
4108 | static int hci_setup_link_policy_sync(struct hci_dev *hdev) |
4109 | { |
4110 | struct hci_cp_write_def_link_policy cp; |
4111 | u16 link_policy = 0; |
4112 | |
4113 | if (!(hdev->commands[5] & 0x10)) |
4114 | return 0; |
4115 | |
4116 | memset(&cp, 0, sizeof(cp)); |
4117 | |
4118 | if (lmp_rswitch_capable(hdev)) |
4119 | link_policy |= HCI_LP_RSWITCH; |
4120 | if (lmp_hold_capable(hdev)) |
4121 | link_policy |= HCI_LP_HOLD; |
4122 | if (lmp_sniff_capable(hdev)) |
4123 | link_policy |= HCI_LP_SNIFF; |
4124 | if (lmp_park_capable(hdev)) |
4125 | link_policy |= HCI_LP_PARK; |
4126 | |
4127 | cp.policy = cpu_to_le16(link_policy); |
4128 | |
4129 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, |
4130 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
4131 | } |
4132 | |
4133 | static int hci_read_page_scan_activity_sync(struct hci_dev *hdev) |
4134 | { |
4135 | if (!(hdev->commands[8] & 0x01)) |
4136 | return 0; |
4137 | |
4138 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_ACTIVITY, |
4139 | 0, NULL, HCI_CMD_TIMEOUT); |
4140 | } |
4141 | |
4142 | static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev) |
4143 | { |
4144 | if (!(hdev->commands[18] & 0x04) || |
4145 | !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) || |
4146 | test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) |
4147 | return 0; |
4148 | |
4149 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING, |
4150 | 0, NULL, HCI_CMD_TIMEOUT); |
4151 | } |
4152 | |
4153 | static int hci_read_page_scan_type_sync(struct hci_dev *hdev) |
4154 | { |
4155 | /* Some older Broadcom based Bluetooth 1.2 controllers do not |
4156 | * support the Read Page Scan Type command. Check support for |
4157 | * this command in the bit mask of supported commands. |
4158 | */ |
4159 | if (!(hdev->commands[13] & 0x01) || |
4160 | test_bit(HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE, &hdev->quirks)) |
4161 | return 0; |
4162 | |
4163 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE, |
4164 | 0, NULL, HCI_CMD_TIMEOUT); |
4165 | } |
4166 | |
4167 | /* Read features beyond page 1 if available */ |
4168 | static int hci_read_local_ext_features_all_sync(struct hci_dev *hdev) |
4169 | { |
4170 | u8 page; |
4171 | int err; |
4172 | |
4173 | if (!lmp_ext_feat_capable(hdev)) |
4174 | return 0; |
4175 | |
4176 | for (page = 2; page < HCI_MAX_PAGES && page <= hdev->max_page; |
4177 | page++) { |
4178 | err = hci_read_local_ext_features_sync(hdev, page); |
4179 | if (err) |
4180 | return err; |
4181 | } |
4182 | |
4183 | return 0; |
4184 | } |
4185 | |
4186 | /* HCI Controller init stage 3 command sequence */ |
4187 | static const struct hci_init_stage hci_init3[] = { |
4188 | /* HCI_OP_SET_EVENT_MASK */ |
4189 | HCI_INIT(hci_set_event_mask_sync), |
4190 | /* HCI_OP_READ_STORED_LINK_KEY */ |
4191 | HCI_INIT(hci_read_stored_link_key_sync), |
4192 | /* HCI_OP_WRITE_DEF_LINK_POLICY */ |
4193 | HCI_INIT(hci_setup_link_policy_sync), |
4194 | /* HCI_OP_READ_PAGE_SCAN_ACTIVITY */ |
4195 | HCI_INIT(hci_read_page_scan_activity_sync), |
4196 | /* HCI_OP_READ_DEF_ERR_DATA_REPORTING */ |
4197 | HCI_INIT(hci_read_def_err_data_reporting_sync), |
4198 | /* HCI_OP_READ_PAGE_SCAN_TYPE */ |
4199 | HCI_INIT(hci_read_page_scan_type_sync), |
4200 | /* HCI_OP_READ_LOCAL_EXT_FEATURES */ |
4201 | HCI_INIT(hci_read_local_ext_features_all_sync), |
4202 | {} |
4203 | }; |
4204 | |
4205 | static int hci_le_set_event_mask_sync(struct hci_dev *hdev) |
4206 | { |
4207 | u8 events[8]; |
4208 | |
4209 | if (!lmp_le_capable(hdev)) |
4210 | return 0; |
4211 | |
4212 | memset(events, 0, sizeof(events)); |
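
	/* Same byte/bit encoding as the page 1 event mask: events[n] bit b
	 * is LE event mask bit n * 8 + b, e.g. the 0x10 below is bit 4
	 * (LE Long Term Key Request).
	 */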
4213 | |
4214 | if (hdev->le_features[0] & HCI_LE_ENCRYPTION) |
4215 | events[0] |= 0x10; /* LE Long Term Key Request */ |
4216 | |
4217 | /* If controller supports the Connection Parameters Request |
4218 | * Link Layer Procedure, enable the corresponding event. |
4219 | */ |
4220 | if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC) |
4221 | /* LE Remote Connection Parameter Request */ |
4222 | events[0] |= 0x20; |
4223 | |
4224 | /* If the controller supports the Data Length Extension |
4225 | * feature, enable the corresponding event. |
4226 | */ |
4227 | if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) |
4228 | events[0] |= 0x40; /* LE Data Length Change */ |
4229 | |
4230 | /* If the controller supports LL Privacy feature or LE Extended Adv, |
4231 | * enable the corresponding event. |
4232 | */ |
4233 | if (use_enhanced_conn_complete(hdev)) |
4234 | events[1] |= 0x02; /* LE Enhanced Connection Complete */ |
4235 | |
4236 | /* Mark Device Privacy if Privacy Mode is supported */ |
4237 | if (privacy_mode_capable(hdev)) |
4238 | hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY; |
4239 | |
4240 | /* Mark Address Resolution if LL Privacy is supported */ |
4241 | if (ll_privacy_capable(hdev)) |
4242 | hdev->conn_flags |= HCI_CONN_FLAG_ADDRESS_RESOLUTION; |
4243 | |
4244 | /* If the controller supports Extended Scanner Filter |
4245 | * Policies, enable the corresponding event. |
4246 | */ |
4247 | if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY) |
4248 | events[1] |= 0x04; /* LE Direct Advertising Report */ |
4249 | |
4250 | /* If the controller supports Channel Selection Algorithm #2 |
4251 | * feature, enable the corresponding event. |
4252 | */ |
4253 | if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2) |
4254 | events[2] |= 0x08; /* LE Channel Selection Algorithm */ |
4255 | |
4256 | /* If the controller supports the LE Set Scan Enable command, |
4257 | * enable the corresponding advertising report event. |
4258 | */ |
4259 | if (hdev->commands[26] & 0x08) |
4260 | events[0] |= 0x02; /* LE Advertising Report */ |
4261 | |
4262 | /* If the controller supports the LE Create Connection |
4263 | * command, enable the corresponding event. |
4264 | */ |
4265 | if (hdev->commands[26] & 0x10) |
4266 | events[0] |= 0x01; /* LE Connection Complete */ |
4267 | |
4268 | /* If the controller supports the LE Connection Update |
4269 | * command, enable the corresponding event. |
4270 | */ |
4271 | if (hdev->commands[27] & 0x04) |
4272 | events[0] |= 0x04; /* LE Connection Update Complete */ |
4273 | |
4274 | /* If the controller supports the LE Read Remote Used Features |
4275 | * command, enable the corresponding event. |
4276 | */ |
4277 | if (hdev->commands[27] & 0x20) |
4278 | /* LE Read Remote Used Features Complete */ |
4279 | events[0] |= 0x08; |
4280 | |
4281 | /* If the controller supports the LE Read Local P-256 |
4282 | * Public Key command, enable the corresponding event. |
4283 | */ |
4284 | if (hdev->commands[34] & 0x02) |
4285 | /* LE Read Local P-256 Public Key Complete */ |
4286 | events[0] |= 0x80; |
4287 | |
4288 | /* If the controller supports the LE Generate DHKey |
4289 | * command, enable the corresponding event. |
4290 | */ |
4291 | if (hdev->commands[34] & 0x04) |
4292 | events[1] |= 0x01; /* LE Generate DHKey Complete */ |
4293 | |
4294 | /* If the controller supports the LE Set Default PHY or |
4295 | * LE Set PHY commands, enable the corresponding event. |
4296 | */ |
4297 | if (hdev->commands[35] & (0x20 | 0x40)) |
4298 | events[1] |= 0x08; /* LE PHY Update Complete */ |
4299 | |
4300 | /* If the controller supports LE Set Extended Scan Parameters |
4301 | * and LE Set Extended Scan Enable commands, enable the |
4302 | * corresponding event. |
4303 | */ |
4304 | if (use_ext_scan(hdev)) |
4305 | events[1] |= 0x10; /* LE Extended Advertising Report */ |
4306 | |
4307 | /* If the controller supports the LE Extended Advertising |
4308 | * command, enable the corresponding event. |
4309 | */ |
4310 | if (ext_adv_capable(hdev)) |
4311 | events[2] |= 0x02; /* LE Advertising Set Terminated */ |
4312 | |
4313 | if (cis_capable(hdev)) { |
4314 | events[3] |= 0x01; /* LE CIS Established */ |
4315 | if (cis_peripheral_capable(hdev)) |
4316 | events[3] |= 0x02; /* LE CIS Request */ |
4317 | } |
4318 | |
4319 | if (bis_capable(hdev)) { |
4320 | events[1] |= 0x20; /* LE PA Report */ |
4321 | events[1] |= 0x40; /* LE PA Sync Established */ |
4322 | events[3] |= 0x04; /* LE Create BIG Complete */ |
4323 | events[3] |= 0x08; /* LE Terminate BIG Complete */ |
4324 | events[3] |= 0x10; /* LE BIG Sync Established */ |
4325 | events[3] |= 0x20; /* LE BIG Sync Loss */ |
4326 | events[4] |= 0x02; /* LE BIG Info Advertising Report */ |
4327 | } |
4328 | |
4329 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK, |
4330 | sizeof(events), events, HCI_CMD_TIMEOUT); |
4331 | } |
4332 | |
4333 | /* Read LE Advertising Channel TX Power */ |
4334 | static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev) |
4335 | { |
4336 | if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) { |
4337 | /* HCI TS spec forbids mixing of legacy and extended |
4338 | * advertising commands wherein READ_ADV_TX_POWER is |
4339 | * also included. So do not call it if extended adv |
4340 | * is supported otherwise controller will return |
4341 | * COMMAND_DISALLOWED for extended commands. |
4342 | */ |
4343 | return __hci_cmd_sync_status(hdev, |
4344 | HCI_OP_LE_READ_ADV_TX_POWER, |
4345 | 0, NULL, HCI_CMD_TIMEOUT); |
4346 | } |
4347 | |
4348 | return 0; |
4349 | } |
4350 | |
/* Read LE Min/Max Tx Power */
4352 | static int hci_le_read_tx_power_sync(struct hci_dev *hdev) |
4353 | { |
4354 | if (!(hdev->commands[38] & 0x80) || |
4355 | test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks)) |
4356 | return 0; |
4357 | |
4358 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER, |
4359 | 0, NULL, HCI_CMD_TIMEOUT); |
4360 | } |
4361 | |
4362 | /* Read LE Accept List Size */ |
4363 | static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev) |
4364 | { |
4365 | if (!(hdev->commands[26] & 0x40)) |
4366 | return 0; |
4367 | |
4368 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_ACCEPT_LIST_SIZE, |
4369 | 0, NULL, HCI_CMD_TIMEOUT); |
4370 | } |
4371 | |
4372 | /* Read LE Resolving List Size */ |
4373 | static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev) |
4374 | { |
4375 | if (!(hdev->commands[34] & 0x40)) |
4376 | return 0; |
4377 | |
4378 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_RESOLV_LIST_SIZE, |
4379 | 0, NULL, HCI_CMD_TIMEOUT); |
4380 | } |
4381 | |
4382 | /* Clear LE Resolving List */ |
4383 | static int hci_le_clear_resolv_list_sync(struct hci_dev *hdev) |
4384 | { |
4385 | if (!(hdev->commands[34] & 0x20)) |
4386 | return 0; |
4387 | |
4388 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL, |
4389 | HCI_CMD_TIMEOUT); |
4390 | } |
4391 | |
/* Set RPA timeout (the value is in seconds) */
4393 | static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev) |
4394 | { |
4395 | __le16 timeout = cpu_to_le16(hdev->rpa_timeout); |
4396 | |
4397 | if (!(hdev->commands[35] & 0x04) || |
4398 | test_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks)) |
4399 | return 0; |
4400 | |
4401 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT, |
4402 | sizeof(timeout), &timeout, |
4403 | HCI_CMD_TIMEOUT); |
4404 | } |
4405 | |
4406 | /* Read LE Maximum Data Length */ |
4407 | static int hci_le_read_max_data_len_sync(struct hci_dev *hdev) |
4408 | { |
4409 | if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)) |
4410 | return 0; |
4411 | |
4412 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL, |
4413 | HCI_CMD_TIMEOUT); |
4414 | } |
4415 | |
4416 | /* Read LE Suggested Default Data Length */ |
4417 | static int hci_le_read_def_data_len_sync(struct hci_dev *hdev) |
4418 | { |
4419 | if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)) |
4420 | return 0; |
4421 | |
4422 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL, |
4423 | HCI_CMD_TIMEOUT); |
4424 | } |
4425 | |
4426 | /* Read LE Number of Supported Advertising Sets */ |
4427 | static int hci_le_read_num_support_adv_sets_sync(struct hci_dev *hdev) |
4428 | { |
4429 | if (!ext_adv_capable(hdev)) |
4430 | return 0; |
4431 | |
4432 | return __hci_cmd_sync_status(hdev, |
4433 | HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, |
4434 | 0, NULL, HCI_CMD_TIMEOUT); |
4435 | } |
4436 | |
4437 | /* Write LE Host Supported */ |
4438 | static int hci_set_le_support_sync(struct hci_dev *hdev) |
4439 | { |
4440 | struct hci_cp_write_le_host_supported cp; |
4441 | |
4442 | /* LE-only devices do not support explicit enablement */ |
4443 | if (!lmp_bredr_capable(hdev)) |
4444 | return 0; |
4445 | |
4446 | memset(&cp, 0, sizeof(cp)); |
4447 | |
4448 | if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { |
4449 | cp.le = 0x01; |
4450 | cp.simul = 0x00; |
4451 | } |
4452 | |
4453 | if (cp.le == lmp_host_le_capable(hdev)) |
4454 | return 0; |
4455 | |
4456 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, |
4457 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
4458 | } |
4459 | |
4460 | /* LE Set Host Feature */ |
4461 | static int hci_le_set_host_feature_sync(struct hci_dev *hdev) |
4462 | { |
4463 | struct hci_cp_le_set_host_feature cp; |
4464 | |
4465 | if (!cis_capable(hdev)) |
4466 | return 0; |
4467 | |
4468 | memset(&cp, 0, sizeof(cp)); |
4469 | |
4470 | /* Connected Isochronous Channels (Host Support) */ |
4471 | cp.bit_number = 32; |
4472 | cp.bit_value = 1; |
4473 | |
4474 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_HOST_FEATURE, |
4475 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
4476 | } |
4477 | |
4478 | /* LE Controller init stage 3 command sequence */ |
4479 | static const struct hci_init_stage le_init3[] = { |
4480 | /* HCI_OP_LE_SET_EVENT_MASK */ |
4481 | HCI_INIT(hci_le_set_event_mask_sync), |
4482 | /* HCI_OP_LE_READ_ADV_TX_POWER */ |
4483 | HCI_INIT(hci_le_read_adv_tx_power_sync), |
4484 | /* HCI_OP_LE_READ_TRANSMIT_POWER */ |
4485 | HCI_INIT(hci_le_read_tx_power_sync), |
4486 | /* HCI_OP_LE_READ_ACCEPT_LIST_SIZE */ |
4487 | HCI_INIT(hci_le_read_accept_list_size_sync), |
4488 | /* HCI_OP_LE_CLEAR_ACCEPT_LIST */ |
4489 | HCI_INIT(hci_le_clear_accept_list_sync), |
4490 | /* HCI_OP_LE_READ_RESOLV_LIST_SIZE */ |
4491 | HCI_INIT(hci_le_read_resolv_list_size_sync), |
4492 | /* HCI_OP_LE_CLEAR_RESOLV_LIST */ |
4493 | HCI_INIT(hci_le_clear_resolv_list_sync), |
4494 | /* HCI_OP_LE_SET_RPA_TIMEOUT */ |
4495 | HCI_INIT(hci_le_set_rpa_timeout_sync), |
4496 | /* HCI_OP_LE_READ_MAX_DATA_LEN */ |
4497 | HCI_INIT(hci_le_read_max_data_len_sync), |
4498 | /* HCI_OP_LE_READ_DEF_DATA_LEN */ |
4499 | HCI_INIT(hci_le_read_def_data_len_sync), |
4500 | /* HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS */ |
4501 | HCI_INIT(hci_le_read_num_support_adv_sets_sync), |
4502 | /* HCI_OP_WRITE_LE_HOST_SUPPORTED */ |
4503 | HCI_INIT(hci_set_le_support_sync), |
4504 | /* HCI_OP_LE_SET_HOST_FEATURE */ |
4505 | HCI_INIT(hci_le_set_host_feature_sync), |
4506 | {} |
4507 | }; |
4508 | |
4509 | static int hci_init3_sync(struct hci_dev *hdev) |
4510 | { |
4511 | int err; |
4512 | |
4513 | bt_dev_dbg(hdev, ""); |
4514 | |
	err = hci_init_stage_sync(hdev, hci_init3);
4516 | if (err) |
4517 | return err; |
4518 | |
4519 | if (lmp_le_capable(hdev)) |
		return hci_init_stage_sync(hdev, le_init3);
4521 | |
4522 | return 0; |
4523 | } |
4524 | |
4525 | static int hci_delete_stored_link_key_sync(struct hci_dev *hdev) |
4526 | { |
4527 | struct hci_cp_delete_stored_link_key cp; |
4528 | |
4529 | /* Some Broadcom based Bluetooth controllers do not support the |
	 * Delete Stored Link Key command. They clearly indicate its
	 * absence in the bit mask of supported commands.
4532 | * |
4533 | * Check the supported commands and only if the command is marked |
4534 | * as supported send it. If not supported assume that the controller |
4535 | * does not have actual support for stored link keys which makes this |
4536 | * command redundant anyway. |
4537 | * |
4538 | * Some controllers indicate that they support handling deleting |
4539 | * stored link keys, but they don't. The quirk lets a driver |
4540 | * just disable this command. |
4541 | */ |
4542 | if (!(hdev->commands[6] & 0x80) || |
4543 | test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) |
4544 | return 0; |
4545 | |
4546 | memset(&cp, 0, sizeof(cp)); |
	bacpy(&cp.bdaddr, BDADDR_ANY);
4548 | cp.delete_all = 0x01; |
4549 | |
4550 | return __hci_cmd_sync_status(hdev, HCI_OP_DELETE_STORED_LINK_KEY, |
4551 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
4552 | } |
4553 | |
4554 | static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev) |
4555 | { |
4556 | u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; |
4557 | bool changed = false; |
4558 | |
4559 | /* Set event mask page 2 if the HCI command for it is supported */ |
4560 | if (!(hdev->commands[22] & 0x04)) |
4561 | return 0; |
4562 | |
4563 | /* If Connectionless Peripheral Broadcast central role is supported |
4564 | * enable all necessary events for it. |
4565 | */ |
4566 | if (lmp_cpb_central_capable(hdev)) { |
4567 | events[1] |= 0x40; /* Triggered Clock Capture */ |
4568 | events[1] |= 0x80; /* Synchronization Train Complete */ |
4569 | events[2] |= 0x08; /* Truncated Page Complete */ |
4570 | events[2] |= 0x20; /* CPB Channel Map Change */ |
4571 | changed = true; |
4572 | } |
4573 | |
4574 | /* If Connectionless Peripheral Broadcast peripheral role is supported |
4575 | * enable all necessary events for it. |
4576 | */ |
4577 | if (lmp_cpb_peripheral_capable(hdev)) { |
4578 | events[2] |= 0x01; /* Synchronization Train Received */ |
4579 | events[2] |= 0x02; /* CPB Receive */ |
4580 | events[2] |= 0x04; /* CPB Timeout */ |
4581 | events[2] |= 0x10; /* Peripheral Page Response Timeout */ |
4582 | changed = true; |
4583 | } |
4584 | |
4585 | /* Enable Authenticated Payload Timeout Expired event if supported */ |
4586 | if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) { |
4587 | events[2] |= 0x80; |
4588 | changed = true; |
4589 | } |
4590 | |
4591 | /* Some Broadcom based controllers indicate support for Set Event |
4592 | * Mask Page 2 command, but then actually do not support it. Since |
4593 | * the default value is all bits set to zero, the command is only |
4594 | * required if the event mask has to be changed. In case no change |
4595 | * to the event mask is needed, skip this command. |
4596 | */ |
4597 | if (!changed) |
4598 | return 0; |
4599 | |
4600 | return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK_PAGE_2, |
4601 | sizeof(events), events, HCI_CMD_TIMEOUT); |
4602 | } |
4603 | |
4604 | /* Read local codec list if the HCI command is supported */ |
4605 | static int hci_read_local_codecs_sync(struct hci_dev *hdev) |
4606 | { |
4607 | if (hdev->commands[45] & 0x04) |
4608 | hci_read_supported_codecs_v2(hdev); |
4609 | else if (hdev->commands[29] & 0x20) |
4610 | hci_read_supported_codecs(hdev); |
4611 | |
4612 | return 0; |
4613 | } |
4614 | |
4615 | /* Read local pairing options if the HCI command is supported */ |
4616 | static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev) |
4617 | { |
4618 | if (!(hdev->commands[41] & 0x08)) |
4619 | return 0; |
4620 | |
4621 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_PAIRING_OPTS, |
4622 | 0, NULL, HCI_CMD_TIMEOUT); |
4623 | } |
4624 | |
4625 | /* Get MWS transport configuration if the HCI command is supported */ |
4626 | static int hci_get_mws_transport_config_sync(struct hci_dev *hdev) |
4627 | { |
4628 | if (!mws_transport_config_capable(hdev)) |
4629 | return 0; |
4630 | |
4631 | return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG, |
4632 | 0, NULL, HCI_CMD_TIMEOUT); |
4633 | } |
4634 | |
4635 | /* Check for Synchronization Train support */ |
4636 | static int hci_read_sync_train_params_sync(struct hci_dev *hdev) |
4637 | { |
4638 | if (!lmp_sync_train_capable(hdev)) |
4639 | return 0; |
4640 | |
4641 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_SYNC_TRAIN_PARAMS, |
4642 | 0, NULL, HCI_CMD_TIMEOUT); |
4643 | } |
4644 | |
4645 | /* Enable Secure Connections if supported and configured */ |
4646 | static int hci_write_sc_support_1_sync(struct hci_dev *hdev) |
4647 | { |
4648 | u8 support = 0x01; |
4649 | |
4650 | if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) || |
4651 | !bredr_sc_enabled(hdev)) |
4652 | return 0; |
4653 | |
4654 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT, |
4655 | sizeof(support), &support, |
4656 | HCI_CMD_TIMEOUT); |
4657 | } |
4658 | |
4659 | /* Set erroneous data reporting if supported to the wideband speech |
4660 | * setting value |
4661 | */ |
4662 | static int hci_set_err_data_report_sync(struct hci_dev *hdev) |
4663 | { |
4664 | struct hci_cp_write_def_err_data_reporting cp; |
4665 | bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED); |
4666 | |
4667 | if (!(hdev->commands[18] & 0x08) || |
4668 | !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) || |
4669 | test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) |
4670 | return 0; |
4671 | |
4672 | if (enabled == hdev->err_data_reporting) |
4673 | return 0; |
4674 | |
4675 | memset(&cp, 0, sizeof(cp)); |
4676 | cp.err_data_reporting = enabled ? ERR_DATA_REPORTING_ENABLED : |
4677 | ERR_DATA_REPORTING_DISABLED; |
4678 | |
4679 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING, |
4680 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
4681 | } |
4682 | |
4683 | static const struct hci_init_stage hci_init4[] = { |
4684 | /* HCI_OP_DELETE_STORED_LINK_KEY */ |
4685 | HCI_INIT(hci_delete_stored_link_key_sync), |
4686 | /* HCI_OP_SET_EVENT_MASK_PAGE_2 */ |
4687 | HCI_INIT(hci_set_event_mask_page_2_sync), |
4688 | /* HCI_OP_READ_LOCAL_CODECS */ |
4689 | HCI_INIT(hci_read_local_codecs_sync), |
4690 | /* HCI_OP_READ_LOCAL_PAIRING_OPTS */ |
4691 | HCI_INIT(hci_read_local_pairing_opts_sync), |
4692 | /* HCI_OP_GET_MWS_TRANSPORT_CONFIG */ |
4693 | HCI_INIT(hci_get_mws_transport_config_sync), |
4694 | /* HCI_OP_READ_SYNC_TRAIN_PARAMS */ |
4695 | HCI_INIT(hci_read_sync_train_params_sync), |
4696 | /* HCI_OP_WRITE_SC_SUPPORT */ |
4697 | HCI_INIT(hci_write_sc_support_1_sync), |
4698 | /* HCI_OP_WRITE_DEF_ERR_DATA_REPORTING */ |
4699 | HCI_INIT(hci_set_err_data_report_sync), |
4700 | {} |
4701 | }; |
4702 | |
4703 | /* Set Suggested Default Data Length to maximum if supported */ |
4704 | static int hci_le_set_write_def_data_len_sync(struct hci_dev *hdev) |
4705 | { |
4706 | struct hci_cp_le_write_def_data_len cp; |
4707 | |
4708 | if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)) |
4709 | return 0; |
4710 | |
4711 | memset(&cp, 0, sizeof(cp)); |
4712 | cp.tx_len = cpu_to_le16(hdev->le_max_tx_len); |
4713 | cp.tx_time = cpu_to_le16(hdev->le_max_tx_time); |
4714 | |
4715 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN, |
4716 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
4717 | } |
4718 | |
/* Set Default PHY parameters if the command is supported, enabling all
 * supported PHYs according to the LE Features bits.
4721 | */ |
4722 | static int hci_le_set_default_phy_sync(struct hci_dev *hdev) |
4723 | { |
4724 | struct hci_cp_le_set_default_phy cp; |
4725 | |
4726 | if (!(hdev->commands[35] & 0x20)) { |
4727 | /* If the command is not supported it means only 1M PHY is |
4728 | * supported. |
4729 | */ |
4730 | hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M; |
4731 | hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; |
4732 | return 0; |
4733 | } |
4734 | |
4735 | memset(&cp, 0, sizeof(cp)); |
4736 | cp.all_phys = 0x00; |
4737 | cp.tx_phys = HCI_LE_SET_PHY_1M; |
4738 | cp.rx_phys = HCI_LE_SET_PHY_1M; |
4739 | |
4740 | /* Enables 2M PHY if supported */ |
4741 | if (le_2m_capable(hdev)) { |
4742 | cp.tx_phys |= HCI_LE_SET_PHY_2M; |
4743 | cp.rx_phys |= HCI_LE_SET_PHY_2M; |
4744 | } |
4745 | |
4746 | /* Enables Coded PHY if supported */ |
4747 | if (le_coded_capable(hdev)) { |
4748 | cp.tx_phys |= HCI_LE_SET_PHY_CODED; |
4749 | cp.rx_phys |= HCI_LE_SET_PHY_CODED; |
4750 | } |
4751 | |
4752 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DEFAULT_PHY, |
4753 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
4754 | } |
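
/* tx_phys and rx_phys are bitmasks of the HCI_LE_SET_PHY_* values, so a
 * controller supporting both optional PHYs ends up advertising
 * 1M | 2M | Coded in each direction, while all_phys = 0x00 means the host
 * states its TX and RX preferences explicitly (no "no preference" bits set).
 */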
4755 | |
4756 | static const struct hci_init_stage le_init4[] = { |
4757 | /* HCI_OP_LE_WRITE_DEF_DATA_LEN */ |
4758 | HCI_INIT(hci_le_set_write_def_data_len_sync), |
4759 | /* HCI_OP_LE_SET_DEFAULT_PHY */ |
4760 | HCI_INIT(hci_le_set_default_phy_sync), |
4761 | {} |
4762 | }; |
4763 | |
4764 | static int hci_init4_sync(struct hci_dev *hdev) |
4765 | { |
4766 | int err; |
4767 | |
4768 | bt_dev_dbg(hdev, ""); |
4769 | |
	err = hci_init_stage_sync(hdev, hci_init4);
4771 | if (err) |
4772 | return err; |
4773 | |
4774 | if (lmp_le_capable(hdev)) |
		return hci_init_stage_sync(hdev, le_init4);
4776 | |
4777 | return 0; |
4778 | } |
4779 | |
4780 | static int hci_init_sync(struct hci_dev *hdev) |
4781 | { |
4782 | int err; |
4783 | |
4784 | err = hci_init1_sync(hdev); |
4785 | if (err < 0) |
4786 | return err; |
4787 | |
4788 | if (hci_dev_test_flag(hdev, HCI_SETUP)) |
4789 | hci_debugfs_create_basic(hdev); |
4790 | |
4791 | err = hci_init2_sync(hdev); |
4792 | if (err < 0) |
4793 | return err; |
4794 | |
4795 | err = hci_init3_sync(hdev); |
4796 | if (err < 0) |
4797 | return err; |
4798 | |
4799 | err = hci_init4_sync(hdev); |
4800 | if (err < 0) |
4801 | return err; |
4802 | |
4803 | /* This function is only called when the controller is actually in |
4804 | * configured state. When the controller is marked as unconfigured, |
4805 | * this initialization procedure is not run. |
4806 | * |
4807 | * It means that it is possible that a controller runs through its |
4808 | * setup phase and then discovers missing settings. If that is the |
4809 | * case, then this function will not be called. It then will only |
4810 | * be called during the config phase. |
4811 | * |
4812 | * So only when in setup phase or config phase, create the debugfs |
4813 | * entries and register the SMP channels. |
4814 | */ |
4815 | if (!hci_dev_test_flag(hdev, HCI_SETUP) && |
4816 | !hci_dev_test_flag(hdev, HCI_CONFIG)) |
4817 | return 0; |
4818 | |
4819 | if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED)) |
4820 | return 0; |
4821 | |
4822 | hci_debugfs_create_common(hdev); |
4823 | |
4824 | if (lmp_bredr_capable(hdev)) |
4825 | hci_debugfs_create_bredr(hdev); |
4826 | |
4827 | if (lmp_le_capable(hdev)) |
4828 | hci_debugfs_create_le(hdev); |
4829 | |
4830 | return 0; |
4831 | } |
4832 | |
4833 | #define HCI_QUIRK_BROKEN(_quirk, _desc) { HCI_QUIRK_BROKEN_##_quirk, _desc } |
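/* e.g. HCI_QUIRK_BROKEN(LE_CODED, "desc") expands to
 * { HCI_QUIRK_BROKEN_LE_CODED, "desc" }
 */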
4834 | |
4835 | static const struct { |
4836 | unsigned long quirk; |
4837 | const char *desc; |
4838 | } hci_broken_table[] = { |
4839 | HCI_QUIRK_BROKEN(LOCAL_COMMANDS, |
4840 | "HCI Read Local Supported Commands not supported"), |
4841 | HCI_QUIRK_BROKEN(STORED_LINK_KEY, |
4842 | "HCI Delete Stored Link Key command is advertised, " |
4843 | "but not supported."), |
4844 | HCI_QUIRK_BROKEN(ERR_DATA_REPORTING, |
4845 | "HCI Read Default Erroneous Data Reporting command is " |
4846 | "advertised, but not supported."), |
4847 | HCI_QUIRK_BROKEN(READ_TRANSMIT_POWER, |
4848 | "HCI Read Transmit Power Level command is advertised, " |
4849 | "but not supported."), |
4850 | HCI_QUIRK_BROKEN(FILTER_CLEAR_ALL, |
4851 | "HCI Set Event Filter command not supported."), |
4852 | HCI_QUIRK_BROKEN(ENHANCED_SETUP_SYNC_CONN, |
4853 | "HCI Enhanced Setup Synchronous Connection command is " |
4854 | "advertised, but not supported."), |
4855 | HCI_QUIRK_BROKEN(SET_RPA_TIMEOUT, |
4856 | "HCI LE Set Random Private Address Timeout command is " |
4857 | "advertised, but not supported."), |
4858 | HCI_QUIRK_BROKEN(EXT_CREATE_CONN, |
4859 | "HCI LE Extended Create Connection command is " |
4860 | "advertised, but not supported."), |
4861 | HCI_QUIRK_BROKEN(WRITE_AUTH_PAYLOAD_TIMEOUT, |
4862 | "HCI WRITE AUTH PAYLOAD TIMEOUT command leads " |
4863 | "to unexpected SMP errors when pairing " |
4864 | "and will not be used."), |
4865 | HCI_QUIRK_BROKEN(LE_CODED, |
4866 | "HCI LE Coded PHY feature bit is set, " |
4867 | "but its usage is not supported.") |
4868 | }; |
4869 | |
4870 | /* This function handles hdev setup stage: |
4871 | * |
4872 | * Calls hdev->setup |
4873 | * Setup address if HCI_QUIRK_USE_BDADDR_PROPERTY is set. |
4874 | */ |
4875 | static int hci_dev_setup_sync(struct hci_dev *hdev) |
4876 | { |
4877 | int ret = 0; |
4878 | bool invalid_bdaddr; |
4879 | size_t i; |
4880 | |
4881 | if (!hci_dev_test_flag(hdev, HCI_SETUP) && |
4882 | !test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) |
4883 | return 0; |
4884 | |
4885 | bt_dev_dbg(hdev, ""); |
4886 | |
4887 | hci_sock_dev_event(hdev, HCI_DEV_SETUP); |
4888 | |
4889 | if (hdev->setup) |
4890 | ret = hdev->setup(hdev); |
4891 | |
4892 | for (i = 0; i < ARRAY_SIZE(hci_broken_table); i++) { |
4893 | if (test_bit(hci_broken_table[i].quirk, &hdev->quirks)) |
4894 | bt_dev_warn(hdev, "%s", hci_broken_table[i].desc); |
4895 | } |
4896 | |
4897 | /* The transport driver can set the quirk to mark the |
4898 | * BD_ADDR invalid before creating the HCI device or in |
4899 | * its setup callback. |
4900 | */ |
4901 | invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) || |
4902 | test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); |
4903 | if (!ret) { |
4904 | if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks) && |
		    !bacmp(&hdev->public_addr, BDADDR_ANY))
4906 | hci_dev_get_bd_addr_from_property(hdev); |
4907 | |
		if (invalid_bdaddr && bacmp(&hdev->public_addr, BDADDR_ANY) &&
4909 | hdev->set_bdaddr) { |
4910 | ret = hdev->set_bdaddr(hdev, &hdev->public_addr); |
4911 | if (!ret) |
4912 | invalid_bdaddr = false; |
4913 | } |
4914 | } |
4915 | |
4916 | /* The transport driver can set these quirks before |
4917 | * creating the HCI device or in its setup callback. |
4918 | * |
4919 | * For the invalid BD_ADDR quirk it is possible that |
4920 | * it becomes a valid address if the bootloader does |
4921 | * provide it (see above). |
4922 | * |
4923 | * In case any of them is set, the controller has to |
4924 | * start up as unconfigured. |
4925 | */ |
4926 | if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || |
4927 | invalid_bdaddr) |
4928 | hci_dev_set_flag(hdev, HCI_UNCONFIGURED); |
4929 | |
4930 | /* For an unconfigured controller it is required to |
4931 | * read at least the version information provided by |
4932 | * the Read Local Version Information command. |
4933 | * |
4934 | * If the set_bdaddr driver callback is provided, then |
4935 | * also the original Bluetooth public device address |
4936 | * will be read using the Read BD Address command. |
4937 | */ |
4938 | if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) |
4939 | return hci_unconf_init_sync(hdev); |
4940 | |
4941 | return ret; |
4942 | } |
4943 | |
4944 | /* This function handles hdev init stage: |
4945 | * |
4946 | * Calls hci_dev_setup_sync to perform setup stage |
4947 | * Calls hci_init_sync to perform HCI command init sequence |
4948 | */ |
4949 | static int hci_dev_init_sync(struct hci_dev *hdev) |
4950 | { |
4951 | int ret; |
4952 | |
4953 | bt_dev_dbg(hdev, ""); |
4954 | |
	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);
4957 | |
4958 | ret = hci_dev_setup_sync(hdev); |
4959 | |
4960 | if (hci_dev_test_flag(hdev, HCI_CONFIG)) { |
4961 | /* If public address change is configured, ensure that |
4962 | * the address gets programmed. If the driver does not |
4963 | * support changing the public address, fail the power |
4964 | * on procedure. |
4965 | */ |
4966 | if (bacmp(&hdev->public_addr, BDADDR_ANY) && |
4967 | hdev->set_bdaddr) |
4968 | ret = hdev->set_bdaddr(hdev, &hdev->public_addr); |
4969 | else |
4970 | ret = -EADDRNOTAVAIL; |
4971 | } |
4972 | |
4973 | if (!ret) { |
4974 | if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && |
4975 | !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { |
4976 | ret = hci_init_sync(hdev); |
4977 | if (!ret && hdev->post_init) |
4978 | ret = hdev->post_init(hdev); |
4979 | } |
4980 | } |
4981 | |
4982 | /* If the HCI Reset command is clearing all diagnostic settings, |
4983 | * then they need to be reprogrammed after the init procedure |
4984 | * completed. |
4985 | */ |
4986 | if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) && |
4987 | !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && |
4988 | hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag) |
4989 | ret = hdev->set_diag(hdev, true); |
4990 | |
4991 | if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { |
4992 | msft_do_open(hdev); |
4993 | aosp_do_open(hdev); |
4994 | } |
4995 | |
4996 | clear_bit(HCI_INIT, &hdev->flags); |
4997 | |
4998 | return ret; |
4999 | } |
5000 | |
5001 | int hci_dev_open_sync(struct hci_dev *hdev) |
5002 | { |
5003 | int ret; |
5004 | |
5005 | bt_dev_dbg(hdev, ""); |
5006 | |
5007 | if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { |
5008 | ret = -ENODEV; |
5009 | goto done; |
5010 | } |
5011 | |
5012 | if (!hci_dev_test_flag(hdev, HCI_SETUP) && |
5013 | !hci_dev_test_flag(hdev, HCI_CONFIG)) { |
5014 | /* Check for rfkill but allow the HCI setup stage to |
5015 | * proceed (which in itself doesn't cause any RF activity). |
5016 | */ |
5017 | if (hci_dev_test_flag(hdev, HCI_RFKILLED)) { |
5018 | ret = -ERFKILL; |
5019 | goto done; |
5020 | } |
5021 | |
5022 | /* Check for valid public address or a configured static |
5023 | * random address, but let the HCI setup proceed to |
5024 | * be able to determine if there is a public address |
5025 | * or not. |
5026 | * |
5027 | * In case of user channel usage, it is not important |
5028 | * if a public address or static random address is |
5029 | * available. |
5030 | */ |
5031 | if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && |
5032 | !bacmp(&hdev->bdaddr, BDADDR_ANY) && |
5033 | !bacmp(&hdev->static_addr, BDADDR_ANY)) { |
5034 | ret = -EADDRNOTAVAIL; |
5035 | goto done; |
5036 | } |
5037 | } |
5038 | |
5039 | if (test_bit(HCI_UP, &hdev->flags)) { |
5040 | ret = -EALREADY; |
5041 | goto done; |
5042 | } |
5043 | |
5044 | if (hdev->open(hdev)) { |
5045 | ret = -EIO; |
5046 | goto done; |
5047 | } |
5048 | |
5049 | hci_devcd_reset(hdev); |
5050 | |
5051 | set_bit(HCI_RUNNING, &hdev->flags); |
5052 | hci_sock_dev_event(hdev, HCI_DEV_OPEN); |
5053 | |
5054 | ret = hci_dev_init_sync(hdev); |
5055 | if (!ret) { |
5056 | hci_dev_hold(hdev); |
5057 | hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); |
5058 | hci_adv_instances_set_rpa_expired(hdev, true); |
5059 | set_bit(HCI_UP, &hdev->flags); |
5060 | hci_sock_dev_event(hdev, HCI_DEV_UP); |
5061 | hci_leds_update_powered(hdev, true); |
5062 | if (!hci_dev_test_flag(hdev, HCI_SETUP) && |
5063 | !hci_dev_test_flag(hdev, HCI_CONFIG) && |
5064 | !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && |
5065 | !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && |
5066 | hci_dev_test_flag(hdev, HCI_MGMT)) { |
5067 | ret = hci_powered_update_sync(hdev); |
5068 | mgmt_power_on(hdev, ret); |
5069 | } |
5070 | } else { |
5071 | /* Init failed, cleanup */ |
5072 | flush_work(&hdev->tx_work); |
5073 | |
5074 | /* Since hci_rx_work() can wake up new cmd_work, |
5075 | * it should be flushed first to avoid an unexpected call of |
5076 | * hci_cmd_work() |
5077 | */ |
5078 | flush_work(&hdev->rx_work); |
5079 | flush_work(&hdev->cmd_work); |
5080 | |
5081 | skb_queue_purge(&hdev->cmd_q); |
5082 | skb_queue_purge(&hdev->rx_q); |
5083 | |
5084 | if (hdev->flush) |
5085 | hdev->flush(hdev); |
5086 | |
5087 | if (hdev->sent_cmd) { |
5088 | cancel_delayed_work_sync(&hdev->cmd_timer); |
5089 | kfree_skb(hdev->sent_cmd); |
5090 | hdev->sent_cmd = NULL; |
5091 | } |
5092 | |
5093 | if (hdev->req_skb) { |
5094 | kfree_skb(hdev->req_skb); |
5095 | hdev->req_skb = NULL; |
5096 | } |
5097 | |
5098 | clear_bit(HCI_RUNNING, &hdev->flags); |
5099 | hci_sock_dev_event(hdev, HCI_DEV_CLOSE); |
5100 | |
5101 | hdev->close(hdev); |
5102 | hdev->flags &= BIT(HCI_RAW); |
5103 | } |
5104 | |
5105 | done: |
5106 | return ret; |
5107 | } |
5108 | |
5109 | /* This function requires the caller holds hdev->lock */ |
5110 | static void hci_pend_le_actions_clear(struct hci_dev *hdev) |
5111 | { |
5112 | struct hci_conn_params *p; |
5113 | |
5114 | list_for_each_entry(p, &hdev->le_conn_params, list) { |
5115 | hci_pend_le_list_del_init(p); |
5116 | if (p->conn) { |
5117 | hci_conn_drop(p->conn); |
5118 | hci_conn_put(p->conn); |
5119 | p->conn = NULL; |
5120 | } |
5121 | } |
5122 | |
5123 | BT_DBG("All LE pending actions cleared"); |
5124 | } |
5125 | |
5126 | static int hci_dev_shutdown(struct hci_dev *hdev) |
5127 | { |
5128 | int err = 0; |
5129 | /* Similar to how we first do setup and then set the exclusive access |
5130 | * bit for userspace, we must first unset userchannel and then clean up. |
5131 | * Otherwise, the kernel can't properly use the hci channel to clean up |
5132 | * the controller (some shutdown routines require sending additional |
5133 | * commands to the controller for example). |
5134 | */ |
5135 | bool was_userchannel = |
5136 | hci_dev_test_and_clear_flag(hdev, HCI_USER_CHANNEL); |
5137 | |
5138 | if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) && |
5139 | test_bit(HCI_UP, &hdev->flags)) { |
5140 | /* Execute vendor specific shutdown routine */ |
5141 | if (hdev->shutdown) |
5142 | err = hdev->shutdown(hdev); |
5143 | } |
5144 | |
5145 | if (was_userchannel) |
5146 | hci_dev_set_flag(hdev, HCI_USER_CHANNEL); |
5147 | |
5148 | return err; |
5149 | } |
5150 | |
5151 | int hci_dev_close_sync(struct hci_dev *hdev) |
5152 | { |
5153 | bool auto_off; |
5154 | int err = 0; |
5155 | |
5156 | bt_dev_dbg(hdev, ""); |
5157 | |
5158 | if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { |
5159 | disable_delayed_work(&hdev->power_off); |
5160 | disable_delayed_work(&hdev->ncmd_timer); |
5161 | disable_delayed_work(&hdev->le_scan_disable); |
5162 | } else { |
5163 | cancel_delayed_work(&hdev->power_off); |
5164 | cancel_delayed_work(&hdev->ncmd_timer); |
5165 | cancel_delayed_work(&hdev->le_scan_disable); |
5166 | } |
5167 | |
5168 | hci_cmd_sync_cancel_sync(hdev, ENODEV); |
5169 | |
5170 | cancel_interleave_scan(hdev); |
5171 | |
5172 | if (hdev->adv_instance_timeout) { |
5173 | cancel_delayed_work_sync(&hdev->adv_instance_expire); |
5174 | hdev->adv_instance_timeout = 0; |
5175 | } |
5176 | |
5177 | err = hci_dev_shutdown(hdev); |
5178 | |
5179 | if (!test_and_clear_bit(HCI_UP, &hdev->flags)) { |
5180 | cancel_delayed_work_sync(&hdev->cmd_timer); |
5181 | return err; |
5182 | } |
5183 | |
5184 | hci_leds_update_powered(hdev, false); |
5185 | |
5186 | /* Flush RX and TX works */ |
5187 | flush_work(&hdev->tx_work); |
5188 | flush_work(&hdev->rx_work); |
5189 | |
5190 | if (hdev->discov_timeout > 0) { |
5191 | hdev->discov_timeout = 0; |
5192 | hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); |
5193 | hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); |
5194 | } |
5195 | |
5196 | if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) |
5197 | cancel_delayed_work(&hdev->service_cache); |
5198 | |
5199 | if (hci_dev_test_flag(hdev, HCI_MGMT)) { |
5200 | struct adv_info *adv_instance; |
5201 | |
5202 | cancel_delayed_work_sync(&hdev->rpa_expired); |
5203 |
5204 | list_for_each_entry(adv_instance, &hdev->adv_instances, list) |
5205 | cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); |
5206 | } |
5207 | |
5208 | /* Avoid potential lockdep warnings from the *_flush() calls by |
5209 | * ensuring the workqueue is empty up front. |
5210 | */ |
5211 | drain_workqueue(hdev->workqueue); |
5212 | |
5213 | hci_dev_lock(hdev); |
5214 | |
5215 | hci_discovery_set_state(hdev, DISCOVERY_STOPPED); |
5216 | |
5217 | auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF); |
5218 | |
5219 | if (!auto_off && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && |
5220 | hci_dev_test_flag(hdev, HCI_MGMT)) |
5221 | __mgmt_power_off(hdev); |
5222 | |
5223 | hci_inquiry_cache_flush(hdev); |
5224 | hci_pend_le_actions_clear(hdev); |
5225 | hci_conn_hash_flush(hdev); |
5226 | /* Prevent data races on hdev->smp_data or hdev->smp_bredr_data */ |
5227 | smp_unregister(hdev); |
5228 | hci_dev_unlock(hdev); |
5229 | |
5230 | hci_sock_dev_event(hdev, HCI_DEV_DOWN); |
5231 | |
5232 | if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { |
5233 | aosp_do_close(hdev); |
5234 | msft_do_close(hdev); |
5235 | } |
5236 | |
5237 | if (hdev->flush) |
5238 | hdev->flush(hdev); |
5239 | |
5240 | /* Reset device */ |
5241 | skb_queue_purge(&hdev->cmd_q); |
5242 | atomic_set(&hdev->cmd_cnt, 1); |
5243 | if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) && |
5244 | !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { |
5245 | set_bit(HCI_INIT, &hdev->flags); |
5246 | hci_reset_sync(hdev); |
5247 | clear_bit(HCI_INIT, &hdev->flags); |
5248 | } |
5249 | |
5250 | /* flush cmd work */ |
5251 | flush_work(&hdev->cmd_work); |
5252 | |
5253 | /* Drop queues */ |
5254 | skb_queue_purge(&hdev->rx_q); |
5255 | skb_queue_purge(&hdev->cmd_q); |
5256 | skb_queue_purge(&hdev->raw_q); |
5257 | |
5258 | /* Drop last sent command */ |
5259 | if (hdev->sent_cmd) { |
5260 | cancel_delayed_work_sync(&hdev->cmd_timer); |
5261 | kfree_skb(hdev->sent_cmd); |
5262 | hdev->sent_cmd = NULL; |
5263 | } |
5264 | |
5265 | /* Drop last request */ |
5266 | if (hdev->req_skb) { |
5267 | kfree_skb(hdev->req_skb); |
5268 | hdev->req_skb = NULL; |
5269 | } |
5270 | |
5271 | clear_bit(HCI_RUNNING, &hdev->flags); |
5272 | hci_sock_dev_event(hdev, HCI_DEV_CLOSE); |
5273 | |
5274 | /* After this point our queues are empty and no tasks are scheduled. */ |
5275 | hdev->close(hdev); |
5276 | |
5277 | /* Clear flags */ |
5278 | hdev->flags &= BIT(HCI_RAW); |
5279 | hci_dev_clear_volatile_flags(hdev); |
5280 | |
5281 | memset(hdev->eir, 0, sizeof(hdev->eir)); |
5282 | memset(hdev->dev_class, 0, sizeof(hdev->dev_class)); |
5283 | bacpy(&hdev->random_addr, BDADDR_ANY); |
5284 | hci_codec_list_clear(&hdev->local_codecs); |
5285 | |
5286 | hci_dev_put(hdev); |
5287 | return err; |
5288 | } |
5289 | |
5290 | /* This function performs the power on HCI command sequence as follows: |
5291 | * |
5292 | * If the controller is already up (HCI_UP), it performs the |
5293 | * hci_powered_update_sync sequence; otherwise it runs hci_dev_open_sync, |
5294 | * which follows up with hci_powered_update_sync once init has completed. |
5295 | */ |
5296 | static int hci_power_on_sync(struct hci_dev *hdev) |
5297 | { |
5298 | int err; |
5299 | |
5300 | if (test_bit(HCI_UP, &hdev->flags) && |
5301 | hci_dev_test_flag(hdev, HCI_MGMT) && |
5302 | hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) { |
5303 | cancel_delayed_work(&hdev->power_off); |
5304 | return hci_powered_update_sync(hdev); |
5305 | } |
5306 | |
5307 | err = hci_dev_open_sync(hdev); |
5308 | if (err < 0) |
5309 | return err; |
5310 | |
5311 | /* During the HCI setup phase, a few error conditions are |
5312 | * ignored and they need to be checked now. If they are still |
5313 | * valid, it is important to return the device back off. |
5314 | */ |
5315 | if (hci_dev_test_flag(hdev, HCI_RFKILLED) || |
5316 | hci_dev_test_flag(hdev, HCI_UNCONFIGURED) || |
5317 | (!bacmp(&hdev->bdaddr, BDADDR_ANY) && |
5318 | !bacmp(&hdev->static_addr, BDADDR_ANY))) { |
5319 | hci_dev_clear_flag(hdev, HCI_AUTO_OFF); |
5320 | hci_dev_close_sync(hdev); |
5321 | } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) { |
5322 | queue_delayed_work(hdev->req_workqueue, &hdev->power_off, |
5323 | HCI_AUTO_OFF_TIMEOUT); |
5324 | } |
5325 | |
5326 | if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) { |
5327 | /* For unconfigured devices, set the HCI_RAW flag |
5328 | * so that userspace can easily identify them. |
5329 | */ |
5330 | if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) |
5331 | set_bit(HCI_RAW, &hdev->flags); |
5332 | |
5333 | /* For fully configured devices, this will send |
5334 | * the Index Added event. For unconfigured devices, |
5335 | * it will send the Unconfigured Index Added event. |
5336 | * |
5337 | * Devices with HCI_QUIRK_RAW_DEVICE are ignored |
5338 | * and no event will be sent. |
5339 | */ |
5340 | mgmt_index_added(hdev); |
5341 | } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) { |
5342 | /* When the controller is now configured, then it |
5343 | * is important to clear the HCI_RAW flag. |
5344 | */ |
5345 | if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) |
5346 | clear_bit(HCI_RAW, &hdev->flags); |
5347 | |
5348 | /* Powering on the controller with HCI_CONFIG set only |
5349 | * happens with the transition from unconfigured to |
5350 | * configured. This will send the Index Added event. |
5351 | */ |
5352 | mgmt_index_added(hdev); |
5353 | } |
5354 | |
5355 | return 0; |
5356 | } |
5357 | |
5358 | static int hci_remote_name_cancel_sync(struct hci_dev *hdev, bdaddr_t *addr) |
5359 | { |
5360 | struct hci_cp_remote_name_req_cancel cp; |
5361 | |
5362 | memset(&cp, 0, sizeof(cp)); |
5363 | bacpy(&cp.bdaddr, addr); |
5364 | |
5365 | return __hci_cmd_sync_status(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL, |
5366 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
5367 | } |
5368 | |
5369 | int hci_stop_discovery_sync(struct hci_dev *hdev) |
5370 | { |
5371 | struct discovery_state *d = &hdev->discovery; |
5372 | struct inquiry_entry *e; |
5373 | int err; |
5374 | |
5375 | bt_dev_dbg(hdev, "state %u", hdev->discovery.state); |
5376 | |
5377 | if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) { |
5378 | if (test_bit(HCI_INQUIRY, &hdev->flags)) { |
5379 | err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, |
5380 | 0, NULL, HCI_CMD_TIMEOUT); |
5381 | if (err) |
5382 | return err; |
5383 | } |
5384 | |
5385 | if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { |
5386 | cancel_delayed_work(&hdev->le_scan_disable); |
5387 | |
5388 | err = hci_scan_disable_sync(hdev); |
5389 | if (err) |
5390 | return err; |
5391 | } |
5392 | |
5393 | } else { |
5394 | err = hci_scan_disable_sync(hdev); |
5395 | if (err) |
5396 | return err; |
5397 | } |
5398 | |
5399 | /* Resume advertising if it was paused */ |
5400 | if (ll_privacy_capable(hdev)) |
5401 | hci_resume_advertising_sync(hdev); |
5402 | |
5403 | /* No further actions needed for LE-only discovery */ |
5404 | if (d->type == DISCOV_TYPE_LE) |
5405 | return 0; |
5406 | |
5407 | if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) { |
5408 | e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, |
5409 | NAME_PENDING); |
5410 | if (!e) |
5411 | return 0; |
5412 | |
5413 | /* Ignore cancel errors since they shouldn't interfere with |
5414 | * stopping the discovery. |
5415 | */ |
5416 | hci_remote_name_cancel_sync(hdev, &e->data.bdaddr); |
5417 | } |
5418 | |
5419 | return 0; |
5420 | } |
5421 | |
5422 | static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn, |
5423 | u8 reason) |
5424 | { |
5425 | struct hci_cp_disconnect cp; |
5426 | |
5427 | if (test_bit(HCI_CONN_BIG_CREATED, &conn->flags)) { |
5428 | /* This is a BIS connection, hci_conn_del will |
5429 | * do the necessary cleanup. |
5430 | */ |
5431 | hci_dev_lock(hdev); |
5432 | hci_conn_failed(conn, reason); |
5433 | hci_dev_unlock(hdev); |
5434 | |
5435 | return 0; |
5436 | } |
5437 | |
5438 | memset(&cp, 0, sizeof(cp)); |
5439 | cp.handle = cpu_to_le16(conn->handle); |
5440 | cp.reason = reason; |
5441 | |
5442 | /* Wait for HCI_EV_DISCONN_COMPLETE, not HCI_EV_CMD_STATUS, when the |
5443 | * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is |
5444 | * used when suspending or powering off, where we don't want to wait |
5445 | * for the peer's response. |
5446 | */ |
5447 | if (reason != HCI_ERROR_REMOTE_POWER_OFF) |
5448 | return __hci_cmd_sync_status_sk(hdev, HCI_OP_DISCONNECT, |
5449 | sizeof(cp), &cp, |
5450 | HCI_EV_DISCONN_COMPLETE, |
5451 | HCI_CMD_TIMEOUT, NULL); |
5452 | |
5453 | return __hci_cmd_sync_status(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp, |
5454 | HCI_CMD_TIMEOUT); |
5455 | } |
5456 | |
5457 | static int hci_le_connect_cancel_sync(struct hci_dev *hdev, |
5458 | struct hci_conn *conn, u8 reason) |
5459 | { |
5460 | /* Return reason if scanning since the connection shall probably be |
5461 | * cleaned up directly. |
5462 | */ |
5463 | if (test_bit(HCI_CONN_SCANNING, &conn->flags)) |
5464 | return reason; |
5465 | |
5466 | if (conn->role == HCI_ROLE_SLAVE || |
5467 | test_and_set_bit(HCI_CONN_CANCEL, &conn->flags)) |
5468 | return 0; |
5469 | |
5470 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, |
5471 | 0, NULL, HCI_CMD_TIMEOUT); |
5472 | } |
5473 | |
5474 | static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn, |
5475 | u8 reason) |
5476 | { |
5477 | if (conn->type == LE_LINK) |
5478 | return hci_le_connect_cancel_sync(hdev, conn, reason); |
5479 | |
5480 | if (conn->type == CIS_LINK) { |
5481 | /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E |
5482 | * page 1857: |
5483 | * |
5484 | * If this command is issued for a CIS on the Central and the |
5485 | * CIS is successfully terminated before being established, |
5486 | * then an HCI_LE_CIS_Established event shall also be sent for |
5487 | * this CIS with the Status Operation Cancelled by Host (0x44). |
5488 | */ |
5489 | if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) |
5490 | return hci_disconnect_sync(hdev, conn, reason); |
5491 | |
5492 | /* CIS with no Create CIS sent have nothing to cancel */ |
5493 | return HCI_ERROR_LOCAL_HOST_TERM; |
5494 | } |
5495 | |
5496 | if (conn->type == BIS_LINK) { |
5497 | /* There is no way to cancel a BIS without terminating the BIG |
5498 | * which is done later on connection cleanup. |
5499 | */ |
5500 | return 0; |
5501 | } |
5502 | |
5503 | if (hdev->hci_ver < BLUETOOTH_VER_1_2) |
5504 | return 0; |
5505 | |
5506 | /* Wait for HCI_EV_CONN_COMPLETE, not HCI_EV_CMD_STATUS, when the |
5507 | * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is |
5508 | * used when suspending or powering off, where we don't want to wait |
5509 | * for the peer's response. |
5510 | */ |
5511 | if (reason != HCI_ERROR_REMOTE_POWER_OFF) |
5512 | return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN_CANCEL, |
5513 | 6, &conn->dst, |
5514 | HCI_EV_CONN_COMPLETE, |
5515 | HCI_CMD_TIMEOUT, NULL); |
5516 | |
5517 | return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL, |
5518 | 6, &conn->dst, HCI_CMD_TIMEOUT); |
5519 | } |
5520 | |
5521 | static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn, |
5522 | u8 reason) |
5523 | { |
5524 | struct hci_cp_reject_sync_conn_req cp; |
5525 | |
5526 | memset(&cp, 0, sizeof(cp)); |
5527 | bacpy(&cp.bdaddr, &conn->dst); |
5528 | cp.reason = reason; |
5529 | |
5530 | /* SCO rejection has its own limited set of |
5531 | * allowed error values (0x0D-0x0F). |
5532 | */ |
5533 | if (reason < 0x0d || reason > 0x0f) |
5534 | cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES; |
5535 | |
5536 | return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_SYNC_CONN_REQ, |
5537 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
5538 | } |
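| |
| /* For reference (assuming Core Spec error code names): the range accepted |
| * by hci_reject_sco_sync() above maps to 0x0D Connection Rejected due to |
| * Limited Resources, 0x0E Connection Rejected due to Security Reasons and |
| * 0x0F Connection Rejected due to Unacceptable BD_ADDR. |
| */ |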
5539 | |
5540 | static int hci_le_reject_cis_sync(struct hci_dev *hdev, struct hci_conn *conn, |
5541 | u8 reason) |
5542 | { |
5543 | struct hci_cp_le_reject_cis cp; |
5544 | |
5545 | memset(&cp, 0, sizeof(cp)); |
5546 | cp.handle = cpu_to_le16(conn->handle); |
5547 | cp.reason = reason; |
5548 | |
5549 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_REJECT_CIS, |
5550 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
5551 | } |
5552 | |
5553 | static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, |
5554 | u8 reason) |
5555 | { |
5556 | struct hci_cp_reject_conn_req cp; |
5557 | |
5558 | if (conn->type == CIS_LINK) |
5559 | return hci_le_reject_cis_sync(hdev, conn, reason); |
5560 | |
5561 | if (conn->type == BIS_LINK) |
5562 | return -EINVAL; |
5563 | |
5564 | if (conn->type == SCO_LINK || conn->type == ESCO_LINK) |
5565 | return hci_reject_sco_sync(hdev, conn, reason); |
5566 | |
5567 | memset(&cp, 0, sizeof(cp)); |
5568 | bacpy(&cp.bdaddr, &conn->dst); |
5569 | cp.reason = reason; |
5570 | |
5571 | return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_CONN_REQ, |
5572 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
5573 | } |
5574 | |
5575 | int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason) |
5576 | { |
5577 | int err = 0; |
5578 | u16 handle = conn->handle; |
5579 | bool disconnect = false; |
5580 | struct hci_conn *c; |
5581 | |
5582 | switch (conn->state) { |
5583 | case BT_CONNECTED: |
5584 | case BT_CONFIG: |
5585 | err = hci_disconnect_sync(hdev, conn, reason); |
5586 | break; |
5587 | case BT_CONNECT: |
5588 | err = hci_connect_cancel_sync(hdev, conn, reason); |
5589 | break; |
5590 | case BT_CONNECT2: |
5591 | err = hci_reject_conn_sync(hdev, conn, reason); |
5592 | break; |
5593 | case BT_OPEN: |
5594 | case BT_BOUND: |
5595 | break; |
5596 | default: |
5597 | disconnect = true; |
5598 | break; |
5599 | } |
5600 | |
5601 | hci_dev_lock(hdev); |
5602 | |
5603 | /* Check if the connection has been cleaned up concurrently */ |
5604 | c = hci_conn_hash_lookup_handle(hdev, handle); |
5605 | if (!c || c != conn) { |
5606 | err = 0; |
5607 | goto unlock; |
5608 | } |
5609 | |
5610 | /* Clean up the hci_conn object if it cannot be cancelled, as that |
5611 | * likely means the controller and host stack are out of sync, or, |
5612 | * in the LE case, it was still scanning, so it can be cleaned up |
5613 | * safely. |
5614 | */ |
5615 | if (disconnect) { |
5616 | conn->state = BT_CLOSED; |
5617 | hci_disconn_cfm(conn, reason); |
5618 | hci_conn_del(conn); |
5619 | } else { |
5620 | hci_conn_failed(conn, reason); |
5621 | } |
5622 | |
5623 | unlock: |
5624 | hci_dev_unlock(hdev); |
5625 | return err; |
5626 | } |
5627 | |
5628 | static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason) |
5629 | { |
5630 | struct list_head *head = &hdev->conn_hash.list; |
5631 | struct hci_conn *conn; |
5632 | |
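| /* Each pass re-reads the list head under RCU: hci_abort_conn_sync() may |
| * sleep, so the read lock is dropped around it (with a reference held via |
| * hci_conn_get) and the loop restarts from the first remaining connection |
| * until the hash is empty. |
| */ |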
5633 | rcu_read_lock(); |
5634 | while ((conn = list_first_or_null_rcu(head, struct hci_conn, list))) { |
5635 | /* Make sure the connection is not freed while unlocking */ |
5636 | conn = hci_conn_get(conn); |
5637 | rcu_read_unlock(); |
5638 | /* Disregard possible errors since hci_conn_del shall have been |
5639 | * called even if errors occurred, as they would then cause |
5640 | * hci_conn_failed to be called, which calls |
5641 | * hci_conn_del internally. |
5642 | */ |
5643 | hci_abort_conn_sync(hdev, conn, reason); |
5644 | hci_conn_put(conn); |
5645 | rcu_read_lock(); |
5646 | } |
5647 | rcu_read_unlock(); |
5648 | |
5649 | return 0; |
5650 | } |
5651 | |
5652 | /* This function performs the power off HCI command sequence as follows: |
5653 | * |
5654 | * Clear Advertising |
5655 | * Stop Discovery |
5656 | * Disconnect all connections |
5657 | * hci_dev_close_sync |
5658 | */ |
5659 | static int hci_power_off_sync(struct hci_dev *hdev) |
5660 | { |
5661 | int err; |
5662 | |
5663 | /* If controller is already down there is nothing to do */ |
5664 | if (!test_bit(HCI_UP, &hdev->flags)) |
5665 | return 0; |
5666 | |
5667 | hci_dev_set_flag(hdev, HCI_POWERING_DOWN); |
5668 | |
5669 | if (test_bit(HCI_ISCAN, &hdev->flags) || |
5670 | test_bit(HCI_PSCAN, &hdev->flags)) { |
5671 | err = hci_write_scan_enable_sync(hdev, 0x00); |
5672 | if (err) |
5673 | goto out; |
5674 | } |
5675 | |
5676 | err = hci_clear_adv_sync(hdev, NULL, false); |
5677 | if (err) |
5678 | goto out; |
5679 | |
5680 | err = hci_stop_discovery_sync(hdev); |
5681 | if (err) |
5682 | goto out; |
5683 | |
5684 | /* Terminated due to Power Off */ |
5685 | err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF); |
5686 | if (err) |
5687 | goto out; |
5688 | |
5689 | err = hci_dev_close_sync(hdev); |
5690 | |
5691 | out: |
5692 | hci_dev_clear_flag(hdev, HCI_POWERING_DOWN); |
5693 | return err; |
5694 | } |
5695 | |
5696 | int hci_set_powered_sync(struct hci_dev *hdev, u8 val) |
5697 | { |
5698 | if (val) |
5699 | return hci_power_on_sync(hdev); |
5700 | |
5701 | return hci_power_off_sync(hdev); |
5702 | } |
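| |
| /* Illustrative sketch only: callers such as the mgmt Set Powered handler |
| * would normally run this through the cmd_sync machinery rather than call |
| * it directly; set_powered_sync here is a hypothetical wrapper: |
| * |
| * static int set_powered_sync(struct hci_dev *hdev, void *data) |
| * { |
| * return hci_set_powered_sync(hdev, true); |
| * } |
| * |
| * hci_cmd_sync_queue(hdev, set_powered_sync, NULL, NULL); |
| */ |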
5703 | |
5704 | static int hci_write_iac_sync(struct hci_dev *hdev) |
5705 | { |
5706 | struct hci_cp_write_current_iac_lap cp; |
5707 | |
5708 | if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) |
5709 | return 0; |
5710 | |
5711 | memset(&cp, 0, sizeof(cp)); |
5712 | |
5713 | if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { |
5714 | /* Limited discoverable mode */ |
5715 | cp.num_iac = min_t(u8, hdev->num_iac, 2); |
5716 | cp.iac_lap[0] = 0x00; /* LIAC */ |
5717 | cp.iac_lap[1] = 0x8b; |
5718 | cp.iac_lap[2] = 0x9e; |
5719 | cp.iac_lap[3] = 0x33; /* GIAC */ |
5720 | cp.iac_lap[4] = 0x8b; |
5721 | cp.iac_lap[5] = 0x9e; |
5722 | } else { |
5723 | /* General discoverable mode */ |
5724 | cp.num_iac = 1; |
5725 | cp.iac_lap[0] = 0x33; /* GIAC */ |
5726 | cp.iac_lap[1] = 0x8b; |
5727 | cp.iac_lap[2] = 0x9e; |
5728 | } |
5729 | |
5730 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CURRENT_IAC_LAP, |
5731 | (cp.num_iac * 3) + 1, &cp, |
5732 | HCI_CMD_TIMEOUT); |
5733 | } |
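| |
| /* The IAC LAPs above are written LSB first: 0x00 0x8b 0x9e is the Limited |
| * Inquiry Access Code 0x9E8B00 and 0x33 0x8b 0x9e is the General Inquiry |
| * Access Code 0x9E8B33, matching the liac/giac tables used by |
| * hci_inquiry_sync() below. |
| */ |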
5734 | |
5735 | int hci_update_discoverable_sync(struct hci_dev *hdev) |
5736 | { |
5737 | int err = 0; |
5738 | |
5739 | if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { |
5740 | err = hci_write_iac_sync(hdev); |
5741 | if (err) |
5742 | return err; |
5743 | |
5744 | err = hci_update_scan_sync(hdev); |
5745 | if (err) |
5746 | return err; |
5747 | |
5748 | err = hci_update_class_sync(hdev); |
5749 | if (err) |
5750 | return err; |
5751 | } |
5752 | |
5753 | /* Advertising instances don't use the global discoverable setting, so |
5754 | * only update AD if advertising was enabled using Set Advertising. |
5755 | */ |
5756 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) { |
5757 | err = hci_update_adv_data_sync(hdev, 0x00); |
5758 | if (err) |
5759 | return err; |
5760 | |
5761 | /* Discoverable mode affects the local advertising |
5762 | * address in limited privacy mode. |
5763 | */ |
5764 | if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) { |
5765 | if (ext_adv_capable(hdev)) |
5766 | err = hci_start_ext_adv_sync(hdev, 0x00); |
5767 | else |
5768 | err = hci_enable_advertising_sync(hdev); |
5769 | } |
5770 | } |
5771 | |
5772 | return err; |
5773 | } |
5774 | |
5775 | static int update_discoverable_sync(struct hci_dev *hdev, void *data) |
5776 | { |
5777 | return hci_update_discoverable_sync(hdev); |
5778 | } |
5779 | |
5780 | int hci_update_discoverable(struct hci_dev *hdev) |
5781 | { |
5782 | /* Only queue if it would have any effect */ |
5783 | if (hdev_is_powered(hdev) && |
5784 | hci_dev_test_flag(hdev, HCI_ADVERTISING) && |
5785 | hci_dev_test_flag(hdev, HCI_DISCOVERABLE) && |
5786 | hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) |
5787 | return hci_cmd_sync_queue(hdev, update_discoverable_sync, NULL, |
5788 | NULL); |
5789 | |
5790 | return 0; |
5791 | } |
5792 | |
5793 | int hci_update_connectable_sync(struct hci_dev *hdev) |
5794 | { |
5795 | int err; |
5796 | |
5797 | err = hci_update_scan_sync(hdev); |
5798 | if (err) |
5799 | return err; |
5800 | |
5801 | /* If BR/EDR is not enabled and we disable advertising as a |
5802 | * by-product of disabling connectable, we need to update the |
5803 | * advertising flags. |
5804 | */ |
5805 | if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) |
5806 | err = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance); |
5807 | |
5808 | /* Update the advertising parameters if necessary */ |
5809 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || |
5810 | !list_empty(&hdev->adv_instances)) { |
5811 | if (ext_adv_capable(hdev)) |
5812 | err = hci_start_ext_adv_sync(hdev, |
5813 | hdev->cur_adv_instance); |
5814 | else |
5815 | err = hci_enable_advertising_sync(hdev); |
5816 | |
5817 | if (err) |
5818 | return err; |
5819 | } |
5820 | |
5821 | return hci_update_passive_scan_sync(hdev); |
5822 | } |
5823 | |
5824 | int hci_inquiry_sync(struct hci_dev *hdev, u8 length, u8 num_rsp) |
5825 | { |
5826 | const u8 giac[3] = { 0x33, 0x8b, 0x9e }; |
5827 | const u8 liac[3] = { 0x00, 0x8b, 0x9e }; |
5828 | struct hci_cp_inquiry cp; |
5829 | |
5830 | bt_dev_dbg(hdev, ""); |
5831 | |
5832 | if (test_bit(HCI_INQUIRY, &hdev->flags)) |
5833 | return 0; |
5834 | |
5835 | hci_dev_lock(hdev); |
5836 | hci_inquiry_cache_flush(hdev); |
5837 | hci_dev_unlock(hdev); |
5838 | |
5839 | memset(&cp, 0, sizeof(cp)); |
5840 | |
5841 | if (hdev->discovery.limited) |
5842 | memcpy(&cp.lap, liac, sizeof(cp.lap)); |
5843 | else |
5844 | memcpy(&cp.lap, giac, sizeof(cp.lap)); |
5845 | |
5846 | cp.length = length; |
5847 | cp.num_rsp = num_rsp; |
5848 | |
5849 | return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY, |
5850 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
5851 | } |
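| |
| /* Note: per the Core Spec, Inquiry_Length is expressed in units of |
| * 1.28 s, so a length of DISCOV_BREDR_INQUIRY_LEN (0x08) yields an |
| * inquiry of roughly 10.24 seconds. |
| */ |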
5852 | |
5853 | static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval) |
5854 | { |
5855 | u8 own_addr_type; |
5856 | /* Accept list is not used for discovery */ |
5857 | u8 filter_policy = 0x00; |
5858 | /* Default is to enable duplicates filter */ |
5859 | u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE; |
5860 | int err; |
5861 | |
5862 | bt_dev_dbg(hdev, ""); |
5863 | |
5864 | /* If controller is scanning, it means the passive scanning is |
5865 | * running. Thus, we should temporarily stop it in order to set the |
5866 | * discovery scanning parameters. |
5867 | */ |
5868 | err = hci_scan_disable_sync(hdev); |
5869 | if (err) { |
5870 | bt_dev_err(hdev, "Unable to disable scanning: %d", err); |
5871 | return err; |
5872 | } |
5873 | |
5874 | cancel_interleave_scan(hdev); |
5875 | |
5876 | /* Pause address resolution for active scan and stop advertising if |
5877 | * privacy is enabled. |
5878 | */ |
5879 | err = hci_pause_addr_resolution(hdev); |
5880 | if (err) |
5881 | goto failed; |
5882 | |
5883 | /* All active scans will be done with either a resolvable private |
5884 | * address (when privacy feature has been enabled) or non-resolvable |
5885 | * private address. |
5886 | */ |
5887 | err = hci_update_random_address_sync(hdev, true, scan_use_rpa(hdev), |
5888 | &own_addr_type); |
5889 | if (err < 0) |
5890 | own_addr_type = ADDR_LE_DEV_PUBLIC; |
5891 | |
5892 | if (hci_is_adv_monitoring(hdev) || |
5893 | (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) && |
5894 | hdev->discovery.result_filtering)) { |
5895 | /* Duplicate filter should be disabled when some advertisement |
5896 | * monitor is activated, otherwise AdvMon can only receive one |
5897 | * advertisement for one peer(*) during active scanning, and |
5898 | * might report loss to these peers. |
5899 | * |
5900 | * If the controller does strict duplicate filtering and the |
5901 | * discovery requires result filtering, disable controller-based |
5902 | * filtering, since strict filtering can cause reports that would |
5903 | * match the host filter to never be delivered. |
5904 | */ |
5905 | filter_dup = LE_SCAN_FILTER_DUP_DISABLE; |
5906 | } |
5907 | |
5908 | err = hci_start_scan_sync(hdev, LE_SCAN_ACTIVE, interval, |
5909 | hdev->le_scan_window_discovery, |
5910 | own_addr_type, filter_policy, filter_dup); |
5911 | if (!err) |
5912 | return err; |
5913 | |
5914 | failed: |
5915 | /* Resume advertising if it was paused */ |
5916 | if (ll_privacy_capable(hdev)) |
5917 | hci_resume_advertising_sync(hdev); |
5918 | |
5919 | /* Resume passive scanning */ |
5920 | hci_update_passive_scan_sync(hdev); |
5921 | return err; |
5922 | } |
5923 | |
5924 | static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev) |
5925 | { |
5926 | int err; |
5927 | |
5928 | bt_dev_dbg(hdev, ""); |
5929 | |
5930 | err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery * 2); |
5931 | if (err) |
5932 | return err; |
5933 | |
5934 | return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0); |
5935 | } |
5936 | |
5937 | int hci_start_discovery_sync(struct hci_dev *hdev) |
5938 | { |
5939 | unsigned long timeout; |
5940 | int err; |
5941 | |
5942 | bt_dev_dbg(hdev, "type %u", hdev->discovery.type); |
5943 | |
5944 | switch (hdev->discovery.type) { |
5945 | case DISCOV_TYPE_BREDR: |
5946 | return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0); |
5947 | case DISCOV_TYPE_INTERLEAVED: |
5948 | /* When running simultaneous discovery, the LE scanning time |
5949 | * should occupy the whole discovery time, since BR/EDR inquiry |
5950 | * and LE scanning are scheduled by the controller. |
5951 | * |
5952 | * For interleaving discovery in comparison, BR/EDR inquiry |
5953 | * and LE scanning are done sequentially with separate |
5954 | * timeouts. |
5955 | */ |
5956 | if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, |
5957 | &hdev->quirks)) { |
5958 | timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); |
5959 | /* During simultaneous discovery, we double the LE scan |
5960 | * interval. We must leave some time for the controller |
5961 | * to do BR/EDR inquiry. |
5962 | */ |
5963 | err = hci_start_interleaved_discovery_sync(hdev); |
5964 | break; |
5965 | } |
5966 | |
5967 | timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout); |
5968 | err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery); |
5969 | break; |
5970 | case DISCOV_TYPE_LE: |
5971 | timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); |
5972 | err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery); |
5973 | break; |
5974 | default: |
5975 | return -EINVAL; |
5976 | } |
5977 | |
5978 | if (err) |
5979 | return err; |
5980 | |
5981 | bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout)); |
5982 | |
5983 | queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable, |
5984 | timeout); |
5985 | return 0; |
5986 | } |
5987 | |
5988 | static void hci_suspend_monitor_sync(struct hci_dev *hdev) |
5989 | { |
5990 | switch (hci_get_adv_monitor_offload_ext(hdev)) { |
5991 | case HCI_ADV_MONITOR_EXT_MSFT: |
5992 | msft_suspend_sync(hdev); |
5993 | break; |
5994 | default: |
5995 | return; |
5996 | } |
5997 | } |
5998 | |
5999 | /* This function disables discovery and marks it as paused */ |
6000 | static int hci_pause_discovery_sync(struct hci_dev *hdev) |
6001 | { |
6002 | int old_state = hdev->discovery.state; |
6003 | int err; |
6004 | |
6005 | /* If discovery already stopped/stopping/paused there nothing to do */ |
6006 | if (old_state == DISCOVERY_STOPPED || old_state == DISCOVERY_STOPPING || |
6007 | hdev->discovery_paused) |
6008 | return 0; |
6009 | |
6010 | hci_discovery_set_state(hdev, DISCOVERY_STOPPING); |
6011 | err = hci_stop_discovery_sync(hdev); |
6012 | if (err) |
6013 | return err; |
6014 | |
6015 | hdev->discovery_paused = true; |
6016 | hci_discovery_set_state(hdev, DISCOVERY_STOPPED); |
6017 | |
6018 | return 0; |
6019 | } |
6020 | |
6021 | static int hci_update_event_filter_sync(struct hci_dev *hdev) |
6022 | { |
6023 | struct bdaddr_list_with_flags *b; |
6024 | u8 scan = SCAN_DISABLED; |
6025 | bool scanning = test_bit(HCI_PSCAN, &hdev->flags); |
6026 | int err; |
6027 | |
6028 | if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) |
6029 | return 0; |
6030 | |
6031 | /* Some fake CSR controllers lock up after setting this type of |
6032 | * filter, so avoid sending the request altogether. |
6033 | */ |
6034 | if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks)) |
6035 | return 0; |
6036 | |
6037 | /* Always clear event filter when starting */ |
6038 | hci_clear_event_filter_sync(hdev); |
6039 | |
6040 | list_for_each_entry(b, &hdev->accept_list, list) { |
6041 | if (!(b->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) |
6042 | continue; |
6043 | |
6044 | bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr); |
6045 | |
6046 | err = hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP, |
6047 | HCI_CONN_SETUP_ALLOW_BDADDR, |
6048 | &b->bdaddr, |
6049 | HCI_CONN_SETUP_AUTO_ON); |
6050 | if (err) |
6051 | bt_dev_dbg(hdev, "Failed to set event filter for %pMR", |
6052 | &b->bdaddr); |
6053 | else |
6054 | scan = SCAN_PAGE; |
6055 | } |
6056 | |
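| /* Only write the scan enable parameter when the required page scan state |
| * differs from the current one, avoiding a redundant command. |
| */ |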
6057 | if (scan && !scanning) |
6058 | hci_write_scan_enable_sync(hdev, scan); |
6059 | else if (!scan && scanning) |
6060 | hci_write_scan_enable_sync(hdev, scan); |
6061 | |
6062 | return 0; |
6063 | } |
6064 | |
6065 | /* This function disables scanning (BR/EDR and LE) and marks it as paused */ |
6066 | static int hci_pause_scan_sync(struct hci_dev *hdev) |
6067 | { |
6068 | if (hdev->scanning_paused) |
6069 | return 0; |
6070 | |
6071 | /* Disable page scan if enabled */ |
6072 | if (test_bit(HCI_PSCAN, &hdev->flags)) |
6073 | hci_write_scan_enable_sync(hdev, SCAN_DISABLED); |
6074 | |
6075 | hci_scan_disable_sync(hdev); |
6076 | |
6077 | hdev->scanning_paused = true; |
6078 | |
6079 | return 0; |
6080 | } |
6081 | |
6082 | /* This function performs the HCI suspend procedures in the following order: |
6083 | * |
6084 | * Pause discovery (active scanning/inquiry) |
6085 | * Pause Directed Advertising/Advertising |
6086 | * Pause Scanning (passive scanning in case discovery was not active) |
6087 | * Disconnect all connections |
6088 | * Set suspend_status to BT_SUSPEND_DISCONNECT if hdev cannot wakeup |
6089 | * otherwise: |
6090 | * Update event mask (only set events that are allowed to wake up the host) |
6091 | * Update event filter (with devices marked with HCI_CONN_FLAG_REMOTE_WAKEUP) |
6092 | * Update passive scanning (lower duty cycle) |
6093 | * Set suspend_status to BT_SUSPEND_CONFIGURE_WAKE |
6094 | */ |
6095 | int hci_suspend_sync(struct hci_dev *hdev) |
6096 | { |
6097 | int err; |
6098 | |
6099 | /* If marked as suspended, there is nothing to do */ |
6100 | if (hdev->suspended) |
6101 | return 0; |
6102 | |
6103 | /* Mark device as suspended */ |
6104 | hdev->suspended = true; |
6105 | |
6106 | /* Pause discovery if not already stopped */ |
6107 | hci_pause_discovery_sync(hdev); |
6108 | |
6109 | /* Pause other advertisements */ |
6110 | hci_pause_advertising_sync(hdev); |
6111 | |
6112 | /* Suspend monitor filters */ |
6113 | hci_suspend_monitor_sync(hdev); |
6114 | |
6115 | /* Prevent disconnects from causing scanning to be re-enabled */ |
6116 | hci_pause_scan_sync(hdev); |
6117 | |
6118 | if (hci_conn_count(hdev)) { |
6119 | /* Soft disconnect everything (power off) */ |
6120 | err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF); |
6121 | if (err) { |
6122 | /* Set state to BT_RUNNING so resume doesn't notify */ |
6123 | hdev->suspend_state = BT_RUNNING; |
6124 | hci_resume_sync(hdev); |
6125 | return err; |
6126 | } |
6127 | |
6128 | /* Update event mask so only the allowed events can wake up |
6129 | * the host. |
6130 | */ |
6131 | hci_set_event_mask_sync(hdev); |
6132 | } |
6133 | |
6134 | /* Only configure accept list if disconnect succeeded and wake |
6135 | * isn't being prevented. |
6136 | */ |
6137 | if (!hdev->wakeup || !hdev->wakeup(hdev)) { |
6138 | hdev->suspend_state = BT_SUSPEND_DISCONNECT; |
6139 | return 0; |
6140 | } |
6141 | |
6142 | /* Unpause to take care of updating scanning params */ |
6143 | hdev->scanning_paused = false; |
6144 | |
6145 | /* Enable event filter for paired devices */ |
6146 | hci_update_event_filter_sync(hdev); |
6147 | |
6148 | /* Update LE passive scan if enabled */ |
6149 | hci_update_passive_scan_sync(hdev); |
6150 | |
6151 | /* Pause scan changes again. */ |
6152 | hdev->scanning_paused = true; |
6153 | |
6154 | hdev->suspend_state = BT_SUSPEND_CONFIGURE_WAKE; |
6155 | |
6156 | return 0; |
6157 | } |
6158 | |
6159 | /* This function resumes discovery */ |
6160 | static int hci_resume_discovery_sync(struct hci_dev *hdev) |
6161 | { |
6162 | int err; |
6163 | |
6164 | /* If discovery is not paused, there is nothing to do */ |
6165 | if (!hdev->discovery_paused) |
6166 | return 0; |
6167 | |
6168 | hdev->discovery_paused = false; |
6169 | |
6170 | hci_discovery_set_state(hdev, DISCOVERY_STARTING); |
6171 | |
6172 | err = hci_start_discovery_sync(hdev); |
6173 | |
6174 | hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED : |
6175 | DISCOVERY_FINDING); |
6176 | |
6177 | return err; |
6178 | } |
6179 | |
6180 | static void hci_resume_monitor_sync(struct hci_dev *hdev) |
6181 | { |
6182 | switch (hci_get_adv_monitor_offload_ext(hdev)) { |
6183 | case HCI_ADV_MONITOR_EXT_MSFT: |
6184 | msft_resume_sync(hdev); |
6185 | break; |
6186 | default: |
6187 | return; |
6188 | } |
6189 | } |
6190 | |
6191 | /* This function resumes scanning and resets the paused flag */ |
6192 | static int hci_resume_scan_sync(struct hci_dev *hdev) |
6193 | { |
6194 | if (!hdev->scanning_paused) |
6195 | return 0; |
6196 | |
6197 | hdev->scanning_paused = false; |
6198 | |
6199 | hci_update_scan_sync(hdev); |
6200 | |
6201 | /* Reset passive scanning to normal */ |
6202 | hci_update_passive_scan_sync(hdev); |
6203 | |
6204 | return 0; |
6205 | } |
6206 | |
6207 | /* This function performs the HCI resume procedures in the following order: |
6208 | * |
6209 | * Restore event mask |
6210 | * Clear event filter |
6211 | * Update passive scanning (normal duty cycle) |
6212 | * Resume Directed Advertising/Advertising |
6213 | * Resume discovery (active scanning/inquiry) |
6214 | */ |
6215 | int hci_resume_sync(struct hci_dev *hdev) |
6216 | { |
6217 | /* If not marked as suspended, there is nothing to do */ |
6218 | if (!hdev->suspended) |
6219 | return 0; |
6220 | |
6221 | hdev->suspended = false; |
6222 | |
6223 | /* Restore event mask */ |
6224 | hci_set_event_mask_sync(hdev); |
6225 | |
6226 | /* Clear any event filters and restore scan state */ |
6227 | hci_clear_event_filter_sync(hdev); |
6228 | |
6229 | /* Resume scanning */ |
6230 | hci_resume_scan_sync(hdev); |
6231 | |
6232 | /* Resume monitor filters */ |
6233 | hci_resume_monitor_sync(hdev); |
6234 | |
6235 | /* Resume other advertisements */ |
6236 | hci_resume_advertising_sync(hdev); |
6237 | |
6238 | /* Resume discovery */ |
6239 | hci_resume_discovery_sync(hdev); |
6240 | |
6241 | return 0; |
6242 | } |
6243 | |
6244 | static bool conn_use_rpa(struct hci_conn *conn) |
6245 | { |
6246 | struct hci_dev *hdev = conn->hdev; |
6247 | |
6248 | return hci_dev_test_flag(hdev, HCI_PRIVACY); |
6249 | } |
6250 | |
6251 | static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev, |
6252 | struct hci_conn *conn) |
6253 | { |
6254 | struct hci_cp_le_set_ext_adv_params cp; |
6255 | int err; |
6256 | bdaddr_t random_addr; |
6257 | u8 own_addr_type; |
6258 | |
6259 | err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn), |
6260 | &own_addr_type); |
6261 | if (err) |
6262 | return err; |
6263 | |
6264 | /* Set require_privacy to false so that the remote device has a |
6265 | * chance of identifying us. |
6266 | */ |
6267 | err = hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL, |
6268 | &own_addr_type, &random_addr); |
6269 | if (err) |
6270 | return err; |
6271 | |
6272 | memset(&cp, 0, sizeof(cp)); |
6273 | |
6274 | cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND); |
6275 | cp.channel_map = hdev->le_adv_channel_map; |
6276 | cp.tx_power = HCI_TX_POWER_INVALID; |
6277 | cp.primary_phy = HCI_ADV_PHY_1M; |
6278 | cp.secondary_phy = HCI_ADV_PHY_1M; |
6279 | cp.handle = 0x00; /* Use instance 0 for directed adv */ |
6280 | cp.own_addr_type = own_addr_type; |
6281 | cp.peer_addr_type = conn->dst_type; |
6282 | bacpy(&cp.peer_addr, &conn->dst); |
6283 | |
6284 | /* As per Core Spec 5.2 Vol 2, Part E, Sec 7.8.53, the |
6285 | * advertising_event_property LE_LEGACY_ADV_DIRECT_IND does not |
6286 | * support advertising data; when the advertising set already |
6287 | * contains some, the controller shall return the error code |
6288 | * 'Invalid HCI Command Parameters' (0x12). |
6289 | * So it is required to remove the adv set for handle 0x00, since |
6290 | * we use instance 0 for directed advertising. |
6291 | */ |
6292 | err = hci_remove_ext_adv_instance_sync(hdev, cp.handle, NULL); |
6293 | if (err) |
6294 | return err; |
6295 | |
6296 | err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, |
6297 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
6298 | if (err) |
6299 | return err; |
6300 | |
6301 | /* Check if the random address needs to be updated */ |
6302 | if (own_addr_type == ADDR_LE_DEV_RANDOM && |
6303 | bacmp(&random_addr, BDADDR_ANY) && |
6304 | bacmp(&random_addr, &hdev->random_addr)) { |
6305 | err = hci_set_adv_set_random_addr_sync(hdev, 0x00, |
6306 | &random_addr); |
6307 | if (err) |
6308 | return err; |
6309 | } |
6310 | |
6311 | return hci_enable_ext_advertising_sync(hdev, 0x00); |
6312 | } |
6313 | |
6314 | static int hci_le_directed_advertising_sync(struct hci_dev *hdev, |
6315 | struct hci_conn *conn) |
6316 | { |
6317 | struct hci_cp_le_set_adv_param cp; |
6318 | u8 status; |
6319 | u8 own_addr_type; |
6320 | u8 enable; |
6321 | |
6322 | if (ext_adv_capable(hdev)) |
6323 | return hci_le_ext_directed_advertising_sync(hdev, conn); |
6324 | |
6325 | /* Clear the HCI_LE_ADV bit temporarily so that the |
6326 | * hci_update_random_address knows that it's safe to go ahead |
6327 | * and write a new random address. The flag will be set back on |
6328 | * as soon as the SET_ADV_ENABLE HCI command completes. |
6329 | */ |
6330 | hci_dev_clear_flag(hdev, HCI_LE_ADV); |
6331 | |
6332 | /* Set require_privacy to false so that the remote device has a |
6333 | * chance of identifying us. |
6334 | */ |
6335 | status = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn), |
6336 | &own_addr_type); |
6337 | if (status) |
6338 | return status; |
6339 | |
6340 | memset(&cp, 0, sizeof(cp)); |
6341 | |
6342 | /* Some controllers might reject command if intervals are not |
6343 | * within range for undirected advertising. |
6344 | * BCM20702A0 is known to be affected by this. |
6345 | */ |
6346 | cp.min_interval = cpu_to_le16(0x0020); |
6347 | cp.max_interval = cpu_to_le16(0x0020); |
6348 | |
6349 | cp.type = LE_ADV_DIRECT_IND; |
6350 | cp.own_address_type = own_addr_type; |
6351 | cp.direct_addr_type = conn->dst_type; |
6352 | bacpy(&cp.direct_addr, &conn->dst); |
6353 | cp.channel_map = hdev->le_adv_channel_map; |
6354 | |
6355 | status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM, |
6356 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
6357 | if (status) |
6358 | return status; |
6359 | |
6360 | enable = 0x01; |
6361 | |
6362 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE, |
6363 | sizeof(enable), &enable, HCI_CMD_TIMEOUT); |
6364 | } |
6365 | |
6366 | static void set_ext_conn_params(struct hci_conn *conn, |
6367 | struct hci_cp_le_ext_conn_param *p) |
6368 | { |
6369 | struct hci_dev *hdev = conn->hdev; |
6370 | |
6371 | memset(p, 0, sizeof(*p)); |
6372 | |
6373 | p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect); |
6374 | p->scan_window = cpu_to_le16(hdev->le_scan_window_connect); |
6375 | p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); |
6376 | p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); |
6377 | p->conn_latency = cpu_to_le16(conn->le_conn_latency); |
6378 | p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout); |
6379 | p->min_ce_len = cpu_to_le16(0x0000); |
6380 | p->max_ce_len = cpu_to_le16(0x0000); |
6381 | } |
6382 | |
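| /* LE Extended Create Connection takes a variable-length parameter block: |
| * the fixed header is followed by one hci_cp_le_ext_conn_param entry per |
| * bit set in cp->phys, in 1M, 2M, Coded order. The function below builds |
| * that layout on the stack, sizing data[] for the worst case of all three |
| * PHYs. |
| */ |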
6383 | static int hci_le_ext_create_conn_sync(struct hci_dev *hdev, |
6384 | struct hci_conn *conn, u8 own_addr_type) |
6385 | { |
6386 | struct hci_cp_le_ext_create_conn *cp; |
6387 | struct hci_cp_le_ext_conn_param *p; |
6388 | u8 data[sizeof(*cp) + sizeof(*p) * 3]; |
6389 | u32 plen; |
6390 | |
6391 | cp = (void *)data; |
6392 | p = (void *)cp->data; |
6393 | |
6394 | memset(cp, 0, sizeof(*cp)); |
6395 | |
6396 | bacpy(&cp->peer_addr, &conn->dst); |
6397 | cp->peer_addr_type = conn->dst_type; |
6398 | cp->own_addr_type = own_addr_type; |
6399 | |
6400 | plen = sizeof(*cp); |
6401 | |
6402 | if (scan_1m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_1M || |
6403 | conn->le_adv_sec_phy == HCI_ADV_PHY_1M)) { |
6404 | cp->phys |= LE_SCAN_PHY_1M; |
6405 | set_ext_conn_params(conn, p); |
6406 | |
6407 | p++; |
6408 | plen += sizeof(*p); |
6409 | } |
6410 | |
6411 | if (scan_2m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_2M || |
6412 | conn->le_adv_sec_phy == HCI_ADV_PHY_2M)) { |
6413 | cp->phys |= LE_SCAN_PHY_2M; |
6414 | set_ext_conn_params(conn, p); |
6415 | |
6416 | p++; |
6417 | plen += sizeof(*p); |
6418 | } |
6419 | |
6420 | if (scan_coded(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_CODED || |
6421 | conn->le_adv_sec_phy == HCI_ADV_PHY_CODED)) { |
6422 | cp->phys |= LE_SCAN_PHY_CODED; |
6423 | set_ext_conn_params(conn, p); |
6424 | |
6425 | plen += sizeof(*p); |
6426 | } |
6427 | |
6428 | return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_EXT_CREATE_CONN, |
6429 | plen, data, |
6430 | HCI_EV_LE_ENHANCED_CONN_COMPLETE, |
6431 | conn->conn_timeout, NULL); |
6432 | } |
6433 | |
6434 | static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data) |
6435 | { |
6436 | struct hci_cp_le_create_conn cp; |
6437 | struct hci_conn_params *params; |
6438 | u8 own_addr_type; |
6439 | int err; |
6440 | struct hci_conn *conn = data; |
6441 | |
6442 | if (!hci_conn_valid(hdev, conn)) |
6443 | return -ECANCELED; |
6444 | |
6445 | bt_dev_dbg(hdev, "conn %p", conn); |
6446 | |
6447 | clear_bit(HCI_CONN_SCANNING, &conn->flags); |
6448 | conn->state = BT_CONNECT; |
6449 | |
6450 | /* If requested to connect as peripheral, use directed advertising */ |
6451 | if (conn->role == HCI_ROLE_SLAVE) { |
6452 | /* If we're active scanning and simultaneous roles is not |
6453 | * enabled simply reject the attempt. |
6454 | */ |
6455 | if (hci_dev_test_flag(hdev, HCI_LE_SCAN) && |
6456 | hdev->le_scan_type == LE_SCAN_ACTIVE && |
6457 | !hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) { |
6458 | hci_conn_del(conn); |
6459 | return -EBUSY; |
6460 | } |
6461 | |
6462 | /* Pause advertising while doing directed advertising. */ |
6463 | hci_pause_advertising_sync(hdev); |
6464 | |
6465 | err = hci_le_directed_advertising_sync(hdev, conn); |
6466 | goto done; |
6467 | } |
6468 | |
6469 | /* Disable advertising if simultaneous roles is not in use. */ |
6470 | if (!hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) |
6471 | hci_pause_advertising_sync(hdev); |
6472 | |
6473 | params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); |
6474 | if (params) { |
6475 | conn->le_conn_min_interval = params->conn_min_interval; |
6476 | conn->le_conn_max_interval = params->conn_max_interval; |
6477 | conn->le_conn_latency = params->conn_latency; |
6478 | conn->le_supv_timeout = params->supervision_timeout; |
6479 | } else { |
6480 | conn->le_conn_min_interval = hdev->le_conn_min_interval; |
6481 | conn->le_conn_max_interval = hdev->le_conn_max_interval; |
6482 | conn->le_conn_latency = hdev->le_conn_latency; |
6483 | conn->le_supv_timeout = hdev->le_supv_timeout; |
6484 | } |
6485 | |
6486 | /* If controller is scanning, we stop it since some controllers are |
6487 | * not able to scan and connect at the same time. Also set the |
6488 | * HCI_LE_SCAN_INTERRUPTED flag so that the command complete |
6489 | * handler for scan disabling knows to set the correct discovery |
6490 | * state. |
6491 | */ |
6492 | if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { |
6493 | hci_scan_disable_sync(hdev); |
6494 | hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED); |
6495 | } |
6496 | |
6497 | /* Update random address, but set require_privacy to false so |
6498 | * that we never connect with a non-resolvable address. |
6499 | */ |
6500 | err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn), |
6501 | &own_addr_type); |
6502 | if (err) |
6503 | goto done; |
6504 | /* Send command LE Extended Create Connection if supported */ |
6505 | if (use_ext_conn(hdev)) { |
6506 | err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type); |
6507 | goto done; |
6508 | } |
6509 | |
6510 | memset(&cp, 0, sizeof(cp)); |
6511 | |
6512 | cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect); |
6513 | cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect); |
6514 | |
6515 | bacpy(&cp.peer_addr, &conn->dst); |
6516 | cp.peer_addr_type = conn->dst_type; |
6517 | cp.own_address_type = own_addr_type; |
6518 | cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); |
6519 | cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); |
6520 | cp.conn_latency = cpu_to_le16(conn->le_conn_latency); |
6521 | cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout); |
6522 | cp.min_ce_len = cpu_to_le16(0x0000); |
6523 | cp.max_ce_len = cpu_to_le16(0x0000); |
6524 | |
6525 | /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2261: |
6526 | * |
6527 | * If this event is unmasked and the HCI_LE_Connection_Complete event |
6528 | * is unmasked, only the HCI_LE_Enhanced_Connection_Complete event is |
6529 | * sent when a new connection has been created. |
6530 | */ |
6531 | err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CONN, |
6532 | sizeof(cp), &cp, |
6533 | use_enhanced_conn_complete(hdev) ? |
6534 | HCI_EV_LE_ENHANCED_CONN_COMPLETE : |
6535 | HCI_EV_LE_CONN_COMPLETE, |
6536 | conn->conn_timeout, NULL); |
6537 | |
6538 | done: |
6539 | if (err == -ETIMEDOUT) |
6540 | hci_le_connect_cancel_sync(hdev, conn, 0x00); |
6541 | |
6542 | /* Re-enable advertising after the connection attempt is finished. */ |
6543 | hci_resume_advertising_sync(hdev); |
6544 | return err; |
6545 | } |
6546 | |
6547 | int hci_le_create_cis_sync(struct hci_dev *hdev) |
6548 | { |
6549 | DEFINE_FLEX(struct hci_cp_le_create_cis, cmd, cis, num_cis, 0x1f); |
6550 | size_t aux_num_cis = 0; |
6551 | struct hci_conn *conn; |
6552 | u8 cig = BT_ISO_QOS_CIG_UNSET; |
6553 | |
6554 | /* The spec allows only one pending LE Create CIS command at a time. If |
6555 | * the command is pending now, don't do anything. We check for pending |
6556 | * connections after each CIS Established event. |
6557 | * |
6558 | * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E |
6559 | * page 2566: |
6560 | * |
6561 | * If the Host issues this command before all the |
6562 | * HCI_LE_CIS_Established events from the previous use of the |
6563 | * command have been generated, the Controller shall return the |
6564 | * error code Command Disallowed (0x0C). |
6565 | * |
6566 | * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E |
6567 | * page 2567: |
6568 | * |
6569 | * When the Controller receives the HCI_LE_Create_CIS command, the |
6570 | * Controller sends the HCI_Command_Status event to the Host. An |
6571 | * HCI_LE_CIS_Established event will be generated for each CIS when it |
6572 | * is established or if it is disconnected or considered lost before |
6573 | * being established; until all the events are generated, the command |
6574 | * remains pending. |
6575 | */ |
6576 | |
6577 | hci_dev_lock(hdev); |
6578 | |
6579 | rcu_read_lock(); |
6580 | |
6581 | /* Wait until previous Create CIS has completed */ |
6582 | list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { |
6583 | if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) |
6584 | goto done; |
6585 | } |
6586 | |
6587 | /* Find CIG with all CIS ready */ |
6588 | list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { |
6589 | struct hci_conn *link; |
6590 | |
6591 | if (hci_conn_check_create_cis(conn)) |
6592 | continue; |
6593 | |
6594 | cig = conn->iso_qos.ucast.cig; |
6595 | |
6596 | list_for_each_entry_rcu(link, &hdev->conn_hash.list, list) { |
6597 | if (hci_conn_check_create_cis(link) > 0 && |
6598 | link->iso_qos.ucast.cig == cig && |
6599 | link->state != BT_CONNECTED) { |
6600 | cig = BT_ISO_QOS_CIG_UNSET; |
6601 | break; |
6602 | } |
6603 | } |
6604 | |
6605 | if (cig != BT_ISO_QOS_CIG_UNSET) |
6606 | break; |
6607 | } |
6608 | |
6609 | if (cig == BT_ISO_QOS_CIG_UNSET) |
6610 | goto done; |
6611 | |
6612 | list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { |
6613 | struct hci_cis *cis = &cmd->cis[aux_num_cis]; |
6614 | |
6615 | if (hci_conn_check_create_cis(conn) || |
6616 | conn->iso_qos.ucast.cig != cig) |
6617 | continue; |
6618 | |
6619 | set_bit(nr: HCI_CONN_CREATE_CIS, addr: &conn->flags); |
6620 | cis->acl_handle = cpu_to_le16(conn->parent->handle); |
6621 | cis->cis_handle = cpu_to_le16(conn->handle); |
6622 | aux_num_cis++; |
6623 | |
6624 | if (aux_num_cis >= cmd->num_cis) |
6625 | break; |
6626 | } |
6627 | cmd->num_cis = aux_num_cis; |
6628 | |
6629 | done: |
6630 | rcu_read_unlock(); |
6631 | |
6632 | hci_dev_unlock(hdev); |
6633 | |
6634 | if (!aux_num_cis) |
6635 | return 0; |
6636 | |
6637 | /* Wait for HCI_LE_CIS_Established */ |
6638 | return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CIS, |
6639 | struct_size(cmd, cis, cmd->num_cis), |
6640 | cmd, HCI_EVT_LE_CIS_ESTABLISHED, |
6641 | conn->conn_timeout, NULL); |
6642 | } |
6643 | |
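/* Remove an isochronous group: issue LE Remove CIG for the given CIG id
 * and wait for the command to complete.
 */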
int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle)
{
	struct hci_cp_le_remove_cig cp;

	memset(&cp, 0, sizeof(cp));
	cp.cig_id = handle;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REMOVE_CIG, sizeof(cp),
				     &cp, HCI_CMD_TIMEOUT);
}

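/* Terminate synchronization to the BIG identified by handle
 * (LE BIG Terminate Sync).
 */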
int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle)
{
	struct hci_cp_le_big_term_sync cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = handle;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_BIG_TERM_SYNC,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

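/* Stop synchronizing to the periodic advertising train identified by the
 * given sync handle (LE Periodic Advertising Terminate Sync).
 */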
int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle)
{
	struct hci_cp_le_pa_term_sync cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(handle);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_TERM_SYNC,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

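/* Pick the own address type and, when needed, the random address to use:
 * an RPA when use_rpa is set (regenerated only if the current one has
 * expired), a fresh NRPA when only require_privacy is set, or the public
 * address otherwise. rand_addr is left as BDADDR_ANY in the public case.
 */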
int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled, use a resolvable private address. If the
	 * current RPA has expired, generate a new one.
	 */
	if (use_rpa) {
		/* If the Controller supports LL Privacy, use own address
		 * type 0x03 (resolvable private address).
		 */
		if (ll_privacy_capable(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (adv_rpa_valid(adv_instance))
				return 0;
		} else {
			if (rpa_valid(hdev))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		return 0;
	}

	/* If privacy is required but a resolvable private address cannot be
	 * used, fall back to a non-resolvable private address. This is
	 * useful for non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from six random bytes with the two most
			 * significant bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(rand_addr, &nrpa);

		return 0;
	}

	/* No privacy, so use a public address. */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

static int _update_adv_data_sync(struct hci_dev *hdev, void *data)
{
	u8 instance = PTR_UINT(data);

	return hci_update_adv_data_sync(hdev, instance);
}

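/* Queue an advertising data update for the given instance; the instance
 * number travels through the opaque data pointer via UINT_PTR/PTR_UINT.
 */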
int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
				  UINT_PTR(instance), NULL);
}

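/* Issue HCI Create Connection for a BR/EDR ACL link and wait for
 * HCI_EV_CONN_COMPLETE. Cached inquiry data, when fresh enough, is used
 * to fill in the page scan parameters and the clock offset.
 */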
static int hci_acl_create_conn_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;
	int err;

	if (!hci_conn_valid(hdev, conn))
		return -ECANCELED;

	/* Many controllers disallow HCI Create Connection while they are
	 * doing HCI Inquiry, so cancel the Inquiry first before issuing HCI
	 * Create Connection. This may cause the MGMT discovering state to
	 * become false without user space's request, but that is okay since
	 * the MGMT Discovery APIs do not promise that discovery will run
	 * forever. Instead, user space monitors the MGMT discovering state
	 * and may request discovery again when the flag becomes false.
	 */
	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
		err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, 0,
					    NULL, HCI_CMD_TIMEOUT);
		if (err)
			bt_dev_warn(hdev, "Failed to cancel inquiry %d", err);
	}

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->role = HCI_ROLE_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode = ie->data.pscan_mode;
			cp.clock_offset = ie->data.clock_offset |
					  cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN,
					sizeof(cp), &cp,
					HCI_EV_CONN_COMPLETE,
					conn->conn_timeout, NULL);
}

int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	return hci_cmd_sync_queue_once(hdev, hci_acl_create_conn_sync, conn,
				       NULL);
}

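/* Completion callback for hci_le_create_conn_sync when run from the
 * command queue: on success only the scan cleanup is needed; on failure
 * the pending connection, if it is still the one being created, is
 * failed with the translated HCI status.
 */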
static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err == -ECANCELED)
		return;

	hci_dev_lock(hdev);

	if (!hci_conn_valid(hdev, conn))
		goto done;

	if (!err) {
		hci_connect_le_scan_cleanup(conn, 0x00);
		goto done;
	}

	/* Check if connection is still pending */
	if (conn != hci_lookup_le_connect(hdev))
		goto done;

	/* Flush to make sure we send create conn cancel command if needed */
	flush_delayed_work(&conn->le_conn_timeout);
	hci_conn_failed(conn, bt_status(err));

done:
	hci_dev_unlock(hdev);
}

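/* Queue an LE connection attempt; hci_cmd_sync_queue_once prevents the
 * same connection from being queued twice.
 */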
int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	return hci_cmd_sync_queue_once(hdev, hci_le_create_conn_sync, conn,
				       create_le_conn_complete);
}

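/* Try to cancel a queued, not-yet-started connection attempt by removing
 * it from the command queue: returns 0 if an entry was dequeued, a
 * positive value if the attempt was no longer queued, -EINVAL if the
 * connection has already left BT_OPEN, and -ENOENT for other link types.
 */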
int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	if (conn->state != BT_OPEN)
		return -EINVAL;

	switch (conn->type) {
	case ACL_LINK:
		return !hci_cmd_sync_dequeue_once(hdev,
						  hci_acl_create_conn_sync,
						  conn, NULL);
	case LE_LINK:
		return !hci_cmd_sync_dequeue_once(hdev,
						  hci_le_create_conn_sync,
						  conn,
						  create_le_conn_complete);
	}

	return -ENOENT;
}

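/* Issue LE Connection Update with the parameters from params; the CE
 * length fields are left at zero, as elsewhere in this file.
 */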
int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn,
			    struct hci_conn_params *params)
{
	struct hci_cp_le_conn_update cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(conn->handle);
	cp.conn_interval_min = cpu_to_le16(params->conn_min_interval);
	cp.conn_interval_max = cpu_to_le16(params->conn_max_interval);
	cp.conn_latency = cpu_to_le16(params->conn_latency);
	cp.supervision_timeout = cpu_to_le16(params->supervision_timeout);
	cp.min_ce_len = cpu_to_le16(0x0000);
	cp.max_ce_len = cpu_to_le16(0x0000);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CONN_UPDATE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

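/* Completion callback for hci_le_pa_create_sync: clears the device's
 * HCI_PA_SYNC flag and, on failure, adds a placeholder BIS link marked
 * HCI_CONN_PA_SYNC_FAILED so hci_connect_cfm can notify the ISO layer
 * of the error.
 */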
static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;
	struct hci_conn *pa_sync;

	bt_dev_dbg(hdev, "err %d", err);

	if (err == -ECANCELED)
		return;

	hci_dev_lock(hdev);

	hci_dev_clear_flag(hdev, HCI_PA_SYNC);

	if (!hci_conn_valid(hdev, conn))
		clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);

	if (!err)
		goto unlock;

	/* Add connection to indicate PA sync error */
	pa_sync = hci_conn_add_unset(hdev, BIS_LINK, BDADDR_ANY,
				     HCI_ROLE_SLAVE);

	if (IS_ERR(pa_sync))
		goto unlock;

	set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);

	/* Notify iso layer */
	hci_connect_cfm(pa_sync, bt_status(err));

unlock:
	hci_dev_unlock(hdev);
}

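/* Synchronize to a periodic advertising train. If the SID is still
 * unknown, passive scanning, restricted to the peer address via the
 * allow list, is used to learn it from extended advertising reports
 * before LE Periodic Advertising Create Sync is issued.
 */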
static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data)
{
	struct hci_cp_le_pa_create_sync cp;
	struct hci_conn *conn = data;
	struct bt_iso_qos *qos = &conn->iso_qos;
	int err;

	if (!hci_conn_valid(hdev, conn))
		return -ECANCELED;

	if (conn->sync_handle != HCI_SYNC_HANDLE_INVALID)
		return -EINVAL;

	if (hci_dev_test_and_set_flag(hdev, HCI_PA_SYNC))
		return -EBUSY;

	/* Stop active scanning if the SID has not been set yet, so that
	 * passive scanning, with the allow list programmed to contain only
	 * the connection address, takes over.
	 */
	if (conn->sid == HCI_SID_INVALID &&
	    hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
		hci_scan_disable_sync(hdev);
		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

	/* Mark HCI_CONN_CREATE_PA_SYNC so hci_update_passive_scan_sync
	 * programs the address into the allow list and PA advertisements
	 * can be received.
	 */
	set_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);

	hci_update_passive_scan_sync(hdev);

	/* If the SID has not been set yet, listen for
	 * HCI_EV_LE_EXT_ADV_REPORT to learn it.
	 */
	if (conn->sid == HCI_SID_INVALID)
		__hci_cmd_sync_status_sk(hdev, HCI_OP_NOP, 0, NULL,
					 HCI_EV_LE_EXT_ADV_REPORT,
					 conn->conn_timeout, NULL);

	memset(&cp, 0, sizeof(cp));
	cp.options = qos->bcast.options;
	cp.sid = conn->sid;
	cp.addr_type = conn->dst_type;
	bacpy(&cp.addr, &conn->dst);
	cp.skip = cpu_to_le16(qos->bcast.skip);
	cp.sync_timeout = cpu_to_le16(qos->bcast.sync_timeout);
	cp.sync_cte_type = qos->bcast.sync_cte_type;

	/* The spec allows only one pending LE Periodic Advertising Create
	 * Sync command at a time, so we forcefully wait for the PA Sync
	 * Established event since cmd_work can only schedule one command at
	 * a time.
	 *
	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
	 * page 2493:
	 *
	 * If the Host issues this command when another HCI_LE_Periodic_
	 * Advertising_Create_Sync command is pending, the Controller shall
	 * return the error code Command Disallowed (0x0C).
	 */
	err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_PA_CREATE_SYNC,
				       sizeof(cp), &cp,
				       HCI_EV_LE_PA_SYNC_ESTABLISHED,
				       conn->conn_timeout, NULL);
	if (err == -ETIMEDOUT)
		__hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC_CANCEL,
				      0, NULL, HCI_CMD_TIMEOUT);

	return err;
}

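/* Queue PA sync creation for the given connection; queued at most once
 * per connection, with create_pa_complete handling the outcome.
 */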
int hci_connect_pa_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	return hci_cmd_sync_queue_once(hdev, hci_le_pa_create_sync, conn,
				       create_pa_complete);
}

static void create_big_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err == -ECANCELED)
		return;

	if (hci_conn_valid(hdev, conn))
		clear_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);
}

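/* Synchronize to a BIG: build LE BIG Create Sync from the connection's
 * broadcast QoS and BIS list, then wait for
 * HCI_EVT_LE_BIG_SYNC_ESTABLISHED, terminating the BIG on timeout.
 */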
static int hci_le_big_create_sync(struct hci_dev *hdev, void *data)
{
	DEFINE_FLEX(struct hci_cp_le_big_create_sync, cp, bis, num_bis, 0x11);
	struct hci_conn *conn = data;
	struct bt_iso_qos *qos = &conn->iso_qos;
	int err;

	if (!hci_conn_valid(hdev, conn))
		return -ECANCELED;

	set_bit(HCI_CONN_CREATE_BIG_SYNC, &conn->flags);

	memset(cp, 0, sizeof(*cp));
	cp->handle = qos->bcast.big;
	cp->sync_handle = cpu_to_le16(conn->sync_handle);
	cp->encryption = qos->bcast.encryption;
	memcpy(cp->bcode, qos->bcast.bcode, sizeof(cp->bcode));
	cp->mse = qos->bcast.mse;
	cp->timeout = cpu_to_le16(qos->bcast.timeout);
	cp->num_bis = conn->num_bis;
	memcpy(cp->bis, conn->bis, conn->num_bis);

	/* The spec allows only one pending LE BIG Create Sync command at
	 * a time, so we forcefully wait for the BIG Sync Established event
	 * since cmd_work can only schedule one command at a time.
	 *
	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
	 * page 2586:
	 *
	 * If the Host sends this command when the Controller is in the
	 * process of synchronizing to any BIG, i.e. the HCI_LE_BIG_Sync_
	 * Established event has not been generated, the Controller shall
	 * return the error code Command Disallowed (0x0C).
	 */
	err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_BIG_CREATE_SYNC,
				       struct_size(cp, bis, cp->num_bis), cp,
				       HCI_EVT_LE_BIG_SYNC_ESTABLISHED,
				       conn->conn_timeout, NULL);
	if (err == -ETIMEDOUT)
		hci_le_big_terminate_sync(hdev, cp->handle);

	return err;
}

int hci_connect_big_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	return hci_cmd_sync_queue_once(hdev, hci_le_big_create_sync, conn,
				       create_big_complete);
}