1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * BlueZ - Bluetooth protocol stack for Linux |
4 | * |
5 | * Copyright (C) 2021 Intel Corporation |
6 | * Copyright 2023 NXP |
7 | */ |
8 | |
9 | #include <linux/property.h> |
10 | |
11 | #include <net/bluetooth/bluetooth.h> |
12 | #include <net/bluetooth/hci_core.h> |
13 | #include <net/bluetooth/mgmt.h> |
14 | |
15 | #include "hci_request.h" |
16 | #include "hci_codec.h" |
17 | #include "hci_debugfs.h" |
18 | #include "smp.h" |
19 | #include "eir.h" |
20 | #include "msft.h" |
21 | #include "aosp.h" |
22 | #include "leds.h" |
23 | |
24 | static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, |
25 | struct sk_buff *skb) |
26 | { |
27 | bt_dev_dbg(hdev, "result 0x%2.2x", result); |
28 | |
29 | if (hdev->req_status != HCI_REQ_PEND) |
30 | return; |
31 | |
32 | hdev->req_result = result; |
33 | hdev->req_status = HCI_REQ_DONE; |
34 | |
35 | /* Free the request command so it is not used as response */ |
36 | kfree_skb(hdev->req_skb); |
37 | hdev->req_skb = NULL; |
38 | |
39 | if (skb) { |
40 | struct sock *sk = hci_skb_sk(skb); |
41 | |
42 | /* Drop sk reference if set */ |
43 | if (sk) |
44 | sock_put(sk); |
45 | |
46 | hdev->req_rsp = skb_get(skb); |
47 | } |
48 | |
49 | wake_up_interruptible(&hdev->req_wait_q); |
50 | } |
51 | |
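/* Allocate and initialize an skb carrying an HCI command header followed by
 * the command parameters, optionally tied to the originating socket.
 */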
52 | static struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, |
53 | u32 plen, const void *param, |
54 | struct sock *sk) |
55 | { |
56 | int len = HCI_COMMAND_HDR_SIZE + plen; |
57 | struct hci_command_hdr *hdr; |
58 | struct sk_buff *skb; |
59 | |
60 | skb = bt_skb_alloc(len, GFP_ATOMIC); |
61 | if (!skb) |
62 | return NULL; |
63 | |
64 | hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE); |
65 | hdr->opcode = cpu_to_le16(opcode); |
66 | hdr->plen = plen; |
67 | |
68 | if (plen) |
69 | skb_put_data(skb, param, plen); |
70 | |
71 | bt_dev_dbg(hdev, "skb len %d", skb->len); |
72 | |
73 | hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; |
74 | hci_skb_opcode(skb) = opcode; |
75 | |
76 | /* Grab a reference if command needs to be associated with a sock (e.g. |
77 | * likely mgmt socket that initiated the command). |
78 | */ |
79 | if (sk) { |
80 | hci_skb_sk(skb) = sk; |
81 | sock_hold(sk); |
82 | } |
83 | |
84 | return skb; |
85 | } |
86 | |
87 | static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen, |
88 | const void *param, u8 event, struct sock *sk) |
89 | { |
90 | struct hci_dev *hdev = req->hdev; |
91 | struct sk_buff *skb; |
92 | |
93 | bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); |
94 | |
95 | /* If an error occurred during request building, there is no point in |
96 | * queueing the HCI command. We can simply return. |
97 | */ |
98 | if (req->err) |
99 | return; |
100 | |
101 | skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk); |
102 | if (!skb) { |
103 | bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", |
104 | opcode); |
105 | req->err = -ENOMEM; |
106 | return; |
107 | } |
108 | |
109 | if (skb_queue_empty(&req->cmd_q)) |
110 | bt_cb(skb)->hci.req_flags |= HCI_REQ_START; |
111 | |
112 | hci_skb_event(skb) = event; |
113 | |
114 | skb_queue_tail(&req->cmd_q, skb); |
115 | } |
116 | |
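/* Splice the commands built on the request onto hdev->cmd_q, mark the last
 * skb so its completion wakes the synchronous waiter, and schedule cmd_work
 * to send them.
 */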
117 | static int hci_cmd_sync_run(struct hci_request *req) |
118 | { |
119 | struct hci_dev *hdev = req->hdev; |
120 | struct sk_buff *skb; |
121 | unsigned long flags; |
122 | |
123 | bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q)); |
124 | |
125 | /* If an error occurred during request building, remove all HCI |
126 | * commands queued on the HCI request queue. |
127 | */ |
128 | if (req->err) { |
129 | skb_queue_purge(&req->cmd_q); |
130 | return req->err; |
131 | } |
132 | |
133 | /* Do not allow empty requests */ |
134 | if (skb_queue_empty(&req->cmd_q)) |
135 | return -ENODATA; |
136 | |
137 | skb = skb_peek_tail(&req->cmd_q); |
138 | bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete; |
139 | bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB; |
140 | |
141 | spin_lock_irqsave(&hdev->cmd_q.lock, flags); |
142 | skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q); |
143 | spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); |
144 | |
145 | queue_work(hdev->workqueue, &hdev->cmd_work); |
146 | |
147 | return 0; |
148 | } |
149 | |
150 | /* This function requires the caller holds hdev->req_lock. */ |
151 | struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen, |
152 | const void *param, u8 event, u32 timeout, |
153 | struct sock *sk) |
154 | { |
155 | struct hci_request req; |
156 | struct sk_buff *skb; |
157 | int err = 0; |
158 | |
159 | bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode); |
160 | |
161 | hci_req_init(&req, hdev); |
162 | |
163 | hci_cmd_sync_add(&req, opcode, plen, param, event, sk); |
164 | |
165 | hdev->req_status = HCI_REQ_PEND; |
166 | |
167 | err = hci_cmd_sync_run(&req); |
168 | if (err < 0) |
169 | return ERR_PTR(err); |
170 | |
171 | err = wait_event_interruptible_timeout(hdev->req_wait_q, |
172 | hdev->req_status != HCI_REQ_PEND, |
173 | timeout); |
174 | |
175 | if (err == -ERESTARTSYS) |
176 | return ERR_PTR(-EINTR); |
177 | |
178 | switch (hdev->req_status) { |
179 | case HCI_REQ_DONE: |
180 | err = -bt_to_errno(hdev->req_result); |
181 | break; |
182 | |
183 | case HCI_REQ_CANCELED: |
184 | err = -hdev->req_result; |
185 | break; |
186 | |
187 | default: |
188 | err = -ETIMEDOUT; |
189 | break; |
190 | } |
191 | |
192 | hdev->req_status = 0; |
193 | hdev->req_result = 0; |
194 | skb = hdev->req_rsp; |
195 | hdev->req_rsp = NULL; |
196 | |
197 | bt_dev_dbg(hdev, "end: err %d", err); |
198 | |
199 | if (err < 0) { |
200 | kfree_skb(skb); |
201 | return ERR_PTR(err); |
202 | } |
203 | |
204 | return skb; |
205 | } |
206 | EXPORT_SYMBOL(__hci_cmd_sync_sk); |
207 | |
208 | /* This function requires the caller holds hdev->req_lock. */ |
209 | struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, |
210 | const void *param, u32 timeout) |
211 | { |
212 | return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL); |
213 | } |
214 | EXPORT_SYMBOL(__hci_cmd_sync); |
215 | |
216 | /* Send HCI command and wait for command complete event */ |
217 | struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, |
218 | const void *param, u32 timeout) |
219 | { |
220 | struct sk_buff *skb; |
221 | |
222 | if (!test_bit(HCI_UP, &hdev->flags)) |
223 | return ERR_PTR(-ENETDOWN); |
224 | |
225 | bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); |
226 | |
227 | hci_req_sync_lock(hdev); |
228 | skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout); |
229 | hci_req_sync_unlock(hdev); |
230 | |
231 | return skb; |
232 | } |
233 | EXPORT_SYMBOL(hci_cmd_sync); |
234 | |
235 | /* This function requires the caller holds hdev->req_lock. */ |
236 | struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, |
237 | const void *param, u8 event, u32 timeout) |
238 | { |
239 | return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, |
240 | NULL); |
241 | } |
242 | EXPORT_SYMBOL(__hci_cmd_sync_ev); |
243 | |
244 | /* This function requires the caller holds hdev->req_lock. */ |
245 | int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen, |
246 | const void *param, u8 event, u32 timeout, |
247 | struct sock *sk) |
248 | { |
249 | struct sk_buff *skb; |
250 | u8 status; |
251 | |
252 | skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk); |
253 | if (IS_ERR(skb)) { |
254 | if (!event) |
255 | bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode, |
256 | PTR_ERR(skb)); |
257 | return PTR_ERR(skb); |
258 | } |
259 | |
260 | /* If the command returns a status event, skb will be set to NULL as |
261 | * there are no parameters; in case of failure, IS_ERR(skb) would have |
262 | * been set and the actual error can be retrieved with PTR_ERR(skb). |
263 | */ |
264 | if (!skb) |
265 | return 0; |
266 | |
267 | status = skb->data[0]; |
268 | |
269 | kfree_skb(skb); |
270 | |
271 | return status; |
272 | } |
273 | EXPORT_SYMBOL(__hci_cmd_sync_status_sk); |
274 | |
275 | int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen, |
276 | const void *param, u32 timeout) |
277 | { |
278 | return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout, |
279 | NULL); |
280 | } |
281 | EXPORT_SYMBOL(__hci_cmd_sync_status); |
282 | |
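/* Worker for hdev->cmd_sync_work: drain cmd_sync_work_list and run each
 * entry's function (and destroy callback) under the request sync lock.
 */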
283 | static void hci_cmd_sync_work(struct work_struct *work) |
284 | { |
285 | struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work); |
286 | |
287 | bt_dev_dbg(hdev, ""); |
288 | |
289 | /* Dequeue all entries and run them */ |
290 | while (1) { |
291 | struct hci_cmd_sync_work_entry *entry; |
292 | |
293 | mutex_lock(&hdev->cmd_sync_work_lock); |
294 | entry = list_first_entry_or_null(&hdev->cmd_sync_work_list, |
295 | struct hci_cmd_sync_work_entry, |
296 | list); |
297 | if (entry) |
298 | list_del(&entry->list); |
299 | mutex_unlock(&hdev->cmd_sync_work_lock); |
300 | |
301 | if (!entry) |
302 | break; |
303 | |
304 | bt_dev_dbg(hdev, "entry %p", entry); |
305 | |
306 | if (entry->func) { |
307 | int err; |
308 | |
309 | hci_req_sync_lock(hdev); |
310 | err = entry->func(hdev, entry->data); |
311 | if (entry->destroy) |
312 | entry->destroy(hdev, entry->data, err); |
313 | hci_req_sync_unlock(hdev); |
314 | } |
315 | |
316 | kfree(entry); |
317 | } |
318 | } |
319 | |
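/* Worker for hdev->cmd_sync_cancel_work: stop the command timers, restore
 * the command credit and wake up the synchronous waiter so the cancelled
 * request can terminate.
 */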
320 | static void hci_cmd_sync_cancel_work(struct work_struct *work) |
321 | { |
322 | struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_cancel_work); |
323 | |
324 | cancel_delayed_work_sync(&hdev->cmd_timer); |
325 | cancel_delayed_work_sync(&hdev->ncmd_timer); |
326 | atomic_set(&hdev->cmd_cnt, 1); |
327 | |
328 | wake_up_interruptible(&hdev->req_wait_q); |
329 | } |
330 | |
331 | static int hci_scan_disable_sync(struct hci_dev *hdev); |
332 | static int scan_disable_sync(struct hci_dev *hdev, void *data) |
333 | { |
334 | return hci_scan_disable_sync(hdev); |
335 | } |
336 | |
337 | static int hci_inquiry_sync(struct hci_dev *hdev, u8 length); |
338 | static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data) |
339 | { |
340 | return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN); |
341 | } |
342 | |
343 | static void le_scan_disable(struct work_struct *work) |
344 | { |
345 | struct hci_dev *hdev = container_of(work, struct hci_dev, |
346 | le_scan_disable.work); |
347 | int status; |
348 | |
349 | bt_dev_dbg(hdev, ""); |
350 | hci_dev_lock(hdev); |
351 | |
352 | if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) |
353 | goto _return; |
354 | |
355 | status = hci_cmd_sync_queue(hdev, scan_disable_sync, NULL, NULL); |
356 | if (status) { |
357 | bt_dev_err(hdev, "failed to disable LE scan: %d", status); |
358 | goto _return; |
359 | } |
360 | |
361 | hdev->discovery.scan_start = 0; |
362 | |
363 | /* If we were running LE only scan, change discovery state. If |
364 | * we were running both LE and BR/EDR inquiry simultaneously, |
365 | * and BR/EDR inquiry is already finished, stop discovery, |
366 | * otherwise BR/EDR inquiry will stop discovery when finished. |
367 | * If we will resolve remote device name, do not change |
368 | * discovery state. |
369 | */ |
370 | |
371 | if (hdev->discovery.type == DISCOV_TYPE_LE) |
372 | goto discov_stopped; |
373 | |
374 | if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED) |
375 | goto _return; |
376 | |
377 | if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) { |
378 | if (!test_bit(HCI_INQUIRY, &hdev->flags) && |
379 | hdev->discovery.state != DISCOVERY_RESOLVING) |
380 | goto discov_stopped; |
381 | |
382 | goto _return; |
383 | } |
384 | |
385 | status = hci_cmd_sync_queue(hdev, interleaved_inquiry_sync, NULL, NULL); |
386 | if (status) { |
387 | bt_dev_err(hdev, "inquiry failed: status %d", status); |
388 | goto discov_stopped; |
389 | } |
390 | |
391 | goto _return; |
392 | |
393 | discov_stopped: |
394 | hci_discovery_set_state(hdev, DISCOVERY_STOPPED); |
395 | |
396 | _return: |
397 | hci_dev_unlock(hdev); |
398 | } |
399 | |
400 | static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val, |
401 | u8 filter_dup); |
402 | |
403 | static int reenable_adv_sync(struct hci_dev *hdev, void *data) |
404 | { |
405 | bt_dev_dbg(hdev, ""); |
406 | |
407 | if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) && |
408 | list_empty(&hdev->adv_instances)) |
409 | return 0; |
410 | |
411 | if (hdev->cur_adv_instance) { |
412 | return hci_schedule_adv_instance_sync(hdev, |
413 | hdev->cur_adv_instance, |
414 | true); |
415 | } else { |
416 | if (ext_adv_capable(hdev)) { |
417 | hci_start_ext_adv_sync(hdev, 0x00); |
418 | } else { |
419 | hci_update_adv_data_sync(hdev, 0x00); |
420 | hci_update_scan_rsp_data_sync(hdev, 0x00); |
421 | hci_enable_advertising_sync(hdev); |
422 | } |
423 | } |
424 | |
425 | return 0; |
426 | } |
427 | |
428 | static void reenable_adv(struct work_struct *work) |
429 | { |
430 | struct hci_dev *hdev = container_of(work, struct hci_dev, |
431 | reenable_adv_work); |
432 | int status; |
433 | |
434 | bt_dev_dbg(hdev, ""); |
435 | |
436 | hci_dev_lock(hdev); |
437 | |
438 | status = hci_cmd_sync_queue(hdev, reenable_adv_sync, NULL, NULL); |
439 | if (status) |
440 | bt_dev_err(hdev, "failed to reenable ADV: %d", status); |
441 | |
442 | hci_dev_unlock(hdev); |
443 | } |
444 | |
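/* Cancel a pending advertising instance expiry, if one is scheduled. */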
445 | static void cancel_adv_timeout(struct hci_dev *hdev) |
446 | { |
447 | if (hdev->adv_instance_timeout) { |
448 | hdev->adv_instance_timeout = 0; |
449 | cancel_delayed_work(&hdev->adv_instance_expire); |
450 | } |
451 | } |
452 | |
453 | /* For a single instance: |
454 | * - force == true: The instance will be removed even when its remaining |
455 | * lifetime is not zero. |
456 | * - force == false: the instance will be deactivated but kept stored unless |
457 | * the remaining lifetime is zero. |
458 | * |
459 | * For instance == 0x00: |
460 | * - force == true: All instances will be removed regardless of their timeout |
461 | * setting. |
462 | * - force == false: Only instances that have a timeout will be removed. |
463 | */ |
464 | int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk, |
465 | u8 instance, bool force) |
466 | { |
467 | struct adv_info *adv_instance, *n, *next_instance = NULL; |
468 | int err; |
469 | u8 rem_inst; |
470 | |
471 | /* Cancel any timeout concerning the removed instance(s). */ |
472 | if (!instance || hdev->cur_adv_instance == instance) |
473 | cancel_adv_timeout(hdev); |
474 | |
475 | /* Get the next instance to advertise BEFORE we remove |
476 | * the current one. This can be the same instance again |
477 | * if there is only one instance. |
478 | */ |
479 | if (instance && hdev->cur_adv_instance == instance) |
480 | next_instance = hci_get_next_instance(hdev, instance); |
481 | |
482 | if (instance == 0x00) { |
483 | list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, |
484 | list) { |
485 | if (!(force || adv_instance->timeout)) |
486 | continue; |
487 | |
488 | rem_inst = adv_instance->instance; |
489 | err = hci_remove_adv_instance(hdev, rem_inst); |
490 | if (!err) |
491 | mgmt_advertising_removed(sk, hdev, rem_inst); |
492 | } |
493 | } else { |
494 | adv_instance = hci_find_adv_instance(hdev, instance); |
495 | |
496 | if (force || (adv_instance && adv_instance->timeout && |
497 | !adv_instance->remaining_time)) { |
498 | /* Don't advertise a removed instance. */ |
499 | if (next_instance && |
500 | next_instance->instance == instance) |
501 | next_instance = NULL; |
502 | |
503 | err = hci_remove_adv_instance(hdev, instance); |
504 | if (!err) |
505 | mgmt_advertising_removed(sk, hdev, instance); |
506 | } |
507 | } |
508 | |
509 | if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING)) |
510 | return 0; |
511 | |
512 | if (next_instance && !ext_adv_capable(hdev)) |
513 | return hci_schedule_adv_instance_sync(hdev, |
514 | next_instance->instance, |
515 | false); |
516 | |
517 | return 0; |
518 | } |
519 | |
520 | static int adv_timeout_expire_sync(struct hci_dev *hdev, void *data) |
521 | { |
522 | u8 instance = *(u8 *)data; |
523 | |
524 | kfree(data); |
525 | |
526 | hci_clear_adv_instance_sync(hdev, NULL, instance, false); |
527 | |
528 | if (list_empty(&hdev->adv_instances)) |
529 | return hci_disable_advertising_sync(hdev); |
530 | |
531 | return 0; |
532 | } |
533 | |
534 | static void adv_timeout_expire(struct work_struct *work) |
535 | { |
536 | u8 *inst_ptr; |
537 | struct hci_dev *hdev = container_of(work, struct hci_dev, |
538 | adv_instance_expire.work); |
539 | |
540 | bt_dev_dbg(hdev, ""); |
541 | |
542 | hci_dev_lock(hdev); |
543 | |
544 | hdev->adv_instance_timeout = 0; |
545 | |
546 | if (hdev->cur_adv_instance == 0x00) |
547 | goto unlock; |
548 | |
549 | inst_ptr = kmalloc(1, GFP_KERNEL); |
550 | if (!inst_ptr) |
551 | goto unlock; |
552 | |
553 | *inst_ptr = hdev->cur_adv_instance; |
554 | hci_cmd_sync_queue(hdev, adv_timeout_expire_sync, inst_ptr, NULL); |
555 | |
556 | unlock: |
557 | hci_dev_unlock(hdev); |
558 | } |
559 | |
560 | void hci_cmd_sync_init(struct hci_dev *hdev) |
561 | { |
562 | INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work); |
563 | INIT_LIST_HEAD(&hdev->cmd_sync_work_list); |
564 | mutex_init(&hdev->cmd_sync_work_lock); |
565 | mutex_init(&hdev->unregister_lock); |
566 | |
567 | INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work); |
568 | INIT_WORK(&hdev->reenable_adv_work, reenable_adv); |
569 | INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable); |
570 | INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire); |
571 | } |
572 | |
573 | static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev, |
574 | struct hci_cmd_sync_work_entry *entry, |
575 | int err) |
576 | { |
577 | if (entry->destroy) |
578 | entry->destroy(hdev, entry->data, err); |
579 | |
580 | list_del(&entry->list); |
581 | kfree(entry); |
582 | } |
583 | |
584 | void hci_cmd_sync_clear(struct hci_dev *hdev) |
585 | { |
586 | struct hci_cmd_sync_work_entry *entry, *tmp; |
587 | |
588 | cancel_work_sync(&hdev->cmd_sync_work); |
589 | cancel_work_sync(&hdev->reenable_adv_work); |
590 | |
591 | mutex_lock(&hdev->cmd_sync_work_lock); |
592 | list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) |
593 | _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED); |
594 | mutex_unlock(&hdev->cmd_sync_work_lock); |
595 | } |
596 | |
597 | void hci_cmd_sync_cancel(struct hci_dev *hdev, int err) |
598 | { |
599 | bt_dev_dbg(hdev, "err 0x%2.2x", err); |
600 | |
601 | if (hdev->req_status == HCI_REQ_PEND) { |
602 | hdev->req_result = err; |
603 | hdev->req_status = HCI_REQ_CANCELED; |
604 | |
605 | queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work); |
606 | } |
607 | } |
608 | EXPORT_SYMBOL(hci_cmd_sync_cancel); |
609 | |
610 | /* Cancel ongoing command request synchronously: |
611 | * |
612 | * - Set result and mark status to HCI_REQ_CANCELED |
613 | * - Wakeup command sync thread |
614 | */ |
615 | void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err) |
616 | { |
617 | bt_dev_dbg(hdev, "err 0x%2.2x", err); |
618 | |
619 | if (hdev->req_status == HCI_REQ_PEND) { |
620 | /* req_result is __u32 so error must be positive to be properly |
621 | * propagated. |
622 | */ |
623 | hdev->req_result = err < 0 ? -err : err; |
624 | hdev->req_status = HCI_REQ_CANCELED; |
625 | |
626 | wake_up_interruptible(&hdev->req_wait_q); |
627 | } |
628 | } |
629 | EXPORT_SYMBOL(hci_cmd_sync_cancel_sync); |
630 | |
631 | /* Submit HCI command to be run as cmd_sync_work: |
632 | * |
633 | * - hdev must _not_ be unregistered |
634 | */ |
635 | int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, |
636 | void *data, hci_cmd_sync_work_destroy_t destroy) |
637 | { |
638 | struct hci_cmd_sync_work_entry *entry; |
639 | int err = 0; |
640 | |
641 | mutex_lock(&hdev->unregister_lock); |
642 | if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { |
643 | err = -ENODEV; |
644 | goto unlock; |
645 | } |
646 | |
647 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); |
648 | if (!entry) { |
649 | err = -ENOMEM; |
650 | goto unlock; |
651 | } |
652 | entry->func = func; |
653 | entry->data = data; |
654 | entry->destroy = destroy; |
655 | |
656 | mutex_lock(&hdev->cmd_sync_work_lock); |
657 | list_add_tail(&entry->list, &hdev->cmd_sync_work_list); |
658 | mutex_unlock(&hdev->cmd_sync_work_lock); |
659 | |
660 | queue_work(hdev->req_workqueue, &hdev->cmd_sync_work); |
661 | |
662 | unlock: |
663 | mutex_unlock(&hdev->unregister_lock); |
664 | return err; |
665 | } |
666 | EXPORT_SYMBOL(hci_cmd_sync_submit); |
667 | |
668 | /* Queue HCI command: |
669 | * |
670 | * - hdev must be running |
671 | */ |
672 | int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, |
673 | void *data, hci_cmd_sync_work_destroy_t destroy) |
674 | { |
675 | /* Only queue command if hdev is running which means it had been opened |
676 | * and is either on init phase or is already up. |
677 | */ |
678 | if (!test_bit(HCI_RUNNING, &hdev->flags)) |
679 | return -ENETDOWN; |
680 | |
681 | return hci_cmd_sync_submit(hdev, func, data, destroy); |
682 | } |
683 | EXPORT_SYMBOL(hci_cmd_sync_queue); |
684 | |
685 | static struct hci_cmd_sync_work_entry * |
686 | _hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, |
687 | void *data, hci_cmd_sync_work_destroy_t destroy) |
688 | { |
689 | struct hci_cmd_sync_work_entry *entry, *tmp; |
690 | |
691 | list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) { |
692 | if (func && entry->func != func) |
693 | continue; |
694 | |
695 | if (data && entry->data != data) |
696 | continue; |
697 | |
698 | if (destroy && entry->destroy != destroy) |
699 | continue; |
700 | |
701 | return entry; |
702 | } |
703 | |
704 | return NULL; |
705 | } |
706 | |
707 | /* Queue HCI command entry once: |
708 | * |
709 | * - Look up whether an entry already exists and, only if it doesn't, create |
710 | * a new entry and queue it. |
711 | */ |
712 | int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, |
713 | void *data, hci_cmd_sync_work_destroy_t destroy) |
714 | { |
715 | if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy)) |
716 | return 0; |
717 | |
718 | return hci_cmd_sync_queue(hdev, func, data, destroy); |
719 | } |
720 | EXPORT_SYMBOL(hci_cmd_sync_queue_once); |
721 | |
722 | /* Lookup HCI command entry: |
723 | * |
724 | * - Return first entry that matches by function callback or data or |
725 | * destroy callback. |
726 | */ |
727 | struct hci_cmd_sync_work_entry * |
728 | hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, |
729 | void *data, hci_cmd_sync_work_destroy_t destroy) |
730 | { |
731 | struct hci_cmd_sync_work_entry *entry; |
732 | |
733 | mutex_lock(&hdev->cmd_sync_work_lock); |
734 | entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy); |
735 | mutex_unlock(&hdev->cmd_sync_work_lock); |
736 | |
737 | return entry; |
738 | } |
739 | EXPORT_SYMBOL(hci_cmd_sync_lookup_entry); |
740 | |
741 | /* Cancel HCI command entry */ |
742 | void hci_cmd_sync_cancel_entry(struct hci_dev *hdev, |
743 | struct hci_cmd_sync_work_entry *entry) |
744 | { |
745 | mutex_lock(&hdev->cmd_sync_work_lock); |
746 | _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED); |
747 | mutex_unlock(&hdev->cmd_sync_work_lock); |
748 | } |
749 | EXPORT_SYMBOL(hci_cmd_sync_cancel_entry); |
750 | |
751 | /* Dequeue one HCI command entry: |
752 | * |
753 | * - Lookup and cancel first entry that matches. |
754 | */ |
755 | bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev, |
756 | hci_cmd_sync_work_func_t func, |
757 | void *data, hci_cmd_sync_work_destroy_t destroy) |
758 | { |
759 | struct hci_cmd_sync_work_entry *entry; |
760 | |
761 | entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy); |
762 | if (!entry) |
763 | return false; |
764 | |
765 | hci_cmd_sync_cancel_entry(hdev, entry); |
766 | |
767 | return true; |
768 | } |
769 | EXPORT_SYMBOL(hci_cmd_sync_dequeue_once); |
770 | |
771 | /* Dequeue HCI command entry: |
772 | * |
773 | * - Lookup and cancel any entry that matches by function callback or data or |
774 | * destroy callback. |
775 | */ |
776 | bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, |
777 | void *data, hci_cmd_sync_work_destroy_t destroy) |
778 | { |
779 | struct hci_cmd_sync_work_entry *entry; |
780 | bool ret = false; |
781 | |
782 | mutex_lock(&hdev->cmd_sync_work_lock); |
783 | while ((entry = _hci_cmd_sync_lookup_entry(hdev, func, data, |
784 | destroy))) { |
785 | _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED); |
786 | ret = true; |
787 | } |
788 | mutex_unlock(&hdev->cmd_sync_work_lock); |
789 | |
790 | return ret; |
791 | } |
792 | EXPORT_SYMBOL(hci_cmd_sync_dequeue); |
793 | |
794 | int hci_update_eir_sync(struct hci_dev *hdev) |
795 | { |
796 | struct hci_cp_write_eir cp; |
797 | |
798 | bt_dev_dbg(hdev, ""); |
799 | |
800 | if (!hdev_is_powered(hdev)) |
801 | return 0; |
802 | |
803 | if (!lmp_ext_inq_capable(hdev)) |
804 | return 0; |
805 | |
806 | if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) |
807 | return 0; |
808 | |
809 | if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) |
810 | return 0; |
811 | |
812 | memset(&cp, 0, sizeof(cp)); |
813 | |
814 | eir_create(hdev, cp.data); |
815 | |
816 | if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0) |
817 | return 0; |
818 | |
819 | memcpy(hdev->eir, cp.data, sizeof(cp.data)); |
820 | |
821 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp, |
822 | HCI_CMD_TIMEOUT); |
823 | } |
824 | |
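/* Combine the service hints of all registered UUIDs into the service class
 * octet of the Class of Device.
 */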
825 | static u8 get_service_classes(struct hci_dev *hdev) |
826 | { |
827 | struct bt_uuid *uuid; |
828 | u8 val = 0; |
829 | |
830 | list_for_each_entry(uuid, &hdev->uuids, list) |
831 | val |= uuid->svc_hint; |
832 | |
833 | return val; |
834 | } |
835 | |
836 | int hci_update_class_sync(struct hci_dev *hdev) |
837 | { |
838 | u8 cod[3]; |
839 | |
840 | bt_dev_dbg(hdev, ""); |
841 | |
842 | if (!hdev_is_powered(hdev)) |
843 | return 0; |
844 | |
845 | if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) |
846 | return 0; |
847 | |
848 | if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) |
849 | return 0; |
850 | |
851 | cod[0] = hdev->minor_class; |
852 | cod[1] = hdev->major_class; |
853 | cod[2] = get_service_classes(hdev); |
854 | |
855 | if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) |
856 | cod[1] |= 0x20; |
857 | |
858 | if (memcmp(cod, hdev->dev_class, 3) == 0) |
859 | return 0; |
860 | |
861 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CLASS_OF_DEV, |
862 | sizeof(cod), cod, HCI_CMD_TIMEOUT); |
863 | } |
864 | |
865 | static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable) |
866 | { |
867 | /* If there is no connection we are OK to advertise. */ |
868 | if (hci_conn_num(hdev, LE_LINK) == 0) |
869 | return true; |
870 | |
871 | /* Check le_states if there is any connection in peripheral role. */ |
872 | if (hdev->conn_hash.le_num_peripheral > 0) { |
873 | /* Peripheral connection state and non connectable mode |
874 | * bit 20. |
875 | */ |
876 | if (!connectable && !(hdev->le_states[2] & 0x10)) |
877 | return false; |
878 | |
879 | /* Peripheral connection state and connectable mode bit 38 |
880 | * and scannable bit 21. |
881 | */ |
882 | if (connectable && (!(hdev->le_states[4] & 0x40) || |
883 | !(hdev->le_states[2] & 0x20))) |
884 | return false; |
885 | } |
886 | |
887 | /* Check le_states if there is any connection in central role. */ |
888 | if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) { |
889 | /* Central connection state and non connectable mode bit 18. */ |
890 | if (!connectable && !(hdev->le_states[2] & 0x02)) |
891 | return false; |
892 | |
893 | /* Central connection state and connectable mode bit 35 and |
894 | * scannable 19. |
895 | */ |
896 | if (connectable && (!(hdev->le_states[4] & 0x08) || |
897 | !(hdev->le_states[2] & 0x08))) |
898 | return false; |
899 | } |
900 | |
901 | return true; |
902 | } |
903 | |
904 | static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags) |
905 | { |
906 | /* If privacy is not enabled don't use RPA */ |
907 | if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) |
908 | return false; |
909 | |
910 | /* If basic privacy mode is enabled use RPA */ |
911 | if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) |
912 | return true; |
913 | |
914 | /* If limited privacy mode is enabled don't use RPA if we're |
915 | * both discoverable and bondable. |
916 | */ |
917 | if ((flags & MGMT_ADV_FLAG_DISCOV) && |
918 | hci_dev_test_flag(hdev, HCI_BONDABLE)) |
919 | return false; |
920 | |
921 | /* We're neither bondable nor discoverable in the limited |
922 | * privacy mode, therefore use RPA. |
923 | */ |
924 | return true; |
925 | } |
926 | |
927 | static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa) |
928 | { |
929 | /* If we're advertising or initiating an LE connection we can't |
930 | * go ahead and change the random address at this time. This is |
931 | * because the eventual initiator address used for the |
932 | * subsequently created connection will be undefined (some |
933 | * controllers use the new address and others the one we had |
934 | * when the operation started). |
935 | * |
936 | * In this kind of scenario skip the update and let the random |
937 | * address be updated at the next cycle. |
938 | */ |
939 | if (hci_dev_test_flag(hdev, HCI_LE_ADV) || |
940 | hci_lookup_le_connect(hdev)) { |
941 | bt_dev_dbg(hdev, "Deferring random address update"); |
942 | hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); |
943 | return 0; |
944 | } |
945 | |
946 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RANDOM_ADDR, |
947 | 6, rpa, HCI_CMD_TIMEOUT); |
948 | } |
949 | |
950 | int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy, |
951 | bool rpa, u8 *own_addr_type) |
952 | { |
953 | int err; |
954 | |
955 | /* If privacy is enabled use a resolvable private address. If |
956 | * current RPA has expired or there is something else than |
957 | * the current RPA in use, then generate a new one. |
958 | */ |
959 | if (rpa) { |
960 | /* If the Controller supports LL Privacy, use own address |
961 | * type 0x03. |
962 | */ |
963 | if (use_ll_privacy(hdev)) |
964 | *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; |
965 | else |
966 | *own_addr_type = ADDR_LE_DEV_RANDOM; |
967 | |
968 | /* Check if RPA is valid */ |
969 | if (rpa_valid(hdev)) |
970 | return 0; |
971 | |
972 | err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); |
973 | if (err < 0) { |
974 | bt_dev_err(hdev, "failed to generate new RPA"); |
975 | return err; |
976 | } |
977 | |
978 | err = hci_set_random_addr_sync(hdev, &hdev->rpa); |
979 | if (err) |
980 | return err; |
981 | |
982 | return 0; |
983 | } |
984 | |
985 | /* In case of required privacy without resolvable private address, |
986 | * use a non-resolvable private address. This is useful for active |
987 | * scanning and non-connectable advertising. |
988 | */ |
989 | if (require_privacy) { |
990 | bdaddr_t nrpa; |
991 | |
992 | while (true) { |
993 | /* The non-resolvable private address is generated |
994 | * from random six bytes with the two most significant |
995 | * bits cleared. |
996 | */ |
997 | get_random_bytes(&nrpa, 6); |
998 | nrpa.b[5] &= 0x3f; |
999 | |
1000 | /* The non-resolvable private address shall not be |
1001 | * equal to the public address. |
1002 | */ |
1003 | if (bacmp(&hdev->bdaddr, &nrpa)) |
1004 | break; |
1005 | } |
1006 | |
1007 | *own_addr_type = ADDR_LE_DEV_RANDOM; |
1008 | |
1009 | return hci_set_random_addr_sync(hdev, &nrpa); |
1010 | } |
1011 | |
1012 | /* If forcing static address is in use or there is no public |
1013 | * address use the static address as random address (but skip |
1014 | * the HCI command if the current random address is already the |
1015 | * static one). |
1016 | * |
1017 | * In case BR/EDR has been disabled on a dual-mode controller |
1018 | * and a static address has been configured, then use that |
1019 | * address instead of the public BR/EDR address. |
1020 | */ |
1021 | if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || |
1022 | !bacmp(&hdev->bdaddr, BDADDR_ANY) || |
1023 | (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && |
1024 | bacmp(&hdev->static_addr, BDADDR_ANY))) { |
1025 | *own_addr_type = ADDR_LE_DEV_RANDOM; |
1026 | if (bacmp(&hdev->static_addr, &hdev->random_addr)) |
1027 | return hci_set_random_addr_sync(hdev, |
1028 | &hdev->static_addr); |
1029 | return 0; |
1030 | } |
1031 | |
1032 | /* Neither privacy nor static address is being used so use a |
1033 | * public address. |
1034 | */ |
1035 | *own_addr_type = ADDR_LE_DEV_PUBLIC; |
1036 | |
1037 | return 0; |
1038 | } |
1039 | |
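/* Disable the given extended advertising instance, or all instances when
 * instance is 0x00.
 */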
1040 | static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) |
1041 | { |
1042 | struct hci_cp_le_set_ext_adv_enable *cp; |
1043 | struct hci_cp_ext_adv_set *set; |
1044 | u8 data[sizeof(*cp) + sizeof(*set) * 1]; |
1045 | u8 size; |
1046 | |
1047 | /* If request specifies an instance that doesn't exist, fail */ |
1048 | if (instance > 0) { |
1049 | struct adv_info *adv; |
1050 | |
1051 | adv = hci_find_adv_instance(hdev, instance); |
1052 | if (!adv) |
1053 | return -EINVAL; |
1054 | |
1055 | /* If not enabled there is nothing to do */ |
1056 | if (!adv->enabled) |
1057 | return 0; |
1058 | } |
1059 | |
1060 | memset(data, 0, sizeof(data)); |
1061 | |
1062 | cp = (void *)data; |
1063 | set = (void *)cp->data; |
1064 | |
1065 | /* Instance 0x00 indicates all advertising instances will be disabled */ |
1066 | cp->num_of_sets = !!instance; |
1067 | cp->enable = 0x00; |
1068 | |
1069 | set->handle = instance; |
1070 | |
1071 | size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets; |
1072 | |
1073 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, |
1074 | size, data, HCI_CMD_TIMEOUT); |
1075 | } |
1076 | |
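/* Set the random address used by an advertising set. */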
1077 | static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance, |
1078 | bdaddr_t *random_addr) |
1079 | { |
1080 | struct hci_cp_le_set_adv_set_rand_addr cp; |
1081 | int err; |
1082 | |
1083 | if (!instance) { |
1084 | /* Instance 0x00 doesn't have an adv_info, instead it uses |
1085 | * hdev->random_addr to track its address so whenever it needs |
1086 | * to be updated this also sets the random address since |
1087 | * hdev->random_addr is shared with the scan state machine. |
1088 | */ |
1089 | err = hci_set_random_addr_sync(hdev, random_addr); |
1090 | if (err) |
1091 | return err; |
1092 | } |
1093 | |
1094 | memset(&cp, 0, sizeof(cp)); |
1095 | |
1096 | cp.handle = instance; |
1097 | bacpy(&cp.bdaddr, random_addr); |
1098 | |
1099 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR, |
1100 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
1101 | } |
1102 | |
1103 | int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) |
1104 | { |
1105 | struct hci_cp_le_set_ext_adv_params cp; |
1106 | bool connectable; |
1107 | u32 flags; |
1108 | bdaddr_t random_addr; |
1109 | u8 own_addr_type; |
1110 | int err; |
1111 | struct adv_info *adv; |
1112 | bool secondary_adv; |
1113 | |
1114 | if (instance > 0) { |
1115 | adv = hci_find_adv_instance(hdev, instance); |
1116 | if (!adv) |
1117 | return -EINVAL; |
1118 | } else { |
1119 | adv = NULL; |
1120 | } |
1121 | |
1122 | /* Updating parameters of an active instance will return a |
1123 | * Command Disallowed error, so we must first disable the |
1124 | * instance if it is active. |
1125 | */ |
1126 | if (adv && !adv->pending) { |
1127 | err = hci_disable_ext_adv_instance_sync(hdev, instance); |
1128 | if (err) |
1129 | return err; |
1130 | } |
1131 | |
1132 | flags = hci_adv_instance_flags(hdev, instance); |
1133 | |
1134 | /* If the "connectable" instance flag was not set, then choose between |
1135 | * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. |
1136 | */ |
1137 | connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) || |
1138 | mgmt_get_connectable(hdev); |
1139 | |
1140 | if (!is_advertising_allowed(hdev, connectable)) |
1141 | return -EPERM; |
1142 | |
1143 | /* Set require_privacy to true only when non-connectable |
1144 | * advertising is used. In that case it is fine to use a |
1145 | * non-resolvable private address. |
1146 | */ |
1147 | err = hci_get_random_address(hdev, !connectable, |
1148 | adv_use_rpa(hdev, flags), adv, |
1149 | &own_addr_type, &random_addr); |
1150 | if (err < 0) |
1151 | return err; |
1152 | |
1153 | memset(&cp, 0, sizeof(cp)); |
1154 | |
1155 | if (adv) { |
1156 | hci_cpu_to_le24(adv->min_interval, cp.min_interval); |
1157 | hci_cpu_to_le24(adv->max_interval, cp.max_interval); |
1158 | cp.tx_power = adv->tx_power; |
1159 | } else { |
1160 | hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval); |
1161 | hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval); |
1162 | cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE; |
1163 | } |
1164 | |
1165 | secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK); |
1166 | |
1167 | if (connectable) { |
1168 | if (secondary_adv) |
1169 | cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND); |
1170 | else |
1171 | cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND); |
1172 | } else if (hci_adv_instance_is_scannable(hdev, instance) || |
1173 | (flags & MGMT_ADV_PARAM_SCAN_RSP)) { |
1174 | if (secondary_adv) |
1175 | cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND); |
1176 | else |
1177 | cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND); |
1178 | } else { |
1179 | if (secondary_adv) |
1180 | cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND); |
1181 | else |
1182 | cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND); |
1183 | } |
1184 | |
1185 | /* If Own_Address_Type equals 0x02 or 0x03, the Peer_Address parameter |
1186 | * contains the peer’s Identity Address and the Peer_Address_Type |
1187 | * parameter contains the peer’s Identity Type (i.e., 0x00 or 0x01). |
1188 | * These parameters are used to locate the corresponding local IRK in |
1189 | * the resolving list; this IRK is used to generate their own address |
1190 | * used in the advertisement. |
1191 | */ |
1192 | if (own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) |
1193 | hci_copy_identity_address(hdev, &cp.peer_addr, |
1194 | &cp.peer_addr_type); |
1195 | |
1196 | cp.own_addr_type = own_addr_type; |
1197 | cp.channel_map = hdev->le_adv_channel_map; |
1198 | cp.handle = instance; |
1199 | |
1200 | if (flags & MGMT_ADV_FLAG_SEC_2M) { |
1201 | cp.primary_phy = HCI_ADV_PHY_1M; |
1202 | cp.secondary_phy = HCI_ADV_PHY_2M; |
1203 | } else if (flags & MGMT_ADV_FLAG_SEC_CODED) { |
1204 | cp.primary_phy = HCI_ADV_PHY_CODED; |
1205 | cp.secondary_phy = HCI_ADV_PHY_CODED; |
1206 | } else { |
1207 | /* In all other cases use 1M */ |
1208 | cp.primary_phy = HCI_ADV_PHY_1M; |
1209 | cp.secondary_phy = HCI_ADV_PHY_1M; |
1210 | } |
1211 | |
1212 | err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, |
1213 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
1214 | if (err) |
1215 | return err; |
1216 | |
1217 | if ((own_addr_type == ADDR_LE_DEV_RANDOM || |
1218 | own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) && |
1219 | bacmp(&random_addr, BDADDR_ANY)) { |
1220 | /* Check if the random address needs to be updated */ |
1221 | if (adv) { |
1222 | if (!bacmp(&random_addr, &adv->random_addr)) |
1223 | return 0; |
1224 | } else { |
1225 | if (!bacmp(&random_addr, &hdev->random_addr)) |
1226 | return 0; |
1227 | } |
1228 | |
1229 | return hci_set_adv_set_random_addr_sync(hdev, instance, |
1230 | &random_addr); |
1231 | } |
1232 | |
1233 | return 0; |
1234 | } |
1235 | |
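/* Program the extended scan response data for an instance, skipping the
 * command when an instance's data has not changed.
 */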
1236 | static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance) |
1237 | { |
1238 | struct { |
1239 | struct hci_cp_le_set_ext_scan_rsp_data cp; |
1240 | u8 data[HCI_MAX_EXT_AD_LENGTH]; |
1241 | } pdu; |
1242 | u8 len; |
1243 | struct adv_info *adv = NULL; |
1244 | int err; |
1245 | |
1246 | memset(&pdu, 0, sizeof(pdu)); |
1247 | |
1248 | if (instance) { |
1249 | adv = hci_find_adv_instance(hdev, instance); |
1250 | if (!adv || !adv->scan_rsp_changed) |
1251 | return 0; |
1252 | } |
1253 | |
1254 | len = eir_create_scan_rsp(hdev, instance, pdu.data); |
1255 | |
1256 | pdu.cp.handle = instance; |
1257 | pdu.cp.length = len; |
1258 | pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; |
1259 | pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; |
1260 | |
1261 | err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, |
1262 | sizeof(pdu.cp) + len, &pdu.cp, |
1263 | HCI_CMD_TIMEOUT); |
1264 | if (err) |
1265 | return err; |
1266 | |
1267 | if (adv) { |
1268 | adv->scan_rsp_changed = false; |
1269 | } else { |
1270 | memcpy(hdev->scan_rsp_data, pdu.data, len); |
1271 | hdev->scan_rsp_data_len = len; |
1272 | } |
1273 | |
1274 | return 0; |
1275 | } |
1276 | |
1277 | static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance) |
1278 | { |
1279 | struct hci_cp_le_set_scan_rsp_data cp; |
1280 | u8 len; |
1281 | |
1282 | memset(&cp, 0, sizeof(cp)); |
1283 | |
1284 | len = eir_create_scan_rsp(hdev, instance, cp.data); |
1285 | |
1286 | if (hdev->scan_rsp_data_len == len && |
1287 | !memcmp(cp.data, hdev->scan_rsp_data, len)) |
1288 | return 0; |
1289 | |
1290 | memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data)); |
1291 | hdev->scan_rsp_data_len = len; |
1292 | |
1293 | cp.length = len; |
1294 | |
1295 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_RSP_DATA, |
1296 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
1297 | } |
1298 | |
1299 | int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance) |
1300 | { |
1301 | if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) |
1302 | return 0; |
1303 | |
1304 | if (ext_adv_capable(hdev)) |
1305 | return hci_set_ext_scan_rsp_data_sync(hdev, instance); |
1306 | |
1307 | return __hci_set_scan_rsp_data_sync(hdev, instance); |
1308 | } |
1309 | |
1310 | int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance) |
1311 | { |
1312 | struct hci_cp_le_set_ext_adv_enable *cp; |
1313 | struct hci_cp_ext_adv_set *set; |
1314 | u8 data[sizeof(*cp) + sizeof(*set) * 1]; |
1315 | struct adv_info *adv; |
1316 | |
1317 | if (instance > 0) { |
1318 | adv = hci_find_adv_instance(hdev, instance); |
1319 | if (!adv) |
1320 | return -EINVAL; |
1321 | /* If already enabled there is nothing to do */ |
1322 | if (adv->enabled) |
1323 | return 0; |
1324 | } else { |
1325 | adv = NULL; |
1326 | } |
1327 | |
1328 | cp = (void *)data; |
1329 | set = (void *)cp->data; |
1330 | |
1331 | memset(cp, 0, sizeof(*cp)); |
1332 | |
1333 | cp->enable = 0x01; |
1334 | cp->num_of_sets = 0x01; |
1335 | |
1336 | memset(set, 0, sizeof(*set)); |
1337 | |
1338 | set->handle = instance; |
1339 | |
1340 | /* Set duration per instance since controller is responsible for |
1341 | * scheduling it. |
1342 | */ |
1343 | if (adv && adv->timeout) { |
1344 | u16 duration = adv->timeout * MSEC_PER_SEC; |
1345 | |
1346 | /* Time = N * 10 ms */ |
1347 | set->duration = cpu_to_le16(duration / 10); |
1348 | } |
1349 | |
1350 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, |
1351 | sizeof(*cp) + |
1352 | sizeof(*set) * cp->num_of_sets, |
1353 | data, HCI_CMD_TIMEOUT); |
1354 | } |
1355 | |
1356 | int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance) |
1357 | { |
1358 | int err; |
1359 | |
1360 | err = hci_setup_ext_adv_instance_sync(hdev, instance); |
1361 | if (err) |
1362 | return err; |
1363 | |
1364 | err = hci_set_ext_scan_rsp_data_sync(hdev, instance); |
1365 | if (err) |
1366 | return err; |
1367 | |
1368 | return hci_enable_ext_advertising_sync(hdev, instance); |
1369 | } |
1370 | |
1371 | int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance) |
1372 | { |
1373 | struct hci_cp_le_set_per_adv_enable cp; |
1374 | struct adv_info *adv = NULL; |
1375 | |
1376 | /* If periodic advertising already disabled there is nothing to do. */ |
1377 | adv = hci_find_adv_instance(hdev, instance); |
1378 | if (!adv || !adv->periodic || !adv->enabled) |
1379 | return 0; |
1380 | |
1381 | memset(&cp, 0, sizeof(cp)); |
1382 | |
1383 | cp.enable = 0x00; |
1384 | cp.handle = instance; |
1385 | |
1386 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE, |
1387 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
1388 | } |
1389 | |
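/* Configure the periodic advertising parameters, falling back to the default
 * discovery intervals when no interval is specified.
 */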
1390 | static int hci_set_per_adv_params_sync(struct hci_dev *hdev, u8 instance, |
1391 | u16 min_interval, u16 max_interval) |
1392 | { |
1393 | struct hci_cp_le_set_per_adv_params cp; |
1394 | |
1395 | memset(&cp, 0, sizeof(cp)); |
1396 | |
1397 | if (!min_interval) |
1398 | min_interval = DISCOV_LE_PER_ADV_INT_MIN; |
1399 | |
1400 | if (!max_interval) |
1401 | max_interval = DISCOV_LE_PER_ADV_INT_MAX; |
1402 | |
1403 | cp.handle = instance; |
1404 | cp.min_interval = cpu_to_le16(min_interval); |
1405 | cp.max_interval = cpu_to_le16(max_interval); |
1406 | cp.periodic_properties = 0x0000; |
1407 | |
1408 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS, |
1409 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
1410 | } |
1411 | |
1412 | static int hci_set_per_adv_data_sync(struct hci_dev *hdev, u8 instance) |
1413 | { |
1414 | struct { |
1415 | struct hci_cp_le_set_per_adv_data cp; |
1416 | u8 data[HCI_MAX_PER_AD_LENGTH]; |
1417 | } pdu; |
1418 | u8 len; |
1419 | |
1420 | memset(&pdu, 0, sizeof(pdu)); |
1421 | |
1422 | if (instance) { |
1423 | struct adv_info *adv = hci_find_adv_instance(hdev, instance); |
1424 | |
1425 | if (!adv || !adv->periodic) |
1426 | return 0; |
1427 | } |
1428 | |
1429 | len = eir_create_per_adv_data(hdev, instance, pdu.data); |
1430 | |
1431 | pdu.cp.length = len; |
1432 | pdu.cp.handle = instance; |
1433 | pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; |
1434 | |
1435 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_DATA, |
1436 | sizeof(pdu.cp) + len, &pdu, |
1437 | HCI_CMD_TIMEOUT); |
1438 | } |
1439 | |
1440 | static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance) |
1441 | { |
1442 | struct hci_cp_le_set_per_adv_enable cp; |
1443 | struct adv_info *adv = NULL; |
1444 | |
1445 | /* If periodic advertising already enabled there is nothing to do. */ |
1446 | adv = hci_find_adv_instance(hdev, instance); |
1447 | if (adv && adv->periodic && adv->enabled) |
1448 | return 0; |
1449 | |
1450 | memset(&cp, 0, sizeof(cp)); |
1451 | |
1452 | cp.enable = 0x01; |
1453 | cp.handle = instance; |
1454 | |
1455 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE, |
1456 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
1457 | } |
1458 | |
1459 | /* Checks if the periodic advertising data contains a Basic Announcement and, |
1460 | * if it does, generates a Broadcast ID and adds a Broadcast Announcement. |
1461 | */ |
1462 | static int hci_adv_bcast_annoucement(struct hci_dev *hdev, struct adv_info *adv) |
1463 | { |
1464 | u8 bid[3]; |
1465 | u8 ad[4 + 3]; |
1466 | |
1467 | /* Skip if NULL adv as instance 0x00 is used for general purpose |
1468 | * advertising so it cannot be used for the likes of Broadcast Announcement |
1469 | * as it can be overwritten at any point. |
1470 | */ |
1471 | if (!adv) |
1472 | return 0; |
1473 | |
1474 | /* If the PA data doesn't contain a Basic Audio Announcement there is |
1475 | * nothing to do. |
1476 | */ |
1477 | if (!eir_get_service_data(adv->per_adv_data, adv->per_adv_data_len, |
1478 | 0x1851, NULL)) |
1479 | return 0; |
1480 | |
1481 | /* Check if advertising data already has a Broadcast Announcement since |
1482 | * the process may want to control the Broadcast ID directly and in that |
1483 | * case the kernel shall not interfere. |
1484 | */ |
1485 | if (eir_get_service_data(adv->adv_data, adv->adv_data_len, 0x1852, |
1486 | NULL)) |
1487 | return 0; |
1488 | |
1489 | /* Generate Broadcast ID */ |
1490 | get_random_bytes(bid, sizeof(bid)); |
1491 | eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid)); |
1492 | hci_set_adv_instance_data(hdev, adv->instance, sizeof(ad), ad, 0, NULL); |
1493 | |
1494 | return hci_update_adv_data_sync(hdev, adv->instance); |
1495 | } |
1496 | |
1497 | int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len, |
1498 | u8 *data, u32 flags, u16 min_interval, |
1499 | u16 max_interval, u16 sync_interval) |
1500 | { |
1501 | struct adv_info *adv = NULL; |
1502 | int err; |
1503 | bool added = false; |
1504 | |
1505 | hci_disable_per_advertising_sync(hdev, instance); |
1506 | |
1507 | if (instance) { |
1508 | adv = hci_find_adv_instance(hdev, instance); |
1509 | /* Create an instance if that could not be found */ |
1510 | if (!adv) { |
1511 | adv = hci_add_per_instance(hdev, instance, flags, |
1512 | data_len, data, |
1513 | sync_interval, |
1514 | sync_interval); |
1515 | if (IS_ERR(adv)) |
1516 | return PTR_ERR(adv); |
1517 | adv->pending = false; |
1518 | added = true; |
1519 | } |
1520 | } |
1521 | |
1522 | /* Start advertising */ |
1523 | err = hci_start_ext_adv_sync(hdev, instance); |
1524 | if (err < 0) |
1525 | goto fail; |
1526 | |
1527 | err = hci_adv_bcast_annoucement(hdev, adv); |
1528 | if (err < 0) |
1529 | goto fail; |
1530 | |
1531 | err = hci_set_per_adv_params_sync(hdev, instance, min_interval, |
1532 | max_interval); |
1533 | if (err < 0) |
1534 | goto fail; |
1535 | |
1536 | err = hci_set_per_adv_data_sync(hdev, instance); |
1537 | if (err < 0) |
1538 | goto fail; |
1539 | |
1540 | err = hci_enable_per_advertising_sync(hdev, instance); |
1541 | if (err < 0) |
1542 | goto fail; |
1543 | |
1544 | return 0; |
1545 | |
1546 | fail: |
1547 | if (added) |
1548 | hci_remove_adv_instance(hdev, instance); |
1549 | |
1550 | return err; |
1551 | } |
1552 | |
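/* Start advertising for an instance: use extended advertising when the
 * controller supports it, otherwise update the legacy advertising and scan
 * response data and enable advertising.
 */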
1553 | static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance) |
1554 | { |
1555 | int err; |
1556 | |
1557 | if (ext_adv_capable(hdev)) |
1558 | return hci_start_ext_adv_sync(hdev, instance); |
1559 | |
1560 | err = hci_update_adv_data_sync(hdev, instance); |
1561 | if (err) |
1562 | return err; |
1563 | |
1564 | err = hci_update_scan_rsp_data_sync(hdev, instance); |
1565 | if (err) |
1566 | return err; |
1567 | |
1568 | return hci_enable_advertising_sync(hdev); |
1569 | } |
1570 | |
1571 | int hci_enable_advertising_sync(struct hci_dev *hdev) |
1572 | { |
1573 | struct adv_info *adv_instance; |
1574 | struct hci_cp_le_set_adv_param cp; |
1575 | u8 own_addr_type, enable = 0x01; |
1576 | bool connectable; |
1577 | u16 adv_min_interval, adv_max_interval; |
1578 | u32 flags; |
1579 | u8 status; |
1580 | |
1581 | if (ext_adv_capable(hdev)) |
1582 | return hci_enable_ext_advertising_sync(hdev, |
1583 | hdev->cur_adv_instance); |
1584 | |
1585 | flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance); |
1586 | adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance); |
1587 | |
1588 | /* If the "connectable" instance flag was not set, then choose between |
1589 | * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. |
1590 | */ |
1591 | connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) || |
1592 | mgmt_get_connectable(hdev); |
1593 | |
1594 | if (!is_advertising_allowed(hdev, connectable)) |
1595 | return -EINVAL; |
1596 | |
1597 | status = hci_disable_advertising_sync(hdev); |
1598 | if (status) |
1599 | return status; |
1600 | |
1601 | /* Clear the HCI_LE_ADV bit temporarily so that the |
1602 | * hci_update_random_address knows that it's safe to go ahead |
1603 | * and write a new random address. The flag will be set back on |
1604 | * as soon as the SET_ADV_ENABLE HCI command completes. |
1605 | */ |
1606 | hci_dev_clear_flag(hdev, HCI_LE_ADV); |
1607 | |
1608 | /* Set require_privacy to true only when non-connectable |
1609 | * advertising is used. In that case it is fine to use a |
1610 | * non-resolvable private address. |
1611 | */ |
1612 | status = hci_update_random_address_sync(hdev, !connectable, |
1613 | adv_use_rpa(hdev, flags), |
1614 | &own_addr_type); |
1615 | if (status) |
1616 | return status; |
1617 | |
1618 | memset(&cp, 0, sizeof(cp)); |
1619 | |
1620 | if (adv_instance) { |
1621 | adv_min_interval = adv_instance->min_interval; |
1622 | adv_max_interval = adv_instance->max_interval; |
1623 | } else { |
1624 | adv_min_interval = hdev->le_adv_min_interval; |
1625 | adv_max_interval = hdev->le_adv_max_interval; |
1626 | } |
1627 | |
1628 | if (connectable) { |
1629 | cp.type = LE_ADV_IND; |
1630 | } else { |
1631 | if (hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance)) |
1632 | cp.type = LE_ADV_SCAN_IND; |
1633 | else |
1634 | cp.type = LE_ADV_NONCONN_IND; |
1635 | |
1636 | if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) || |
1637 | hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { |
1638 | adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN; |
1639 | adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX; |
1640 | } |
1641 | } |
1642 | |
1643 | cp.min_interval = cpu_to_le16(adv_min_interval); |
1644 | cp.max_interval = cpu_to_le16(adv_max_interval); |
1645 | cp.own_address_type = own_addr_type; |
1646 | cp.channel_map = hdev->le_adv_channel_map; |
1647 | |
1648 | status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM, |
1649 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
1650 | if (status) |
1651 | return status; |
1652 | |
1653 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE, |
1654 | sizeof(enable), &enable, HCI_CMD_TIMEOUT); |
1655 | } |
1656 | |
1657 | static int enable_advertising_sync(struct hci_dev *hdev, void *data) |
1658 | { |
1659 | return hci_enable_advertising_sync(hdev); |
1660 | } |
1661 | |
1662 | int hci_enable_advertising(struct hci_dev *hdev) |
1663 | { |
1664 | if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) && |
1665 | list_empty(&hdev->adv_instances)) |
1666 | return 0; |
1667 | |
1668 | return hci_cmd_sync_queue(hdev, enable_advertising_sync, NULL, NULL); |
1669 | } |
1670 | |
1671 | int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance, |
1672 | struct sock *sk) |
1673 | { |
1674 | int err; |
1675 | |
1676 | if (!ext_adv_capable(hdev)) |
1677 | return 0; |
1678 | |
1679 | err = hci_disable_ext_adv_instance_sync(hdev, instance); |
1680 | if (err) |
1681 | return err; |
1682 | |
1683 | /* If request specifies an instance that doesn't exist, fail */ |
1684 | if (instance > 0 && !hci_find_adv_instance(hdev, instance)) |
1685 | return -EINVAL; |
1686 | |
1687 | return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_REMOVE_ADV_SET, |
1688 | sizeof(instance), &instance, 0, |
1689 | HCI_CMD_TIMEOUT, sk); |
1690 | } |
1691 | |
1692 | static int remove_ext_adv_sync(struct hci_dev *hdev, void *data) |
1693 | { |
1694 | struct adv_info *adv = data; |
1695 | u8 instance = 0; |
1696 | |
1697 | if (adv) |
1698 | instance = adv->instance; |
1699 | |
1700 | return hci_remove_ext_adv_instance_sync(hdev, instance, NULL); |
1701 | } |
1702 | |
1703 | int hci_remove_ext_adv_instance(struct hci_dev *hdev, u8 instance) |
1704 | { |
1705 | struct adv_info *adv = NULL; |
1706 | |
1707 | if (instance) { |
1708 | adv = hci_find_adv_instance(hdev, instance); |
1709 | if (!adv) |
1710 | return -EINVAL; |
1711 | } |
1712 | |
1713 | return hci_cmd_sync_queue(hdev, remove_ext_adv_sync, adv, NULL); |
1714 | } |
1715 | |
1716 | int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason) |
1717 | { |
1718 | struct hci_cp_le_term_big cp; |
1719 | |
1720 | memset(&cp, 0, sizeof(cp)); |
1721 | cp.handle = handle; |
1722 | cp.reason = reason; |
1723 | |
1724 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_TERM_BIG, |
1725 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
1726 | } |
1727 | |
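/* Program the extended advertising data for an instance, skipping the
 * command when an instance's data has not changed.
 */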
1728 | static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance) |
1729 | { |
1730 | struct { |
1731 | struct hci_cp_le_set_ext_adv_data cp; |
1732 | u8 data[HCI_MAX_EXT_AD_LENGTH]; |
1733 | } pdu; |
1734 | u8 len; |
1735 | struct adv_info *adv = NULL; |
1736 | int err; |
1737 | |
1738 | memset(&pdu, 0, sizeof(pdu)); |
1739 | |
1740 | if (instance) { |
1741 | adv = hci_find_adv_instance(hdev, instance); |
1742 | if (!adv || !adv->adv_data_changed) |
1743 | return 0; |
1744 | } |
1745 | |
1746 | len = eir_create_adv_data(hdev, instance, pdu.data); |
1747 | |
1748 | pdu.cp.length = len; |
1749 | pdu.cp.handle = instance; |
1750 | pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; |
1751 | pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; |
1752 | |
1753 | err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA, |
1754 | sizeof(pdu.cp) + len, &pdu.cp, |
1755 | HCI_CMD_TIMEOUT); |
1756 | if (err) |
1757 | return err; |
1758 | |
1759 | /* Update data if the command succeeds */ |
1760 | if (adv) { |
1761 | adv->adv_data_changed = false; |
1762 | } else { |
1763 | memcpy(hdev->adv_data, pdu.data, len); |
1764 | hdev->adv_data_len = len; |
1765 | } |
1766 | |
1767 | return 0; |
1768 | } |
1769 | |
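/* Program legacy advertising data; the command is skipped when the data is unchanged. */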
1770 | static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance) |
1771 | { |
1772 | struct hci_cp_le_set_adv_data cp; |
1773 | u8 len; |
1774 | |
1775 | memset(&cp, 0, sizeof(cp)); |
1776 | |
1777 | len = eir_create_adv_data(hdev, instance, cp.data); |
1778 | |
1779 | /* There's nothing to do if the data hasn't changed */ |
1780 | if (hdev->adv_data_len == len && |
1781 | memcmp(cp.data, hdev->adv_data, len) == 0) |
1782 | return 0; |
1783 | |
1784 | memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); |
1785 | hdev->adv_data_len = len; |
1786 | |
1787 | cp.length = len; |
1788 | |
1789 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA, |
1790 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
1791 | } |
1792 | |
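/* Update advertising data for an instance using either the extended or the
 * legacy command, depending on controller support.
 */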
1793 | int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance) |
1794 | { |
1795 | if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) |
1796 | return 0; |
1797 | |
1798 | if (ext_adv_capable(hdev)) |
1799 | return hci_set_ext_adv_data_sync(hdev, instance); |
1800 | |
1801 | return hci_set_adv_data_sync(hdev, instance); |
1802 | } |
1803 | |
1804 | int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance, |
1805 | bool force) |
1806 | { |
1807 | struct adv_info *adv = NULL; |
1808 | u16 timeout; |
1809 | |
1810 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && !ext_adv_capable(hdev)) |
1811 | return -EPERM; |
1812 | |
1813 | if (hdev->adv_instance_timeout) |
1814 | return -EBUSY; |
1815 | |
1816 | adv = hci_find_adv_instance(hdev, instance); |
1817 | if (!adv) |
1818 | return -ENOENT; |
1819 | |
1820 | /* A zero timeout means unlimited advertising. As long as there is |
1821 | * only one instance, duration should be ignored. We still set a timeout |
1822 | * in case further instances are being added later on. |
1823 | * |
1824 | * If the remaining lifetime of the instance is more than the duration |
1825 | * then the timeout corresponds to the duration, otherwise it will be |
1826 | * reduced to the remaining instance lifetime. |
1827 | */ |
1828 | if (adv->timeout == 0 || adv->duration <= adv->remaining_time) |
1829 | timeout = adv->duration; |
1830 | else |
1831 | timeout = adv->remaining_time; |
1832 | |
1833 | /* The remaining time is being reduced unless the instance is being |
1834 | * advertised without time limit. |
1835 | */ |
1836 | if (adv->timeout) |
1837 | adv->remaining_time = adv->remaining_time - timeout; |
1838 | |
1839 | /* Only use work for scheduling instances with legacy advertising */ |
1840 | if (!ext_adv_capable(hdev)) { |
1841 | hdev->adv_instance_timeout = timeout; |
1842 | queue_delayed_work(hdev->req_workqueue, |
1843 | &hdev->adv_instance_expire, |
1844 | msecs_to_jiffies(timeout * 1000)); |
1845 | } |
1846 | |
1847 | /* If we're just re-scheduling the same instance again then do not |
1848 | * execute any HCI commands. This happens when a single instance is |
1849 | * being advertised. |
1850 | */ |
1851 | if (!force && hdev->cur_adv_instance == instance && |
1852 | hci_dev_test_flag(hdev, HCI_LE_ADV)) |
1853 | return 0; |
1854 | |
1855 | hdev->cur_adv_instance = instance; |
1856 | |
1857 | return hci_start_adv_sync(hdev, instance); |
1858 | } |
1859 | |
1860 | static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk) |
1861 | { |
1862 | int err; |
1863 | |
1864 | if (!ext_adv_capable(hdev)) |
1865 | return 0; |
1866 | |
1867 | /* Disable instance 0x00 to disable all instances */ |
1868 | err = hci_disable_ext_adv_instance_sync(hdev, 0x00); |
1869 | if (err) |
1870 | return err; |
1871 | |
1872 | return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CLEAR_ADV_SETS, |
1873 | 0, NULL, 0, HCI_CMD_TIMEOUT, sk); |
1874 | } |
1875 | |
1876 | static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force) |
1877 | { |
1878 | struct adv_info *adv, *n; |
1879 | int err = 0; |
1880 | |
1881 | if (ext_adv_capable(hdev)) |
1882 | /* Remove all existing sets */ |
1883 | err = hci_clear_adv_sets_sync(hdev, sk); |
1884 | if (ext_adv_capable(hdev)) |
1885 | return err; |
1886 | |
1887 | /* This is safe as long as no command is sent while the lock is |
1888 | * held. |
1889 | */ |
1890 | hci_dev_lock(hdev); |
1891 | |
1892 | /* Cleanup non-ext instances */ |
1893 | list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { |
1894 | u8 instance = adv->instance; |
1895 | int err; |
1896 | |
1897 | if (!(force || adv->timeout)) |
1898 | continue; |
1899 | |
1900 | err = hci_remove_adv_instance(hdev, instance); |
1901 | if (!err) |
1902 | mgmt_advertising_removed(sk, hdev, instance); |
1903 | } |
1904 | |
1905 | hci_dev_unlock(hdev); |
1906 | |
1907 | return 0; |
1908 | } |
1909 | |
1910 | static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance, |
1911 | struct sock *sk) |
1912 | { |
1913 | int err = 0; |
1914 | |
1915 | /* If we use extended advertising, instance has to be removed first. */ |
1916 | if (ext_adv_capable(hdev)) |
1917 | err = hci_remove_ext_adv_instance_sync(hdev, instance, sk); |
1918 | if (ext_adv_capable(hdev)) |
1919 | return err; |
1920 | |
1921 | /* This is safe as long as no command is sent while the lock is |
1922 | * held. |
1923 | */ |
1924 | hci_dev_lock(hdev); |
1925 | |
1926 | err = hci_remove_adv_instance(hdev, instance); |
1927 | if (!err) |
1928 | mgmt_advertising_removed(sk, hdev, instance); |
1929 | |
1930 | hci_dev_unlock(hdev); |
1931 | |
1932 | return err; |
1933 | } |
1934 | |
1935 | /* For a single instance: |
1936 | * - force == true: The instance will be removed even when its remaining |
1937 | * lifetime is not zero. |
1938 | * - force == false: the instance will be deactivated but kept stored unless |
1939 | * the remaining lifetime is zero. |
1940 | * |
1941 | * For instance == 0x00: |
1942 | * - force == true: All instances will be removed regardless of their timeout |
1943 | * setting. |
1944 | * - force == false: Only instances that have a timeout will be removed. |
1945 | */ |
1946 | int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk, |
1947 | u8 instance, bool force) |
1948 | { |
1949 | struct adv_info *next = NULL; |
1950 | int err; |
1951 | |
1952 | /* Cancel any timeout concerning the removed instance(s). */ |
1953 | if (!instance || hdev->cur_adv_instance == instance) |
1954 | cancel_adv_timeout(hdev); |
1955 | |
1956 | /* Get the next instance to advertise BEFORE we remove |
1957 | * the current one. This can be the same instance again |
1958 | * if there is only one instance. |
1959 | */ |
1960 | if (hdev->cur_adv_instance == instance) |
1961 | next = hci_get_next_instance(hdev, instance); |
1962 | |
1963 | if (!instance) { |
1964 | err = hci_clear_adv_sync(hdev, sk, force); |
1965 | if (err) |
1966 | return err; |
1967 | } else { |
1968 | struct adv_info *adv = hci_find_adv_instance(hdev, instance); |
1969 | |
1970 | if (force || (adv && adv->timeout && !adv->remaining_time)) { |
1971 | /* Don't advertise a removed instance. */ |
1972 | if (next && next->instance == instance) |
1973 | next = NULL; |
1974 | |
1975 | err = hci_remove_adv_sync(hdev, instance, sk); |
1976 | if (err) |
1977 | return err; |
1978 | } |
1979 | } |
1980 | |
1981 | if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING)) |
1982 | return 0; |
1983 | |
1984 | if (next && !ext_adv_capable(hdev)) |
1985 | hci_schedule_adv_instance_sync(hdev, next->instance, false); |
1986 | |
1987 | return 0; |
1988 | } |
1989 | |
1990 | int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle) |
1991 | { |
1992 | struct hci_cp_read_rssi cp; |
1993 | |
1994 | cp.handle = handle; |
1995 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_RSSI, |
1996 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
1997 | } |
1998 | |
1999 | int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp) |
2000 | { |
2001 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLOCK, |
2002 | sizeof(*cp), cp, HCI_CMD_TIMEOUT); |
2003 | } |
2004 | |
2005 | int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type) |
2006 | { |
2007 | struct hci_cp_read_tx_power cp; |
2008 | |
2009 | cp.handle = handle; |
2010 | cp.type = type; |
2011 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_TX_POWER, |
2012 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
2013 | } |
2014 | |
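/* Disable advertising if it is currently enabled, using the extended or
 * legacy command as appropriate.
 */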
2015 | int hci_disable_advertising_sync(struct hci_dev *hdev) |
2016 | { |
2017 | u8 enable = 0x00; |
2018 | int err = 0; |
2019 | |
2020 | /* If controller is not advertising we are done. */ |
2021 | if (!hci_dev_test_flag(hdev, HCI_LE_ADV)) |
2022 | return 0; |
2023 | |
2024 | if (ext_adv_capable(hdev)) |
2025 | err = hci_disable_ext_adv_instance_sync(hdev, 0x00); |
2026 | if (ext_adv_capable(hdev)) |
2027 | return err; |
2028 | |
2029 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE, |
2030 | sizeof(enable), &enable, HCI_CMD_TIMEOUT); |
2031 | } |
2032 | |
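/* Enable or disable extended scanning; duplicate filtering is forced off when
 * the HCI_MESH flag is set.
 */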
2033 | static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val, |
2034 | u8 filter_dup) |
2035 | { |
2036 | struct hci_cp_le_set_ext_scan_enable cp; |
2037 | |
2038 | memset(&cp, 0, sizeof(cp)); |
2039 | cp.enable = val; |
2040 | |
2041 | if (hci_dev_test_flag(hdev, HCI_MESH)) |
2042 | cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE; |
2043 | else |
2044 | cp.filter_dup = filter_dup; |
2045 | |
2046 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE, |
2047 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
2048 | } |
2049 | |
2050 | static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val, |
2051 | u8 filter_dup) |
2052 | { |
2053 | struct hci_cp_le_set_scan_enable cp; |
2054 | |
2055 | if (use_ext_scan(hdev)) |
2056 | return hci_le_set_ext_scan_enable_sync(hdev, val, filter_dup); |
2057 | |
2058 | memset(&cp, 0, sizeof(cp)); |
2059 | cp.enable = val; |
2060 | |
2061 | if (val && hci_dev_test_flag(hdev, HCI_MESH)) |
2062 | cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE; |
2063 | else |
2064 | cp.filter_dup = filter_dup; |
2065 | |
2066 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE, |
2067 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
2068 | } |
2069 | |
2070 | static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val) |
2071 | { |
2072 | if (!use_ll_privacy(hdev)) |
2073 | return 0; |
2074 | |
2075 | /* If the controller is already in the requested resolving state we are done. */ |
2076 | if (val == hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) |
2077 | return 0; |
2078 | |
2079 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, |
2080 | sizeof(val), &val, HCI_CMD_TIMEOUT); |
2081 | } |
2082 | |
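/* Disable LE scanning unless the controller is not scanning or scanning has
 * been paused for suspend.
 */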
2083 | static int hci_scan_disable_sync(struct hci_dev *hdev) |
2084 | { |
2085 | int err; |
2086 | |
2087 | /* If controller is not scanning we are done. */ |
2088 | if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) |
2089 | return 0; |
2090 | |
2091 | if (hdev->scanning_paused) { |
2092 | bt_dev_dbg(hdev, "Scanning is paused for suspend"); |
2093 | return 0; |
2094 | } |
2095 | |
2096 | err = hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00); |
2097 | if (err) { |
2098 | bt_dev_err(hdev, "Unable to disable scanning: %d", err); |
2099 | return err; |
2100 | } |
2101 | |
2102 | return err; |
2103 | } |
2104 | |
2105 | static bool scan_use_rpa(struct hci_dev *hdev) |
2106 | { |
2107 | return hci_dev_test_flag(hdev, HCI_PRIVACY); |
2108 | } |
2109 | |
2110 | static void hci_start_interleave_scan(struct hci_dev *hdev) |
2111 | { |
2112 | hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; |
2113 | queue_delayed_work(hdev->req_workqueue, |
2114 | &hdev->interleave_scan, 0); |
2115 | } |
2116 | |
2117 | static bool is_interleave_scanning(struct hci_dev *hdev) |
2118 | { |
2119 | return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE; |
2120 | } |
2121 | |
2122 | static void cancel_interleave_scan(struct hci_dev *hdev) |
2123 | { |
2124 | bt_dev_dbg(hdev, "cancelling interleave scan"); |
2125 | |
2126 | cancel_delayed_work_sync(&hdev->interleave_scan); |
2127 | |
2128 | hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE; |
2129 | } |
2130 | |
2131 | /* Return true if this function started an interleave scan, |
2132 | * otherwise return false. |
2133 | */ |
2134 | static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev) |
2135 | { |
2136 | /* Do interleaved scan only if all of the following are true: |
2137 | * - There is at least one ADV monitor |
2138 | * - At least one pending LE connection or one device to be scanned for |
2139 | * - Monitor offloading is not supported |
2140 | * If so, we should alternate between allowlist scan and one without |
2141 | * any filters to save power. |
2142 | */ |
2143 | bool use_interleaving = hci_is_adv_monitoring(hdev) && |
2144 | !(list_empty(&hdev->pend_le_conns) && |
2145 | list_empty(&hdev->pend_le_reports)) && |
2146 | hci_get_adv_monitor_offload_ext(hdev) == |
2147 | HCI_ADV_MONITOR_EXT_NONE; |
2148 | bool is_interleaving = is_interleave_scanning(hdev); |
2149 | |
2150 | if (use_interleaving && !is_interleaving) { |
2151 | hci_start_interleave_scan(hdev); |
2152 | bt_dev_dbg(hdev, "starting interleave scan"); |
2153 | return true; |
2154 | } |
2155 | |
2156 | if (!use_interleaving && is_interleaving) |
2157 | cancel_interleave_scan(hdev); |
2158 | |
2159 | return false; |
2160 | } |
2161 | |
2162 | /* Removes a device from the resolving list if needed. */ |
2163 | static int hci_le_del_resolve_list_sync(struct hci_dev *hdev, |
2164 | bdaddr_t *bdaddr, u8 bdaddr_type) |
2165 | { |
2166 | struct hci_cp_le_del_from_resolv_list cp; |
2167 | struct bdaddr_list_with_irk *entry; |
2168 | |
2169 | if (!use_ll_privacy(hdev)) |
2170 | return 0; |
2171 | |
2172 | /* Check if the IRK has been programmed */ |
2173 | entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, bdaddr, |
2174 | bdaddr_type); |
2175 | if (!entry) |
2176 | return 0; |
2177 | |
2178 | cp.bdaddr_type = bdaddr_type; |
2179 | bacpy(&cp.bdaddr, bdaddr); |
2180 | |
2181 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST, |
2182 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
2183 | } |
2184 | |
2185 | static int hci_le_del_accept_list_sync(struct hci_dev *hdev, |
2186 | bdaddr_t *bdaddr, u8 bdaddr_type) |
2187 | { |
2188 | struct hci_cp_le_del_from_accept_list cp; |
2189 | int err; |
2190 | |
2191 | /* Check if device is on accept list before removing it */ |
2192 | if (!hci_bdaddr_list_lookup(&hdev->le_accept_list, bdaddr, bdaddr_type)) |
2193 | return 0; |
2194 | |
2195 | cp.bdaddr_type = bdaddr_type; |
2196 | bacpy(&cp.bdaddr, bdaddr); |
2197 | |
2198 | /* Ignore errors when removing from the resolving list, as it is |
2199 | * likely that the device was never added. |
2200 | */ |
2201 | hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type); |
2202 | |
2203 | err = __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, |
2204 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
2205 | if (err) { |
2206 | bt_dev_err(hdev, "Unable to remove from allow list: %d", err); |
2207 | return err; |
2208 | } |
2209 | |
2210 | bt_dev_dbg(hdev, "Remove %pMR (0x%x) from allow list", &cp.bdaddr, |
2211 | cp.bdaddr_type); |
2212 | |
2213 | return 0; |
2214 | } |
2215 | |
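/* Snapshot of the hci_conn_params fields needed to program the accept and
 * resolving lists without holding hdev->lock.
 */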
2216 | struct conn_params { |
2217 | bdaddr_t addr; |
2218 | u8 addr_type; |
2219 | hci_conn_flags_t flags; |
2220 | u8 privacy_mode; |
2221 | }; |
2222 | |
2223 | /* Adds a device to the resolving list if needed. |
2224 | * Setting params to NULL programs the local hdev->irk. |
2225 | */ |
2226 | static int hci_le_add_resolve_list_sync(struct hci_dev *hdev, |
2227 | struct conn_params *params) |
2228 | { |
2229 | struct hci_cp_le_add_to_resolv_list cp; |
2230 | struct smp_irk *irk; |
2231 | struct bdaddr_list_with_irk *entry; |
2232 | struct hci_conn_params *p; |
2233 | |
2234 | if (!use_ll_privacy(hdev)) |
2235 | return 0; |
2236 | |
2237 | /* Attempt to program local identity address, type and irk if params is |
2238 | * NULL. |
2239 | */ |
2240 | if (!params) { |
2241 | if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) |
2242 | return 0; |
2243 | |
2244 | hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type); |
2245 | memcpy(cp.peer_irk, hdev->irk, 16); |
2246 | goto done; |
2247 | } |
2248 | |
2249 | irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type); |
2250 | if (!irk) |
2251 | return 0; |
2252 | |
2253 | /* Check if the IRK has _not_ been programmed yet. */ |
2254 | entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, |
2255 | &params->addr, |
2256 | params->addr_type); |
2257 | if (entry) |
2258 | return 0; |
2259 | |
2260 | cp.bdaddr_type = params->addr_type; |
2261 | bacpy(&cp.bdaddr, &params->addr); |
2262 | memcpy(cp.peer_irk, irk->val, 16); |
2263 | |
2264 | /* Default privacy mode is always Network */ |
2265 | params->privacy_mode = HCI_NETWORK_PRIVACY; |
2266 | |
2267 | rcu_read_lock(); |
2268 | p = hci_pend_le_action_lookup(&hdev->pend_le_conns, |
2269 | &params->addr, params->addr_type); |
2270 | if (!p) |
2271 | p = hci_pend_le_action_lookup(&hdev->pend_le_reports, |
2272 | &params->addr, params->addr_type); |
2273 | if (p) |
2274 | WRITE_ONCE(p->privacy_mode, HCI_NETWORK_PRIVACY); |
2275 | rcu_read_unlock(); |
2276 | |
2277 | done: |
2278 | if (hci_dev_test_flag(hdev, HCI_PRIVACY)) |
2279 | memcpy(cp.local_irk, hdev->irk, 16); |
2280 | else |
2281 | memset(cp.local_irk, 0, 16); |
2282 | |
2283 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST, |
2284 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
2285 | } |
2286 | |
2287 | /* Set Device Privacy Mode. */ |
2288 | static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev, |
2289 | struct conn_params *params) |
2290 | { |
2291 | struct hci_cp_le_set_privacy_mode cp; |
2292 | struct smp_irk *irk; |
2293 | |
2294 | /* If device privacy mode has already been set there is nothing to do */ |
2295 | if (params->privacy_mode == HCI_DEVICE_PRIVACY) |
2296 | return 0; |
2297 | |
2298 | /* Check if HCI_CONN_FLAG_DEVICE_PRIVACY has been set as it also |
2299 | * indicates that LL Privacy has been enabled and |
2300 | * HCI_OP_LE_SET_PRIVACY_MODE is supported. |
2301 | */ |
2302 | if (!(params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)) |
2303 | return 0; |
2304 | |
2305 | irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type); |
2306 | if (!irk) |
2307 | return 0; |
2308 | |
2309 | memset(&cp, 0, sizeof(cp)); |
2310 | cp.bdaddr_type = irk->addr_type; |
2311 | bacpy(&cp.bdaddr, &irk->bdaddr); |
2312 | cp.mode = HCI_DEVICE_PRIVACY; |
2313 | |
2314 | /* Note: params->privacy_mode is not updated since it is a copy */ |
2315 | |
2316 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE, |
2317 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
2318 | } |
2319 | |
2320 | /* Adds a device to the accept list if needed; if the device uses an RPA |
2321 | * (has an IRK) this also attempts to program it into the resolving list |
2322 | * and set the privacy mode properly. |
2323 | */ |
2324 | static int hci_le_add_accept_list_sync(struct hci_dev *hdev, |
2325 | struct conn_params *params, |
2326 | u8 *num_entries) |
2327 | { |
2328 | struct hci_cp_le_add_to_accept_list cp; |
2329 | int err; |
2330 | |
2331 | /* During suspend, only wakeable devices can be in the accept list */ |
2332 | if (hdev->suspended && |
2333 | !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) { |
2334 | hci_le_del_accept_list_sync(hdev, &params->addr, |
2335 | params->addr_type); |
2336 | return 0; |
2337 | } |
2338 | |
2339 | /* Accept list is full: let the caller fall back to a filter policy that accepts all advertising */ |
2340 | if (*num_entries >= hdev->le_accept_list_size) |
2341 | return -ENOSPC; |
2342 | |
2343 | /* Accept list cannot be used with RPAs */ |
2344 | if (!use_ll_privacy(hdev) && |
2345 | hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) |
2346 | return -EINVAL; |
2347 | |
2348 | /* Attempt to program the device in the resolving list first to avoid |
2349 | * having to roll back in case it fails, since the resolving list is |
2350 | * dynamic and can probably be smaller than the accept list. |
2351 | */ |
2352 | err = hci_le_add_resolve_list_sync(hdev, params); |
2353 | if (err) { |
2354 | bt_dev_err(hdev, "Unable to add to resolve list: %d", err); |
2355 | return err; |
2356 | } |
2357 | |
2358 | /* Set Privacy Mode */ |
2359 | err = hci_le_set_privacy_mode_sync(hdev, params); |
2360 | if (err) { |
2361 | bt_dev_err(hdev, "Unable to set privacy mode: %d", err); |
2362 | return err; |
2363 | } |
2364 | |
2365 | /* Check if already in accept list */ |
2366 | if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr, |
2367 | params->addr_type)) |
2368 | return 0; |
2369 | |
2370 | *num_entries += 1; |
2371 | cp.bdaddr_type = params->addr_type; |
2372 | bacpy(&cp.bdaddr, &params->addr); |
2373 | |
2374 | err = __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST, |
2375 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
2376 | if (err) { |
2377 | bt_dev_err(hdev, "Unable to add to allow list: %d", err); |
2378 | /* Rollback the device from the resolving list */ |
2379 | hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type); |
2380 | return err; |
2381 | } |
2382 | |
2383 | bt_dev_dbg(hdev, "Add %pMR (0x%x) to allow list", &cp.bdaddr, |
2384 | cp.bdaddr_type); |
2385 | |
2386 | return 0; |
2387 | } |
2388 | |
2389 | /* This function disables/pauses all advertising instances */ |
2390 | static int hci_pause_advertising_sync(struct hci_dev *hdev) |
2391 | { |
2392 | int err; |
2393 | int old_state; |
2394 | |
2395 | /* If already been paused there is nothing to do. */ |
2396 | if (hdev->advertising_paused) |
2397 | return 0; |
2398 | |
2399 | bt_dev_dbg(hdev, "Pausing directed advertising"); |
2400 | |
2401 | /* Stop directed advertising */ |
2402 | old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING); |
2403 | if (old_state) { |
2404 | /* When discoverable timeout triggers, then just make sure |
2405 | * the limited discoverable flag is cleared. Even in the case |
2406 | * of a timeout triggered from general discoverable, it is |
2407 | * safe to unconditionally clear the flag. |
2408 | */ |
2409 | hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); |
2410 | hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); |
2411 | hdev->discov_timeout = 0; |
2412 | } |
2413 | |
2414 | bt_dev_dbg(hdev, "Pausing advertising instances"); |
2415 | |
2416 | /* Call to disable any advertisements active on the controller. |
2417 | * This will succeed even if no advertisements are configured. |
2418 | */ |
2419 | err = hci_disable_advertising_sync(hdev); |
2420 | if (err) |
2421 | return err; |
2422 | |
2423 | /* If we are using software rotation, pause the loop */ |
2424 | if (!ext_adv_capable(hdev)) |
2425 | cancel_adv_timeout(hdev); |
2426 | |
2427 | hdev->advertising_paused = true; |
2428 | hdev->advertising_old_state = old_state; |
2429 | |
2430 | return 0; |
2431 | } |
2432 | |
2433 | /* This function enables all user advertising instances */ |
2434 | static int hci_resume_advertising_sync(struct hci_dev *hdev) |
2435 | { |
2436 | struct adv_info *adv, *tmp; |
2437 | int err; |
2438 | |
2439 | /* If advertising has not been paused there is nothing to do. */ |
2440 | if (!hdev->advertising_paused) |
2441 | return 0; |
2442 | |
2443 | /* Resume directed advertising */ |
2444 | hdev->advertising_paused = false; |
2445 | if (hdev->advertising_old_state) { |
2446 | hci_dev_set_flag(hdev, HCI_ADVERTISING); |
2447 | hdev->advertising_old_state = 0; |
2448 | } |
2449 | |
2450 | bt_dev_dbg(hdev, "Resuming advertising instances"); |
2451 | |
2452 | if (ext_adv_capable(hdev)) { |
2453 | /* Call for each tracked instance to be re-enabled */ |
2454 | list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) { |
2455 | err = hci_enable_ext_advertising_sync(hdev, |
2456 | adv->instance); |
2457 | if (!err) |
2458 | continue; |
2459 | |
2460 | /* If the instance cannot be resumed remove it */ |
2461 | hci_remove_ext_adv_instance_sync(hdev, adv->instance, |
2462 | NULL); |
2463 | } |
2464 | } else { |
2465 | /* Schedule for most recent instance to be restarted and begin |
2466 | * the software rotation loop |
2467 | */ |
2468 | err = hci_schedule_adv_instance_sync(hdev, |
2469 | hdev->cur_adv_instance, |
2470 | true); |
2471 | } |
2472 | |
2473 | hdev->advertising_paused = false; |
2474 | |
2475 | return err; |
2476 | } |
2477 | |
2478 | static int hci_pause_addr_resolution(struct hci_dev *hdev) |
2479 | { |
2480 | int err; |
2481 | |
2482 | if (!use_ll_privacy(hdev)) |
2483 | return 0; |
2484 | |
2485 | if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) |
2486 | return 0; |
2487 | |
2488 | /* Cannot disable addr resolution if scanning is enabled or |
2489 | * when initiating an LE connection. |
2490 | */ |
2491 | if (hci_dev_test_flag(hdev, HCI_LE_SCAN) || |
2492 | hci_lookup_le_connect(hdev)) { |
2493 | bt_dev_err(hdev, "Command not allowed when scan/LE connect"); |
2494 | return -EPERM; |
2495 | } |
2496 | |
2497 | /* Cannot disable addr resolution if advertising is enabled. */ |
2498 | err = hci_pause_advertising_sync(hdev); |
2499 | if (err) { |
2500 | bt_dev_err(hdev, "Pause advertising failed: %d", err); |
2501 | return err; |
2502 | } |
2503 | |
2504 | err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00); |
2505 | if (err) |
2506 | bt_dev_err(hdev, "Unable to disable Address Resolution: %d", |
2507 | err); |
2508 | |
2509 | /* Return if address resolution was disabled and RPA is being used. */ |
2510 | if (!err && scan_use_rpa(hdev)) |
2511 | return 0; |
2512 | |
2513 | hci_resume_advertising_sync(hdev); |
2514 | return err; |
2515 | } |
2516 | |
2517 | struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev, |
2518 | bool extended, struct sock *sk) |
2519 | { |
2520 | u16 opcode = extended ? HCI_OP_READ_LOCAL_OOB_EXT_DATA : |
2521 | HCI_OP_READ_LOCAL_OOB_DATA; |
2522 | |
2523 | return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk); |
2524 | } |
2525 | |
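/* Copy the given action list into a flat array so it can be iterated without
 * holding RCU or hdev->lock while HCI commands are sent.
 */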
2526 | static struct conn_params *conn_params_copy(struct list_head *list, size_t *n) |
2527 | { |
2528 | struct hci_conn_params *params; |
2529 | struct conn_params *p; |
2530 | size_t i; |
2531 | |
2532 | rcu_read_lock(); |
2533 | |
2534 | i = 0; |
2535 | list_for_each_entry_rcu(params, list, action) |
2536 | ++i; |
2537 | *n = i; |
2538 | |
2539 | rcu_read_unlock(); |
2540 | |
2541 | p = kvcalloc(*n, sizeof(struct conn_params), GFP_KERNEL); |
2542 | if (!p) |
2543 | return NULL; |
2544 | |
2545 | rcu_read_lock(); |
2546 | |
2547 | i = 0; |
2548 | list_for_each_entry_rcu(params, list, action) { |
2549 | /* Racing adds are handled in next scan update */ |
2550 | if (i >= *n) |
2551 | break; |
2552 | |
2553 | /* No hdev->lock, but: addr, addr_type are immutable. |
2554 | * privacy_mode is only written by us or in |
2555 | * hci_cc_le_set_privacy_mode that we wait for. |
2556 | * We should be idempotent so MGMT updating flags |
2557 | * while we are processing is OK. |
2558 | */ |
2559 | bacpy(&p[i].addr, &params->addr); |
2560 | p[i].addr_type = params->addr_type; |
2561 | p[i].flags = READ_ONCE(params->flags); |
2562 | p[i].privacy_mode = READ_ONCE(params->privacy_mode); |
2563 | ++i; |
2564 | } |
2565 | |
2566 | rcu_read_unlock(); |
2567 | |
2568 | *n = i; |
2569 | return p; |
2570 | } |
2571 | |
2572 | /* Clear LE Accept List */ |
2573 | static int hci_le_clear_accept_list_sync(struct hci_dev *hdev) |
2574 | { |
2575 | if (!(hdev->commands[26] & 0x80)) |
2576 | return 0; |
2577 | |
2578 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL, |
2579 | HCI_CMD_TIMEOUT); |
2580 | } |
2581 | |
2582 | /* Device must not be scanning when updating the accept list. |
2583 | * |
2584 | * Update is done using the following sequence: |
2585 | * |
2586 | * use_ll_privacy((Disable Advertising) -> Disable Resolving List) -> |
2587 | * Remove Devices From Accept List -> |
2588 | * (has IRK && use_ll_privacy(Remove Devices From Resolving List))-> |
2589 | * Add Devices to Accept List -> |
2590 | * (has IRK && use_ll_privacy(Remove Devices From Resolving List)) -> |
2591 | * use_ll_privacy(Enable Resolving List -> (Enable Advertising)) -> |
2592 | * Enable Scanning |
2593 | * |
2594 | * In case of failure, advertising shall be restored to its original state and |
2595 | * the returned filter policy will disable use of the accept list, since either |
2596 | * the accept list or the resolving list could not be programmed. |
2597 | * |
2598 | */ |
2599 | static u8 hci_update_accept_list_sync(struct hci_dev *hdev) |
2600 | { |
2601 | struct conn_params *params; |
2602 | struct bdaddr_list *b, *t; |
2603 | u8 num_entries = 0; |
2604 | bool pend_conn, pend_report; |
2605 | u8 filter_policy; |
2606 | size_t i, n; |
2607 | int err; |
2608 | |
2609 | /* Pause advertising if resolving list can be used as controllers |
2610 | * cannot accept resolving list modifications while advertising. |
2611 | */ |
2612 | if (use_ll_privacy(hdev)) { |
2613 | err = hci_pause_advertising_sync(hdev); |
2614 | if (err) { |
2615 | bt_dev_err(hdev, "pause advertising failed: %d", err); |
2616 | return 0x00; |
2617 | } |
2618 | } |
2619 | |
2620 | /* Disable address resolution while reprogramming accept list since |
2621 | * devices that do have an IRK will be programmed in the resolving list |
2622 | * when LL Privacy is enabled. |
2623 | */ |
2624 | err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00); |
2625 | if (err) { |
2626 | bt_dev_err(hdev, "Unable to disable LL privacy: %d", err); |
2627 | goto done; |
2628 | } |
2629 | |
2630 | /* Force address filtering if PA Sync is in progress */ |
2631 | if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) { |
2632 | struct hci_cp_le_pa_create_sync *sent; |
2633 | |
2634 | sent = hci_sent_cmd_data(hdev, HCI_OP_LE_PA_CREATE_SYNC); |
2635 | if (sent) { |
2636 | struct conn_params pa; |
2637 | |
2638 | memset(&pa, 0, sizeof(pa)); |
2639 | |
2640 | bacpy(&pa.addr, &sent->addr); |
2641 | pa.addr_type = sent->addr_type; |
2642 | |
2643 | /* Clear first since there could be addresses left |
2644 | * behind. |
2645 | */ |
2646 | hci_le_clear_accept_list_sync(hdev); |
2647 | |
2648 | num_entries = 1; |
2649 | err = hci_le_add_accept_list_sync(hdev, &pa, |
2650 | &num_entries); |
2651 | goto done; |
2652 | } |
2653 | } |
2654 | |
2655 | /* Go through the current accept list programmed into the |
2656 | * controller one by one and check if that address is connected or is |
2657 | * still in the list of pending connections or list of devices to |
2658 | * report. If not present in either list, then remove it from |
2659 | * the controller. |
2660 | */ |
2661 | list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) { |
2662 | if (hci_conn_hash_lookup_le(hdev, &b->bdaddr, b->bdaddr_type)) |
2663 | continue; |
2664 | |
2665 | /* Pointers not dereferenced, no locks needed */ |
2666 | pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns, |
2667 | &b->bdaddr, |
2668 | b->bdaddr_type); |
2669 | pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports, |
2670 | &b->bdaddr, |
2671 | b->bdaddr_type); |
2672 | |
2673 | /* If the device is not likely to connect or report, |
2674 | * remove it from the accept list. |
2675 | */ |
2676 | if (!pend_conn && !pend_report) { |
2677 | hci_le_del_accept_list_sync(hdev, &b->bdaddr, |
2678 | b->bdaddr_type); |
2679 | continue; |
2680 | } |
2681 | |
2682 | num_entries++; |
2683 | } |
2684 | |
2685 | /* Since all no longer valid accept list entries have been |
2686 | * removed, walk through the list of pending connections |
2687 | * and ensure that any new device gets programmed into |
2688 | * the controller. |
2689 | * |
2690 | * If the list of the devices is larger than the list of |
2691 | * available accept list entries in the controller, then |
2692 | * just abort and return filter policy value to not use the |
2693 | * accept list. |
2694 | * |
2695 | * The list and params may be mutated while we wait for events, |
2696 | * so make a copy and iterate it. |
2697 | */ |
2698 | |
2699 | params = conn_params_copy(&hdev->pend_le_conns, &n); |
2700 | if (!params) { |
2701 | err = -ENOMEM; |
2702 | goto done; |
2703 | } |
2704 | |
2705 | for (i = 0; i < n; ++i) { |
2706 | err = hci_le_add_accept_list_sync(hdev, &params[i], |
2707 | &num_entries); |
2708 | if (err) { |
2709 | kvfree(params); |
2710 | goto done; |
2711 | } |
2712 | } |
2713 | |
2714 | kvfree(params); |
2715 | |
2716 | /* After adding all new pending connections, walk through |
2717 | * the list of pending reports and also add these to the |
2718 | * accept list if there is still space. Abort if space runs out. |
2719 | */ |
2720 | |
2721 | params = conn_params_copy(&hdev->pend_le_reports, &n); |
2722 | if (!params) { |
2723 | err = -ENOMEM; |
2724 | goto done; |
2725 | } |
2726 | |
2727 | for (i = 0; i < n; ++i) { |
2728 | err = hci_le_add_accept_list_sync(hdev, &params[i], |
2729 | &num_entries); |
2730 | if (err) { |
2731 | kvfree(params); |
2732 | goto done; |
2733 | } |
2734 | } |
2735 | |
2736 | kvfree(params); |
2737 | |
2738 | /* Use the allowlist unless the following conditions are all true: |
2739 | * - We are not currently suspending |
2740 | * - There are 1 or more ADV monitors registered and it's not offloaded |
2741 | * - Interleaved scanning is not currently using the allowlist |
2742 | */ |
2743 | if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended && |
2744 | hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE && |
2745 | hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST) |
2746 | err = -EINVAL; |
2747 | |
2748 | done: |
2749 | filter_policy = err ? 0x00 : 0x01; |
2750 | |
2751 | /* Enable address resolution when LL Privacy is enabled. */ |
2752 | err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01); |
2753 | if (err) |
2754 | bt_dev_err(hdev, "Unable to enable LL privacy: %d", err); |
2755 | |
2756 | /* Resume advertising if it was paused */ |
2757 | if (use_ll_privacy(hdev)) |
2758 | hci_resume_advertising_sync(hdev); |
2759 | |
2760 | /* Select filter policy to use accept list */ |
2761 | return filter_policy; |
2762 | } |
2763 | |
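/* Fill the per-PHY parameters of an extended scan command. */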
2764 | static void hci_le_scan_phy_params(struct hci_cp_le_scan_phy_params *cp, |
2765 | u8 type, u16 interval, u16 window) |
2766 | { |
2767 | cp->type = type; |
2768 | cp->interval = cpu_to_le16(interval); |
2769 | cp->window = cpu_to_le16(window); |
2770 | } |
2771 | |
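/* Build and send LE Set Extended Scan Parameters, adding one parameter entry
 * per PHY that will be scanned.
 */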
2772 | static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type, |
2773 | u16 interval, u16 window, |
2774 | u8 own_addr_type, u8 filter_policy) |
2775 | { |
2776 | struct hci_cp_le_set_ext_scan_params *cp; |
2777 | struct hci_cp_le_scan_phy_params *phy; |
2778 | u8 data[sizeof(*cp) + sizeof(*phy) * 2]; |
2779 | u8 num_phy = 0x00; |
2780 | |
2781 | cp = (void *)data; |
2782 | phy = (void *)cp->data; |
2783 | |
2784 | memset(data, 0, sizeof(data)); |
2785 | |
2786 | cp->own_addr_type = own_addr_type; |
2787 | cp->filter_policy = filter_policy; |
2788 | |
2789 | /* If PA Sync is in progress, select the PHY based on the |
2790 | * hci_conn.iso_qos. |
2791 | */ |
2792 | if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) { |
2793 | struct hci_cp_le_add_to_accept_list *sent; |
2794 | |
2795 | sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST); |
2796 | if (sent) { |
2797 | struct hci_conn *conn; |
2798 | |
2799 | conn = hci_conn_hash_lookup_ba(hdev, ISO_LINK, |
2800 | &sent->bdaddr); |
2801 | if (conn) { |
2802 | struct bt_iso_qos *qos = &conn->iso_qos; |
2803 | |
2804 | if (qos->bcast.in.phy & BT_ISO_PHY_1M || |
2805 | qos->bcast.in.phy & BT_ISO_PHY_2M) { |
2806 | cp->scanning_phys |= LE_SCAN_PHY_1M; |
2807 | hci_le_scan_phy_params(phy, type, |
2808 | interval, |
2809 | window); |
2810 | num_phy++; |
2811 | phy++; |
2812 | } |
2813 | |
2814 | if (qos->bcast.in.phy & BT_ISO_PHY_CODED) { |
2815 | cp->scanning_phys |= LE_SCAN_PHY_CODED; |
2816 | hci_le_scan_phy_params(phy, type, |
2817 | interval * 3, |
2818 | window * 3); |
2819 | num_phy++; |
2820 | phy++; |
2821 | } |
2822 | |
2823 | if (num_phy) |
2824 | goto done; |
2825 | } |
2826 | } |
2827 | } |
2828 | |
2829 | if (scan_1m(hdev) || scan_2m(hdev)) { |
2830 | cp->scanning_phys |= LE_SCAN_PHY_1M; |
2831 | hci_le_scan_phy_params(phy, type, interval, window); |
2832 | num_phy++; |
2833 | phy++; |
2834 | } |
2835 | |
2836 | if (scan_coded(hdev)) { |
2837 | cp->scanning_phys |= LE_SCAN_PHY_CODED; |
2838 | hci_le_scan_phy_params(phy, type, interval * 3, window * 3); |
2839 | num_phy++; |
2840 | phy++; |
2841 | } |
2842 | |
2843 | done: |
2844 | if (!num_phy) |
2845 | return -EINVAL; |
2846 | |
2847 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS, |
2848 | sizeof(*cp) + sizeof(*phy) * num_phy, |
2849 | data, HCI_CMD_TIMEOUT); |
2850 | } |
2851 | |
2852 | static int hci_le_set_scan_param_sync(struct hci_dev *hdev, u8 type, |
2853 | u16 interval, u16 window, |
2854 | u8 own_addr_type, u8 filter_policy) |
2855 | { |
2856 | struct hci_cp_le_set_scan_param cp; |
2857 | |
2858 | if (use_ext_scan(hdev)) |
2859 | return hci_le_set_ext_scan_param_sync(hdev, type, interval, |
2860 | window, own_addr_type, |
2861 | filter_policy); |
2862 | |
2863 | memset(&cp, 0, sizeof(cp)); |
2864 | cp.type = type; |
2865 | cp.interval = cpu_to_le16(interval); |
2866 | cp.window = cpu_to_le16(window); |
2867 | cp.own_address_type = own_addr_type; |
2868 | cp.filter_policy = filter_policy; |
2869 | |
2870 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_PARAM, |
2871 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
2872 | } |
2873 | |
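/* Program scan parameters and enable LE scanning, unless scanning is paused
 * for suspend.
 */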
2874 | static int hci_start_scan_sync(struct hci_dev *hdev, u8 type, u16 interval, |
2875 | u16 window, u8 own_addr_type, u8 filter_policy, |
2876 | u8 filter_dup) |
2877 | { |
2878 | int err; |
2879 | |
2880 | if (hdev->scanning_paused) { |
2881 | bt_dev_dbg(hdev, "Scanning is paused for suspend"); |
2882 | return 0; |
2883 | } |
2884 | |
2885 | err = hci_le_set_scan_param_sync(hdev, type, interval, window, |
2886 | own_addr_type, filter_policy); |
2887 | if (err) |
2888 | return err; |
2889 | |
2890 | return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, filter_dup); |
2891 | } |
2892 | |
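/* Start LE passive scanning: stop any running scan, pick the own address
 * type, program the accept list and filter policy, then enable scanning with
 * parameters that depend on suspend, connect, ADV monitor or default state.
 */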
2893 | static int hci_passive_scan_sync(struct hci_dev *hdev) |
2894 | { |
2895 | u8 own_addr_type; |
2896 | u8 filter_policy; |
2897 | u16 window, interval; |
2898 | u8 filter_dups = LE_SCAN_FILTER_DUP_ENABLE; |
2899 | int err; |
2900 | |
2901 | if (hdev->scanning_paused) { |
2902 | bt_dev_dbg(hdev, "Scanning is paused for suspend"); |
2903 | return 0; |
2904 | } |
2905 | |
2906 | err = hci_scan_disable_sync(hdev); |
2907 | if (err) { |
2908 | bt_dev_err(hdev, "disable scanning failed: %d", err); |
2909 | return err; |
2910 | } |
2911 | |
2912 | /* Set require_privacy to false since no SCAN_REQ are sent |
2913 | * during passive scanning. Not using a non-resolvable address |
2914 | * here is important so that peer devices using direct |
2915 | * advertising with our address will be correctly reported |
2916 | * by the controller. |
2917 | */ |
2918 | if (hci_update_random_address_sync(hdev, false, scan_use_rpa(hdev), |
2919 | &own_addr_type)) |
2920 | return 0; |
2921 | |
2922 | if (hdev->enable_advmon_interleave_scan && |
2923 | hci_update_interleaved_scan_sync(hdev)) |
2924 | return 0; |
2925 | |
2926 | bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state); |
2927 | |
2928 | /* Adding or removing entries from the accept list must |
2929 | * happen before enabling scanning. The controller does |
2930 | * not allow accept list modification while scanning. |
2931 | */ |
2932 | filter_policy = hci_update_accept_list_sync(hdev); |
2933 | |
2934 | /* When the controller is using random resolvable addresses and |
2935 | * with that having LE privacy enabled, then controllers with |
2936 | * Extended Scanner Filter Policies support can now enable support |
2937 | * for handling directed advertising. |
2938 | * |
2939 | * So instead of using filter policies 0x00 (no acceptlist) |
2940 | * and 0x01 (acceptlist enabled) use the new filter policies |
2941 | * 0x02 (no acceptlist) and 0x03 (acceptlist enabled). |
2942 | */ |
2943 | if (hci_dev_test_flag(hdev, HCI_PRIVACY) && |
2944 | (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)) |
2945 | filter_policy |= 0x02; |
2946 | |
2947 | if (hdev->suspended) { |
2948 | window = hdev->le_scan_window_suspend; |
2949 | interval = hdev->le_scan_int_suspend; |
2950 | } else if (hci_is_le_conn_scanning(hdev)) { |
2951 | window = hdev->le_scan_window_connect; |
2952 | interval = hdev->le_scan_int_connect; |
2953 | } else if (hci_is_adv_monitoring(hdev)) { |
2954 | window = hdev->le_scan_window_adv_monitor; |
2955 | interval = hdev->le_scan_int_adv_monitor; |
2956 | } else { |
2957 | window = hdev->le_scan_window; |
2958 | interval = hdev->le_scan_interval; |
2959 | } |
2960 | |
2961 | /* Disable all filtering for Mesh */ |
2962 | if (hci_dev_test_flag(hdev, HCI_MESH)) { |
2963 | filter_policy = 0; |
2964 | filter_dups = LE_SCAN_FILTER_DUP_DISABLE; |
2965 | } |
2966 | |
2967 | bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy); |
2968 | |
2969 | return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window, |
2970 | own_addr_type, filter_policy, filter_dups); |
2971 | } |
2972 | |
2973 | /* This function controls the passive scanning based on the |
2974 | * hdev->pend_le_conns list. If there are pending LE connections we start |
2975 | * the background scanning, otherwise we stop it in the following sequence: |
2976 | * |
2977 | * If there are devices to scan: |
2978 | * |
2979 | * Disable Scanning -> Update Accept List -> |
2980 | * use_ll_privacy((Disable Advertising) -> Disable Resolving List -> |
2981 | * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) -> |
2982 | * Enable Scanning |
2983 | * |
2984 | * Otherwise: |
2985 | * |
2986 | * Disable Scanning |
2987 | */ |
2988 | int hci_update_passive_scan_sync(struct hci_dev *hdev) |
2989 | { |
2990 | int err; |
2991 | |
2992 | if (!test_bit(HCI_UP, &hdev->flags) || |
2993 | test_bit(HCI_INIT, &hdev->flags) || |
2994 | hci_dev_test_flag(hdev, HCI_SETUP) || |
2995 | hci_dev_test_flag(hdev, HCI_CONFIG) || |
2996 | hci_dev_test_flag(hdev, HCI_AUTO_OFF) || |
2997 | hci_dev_test_flag(hdev, HCI_UNREGISTER)) |
2998 | return 0; |
2999 | |
3000 | /* No point in doing scanning if LE support hasn't been enabled */ |
3001 | if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) |
3002 | return 0; |
3003 | |
3004 | /* If discovery is active don't interfere with it */ |
3005 | if (hdev->discovery.state != DISCOVERY_STOPPED) |
3006 | return 0; |
3007 | |
3008 | /* Reset RSSI and UUID filters when starting background scanning |
3009 | * since these filters are meant for service discovery only. |
3010 | * |
3011 | * The Start Discovery and Start Service Discovery operations |
3012 | * ensure to set proper values for RSSI threshold and UUID |
3013 | * filter list. So it is safe to just reset them here. |
3014 | */ |
3015 | hci_discovery_filter_clear(hdev); |
3016 | |
3017 | bt_dev_dbg(hdev, "ADV monitoring is %s", |
3018 | hci_is_adv_monitoring(hdev) ? "on" : "off"); |
3019 | |
3020 | if (!hci_dev_test_flag(hdev, HCI_MESH) && |
3021 | list_empty(&hdev->pend_le_conns) && |
3022 | list_empty(&hdev->pend_le_reports) && |
3023 | !hci_is_adv_monitoring(hdev) && |
3024 | !hci_dev_test_flag(hdev, HCI_PA_SYNC)) { |
3025 | /* If there are no pending LE connections, no devices |
3026 | * to be scanned for and no ADV monitors, we should stop the |
3027 | * background scanning. |
3028 | */ |
3029 | |
3030 | bt_dev_dbg(hdev, "stopping background scanning"); |
3031 | |
3032 | err = hci_scan_disable_sync(hdev); |
3033 | if (err) |
3034 | bt_dev_err(hdev, "stop background scanning failed: %d", |
3035 | err); |
3036 | } else { |
3037 | /* If there is at least one pending LE connection, we should |
3038 | * keep the background scan running. |
3039 | */ |
3040 | |
3041 | /* If controller is connecting, we should not start scanning |
3042 | * since some controllers are not able to scan and connect at |
3043 | * the same time. |
3044 | */ |
3045 | if (hci_lookup_le_connect(hdev)) |
3046 | return 0; |
3047 | |
3048 | bt_dev_dbg(hdev, "start background scanning"); |
3049 | |
3050 | err = hci_passive_scan_sync(hdev); |
3051 | if (err) |
3052 | bt_dev_err(hdev, "start background scanning failed: %d", |
3053 | err); |
3054 | } |
3055 | |
3056 | return err; |
3057 | } |
3058 | |
3059 | static int update_scan_sync(struct hci_dev *hdev, void *data) |
3060 | { |
3061 | return hci_update_scan_sync(hdev); |
3062 | } |
3063 | |
3064 | int hci_update_scan(struct hci_dev *hdev) |
3065 | { |
3066 | return hci_cmd_sync_queue(hdev, update_scan_sync, NULL, NULL); |
3067 | } |
3068 | |
3069 | static int update_passive_scan_sync(struct hci_dev *hdev, void *data) |
3070 | { |
3071 | return hci_update_passive_scan_sync(hdev); |
3072 | } |
3073 | |
3074 | int hci_update_passive_scan(struct hci_dev *hdev) |
3075 | { |
3076 | /* Only queue if it would have any effect */ |
3077 | if (!test_bit(HCI_UP, &hdev->flags) || |
3078 | test_bit(HCI_INIT, &hdev->flags) || |
3079 | hci_dev_test_flag(hdev, HCI_SETUP) || |
3080 | hci_dev_test_flag(hdev, HCI_CONFIG) || |
3081 | hci_dev_test_flag(hdev, HCI_AUTO_OFF) || |
3082 | hci_dev_test_flag(hdev, HCI_UNREGISTER)) |
3083 | return 0; |
3084 | |
3085 | return hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL, |
3086 | NULL); |
3087 | } |
3088 | |
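/* Write Secure Connections Host Support and mirror the result in the
 * LMP_HOST_SC host feature bit and the HCI_SC_ENABLED flag.
 */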
3089 | int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val) |
3090 | { |
3091 | int err; |
3092 | |
3093 | if (!bredr_sc_enabled(hdev) || lmp_host_sc_capable(hdev)) |
3094 | return 0; |
3095 | |
3096 | err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT, |
3097 | sizeof(val), &val, HCI_CMD_TIMEOUT); |
3098 | |
3099 | if (!err) { |
3100 | if (val) { |
3101 | hdev->features[1][0] |= LMP_HOST_SC; |
3102 | hci_dev_set_flag(hdev, HCI_SC_ENABLED); |
3103 | } else { |
3104 | hdev->features[1][0] &= ~LMP_HOST_SC; |
3105 | hci_dev_clear_flag(hdev, HCI_SC_ENABLED); |
3106 | } |
3107 | } |
3108 | |
3109 | return err; |
3110 | } |
3111 | |
3112 | int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode) |
3113 | { |
3114 | int err; |
3115 | |
3116 | if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) || |
3117 | lmp_host_ssp_capable(hdev)) |
3118 | return 0; |
3119 | |
3120 | if (!mode && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) { |
3121 | __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, |
3122 | sizeof(mode), &mode, HCI_CMD_TIMEOUT); |
3123 | } |
3124 | |
3125 | err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE, |
3126 | sizeof(mode), &mode, HCI_CMD_TIMEOUT); |
3127 | if (err) |
3128 | return err; |
3129 | |
3130 | return hci_write_sc_support_sync(hdev, 0x01); |
3131 | } |
3132 | |
3133 | int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul) |
3134 | { |
3135 | struct hci_cp_write_le_host_supported cp; |
3136 | |
3137 | if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) || |
3138 | !lmp_bredr_capable(hdev)) |
3139 | return 0; |
3140 | |
3141 | /* Check first if we already have the right host state |
3142 | * (host features set) |
3143 | */ |
3144 | if (le == lmp_host_le_capable(hdev) && |
3145 | simul == lmp_host_le_br_capable(hdev)) |
3146 | return 0; |
3147 | |
3148 | memset(&cp, 0, sizeof(cp)); |
3149 | |
3150 | cp.le = le; |
3151 | cp.simul = simul; |
3152 | |
3153 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, |
3154 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
3155 | } |
3156 | |
3157 | static int hci_powered_update_adv_sync(struct hci_dev *hdev) |
3158 | { |
3159 | struct adv_info *adv, *tmp; |
3160 | int err; |
3161 | |
3162 | if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) |
3163 | return 0; |
3164 | |
3165 | /* If RPA Resolution has not been enabled yet it means the |
3166 | * resolving list is empty and we should attempt to program the |
3167 | * local IRK in order to support using own_addr_type |
3168 | * ADDR_LE_DEV_RANDOM_RESOLVED (0x03). |
3169 | */ |
3170 | if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) { |
3171 | hci_le_add_resolve_list_sync(hdev, NULL); |
3172 | hci_le_set_addr_resolution_enable_sync(hdev, 0x01); |
3173 | } |
3174 | |
3175 | /* Make sure the controller has a good default for |
3176 | * advertising data. This also applies to the case |
3177 | * where BR/EDR was toggled during the AUTO_OFF phase. |
3178 | */ |
3179 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || |
3180 | list_empty(&hdev->adv_instances)) { |
3181 | if (ext_adv_capable(hdev)) { |
3182 | err = hci_setup_ext_adv_instance_sync(hdev, 0x00); |
3183 | if (!err) |
3184 | hci_update_scan_rsp_data_sync(hdev, 0x00); |
3185 | } else { |
3186 | err = hci_update_adv_data_sync(hdev, 0x00); |
3187 | if (!err) |
3188 | hci_update_scan_rsp_data_sync(hdev, 0x00); |
3189 | } |
3190 | |
3191 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) |
3192 | hci_enable_advertising_sync(hdev); |
3193 | } |
3194 | |
3195 | /* Call for each tracked instance to be scheduled */ |
3196 | list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) |
3197 | hci_schedule_adv_instance_sync(hdev, adv->instance, true); |
3198 | |
3199 | return 0; |
3200 | } |
3201 | |
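/* Write Authentication Enable when the HCI_LINK_SECURITY setting disagrees
 * with the current HCI_AUTH state.
 */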
3202 | static int hci_write_auth_enable_sync(struct hci_dev *hdev) |
3203 | { |
3204 | u8 link_sec; |
3205 | |
3206 | link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY); |
3207 | if (link_sec == test_bit(HCI_AUTH, &hdev->flags)) |
3208 | return 0; |
3209 | |
3210 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE, |
3211 | sizeof(link_sec), &link_sec, |
3212 | HCI_CMD_TIMEOUT); |
3213 | } |
3214 | |
3215 | int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable) |
3216 | { |
3217 | struct hci_cp_write_page_scan_activity cp; |
3218 | u8 type; |
3219 | int err = 0; |
3220 | |
3221 | if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) |
3222 | return 0; |
3223 | |
3224 | if (hdev->hci_ver < BLUETOOTH_VER_1_2) |
3225 | return 0; |
3226 | |
3227 | memset(&cp, 0, sizeof(cp)); |
3228 | |
3229 | if (enable) { |
3230 | type = PAGE_SCAN_TYPE_INTERLACED; |
3231 | |
3232 | /* 160 msec page scan interval */ |
3233 | cp.interval = cpu_to_le16(0x0100); |
3234 | } else { |
3235 | type = hdev->def_page_scan_type; |
3236 | cp.interval = cpu_to_le16(hdev->def_page_scan_int); |
3237 | } |
3238 | |
3239 | cp.window = cpu_to_le16(hdev->def_page_scan_window); |
3240 | |
3241 | if (__cpu_to_le16(hdev->page_scan_interval) != cp.interval || |
3242 | __cpu_to_le16(hdev->page_scan_window) != cp.window) { |
3243 | err = __hci_cmd_sync_status(hdev, |
3244 | HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, |
3245 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
3246 | if (err) |
3247 | return err; |
3248 | } |
3249 | |
3250 | if (hdev->page_scan_type != type) |
3251 | err = __hci_cmd_sync_status(hdev, |
3252 | HCI_OP_WRITE_PAGE_SCAN_TYPE, |
3253 | sizeof(type), &type, |
3254 | HCI_CMD_TIMEOUT); |
3255 | |
3256 | return err; |
3257 | } |
3258 | |
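/* Return true if any device on the BR/EDR accept list lacks an active
 * connection, meaning page scan needs to stay enabled for it.
 */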
3259 | static bool disconnected_accept_list_entries(struct hci_dev *hdev) |
3260 | { |
3261 | struct bdaddr_list *b; |
3262 | |
3263 | list_for_each_entry(b, &hdev->accept_list, list) { |
3264 | struct hci_conn *conn; |
3265 | |
3266 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr); |
3267 | if (!conn) |
3268 | return true; |
3269 | |
3270 | if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) |
3271 | return true; |
3272 | } |
3273 | |
3274 | return false; |
3275 | } |
3276 | |
3277 | static int hci_write_scan_enable_sync(struct hci_dev *hdev, u8 val) |
3278 | { |
3279 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE, |
3280 | sizeof(val), &val, |
3281 | HCI_CMD_TIMEOUT); |
3282 | } |
3283 | |
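/* Recompute the BR/EDR scan enable setting (page/inquiry scan) from the
 * connectable and discoverable state and the accept list, and write it if it
 * changed.
 */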
3284 | int hci_update_scan_sync(struct hci_dev *hdev) |
3285 | { |
3286 | u8 scan; |
3287 | |
3288 | if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) |
3289 | return 0; |
3290 | |
3291 | if (!hdev_is_powered(hdev)) |
3292 | return 0; |
3293 | |
3294 | if (mgmt_powering_down(hdev)) |
3295 | return 0; |
3296 | |
3297 | if (hdev->scanning_paused) |
3298 | return 0; |
3299 | |
3300 | if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) || |
3301 | disconnected_accept_list_entries(hdev)) |
3302 | scan = SCAN_PAGE; |
3303 | else |
3304 | scan = SCAN_DISABLED; |
3305 | |
3306 | if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) |
3307 | scan |= SCAN_INQUIRY; |
3308 | |
3309 | if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) && |
3310 | test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY)) |
3311 | return 0; |
3312 | |
3313 | return hci_write_scan_enable_sync(hdev, scan); |
3314 | } |
3315 | |
3316 | int hci_update_name_sync(struct hci_dev *hdev) |
3317 | { |
3318 | struct hci_cp_write_local_name cp; |
3319 | |
3320 | memset(&cp, 0, sizeof(cp)); |
3321 | |
3322 | memcpy(cp.name, hdev->dev_name, sizeof(cp.name)); |
3323 | |
3324 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME, |
3325 | sizeof(cp), &cp, |
3326 | HCI_CMD_TIMEOUT); |
3327 | } |
3328 | |
3329 | /* This function performs the powered update HCI command sequence after the HCI |
3330 | * init sequence, which ends up resetting all states. The sequence is as follows: |
3331 | * |
3332 | * HCI_SSP_ENABLED(Enable SSP) |
3333 | * HCI_LE_ENABLED(Enable LE) |
3334 | * HCI_LE_ENABLED(use_ll_privacy(Add local IRK to Resolving List) -> |
3335 | * Update adv data) |
3336 | * Enable Authentication |
3337 | * lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class -> |
3338 | * Set Name -> Set EIR) |
3339 | * HCI_FORCE_STATIC_ADDR | BDADDR_ANY && !HCI_BREDR_ENABLED (Set Static Address) |
3340 | */ |
3341 | int hci_powered_update_sync(struct hci_dev *hdev) |
3342 | { |
3343 | int err; |
3344 | |
3345 | /* Register the available SMP channels (BR/EDR and LE) only when |
3346 | * successfully powering on the controller. This late |
3347 | * registration is required so that LE SMP can clearly decide if |
3348 | * the public address or static address is used. |
3349 | */ |
3350 | smp_register(hdev); |
3351 | |
3352 | err = hci_write_ssp_mode_sync(hdev, 0x01); |
3353 | if (err) |
3354 | return err; |
3355 | |
3356 | err = hci_write_le_host_supported_sync(hdev, 0x01, 0x00); |
3357 | if (err) |
3358 | return err; |
3359 | |
3360 | err = hci_powered_update_adv_sync(hdev); |
3361 | if (err) |
3362 | return err; |
3363 | |
3364 | err = hci_write_auth_enable_sync(hdev); |
3365 | if (err) |
3366 | return err; |
3367 | |
3368 | if (lmp_bredr_capable(hdev)) { |
3369 | if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) |
3370 | hci_write_fast_connectable_sync(hdev, true); |
3371 | else |
3372 | hci_write_fast_connectable_sync(hdev, false); |
3373 | hci_update_scan_sync(hdev); |
3374 | hci_update_class_sync(hdev); |
3375 | hci_update_name_sync(hdev); |
3376 | hci_update_eir_sync(hdev); |
3377 | } |
3378 | |
3379 | /* If forcing static address is in use or there is no public |
3380 | * address, use the static address as random address (but skip |
3381 | * the HCI command if the current random address is already the |
3382 | * static one). |
3383 | * |
3384 | * In case BR/EDR has been disabled on a dual-mode controller |
3385 | * and a static address has been configured, then use that |
3386 | * address instead of the public BR/EDR address. |
3387 | */ |
3388 | if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || |
3389 | (!bacmp(&hdev->bdaddr, BDADDR_ANY) && |
3390 | !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))) { |
3391 | if (bacmp(&hdev->static_addr, BDADDR_ANY)) |
3392 | return hci_set_random_addr_sync(hdev, |
3393 | &hdev->static_addr); |
3394 | } |
3395 | |
3396 | return 0; |
3397 | } |
3398 | |
3399 | /** |
3400 | * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address |
3401 | * (BD_ADDR) for a HCI device from |
3402 | * a firmware node property. |
3403 | * @hdev: The HCI device |
3404 | * |
3405 | * Search the firmware node for 'local-bd-address'. |
3406 | * |
3407 | * All-zero BD addresses are rejected, because those could be properties |
3408 | * that exist in the firmware tables, but were not updated by the firmware. For |
3409 | * example, the DTS could define 'local-bd-address', with zero BD addresses. |
3410 | */ |
3411 | static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev) |
3412 | { |
3413 | struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent); |
3414 | bdaddr_t ba; |
3415 | int ret; |
3416 | |
	ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
					    (u8 *)&ba, sizeof(ba));
	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
		return;

	if (test_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks))
		baswap(&hdev->public_addr, &ba);
	else
		bacpy(&hdev->public_addr, &ba);
3426 | } |
3427 | |
3428 | struct hci_init_stage { |
3429 | int (*func)(struct hci_dev *hdev); |
3430 | }; |
3431 | |
3432 | /* Run init stage NULL terminated function table */ |
3433 | static int hci_init_stage_sync(struct hci_dev *hdev, |
3434 | const struct hci_init_stage *stage) |
3435 | { |
3436 | size_t i; |
3437 | |
3438 | for (i = 0; stage[i].func; i++) { |
3439 | int err; |
3440 | |
3441 | err = stage[i].func(hdev); |
3442 | if (err) |
3443 | return err; |
3444 | } |
3445 | |
3446 | return 0; |
3447 | } |
3448 | |
3449 | /* Read Local Version */ |
3450 | static int hci_read_local_version_sync(struct hci_dev *hdev) |
3451 | { |
3452 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_VERSION, |
3453 | 0, NULL, HCI_CMD_TIMEOUT); |
3454 | } |
3455 | |
3456 | /* Read BD Address */ |
3457 | static int hci_read_bd_addr_sync(struct hci_dev *hdev) |
3458 | { |
3459 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_BD_ADDR, |
3460 | 0, NULL, HCI_CMD_TIMEOUT); |
3461 | } |
3462 | |
3463 | #define HCI_INIT(_func) \ |
3464 | { \ |
3465 | .func = _func, \ |
3466 | } |
3467 | |
3468 | static const struct hci_init_stage hci_init0[] = { |
3469 | /* HCI_OP_READ_LOCAL_VERSION */ |
3470 | HCI_INIT(hci_read_local_version_sync), |
3471 | /* HCI_OP_READ_BD_ADDR */ |
3472 | HCI_INIT(hci_read_bd_addr_sync), |
3473 | {} |
3474 | }; |
3475 | |
3476 | int hci_reset_sync(struct hci_dev *hdev) |
3477 | { |
3478 | int err; |
3479 | |
	set_bit(HCI_RESET, &hdev->flags);
3481 | |
3482 | err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL, |
3483 | HCI_CMD_TIMEOUT); |
3484 | if (err) |
3485 | return err; |
3486 | |
3487 | return 0; |
3488 | } |
3489 | |
3490 | static int hci_init0_sync(struct hci_dev *hdev) |
3491 | { |
3492 | int err; |
3493 | |
	bt_dev_dbg(hdev, "");
3495 | |
3496 | /* Reset */ |
3497 | if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { |
3498 | err = hci_reset_sync(hdev); |
3499 | if (err) |
3500 | return err; |
3501 | } |
3502 | |
	return hci_init_stage_sync(hdev, hci_init0);
3504 | } |
3505 | |
3506 | static int hci_unconf_init_sync(struct hci_dev *hdev) |
3507 | { |
3508 | int err; |
3509 | |
3510 | if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) |
3511 | return 0; |
3512 | |
3513 | err = hci_init0_sync(hdev); |
3514 | if (err < 0) |
3515 | return err; |
3516 | |
3517 | if (hci_dev_test_flag(hdev, HCI_SETUP)) |
3518 | hci_debugfs_create_basic(hdev); |
3519 | |
3520 | return 0; |
3521 | } |
3522 | |
3523 | /* Read Local Supported Features. */ |
3524 | static int hci_read_local_features_sync(struct hci_dev *hdev) |
3525 | { |
3526 | /* Not all AMP controllers support this command */ |
3527 | if (hdev->dev_type == HCI_AMP && !(hdev->commands[14] & 0x20)) |
3528 | return 0; |
3529 | |
3530 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES, |
3531 | 0, NULL, HCI_CMD_TIMEOUT); |
3532 | } |
3533 | |
3534 | /* BR Controller init stage 1 command sequence */ |
3535 | static const struct hci_init_stage br_init1[] = { |
3536 | /* HCI_OP_READ_LOCAL_FEATURES */ |
3537 | HCI_INIT(hci_read_local_features_sync), |
3538 | /* HCI_OP_READ_LOCAL_VERSION */ |
3539 | HCI_INIT(hci_read_local_version_sync), |
3540 | /* HCI_OP_READ_BD_ADDR */ |
3541 | HCI_INIT(hci_read_bd_addr_sync), |
3542 | {} |
3543 | }; |
3544 | |
3545 | /* Read Local Commands */ |
3546 | static int hci_read_local_cmds_sync(struct hci_dev *hdev) |
3547 | { |
3548 | /* All Bluetooth 1.2 and later controllers should support the |
3549 | * HCI command for reading the local supported commands. |
3550 | * |
3551 | * Unfortunately some controllers indicate Bluetooth 1.2 support, |
3552 | * but do not have support for this command. If that is the case, |
3553 | * the driver can quirk the behavior and skip reading the local |
3554 | * supported commands. |
3555 | */ |
3556 | if (hdev->hci_ver > BLUETOOTH_VER_1_1 && |
3557 | !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks)) |
3558 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_COMMANDS, |
3559 | 0, NULL, HCI_CMD_TIMEOUT); |
3560 | |
3561 | return 0; |
3562 | } |
3563 | |
3564 | /* Read Local AMP Info */ |
3565 | static int hci_read_local_amp_info_sync(struct hci_dev *hdev) |
3566 | { |
3567 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_AMP_INFO, |
3568 | 0, NULL, HCI_CMD_TIMEOUT); |
3569 | } |
3570 | |
3571 | /* Read Data Blk size */ |
3572 | static int hci_read_data_block_size_sync(struct hci_dev *hdev) |
3573 | { |
3574 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, |
3575 | 0, NULL, HCI_CMD_TIMEOUT); |
3576 | } |
3577 | |
3578 | /* Read Flow Control Mode */ |
3579 | static int hci_read_flow_control_mode_sync(struct hci_dev *hdev) |
3580 | { |
3581 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, |
3582 | 0, NULL, HCI_CMD_TIMEOUT); |
3583 | } |
3584 | |
3585 | /* Read Location Data */ |
3586 | static int hci_read_location_data_sync(struct hci_dev *hdev) |
3587 | { |
3588 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCATION_DATA, |
3589 | 0, NULL, HCI_CMD_TIMEOUT); |
3590 | } |
3591 | |
3592 | /* AMP Controller init stage 1 command sequence */ |
3593 | static const struct hci_init_stage amp_init1[] = { |
3594 | /* HCI_OP_READ_LOCAL_VERSION */ |
3595 | HCI_INIT(hci_read_local_version_sync), |
3596 | /* HCI_OP_READ_LOCAL_COMMANDS */ |
3597 | HCI_INIT(hci_read_local_cmds_sync), |
3598 | /* HCI_OP_READ_LOCAL_AMP_INFO */ |
3599 | HCI_INIT(hci_read_local_amp_info_sync), |
3600 | /* HCI_OP_READ_DATA_BLOCK_SIZE */ |
3601 | HCI_INIT(hci_read_data_block_size_sync), |
3602 | /* HCI_OP_READ_FLOW_CONTROL_MODE */ |
3603 | HCI_INIT(hci_read_flow_control_mode_sync), |
3604 | /* HCI_OP_READ_LOCATION_DATA */ |
3605 | HCI_INIT(hci_read_location_data_sync), |
3606 | {} |
3607 | }; |
3608 | |
3609 | static int hci_init1_sync(struct hci_dev *hdev) |
3610 | { |
3611 | int err; |
3612 | |
	bt_dev_dbg(hdev, "");
3614 | |
3615 | /* Reset */ |
3616 | if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { |
3617 | err = hci_reset_sync(hdev); |
3618 | if (err) |
3619 | return err; |
3620 | } |
3621 | |
3622 | switch (hdev->dev_type) { |
3623 | case HCI_PRIMARY: |
3624 | hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED; |
		return hci_init_stage_sync(hdev, br_init1);
	case HCI_AMP:
		hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
		return hci_init_stage_sync(hdev, amp_init1);
	default:
		bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
3631 | break; |
3632 | } |
3633 | |
3634 | return 0; |
3635 | } |
3636 | |
3637 | /* AMP Controller init stage 2 command sequence */ |
3638 | static const struct hci_init_stage amp_init2[] = { |
3639 | /* HCI_OP_READ_LOCAL_FEATURES */ |
3640 | HCI_INIT(hci_read_local_features_sync), |
3641 | {} |
3642 | }; |
3643 | |
3644 | /* Read Buffer Size (ACL mtu, max pkt, etc.) */ |
3645 | static int hci_read_buffer_size_sync(struct hci_dev *hdev) |
3646 | { |
3647 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE, |
3648 | 0, NULL, HCI_CMD_TIMEOUT); |
3649 | } |
3650 | |
3651 | /* Read Class of Device */ |
3652 | static int hci_read_dev_class_sync(struct hci_dev *hdev) |
3653 | { |
3654 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLASS_OF_DEV, |
3655 | 0, NULL, HCI_CMD_TIMEOUT); |
3656 | } |
3657 | |
3658 | /* Read Local Name */ |
3659 | static int hci_read_local_name_sync(struct hci_dev *hdev) |
3660 | { |
3661 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_NAME, |
3662 | 0, NULL, HCI_CMD_TIMEOUT); |
3663 | } |
3664 | |
3665 | /* Read Voice Setting */ |
3666 | static int hci_read_voice_setting_sync(struct hci_dev *hdev) |
3667 | { |
3668 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING, |
3669 | 0, NULL, HCI_CMD_TIMEOUT); |
3670 | } |
3671 | |
3672 | /* Read Number of Supported IAC */ |
3673 | static int hci_read_num_supported_iac_sync(struct hci_dev *hdev) |
3674 | { |
3675 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_NUM_SUPPORTED_IAC, |
3676 | 0, NULL, HCI_CMD_TIMEOUT); |
3677 | } |
3678 | |
3679 | /* Read Current IAC LAP */ |
3680 | static int hci_read_current_iac_lap_sync(struct hci_dev *hdev) |
3681 | { |
3682 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_CURRENT_IAC_LAP, |
3683 | 0, NULL, HCI_CMD_TIMEOUT); |
3684 | } |
3685 | |
3686 | static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type, |
3687 | u8 cond_type, bdaddr_t *bdaddr, |
3688 | u8 auto_accept) |
3689 | { |
3690 | struct hci_cp_set_event_filter cp; |
3691 | |
3692 | if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) |
3693 | return 0; |
3694 | |
3695 | if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks)) |
3696 | return 0; |
3697 | |
3698 | memset(&cp, 0, sizeof(cp)); |
3699 | cp.flt_type = flt_type; |
3700 | |
3701 | if (flt_type != HCI_FLT_CLEAR_ALL) { |
3702 | cp.cond_type = cond_type; |
		bacpy(&cp.addr_conn_flt.bdaddr, bdaddr);
3704 | cp.addr_conn_flt.auto_accept = auto_accept; |
3705 | } |
3706 | |
3707 | return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_FLT, |
3708 | flt_type == HCI_FLT_CLEAR_ALL ? |
3709 | sizeof(cp.flt_type) : sizeof(cp), &cp, |
3710 | HCI_CMD_TIMEOUT); |
3711 | } |
3712 | |
3713 | static int hci_clear_event_filter_sync(struct hci_dev *hdev) |
3714 | { |
3715 | if (!hci_dev_test_flag(hdev, HCI_EVENT_FILTER_CONFIGURED)) |
3716 | return 0; |
3717 | |
3718 | /* In theory the state machine should not reach here unless |
3719 | * a hci_set_event_filter_sync() call succeeds, but we do |
3720 | * the check both for parity and as a future reminder. |
3721 | */ |
3722 | if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks)) |
3723 | return 0; |
3724 | |
	return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00,
					 BDADDR_ANY, 0x00);
3727 | } |
3728 | |
/* Connection accept timeout ~20 secs (0x7d00 slots * 0.625 ms = 20 s) */
3730 | static int hci_write_ca_timeout_sync(struct hci_dev *hdev) |
3731 | { |
3732 | __le16 param = cpu_to_le16(0x7d00); |
3733 | |
3734 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CA_TIMEOUT, |
3735 | sizeof(param), ¶m, HCI_CMD_TIMEOUT); |
3736 | } |
3737 | |
3738 | /* BR Controller init stage 2 command sequence */ |
3739 | static const struct hci_init_stage br_init2[] = { |
3740 | /* HCI_OP_READ_BUFFER_SIZE */ |
3741 | HCI_INIT(hci_read_buffer_size_sync), |
3742 | /* HCI_OP_READ_CLASS_OF_DEV */ |
3743 | HCI_INIT(hci_read_dev_class_sync), |
3744 | /* HCI_OP_READ_LOCAL_NAME */ |
3745 | HCI_INIT(hci_read_local_name_sync), |
3746 | /* HCI_OP_READ_VOICE_SETTING */ |
3747 | HCI_INIT(hci_read_voice_setting_sync), |
3748 | /* HCI_OP_READ_NUM_SUPPORTED_IAC */ |
3749 | HCI_INIT(hci_read_num_supported_iac_sync), |
3750 | /* HCI_OP_READ_CURRENT_IAC_LAP */ |
3751 | HCI_INIT(hci_read_current_iac_lap_sync), |
3752 | /* HCI_OP_SET_EVENT_FLT */ |
3753 | HCI_INIT(hci_clear_event_filter_sync), |
3754 | /* HCI_OP_WRITE_CA_TIMEOUT */ |
3755 | HCI_INIT(hci_write_ca_timeout_sync), |
3756 | {} |
3757 | }; |
3758 | |
3759 | static int hci_write_ssp_mode_1_sync(struct hci_dev *hdev) |
3760 | { |
3761 | u8 mode = 0x01; |
3762 | |
3763 | if (!lmp_ssp_capable(hdev) || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) |
3764 | return 0; |
3765 | |
	/* When SSP is available, the host features page should
	 * also be available. However, some controllers list the
	 * max_page as 0 as long as SSP has not been enabled. To
	 * achieve proper debugging output, force max_page to a
	 * minimum of 1.
3771 | */ |
3772 | hdev->max_page = 0x01; |
3773 | |
3774 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE, |
3775 | sizeof(mode), &mode, HCI_CMD_TIMEOUT); |
3776 | } |
3777 | |
3778 | static int hci_write_eir_sync(struct hci_dev *hdev) |
3779 | { |
3780 | struct hci_cp_write_eir cp; |
3781 | |
3782 | if (!lmp_ssp_capable(hdev) || hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) |
3783 | return 0; |
3784 | |
3785 | memset(hdev->eir, 0, sizeof(hdev->eir)); |
3786 | memset(&cp, 0, sizeof(cp)); |
3787 | |
3788 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp, |
3789 | HCI_CMD_TIMEOUT); |
3790 | } |
3791 | |
3792 | static int hci_write_inquiry_mode_sync(struct hci_dev *hdev) |
3793 | { |
3794 | u8 mode; |
3795 | |
3796 | if (!lmp_inq_rssi_capable(hdev) && |
3797 | !test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) |
3798 | return 0; |
3799 | |
3800 | /* If Extended Inquiry Result events are supported, then |
3801 | * they are clearly preferred over Inquiry Result with RSSI |
3802 | * events. |
3803 | */ |
3804 | mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01; |
3805 | |
3806 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_INQUIRY_MODE, |
3807 | sizeof(mode), &mode, HCI_CMD_TIMEOUT); |
3808 | } |
3809 | |
3810 | static int hci_read_inq_rsp_tx_power_sync(struct hci_dev *hdev) |
3811 | { |
3812 | if (!lmp_inq_tx_pwr_capable(hdev)) |
3813 | return 0; |
3814 | |
3815 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, |
3816 | 0, NULL, HCI_CMD_TIMEOUT); |
3817 | } |
3818 | |
3819 | static int hci_read_local_ext_features_sync(struct hci_dev *hdev, u8 page) |
3820 | { |
3821 | struct hci_cp_read_local_ext_features cp; |
3822 | |
3823 | if (!lmp_ext_feat_capable(hdev)) |
3824 | return 0; |
3825 | |
3826 | memset(&cp, 0, sizeof(cp)); |
3827 | cp.page = page; |
3828 | |
3829 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, |
3830 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
3831 | } |
3832 | |
3833 | static int hci_read_local_ext_features_1_sync(struct hci_dev *hdev) |
3834 | { |
	return hci_read_local_ext_features_sync(hdev, 0x01);
3836 | } |
3837 | |
3838 | /* HCI Controller init stage 2 command sequence */ |
3839 | static const struct hci_init_stage hci_init2[] = { |
3840 | /* HCI_OP_READ_LOCAL_COMMANDS */ |
3841 | HCI_INIT(hci_read_local_cmds_sync), |
3842 | /* HCI_OP_WRITE_SSP_MODE */ |
3843 | HCI_INIT(hci_write_ssp_mode_1_sync), |
3844 | /* HCI_OP_WRITE_EIR */ |
3845 | HCI_INIT(hci_write_eir_sync), |
3846 | /* HCI_OP_WRITE_INQUIRY_MODE */ |
3847 | HCI_INIT(hci_write_inquiry_mode_sync), |
3848 | /* HCI_OP_READ_INQ_RSP_TX_POWER */ |
3849 | HCI_INIT(hci_read_inq_rsp_tx_power_sync), |
3850 | /* HCI_OP_READ_LOCAL_EXT_FEATURES */ |
3851 | HCI_INIT(hci_read_local_ext_features_1_sync), |
3852 | /* HCI_OP_WRITE_AUTH_ENABLE */ |
3853 | HCI_INIT(hci_write_auth_enable_sync), |
3854 | {} |
3855 | }; |
3856 | |
3857 | /* Read LE Buffer Size */ |
3858 | static int hci_le_read_buffer_size_sync(struct hci_dev *hdev) |
3859 | { |
3860 | /* Use Read LE Buffer Size V2 if supported */ |
3861 | if (iso_capable(hdev) && hdev->commands[41] & 0x20) |
3862 | return __hci_cmd_sync_status(hdev, |
3863 | HCI_OP_LE_READ_BUFFER_SIZE_V2, |
3864 | 0, NULL, HCI_CMD_TIMEOUT); |
3865 | |
3866 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE, |
3867 | 0, NULL, HCI_CMD_TIMEOUT); |
3868 | } |
3869 | |
3870 | /* Read LE Local Supported Features */ |
3871 | static int hci_le_read_local_features_sync(struct hci_dev *hdev) |
3872 | { |
3873 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, |
3874 | 0, NULL, HCI_CMD_TIMEOUT); |
3875 | } |
3876 | |
3877 | /* Read LE Supported States */ |
3878 | static int hci_le_read_supported_states_sync(struct hci_dev *hdev) |
3879 | { |
3880 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, |
3881 | 0, NULL, HCI_CMD_TIMEOUT); |
3882 | } |
3883 | |
3884 | /* LE Controller init stage 2 command sequence */ |
3885 | static const struct hci_init_stage le_init2[] = { |
3886 | /* HCI_OP_LE_READ_LOCAL_FEATURES */ |
3887 | HCI_INIT(hci_le_read_local_features_sync), |
3888 | /* HCI_OP_LE_READ_BUFFER_SIZE */ |
3889 | HCI_INIT(hci_le_read_buffer_size_sync), |
3890 | /* HCI_OP_LE_READ_SUPPORTED_STATES */ |
3891 | HCI_INIT(hci_le_read_supported_states_sync), |
3892 | {} |
3893 | }; |
3894 | |
3895 | static int hci_init2_sync(struct hci_dev *hdev) |
3896 | { |
3897 | int err; |
3898 | |
	bt_dev_dbg(hdev, "");
3900 | |
3901 | if (hdev->dev_type == HCI_AMP) |
		return hci_init_stage_sync(hdev, amp_init2);
3903 | |
	err = hci_init_stage_sync(hdev, hci_init2);
3905 | if (err) |
3906 | return err; |
3907 | |
3908 | if (lmp_bredr_capable(hdev)) { |
		err = hci_init_stage_sync(hdev, br_init2);
3910 | if (err) |
3911 | return err; |
3912 | } else { |
3913 | hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED); |
3914 | } |
3915 | |
3916 | if (lmp_le_capable(hdev)) { |
		err = hci_init_stage_sync(hdev, le_init2);
3918 | if (err) |
3919 | return err; |
3920 | /* LE-only controllers have LE implicitly enabled */ |
3921 | if (!lmp_bredr_capable(hdev)) |
3922 | hci_dev_set_flag(hdev, HCI_LE_ENABLED); |
3923 | } |
3924 | |
3925 | return 0; |
3926 | } |
3927 | |
3928 | static int hci_set_event_mask_sync(struct hci_dev *hdev) |
3929 | { |
3930 | /* The second byte is 0xff instead of 0x9f (two reserved bits |
3931 | * disabled) since a Broadcom 1.2 dongle doesn't respond to the |
3932 | * command otherwise. |
3933 | */ |
3934 | u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 }; |
3935 | |
	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
3938 | */ |
3939 | if (hdev->hci_ver < BLUETOOTH_VER_1_2) |
3940 | return 0; |
3941 | |
3942 | if (lmp_bredr_capable(hdev)) { |
3943 | events[4] |= 0x01; /* Flow Specification Complete */ |
3944 | |
3945 | /* Don't set Disconnect Complete and mode change when |
3946 | * suspended as that would wakeup the host when disconnecting |
3947 | * due to suspend. |
3948 | */ |
3949 | if (hdev->suspended) { |
3950 | events[0] &= 0xef; |
3951 | events[2] &= 0xf7; |
3952 | } |
3953 | } else { |
3954 | /* Use a different default for LE-only devices */ |
3955 | memset(events, 0, sizeof(events)); |
3956 | events[1] |= 0x20; /* Command Complete */ |
3957 | events[1] |= 0x40; /* Command Status */ |
3958 | events[1] |= 0x80; /* Hardware Error */ |
3959 | |
3960 | /* If the controller supports the Disconnect command, enable |
3961 | * the corresponding event. In addition enable packet flow |
3962 | * control related events. |
3963 | */ |
3964 | if (hdev->commands[0] & 0x20) { |
3965 | /* Don't set Disconnect Complete when suspended as that |
3966 | * would wakeup the host when disconnecting due to |
3967 | * suspend. |
3968 | */ |
3969 | if (!hdev->suspended) |
3970 | events[0] |= 0x10; /* Disconnection Complete */ |
3971 | events[2] |= 0x04; /* Number of Completed Packets */ |
3972 | events[3] |= 0x02; /* Data Buffer Overflow */ |
3973 | } |
3974 | |
3975 | /* If the controller supports the Read Remote Version |
3976 | * Information command, enable the corresponding event. |
3977 | */ |
3978 | if (hdev->commands[2] & 0x80) |
3979 | events[1] |= 0x08; /* Read Remote Version Information |
3980 | * Complete |
3981 | */ |
3982 | |
3983 | if (hdev->le_features[0] & HCI_LE_ENCRYPTION) { |
3984 | events[0] |= 0x80; /* Encryption Change */ |
3985 | events[5] |= 0x80; /* Encryption Key Refresh Complete */ |
3986 | } |
3987 | } |
3988 | |
3989 | if (lmp_inq_rssi_capable(hdev) || |
3990 | test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) |
3991 | events[4] |= 0x02; /* Inquiry Result with RSSI */ |
3992 | |
3993 | if (lmp_ext_feat_capable(hdev)) |
3994 | events[4] |= 0x04; /* Read Remote Extended Features Complete */ |
3995 | |
3996 | if (lmp_esco_capable(hdev)) { |
3997 | events[5] |= 0x08; /* Synchronous Connection Complete */ |
3998 | events[5] |= 0x10; /* Synchronous Connection Changed */ |
3999 | } |
4000 | |
4001 | if (lmp_sniffsubr_capable(hdev)) |
4002 | events[5] |= 0x20; /* Sniff Subrating */ |
4003 | |
4004 | if (lmp_pause_enc_capable(hdev)) |
4005 | events[5] |= 0x80; /* Encryption Key Refresh Complete */ |
4006 | |
4007 | if (lmp_ext_inq_capable(hdev)) |
4008 | events[5] |= 0x40; /* Extended Inquiry Result */ |
4009 | |
4010 | if (lmp_no_flush_capable(hdev)) |
4011 | events[7] |= 0x01; /* Enhanced Flush Complete */ |
4012 | |
4013 | if (lmp_lsto_capable(hdev)) |
4014 | events[6] |= 0x80; /* Link Supervision Timeout Changed */ |
4015 | |
4016 | if (lmp_ssp_capable(hdev)) { |
4017 | events[6] |= 0x01; /* IO Capability Request */ |
4018 | events[6] |= 0x02; /* IO Capability Response */ |
4019 | events[6] |= 0x04; /* User Confirmation Request */ |
4020 | events[6] |= 0x08; /* User Passkey Request */ |
4021 | events[6] |= 0x10; /* Remote OOB Data Request */ |
4022 | events[6] |= 0x20; /* Simple Pairing Complete */ |
4023 | events[7] |= 0x04; /* User Passkey Notification */ |
4024 | events[7] |= 0x08; /* Keypress Notification */ |
4025 | events[7] |= 0x10; /* Remote Host Supported |
4026 | * Features Notification |
4027 | */ |
4028 | } |
4029 | |
4030 | if (lmp_le_capable(hdev)) |
4031 | events[7] |= 0x20; /* LE Meta-Event */ |
4032 | |
4033 | return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK, |
4034 | sizeof(events), events, HCI_CMD_TIMEOUT); |
4035 | } |
4036 | |
4037 | static int hci_read_stored_link_key_sync(struct hci_dev *hdev) |
4038 | { |
4039 | struct hci_cp_read_stored_link_key cp; |
4040 | |
4041 | if (!(hdev->commands[6] & 0x20) || |
4042 | test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) |
4043 | return 0; |
4044 | |
4045 | memset(&cp, 0, sizeof(cp)); |
	bacpy(&cp.bdaddr, BDADDR_ANY);
4047 | cp.read_all = 0x01; |
4048 | |
4049 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_STORED_LINK_KEY, |
4050 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
4051 | } |
4052 | |
4053 | static int hci_setup_link_policy_sync(struct hci_dev *hdev) |
4054 | { |
4055 | struct hci_cp_write_def_link_policy cp; |
4056 | u16 link_policy = 0; |
4057 | |
4058 | if (!(hdev->commands[5] & 0x10)) |
4059 | return 0; |
4060 | |
4061 | memset(&cp, 0, sizeof(cp)); |
4062 | |
4063 | if (lmp_rswitch_capable(hdev)) |
4064 | link_policy |= HCI_LP_RSWITCH; |
4065 | if (lmp_hold_capable(hdev)) |
4066 | link_policy |= HCI_LP_HOLD; |
4067 | if (lmp_sniff_capable(hdev)) |
4068 | link_policy |= HCI_LP_SNIFF; |
4069 | if (lmp_park_capable(hdev)) |
4070 | link_policy |= HCI_LP_PARK; |
4071 | |
4072 | cp.policy = cpu_to_le16(link_policy); |
4073 | |
4074 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, |
4075 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
4076 | } |
4077 | |
4078 | static int hci_read_page_scan_activity_sync(struct hci_dev *hdev) |
4079 | { |
4080 | if (!(hdev->commands[8] & 0x01)) |
4081 | return 0; |
4082 | |
4083 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_ACTIVITY, |
4084 | 0, NULL, HCI_CMD_TIMEOUT); |
4085 | } |
4086 | |
4087 | static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev) |
4088 | { |
4089 | if (!(hdev->commands[18] & 0x04) || |
4090 | !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) || |
4091 | test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) |
4092 | return 0; |
4093 | |
4094 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING, |
4095 | 0, NULL, HCI_CMD_TIMEOUT); |
4096 | } |
4097 | |
4098 | static int hci_read_page_scan_type_sync(struct hci_dev *hdev) |
4099 | { |
4100 | /* Some older Broadcom based Bluetooth 1.2 controllers do not |
4101 | * support the Read Page Scan Type command. Check support for |
4102 | * this command in the bit mask of supported commands. |
4103 | */ |
4104 | if (!(hdev->commands[13] & 0x01)) |
4105 | return 0; |
4106 | |
4107 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE, |
4108 | 0, NULL, HCI_CMD_TIMEOUT); |
4109 | } |
4110 | |
4111 | /* Read features beyond page 1 if available */ |
4112 | static int hci_read_local_ext_features_all_sync(struct hci_dev *hdev) |
4113 | { |
4114 | u8 page; |
4115 | int err; |
4116 | |
4117 | if (!lmp_ext_feat_capable(hdev)) |
4118 | return 0; |
4119 | |
4120 | for (page = 2; page < HCI_MAX_PAGES && page <= hdev->max_page; |
4121 | page++) { |
4122 | err = hci_read_local_ext_features_sync(hdev, page); |
4123 | if (err) |
4124 | return err; |
4125 | } |
4126 | |
4127 | return 0; |
4128 | } |
4129 | |
4130 | /* HCI Controller init stage 3 command sequence */ |
4131 | static const struct hci_init_stage hci_init3[] = { |
4132 | /* HCI_OP_SET_EVENT_MASK */ |
4133 | HCI_INIT(hci_set_event_mask_sync), |
4134 | /* HCI_OP_READ_STORED_LINK_KEY */ |
4135 | HCI_INIT(hci_read_stored_link_key_sync), |
4136 | /* HCI_OP_WRITE_DEF_LINK_POLICY */ |
4137 | HCI_INIT(hci_setup_link_policy_sync), |
4138 | /* HCI_OP_READ_PAGE_SCAN_ACTIVITY */ |
4139 | HCI_INIT(hci_read_page_scan_activity_sync), |
4140 | /* HCI_OP_READ_DEF_ERR_DATA_REPORTING */ |
4141 | HCI_INIT(hci_read_def_err_data_reporting_sync), |
4142 | /* HCI_OP_READ_PAGE_SCAN_TYPE */ |
4143 | HCI_INIT(hci_read_page_scan_type_sync), |
4144 | /* HCI_OP_READ_LOCAL_EXT_FEATURES */ |
4145 | HCI_INIT(hci_read_local_ext_features_all_sync), |
4146 | {} |
4147 | }; |
4148 | |
4149 | static int hci_le_set_event_mask_sync(struct hci_dev *hdev) |
4150 | { |
4151 | u8 events[8]; |
4152 | |
4153 | if (!lmp_le_capable(hdev)) |
4154 | return 0; |
4155 | |
4156 | memset(events, 0, sizeof(events)); |
4157 | |
4158 | if (hdev->le_features[0] & HCI_LE_ENCRYPTION) |
4159 | events[0] |= 0x10; /* LE Long Term Key Request */ |
4160 | |
4161 | /* If controller supports the Connection Parameters Request |
4162 | * Link Layer Procedure, enable the corresponding event. |
4163 | */ |
4164 | if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC) |
4165 | /* LE Remote Connection Parameter Request */ |
4166 | events[0] |= 0x20; |
4167 | |
4168 | /* If the controller supports the Data Length Extension |
4169 | * feature, enable the corresponding event. |
4170 | */ |
4171 | if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) |
4172 | events[0] |= 0x40; /* LE Data Length Change */ |
4173 | |
4174 | /* If the controller supports LL Privacy feature or LE Extended Adv, |
4175 | * enable the corresponding event. |
4176 | */ |
4177 | if (use_enhanced_conn_complete(hdev)) |
4178 | events[1] |= 0x02; /* LE Enhanced Connection Complete */ |
4179 | |
4180 | /* If the controller supports Extended Scanner Filter |
4181 | * Policies, enable the corresponding event. |
4182 | */ |
4183 | if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY) |
4184 | events[1] |= 0x04; /* LE Direct Advertising Report */ |
4185 | |
4186 | /* If the controller supports Channel Selection Algorithm #2 |
4187 | * feature, enable the corresponding event. |
4188 | */ |
4189 | if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2) |
4190 | events[2] |= 0x08; /* LE Channel Selection Algorithm */ |
4191 | |
4192 | /* If the controller supports the LE Set Scan Enable command, |
4193 | * enable the corresponding advertising report event. |
4194 | */ |
4195 | if (hdev->commands[26] & 0x08) |
4196 | events[0] |= 0x02; /* LE Advertising Report */ |
4197 | |
4198 | /* If the controller supports the LE Create Connection |
4199 | * command, enable the corresponding event. |
4200 | */ |
4201 | if (hdev->commands[26] & 0x10) |
4202 | events[0] |= 0x01; /* LE Connection Complete */ |
4203 | |
4204 | /* If the controller supports the LE Connection Update |
4205 | * command, enable the corresponding event. |
4206 | */ |
4207 | if (hdev->commands[27] & 0x04) |
4208 | events[0] |= 0x04; /* LE Connection Update Complete */ |
4209 | |
4210 | /* If the controller supports the LE Read Remote Used Features |
4211 | * command, enable the corresponding event. |
4212 | */ |
4213 | if (hdev->commands[27] & 0x20) |
4214 | /* LE Read Remote Used Features Complete */ |
4215 | events[0] |= 0x08; |
4216 | |
4217 | /* If the controller supports the LE Read Local P-256 |
4218 | * Public Key command, enable the corresponding event. |
4219 | */ |
4220 | if (hdev->commands[34] & 0x02) |
4221 | /* LE Read Local P-256 Public Key Complete */ |
4222 | events[0] |= 0x80; |
4223 | |
4224 | /* If the controller supports the LE Generate DHKey |
4225 | * command, enable the corresponding event. |
4226 | */ |
4227 | if (hdev->commands[34] & 0x04) |
4228 | events[1] |= 0x01; /* LE Generate DHKey Complete */ |
4229 | |
4230 | /* If the controller supports the LE Set Default PHY or |
4231 | * LE Set PHY commands, enable the corresponding event. |
4232 | */ |
4233 | if (hdev->commands[35] & (0x20 | 0x40)) |
4234 | events[1] |= 0x08; /* LE PHY Update Complete */ |
4235 | |
4236 | /* If the controller supports LE Set Extended Scan Parameters |
4237 | * and LE Set Extended Scan Enable commands, enable the |
4238 | * corresponding event. |
4239 | */ |
4240 | if (use_ext_scan(hdev)) |
4241 | events[1] |= 0x10; /* LE Extended Advertising Report */ |
4242 | |
4243 | /* If the controller supports the LE Extended Advertising |
4244 | * command, enable the corresponding event. |
4245 | */ |
4246 | if (ext_adv_capable(hdev)) |
4247 | events[2] |= 0x02; /* LE Advertising Set Terminated */ |
4248 | |
4249 | if (cis_capable(hdev)) { |
4250 | events[3] |= 0x01; /* LE CIS Established */ |
4251 | if (cis_peripheral_capable(hdev)) |
4252 | events[3] |= 0x02; /* LE CIS Request */ |
4253 | } |
4254 | |
4255 | if (bis_capable(hdev)) { |
4256 | events[1] |= 0x20; /* LE PA Report */ |
4257 | events[1] |= 0x40; /* LE PA Sync Established */ |
4258 | events[3] |= 0x04; /* LE Create BIG Complete */ |
4259 | events[3] |= 0x08; /* LE Terminate BIG Complete */ |
4260 | events[3] |= 0x10; /* LE BIG Sync Established */ |
4261 | events[3] |= 0x20; /* LE BIG Sync Loss */ |
4262 | events[4] |= 0x02; /* LE BIG Info Advertising Report */ |
4263 | } |
4264 | |
4265 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK, |
4266 | sizeof(events), events, HCI_CMD_TIMEOUT); |
4267 | } |
4268 | |
4269 | /* Read LE Advertising Channel TX Power */ |
4270 | static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev) |
4271 | { |
4272 | if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) { |
		/* The HCI TS spec forbids mixing legacy and extended
		 * advertising commands, and READ_ADV_TX_POWER is one of
		 * them. So do not call it if extended advertising is
		 * supported; otherwise the controller will return
		 * COMMAND_DISALLOWED for extended commands.
4278 | */ |
4279 | return __hci_cmd_sync_status(hdev, |
4280 | HCI_OP_LE_READ_ADV_TX_POWER, |
4281 | 0, NULL, HCI_CMD_TIMEOUT); |
4282 | } |
4283 | |
4284 | return 0; |
4285 | } |
4286 | |
4287 | /* Read LE Min/Max Tx Power*/ |
4288 | static int hci_le_read_tx_power_sync(struct hci_dev *hdev) |
4289 | { |
4290 | if (!(hdev->commands[38] & 0x80) || |
4291 | test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks)) |
4292 | return 0; |
4293 | |
4294 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER, |
4295 | 0, NULL, HCI_CMD_TIMEOUT); |
4296 | } |
4297 | |
4298 | /* Read LE Accept List Size */ |
4299 | static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev) |
4300 | { |
4301 | if (!(hdev->commands[26] & 0x40)) |
4302 | return 0; |
4303 | |
4304 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_ACCEPT_LIST_SIZE, |
4305 | 0, NULL, HCI_CMD_TIMEOUT); |
4306 | } |
4307 | |
4308 | /* Read LE Resolving List Size */ |
4309 | static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev) |
4310 | { |
4311 | if (!(hdev->commands[34] & 0x40)) |
4312 | return 0; |
4313 | |
4314 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_RESOLV_LIST_SIZE, |
4315 | 0, NULL, HCI_CMD_TIMEOUT); |
4316 | } |
4317 | |
4318 | /* Clear LE Resolving List */ |
4319 | static int hci_le_clear_resolv_list_sync(struct hci_dev *hdev) |
4320 | { |
4321 | if (!(hdev->commands[34] & 0x20)) |
4322 | return 0; |
4323 | |
4324 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL, |
4325 | HCI_CMD_TIMEOUT); |
4326 | } |
4327 | |
4328 | /* Set RPA timeout */ |
4329 | static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev) |
4330 | { |
4331 | __le16 timeout = cpu_to_le16(hdev->rpa_timeout); |
4332 | |
4333 | if (!(hdev->commands[35] & 0x04) || |
4334 | test_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks)) |
4335 | return 0; |
4336 | |
4337 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT, |
4338 | sizeof(timeout), &timeout, |
4339 | HCI_CMD_TIMEOUT); |
4340 | } |
4341 | |
4342 | /* Read LE Maximum Data Length */ |
4343 | static int hci_le_read_max_data_len_sync(struct hci_dev *hdev) |
4344 | { |
4345 | if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)) |
4346 | return 0; |
4347 | |
4348 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL, |
4349 | HCI_CMD_TIMEOUT); |
4350 | } |
4351 | |
4352 | /* Read LE Suggested Default Data Length */ |
4353 | static int hci_le_read_def_data_len_sync(struct hci_dev *hdev) |
4354 | { |
4355 | if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)) |
4356 | return 0; |
4357 | |
4358 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL, |
4359 | HCI_CMD_TIMEOUT); |
4360 | } |
4361 | |
4362 | /* Read LE Number of Supported Advertising Sets */ |
4363 | static int hci_le_read_num_support_adv_sets_sync(struct hci_dev *hdev) |
4364 | { |
4365 | if (!ext_adv_capable(hdev)) |
4366 | return 0; |
4367 | |
4368 | return __hci_cmd_sync_status(hdev, |
4369 | HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, |
4370 | 0, NULL, HCI_CMD_TIMEOUT); |
4371 | } |
4372 | |
4373 | /* Write LE Host Supported */ |
4374 | static int hci_set_le_support_sync(struct hci_dev *hdev) |
4375 | { |
4376 | struct hci_cp_write_le_host_supported cp; |
4377 | |
4378 | /* LE-only devices do not support explicit enablement */ |
4379 | if (!lmp_bredr_capable(hdev)) |
4380 | return 0; |
4381 | |
4382 | memset(&cp, 0, sizeof(cp)); |
4383 | |
4384 | if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { |
4385 | cp.le = 0x01; |
4386 | cp.simul = 0x00; |
4387 | } |
4388 | |
4389 | if (cp.le == lmp_host_le_capable(hdev)) |
4390 | return 0; |
4391 | |
4392 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, |
4393 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
4394 | } |
4395 | |
4396 | /* LE Set Host Feature */ |
4397 | static int hci_le_set_host_feature_sync(struct hci_dev *hdev) |
4398 | { |
4399 | struct hci_cp_le_set_host_feature cp; |
4400 | |
4401 | if (!cis_capable(hdev)) |
4402 | return 0; |
4403 | |
4404 | memset(&cp, 0, sizeof(cp)); |
4405 | |
4406 | /* Connected Isochronous Channels (Host Support) */ |
4407 | cp.bit_number = 32; |
4408 | cp.bit_value = 1; |
4409 | |
4410 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_HOST_FEATURE, |
4411 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
4412 | } |
4413 | |
4414 | /* LE Controller init stage 3 command sequence */ |
4415 | static const struct hci_init_stage le_init3[] = { |
4416 | /* HCI_OP_LE_SET_EVENT_MASK */ |
4417 | HCI_INIT(hci_le_set_event_mask_sync), |
4418 | /* HCI_OP_LE_READ_ADV_TX_POWER */ |
4419 | HCI_INIT(hci_le_read_adv_tx_power_sync), |
4420 | /* HCI_OP_LE_READ_TRANSMIT_POWER */ |
4421 | HCI_INIT(hci_le_read_tx_power_sync), |
4422 | /* HCI_OP_LE_READ_ACCEPT_LIST_SIZE */ |
4423 | HCI_INIT(hci_le_read_accept_list_size_sync), |
4424 | /* HCI_OP_LE_CLEAR_ACCEPT_LIST */ |
4425 | HCI_INIT(hci_le_clear_accept_list_sync), |
4426 | /* HCI_OP_LE_READ_RESOLV_LIST_SIZE */ |
4427 | HCI_INIT(hci_le_read_resolv_list_size_sync), |
4428 | /* HCI_OP_LE_CLEAR_RESOLV_LIST */ |
4429 | HCI_INIT(hci_le_clear_resolv_list_sync), |
4430 | /* HCI_OP_LE_SET_RPA_TIMEOUT */ |
4431 | HCI_INIT(hci_le_set_rpa_timeout_sync), |
4432 | /* HCI_OP_LE_READ_MAX_DATA_LEN */ |
4433 | HCI_INIT(hci_le_read_max_data_len_sync), |
4434 | /* HCI_OP_LE_READ_DEF_DATA_LEN */ |
4435 | HCI_INIT(hci_le_read_def_data_len_sync), |
4436 | /* HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS */ |
4437 | HCI_INIT(hci_le_read_num_support_adv_sets_sync), |
4438 | /* HCI_OP_WRITE_LE_HOST_SUPPORTED */ |
4439 | HCI_INIT(hci_set_le_support_sync), |
4440 | /* HCI_OP_LE_SET_HOST_FEATURE */ |
4441 | HCI_INIT(hci_le_set_host_feature_sync), |
4442 | {} |
4443 | }; |
4444 | |
4445 | static int hci_init3_sync(struct hci_dev *hdev) |
4446 | { |
4447 | int err; |
4448 | |
	bt_dev_dbg(hdev, "");
4450 | |
	err = hci_init_stage_sync(hdev, hci_init3);
4452 | if (err) |
4453 | return err; |
4454 | |
4455 | if (lmp_le_capable(hdev)) |
		return hci_init_stage_sync(hdev, le_init3);
4457 | |
4458 | return 0; |
4459 | } |
4460 | |
4461 | static int hci_delete_stored_link_key_sync(struct hci_dev *hdev) |
4462 | { |
4463 | struct hci_cp_delete_stored_link_key cp; |
4464 | |
4465 | /* Some Broadcom based Bluetooth controllers do not support the |
4466 | * Delete Stored Link Key command. They are clearly indicating its |
4467 | * absence in the bit mask of supported commands. |
4468 | * |
4469 | * Check the supported commands and only if the command is marked |
4470 | * as supported send it. If not supported assume that the controller |
4471 | * does not have actual support for stored link keys which makes this |
4472 | * command redundant anyway. |
4473 | * |
4474 | * Some controllers indicate that they support handling deleting |
4475 | * stored link keys, but they don't. The quirk lets a driver |
4476 | * just disable this command. |
4477 | */ |
4478 | if (!(hdev->commands[6] & 0x80) || |
4479 | test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) |
4480 | return 0; |
4481 | |
4482 | memset(&cp, 0, sizeof(cp)); |
	bacpy(&cp.bdaddr, BDADDR_ANY);
4484 | cp.delete_all = 0x01; |
4485 | |
4486 | return __hci_cmd_sync_status(hdev, HCI_OP_DELETE_STORED_LINK_KEY, |
4487 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
4488 | } |
4489 | |
4490 | static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev) |
4491 | { |
4492 | u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; |
4493 | bool changed = false; |
4494 | |
4495 | /* Set event mask page 2 if the HCI command for it is supported */ |
4496 | if (!(hdev->commands[22] & 0x04)) |
4497 | return 0; |
4498 | |
4499 | /* If Connectionless Peripheral Broadcast central role is supported |
4500 | * enable all necessary events for it. |
4501 | */ |
4502 | if (lmp_cpb_central_capable(hdev)) { |
4503 | events[1] |= 0x40; /* Triggered Clock Capture */ |
4504 | events[1] |= 0x80; /* Synchronization Train Complete */ |
4505 | events[2] |= 0x08; /* Truncated Page Complete */ |
4506 | events[2] |= 0x20; /* CPB Channel Map Change */ |
4507 | changed = true; |
4508 | } |
4509 | |
4510 | /* If Connectionless Peripheral Broadcast peripheral role is supported |
4511 | * enable all necessary events for it. |
4512 | */ |
4513 | if (lmp_cpb_peripheral_capable(hdev)) { |
4514 | events[2] |= 0x01; /* Synchronization Train Received */ |
4515 | events[2] |= 0x02; /* CPB Receive */ |
4516 | events[2] |= 0x04; /* CPB Timeout */ |
4517 | events[2] |= 0x10; /* Peripheral Page Response Timeout */ |
4518 | changed = true; |
4519 | } |
4520 | |
4521 | /* Enable Authenticated Payload Timeout Expired event if supported */ |
4522 | if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) { |
4523 | events[2] |= 0x80; |
4524 | changed = true; |
4525 | } |
4526 | |
4527 | /* Some Broadcom based controllers indicate support for Set Event |
4528 | * Mask Page 2 command, but then actually do not support it. Since |
4529 | * the default value is all bits set to zero, the command is only |
4530 | * required if the event mask has to be changed. In case no change |
4531 | * to the event mask is needed, skip this command. |
4532 | */ |
4533 | if (!changed) |
4534 | return 0; |
4535 | |
4536 | return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK_PAGE_2, |
4537 | sizeof(events), events, HCI_CMD_TIMEOUT); |
4538 | } |
4539 | |
4540 | /* Read local codec list if the HCI command is supported */ |
4541 | static int hci_read_local_codecs_sync(struct hci_dev *hdev) |
4542 | { |
4543 | if (hdev->commands[45] & 0x04) |
4544 | hci_read_supported_codecs_v2(hdev); |
4545 | else if (hdev->commands[29] & 0x20) |
4546 | hci_read_supported_codecs(hdev); |
4547 | |
4548 | return 0; |
4549 | } |
4550 | |
4551 | /* Read local pairing options if the HCI command is supported */ |
4552 | static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev) |
4553 | { |
4554 | if (!(hdev->commands[41] & 0x08)) |
4555 | return 0; |
4556 | |
4557 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_PAIRING_OPTS, |
4558 | 0, NULL, HCI_CMD_TIMEOUT); |
4559 | } |
4560 | |
4561 | /* Get MWS transport configuration if the HCI command is supported */ |
4562 | static int hci_get_mws_transport_config_sync(struct hci_dev *hdev) |
4563 | { |
4564 | if (!mws_transport_config_capable(hdev)) |
4565 | return 0; |
4566 | |
4567 | return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG, |
4568 | 0, NULL, HCI_CMD_TIMEOUT); |
4569 | } |
4570 | |
4571 | /* Check for Synchronization Train support */ |
4572 | static int hci_read_sync_train_params_sync(struct hci_dev *hdev) |
4573 | { |
4574 | if (!lmp_sync_train_capable(hdev)) |
4575 | return 0; |
4576 | |
4577 | return __hci_cmd_sync_status(hdev, HCI_OP_READ_SYNC_TRAIN_PARAMS, |
4578 | 0, NULL, HCI_CMD_TIMEOUT); |
4579 | } |
4580 | |
4581 | /* Enable Secure Connections if supported and configured */ |
4582 | static int hci_write_sc_support_1_sync(struct hci_dev *hdev) |
4583 | { |
4584 | u8 support = 0x01; |
4585 | |
4586 | if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) || |
4587 | !bredr_sc_enabled(hdev)) |
4588 | return 0; |
4589 | |
4590 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT, |
4591 | sizeof(support), &support, |
4592 | HCI_CMD_TIMEOUT); |
4593 | } |
4594 | |
4595 | /* Set erroneous data reporting if supported to the wideband speech |
4596 | * setting value |
4597 | */ |
4598 | static int hci_set_err_data_report_sync(struct hci_dev *hdev) |
4599 | { |
4600 | struct hci_cp_write_def_err_data_reporting cp; |
4601 | bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED); |
4602 | |
4603 | if (!(hdev->commands[18] & 0x08) || |
4604 | !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) || |
4605 | test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) |
4606 | return 0; |
4607 | |
4608 | if (enabled == hdev->err_data_reporting) |
4609 | return 0; |
4610 | |
4611 | memset(&cp, 0, sizeof(cp)); |
4612 | cp.err_data_reporting = enabled ? ERR_DATA_REPORTING_ENABLED : |
4613 | ERR_DATA_REPORTING_DISABLED; |
4614 | |
4615 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING, |
4616 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
4617 | } |
4618 | |
4619 | static const struct hci_init_stage hci_init4[] = { |
4620 | /* HCI_OP_DELETE_STORED_LINK_KEY */ |
4621 | HCI_INIT(hci_delete_stored_link_key_sync), |
4622 | /* HCI_OP_SET_EVENT_MASK_PAGE_2 */ |
4623 | HCI_INIT(hci_set_event_mask_page_2_sync), |
4624 | /* HCI_OP_READ_LOCAL_CODECS */ |
4625 | HCI_INIT(hci_read_local_codecs_sync), |
4626 | /* HCI_OP_READ_LOCAL_PAIRING_OPTS */ |
4627 | HCI_INIT(hci_read_local_pairing_opts_sync), |
4628 | /* HCI_OP_GET_MWS_TRANSPORT_CONFIG */ |
4629 | HCI_INIT(hci_get_mws_transport_config_sync), |
4630 | /* HCI_OP_READ_SYNC_TRAIN_PARAMS */ |
4631 | HCI_INIT(hci_read_sync_train_params_sync), |
4632 | /* HCI_OP_WRITE_SC_SUPPORT */ |
4633 | HCI_INIT(hci_write_sc_support_1_sync), |
4634 | /* HCI_OP_WRITE_DEF_ERR_DATA_REPORTING */ |
4635 | HCI_INIT(hci_set_err_data_report_sync), |
4636 | {} |
4637 | }; |
4638 | |
4639 | /* Set Suggested Default Data Length to maximum if supported */ |
4640 | static int hci_le_set_write_def_data_len_sync(struct hci_dev *hdev) |
4641 | { |
4642 | struct hci_cp_le_write_def_data_len cp; |
4643 | |
4644 | if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)) |
4645 | return 0; |
4646 | |
4647 | memset(&cp, 0, sizeof(cp)); |
4648 | cp.tx_len = cpu_to_le16(hdev->le_max_tx_len); |
4649 | cp.tx_time = cpu_to_le16(hdev->le_max_tx_time); |
4650 | |
4651 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN, |
4652 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
4653 | } |
4654 | |
/* Set Default PHY parameters if the command is supported, enabling all
 * supported PHYs according to the LE Features bits.
4657 | */ |
4658 | static int hci_le_set_default_phy_sync(struct hci_dev *hdev) |
4659 | { |
4660 | struct hci_cp_le_set_default_phy cp; |
4661 | |
4662 | if (!(hdev->commands[35] & 0x20)) { |
4663 | /* If the command is not supported it means only 1M PHY is |
4664 | * supported. |
4665 | */ |
4666 | hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M; |
4667 | hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; |
4668 | return 0; |
4669 | } |
4670 | |
4671 | memset(&cp, 0, sizeof(cp)); |
4672 | cp.all_phys = 0x00; |
4673 | cp.tx_phys = HCI_LE_SET_PHY_1M; |
4674 | cp.rx_phys = HCI_LE_SET_PHY_1M; |
4675 | |
4676 | /* Enables 2M PHY if supported */ |
4677 | if (le_2m_capable(hdev)) { |
4678 | cp.tx_phys |= HCI_LE_SET_PHY_2M; |
4679 | cp.rx_phys |= HCI_LE_SET_PHY_2M; |
4680 | } |
4681 | |
4682 | /* Enables Coded PHY if supported */ |
4683 | if (le_coded_capable(hdev)) { |
4684 | cp.tx_phys |= HCI_LE_SET_PHY_CODED; |
4685 | cp.rx_phys |= HCI_LE_SET_PHY_CODED; |
4686 | } |
4687 | |
4688 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DEFAULT_PHY, |
4689 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
4690 | } |
4691 | |
4692 | static const struct hci_init_stage le_init4[] = { |
4693 | /* HCI_OP_LE_WRITE_DEF_DATA_LEN */ |
4694 | HCI_INIT(hci_le_set_write_def_data_len_sync), |
4695 | /* HCI_OP_LE_SET_DEFAULT_PHY */ |
4696 | HCI_INIT(hci_le_set_default_phy_sync), |
4697 | {} |
4698 | }; |
4699 | |
4700 | static int hci_init4_sync(struct hci_dev *hdev) |
4701 | { |
4702 | int err; |
4703 | |
	bt_dev_dbg(hdev, "");
4705 | |
	err = hci_init_stage_sync(hdev, hci_init4);
4707 | if (err) |
4708 | return err; |
4709 | |
4710 | if (lmp_le_capable(hdev)) |
		return hci_init_stage_sync(hdev, le_init4);
4712 | |
4713 | return 0; |
4714 | } |
4715 | |
4716 | static int hci_init_sync(struct hci_dev *hdev) |
4717 | { |
4718 | int err; |
4719 | |
4720 | err = hci_init1_sync(hdev); |
4721 | if (err < 0) |
4722 | return err; |
4723 | |
4724 | if (hci_dev_test_flag(hdev, HCI_SETUP)) |
4725 | hci_debugfs_create_basic(hdev); |
4726 | |
4727 | err = hci_init2_sync(hdev); |
4728 | if (err < 0) |
4729 | return err; |
4730 | |
4731 | /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode |
4732 | * BR/EDR/LE type controllers. AMP controllers only need the |
4733 | * first two stages of init. |
4734 | */ |
4735 | if (hdev->dev_type != HCI_PRIMARY) |
4736 | return 0; |
4737 | |
4738 | err = hci_init3_sync(hdev); |
4739 | if (err < 0) |
4740 | return err; |
4741 | |
4742 | err = hci_init4_sync(hdev); |
4743 | if (err < 0) |
4744 | return err; |
4745 | |
4746 | /* This function is only called when the controller is actually in |
4747 | * configured state. When the controller is marked as unconfigured, |
4748 | * this initialization procedure is not run. |
4749 | * |
4750 | * It means that it is possible that a controller runs through its |
4751 | * setup phase and then discovers missing settings. If that is the |
4752 | * case, then this function will not be called. It then will only |
4753 | * be called during the config phase. |
4754 | * |
4755 | * So only when in setup phase or config phase, create the debugfs |
4756 | * entries and register the SMP channels. |
4757 | */ |
4758 | if (!hci_dev_test_flag(hdev, HCI_SETUP) && |
4759 | !hci_dev_test_flag(hdev, HCI_CONFIG)) |
4760 | return 0; |
4761 | |
4762 | if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED)) |
4763 | return 0; |
4764 | |
4765 | hci_debugfs_create_common(hdev); |
4766 | |
4767 | if (lmp_bredr_capable(hdev)) |
4768 | hci_debugfs_create_bredr(hdev); |
4769 | |
4770 | if (lmp_le_capable(hdev)) |
4771 | hci_debugfs_create_le(hdev); |
4772 | |
4773 | return 0; |
4774 | } |
4775 | |
4776 | #define HCI_QUIRK_BROKEN(_quirk, _desc) { HCI_QUIRK_BROKEN_##_quirk, _desc } |
4777 | |
4778 | static const struct { |
4779 | unsigned long quirk; |
4780 | const char *desc; |
4781 | } hci_broken_table[] = { |
	HCI_QUIRK_BROKEN(LOCAL_COMMANDS,
			 "HCI Read Local Supported Commands not supported"),
	HCI_QUIRK_BROKEN(STORED_LINK_KEY,
			 "HCI Delete Stored Link Key command is advertised, "
			 "but not supported."),
	HCI_QUIRK_BROKEN(ERR_DATA_REPORTING,
			 "HCI Read Default Erroneous Data Reporting command is "
			 "advertised, but not supported."),
	HCI_QUIRK_BROKEN(READ_TRANSMIT_POWER,
			 "HCI Read Transmit Power Level command is advertised, "
			 "but not supported."),
	HCI_QUIRK_BROKEN(FILTER_CLEAR_ALL,
			 "HCI Set Event Filter command not supported."),
	HCI_QUIRK_BROKEN(ENHANCED_SETUP_SYNC_CONN,
			 "HCI Enhanced Setup Synchronous Connection command is "
			 "advertised, but not supported."),
	HCI_QUIRK_BROKEN(SET_RPA_TIMEOUT,
			 "HCI LE Set Random Private Address Timeout command is "
			 "advertised, but not supported."),
	HCI_QUIRK_BROKEN(LE_CODED,
			 "HCI LE Coded PHY feature bit is set, "
			 "but its usage is not supported.")
};
4805 | |
4806 | /* This function handles hdev setup stage: |
4807 | * |
4808 | * Calls hdev->setup |
4809 | * Setup address if HCI_QUIRK_USE_BDADDR_PROPERTY is set. |
4810 | */ |
4811 | static int hci_dev_setup_sync(struct hci_dev *hdev) |
4812 | { |
4813 | int ret = 0; |
4814 | bool invalid_bdaddr; |
4815 | size_t i; |
4816 | |
4817 | if (!hci_dev_test_flag(hdev, HCI_SETUP) && |
4818 | !test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) |
4819 | return 0; |
4820 | |
	bt_dev_dbg(hdev, "");
4822 | |
4823 | hci_sock_dev_event(hdev, HCI_DEV_SETUP); |
4824 | |
4825 | if (hdev->setup) |
4826 | ret = hdev->setup(hdev); |
4827 | |
4828 | for (i = 0; i < ARRAY_SIZE(hci_broken_table); i++) { |
4829 | if (test_bit(hci_broken_table[i].quirk, &hdev->quirks)) |
			bt_dev_warn(hdev, "%s", hci_broken_table[i].desc);
4831 | } |
4832 | |
4833 | /* The transport driver can set the quirk to mark the |
4834 | * BD_ADDR invalid before creating the HCI device or in |
4835 | * its setup callback. |
4836 | */ |
4837 | invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) || |
4838 | test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); |
4839 | if (!ret) { |
4840 | if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks) && |
		    !bacmp(&hdev->public_addr, BDADDR_ANY))
4842 | hci_dev_get_bd_addr_from_property(hdev); |
4843 | |
		if (invalid_bdaddr && bacmp(&hdev->public_addr, BDADDR_ANY) &&
4845 | hdev->set_bdaddr) { |
4846 | ret = hdev->set_bdaddr(hdev, &hdev->public_addr); |
4847 | if (!ret) |
4848 | invalid_bdaddr = false; |
4849 | } |
4850 | } |
4851 | |
4852 | /* The transport driver can set these quirks before |
4853 | * creating the HCI device or in its setup callback. |
4854 | * |
4855 | * For the invalid BD_ADDR quirk it is possible that |
4856 | * it becomes a valid address if the bootloader does |
4857 | * provide it (see above). |
4858 | * |
4859 | * In case any of them is set, the controller has to |
4860 | * start up as unconfigured. |
4861 | */ |
4862 | if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || |
4863 | invalid_bdaddr) |
4864 | hci_dev_set_flag(hdev, HCI_UNCONFIGURED); |
4865 | |
4866 | /* For an unconfigured controller it is required to |
4867 | * read at least the version information provided by |
4868 | * the Read Local Version Information command. |
4869 | * |
4870 | * If the set_bdaddr driver callback is provided, then |
4871 | * also the original Bluetooth public device address |
4872 | * will be read using the Read BD Address command. |
4873 | */ |
4874 | if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) |
4875 | return hci_unconf_init_sync(hdev); |
4876 | |
4877 | return ret; |
4878 | } |
4879 | |
4880 | /* This function handles hdev init stage: |
4881 | * |
4882 | * Calls hci_dev_setup_sync to perform setup stage |
4883 | * Calls hci_init_sync to perform HCI command init sequence |
4884 | */ |
4885 | static int hci_dev_init_sync(struct hci_dev *hdev) |
4886 | { |
4887 | int ret; |
4888 | |
	bt_dev_dbg(hdev, "");
4890 | |
	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);
4893 | |
4894 | ret = hci_dev_setup_sync(hdev); |
4895 | |
4896 | if (hci_dev_test_flag(hdev, HCI_CONFIG)) { |
4897 | /* If public address change is configured, ensure that |
4898 | * the address gets programmed. If the driver does not |
4899 | * support changing the public address, fail the power |
4900 | * on procedure. |
4901 | */ |
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
4903 | hdev->set_bdaddr) |
4904 | ret = hdev->set_bdaddr(hdev, &hdev->public_addr); |
4905 | else |
4906 | ret = -EADDRNOTAVAIL; |
4907 | } |
4908 | |
4909 | if (!ret) { |
4910 | if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && |
4911 | !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { |
4912 | ret = hci_init_sync(hdev); |
4913 | if (!ret && hdev->post_init) |
4914 | ret = hdev->post_init(hdev); |
4915 | } |
4916 | } |
4917 | |
4918 | /* If the HCI Reset command is clearing all diagnostic settings, |
4919 | * then they need to be reprogrammed after the init procedure |
4920 | * completed. |
4921 | */ |
4922 | if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) && |
4923 | !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && |
4924 | hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag) |
4925 | ret = hdev->set_diag(hdev, true); |
4926 | |
4927 | if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { |
4928 | msft_do_open(hdev); |
4929 | aosp_do_open(hdev); |
4930 | } |
4931 | |
	clear_bit(HCI_INIT, &hdev->flags);
4933 | |
4934 | return ret; |
4935 | } |
4936 | |
4937 | int hci_dev_open_sync(struct hci_dev *hdev) |
4938 | { |
4939 | int ret; |
4940 | |
	bt_dev_dbg(hdev, "");
4942 | |
4943 | if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { |
4944 | ret = -ENODEV; |
4945 | goto done; |
4946 | } |
4947 | |
4948 | if (!hci_dev_test_flag(hdev, HCI_SETUP) && |
4949 | !hci_dev_test_flag(hdev, HCI_CONFIG)) { |
4950 | /* Check for rfkill but allow the HCI setup stage to |
4951 | * proceed (which in itself doesn't cause any RF activity). |
4952 | */ |
4953 | if (hci_dev_test_flag(hdev, HCI_RFKILLED)) { |
4954 | ret = -ERFKILL; |
4955 | goto done; |
4956 | } |
4957 | |
4958 | /* Check for valid public address or a configured static |
4959 | * random address, but let the HCI setup proceed to |
4960 | * be able to determine if there is a public address |
4961 | * or not. |
4962 | * |
4963 | * In case of user channel usage, it is not important |
4964 | * if a public address or static random address is |
4965 | * available. |
4966 | * |
4967 | * This check is only valid for BR/EDR controllers |
4968 | * since AMP controllers do not have an address. |
4969 | */ |
4970 | if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && |
4971 | hdev->dev_type == HCI_PRIMARY && |
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
4974 | ret = -EADDRNOTAVAIL; |
4975 | goto done; |
4976 | } |
4977 | } |
4978 | |
4979 | if (test_bit(HCI_UP, &hdev->flags)) { |
4980 | ret = -EALREADY; |
4981 | goto done; |
4982 | } |
4983 | |
4984 | if (hdev->open(hdev)) { |
4985 | ret = -EIO; |
4986 | goto done; |
4987 | } |
4988 | |
4989 | hci_devcd_reset(hdev); |
4990 | |
	set_bit(HCI_RUNNING, &hdev->flags);
4992 | hci_sock_dev_event(hdev, HCI_DEV_OPEN); |
4993 | |
4994 | ret = hci_dev_init_sync(hdev); |
4995 | if (!ret) { |
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		set_bit(HCI_UP, &hdev->flags);
5000 | hci_sock_dev_event(hdev, HCI_DEV_UP); |
5001 | hci_leds_update_powered(hdev, enabled: true); |
5002 | if (!hci_dev_test_flag(hdev, HCI_SETUP) && |
5003 | !hci_dev_test_flag(hdev, HCI_CONFIG) && |
5004 | !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && |
5005 | !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && |
5006 | hci_dev_test_flag(hdev, HCI_MGMT) && |
5007 | hdev->dev_type == HCI_PRIMARY) { |
5008 | ret = hci_powered_update_sync(hdev); |
5009 | mgmt_power_on(hdev, err: ret); |
5010 | } |
5011 | } else { |
5012 | /* Init failed, cleanup */ |
5013 | flush_work(work: &hdev->tx_work); |
5014 | |
/* Since hci_rx_work() may queue new cmd_work, it should be
 * flushed first to avoid an unexpected call to
 * hci_cmd_work()
5018 | */ |
5019 | flush_work(work: &hdev->rx_work); |
5020 | flush_work(work: &hdev->cmd_work); |
5021 | |
5022 | skb_queue_purge(list: &hdev->cmd_q); |
5023 | skb_queue_purge(list: &hdev->rx_q); |
5024 | |
5025 | if (hdev->flush) |
5026 | hdev->flush(hdev); |
5027 | |
5028 | if (hdev->sent_cmd) { |
5029 | cancel_delayed_work_sync(dwork: &hdev->cmd_timer); |
5030 | kfree_skb(skb: hdev->sent_cmd); |
5031 | hdev->sent_cmd = NULL; |
5032 | } |
5033 | |
5034 | if (hdev->req_skb) { |
5035 | kfree_skb(skb: hdev->req_skb); |
5036 | hdev->req_skb = NULL; |
5037 | } |
5038 | |
5039 | clear_bit(nr: HCI_RUNNING, addr: &hdev->flags); |
5040 | hci_sock_dev_event(hdev, HCI_DEV_CLOSE); |
5041 | |
5042 | hdev->close(hdev); |
5043 | hdev->flags &= BIT(HCI_RAW); |
5044 | } |
5045 | |
5046 | done: |
5047 | return ret; |
5048 | } |
5049 | |
5050 | /* This function requires the caller holds hdev->lock */ |
5051 | static void hci_pend_le_actions_clear(struct hci_dev *hdev) |
5052 | { |
5053 | struct hci_conn_params *p; |
5054 | |
5055 | list_for_each_entry(p, &hdev->le_conn_params, list) { |
5056 | hci_pend_le_list_del_init(param: p); |
5057 | if (p->conn) { |
5058 | hci_conn_drop(conn: p->conn); |
5059 | hci_conn_put(conn: p->conn); |
5060 | p->conn = NULL; |
5061 | } |
5062 | } |
5063 | |
5064 | BT_DBG("All LE pending actions cleared" ); |
5065 | } |
5066 | |
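/* Run the driver specific shutdown routine (if any) with the user channel
 * flag temporarily cleared, so the kernel can still send the HCI commands
 * required by that routine.
 */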
5067 | static int hci_dev_shutdown(struct hci_dev *hdev) |
5068 | { |
5069 | int err = 0; |
5070 | /* Similar to how we first do setup and then set the exclusive access |
5071 | * bit for userspace, we must first unset userchannel and then clean up. |
5072 | * Otherwise, the kernel can't properly use the hci channel to clean up |
5073 | * the controller (some shutdown routines require sending additional |
5074 | * commands to the controller for example). |
5075 | */ |
5076 | bool was_userchannel = |
5077 | hci_dev_test_and_clear_flag(hdev, HCI_USER_CHANNEL); |
5078 | |
5079 | if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) && |
5080 | test_bit(HCI_UP, &hdev->flags)) { |
5081 | /* Execute vendor specific shutdown routine */ |
5082 | if (hdev->shutdown) |
5083 | err = hdev->shutdown(hdev); |
5084 | } |
5085 | |
5086 | if (was_userchannel) |
5087 | hci_dev_set_flag(hdev, HCI_USER_CHANNEL); |
5088 | |
5089 | return err; |
5090 | } |
5091 | |
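/* Power down the controller: cancel pending work, flush queues and
 * connections, notify mgmt and the sockets, optionally reset the
 * controller and finally call the driver close callback.
 */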
5092 | int hci_dev_close_sync(struct hci_dev *hdev) |
5093 | { |
5094 | bool auto_off; |
5095 | int err = 0; |
5096 | |
5097 | bt_dev_dbg(hdev, "" ); |
5098 | |
5099 | cancel_delayed_work(dwork: &hdev->power_off); |
5100 | cancel_delayed_work(dwork: &hdev->ncmd_timer); |
5101 | cancel_delayed_work(dwork: &hdev->le_scan_disable); |
5102 | |
5103 | hci_request_cancel_all(hdev); |
5104 | |
5105 | if (hdev->adv_instance_timeout) { |
5106 | cancel_delayed_work_sync(dwork: &hdev->adv_instance_expire); |
5107 | hdev->adv_instance_timeout = 0; |
5108 | } |
5109 | |
5110 | err = hci_dev_shutdown(hdev); |
5111 | |
5112 | if (!test_and_clear_bit(nr: HCI_UP, addr: &hdev->flags)) { |
5113 | cancel_delayed_work_sync(dwork: &hdev->cmd_timer); |
5114 | return err; |
5115 | } |
5116 | |
5117 | hci_leds_update_powered(hdev, enabled: false); |
5118 | |
5119 | /* Flush RX and TX works */ |
5120 | flush_work(work: &hdev->tx_work); |
5121 | flush_work(work: &hdev->rx_work); |
5122 | |
5123 | if (hdev->discov_timeout > 0) { |
5124 | hdev->discov_timeout = 0; |
5125 | hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); |
5126 | hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); |
5127 | } |
5128 | |
5129 | if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) |
5130 | cancel_delayed_work(dwork: &hdev->service_cache); |
5131 | |
5132 | if (hci_dev_test_flag(hdev, HCI_MGMT)) { |
5133 | struct adv_info *adv_instance; |
5134 | |
5135 | cancel_delayed_work_sync(dwork: &hdev->rpa_expired); |
5136 | |
5137 | list_for_each_entry(adv_instance, &hdev->adv_instances, list) |
5138 | cancel_delayed_work_sync(dwork: &adv_instance->rpa_expired_cb); |
5139 | } |
5140 | |
5141 | /* Avoid potential lockdep warnings from the *_flush() calls by |
5142 | * ensuring the workqueue is empty up front. |
5143 | */ |
5144 | drain_workqueue(wq: hdev->workqueue); |
5145 | |
5146 | hci_dev_lock(hdev); |
5147 | |
5148 | hci_discovery_set_state(hdev, state: DISCOVERY_STOPPED); |
5149 | |
5150 | auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF); |
5151 | |
5152 | if (!auto_off && hdev->dev_type == HCI_PRIMARY && |
5153 | !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && |
5154 | hci_dev_test_flag(hdev, HCI_MGMT)) |
5155 | __mgmt_power_off(hdev); |
5156 | |
5157 | hci_inquiry_cache_flush(hdev); |
5158 | hci_pend_le_actions_clear(hdev); |
5159 | hci_conn_hash_flush(hdev); |
5160 | /* Prevent data races on hdev->smp_data or hdev->smp_bredr_data */ |
5161 | smp_unregister(hdev); |
5162 | hci_dev_unlock(hdev); |
5163 | |
5164 | hci_sock_dev_event(hdev, HCI_DEV_DOWN); |
5165 | |
5166 | if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { |
5167 | aosp_do_close(hdev); |
5168 | msft_do_close(hdev); |
5169 | } |
5170 | |
5171 | if (hdev->flush) |
5172 | hdev->flush(hdev); |
5173 | |
5174 | /* Reset device */ |
5175 | skb_queue_purge(list: &hdev->cmd_q); |
5176 | atomic_set(v: &hdev->cmd_cnt, i: 1); |
5177 | if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) && |
5178 | !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { |
5179 | set_bit(nr: HCI_INIT, addr: &hdev->flags); |
5180 | hci_reset_sync(hdev); |
5181 | clear_bit(nr: HCI_INIT, addr: &hdev->flags); |
5182 | } |
5183 | |
5184 | /* flush cmd work */ |
5185 | flush_work(work: &hdev->cmd_work); |
5186 | |
5187 | /* Drop queues */ |
5188 | skb_queue_purge(list: &hdev->rx_q); |
5189 | skb_queue_purge(list: &hdev->cmd_q); |
5190 | skb_queue_purge(list: &hdev->raw_q); |
5191 | |
5192 | /* Drop last sent command */ |
5193 | if (hdev->sent_cmd) { |
5194 | cancel_delayed_work_sync(dwork: &hdev->cmd_timer); |
5195 | kfree_skb(skb: hdev->sent_cmd); |
5196 | hdev->sent_cmd = NULL; |
5197 | } |
5198 | |
5199 | /* Drop last request */ |
5200 | if (hdev->req_skb) { |
5201 | kfree_skb(skb: hdev->req_skb); |
5202 | hdev->req_skb = NULL; |
5203 | } |
5204 | |
5205 | clear_bit(nr: HCI_RUNNING, addr: &hdev->flags); |
5206 | hci_sock_dev_event(hdev, HCI_DEV_CLOSE); |
5207 | |
5208 | /* After this point our queues are empty and no tasks are scheduled. */ |
5209 | hdev->close(hdev); |
5210 | |
5211 | /* Clear flags */ |
5212 | hdev->flags &= BIT(HCI_RAW); |
5213 | hci_dev_clear_volatile_flags(hdev); |
5214 | |
5215 | /* Controller radio is available but is currently powered down */ |
5216 | hdev->amp_status = AMP_STATUS_POWERED_DOWN; |
5217 | |
5218 | memset(hdev->eir, 0, sizeof(hdev->eir)); |
5219 | memset(hdev->dev_class, 0, sizeof(hdev->dev_class)); |
5220 | bacpy(dst: &hdev->random_addr, BDADDR_ANY); |
5221 | hci_codec_list_clear(codec_list: &hdev->local_codecs); |
5222 | |
5223 | hci_dev_put(d: hdev); |
5224 | return err; |
5225 | } |
5226 | |
/* This function performs the power on HCI command sequence as follows:
 *
 * If the controller is already up (HCI_UP) it performs the
 * hci_powered_update_sync sequence, otherwise it runs hci_dev_open_sync
 * which will follow up with hci_powered_update_sync after the init
 * sequence has completed.
5232 | */ |
5233 | static int hci_power_on_sync(struct hci_dev *hdev) |
5234 | { |
5235 | int err; |
5236 | |
5237 | if (test_bit(HCI_UP, &hdev->flags) && |
5238 | hci_dev_test_flag(hdev, HCI_MGMT) && |
5239 | hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) { |
5240 | cancel_delayed_work(dwork: &hdev->power_off); |
5241 | return hci_powered_update_sync(hdev); |
5242 | } |
5243 | |
5244 | err = hci_dev_open_sync(hdev); |
5245 | if (err < 0) |
5246 | return err; |
5247 | |
5248 | /* During the HCI setup phase, a few error conditions are |
5249 | * ignored and they need to be checked now. If they are still |
 * valid, it is important to turn the device back off.
5251 | */ |
5252 | if (hci_dev_test_flag(hdev, HCI_RFKILLED) || |
5253 | hci_dev_test_flag(hdev, HCI_UNCONFIGURED) || |
5254 | (hdev->dev_type == HCI_PRIMARY && |
5255 | !bacmp(ba1: &hdev->bdaddr, BDADDR_ANY) && |
5256 | !bacmp(ba1: &hdev->static_addr, BDADDR_ANY))) { |
5257 | hci_dev_clear_flag(hdev, HCI_AUTO_OFF); |
5258 | hci_dev_close_sync(hdev); |
5259 | } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) { |
5260 | queue_delayed_work(wq: hdev->req_workqueue, dwork: &hdev->power_off, |
5261 | HCI_AUTO_OFF_TIMEOUT); |
5262 | } |
5263 | |
5264 | if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) { |
5265 | /* For unconfigured devices, set the HCI_RAW flag |
5266 | * so that userspace can easily identify them. |
5267 | */ |
5268 | if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) |
5269 | set_bit(nr: HCI_RAW, addr: &hdev->flags); |
5270 | |
5271 | /* For fully configured devices, this will send |
5272 | * the Index Added event. For unconfigured devices, |
 * it will send the Unconfigured Index Added event.
 *
 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
 * and no event will be sent.
5277 | */ |
5278 | mgmt_index_added(hdev); |
5279 | } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) { |
/* Now that the controller is configured, it
 * is important to clear the HCI_RAW flag.
5282 | */ |
5283 | if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) |
5284 | clear_bit(nr: HCI_RAW, addr: &hdev->flags); |
5285 | |
5286 | /* Powering on the controller with HCI_CONFIG set only |
5287 | * happens with the transition from unconfigured to |
5288 | * configured. This will send the Index Added event. |
5289 | */ |
5290 | mgmt_index_added(hdev); |
5291 | } |
5292 | |
5293 | return 0; |
5294 | } |
5295 | |
5296 | static int hci_remote_name_cancel_sync(struct hci_dev *hdev, bdaddr_t *addr) |
5297 | { |
5298 | struct hci_cp_remote_name_req_cancel cp; |
5299 | |
5300 | memset(&cp, 0, sizeof(cp)); |
5301 | bacpy(dst: &cp.bdaddr, src: addr); |
5302 | |
5303 | return __hci_cmd_sync_status(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL, |
5304 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
5305 | } |
5306 | |
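/* Stop any ongoing discovery: cancel inquiry and/or LE scanning and,
 * if a remote name request is still pending, cancel it as well.
 */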
5307 | int hci_stop_discovery_sync(struct hci_dev *hdev) |
5308 | { |
5309 | struct discovery_state *d = &hdev->discovery; |
5310 | struct inquiry_entry *e; |
5311 | int err; |
5312 | |
5313 | bt_dev_dbg(hdev, "state %u" , hdev->discovery.state); |
5314 | |
5315 | if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) { |
5316 | if (test_bit(HCI_INQUIRY, &hdev->flags)) { |
5317 | err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, |
5318 | 0, NULL, HCI_CMD_TIMEOUT); |
5319 | if (err) |
5320 | return err; |
5321 | } |
5322 | |
5323 | if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { |
5324 | cancel_delayed_work(dwork: &hdev->le_scan_disable); |
5325 | |
5326 | err = hci_scan_disable_sync(hdev); |
5327 | if (err) |
5328 | return err; |
5329 | } |
5330 | |
5331 | } else { |
5332 | err = hci_scan_disable_sync(hdev); |
5333 | if (err) |
5334 | return err; |
5335 | } |
5336 | |
5337 | /* Resume advertising if it was paused */ |
5338 | if (use_ll_privacy(hdev)) |
5339 | hci_resume_advertising_sync(hdev); |
5340 | |
5341 | /* No further actions needed for LE-only discovery */ |
5342 | if (d->type == DISCOV_TYPE_LE) |
5343 | return 0; |
5344 | |
5345 | if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) { |
5346 | e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, |
5347 | state: NAME_PENDING); |
5348 | if (!e) |
5349 | return 0; |
5350 | |
5351 | return hci_remote_name_cancel_sync(hdev, addr: &e->data.bdaddr); |
5352 | } |
5353 | |
5354 | return 0; |
5355 | } |
5356 | |
5357 | static int hci_disconnect_phy_link_sync(struct hci_dev *hdev, u16 handle, |
5358 | u8 reason) |
5359 | { |
5360 | struct hci_cp_disconn_phy_link cp; |
5361 | |
5362 | memset(&cp, 0, sizeof(cp)); |
5363 | cp.phy_handle = HCI_PHY_HANDLE(handle); |
5364 | cp.reason = reason; |
5365 | |
5366 | return __hci_cmd_sync_status(hdev, HCI_OP_DISCONN_PHY_LINK, |
5367 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
5368 | } |
5369 | |
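/* Disconnect an established connection using the command matching the
 * link type, waiting for HCI_EV_DISCONN_COMPLETE unless the reason
 * indicates a power off.
 */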
5370 | static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn, |
5371 | u8 reason) |
5372 | { |
5373 | struct hci_cp_disconnect cp; |
5374 | |
5375 | if (conn->type == AMP_LINK) |
5376 | return hci_disconnect_phy_link_sync(hdev, handle: conn->handle, reason); |
5377 | |
5378 | if (test_bit(HCI_CONN_BIG_CREATED, &conn->flags)) { |
5379 | /* This is a BIS connection, hci_conn_del will |
5380 | * do the necessary cleanup. |
5381 | */ |
5382 | hci_dev_lock(hdev); |
5383 | hci_conn_failed(conn, status: reason); |
5384 | hci_dev_unlock(hdev); |
5385 | |
5386 | return 0; |
5387 | } |
5388 | |
5389 | memset(&cp, 0, sizeof(cp)); |
5390 | cp.handle = cpu_to_le16(conn->handle); |
5391 | cp.reason = reason; |
5392 | |
5393 | /* Wait for HCI_EV_DISCONN_COMPLETE, not HCI_EV_CMD_STATUS, when the |
5394 | * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is |
5395 | * used when suspending or powering off, where we don't want to wait |
5396 | * for the peer's response. |
5397 | */ |
5398 | if (reason != HCI_ERROR_REMOTE_POWER_OFF) |
5399 | return __hci_cmd_sync_status_sk(hdev, HCI_OP_DISCONNECT, |
5400 | sizeof(cp), &cp, |
5401 | HCI_EV_DISCONN_COMPLETE, |
5402 | HCI_CMD_TIMEOUT, NULL); |
5403 | |
5404 | return __hci_cmd_sync_status(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp, |
5405 | HCI_CMD_TIMEOUT); |
5406 | } |
5407 | |
5408 | static int hci_le_connect_cancel_sync(struct hci_dev *hdev, |
5409 | struct hci_conn *conn, u8 reason) |
5410 | { |
5411 | /* Return reason if scanning since the connection shall probably be |
 * cleaned up directly.
5413 | */ |
5414 | if (test_bit(HCI_CONN_SCANNING, &conn->flags)) |
5415 | return reason; |
5416 | |
5417 | if (conn->role == HCI_ROLE_SLAVE || |
5418 | test_and_set_bit(nr: HCI_CONN_CANCEL, addr: &conn->flags)) |
5419 | return 0; |
5420 | |
5421 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, |
5422 | 0, NULL, HCI_CMD_TIMEOUT); |
5423 | } |
5424 | |
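/* Cancel an outgoing connection attempt using the command matching the
 * link type (LE, ISO or BR/EDR).
 */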
5425 | static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn, |
5426 | u8 reason) |
5427 | { |
5428 | if (conn->type == LE_LINK) |
5429 | return hci_le_connect_cancel_sync(hdev, conn, reason); |
5430 | |
5431 | if (conn->type == ISO_LINK) { |
5432 | /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E |
5433 | * page 1857: |
5434 | * |
5435 | * If this command is issued for a CIS on the Central and the |
5436 | * CIS is successfully terminated before being established, |
5437 | * then an HCI_LE_CIS_Established event shall also be sent for |
5438 | * this CIS with the Status Operation Cancelled by Host (0x44). |
5439 | */ |
5440 | if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) |
5441 | return hci_disconnect_sync(hdev, conn, reason); |
5442 | |
5443 | /* CIS with no Create CIS sent have nothing to cancel */ |
5444 | if (bacmp(ba1: &conn->dst, BDADDR_ANY)) |
5445 | return HCI_ERROR_LOCAL_HOST_TERM; |
5446 | |
5447 | /* There is no way to cancel a BIS without terminating the BIG |
5448 | * which is done later on connection cleanup. |
5449 | */ |
5450 | return 0; |
5451 | } |
5452 | |
5453 | if (hdev->hci_ver < BLUETOOTH_VER_1_2) |
5454 | return 0; |
5455 | |
5456 | /* Wait for HCI_EV_CONN_COMPLETE, not HCI_EV_CMD_STATUS, when the |
5457 | * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is |
5458 | * used when suspending or powering off, where we don't want to wait |
5459 | * for the peer's response. |
5460 | */ |
5461 | if (reason != HCI_ERROR_REMOTE_POWER_OFF) |
5462 | return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN_CANCEL, |
5463 | 6, &conn->dst, |
5464 | HCI_EV_CONN_COMPLETE, |
5465 | HCI_CMD_TIMEOUT, NULL); |
5466 | |
5467 | return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL, |
5468 | 6, &conn->dst, HCI_CMD_TIMEOUT); |
5469 | } |
5470 | |
5471 | static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn, |
5472 | u8 reason) |
5473 | { |
5474 | struct hci_cp_reject_sync_conn_req cp; |
5475 | |
5476 | memset(&cp, 0, sizeof(cp)); |
5477 | bacpy(dst: &cp.bdaddr, src: &conn->dst); |
5478 | cp.reason = reason; |
5479 | |
5480 | /* SCO rejection has its own limited set of |
5481 | * allowed error values (0x0D-0x0F). |
5482 | */ |
5483 | if (reason < 0x0d || reason > 0x0f) |
5484 | cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES; |
5485 | |
5486 | return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_SYNC_CONN_REQ, |
5487 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
5488 | } |
5489 | |
5490 | static int hci_le_reject_cis_sync(struct hci_dev *hdev, struct hci_conn *conn, |
5491 | u8 reason) |
5492 | { |
5493 | struct hci_cp_le_reject_cis cp; |
5494 | |
5495 | memset(&cp, 0, sizeof(cp)); |
5496 | cp.handle = cpu_to_le16(conn->handle); |
5497 | cp.reason = reason; |
5498 | |
5499 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_REJECT_CIS, |
5500 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
5501 | } |
5502 | |
5503 | static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, |
5504 | u8 reason) |
5505 | { |
5506 | struct hci_cp_reject_conn_req cp; |
5507 | |
5508 | if (conn->type == ISO_LINK) |
5509 | return hci_le_reject_cis_sync(hdev, conn, reason); |
5510 | |
5511 | if (conn->type == SCO_LINK || conn->type == ESCO_LINK) |
5512 | return hci_reject_sco_sync(hdev, conn, reason); |
5513 | |
5514 | memset(&cp, 0, sizeof(cp)); |
5515 | bacpy(dst: &cp.bdaddr, src: &conn->dst); |
5516 | cp.reason = reason; |
5517 | |
5518 | return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_CONN_REQ, |
5519 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
5520 | } |
5521 | |
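/* Abort a connection in any state: disconnect, cancel or reject it
 * depending on the current state, and clean up the hci_conn object when
 * none of those apply.
 */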
5522 | int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason) |
5523 | { |
5524 | int err = 0; |
5525 | u16 handle = conn->handle; |
5526 | bool disconnect = false; |
5527 | struct hci_conn *c; |
5528 | |
5529 | switch (conn->state) { |
5530 | case BT_CONNECTED: |
5531 | case BT_CONFIG: |
5532 | err = hci_disconnect_sync(hdev, conn, reason); |
5533 | break; |
5534 | case BT_CONNECT: |
5535 | err = hci_connect_cancel_sync(hdev, conn, reason); |
5536 | break; |
5537 | case BT_CONNECT2: |
5538 | err = hci_reject_conn_sync(hdev, conn, reason); |
5539 | break; |
5540 | case BT_OPEN: |
5541 | case BT_BOUND: |
5542 | break; |
5543 | default: |
5544 | disconnect = true; |
5545 | break; |
5546 | } |
5547 | |
5548 | hci_dev_lock(hdev); |
5549 | |
5550 | /* Check if the connection has been cleaned up concurrently */ |
5551 | c = hci_conn_hash_lookup_handle(hdev, handle); |
5552 | if (!c || c != conn) { |
5553 | err = 0; |
5554 | goto unlock; |
5555 | } |
5556 | |
/* Clean up the hci_conn object if it cannot be cancelled as it
 * likely means the controller and host stack are out of sync,
 * or, in case of LE, it was still scanning so it can be cleaned
 * up safely.
5561 | */ |
5562 | if (disconnect) { |
5563 | conn->state = BT_CLOSED; |
5564 | hci_disconn_cfm(conn, reason); |
5565 | hci_conn_del(conn); |
5566 | } else { |
5567 | hci_conn_failed(conn, status: reason); |
5568 | } |
5569 | |
5570 | unlock: |
5571 | hci_dev_unlock(hdev); |
5572 | return err; |
5573 | } |
5574 | |
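/* Abort every connection in the connection hash with the given reason. */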
5575 | static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason) |
5576 | { |
5577 | struct list_head *head = &hdev->conn_hash.list; |
5578 | struct hci_conn *conn; |
5579 | |
5580 | rcu_read_lock(); |
5581 | while ((conn = list_first_or_null_rcu(head, struct hci_conn, list))) { |
5582 | /* Make sure the connection is not freed while unlocking */ |
5583 | conn = hci_conn_get(conn); |
5584 | rcu_read_unlock(); |
/* Disregard possible errors since hci_conn_del shall have been
 * called even if an error occurred, as that would then cause
 * hci_conn_failed to be called, which calls hci_conn_del
 * internally.
5589 | */ |
5590 | hci_abort_conn_sync(hdev, conn, reason); |
5591 | hci_conn_put(conn); |
5592 | rcu_read_lock(); |
5593 | } |
5594 | rcu_read_unlock(); |
5595 | |
5596 | return 0; |
5597 | } |
5598 | |
/* This function performs the power off HCI command sequence as follows:
5600 | * |
5601 | * Clear Advertising |
5602 | * Stop Discovery |
5603 | * Disconnect all connections |
5604 | * hci_dev_close_sync |
5605 | */ |
5606 | static int hci_power_off_sync(struct hci_dev *hdev) |
5607 | { |
5608 | int err; |
5609 | |
5610 | /* If controller is already down there is nothing to do */ |
5611 | if (!test_bit(HCI_UP, &hdev->flags)) |
5612 | return 0; |
5613 | |
5614 | hci_dev_set_flag(hdev, HCI_POWERING_DOWN); |
5615 | |
5616 | if (test_bit(HCI_ISCAN, &hdev->flags) || |
5617 | test_bit(HCI_PSCAN, &hdev->flags)) { |
5618 | err = hci_write_scan_enable_sync(hdev, val: 0x00); |
5619 | if (err) |
5620 | goto out; |
5621 | } |
5622 | |
5623 | err = hci_clear_adv_sync(hdev, NULL, force: false); |
5624 | if (err) |
5625 | goto out; |
5626 | |
5627 | err = hci_stop_discovery_sync(hdev); |
5628 | if (err) |
5629 | goto out; |
5630 | |
5631 | /* Terminated due to Power Off */ |
5632 | err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF); |
5633 | if (err) |
5634 | goto out; |
5635 | |
5636 | err = hci_dev_close_sync(hdev); |
5637 | |
5638 | out: |
5639 | hci_dev_clear_flag(hdev, HCI_POWERING_DOWN); |
5640 | return err; |
5641 | } |
5642 | |
5643 | int hci_set_powered_sync(struct hci_dev *hdev, u8 val) |
5644 | { |
5645 | if (val) |
5646 | return hci_power_on_sync(hdev); |
5647 | |
5648 | return hci_power_off_sync(hdev); |
5649 | } |
5650 | |
5651 | static int hci_write_iac_sync(struct hci_dev *hdev) |
5652 | { |
5653 | struct hci_cp_write_current_iac_lap cp; |
5654 | |
5655 | if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) |
5656 | return 0; |
5657 | |
5658 | memset(&cp, 0, sizeof(cp)); |
5659 | |
5660 | if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { |
5661 | /* Limited discoverable mode */ |
5662 | cp.num_iac = min_t(u8, hdev->num_iac, 2); |
5663 | cp.iac_lap[0] = 0x00; /* LIAC */ |
5664 | cp.iac_lap[1] = 0x8b; |
5665 | cp.iac_lap[2] = 0x9e; |
5666 | cp.iac_lap[3] = 0x33; /* GIAC */ |
5667 | cp.iac_lap[4] = 0x8b; |
5668 | cp.iac_lap[5] = 0x9e; |
5669 | } else { |
5670 | /* General discoverable mode */ |
5671 | cp.num_iac = 1; |
5672 | cp.iac_lap[0] = 0x33; /* GIAC */ |
5673 | cp.iac_lap[1] = 0x8b; |
5674 | cp.iac_lap[2] = 0x9e; |
5675 | } |
5676 | |
5677 | return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CURRENT_IAC_LAP, |
5678 | (cp.num_iac * 3) + 1, &cp, |
5679 | HCI_CMD_TIMEOUT); |
5680 | } |
5681 | |
5682 | int hci_update_discoverable_sync(struct hci_dev *hdev) |
5683 | { |
5684 | int err = 0; |
5685 | |
5686 | if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { |
5687 | err = hci_write_iac_sync(hdev); |
5688 | if (err) |
5689 | return err; |
5690 | |
5691 | err = hci_update_scan_sync(hdev); |
5692 | if (err) |
5693 | return err; |
5694 | |
5695 | err = hci_update_class_sync(hdev); |
5696 | if (err) |
5697 | return err; |
5698 | } |
5699 | |
5700 | /* Advertising instances don't use the global discoverable setting, so |
5701 | * only update AD if advertising was enabled using Set Advertising. |
5702 | */ |
5703 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) { |
5704 | err = hci_update_adv_data_sync(hdev, instance: 0x00); |
5705 | if (err) |
5706 | return err; |
5707 | |
5708 | /* Discoverable mode affects the local advertising |
5709 | * address in limited privacy mode. |
5710 | */ |
5711 | if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) { |
5712 | if (ext_adv_capable(hdev)) |
5713 | err = hci_start_ext_adv_sync(hdev, instance: 0x00); |
5714 | else |
5715 | err = hci_enable_advertising_sync(hdev); |
5716 | } |
5717 | } |
5718 | |
5719 | return err; |
5720 | } |
5721 | |
5722 | static int update_discoverable_sync(struct hci_dev *hdev, void *data) |
5723 | { |
5724 | return hci_update_discoverable_sync(hdev); |
5725 | } |
5726 | |
5727 | int hci_update_discoverable(struct hci_dev *hdev) |
5728 | { |
5729 | /* Only queue if it would have any effect */ |
5730 | if (hdev_is_powered(hdev) && |
5731 | hci_dev_test_flag(hdev, HCI_ADVERTISING) && |
5732 | hci_dev_test_flag(hdev, HCI_DISCOVERABLE) && |
5733 | hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) |
5734 | return hci_cmd_sync_queue(hdev, update_discoverable_sync, NULL, |
5735 | NULL); |
5736 | |
5737 | return 0; |
5738 | } |
5739 | |
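/* Update scan mode, advertising data/parameters and passive scanning to
 * reflect the current connectable setting.
 */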
5740 | int hci_update_connectable_sync(struct hci_dev *hdev) |
5741 | { |
5742 | int err; |
5743 | |
5744 | err = hci_update_scan_sync(hdev); |
5745 | if (err) |
5746 | return err; |
5747 | |
5748 | /* If BR/EDR is not enabled and we disable advertising as a |
5749 | * by-product of disabling connectable, we need to update the |
5750 | * advertising flags. |
5751 | */ |
5752 | if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) |
5753 | err = hci_update_adv_data_sync(hdev, instance: hdev->cur_adv_instance); |
5754 | |
5755 | /* Update the advertising parameters if necessary */ |
5756 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || |
5757 | !list_empty(head: &hdev->adv_instances)) { |
5758 | if (ext_adv_capable(hdev)) |
5759 | err = hci_start_ext_adv_sync(hdev, |
5760 | instance: hdev->cur_adv_instance); |
5761 | else |
5762 | err = hci_enable_advertising_sync(hdev); |
5763 | |
5764 | if (err) |
5765 | return err; |
5766 | } |
5767 | |
5768 | return hci_update_passive_scan_sync(hdev); |
5769 | } |
5770 | |
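/* Start a BR/EDR inquiry using the general or limited IAC depending on
 * whether limited discovery was requested.
 */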
5771 | static int hci_inquiry_sync(struct hci_dev *hdev, u8 length) |
5772 | { |
5773 | const u8 giac[3] = { 0x33, 0x8b, 0x9e }; |
5774 | const u8 liac[3] = { 0x00, 0x8b, 0x9e }; |
5775 | struct hci_cp_inquiry cp; |
5776 | |
5777 | bt_dev_dbg(hdev, "" ); |
5778 | |
5779 | if (test_bit(HCI_INQUIRY, &hdev->flags)) |
5780 | return 0; |
5781 | |
5782 | hci_dev_lock(hdev); |
5783 | hci_inquiry_cache_flush(hdev); |
5784 | hci_dev_unlock(hdev); |
5785 | |
5786 | memset(&cp, 0, sizeof(cp)); |
5787 | |
5788 | if (hdev->discovery.limited) |
5789 | memcpy(&cp.lap, liac, sizeof(cp.lap)); |
5790 | else |
5791 | memcpy(&cp.lap, giac, sizeof(cp.lap)); |
5792 | |
5793 | cp.length = length; |
5794 | |
5795 | return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY, |
5796 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
5797 | } |
5798 | |
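/* Start an active LE scan for discovery, temporarily disabling passive
 * scanning and pausing address resolution/advertising when privacy is
 * enabled.
 */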
5799 | static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval) |
5800 | { |
5801 | u8 own_addr_type; |
5802 | /* Accept list is not used for discovery */ |
5803 | u8 filter_policy = 0x00; |
5804 | /* Default is to enable duplicates filter */ |
5805 | u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE; |
5806 | int err; |
5807 | |
5808 | bt_dev_dbg(hdev, "" ); |
5809 | |
/* If the controller is scanning, it means passive scanning is
 * running. Thus, we should temporarily stop it in order to set the
5812 | * discovery scanning parameters. |
5813 | */ |
5814 | err = hci_scan_disable_sync(hdev); |
5815 | if (err) { |
5816 | bt_dev_err(hdev, "Unable to disable scanning: %d" , err); |
5817 | return err; |
5818 | } |
5819 | |
5820 | cancel_interleave_scan(hdev); |
5821 | |
5822 | /* Pause address resolution for active scan and stop advertising if |
5823 | * privacy is enabled. |
5824 | */ |
5825 | err = hci_pause_addr_resolution(hdev); |
5826 | if (err) |
5827 | goto failed; |
5828 | |
5829 | /* All active scans will be done with either a resolvable private |
5830 | * address (when privacy feature has been enabled) or non-resolvable |
5831 | * private address. |
5832 | */ |
5833 | err = hci_update_random_address_sync(hdev, require_privacy: true, rpa: scan_use_rpa(hdev), |
5834 | own_addr_type: &own_addr_type); |
5835 | if (err < 0) |
5836 | own_addr_type = ADDR_LE_DEV_PUBLIC; |
5837 | |
5838 | if (hci_is_adv_monitoring(hdev) || |
5839 | (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) && |
5840 | hdev->discovery.result_filtering)) { |
5841 | /* Duplicate filter should be disabled when some advertisement |
5842 | * monitor is activated, otherwise AdvMon can only receive one |
5843 | * advertisement for one peer(*) during active scanning, and |
5844 | * might report loss to these peers. |
5845 | * |
 * If the controller does strict duplicate filtering and the
 * discovery requires result filtering, disable the controller
 * based filtering since that can cause reports that would match
 * the host filter to not be reported.
5850 | */ |
5851 | filter_dup = LE_SCAN_FILTER_DUP_DISABLE; |
5852 | } |
5853 | |
5854 | err = hci_start_scan_sync(hdev, LE_SCAN_ACTIVE, interval, |
5855 | window: hdev->le_scan_window_discovery, |
5856 | own_addr_type, filter_policy, filter_dup); |
5857 | if (!err) |
5858 | return err; |
5859 | |
5860 | failed: |
5861 | /* Resume advertising if it was paused */ |
5862 | if (use_ll_privacy(hdev)) |
5863 | hci_resume_advertising_sync(hdev); |
5864 | |
5865 | /* Resume passive scanning */ |
5866 | hci_update_passive_scan_sync(hdev); |
5867 | return err; |
5868 | } |
5869 | |
5870 | static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev) |
5871 | { |
5872 | int err; |
5873 | |
5874 | bt_dev_dbg(hdev, "" ); |
5875 | |
5876 | err = hci_active_scan_sync(hdev, interval: hdev->le_scan_int_discovery * 2); |
5877 | if (err) |
5878 | return err; |
5879 | |
5880 | return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN); |
5881 | } |
5882 | |
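/* Start discovery according to hdev->discovery.type and schedule the
 * LE scan disable work when LE scanning is involved.
 */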
5883 | int hci_start_discovery_sync(struct hci_dev *hdev) |
5884 | { |
5885 | unsigned long timeout; |
5886 | int err; |
5887 | |
5888 | bt_dev_dbg(hdev, "type %u" , hdev->discovery.type); |
5889 | |
5890 | switch (hdev->discovery.type) { |
5891 | case DISCOV_TYPE_BREDR: |
5892 | return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN); |
5893 | case DISCOV_TYPE_INTERLEAVED: |
5894 | /* When running simultaneous discovery, the LE scanning time |
 * should occupy the whole discovery time since BR/EDR inquiry
5896 | * and LE scanning are scheduled by the controller. |
5897 | * |
5898 | * For interleaving discovery in comparison, BR/EDR inquiry |
5899 | * and LE scanning are done sequentially with separate |
5900 | * timeouts. |
5901 | */ |
5902 | if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, |
5903 | &hdev->quirks)) { |
5904 | timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); |
/* During simultaneous discovery, we double the LE scan
5906 | * interval. We must leave some time for the controller |
5907 | * to do BR/EDR inquiry. |
5908 | */ |
5909 | err = hci_start_interleaved_discovery_sync(hdev); |
5910 | break; |
5911 | } |
5912 | |
5913 | timeout = msecs_to_jiffies(m: hdev->discov_interleaved_timeout); |
5914 | err = hci_active_scan_sync(hdev, interval: hdev->le_scan_int_discovery); |
5915 | break; |
5916 | case DISCOV_TYPE_LE: |
5917 | timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); |
5918 | err = hci_active_scan_sync(hdev, interval: hdev->le_scan_int_discovery); |
5919 | break; |
5920 | default: |
5921 | return -EINVAL; |
5922 | } |
5923 | |
5924 | if (err) |
5925 | return err; |
5926 | |
5927 | bt_dev_dbg(hdev, "timeout %u ms" , jiffies_to_msecs(timeout)); |
5928 | |
5929 | queue_delayed_work(wq: hdev->req_workqueue, dwork: &hdev->le_scan_disable, |
5930 | delay: timeout); |
5931 | return 0; |
5932 | } |
5933 | |
5934 | static void hci_suspend_monitor_sync(struct hci_dev *hdev) |
5935 | { |
5936 | switch (hci_get_adv_monitor_offload_ext(hdev)) { |
5937 | case HCI_ADV_MONITOR_EXT_MSFT: |
5938 | msft_suspend_sync(hdev); |
5939 | break; |
5940 | default: |
5941 | return; |
5942 | } |
5943 | } |
5944 | |
/* This function disables discovery and marks it as paused */
5946 | static int hci_pause_discovery_sync(struct hci_dev *hdev) |
5947 | { |
5948 | int old_state = hdev->discovery.state; |
5949 | int err; |
5950 | |
/* If discovery is already stopped/stopping/paused there is nothing to do */
5952 | if (old_state == DISCOVERY_STOPPED || old_state == DISCOVERY_STOPPING || |
5953 | hdev->discovery_paused) |
5954 | return 0; |
5955 | |
5956 | hci_discovery_set_state(hdev, state: DISCOVERY_STOPPING); |
5957 | err = hci_stop_discovery_sync(hdev); |
5958 | if (err) |
5959 | return err; |
5960 | |
5961 | hdev->discovery_paused = true; |
5962 | hdev->discovery_old_state = old_state; |
5963 | hci_discovery_set_state(hdev, state: DISCOVERY_STOPPED); |
5964 | |
5965 | return 0; |
5966 | } |
5967 | |
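/* Program connection setup event filters for devices allowed to wake up
 * the host and enable page scan if at least one filter was set.
 */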
5968 | static int hci_update_event_filter_sync(struct hci_dev *hdev) |
5969 | { |
5970 | struct bdaddr_list_with_flags *b; |
5971 | u8 scan = SCAN_DISABLED; |
5972 | bool scanning = test_bit(HCI_PSCAN, &hdev->flags); |
5973 | int err; |
5974 | |
5975 | if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) |
5976 | return 0; |
5977 | |
5978 | /* Some fake CSR controllers lock up after setting this type of |
5979 | * filter, so avoid sending the request altogether. |
5980 | */ |
5981 | if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks)) |
5982 | return 0; |
5983 | |
5984 | /* Always clear event filter when starting */ |
5985 | hci_clear_event_filter_sync(hdev); |
5986 | |
5987 | list_for_each_entry(b, &hdev->accept_list, list) { |
5988 | if (!(b->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) |
5989 | continue; |
5990 | |
5991 | bt_dev_dbg(hdev, "Adding event filters for %pMR" , &b->bdaddr); |
5992 | |
5993 | err = hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP, |
5994 | HCI_CONN_SETUP_ALLOW_BDADDR, |
5995 | bdaddr: &b->bdaddr, |
5996 | HCI_CONN_SETUP_AUTO_ON); |
5997 | if (err) |
5998 | bt_dev_dbg(hdev, "Failed to set event filter for %pMR" , |
5999 | &b->bdaddr); |
6000 | else |
6001 | scan = SCAN_PAGE; |
6002 | } |
6003 | |
6004 | if (scan && !scanning) |
6005 | hci_write_scan_enable_sync(hdev, val: scan); |
6006 | else if (!scan && scanning) |
6007 | hci_write_scan_enable_sync(hdev, val: scan); |
6008 | |
6009 | return 0; |
6010 | } |
6011 | |
/* This function disables scan (BR and LE) and marks it as paused */
6013 | static int hci_pause_scan_sync(struct hci_dev *hdev) |
6014 | { |
6015 | if (hdev->scanning_paused) |
6016 | return 0; |
6017 | |
6018 | /* Disable page scan if enabled */ |
6019 | if (test_bit(HCI_PSCAN, &hdev->flags)) |
6020 | hci_write_scan_enable_sync(hdev, SCAN_DISABLED); |
6021 | |
6022 | hci_scan_disable_sync(hdev); |
6023 | |
6024 | hdev->scanning_paused = true; |
6025 | |
6026 | return 0; |
6027 | } |
6028 | |
/* This function performs the HCI suspend procedures in the following order:
6030 | * |
6031 | * Pause discovery (active scanning/inquiry) |
6032 | * Pause Directed Advertising/Advertising |
6033 | * Pause Scanning (passive scanning in case discovery was not active) |
6034 | * Disconnect all connections |
6035 | * Set suspend_status to BT_SUSPEND_DISCONNECT if hdev cannot wakeup |
6036 | * otherwise: |
6037 | * Update event mask (only set events that are allowed to wake up the host) |
6038 | * Update event filter (with devices marked with HCI_CONN_FLAG_REMOTE_WAKEUP) |
6039 | * Update passive scanning (lower duty cycle) |
6040 | * Set suspend_status to BT_SUSPEND_CONFIGURE_WAKE |
6041 | */ |
6042 | int hci_suspend_sync(struct hci_dev *hdev) |
6043 | { |
6044 | int err; |
6045 | |
/* If already marked as suspended there is nothing to do */
6047 | if (hdev->suspended) |
6048 | return 0; |
6049 | |
6050 | /* Mark device as suspended */ |
6051 | hdev->suspended = true; |
6052 | |
6053 | /* Pause discovery if not already stopped */ |
6054 | hci_pause_discovery_sync(hdev); |
6055 | |
6056 | /* Pause other advertisements */ |
6057 | hci_pause_advertising_sync(hdev); |
6058 | |
6059 | /* Suspend monitor filters */ |
6060 | hci_suspend_monitor_sync(hdev); |
6061 | |
6062 | /* Prevent disconnects from causing scanning to be re-enabled */ |
6063 | hci_pause_scan_sync(hdev); |
6064 | |
6065 | if (hci_conn_count(hdev)) { |
6066 | /* Soft disconnect everything (power off) */ |
6067 | err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF); |
6068 | if (err) { |
6069 | /* Set state to BT_RUNNING so resume doesn't notify */ |
6070 | hdev->suspend_state = BT_RUNNING; |
6071 | hci_resume_sync(hdev); |
6072 | return err; |
6073 | } |
6074 | |
/* Update event mask so only the allowed events can wake up the
 * host.
6077 | */ |
6078 | hci_set_event_mask_sync(hdev); |
6079 | } |
6080 | |
6081 | /* Only configure accept list if disconnect succeeded and wake |
6082 | * isn't being prevented. |
6083 | */ |
6084 | if (!hdev->wakeup || !hdev->wakeup(hdev)) { |
6085 | hdev->suspend_state = BT_SUSPEND_DISCONNECT; |
6086 | return 0; |
6087 | } |
6088 | |
6089 | /* Unpause to take care of updating scanning params */ |
6090 | hdev->scanning_paused = false; |
6091 | |
6092 | /* Enable event filter for paired devices */ |
6093 | hci_update_event_filter_sync(hdev); |
6094 | |
6095 | /* Update LE passive scan if enabled */ |
6096 | hci_update_passive_scan_sync(hdev); |
6097 | |
6098 | /* Pause scan changes again. */ |
6099 | hdev->scanning_paused = true; |
6100 | |
6101 | hdev->suspend_state = BT_SUSPEND_CONFIGURE_WAKE; |
6102 | |
6103 | return 0; |
6104 | } |
6105 | |
6106 | /* This function resumes discovery */ |
6107 | static int hci_resume_discovery_sync(struct hci_dev *hdev) |
6108 | { |
6109 | int err; |
6110 | |
/* If discovery was not paused there is nothing to do */
6112 | if (!hdev->discovery_paused) |
6113 | return 0; |
6114 | |
6115 | hdev->discovery_paused = false; |
6116 | |
6117 | hci_discovery_set_state(hdev, state: DISCOVERY_STARTING); |
6118 | |
6119 | err = hci_start_discovery_sync(hdev); |
6120 | |
6121 | hci_discovery_set_state(hdev, state: err ? DISCOVERY_STOPPED : |
6122 | DISCOVERY_FINDING); |
6123 | |
6124 | return err; |
6125 | } |
6126 | |
6127 | static void hci_resume_monitor_sync(struct hci_dev *hdev) |
6128 | { |
6129 | switch (hci_get_adv_monitor_offload_ext(hdev)) { |
6130 | case HCI_ADV_MONITOR_EXT_MSFT: |
6131 | msft_resume_sync(hdev); |
6132 | break; |
6133 | default: |
6134 | return; |
6135 | } |
6136 | } |
6137 | |
/* This function resumes scanning and resets the paused flag */
6139 | static int hci_resume_scan_sync(struct hci_dev *hdev) |
6140 | { |
6141 | if (!hdev->scanning_paused) |
6142 | return 0; |
6143 | |
6144 | hdev->scanning_paused = false; |
6145 | |
6146 | hci_update_scan_sync(hdev); |
6147 | |
6148 | /* Reset passive scanning to normal */ |
6149 | hci_update_passive_scan_sync(hdev); |
6150 | |
6151 | return 0; |
6152 | } |
6153 | |
/* This function performs the HCI resume procedures in the following order:
6155 | * |
6156 | * Restore event mask |
6157 | * Clear event filter |
6158 | * Update passive scanning (normal duty cycle) |
6159 | * Resume Directed Advertising/Advertising |
6160 | * Resume discovery (active scanning/inquiry) |
6161 | */ |
6162 | int hci_resume_sync(struct hci_dev *hdev) |
6163 | { |
/* If not marked as suspended there is nothing to do */
6165 | if (!hdev->suspended) |
6166 | return 0; |
6167 | |
6168 | hdev->suspended = false; |
6169 | |
6170 | /* Restore event mask */ |
6171 | hci_set_event_mask_sync(hdev); |
6172 | |
6173 | /* Clear any event filters and restore scan state */ |
6174 | hci_clear_event_filter_sync(hdev); |
6175 | |
6176 | /* Resume scanning */ |
6177 | hci_resume_scan_sync(hdev); |
6178 | |
6179 | /* Resume monitor filters */ |
6180 | hci_resume_monitor_sync(hdev); |
6181 | |
6182 | /* Resume other advertisements */ |
6183 | hci_resume_advertising_sync(hdev); |
6184 | |
6185 | /* Resume discovery */ |
6186 | hci_resume_discovery_sync(hdev); |
6187 | |
6188 | return 0; |
6189 | } |
6190 | |
6191 | static bool conn_use_rpa(struct hci_conn *conn) |
6192 | { |
6193 | struct hci_dev *hdev = conn->hdev; |
6194 | |
6195 | return hci_dev_test_flag(hdev, HCI_PRIVACY); |
6196 | } |
6197 | |
6198 | static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev, |
6199 | struct hci_conn *conn) |
6200 | { |
6201 | struct hci_cp_le_set_ext_adv_params cp; |
6202 | int err; |
6203 | bdaddr_t random_addr; |
6204 | u8 own_addr_type; |
6205 | |
6206 | err = hci_update_random_address_sync(hdev, require_privacy: false, rpa: conn_use_rpa(conn), |
6207 | own_addr_type: &own_addr_type); |
6208 | if (err) |
6209 | return err; |
6210 | |
6211 | /* Set require_privacy to false so that the remote device has a |
6212 | * chance of identifying us. |
6213 | */ |
6214 | err = hci_get_random_address(hdev, require_privacy: false, use_rpa: conn_use_rpa(conn), NULL, |
6215 | own_addr_type: &own_addr_type, rand_addr: &random_addr); |
6216 | if (err) |
6217 | return err; |
6218 | |
6219 | memset(&cp, 0, sizeof(cp)); |
6220 | |
6221 | cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND); |
6222 | cp.channel_map = hdev->le_adv_channel_map; |
6223 | cp.tx_power = HCI_TX_POWER_INVALID; |
6224 | cp.primary_phy = HCI_ADV_PHY_1M; |
6225 | cp.secondary_phy = HCI_ADV_PHY_1M; |
6226 | cp.handle = 0x00; /* Use instance 0 for directed adv */ |
6227 | cp.own_addr_type = own_addr_type; |
6228 | cp.peer_addr_type = conn->dst_type; |
6229 | bacpy(dst: &cp.peer_addr, src: &conn->dst); |
6230 | |
/* As per Core Spec 5.2 Vol 2, Part E, Sec 7.8.53, the
 * advertising_event_property LE_LEGACY_ADV_DIRECT_IND
 * does not support advertising data when the advertising set already
 * contains some, and the controller shall return the error code
 * 'Invalid HCI Command Parameters' (0x12).
 * So it is required to remove the adv set for handle 0x00 since we
 * use instance 0 for directed advertising.
6238 | */ |
6239 | err = hci_remove_ext_adv_instance_sync(hdev, instance: cp.handle, NULL); |
6240 | if (err) |
6241 | return err; |
6242 | |
6243 | err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, |
6244 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
6245 | if (err) |
6246 | return err; |
6247 | |
6248 | /* Check if random address need to be updated */ |
6249 | if (own_addr_type == ADDR_LE_DEV_RANDOM && |
6250 | bacmp(ba1: &random_addr, BDADDR_ANY) && |
6251 | bacmp(ba1: &random_addr, ba2: &hdev->random_addr)) { |
6252 | err = hci_set_adv_set_random_addr_sync(hdev, instance: 0x00, |
6253 | random_addr: &random_addr); |
6254 | if (err) |
6255 | return err; |
6256 | } |
6257 | |
6258 | return hci_enable_ext_advertising_sync(hdev, instance: 0x00); |
6259 | } |
6260 | |
6261 | static int hci_le_directed_advertising_sync(struct hci_dev *hdev, |
6262 | struct hci_conn *conn) |
6263 | { |
6264 | struct hci_cp_le_set_adv_param cp; |
6265 | u8 status; |
6266 | u8 own_addr_type; |
6267 | u8 enable; |
6268 | |
6269 | if (ext_adv_capable(hdev)) |
6270 | return hci_le_ext_directed_advertising_sync(hdev, conn); |
6271 | |
6272 | /* Clear the HCI_LE_ADV bit temporarily so that the |
6273 | * hci_update_random_address knows that it's safe to go ahead |
6274 | * and write a new random address. The flag will be set back on |
6275 | * as soon as the SET_ADV_ENABLE HCI command completes. |
6276 | */ |
6277 | hci_dev_clear_flag(hdev, HCI_LE_ADV); |
6278 | |
6279 | /* Set require_privacy to false so that the remote device has a |
6280 | * chance of identifying us. |
6281 | */ |
6282 | status = hci_update_random_address_sync(hdev, require_privacy: false, rpa: conn_use_rpa(conn), |
6283 | own_addr_type: &own_addr_type); |
6284 | if (status) |
6285 | return status; |
6286 | |
6287 | memset(&cp, 0, sizeof(cp)); |
6288 | |
6289 | /* Some controllers might reject command if intervals are not |
6290 | * within range for undirected advertising. |
6291 | * BCM20702A0 is known to be affected by this. |
6292 | */ |
6293 | cp.min_interval = cpu_to_le16(0x0020); |
6294 | cp.max_interval = cpu_to_le16(0x0020); |
6295 | |
6296 | cp.type = LE_ADV_DIRECT_IND; |
6297 | cp.own_address_type = own_addr_type; |
6298 | cp.direct_addr_type = conn->dst_type; |
6299 | bacpy(dst: &cp.direct_addr, src: &conn->dst); |
6300 | cp.channel_map = hdev->le_adv_channel_map; |
6301 | |
6302 | status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM, |
6303 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
6304 | if (status) |
6305 | return status; |
6306 | |
6307 | enable = 0x01; |
6308 | |
6309 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE, |
6310 | sizeof(enable), &enable, HCI_CMD_TIMEOUT); |
6311 | } |
6312 | |
6313 | static void set_ext_conn_params(struct hci_conn *conn, |
6314 | struct hci_cp_le_ext_conn_param *p) |
6315 | { |
6316 | struct hci_dev *hdev = conn->hdev; |
6317 | |
6318 | memset(p, 0, sizeof(*p)); |
6319 | |
6320 | p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect); |
6321 | p->scan_window = cpu_to_le16(hdev->le_scan_window_connect); |
6322 | p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); |
6323 | p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); |
6324 | p->conn_latency = cpu_to_le16(conn->le_conn_latency); |
6325 | p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout); |
6326 | p->min_ce_len = cpu_to_le16(0x0000); |
6327 | p->max_ce_len = cpu_to_le16(0x0000); |
6328 | } |
6329 | |
6330 | static int hci_le_ext_create_conn_sync(struct hci_dev *hdev, |
6331 | struct hci_conn *conn, u8 own_addr_type) |
6332 | { |
6333 | struct hci_cp_le_ext_create_conn *cp; |
6334 | struct hci_cp_le_ext_conn_param *p; |
6335 | u8 data[sizeof(*cp) + sizeof(*p) * 3]; |
6336 | u32 plen; |
6337 | |
6338 | cp = (void *)data; |
6339 | p = (void *)cp->data; |
6340 | |
6341 | memset(cp, 0, sizeof(*cp)); |
6342 | |
6343 | bacpy(dst: &cp->peer_addr, src: &conn->dst); |
6344 | cp->peer_addr_type = conn->dst_type; |
6345 | cp->own_addr_type = own_addr_type; |
6346 | |
6347 | plen = sizeof(*cp); |
6348 | |
6349 | if (scan_1m(hdev)) { |
6350 | cp->phys |= LE_SCAN_PHY_1M; |
6351 | set_ext_conn_params(conn, p); |
6352 | |
6353 | p++; |
6354 | plen += sizeof(*p); |
6355 | } |
6356 | |
6357 | if (scan_2m(hdev)) { |
6358 | cp->phys |= LE_SCAN_PHY_2M; |
6359 | set_ext_conn_params(conn, p); |
6360 | |
6361 | p++; |
6362 | plen += sizeof(*p); |
6363 | } |
6364 | |
6365 | if (scan_coded(hdev)) { |
6366 | cp->phys |= LE_SCAN_PHY_CODED; |
6367 | set_ext_conn_params(conn, p); |
6368 | |
6369 | plen += sizeof(*p); |
6370 | } |
6371 | |
6372 | return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_EXT_CREATE_CONN, |
6373 | plen, data, |
6374 | HCI_EV_LE_ENHANCED_CONN_COMPLETE, |
6375 | conn->conn_timeout, NULL); |
6376 | } |
6377 | |
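/* Establish an LE connection: use directed advertising for the peripheral
 * role, otherwise stop scanning, update the random address and issue
 * (Extended) Create Connection.
 */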
6378 | static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data) |
6379 | { |
6380 | struct hci_cp_le_create_conn cp; |
6381 | struct hci_conn_params *params; |
6382 | u8 own_addr_type; |
6383 | int err; |
6384 | struct hci_conn *conn = data; |
6385 | |
6386 | if (!hci_conn_valid(hdev, conn)) |
6387 | return -ECANCELED; |
6388 | |
6389 | bt_dev_dbg(hdev, "conn %p" , conn); |
6390 | |
6391 | clear_bit(nr: HCI_CONN_SCANNING, addr: &conn->flags); |
6392 | conn->state = BT_CONNECT; |
6393 | |
6394 | /* If requested to connect as peripheral use directed advertising */ |
6395 | if (conn->role == HCI_ROLE_SLAVE) { |
6396 | /* If we're active scanning and simultaneous roles is not |
6397 | * enabled simply reject the attempt. |
6398 | */ |
6399 | if (hci_dev_test_flag(hdev, HCI_LE_SCAN) && |
6400 | hdev->le_scan_type == LE_SCAN_ACTIVE && |
6401 | !hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) { |
6402 | hci_conn_del(conn); |
6403 | return -EBUSY; |
6404 | } |
6405 | |
6406 | /* Pause advertising while doing directed advertising. */ |
6407 | hci_pause_advertising_sync(hdev); |
6408 | |
6409 | err = hci_le_directed_advertising_sync(hdev, conn); |
6410 | goto done; |
6411 | } |
6412 | |
6413 | /* Disable advertising if simultaneous roles is not in use. */ |
6414 | if (!hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) |
6415 | hci_pause_advertising_sync(hdev); |
6416 | |
6417 | params = hci_conn_params_lookup(hdev, addr: &conn->dst, addr_type: conn->dst_type); |
6418 | if (params) { |
6419 | conn->le_conn_min_interval = params->conn_min_interval; |
6420 | conn->le_conn_max_interval = params->conn_max_interval; |
6421 | conn->le_conn_latency = params->conn_latency; |
6422 | conn->le_supv_timeout = params->supervision_timeout; |
6423 | } else { |
6424 | conn->le_conn_min_interval = hdev->le_conn_min_interval; |
6425 | conn->le_conn_max_interval = hdev->le_conn_max_interval; |
6426 | conn->le_conn_latency = hdev->le_conn_latency; |
6427 | conn->le_supv_timeout = hdev->le_supv_timeout; |
6428 | } |
6429 | |
6430 | /* If controller is scanning, we stop it since some controllers are |
6431 | * not able to scan and connect at the same time. Also set the |
6432 | * HCI_LE_SCAN_INTERRUPTED flag so that the command complete |
6433 | * handler for scan disabling knows to set the correct discovery |
6434 | * state. |
6435 | */ |
6436 | if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { |
6437 | hci_scan_disable_sync(hdev); |
6438 | hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED); |
6439 | } |
6440 | |
6441 | /* Update random address, but set require_privacy to false so |
 * that we never connect with a non-resolvable address.
6443 | */ |
6444 | err = hci_update_random_address_sync(hdev, require_privacy: false, rpa: conn_use_rpa(conn), |
6445 | own_addr_type: &own_addr_type); |
6446 | if (err) |
6447 | goto done; |
6448 | |
6449 | if (use_ext_conn(hdev)) { |
6450 | err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type); |
6451 | goto done; |
6452 | } |
6453 | |
6454 | memset(&cp, 0, sizeof(cp)); |
6455 | |
6456 | cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect); |
6457 | cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect); |
6458 | |
6459 | bacpy(dst: &cp.peer_addr, src: &conn->dst); |
6460 | cp.peer_addr_type = conn->dst_type; |
6461 | cp.own_address_type = own_addr_type; |
6462 | cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); |
6463 | cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); |
6464 | cp.conn_latency = cpu_to_le16(conn->le_conn_latency); |
6465 | cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout); |
6466 | cp.min_ce_len = cpu_to_le16(0x0000); |
6467 | cp.max_ce_len = cpu_to_le16(0x0000); |
6468 | |
6469 | /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2261: |
6470 | * |
6471 | * If this event is unmasked and the HCI_LE_Connection_Complete event |
6472 | * is unmasked, only the HCI_LE_Enhanced_Connection_Complete event is |
6473 | * sent when a new connection has been created. |
6474 | */ |
6475 | err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CONN, |
6476 | sizeof(cp), &cp, |
6477 | use_enhanced_conn_complete(hdev) ? |
6478 | HCI_EV_LE_ENHANCED_CONN_COMPLETE : |
6479 | HCI_EV_LE_CONN_COMPLETE, |
6480 | conn->conn_timeout, NULL); |
6481 | |
6482 | done: |
6483 | if (err == -ETIMEDOUT) |
6484 | hci_le_connect_cancel_sync(hdev, conn, reason: 0x00); |
6485 | |
6486 | /* Re-enable advertising after the connection attempt is finished. */ |
6487 | hci_resume_advertising_sync(hdev); |
6488 | return err; |
6489 | } |
6490 | |
6491 | int hci_le_create_cis_sync(struct hci_dev *hdev) |
6492 | { |
6493 | struct { |
6494 | struct hci_cp_le_create_cis cp; |
6495 | struct hci_cis cis[0x1f]; |
6496 | } cmd; |
6497 | struct hci_conn *conn; |
6498 | u8 cig = BT_ISO_QOS_CIG_UNSET; |
6499 | |
6500 | /* The spec allows only one pending LE Create CIS command at a time. If |
6501 | * the command is pending now, don't do anything. We check for pending |
6502 | * connections after each CIS Established event. |
6503 | * |
6504 | * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E |
6505 | * page 2566: |
6506 | * |
6507 | * If the Host issues this command before all the |
6508 | * HCI_LE_CIS_Established events from the previous use of the |
6509 | * command have been generated, the Controller shall return the |
6510 | * error code Command Disallowed (0x0C). |
6511 | * |
6512 | * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E |
6513 | * page 2567: |
6514 | * |
6515 | * When the Controller receives the HCI_LE_Create_CIS command, the |
6516 | * Controller sends the HCI_Command_Status event to the Host. An |
6517 | * HCI_LE_CIS_Established event will be generated for each CIS when it |
6518 | * is established or if it is disconnected or considered lost before |
6519 | * being established; until all the events are generated, the command |
6520 | * remains pending. |
6521 | */ |
6522 | |
6523 | memset(&cmd, 0, sizeof(cmd)); |
6524 | |
6525 | hci_dev_lock(hdev); |
6526 | |
6527 | rcu_read_lock(); |
6528 | |
6529 | /* Wait until previous Create CIS has completed */ |
6530 | list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { |
6531 | if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) |
6532 | goto done; |
6533 | } |
6534 | |
6535 | /* Find CIG with all CIS ready */ |
6536 | list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { |
6537 | struct hci_conn *link; |
6538 | |
6539 | if (hci_conn_check_create_cis(conn)) |
6540 | continue; |
6541 | |
6542 | cig = conn->iso_qos.ucast.cig; |
6543 | |
6544 | list_for_each_entry_rcu(link, &hdev->conn_hash.list, list) { |
6545 | if (hci_conn_check_create_cis(conn: link) > 0 && |
6546 | link->iso_qos.ucast.cig == cig && |
6547 | link->state != BT_CONNECTED) { |
6548 | cig = BT_ISO_QOS_CIG_UNSET; |
6549 | break; |
6550 | } |
6551 | } |
6552 | |
6553 | if (cig != BT_ISO_QOS_CIG_UNSET) |
6554 | break; |
6555 | } |
6556 | |
6557 | if (cig == BT_ISO_QOS_CIG_UNSET) |
6558 | goto done; |
6559 | |
6560 | list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { |
6561 | struct hci_cis *cis = &cmd.cis[cmd.cp.num_cis]; |
6562 | |
6563 | if (hci_conn_check_create_cis(conn) || |
6564 | conn->iso_qos.ucast.cig != cig) |
6565 | continue; |
6566 | |
6567 | set_bit(nr: HCI_CONN_CREATE_CIS, addr: &conn->flags); |
6568 | cis->acl_handle = cpu_to_le16(conn->parent->handle); |
6569 | cis->cis_handle = cpu_to_le16(conn->handle); |
6570 | cmd.cp.num_cis++; |
6571 | |
6572 | if (cmd.cp.num_cis >= ARRAY_SIZE(cmd.cis)) |
6573 | break; |
6574 | } |
6575 | |
6576 | done: |
6577 | rcu_read_unlock(); |
6578 | |
6579 | hci_dev_unlock(hdev); |
6580 | |
6581 | if (!cmd.cp.num_cis) |
6582 | return 0; |
6583 | |
6584 | /* Wait for HCI_LE_CIS_Established */ |
6585 | return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CIS, |
6586 | sizeof(cmd.cp) + sizeof(cmd.cis[0]) * |
6587 | cmd.cp.num_cis, &cmd, |
6588 | HCI_EVT_LE_CIS_ESTABLISHED, |
6589 | conn->conn_timeout, NULL); |
6590 | } |
6591 | |
6592 | int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle) |
6593 | { |
6594 | struct hci_cp_le_remove_cig cp; |
6595 | |
6596 | memset(&cp, 0, sizeof(cp)); |
6597 | cp.cig_id = handle; |
6598 | |
6599 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_REMOVE_CIG, sizeof(cp), |
6600 | &cp, HCI_CMD_TIMEOUT); |
6601 | } |
6602 | |
6603 | int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle) |
6604 | { |
6605 | struct hci_cp_le_big_term_sync cp; |
6606 | |
6607 | memset(&cp, 0, sizeof(cp)); |
6608 | cp.handle = handle; |
6609 | |
6610 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_BIG_TERM_SYNC, |
6611 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
6612 | } |
6613 | |
6614 | int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle) |
6615 | { |
6616 | struct hci_cp_le_pa_term_sync cp; |
6617 | |
6618 | memset(&cp, 0, sizeof(cp)); |
6619 | cp.handle = cpu_to_le16(handle); |
6620 | |
6621 | return __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_TERM_SYNC, |
6622 | sizeof(cp), &cp, HCI_CMD_TIMEOUT); |
6623 | } |
6624 | |
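/* Determine the own address type and, if needed, the random address to
 * use for advertising based on the privacy settings.
 */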
6625 | int hci_get_random_address(struct hci_dev *hdev, bool require_privacy, |
6626 | bool use_rpa, struct adv_info *adv_instance, |
6627 | u8 *own_addr_type, bdaddr_t *rand_addr) |
6628 | { |
6629 | int err; |
6630 | |
6631 | bacpy(dst: rand_addr, BDADDR_ANY); |
6632 | |
6633 | /* If privacy is enabled use a resolvable private address. If |
6634 | * current RPA has expired then generate a new one. |
6635 | */ |
6636 | if (use_rpa) { |
/* If the controller supports LL Privacy, use own address type
 * 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED)
6639 | */ |
6640 | if (use_ll_privacy(hdev)) |
6641 | *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; |
6642 | else |
6643 | *own_addr_type = ADDR_LE_DEV_RANDOM; |
6644 | |
6645 | if (adv_instance) { |
6646 | if (adv_rpa_valid(adv_instance)) |
6647 | return 0; |
6648 | } else { |
6649 | if (rpa_valid(hdev)) |
6650 | return 0; |
6651 | } |
6652 | |
6653 | err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
6654 | if (err < 0) {
6655 | bt_dev_err(hdev, "failed to generate new RPA");
6656 | return err;
6657 | }
6658 | 
6659 | bacpy(rand_addr, &hdev->rpa);
6660 | |
6661 | return 0; |
6662 | } |
6663 | |
6664 | /* If privacy is required but no resolvable private address is
6665 | * available, use a non-resolvable private address. This is useful for
6666 | * non-connectable advertising. |
6667 | */ |
6668 | if (require_privacy) { |
6669 | bdaddr_t nrpa; |
6670 | |
6671 | while (true) { |
6672 | /* The non-resolvable private address is generated |
6673 | * from random six bytes with the two most significant |
6674 | * bits cleared. |
6675 | */ |
6676 | get_random_bytes(&nrpa, 6);
6677 | nrpa.b[5] &= 0x3f; |
6678 | |
6679 | /* The non-resolvable private address shall not be |
6680 | * equal to the public address. |
6681 | */ |
6682 | if (bacmp(&hdev->bdaddr, &nrpa))
6683 | break; |
6684 | } |
6685 | |
6686 | *own_addr_type = ADDR_LE_DEV_RANDOM; |
6687 | bacpy(dst: rand_addr, src: &nrpa); |
6688 | |
6689 | return 0; |
6690 | } |
6691 | |
6692 | /* No privacy so use a public address. */ |
6693 | *own_addr_type = ADDR_LE_DEV_PUBLIC; |
6694 | |
6695 | return 0; |
6696 | } |
6697 | |
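/* Sync-queue callback: the advertising instance is carried as a scalar
 * encoded in the data pointer (UINT_PTR/PTR_UINT), so no allocation is
 * needed for the queued argument.
 */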
6698 | static int _update_adv_data_sync(struct hci_dev *hdev, void *data) |
6699 | { |
6700 | u8 instance = PTR_UINT(data); |
6701 | |
6702 | return hci_update_adv_data_sync(hdev, instance); |
6703 | } |
6704 | |
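/* Queue an update of the advertising data for the given instance on the
 * cmd_sync work queue.
 */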
6705 | int hci_update_adv_data(struct hci_dev *hdev, u8 instance) |
6706 | { |
6707 | return hci_cmd_sync_queue(hdev, _update_adv_data_sync, |
6708 | UINT_PTR(instance), NULL); |
6709 | } |
6710 | |
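/* Issue HCI_OP_CREATE_CONN for a BR/EDR ACL connection and wait for
 * HCI_EV_CONN_COMPLETE. Page scan parameters and the clock offset are
 * seeded from the inquiry cache when a recent entry exists, and role
 * switch is permitted when the controller supports it and HCI_LM_MASTER
 * is not set in the default link mode.
 */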
6711 | static int hci_acl_create_conn_sync(struct hci_dev *hdev, void *data) |
6712 | { |
6713 | struct hci_conn *conn = data; |
6714 | struct inquiry_entry *ie; |
6715 | struct hci_cp_create_conn cp; |
6716 | int err; |
6717 | |
6718 | if (!hci_conn_valid(hdev, conn)) |
6719 | return -ECANCELED; |
6720 | |
6721 | /* Many controllers disallow HCI Create Connection while they are
6722 | * performing HCI Inquiry, so cancel the Inquiry before issuing HCI
6723 | * Create Connection. This may cause the MGMT discovering state to
6724 | * become false without a request from user space, which is acceptable
6725 | * since the MGMT Discovery API does not promise that discovery runs
6726 | * forever. User space monitors the MGMT discovering state and may
6727 | * request discovery again once it becomes false.
6728 | */
6729 | if (test_bit(HCI_INQUIRY, &hdev->flags)) { |
6730 | err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, 0, |
6731 | NULL, HCI_CMD_TIMEOUT); |
6732 | if (err) |
6733 | bt_dev_warn(hdev, "Failed to cancel inquiry %d", err);
6734 | } |
6735 | |
6736 | conn->state = BT_CONNECT; |
6737 | conn->out = true; |
6738 | conn->role = HCI_ROLE_MASTER; |
6739 | |
6740 | conn->attempt++; |
6741 | |
6742 | conn->link_policy = hdev->link_policy; |
6743 | |
6744 | memset(&cp, 0, sizeof(cp)); |
6745 | bacpy(&cp.bdaddr, &conn->dst);
6746 | cp.pscan_rep_mode = 0x02; |
6747 | |
6748 | ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
6749 | if (ie) {
6750 | if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
6751 | cp.pscan_rep_mode = ie->data.pscan_rep_mode; |
6752 | cp.pscan_mode = ie->data.pscan_mode; |
6753 | cp.clock_offset = ie->data.clock_offset | |
6754 | cpu_to_le16(0x8000); |
6755 | } |
6756 | |
6757 | memcpy(conn->dev_class, ie->data.dev_class, 3); |
6758 | } |
6759 | |
6760 | cp.pkt_type = cpu_to_le16(conn->pkt_type); |
6761 | if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER)) |
6762 | cp.role_switch = 0x01; |
6763 | else |
6764 | cp.role_switch = 0x00; |
6765 | |
6766 | return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN, |
6767 | sizeof(cp), &cp, |
6768 | HCI_EV_CONN_COMPLETE, |
6769 | conn->conn_timeout, NULL); |
6770 | } |
6771 | |
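/* Queue hci_acl_create_conn_sync at most once per connection; a still
 * pending entry can later be removed by hci_cancel_connect_sync().
 */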
6772 | int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn) |
6773 | { |
6774 | return hci_cmd_sync_queue_once(hdev, hci_acl_create_conn_sync, conn, |
6775 | NULL); |
6776 | } |
6777 | |
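/* Completion callback for hci_le_create_conn_sync: on success clean up
 * the connect-by-scan state; on error fail the connection, unless it was
 * cancelled or is no longer the pending LE connection.
 */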
6778 | static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err) |
6779 | { |
6780 | struct hci_conn *conn = data; |
6781 | |
6782 | bt_dev_dbg(hdev, "err %d", err);
6783 | |
6784 | if (err == -ECANCELED) |
6785 | return; |
6786 | |
6787 | hci_dev_lock(hdev); |
6788 | |
6789 | if (!hci_conn_valid(hdev, conn)) |
6790 | goto done; |
6791 | |
6792 | if (!err) { |
6793 | hci_connect_le_scan_cleanup(conn, 0x00);
6794 | goto done; |
6795 | } |
6796 | |
6797 | /* Check if connection is still pending */ |
6798 | if (conn != hci_lookup_le_connect(hdev)) |
6799 | goto done; |
6800 | |
6801 | /* Flush to make sure we send create conn cancel command if needed */ |
6802 | flush_delayed_work(&conn->le_conn_timeout);
6803 | hci_conn_failed(conn, bt_status(err));
6804 | |
6805 | done: |
6806 | hci_dev_unlock(hdev); |
6807 | } |
6808 | |
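/* Queue the LE connection creation at most once per connection, with
 * create_le_conn_complete run when the sync command finishes.
 */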
6809 | int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn) |
6810 | { |
6811 | return hci_cmd_sync_queue_once(hdev, hci_le_create_conn_sync, conn, |
6812 | create_le_conn_complete); |
6813 | } |
6814 | |
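/* Abort a connection attempt that is still queued (BT_OPEN) by removing
 * the matching entry from the cmd_sync queue. Returns -EINVAL if the
 * connection has already left BT_OPEN, -ENOENT for unsupported link
 * types; otherwise the result reflects whether a queued entry was found
 * and removed.
 */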
6815 | int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn) |
6816 | { |
6817 | if (conn->state != BT_OPEN) |
6818 | return -EINVAL; |
6819 | |
6820 | switch (conn->type) { |
6821 | case ACL_LINK: |
6822 | return !hci_cmd_sync_dequeue_once(hdev, |
6823 | hci_acl_create_conn_sync, |
6824 | conn, NULL); |
6825 | case LE_LINK: |
6826 | return !hci_cmd_sync_dequeue_once(hdev, hci_le_create_conn_sync, |
6827 | conn, create_le_conn_complete); |
6828 | } |
6829 | |
6830 | return -ENOENT; |
6831 | } |
6832 | |