1 | /* |
2 | BlueZ - Bluetooth protocol stack for Linux |
3 | Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. |
4 | Copyright 2023-2024 NXP |
5 | |
6 | Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> |
7 | |
8 | This program is free software; you can redistribute it and/or modify |
9 | it under the terms of the GNU General Public License version 2 as |
10 | published by the Free Software Foundation; |
11 | |
12 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
13 | OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
14 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. |
15 | IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY |
16 | CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES |
17 | WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
18 | ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |
19 | OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
20 | |
21 | ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, |
22 | COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS |
23 | SOFTWARE IS DISCLAIMED. |
24 | */ |
25 | |
26 | #ifndef __HCI_CORE_H |
27 | #define __HCI_CORE_H |
28 | |
29 | #include <linux/idr.h> |
30 | #include <linux/leds.h> |
31 | #include <linux/rculist.h> |
32 | |
33 | #include <net/bluetooth/hci.h> |
34 | #include <net/bluetooth/hci_drv.h> |
35 | #include <net/bluetooth/hci_sync.h> |
36 | #include <net/bluetooth/hci_sock.h> |
37 | #include <net/bluetooth/coredump.h> |
38 | |
39 | /* HCI priority */ |
40 | #define HCI_PRIO_MAX 7 |
41 | |
42 | /* HCI maximum id value */ |
43 | #define HCI_MAX_ID 10000 |
44 | |
45 | /* HCI Core structures */ |
46 | struct inquiry_data { |
47 | bdaddr_t bdaddr; |
48 | __u8 pscan_rep_mode; |
49 | __u8 pscan_period_mode; |
50 | __u8 pscan_mode; |
51 | __u8 dev_class[3]; |
52 | __le16 clock_offset; |
53 | __s8 rssi; |
54 | __u8 ssp_mode; |
55 | }; |
56 | |
57 | struct inquiry_entry { |
58 | struct list_head all; /* inq_cache.all */ |
59 | struct list_head list; /* unknown or resolve */ |
60 | enum { |
61 | NAME_NOT_KNOWN, |
62 | NAME_NEEDED, |
63 | NAME_PENDING, |
64 | NAME_KNOWN, |
65 | } name_state; |
66 | __u32 timestamp; |
67 | struct inquiry_data data; |
68 | }; |
69 | |
70 | struct discovery_state { |
71 | int type; |
72 | enum { |
73 | DISCOVERY_STOPPED, |
74 | DISCOVERY_STARTING, |
75 | DISCOVERY_FINDING, |
76 | DISCOVERY_RESOLVING, |
77 | DISCOVERY_STOPPING, |
78 | } state; |
79 | struct list_head all; /* All devices found during inquiry */ |
80 | struct list_head unknown; /* Name state not known */ |
81 | struct list_head resolve; /* Name needs to be resolved */ |
82 | __u32 timestamp; |
83 | bdaddr_t last_adv_addr; |
84 | u8 last_adv_addr_type; |
85 | s8 last_adv_rssi; |
86 | u32 last_adv_flags; |
87 | u8 last_adv_data[HCI_MAX_EXT_AD_LENGTH]; |
88 | u8 last_adv_data_len; |
89 | bool report_invalid_rssi; |
90 | bool result_filtering; |
91 | bool limited; |
92 | s8 rssi; |
93 | u16 uuid_count; |
94 | u8 (*uuids)[16]; |
95 | unsigned long name_resolve_timeout; |
96 | }; |
97 | |
98 | #define SUSPEND_NOTIFIER_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */ |
99 | |
100 | enum suspend_tasks { |
101 | SUSPEND_PAUSE_DISCOVERY, |
102 | SUSPEND_UNPAUSE_DISCOVERY, |
103 | |
104 | SUSPEND_PAUSE_ADVERTISING, |
105 | SUSPEND_UNPAUSE_ADVERTISING, |
106 | |
107 | SUSPEND_SCAN_DISABLE, |
108 | SUSPEND_SCAN_ENABLE, |
109 | SUSPEND_DISCONNECTING, |
110 | |
111 | SUSPEND_POWERING_DOWN, |
112 | |
113 | SUSPEND_PREPARE_NOTIFIER, |
114 | |
115 | SUSPEND_SET_ADV_FILTER, |
116 | __SUSPEND_NUM_TASKS |
117 | }; |
118 | |
119 | enum suspended_state { |
120 | BT_RUNNING = 0, |
121 | BT_SUSPEND_DISCONNECT, |
122 | BT_SUSPEND_CONFIGURE_WAKE, |
123 | }; |
124 | |
125 | struct hci_conn_hash { |
126 | struct list_head list; |
127 | unsigned int acl_num; |
128 | unsigned int sco_num; |
129 | unsigned int iso_num; |
130 | unsigned int le_num; |
131 | unsigned int le_num_peripheral; |
132 | }; |
133 | |
134 | struct bdaddr_list { |
135 | struct list_head list; |
136 | bdaddr_t bdaddr; |
137 | u8 bdaddr_type; |
138 | }; |
139 | |
140 | struct codec_list { |
141 | struct list_head list; |
142 | u8 id; |
143 | __u16 cid; |
144 | __u16 vid; |
145 | u8 transport; |
146 | u8 num_caps; |
147 | u32 len; |
148 | struct hci_codec_caps caps[]; |
149 | }; |
150 | |
151 | struct bdaddr_list_with_irk { |
152 | struct list_head list; |
153 | bdaddr_t bdaddr; |
154 | u8 bdaddr_type; |
155 | u8 peer_irk[16]; |
156 | u8 local_irk[16]; |
157 | }; |
158 | |
159 | /* Bitmask of connection flags */ |
160 | enum hci_conn_flags { |
161 | HCI_CONN_FLAG_REMOTE_WAKEUP = BIT(0), |
162 | HCI_CONN_FLAG_DEVICE_PRIVACY = BIT(1), |
163 | HCI_CONN_FLAG_ADDRESS_RESOLUTION = BIT(2), |
164 | }; |
165 | typedef u8 hci_conn_flags_t; |
166 | |
167 | struct bdaddr_list_with_flags { |
168 | struct list_head list; |
169 | bdaddr_t bdaddr; |
170 | u8 bdaddr_type; |
171 | hci_conn_flags_t flags; |
172 | }; |
173 | |
174 | struct bt_uuid { |
175 | struct list_head list; |
176 | u8 uuid[16]; |
177 | u8 size; |
178 | u8 svc_hint; |
179 | }; |
180 | |
181 | struct blocked_key { |
182 | struct list_head list; |
183 | struct rcu_head rcu; |
184 | u8 type; |
185 | u8 val[16]; |
186 | }; |
187 | |
188 | struct smp_csrk { |
189 | bdaddr_t bdaddr; |
190 | u8 bdaddr_type; |
191 | u8 type; |
192 | u8 val[16]; |
193 | }; |
194 | |
195 | struct smp_ltk { |
196 | struct list_head list; |
197 | struct rcu_head rcu; |
198 | bdaddr_t bdaddr; |
199 | u8 bdaddr_type; |
200 | u8 authenticated; |
201 | u8 type; |
202 | u8 enc_size; |
203 | __le16 ediv; |
204 | __le64 rand; |
205 | u8 val[16]; |
206 | }; |
207 | |
208 | struct smp_irk { |
209 | struct list_head list; |
210 | struct rcu_head rcu; |
211 | bdaddr_t rpa; |
212 | bdaddr_t bdaddr; |
213 | u8 addr_type; |
214 | u8 val[16]; |
215 | }; |
216 | |
217 | struct link_key { |
218 | struct list_head list; |
219 | struct rcu_head rcu; |
220 | bdaddr_t bdaddr; |
221 | u8 type; |
222 | u8 val[HCI_LINK_KEY_SIZE]; |
223 | u8 pin_len; |
224 | }; |
225 | |
226 | struct oob_data { |
227 | struct list_head list; |
228 | bdaddr_t bdaddr; |
229 | u8 bdaddr_type; |
230 | u8 present; |
231 | u8 hash192[16]; |
232 | u8 rand192[16]; |
233 | u8 hash256[16]; |
234 | u8 rand256[16]; |
235 | }; |
236 | |
237 | struct adv_info { |
238 | struct list_head list; |
239 | bool enabled; |
240 | bool pending; |
241 | bool periodic; |
242 | __u8 mesh; |
243 | __u8 instance; |
244 | __u8 handle; |
245 | __u32 flags; |
246 | __u16 timeout; |
247 | __u16 remaining_time; |
248 | __u16 duration; |
249 | __u16 adv_data_len; |
250 | __u8 adv_data[HCI_MAX_EXT_AD_LENGTH]; |
251 | bool adv_data_changed; |
252 | __u16 scan_rsp_len; |
253 | __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH]; |
254 | bool scan_rsp_changed; |
255 | __u16 per_adv_data_len; |
256 | __u8 per_adv_data[HCI_MAX_PER_AD_LENGTH]; |
257 | __s8 tx_power; |
258 | __u32 min_interval; |
259 | __u32 max_interval; |
260 | bdaddr_t random_addr; |
261 | bool rpa_expired; |
262 | struct delayed_work rpa_expired_cb; |
263 | }; |
264 | |
265 | struct tx_queue { |
266 | struct sk_buff_head queue; |
267 | unsigned int extra; |
268 | unsigned int tracked; |
269 | }; |
270 | |
271 | #define HCI_MAX_ADV_INSTANCES 5 |
272 | #define HCI_DEFAULT_ADV_DURATION 2 |
273 | |
274 | #define HCI_ADV_TX_POWER_NO_PREFERENCE 0x7F |
275 | |
276 | #define DATA_CMP(_d1, _l1, _d2, _l2) \ |
277 | (_l1 == _l2 ? memcmp(_d1, _d2, _l1) : _l1 - _l2) |
278 | |
279 | #define ADV_DATA_CMP(_adv, _data, _len) \ |
280 | DATA_CMP((_adv)->adv_data, (_adv)->adv_data_len, _data, _len) |
281 | |
282 | #define SCAN_RSP_CMP(_adv, _data, _len) \ |
283 | DATA_CMP((_adv)->scan_rsp_data, (_adv)->scan_rsp_len, _data, _len) |
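Taken together, DATA_CMP() and its wrappers evaluate to zero only when both buffers have the same length and identical contents. Below is a minimal sketch of using ADV_DATA_CMP() to skip redundant updates and flag changed data; adv_update_data() is a hypothetical helper, not part of this header.

```c
/* Illustrative sketch: refresh an adv_info's data only when it changed.
 * adv_update_data() is a hypothetical helper, not part of this header.
 */
static void adv_update_data(struct adv_info *adv, const u8 *data, u16 len)
{
	if (len > sizeof(adv->adv_data))
		return;

	/* ADV_DATA_CMP() == 0 means same length and identical bytes */
	if (!ADV_DATA_CMP(adv, data, len))
		return;

	memcpy(adv->adv_data, data, len);
	adv->adv_data_len = len;
	adv->adv_data_changed = true;
}
```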
284 | |
285 | struct monitored_device { |
286 | struct list_head list; |
287 | |
288 | bdaddr_t bdaddr; |
289 | __u8 addr_type; |
290 | __u16 handle; |
291 | bool notified; |
292 | }; |
293 | |
294 | struct adv_pattern { |
295 | struct list_head list; |
296 | __u8 ad_type; |
297 | __u8 offset; |
298 | __u8 length; |
299 | __u8 value[HCI_MAX_EXT_AD_LENGTH]; |
300 | }; |
301 | |
302 | struct adv_rssi_thresholds { |
303 | __s8 low_threshold; |
304 | __s8 high_threshold; |
305 | __u16 low_threshold_timeout; |
306 | __u16 high_threshold_timeout; |
307 | __u8 sampling_period; |
308 | }; |
309 | |
310 | struct adv_monitor { |
311 | struct list_head patterns; |
312 | struct adv_rssi_thresholds rssi; |
313 | __u16 handle; |
314 | |
315 | enum { |
316 | ADV_MONITOR_STATE_NOT_REGISTERED, |
317 | ADV_MONITOR_STATE_REGISTERED, |
318 | ADV_MONITOR_STATE_OFFLOADED |
319 | } state; |
320 | }; |
321 | |
322 | #define HCI_MIN_ADV_MONITOR_HANDLE 1 |
323 | #define HCI_MAX_ADV_MONITOR_NUM_HANDLES 32 |
324 | #define HCI_MAX_ADV_MONITOR_NUM_PATTERNS 16 |
325 | #define HCI_ADV_MONITOR_EXT_NONE 1 |
326 | #define HCI_ADV_MONITOR_EXT_MSFT 2 |
327 | |
328 | #define HCI_MAX_SHORT_NAME_LENGTH 10 |
329 | |
330 | #define HCI_CONN_HANDLE_MAX 0x0eff |
331 | #define HCI_CONN_HANDLE_UNSET(_handle) (_handle > HCI_CONN_HANDLE_MAX) |
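Handles above HCI_CONN_HANDLE_MAX are treated as "not yet assigned", so HCI_CONN_HANDLE_UNSET() doubles as a validity check. A small sketch follows, assuming a hypothetical handle_conn_complete() caller in an event path.

```c
/* Illustrative sketch: validate a handle reported by the controller.
 * handle_conn_complete() is hypothetical, not part of this header.
 */
static int handle_conn_complete(struct hci_conn *conn, __u16 handle)
{
	/* Anything above HCI_CONN_HANDLE_MAX (0x0eff) is invalid/unset */
	if (HCI_CONN_HANDLE_UNSET(handle))
		return -EINVAL;

	conn->handle = handle;
	return 0;
}
```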
332 | |
333 | /* Min encryption key size to match with SMP */ |
334 | #define HCI_MIN_ENC_KEY_SIZE 7 |
335 | |
336 | /* Default LE RPA expiry time, 15 minutes */ |
337 | #define HCI_DEFAULT_RPA_TIMEOUT (15 * 60) |
338 | |
339 | /* Default min/max age of connection information (1s/3s) */ |
340 | #define DEFAULT_CONN_INFO_MIN_AGE 1000 |
341 | #define DEFAULT_CONN_INFO_MAX_AGE 3000 |
342 | /* Default authenticated payload timeout 30s */ |
343 | #define DEFAULT_AUTH_PAYLOAD_TIMEOUT 0x0bb8 |
344 | |
345 | #define HCI_MAX_PAGES 3 |
346 | |
347 | struct hci_dev { |
348 | struct list_head list; |
349 | struct mutex lock; |
350 | |
351 | struct ida unset_handle_ida; |
352 | |
353 | const char *name; |
354 | unsigned long flags; |
355 | __u16 id; |
356 | __u8 bus; |
357 | bdaddr_t bdaddr; |
358 | bdaddr_t setup_addr; |
359 | bdaddr_t public_addr; |
360 | bdaddr_t random_addr; |
361 | bdaddr_t static_addr; |
362 | __u8 adv_addr_type; |
363 | __u8 dev_name[HCI_MAX_NAME_LENGTH]; |
364 | __u8 short_name[HCI_MAX_SHORT_NAME_LENGTH]; |
365 | __u8 eir[HCI_MAX_EIR_LENGTH]; |
366 | __u16 appearance; |
367 | __u8 dev_class[3]; |
368 | __u8 major_class; |
369 | __u8 minor_class; |
370 | __u8 max_page; |
371 | __u8 features[HCI_MAX_PAGES][8]; |
372 | __u8 le_features[8]; |
373 | __u8 le_accept_list_size; |
374 | __u8 le_resolv_list_size; |
375 | __u8 le_num_of_adv_sets; |
376 | __u8 le_states[8]; |
377 | __u8 mesh_ad_types[16]; |
378 | __u8 mesh_send_ref; |
379 | __u8 commands[64]; |
380 | __u8 hci_ver; |
381 | __u16 hci_rev; |
382 | __u8 lmp_ver; |
383 | __u16 manufacturer; |
384 | __u16 lmp_subver; |
385 | __u16 voice_setting; |
386 | __u8 num_iac; |
387 | __u16 stored_max_keys; |
388 | __u16 stored_num_keys; |
389 | __u8 io_capability; |
390 | __s8 inq_tx_power; |
391 | __u8 err_data_reporting; |
392 | __u16 page_scan_interval; |
393 | __u16 page_scan_window; |
394 | __u8 page_scan_type; |
395 | __u8 le_adv_channel_map; |
396 | __u16 le_adv_min_interval; |
397 | __u16 le_adv_max_interval; |
398 | __u8 le_scan_type; |
399 | __u16 le_scan_interval; |
400 | __u16 le_scan_window; |
401 | __u16 le_scan_int_suspend; |
402 | __u16 le_scan_window_suspend; |
403 | __u16 le_scan_int_discovery; |
404 | __u16 le_scan_window_discovery; |
405 | __u16 le_scan_int_adv_monitor; |
406 | __u16 le_scan_window_adv_monitor; |
407 | __u16 le_scan_int_connect; |
408 | __u16 le_scan_window_connect; |
409 | __u16 le_conn_min_interval; |
410 | __u16 le_conn_max_interval; |
411 | __u16 le_conn_latency; |
412 | __u16 le_supv_timeout; |
413 | __u16 le_def_tx_len; |
414 | __u16 le_def_tx_time; |
415 | __u16 le_max_tx_len; |
416 | __u16 le_max_tx_time; |
417 | __u16 le_max_rx_len; |
418 | __u16 le_max_rx_time; |
419 | __u8 le_max_key_size; |
420 | __u8 le_min_key_size; |
421 | __u16 discov_interleaved_timeout; |
422 | __u16 conn_info_min_age; |
423 | __u16 conn_info_max_age; |
424 | __u16 auth_payload_timeout; |
425 | __u8 min_enc_key_size; |
426 | __u8 max_enc_key_size; |
427 | __u8 pairing_opts; |
428 | __u8 ssp_debug_mode; |
429 | __u8 hw_error_code; |
430 | __u32 clock; |
431 | __u16 advmon_allowlist_duration; |
432 | __u16 advmon_no_filter_duration; |
433 | __u8 enable_advmon_interleave_scan; |
434 | |
435 | __u16 devid_source; |
436 | __u16 devid_vendor; |
437 | __u16 devid_product; |
438 | __u16 devid_version; |
439 | |
440 | __u8 def_page_scan_type; |
441 | __u16 def_page_scan_int; |
442 | __u16 def_page_scan_window; |
443 | __u8 def_inq_scan_type; |
444 | __u16 def_inq_scan_int; |
445 | __u16 def_inq_scan_window; |
446 | __u16 def_br_lsto; |
447 | __u16 def_page_timeout; |
448 | __u16 def_multi_adv_rotation_duration; |
449 | __u16 def_le_autoconnect_timeout; |
450 | __s8 min_le_tx_power; |
451 | __s8 max_le_tx_power; |
452 | |
453 | __u16 pkt_type; |
454 | __u16 esco_type; |
455 | __u16 link_policy; |
456 | __u16 link_mode; |
457 | |
458 | __u32 idle_timeout; |
459 | __u16 sniff_min_interval; |
460 | __u16 sniff_max_interval; |
461 | |
462 | unsigned int auto_accept_delay; |
463 | |
464 | unsigned long quirks; |
465 | |
466 | atomic_t cmd_cnt; |
467 | unsigned int acl_cnt; |
468 | unsigned int sco_cnt; |
469 | unsigned int le_cnt; |
470 | unsigned int iso_cnt; |
471 | |
472 | unsigned int acl_mtu; |
473 | unsigned int sco_mtu; |
474 | unsigned int le_mtu; |
475 | unsigned int iso_mtu; |
476 | unsigned int acl_pkts; |
477 | unsigned int sco_pkts; |
478 | unsigned int le_pkts; |
479 | unsigned int iso_pkts; |
480 | |
481 | unsigned long acl_last_tx; |
482 | unsigned long le_last_tx; |
483 | |
484 | __u8 le_tx_def_phys; |
485 | __u8 le_rx_def_phys; |
486 | |
487 | struct workqueue_struct *workqueue; |
488 | struct workqueue_struct *req_workqueue; |
489 | |
490 | struct work_struct power_on; |
491 | struct delayed_work power_off; |
492 | struct work_struct error_reset; |
493 | struct work_struct cmd_sync_work; |
494 | struct list_head cmd_sync_work_list; |
495 | struct mutex cmd_sync_work_lock; |
496 | struct mutex unregister_lock; |
497 | struct work_struct cmd_sync_cancel_work; |
498 | struct work_struct reenable_adv_work; |
499 | |
500 | __u16 discov_timeout; |
501 | struct delayed_work discov_off; |
502 | |
503 | struct delayed_work service_cache; |
504 | |
505 | struct delayed_work cmd_timer; |
506 | struct delayed_work ncmd_timer; |
507 | |
508 | struct work_struct rx_work; |
509 | struct work_struct cmd_work; |
510 | struct work_struct tx_work; |
511 | |
512 | struct delayed_work le_scan_disable; |
513 | |
514 | struct sk_buff_head rx_q; |
515 | struct sk_buff_head raw_q; |
516 | struct sk_buff_head cmd_q; |
517 | |
518 | struct sk_buff *sent_cmd; |
519 | struct sk_buff *recv_event; |
520 | |
521 | struct mutex req_lock; |
522 | wait_queue_head_t req_wait_q; |
523 | __u32 req_status; |
524 | __u32 req_result; |
525 | struct sk_buff *req_skb; |
526 | struct sk_buff *req_rsp; |
527 | |
528 | void *smp_data; |
529 | void *smp_bredr_data; |
530 | |
531 | struct discovery_state discovery; |
532 | |
533 | bool discovery_paused; |
534 | int advertising_old_state; |
535 | bool advertising_paused; |
536 | |
537 | struct notifier_block suspend_notifier; |
538 | enum suspended_state suspend_state_next; |
539 | enum suspended_state suspend_state; |
540 | bool scanning_paused; |
541 | bool suspended; |
542 | u8 wake_reason; |
543 | bdaddr_t wake_addr; |
544 | u8 wake_addr_type; |
545 | |
546 | struct hci_conn_hash conn_hash; |
547 | |
548 | struct list_head mesh_pending; |
549 | struct list_head mgmt_pending; |
550 | struct list_head reject_list; |
551 | struct list_head accept_list; |
552 | struct list_head uuids; |
553 | struct list_head link_keys; |
554 | struct list_head long_term_keys; |
555 | struct list_head identity_resolving_keys; |
556 | struct list_head remote_oob_data; |
557 | struct list_head le_accept_list; |
558 | struct list_head le_resolv_list; |
559 | struct list_head le_conn_params; |
560 | struct list_head pend_le_conns; |
561 | struct list_head pend_le_reports; |
562 | struct list_head blocked_keys; |
563 | struct list_head local_codecs; |
564 | |
565 | struct hci_dev_stats stat; |
566 | |
567 | atomic_t promisc; |
568 | |
569 | const char *hw_info; |
570 | const char *fw_info; |
571 | struct dentry *debugfs; |
572 | |
573 | struct hci_devcoredump dump; |
574 | |
575 | struct device dev; |
576 | |
577 | struct rfkill *rfkill; |
578 | |
579 | DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS); |
580 | hci_conn_flags_t conn_flags; |
581 | |
582 | __s8 adv_tx_power; |
583 | __u8 adv_data[HCI_MAX_EXT_AD_LENGTH]; |
584 | __u8 adv_data_len; |
585 | __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH]; |
586 | __u8 scan_rsp_data_len; |
587 | __u8 per_adv_data[HCI_MAX_PER_AD_LENGTH]; |
588 | __u8 per_adv_data_len; |
589 | |
590 | struct list_head adv_instances; |
591 | unsigned int adv_instance_cnt; |
592 | __u8 cur_adv_instance; |
593 | __u16 adv_instance_timeout; |
594 | struct delayed_work adv_instance_expire; |
595 | |
596 | struct idr adv_monitors_idr; |
597 | unsigned int adv_monitors_cnt; |
598 | |
599 | __u8 irk[16]; |
600 | __u32 rpa_timeout; |
601 | struct delayed_work rpa_expired; |
602 | bdaddr_t rpa; |
603 | |
604 | struct delayed_work mesh_send_done; |
605 | |
606 | enum { |
607 | INTERLEAVE_SCAN_NONE, |
608 | INTERLEAVE_SCAN_NO_FILTER, |
609 | INTERLEAVE_SCAN_ALLOWLIST |
610 | } interleave_scan_state; |
611 | |
612 | struct delayed_work interleave_scan; |
613 | |
614 | struct list_head monitored_devices; |
615 | bool advmon_pend_notify; |
616 | |
617 | struct hci_drv *hci_drv; |
618 | |
619 | #if IS_ENABLED(CONFIG_BT_LEDS) |
620 | struct led_trigger *power_led; |
621 | #endif |
622 | |
623 | #if IS_ENABLED(CONFIG_BT_MSFTEXT) |
624 | __u16 msft_opcode; |
625 | void *msft_data; |
626 | bool msft_curve_validity; |
627 | #endif |
628 | |
629 | #if IS_ENABLED(CONFIG_BT_AOSPEXT) |
630 | bool aosp_capable; |
631 | bool aosp_quality_report; |
632 | #endif |
633 | |
634 | int (*open)(struct hci_dev *hdev); |
635 | int (*close)(struct hci_dev *hdev); |
636 | int (*flush)(struct hci_dev *hdev); |
637 | int (*setup)(struct hci_dev *hdev); |
638 | int (*shutdown)(struct hci_dev *hdev); |
639 | int (*send)(struct hci_dev *hdev, struct sk_buff *skb); |
640 | void (*notify)(struct hci_dev *hdev, unsigned int evt); |
641 | void (*hw_error)(struct hci_dev *hdev, u8 code); |
642 | int (*post_init)(struct hci_dev *hdev); |
643 | int (*set_diag)(struct hci_dev *hdev, bool enable); |
644 | int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr); |
645 | void (*reset)(struct hci_dev *hdev); |
646 | bool (*wakeup)(struct hci_dev *hdev); |
647 | int (*set_quality_report)(struct hci_dev *hdev, bool enable); |
648 | int (*get_data_path_id)(struct hci_dev *hdev, __u8 *data_path); |
649 | int (*get_codec_config_data)(struct hci_dev *hdev, __u8 type, |
650 | struct bt_codec *codec, __u8 *vnd_len, |
651 | __u8 **vnd_data); |
652 | u8 (*classify_pkt_type)(struct hci_dev *hdev, struct sk_buff *skb); |
653 | }; |
654 | |
655 | #define HCI_PHY_HANDLE(handle) (handle & 0xff) |
656 | |
657 | enum conn_reasons { |
658 | CONN_REASON_PAIR_DEVICE, |
659 | CONN_REASON_L2CAP_CHAN, |
660 | CONN_REASON_SCO_CONNECT, |
661 | CONN_REASON_ISO_CONNECT, |
662 | }; |
663 | |
664 | struct hci_conn { |
665 | struct list_head list; |
666 | |
667 | atomic_t refcnt; |
668 | |
669 | bdaddr_t dst; |
670 | __u8 dst_type; |
671 | bdaddr_t src; |
672 | __u8 src_type; |
673 | bdaddr_t init_addr; |
674 | __u8 init_addr_type; |
675 | bdaddr_t resp_addr; |
676 | __u8 resp_addr_type; |
677 | __u8 adv_instance; |
678 | __u16 handle; |
679 | __u16 sync_handle; |
680 | __u8 sid; |
681 | __u16 state; |
682 | __u16 mtu; |
683 | __u8 mode; |
684 | __u8 type; |
685 | __u8 role; |
686 | bool out; |
687 | __u8 attempt; |
688 | __u8 dev_class[3]; |
689 | __u8 features[HCI_MAX_PAGES][8]; |
690 | __u16 pkt_type; |
691 | __u16 link_policy; |
692 | __u8 key_type; |
693 | __u8 auth_type; |
694 | __u8 sec_level; |
695 | __u8 pending_sec_level; |
696 | __u8 pin_length; |
697 | __u8 enc_key_size; |
698 | __u8 io_capability; |
699 | __u32 passkey_notify; |
700 | __u8 passkey_entered; |
701 | __u16 disc_timeout; |
702 | __u16 conn_timeout; |
703 | __u16 setting; |
704 | __u16 auth_payload_timeout; |
705 | __u16 le_conn_min_interval; |
706 | __u16 le_conn_max_interval; |
707 | __u16 le_conn_interval; |
708 | __u16 le_conn_latency; |
709 | __u16 le_supv_timeout; |
710 | __u8 le_adv_data[HCI_MAX_EXT_AD_LENGTH]; |
711 | __u8 le_adv_data_len; |
712 | __u8 le_per_adv_data[HCI_MAX_PER_AD_TOT_LEN]; |
713 | __u16 le_per_adv_data_len; |
714 | __u16 le_per_adv_data_offset; |
715 | __u8 le_adv_phy; |
716 | __u8 le_adv_sec_phy; |
717 | __u8 le_tx_phy; |
718 | __u8 le_rx_phy; |
719 | __s8 rssi; |
720 | __s8 tx_power; |
721 | __s8 max_tx_power; |
722 | struct bt_iso_qos iso_qos; |
723 | __u8 num_bis; |
724 | __u8 bis[HCI_MAX_ISO_BIS]; |
725 | |
726 | unsigned long flags; |
727 | |
728 | enum conn_reasons conn_reason; |
729 | __u8 abort_reason; |
730 | |
731 | __u32 clock; |
732 | __u16 clock_accuracy; |
733 | |
734 | unsigned long conn_info_timestamp; |
735 | |
736 | __u8 remote_cap; |
737 | __u8 remote_auth; |
738 | __u8 remote_id; |
739 | |
740 | unsigned int sent; |
741 | |
742 | struct sk_buff_head data_q; |
743 | struct list_head chan_list; |
744 | |
745 | struct tx_queue tx_q; |
746 | |
747 | struct delayed_work disc_work; |
748 | struct delayed_work auto_accept_work; |
749 | struct delayed_work idle_work; |
750 | struct delayed_work le_conn_timeout; |
751 | |
752 | struct device dev; |
753 | struct dentry *debugfs; |
754 | |
755 | struct hci_dev *hdev; |
756 | void *l2cap_data; |
757 | void *sco_data; |
758 | void *iso_data; |
759 | |
760 | struct list_head link_list; |
761 | struct hci_conn *parent; |
762 | struct hci_link *link; |
763 | |
764 | struct bt_codec codec; |
765 | |
766 | void (*connect_cfm_cb) (struct hci_conn *conn, u8 status); |
767 | void (*security_cfm_cb) (struct hci_conn *conn, u8 status); |
768 | void (*disconn_cfm_cb) (struct hci_conn *conn, u8 reason); |
769 | |
770 | void (*cleanup)(struct hci_conn *conn); |
771 | }; |
772 | |
773 | struct hci_link { |
774 | struct list_head list; |
775 | struct hci_conn *conn; |
776 | }; |
777 | |
778 | struct hci_chan { |
779 | struct list_head list; |
780 | __u16 handle; |
781 | struct hci_conn *conn; |
782 | struct sk_buff_head data_q; |
783 | unsigned int sent; |
784 | __u8 state; |
785 | }; |
786 | |
787 | struct hci_conn_params { |
788 | struct list_head list; |
789 | struct list_head action; |
790 | |
791 | bdaddr_t addr; |
792 | u8 addr_type; |
793 | |
794 | u16 conn_min_interval; |
795 | u16 conn_max_interval; |
796 | u16 conn_latency; |
797 | u16 supervision_timeout; |
798 | |
799 | enum { |
800 | HCI_AUTO_CONN_DISABLED, |
801 | HCI_AUTO_CONN_REPORT, |
802 | HCI_AUTO_CONN_DIRECT, |
803 | HCI_AUTO_CONN_ALWAYS, |
804 | HCI_AUTO_CONN_LINK_LOSS, |
805 | HCI_AUTO_CONN_EXPLICIT, |
806 | } auto_connect; |
807 | |
808 | struct hci_conn *conn; |
809 | bool explicit_connect; |
810 | /* Accessed without hdev->lock: */ |
811 | hci_conn_flags_t flags; |
812 | u8 privacy_mode; |
813 | }; |
814 | |
815 | extern struct list_head hci_dev_list; |
816 | extern struct list_head hci_cb_list; |
817 | extern rwlock_t hci_dev_list_lock; |
818 | extern struct mutex hci_cb_list_lock; |
819 | |
820 | #define hci_dev_set_flag(hdev, nr) set_bit((nr), (hdev)->dev_flags) |
821 | #define hci_dev_clear_flag(hdev, nr) clear_bit((nr), (hdev)->dev_flags) |
822 | #define hci_dev_change_flag(hdev, nr) change_bit((nr), (hdev)->dev_flags) |
823 | #define hci_dev_test_flag(hdev, nr) test_bit((nr), (hdev)->dev_flags) |
824 | #define hci_dev_test_and_set_flag(hdev, nr) test_and_set_bit((nr), (hdev)->dev_flags) |
825 | #define hci_dev_test_and_clear_flag(hdev, nr) test_and_clear_bit((nr), (hdev)->dev_flags) |
826 | #define hci_dev_test_and_change_flag(hdev, nr) test_and_change_bit((nr), (hdev)->dev_flags) |
827 | |
828 | #define hci_dev_clear_volatile_flags(hdev) \ |
829 | do { \ |
830 | hci_dev_clear_flag(hdev, HCI_LE_SCAN); \ |
831 | hci_dev_clear_flag(hdev, HCI_LE_ADV); \ |
832 | hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);\ |
833 | hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); \ |
834 | hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); \ |
835 | } while (0) |
836 | |
837 | #define hci_dev_le_state_simultaneous(hdev) \ |
838 | (!test_bit(HCI_QUIRK_BROKEN_LE_STATES, &hdev->quirks) && \ |
839 | (hdev->le_states[4] & 0x08) && /* Central */ \ |
840 | (hdev->le_states[4] & 0x40) && /* Peripheral */ \ |
841 | (hdev->le_states[3] & 0x10)) /* Simultaneous */ |
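The dev_flags bitmap is only ever touched through the hci_dev_*_flag() wrappers above, and hci_dev_le_state_simultaneous() checks the controller's supported-states mask. A hedged sketch combining the two to gate an outgoing LE connection while advertising is active; can_connect_while_adv() is a hypothetical helper.

```c
/* Illustrative sketch: decide whether a new outgoing LE connection may be
 * created while advertising is enabled. can_connect_while_adv() is a
 * hypothetical helper, not part of this header.
 */
static bool can_connect_while_adv(struct hci_dev *hdev)
{
	/* Nothing to check if advertising is not currently enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
		return true;

	/* Otherwise the controller must support the simultaneous
	 * central + peripheral LE states.
	 */
	return hci_dev_le_state_simultaneous(hdev);
}
```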
842 | |
843 | /* ----- HCI interface to upper protocols ----- */ |
844 | int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr); |
845 | int l2cap_disconn_ind(struct hci_conn *hcon); |
846 | void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags); |
847 | |
848 | #if IS_ENABLED(CONFIG_BT_BREDR) |
849 | int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags); |
850 | void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb); |
851 | #else |
852 | static inline int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, |
853 | __u8 *flags) |
854 | { |
855 | return 0; |
856 | } |
857 | |
858 | static inline void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb) |
859 | { |
860 | } |
861 | #endif |
862 | |
863 | #if IS_ENABLED(CONFIG_BT_LE) |
864 | int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags); |
865 | void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags); |
866 | #else |
867 | static inline int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, |
868 | __u8 *flags) |
869 | { |
870 | return 0; |
871 | } |
872 | static inline void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, |
873 | u16 flags) |
874 | { |
875 | } |
876 | #endif |
877 | |
878 | /* ----- Inquiry cache ----- */ |
879 | #define INQUIRY_CACHE_AGE_MAX (HZ*30) /* 30 seconds */ |
880 | #define INQUIRY_ENTRY_AGE_MAX (HZ*60) /* 60 seconds */ |
881 | |
882 | static inline void discovery_init(struct hci_dev *hdev) |
883 | { |
884 | hdev->discovery.state = DISCOVERY_STOPPED; |
885 | INIT_LIST_HEAD(&hdev->discovery.all); |
886 | INIT_LIST_HEAD(&hdev->discovery.unknown); |
887 | INIT_LIST_HEAD(&hdev->discovery.resolve); |
888 | hdev->discovery.report_invalid_rssi = true; |
889 | hdev->discovery.rssi = HCI_RSSI_INVALID; |
890 | } |
891 | |
892 | static inline void hci_discovery_filter_clear(struct hci_dev *hdev) |
893 | { |
894 | hdev->discovery.result_filtering = false; |
895 | hdev->discovery.report_invalid_rssi = true; |
896 | hdev->discovery.rssi = HCI_RSSI_INVALID; |
897 | hdev->discovery.uuid_count = 0; |
898 | kfree(hdev->discovery.uuids); |
899 | hdev->discovery.uuids = NULL; |
900 | } |
901 | |
902 | bool hci_discovery_active(struct hci_dev *hdev); |
903 | |
904 | void hci_discovery_set_state(struct hci_dev *hdev, int state); |
905 | |
906 | static inline int inquiry_cache_empty(struct hci_dev *hdev) |
907 | { |
908 | return list_empty(&hdev->discovery.all); |
909 | } |
910 | |
911 | static inline long inquiry_cache_age(struct hci_dev *hdev) |
912 | { |
913 | struct discovery_state *c = &hdev->discovery; |
914 | return jiffies - c->timestamp; |
915 | } |
916 | |
917 | static inline long inquiry_entry_age(struct inquiry_entry *e) |
918 | { |
919 | return jiffies - e->timestamp; |
920 | } |
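Both age helpers simply return elapsed jiffies, which callers compare against the *_AGE_MAX limits above. A sketch deciding whether cached inquiry results are still fresh, assuming hdev->lock is held; inquiry_cache_stale() is a hypothetical helper.

```c
/* Illustrative sketch: start a fresh inquiry only when the cached results
 * have gone stale. Assumes hdev->lock is held by the caller;
 * inquiry_cache_stale() is hypothetical, not part of this header.
 */
static bool inquiry_cache_stale(struct hci_dev *hdev)
{
	if (inquiry_cache_empty(hdev))
		return true;

	/* A cache older than INQUIRY_CACHE_AGE_MAX (30 s) is stale */
	return inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX;
}
```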
921 | |
922 | struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, |
923 | bdaddr_t *bdaddr); |
924 | struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev, |
925 | bdaddr_t *bdaddr); |
926 | struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev, |
927 | bdaddr_t *bdaddr, |
928 | int state); |
929 | void hci_inquiry_cache_update_resolve(struct hci_dev *hdev, |
930 | struct inquiry_entry *ie); |
931 | u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data, |
932 | bool name_known); |
933 | void hci_inquiry_cache_flush(struct hci_dev *hdev); |
934 | |
935 | /* ----- HCI Connections ----- */ |
936 | enum { |
937 | HCI_CONN_AUTH_PEND, |
938 | HCI_CONN_ENCRYPT_PEND, |
939 | HCI_CONN_RSWITCH_PEND, |
940 | HCI_CONN_MODE_CHANGE_PEND, |
941 | HCI_CONN_SCO_SETUP_PEND, |
942 | HCI_CONN_MGMT_CONNECTED, |
943 | HCI_CONN_SSP_ENABLED, |
944 | HCI_CONN_SC_ENABLED, |
945 | HCI_CONN_AES_CCM, |
946 | HCI_CONN_POWER_SAVE, |
947 | HCI_CONN_FLUSH_KEY, |
948 | HCI_CONN_ENCRYPT, |
949 | HCI_CONN_AUTH, |
950 | HCI_CONN_SECURE, |
951 | HCI_CONN_FIPS, |
952 | HCI_CONN_STK_ENCRYPT, |
953 | HCI_CONN_AUTH_INITIATOR, |
954 | HCI_CONN_DROP, |
955 | HCI_CONN_CANCEL, |
956 | HCI_CONN_PARAM_REMOVAL_PEND, |
957 | HCI_CONN_NEW_LINK_KEY, |
958 | HCI_CONN_SCANNING, |
959 | HCI_CONN_AUTH_FAILURE, |
960 | HCI_CONN_PER_ADV, |
961 | HCI_CONN_BIG_CREATED, |
962 | HCI_CONN_CREATE_CIS, |
963 | HCI_CONN_CREATE_BIG_SYNC, |
964 | HCI_CONN_BIG_SYNC, |
965 | HCI_CONN_BIG_SYNC_FAILED, |
966 | HCI_CONN_CREATE_PA_SYNC, |
967 | HCI_CONN_PA_SYNC, |
968 | HCI_CONN_PA_SYNC_FAILED, |
969 | }; |
970 | |
971 | static inline bool hci_conn_ssp_enabled(struct hci_conn *conn) |
972 | { |
973 | struct hci_dev *hdev = conn->hdev; |
974 | return hci_dev_test_flag(hdev, HCI_SSP_ENABLED) && |
975 | test_bit(HCI_CONN_SSP_ENABLED, &conn->flags); |
976 | } |
977 | |
978 | static inline bool hci_conn_sc_enabled(struct hci_conn *conn) |
979 | { |
980 | struct hci_dev *hdev = conn->hdev; |
981 | return hci_dev_test_flag(hdev, HCI_SC_ENABLED) && |
982 | test_bit(HCI_CONN_SC_ENABLED, &conn->flags); |
983 | } |
984 | |
985 | static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c) |
986 | { |
987 | struct hci_conn_hash *h = &hdev->conn_hash; |
988 | list_add_tail_rcu(&c->list, &h->list); |
989 | switch (c->type) { |
990 | case ACL_LINK: |
991 | h->acl_num++; |
992 | break; |
993 | case LE_LINK: |
994 | h->le_num++; |
995 | if (c->role == HCI_ROLE_SLAVE) |
996 | h->le_num_peripheral++; |
997 | break; |
998 | case SCO_LINK: |
999 | case ESCO_LINK: |
1000 | h->sco_num++; |
1001 | break; |
1002 | case CIS_LINK: |
1003 | case BIS_LINK: |
1004 | h->iso_num++; |
1005 | break; |
1006 | } |
1007 | } |
1008 | |
1009 | static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c) |
1010 | { |
1011 | struct hci_conn_hash *h = &hdev->conn_hash; |
1012 | |
1013 | list_del_rcu(&c->list); |
1014 | synchronize_rcu(); |
1015 | |
1016 | switch (c->type) { |
1017 | case ACL_LINK: |
1018 | h->acl_num--; |
1019 | break; |
1020 | case LE_LINK: |
1021 | h->le_num--; |
1022 | if (c->role == HCI_ROLE_SLAVE) |
1023 | h->le_num_peripheral--; |
1024 | break; |
1025 | case SCO_LINK: |
1026 | case ESCO_LINK: |
1027 | h->sco_num--; |
1028 | break; |
1029 | case CIS_LINK: |
1030 | case BIS_LINK: |
1031 | h->iso_num--; |
1032 | break; |
1033 | } |
1034 | } |
1035 | |
1036 | static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type) |
1037 | { |
1038 | struct hci_conn_hash *h = &hdev->conn_hash; |
1039 | switch (type) { |
1040 | case ACL_LINK: |
1041 | return h->acl_num; |
1042 | case LE_LINK: |
1043 | return h->le_num; |
1044 | case SCO_LINK: |
1045 | case ESCO_LINK: |
1046 | return h->sco_num; |
1047 | case CIS_LINK: |
1048 | case BIS_LINK: |
1049 | return h->iso_num; |
1050 | default: |
1051 | return 0; |
1052 | } |
1053 | } |
1054 | |
1055 | static inline unsigned int hci_conn_count(struct hci_dev *hdev) |
1056 | { |
1057 | struct hci_conn_hash *c = &hdev->conn_hash; |
1058 | |
1059 | return c->acl_num + c->sco_num + c->le_num + c->iso_num; |
1060 | } |
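hci_conn_num() returns the per-type counters maintained by hci_conn_hash_add()/hci_conn_hash_del(), with SCO and eSCO links sharing one counter. A sketch using it to cap concurrent SCO links; sco_link_allowed() and MAX_SCO_LINKS are hypothetical.

```c
/* Illustrative sketch: limit the number of concurrent SCO/eSCO links.
 * sco_link_allowed() and MAX_SCO_LINKS are hypothetical, not part of
 * this header.
 */
#define MAX_SCO_LINKS 1

static bool sco_link_allowed(struct hci_dev *hdev)
{
	/* SCO and eSCO links are counted together in sco_num */
	return hci_conn_num(hdev, SCO_LINK) < MAX_SCO_LINKS;
}
```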
1061 | |
1062 | static inline bool hci_conn_valid(struct hci_dev *hdev, struct hci_conn *conn) |
1063 | { |
1064 | struct hci_conn_hash *h = &hdev->conn_hash; |
1065 | struct hci_conn *c; |
1066 | |
1067 | rcu_read_lock(); |
1068 | |
1069 | list_for_each_entry_rcu(c, &h->list, list) { |
1070 | if (c == conn) { |
1071 | rcu_read_unlock(); |
1072 | return true; |
1073 | } |
1074 | } |
1075 | rcu_read_unlock(); |
1076 | |
1077 | return false; |
1078 | } |
1079 | |
1080 | static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle) |
1081 | { |
1082 | struct hci_conn_hash *h = &hdev->conn_hash; |
1083 | struct hci_conn *c; |
1084 | __u8 type = INVALID_LINK; |
1085 | |
1086 | rcu_read_lock(); |
1087 | |
1088 | list_for_each_entry_rcu(c, &h->list, list) { |
1089 | if (c->handle == handle) { |
1090 | type = c->type; |
1091 | break; |
1092 | } |
1093 | } |
1094 | |
1095 | rcu_read_unlock(); |
1096 | |
1097 | return type; |
1098 | } |
1099 | |
1100 | static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev, |
1101 | bdaddr_t *ba, __u8 bis) |
1102 | { |
1103 | struct hci_conn_hash *h = &hdev->conn_hash; |
1104 | struct hci_conn *c; |
1105 | |
1106 | rcu_read_lock(); |
1107 | |
1108 | list_for_each_entry_rcu(c, &h->list, list) { |
1109 | if (bacmp(&c->dst, ba) || c->type != BIS_LINK) |
1110 | continue; |
1111 | |
1112 | if (c->iso_qos.bcast.bis == bis) { |
1113 | rcu_read_unlock(); |
1114 | return c; |
1115 | } |
1116 | } |
1117 | rcu_read_unlock(); |
1118 | |
1119 | return NULL; |
1120 | } |
1121 | |
1122 | static inline struct hci_conn * |
1123 | hci_conn_hash_lookup_create_pa_sync(struct hci_dev *hdev) |
1124 | { |
1125 | struct hci_conn_hash *h = &hdev->conn_hash; |
1126 | struct hci_conn *c; |
1127 | |
1128 | rcu_read_lock(); |
1129 | |
1130 | list_for_each_entry_rcu(c, &h->list, list) { |
1131 | if (c->type != BIS_LINK) |
1132 | continue; |
1133 | |
1134 | if (!test_bit(HCI_CONN_CREATE_PA_SYNC, &c->flags)) |
1135 | continue; |
1136 | |
1137 | rcu_read_unlock(); |
1138 | return c; |
1139 | } |
1140 | |
1141 | rcu_read_unlock(); |
1142 | |
1143 | return NULL; |
1144 | } |
1145 | |
1146 | static inline struct hci_conn * |
1147 | hci_conn_hash_lookup_per_adv_bis(struct hci_dev *hdev, |
1148 | bdaddr_t *ba, |
1149 | __u8 big, __u8 bis) |
1150 | { |
1151 | struct hci_conn_hash *h = &hdev->conn_hash; |
1152 | struct hci_conn *c; |
1153 | |
1154 | rcu_read_lock(); |
1155 | |
1156 | list_for_each_entry_rcu(c, &h->list, list) { |
1157 | if (bacmp(&c->dst, ba) || c->type != BIS_LINK || |
1158 | !test_bit(HCI_CONN_PER_ADV, &c->flags)) |
1159 | continue; |
1160 | |
1161 | if (c->iso_qos.bcast.big == big && |
1162 | c->iso_qos.bcast.bis == bis) { |
1163 | rcu_read_unlock(); |
1164 | return c; |
1165 | } |
1166 | } |
1167 | rcu_read_unlock(); |
1168 | |
1169 | return NULL; |
1170 | } |
1171 | |
1172 | static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev, |
1173 | __u16 handle) |
1174 | { |
1175 | struct hci_conn_hash *h = &hdev->conn_hash; |
1176 | struct hci_conn *c; |
1177 | |
1178 | rcu_read_lock(); |
1179 | |
1180 | list_for_each_entry_rcu(c, &h->list, list) { |
1181 | if (c->handle == handle) { |
1182 | rcu_read_unlock(); |
1183 | return c; |
1184 | } |
1185 | } |
1186 | rcu_read_unlock(); |
1187 | |
1188 | return NULL; |
1189 | } |
1190 | |
1191 | static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev, |
1192 | __u8 type, bdaddr_t *ba) |
1193 | { |
1194 | struct hci_conn_hash *h = &hdev->conn_hash; |
1195 | struct hci_conn *c; |
1196 | |
1197 | rcu_read_lock(); |
1198 | |
1199 | list_for_each_entry_rcu(c, &h->list, list) { |
1200 | if (c->type == type && !bacmp(&c->dst, ba)) { |
1201 | rcu_read_unlock(); |
1202 | return c; |
1203 | } |
1204 | } |
1205 | |
1206 | rcu_read_unlock(); |
1207 | |
1208 | return NULL; |
1209 | } |
1210 | |
1211 | static inline struct hci_conn *hci_conn_hash_lookup_le(struct hci_dev *hdev, |
1212 | bdaddr_t *ba, |
1213 | __u8 ba_type) |
1214 | { |
1215 | struct hci_conn_hash *h = &hdev->conn_hash; |
1216 | struct hci_conn *c; |
1217 | |
1218 | rcu_read_lock(); |
1219 | |
1220 | list_for_each_entry_rcu(c, &h->list, list) { |
1221 | if (c->type != LE_LINK) |
1222 | continue; |
1223 | |
1224 | if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) { |
1225 | rcu_read_unlock(); |
1226 | return c; |
1227 | } |
1228 | } |
1229 | |
1230 | rcu_read_unlock(); |
1231 | |
1232 | return NULL; |
1233 | } |
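The lookup helpers walk conn_hash under the RCU read lock but return the connection without taking a reference, so callers are expected to hold hdev->lock (or otherwise pin the object) across the lookup. A sketch that finds an LE link by peer address and pins it with hci_conn_hold() (defined further below); find_and_hold_le() is hypothetical and assumes hdev->lock is held.

```c
/* Illustrative sketch: find an LE connection by peer address and take a
 * reference before using it. Assumes the caller holds hdev->lock so the
 * connection cannot go away between lookup and hci_conn_hold().
 * find_and_hold_le() is hypothetical, not part of this header.
 */
static struct hci_conn *find_and_hold_le(struct hci_dev *hdev,
					 bdaddr_t *dst, u8 dst_type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (!conn)
		return NULL;

	hci_conn_hold(conn);
	return conn;
}
```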
1234 | |
1235 | static inline struct hci_conn *hci_conn_hash_lookup_cis(struct hci_dev *hdev, |
1236 | bdaddr_t *ba, |
1237 | __u8 ba_type, |
1238 | __u8 cig, |
1239 | __u8 id) |
1240 | { |
1241 | struct hci_conn_hash *h = &hdev->conn_hash; |
1242 | struct hci_conn *c; |
1243 | |
1244 | rcu_read_lock(); |
1245 | |
1246 | list_for_each_entry_rcu(c, &h->list, list) { |
1247 | if (c->type != CIS_LINK) |
1248 | continue; |
1249 | |
1250 | /* Match CIG ID if set */ |
1251 | if (cig != c->iso_qos.ucast.cig) |
1252 | continue; |
1253 | |
1254 | /* Match CIS ID if set */ |
1255 | if (id != c->iso_qos.ucast.cis) |
1256 | continue; |
1257 | |
1258 | /* Match destination address if set */ |
1259 | if (!ba || (ba_type == c->dst_type && !bacmp(&c->dst, ba))) { |
1260 | rcu_read_unlock(); |
1261 | return c; |
1262 | } |
1263 | } |
1264 | |
1265 | rcu_read_unlock(); |
1266 | |
1267 | return NULL; |
1268 | } |
1269 | |
1270 | static inline struct hci_conn *hci_conn_hash_lookup_cig(struct hci_dev *hdev, |
1271 | __u8 handle) |
1272 | { |
1273 | struct hci_conn_hash *h = &hdev->conn_hash; |
1274 | struct hci_conn *c; |
1275 | |
1276 | rcu_read_lock(); |
1277 | |
1278 | list_for_each_entry_rcu(c, &h->list, list) { |
1279 | if (c->type != CIS_LINK) |
1280 | continue; |
1281 | |
1282 | if (handle == c->iso_qos.ucast.cig) { |
1283 | rcu_read_unlock(); |
1284 | return c; |
1285 | } |
1286 | } |
1287 | |
1288 | rcu_read_unlock(); |
1289 | |
1290 | return NULL; |
1291 | } |
1292 | |
1293 | static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev, |
1294 | __u8 handle) |
1295 | { |
1296 | struct hci_conn_hash *h = &hdev->conn_hash; |
1297 | struct hci_conn *c; |
1298 | |
1299 | rcu_read_lock(); |
1300 | |
1301 | list_for_each_entry_rcu(c, &h->list, list) { |
1302 | if (c->type != BIS_LINK) |
1303 | continue; |
1304 | |
1305 | if (handle == c->iso_qos.bcast.big) { |
1306 | rcu_read_unlock(); |
1307 | return c; |
1308 | } |
1309 | } |
1310 | |
1311 | rcu_read_unlock(); |
1312 | |
1313 | return NULL; |
1314 | } |
1315 | |
1316 | static inline struct hci_conn * |
1317 | hci_conn_hash_lookup_big_sync_pend(struct hci_dev *hdev, |
1318 | __u8 handle, __u8 num_bis) |
1319 | { |
1320 | struct hci_conn_hash *h = &hdev->conn_hash; |
1321 | struct hci_conn *c; |
1322 | |
1323 | rcu_read_lock(); |
1324 | |
1325 | list_for_each_entry_rcu(c, &h->list, list) { |
1326 | if (c->type != BIS_LINK) |
1327 | continue; |
1328 | |
1329 | if (handle == c->iso_qos.bcast.big && num_bis == c->num_bis) { |
1330 | rcu_read_unlock(); |
1331 | return c; |
1332 | } |
1333 | } |
1334 | |
1335 | rcu_read_unlock(); |
1336 | |
1337 | return NULL; |
1338 | } |
1339 | |
1340 | static inline struct hci_conn * |
1341 | hci_conn_hash_lookup_big_state(struct hci_dev *hdev, __u8 handle, __u16 state) |
1342 | { |
1343 | struct hci_conn_hash *h = &hdev->conn_hash; |
1344 | struct hci_conn *c; |
1345 | |
1346 | rcu_read_lock(); |
1347 | |
1348 | list_for_each_entry_rcu(c, &h->list, list) { |
1349 | if (c->type != BIS_LINK || bacmp(&c->dst, BDADDR_ANY) || |
1350 | c->state != state) |
1351 | continue; |
1352 | |
1353 | if (handle == c->iso_qos.bcast.big) { |
1354 | rcu_read_unlock(); |
1355 | return c; |
1356 | } |
1357 | } |
1358 | |
1359 | rcu_read_unlock(); |
1360 | |
1361 | return NULL; |
1362 | } |
1363 | |
1364 | static inline struct hci_conn * |
1365 | hci_conn_hash_lookup_pa_sync_big_handle(struct hci_dev *hdev, __u8 big) |
1366 | { |
1367 | struct hci_conn_hash *h = &hdev->conn_hash; |
1368 | struct hci_conn *c; |
1369 | |
1370 | rcu_read_lock(); |
1371 | |
1372 | list_for_each_entry_rcu(c, &h->list, list) { |
1373 | if (c->type != BIS_LINK || |
1374 | !test_bit(HCI_CONN_PA_SYNC, &c->flags)) |
1375 | continue; |
1376 | |
1377 | if (c->iso_qos.bcast.big == big) { |
1378 | rcu_read_unlock(); |
1379 | return c; |
1380 | } |
1381 | } |
1382 | rcu_read_unlock(); |
1383 | |
1384 | return NULL; |
1385 | } |
1386 | |
1387 | static inline struct hci_conn * |
1388 | hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle) |
1389 | { |
1390 | struct hci_conn_hash *h = &hdev->conn_hash; |
1391 | struct hci_conn *c; |
1392 | |
1393 | rcu_read_lock(); |
1394 | |
1395 | list_for_each_entry_rcu(c, &h->list, list) { |
1396 | if (c->type != BIS_LINK) |
1397 | continue; |
1398 | |
1399 | /* Ignore the listen hcon, we are looking |
1400 | * for the child hcon that was created as |
1401 | * a result of the PA sync established event. |
1402 | */ |
1403 | if (c->state == BT_LISTEN) |
1404 | continue; |
1405 | |
1406 | if (c->sync_handle == sync_handle) { |
1407 | rcu_read_unlock(); |
1408 | return c; |
1409 | } |
1410 | } |
1411 | rcu_read_unlock(); |
1412 | |
1413 | return NULL; |
1414 | } |
1415 | |
1416 | static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev, |
1417 | __u8 type, __u16 state) |
1418 | { |
1419 | struct hci_conn_hash *h = &hdev->conn_hash; |
1420 | struct hci_conn *c; |
1421 | |
1422 | rcu_read_lock(); |
1423 | |
1424 | list_for_each_entry_rcu(c, &h->list, list) { |
1425 | if (c->type == type && c->state == state) { |
1426 | rcu_read_unlock(); |
1427 | return c; |
1428 | } |
1429 | } |
1430 | |
1431 | rcu_read_unlock(); |
1432 | |
1433 | return NULL; |
1434 | } |
1435 | |
1436 | typedef void (*hci_conn_func_t)(struct hci_conn *conn, void *data); |
1437 | static inline void hci_conn_hash_list_state(struct hci_dev *hdev, |
1438 | hci_conn_func_t func, __u8 type, |
1439 | __u16 state, void *data) |
1440 | { |
1441 | struct hci_conn_hash *h = &hdev->conn_hash; |
1442 | struct hci_conn *c; |
1443 | |
1444 | if (!func) |
1445 | return; |
1446 | |
1447 | rcu_read_lock(); |
1448 | |
1449 | list_for_each_entry_rcu(c, &h->list, list) { |
1450 | if (c->type == type && c->state == state) |
1451 | func(c, data); |
1452 | } |
1453 | |
1454 | rcu_read_unlock(); |
1455 | } |
1456 | |
1457 | static inline void hci_conn_hash_list_flag(struct hci_dev *hdev, |
1458 | hci_conn_func_t func, __u8 type, |
1459 | __u8 flag, void *data) |
1460 | { |
1461 | struct hci_conn_hash *h = &hdev->conn_hash; |
1462 | struct hci_conn *c; |
1463 | |
1464 | if (!func) |
1465 | return; |
1466 | |
1467 | rcu_read_lock(); |
1468 | |
1469 | list_for_each_entry_rcu(c, &h->list, list) { |
1470 | if (c->type == type && test_bit(flag, &c->flags)) |
1471 | func(c, data); |
1472 | } |
1473 | |
1474 | rcu_read_unlock(); |
1475 | } |
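Both iterators invoke the supplied hci_conn_func_t for every matching connection while holding the RCU read lock. A sketch that counts established ACL links; count_cb() and count_connected_acl() are hypothetical.

```c
/* Illustrative sketch: count established ACL links using the state
 * iterator. count_cb() and count_connected_acl() are hypothetical,
 * not part of this header.
 */
static void count_cb(struct hci_conn *conn, void *data)
{
	unsigned int *count = data;

	(*count)++;
}

static unsigned int count_connected_acl(struct hci_dev *hdev)
{
	unsigned int count = 0;

	hci_conn_hash_list_state(hdev, count_cb, ACL_LINK, BT_CONNECTED,
				 &count);
	return count;
}
```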
1476 | |
1477 | static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev) |
1478 | { |
1479 | struct hci_conn_hash *h = &hdev->conn_hash; |
1480 | struct hci_conn *c; |
1481 | |
1482 | rcu_read_lock(); |
1483 | |
1484 | list_for_each_entry_rcu(c, &h->list, list) { |
1485 | if (c->type == LE_LINK && c->state == BT_CONNECT && |
1486 | !test_bit(HCI_CONN_SCANNING, &c->flags)) { |
1487 | rcu_read_unlock(); |
1488 | return c; |
1489 | } |
1490 | } |
1491 | |
1492 | rcu_read_unlock(); |
1493 | |
1494 | return NULL; |
1495 | } |
1496 | |
1497 | /* Returns true if an le connection is in the scanning state */ |
1498 | static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev) |
1499 | { |
1500 | struct hci_conn_hash *h = &hdev->conn_hash; |
1501 | struct hci_conn *c; |
1502 | |
1503 | rcu_read_lock(); |
1504 | |
1505 | list_for_each_entry_rcu(c, &h->list, list) { |
1506 | if (c->type == LE_LINK && c->state == BT_CONNECT && |
1507 | test_bit(HCI_CONN_SCANNING, &c->flags)) { |
1508 | rcu_read_unlock(); |
1509 | return true; |
1510 | } |
1511 | } |
1512 | |
1513 | rcu_read_unlock(); |
1514 | |
1515 | return false; |
1516 | } |
1517 | |
1518 | int hci_disconnect(struct hci_conn *conn, __u8 reason); |
1519 | bool hci_setup_sync(struct hci_conn *conn, __u16 handle); |
1520 | void hci_sco_setup(struct hci_conn *conn, __u8 status); |
1521 | bool hci_iso_setup_path(struct hci_conn *conn); |
1522 | int hci_le_create_cis_pending(struct hci_dev *hdev); |
1523 | int hci_conn_check_create_cis(struct hci_conn *conn); |
1524 | |
1525 | struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, |
1526 | u8 role, u16 handle); |
1527 | struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type, |
1528 | bdaddr_t *dst, u8 role); |
1529 | void hci_conn_del(struct hci_conn *conn); |
1530 | void hci_conn_hash_flush(struct hci_dev *hdev); |
1531 | |
1532 | struct hci_chan *hci_chan_create(struct hci_conn *conn); |
1533 | void hci_chan_del(struct hci_chan *chan); |
1534 | void hci_chan_list_flush(struct hci_conn *conn); |
1535 | struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle); |
1536 | |
1537 | struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, |
1538 | u8 dst_type, u8 sec_level, |
1539 | u16 conn_timeout, |
1540 | enum conn_reasons conn_reason); |
1541 | struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, |
1542 | u8 dst_type, bool dst_resolved, u8 sec_level, |
1543 | u16 conn_timeout, u8 role, u8 phy, u8 sec_phy); |
1544 | void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status); |
1545 | struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, |
1546 | u8 sec_level, u8 auth_type, |
1547 | enum conn_reasons conn_reason, u16 timeout); |
1548 | struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, |
1549 | __u16 setting, struct bt_codec *codec, |
1550 | u16 timeout); |
1551 | struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst, |
1552 | __u8 dst_type, struct bt_iso_qos *qos); |
1553 | struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst, |
1554 | struct bt_iso_qos *qos, |
1555 | __u8 base_len, __u8 *base); |
1556 | struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst, |
1557 | __u8 dst_type, struct bt_iso_qos *qos); |
1558 | struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst, |
1559 | __u8 dst_type, struct bt_iso_qos *qos, |
1560 | __u8 data_len, __u8 *data); |
1561 | struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, |
1562 | __u8 dst_type, __u8 sid, struct bt_iso_qos *qos); |
1563 | int hci_conn_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon, |
1564 | struct bt_iso_qos *qos, __u16 sync_handle, |
1565 | __u8 num_bis, __u8 bis[]); |
1566 | int hci_conn_check_link_mode(struct hci_conn *conn); |
1567 | int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level); |
1568 | int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type, |
1569 | bool initiator); |
1570 | int hci_conn_switch_role(struct hci_conn *conn, __u8 role); |
1571 | |
1572 | void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active); |
1573 | |
1574 | void hci_conn_failed(struct hci_conn *conn, u8 status); |
1575 | u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle); |
1576 | |
1577 | void hci_conn_tx_queue(struct hci_conn *conn, struct sk_buff *skb); |
1578 | void hci_conn_tx_dequeue(struct hci_conn *conn); |
1579 | void hci_setup_tx_timestamp(struct sk_buff *skb, size_t key_offset, |
1580 | const struct sockcm_cookie *sockc); |
1581 | |
1582 | static inline void hci_sockcm_init(struct sockcm_cookie *sockc, struct sock *sk) |
1583 | { |
1584 | *sockc = (struct sockcm_cookie) { |
1585 | .tsflags = READ_ONCE(sk->sk_tsflags), |
1586 | }; |
1587 | } |
1588 | |
1589 | /* |
1590 | * hci_conn_get() and hci_conn_put() are used to control the life-time of an |
1591 | * "hci_conn" object. They do not guarantee that the hci_conn object is running, |
1592 | * working or anything else. They just guarantee that the object is available |
1593 | * and can be dereferenced. So you can use its locks, local variables and any |
1594 | * other constant data. |
1595 | * Before accessing runtime data, you _must_ lock the object and then check that |
1596 | * it is still running. As soon as you release the locks, the connection might |
1597 | * get dropped, though. |
1598 | * |
1599 | * On the other hand, hci_conn_hold() and hci_conn_drop() are used to control |
1600 | * how long the underlying connection is held. So every channel that runs on the |
1601 | * hci_conn object calls this to prevent the connection from disappearing. As |
1602 | * long as you hold a device, you must also guarantee that you have a valid |
1603 | * reference to the device via hci_conn_get() (or the initial reference from |
1604 | * hci_conn_add()). |
1605 | * The hold()/drop() ref-count is known to drop below 0 sometimes, which doesn't |
1606 | * break because nobody cares for that. But this means, we cannot use |
1607 | * _get()/_drop() in it, but require the caller to have a valid ref (FIXME). |
1608 | */ |
1609 | |
1610 | static inline struct hci_conn *hci_conn_get(struct hci_conn *conn) |
1611 | { |
1612 | get_device(&conn->dev); |
1613 | return conn; |
1614 | } |
1615 | |
1616 | static inline void hci_conn_put(struct hci_conn *conn) |
1617 | { |
1618 | put_device(&conn->dev); |
1619 | } |
1620 | |
1621 | static inline struct hci_conn *hci_conn_hold(struct hci_conn *conn) |
1622 | { |
1623 | BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt)); |
1624 | |
1625 | atomic_inc(&conn->refcnt); |
1626 | cancel_delayed_work(&conn->disc_work); |
1627 | |
1628 | return conn; |
1629 | } |
1630 | |
1631 | static inline void hci_conn_drop(struct hci_conn *conn) |
1632 | { |
1633 | BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt)); |
1634 | |
1635 | if (atomic_dec_and_test(&conn->refcnt)) { |
1636 | unsigned long timeo; |
1637 | |
1638 | switch (conn->type) { |
1639 | case ACL_LINK: |
1640 | case LE_LINK: |
1641 | cancel_delayed_work(&conn->idle_work); |
1642 | if (conn->state == BT_CONNECTED) { |
1643 | timeo = conn->disc_timeout; |
1644 | if (!conn->out) |
1645 | timeo *= 2; |
1646 | } else { |
1647 | timeo = 0; |
1648 | } |
1649 | break; |
1650 | |
1651 | default: |
1652 | timeo = 0; |
1653 | break; |
1654 | } |
1655 | |
1656 | cancel_delayed_work(&conn->disc_work); |
1657 | queue_delayed_work(conn->hdev->workqueue, |
1658 | &conn->disc_work, timeo); |
1659 | } |
1660 | } |
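To illustrate the distinction drawn in the comment above: hold()/drop() keeps the baseband link alive while a user such as a protocol channel is bound to it, whereas get()/put() only keeps the object dereferencable. A hedged sketch of the typical hold/drop pairing; chan_bind() and chan_unbind() are hypothetical.

```c
/* Illustrative sketch: a protocol channel keeping the baseband link up
 * while it is bound to the connection, then releasing it. chan_bind()
 * and chan_unbind() are hypothetical, not part of this header.
 */
static void chan_bind(struct hci_conn *conn)
{
	/* Keep the link from being disconnected while the channel lives */
	hci_conn_hold(conn);
}

static void chan_unbind(struct hci_conn *conn)
{
	/* The last drop schedules disc_work, which tears the link down
	 * after conn->disc_timeout.
	 */
	hci_conn_drop(conn);
}
```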
1661 | |
1662 | /* ----- HCI Devices ----- */ |
1663 | static inline void hci_dev_put(struct hci_dev *d) |
1664 | { |
1665 | BT_DBG("%s orig refcnt %d", d->name, |
1666 | kref_read(&d->dev.kobj.kref)); |
1667 | |
1668 | put_device(&d->dev); |
1669 | } |
1670 | |
1671 | static inline struct hci_dev *hci_dev_hold(struct hci_dev *d) |
1672 | { |
1673 | BT_DBG("%s orig refcnt %d", d->name, |
1674 | kref_read(&d->dev.kobj.kref)); |
1675 | |
1676 | get_device(&d->dev); |
1677 | return d; |
1678 | } |
1679 | |
1680 | #define hci_dev_lock(d) mutex_lock(&d->lock) |
1681 | #define hci_dev_unlock(d) mutex_unlock(&d->lock) |
1682 | |
1683 | #define to_hci_dev(d) container_of(d, struct hci_dev, dev) |
1684 | #define to_hci_conn(c) container_of(c, struct hci_conn, dev) |
1685 | |
1686 | static inline void *hci_get_drvdata(struct hci_dev *hdev) |
1687 | { |
1688 | return dev_get_drvdata(&hdev->dev); |
1689 | } |
1690 | |
1691 | static inline void hci_set_drvdata(struct hci_dev *hdev, void *data) |
1692 | { |
1693 | dev_set_drvdata(&hdev->dev, data); |
1694 | } |
1695 | |
1696 | static inline void *hci_get_priv(struct hci_dev *hdev) |
1697 | { |
1698 | return (char *)hdev + sizeof(*hdev); |
1699 | } |
1700 | |
1701 | struct hci_dev *hci_dev_get(int index); |
1702 | struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, u8 src_type); |
1703 | |
1704 | struct hci_dev *hci_alloc_dev_priv(int sizeof_priv); |
1705 | |
1706 | static inline struct hci_dev *hci_alloc_dev(void) |
1707 | { |
1708 | return hci_alloc_dev_priv(0); |
1709 | } |
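hci_alloc_dev_priv() allocates the hci_dev together with a driver-private area that hci_get_priv() returns (the memory immediately after the structure). A sketch of a driver allocating both and stashing the pointer with hci_set_drvdata(); struct my_drv_data and my_alloc() are hypothetical.

```c
/* Illustrative sketch: allocate an hci_dev with driver-private storage.
 * struct my_drv_data and my_alloc() are hypothetical, not part of this
 * header.
 */
struct my_drv_data {
	struct hci_dev *hdev;
	int irq;
};

static struct hci_dev *my_alloc(void)
{
	struct hci_dev *hdev;
	struct my_drv_data *priv;

	hdev = hci_alloc_dev_priv(sizeof(struct my_drv_data));
	if (!hdev)
		return NULL;

	priv = hci_get_priv(hdev);	/* memory right after *hdev */
	priv->hdev = hdev;

	hci_set_drvdata(hdev, priv);	/* retrieved via hci_get_drvdata() */
	return hdev;
}
```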
1710 | |
1711 | void hci_free_dev(struct hci_dev *hdev); |
1712 | int hci_register_dev(struct hci_dev *hdev); |
1713 | void hci_unregister_dev(struct hci_dev *hdev); |
1714 | void hci_release_dev(struct hci_dev *hdev); |
1715 | int hci_register_suspend_notifier(struct hci_dev *hdev); |
1716 | int hci_unregister_suspend_notifier(struct hci_dev *hdev); |
1717 | int hci_suspend_dev(struct hci_dev *hdev); |
1718 | int hci_resume_dev(struct hci_dev *hdev); |
1719 | int hci_reset_dev(struct hci_dev *hdev); |
1720 | int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb); |
1721 | int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb); |
1722 | __printf(2, 3) void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...); |
1723 | __printf(2, 3) void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...); |
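Registration follows the usual pattern: allocate the device, fill in the callbacks declared in struct hci_dev, then call hci_register_dev(). Below is a minimal sketch of a virtual driver's probe path; my_probe(), my_open(), my_close() and my_send() are hypothetical, and the HCI_VIRTUAL bus type is only an illustrative choice.

```c
/* Illustrative sketch: minimal driver registration. All my_*() functions
 * are hypothetical driver callbacks, not part of this header.
 */
static int my_open(struct hci_dev *hdev) { return 0; }
static int my_close(struct hci_dev *hdev) { return 0; }

static int my_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	kfree_skb(skb);	/* a real driver would hand the frame to hardware */
	return 0;
}

static int my_probe(struct device *parent)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_VIRTUAL;
	hdev->open = my_open;
	hdev->close = my_close;
	hdev->send = my_send;
	SET_HCIDEV_DEV(hdev, parent);

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}
```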
1724 | |
1725 | static inline void hci_set_msft_opcode(struct hci_dev *hdev, __u16 opcode) |
1726 | { |
1727 | #if IS_ENABLED(CONFIG_BT_MSFTEXT) |
1728 | hdev->msft_opcode = opcode; |
1729 | #endif |
1730 | } |
1731 | |
1732 | static inline void hci_set_aosp_capable(struct hci_dev *hdev) |
1733 | { |
1734 | #if IS_ENABLED(CONFIG_BT_AOSPEXT) |
1735 | hdev->aosp_capable = true; |
1736 | #endif |
1737 | } |
1738 | |
1739 | static inline void hci_devcd_setup(struct hci_dev *hdev) |
1740 | { |
1741 | #ifdef CONFIG_DEV_COREDUMP |
1742 | INIT_WORK(&hdev->dump.dump_rx, hci_devcd_rx); |
1743 | INIT_DELAYED_WORK(&hdev->dump.dump_timeout, hci_devcd_timeout); |
1744 | skb_queue_head_init(&hdev->dump.dump_q); |
1745 | #endif |
1746 | } |
1747 | |
1748 | int hci_dev_open(__u16 dev); |
1749 | int hci_dev_close(__u16 dev); |
1750 | int hci_dev_do_close(struct hci_dev *hdev); |
1751 | int hci_dev_reset(__u16 dev); |
1752 | int hci_dev_reset_stat(__u16 dev); |
1753 | int hci_dev_cmd(unsigned int cmd, void __user *arg); |
1754 | int hci_get_dev_list(void __user *arg); |
1755 | int hci_get_dev_info(void __user *arg); |
1756 | int hci_get_conn_list(void __user *arg); |
1757 | int hci_get_conn_info(struct hci_dev *hdev, void __user *arg); |
1758 | int hci_get_auth_info(struct hci_dev *hdev, void __user *arg); |
1759 | int hci_inquiry(void __user *arg); |
1760 | |
1761 | struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *list, |
1762 | bdaddr_t *bdaddr, u8 type); |
1763 | struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk( |
1764 | struct list_head *list, bdaddr_t *bdaddr, |
1765 | u8 type); |
1766 | struct bdaddr_list_with_flags * |
1767 | hci_bdaddr_list_lookup_with_flags(struct list_head *list, bdaddr_t *bdaddr, |
1768 | u8 type); |
1769 | int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type); |
1770 | int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr, |
1771 | u8 type, u8 *peer_irk, u8 *local_irk); |
1772 | int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr, |
1773 | u8 type, u32 flags); |
1774 | int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type); |
1775 | int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr, |
1776 | u8 type); |
1777 | void hci_bdaddr_list_clear(struct list_head *list); |
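The bdaddr_list helpers implement simple lists keyed by (bdaddr, type), such as hdev->le_accept_list. A sketch that adds a peer only if it is not already present, assuming hdev->lock is held; accept_list_add() is hypothetical.

```c
/* Illustrative sketch: add a peer to the LE accept list unless it is
 * already there. Assumes the caller holds hdev->lock;
 * accept_list_add() is hypothetical, not part of this header.
 */
static int accept_list_add(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, addr, type))
		return 0;	/* already present */

	return hci_bdaddr_list_add(&hdev->le_accept_list, addr, type);
}
```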
1778 | |
1779 | struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, |
1780 | bdaddr_t *addr, u8 addr_type); |
1781 | struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev, |
1782 | bdaddr_t *addr, u8 addr_type); |
1783 | void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type); |
1784 | void hci_conn_params_clear_disabled(struct hci_dev *hdev); |
1785 | void hci_conn_params_free(struct hci_conn_params *param); |
1786 | |
1787 | void hci_pend_le_list_del_init(struct hci_conn_params *param); |
1788 | void hci_pend_le_list_add(struct hci_conn_params *param, |
1789 | struct list_head *list); |
1790 | struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list, |
1791 | bdaddr_t *addr, |
1792 | u8 addr_type); |
1793 | |
1794 | void hci_uuids_clear(struct hci_dev *hdev); |
1795 | |
1796 | void hci_link_keys_clear(struct hci_dev *hdev); |
1797 | u8 *hci_conn_key_enc_size(struct hci_conn *conn); |
1798 | struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr); |
1799 | struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, |
1800 | bdaddr_t *bdaddr, u8 *val, u8 type, |
1801 | u8 pin_len, bool *persistent); |
1802 | struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, |
1803 | u8 addr_type, u8 type, u8 authenticated, |
1804 | u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand); |
1805 | struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, |
1806 | u8 addr_type, u8 role); |
1807 | int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type); |
1808 | void hci_smp_ltks_clear(struct hci_dev *hdev); |
1809 | int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr); |
1810 | |
1811 | struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa); |
1812 | struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, |
1813 | u8 addr_type); |
1814 | struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, |
1815 | u8 addr_type, u8 val[16], bdaddr_t *rpa); |
1816 | void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type); |
1817 | bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16]); |
1818 | void hci_blocked_keys_clear(struct hci_dev *hdev); |
1819 | void hci_smp_irks_clear(struct hci_dev *hdev); |
1820 | |
1821 | bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); |
1822 | |
1823 | void hci_remote_oob_data_clear(struct hci_dev *hdev); |
1824 | struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, |
1825 | bdaddr_t *bdaddr, u8 bdaddr_type); |
1826 | int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, |
1827 | u8 bdaddr_type, u8 *hash192, u8 *rand192, |
1828 | u8 *hash256, u8 *rand256); |
1829 | int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, |
1830 | u8 bdaddr_type); |
1831 | |
1832 | void hci_adv_instances_clear(struct hci_dev *hdev); |
1833 | struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance); |
1834 | struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance); |
1835 | struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance, |
1836 | u32 flags, u16 adv_data_len, u8 *adv_data, |
1837 | u16 scan_rsp_len, u8 *scan_rsp_data, |
1838 | u16 timeout, u16 duration, s8 tx_power, |
1839 | u32 min_interval, u32 max_interval, |
1840 | u8 mesh_handle); |
1841 | struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance, |
1842 | u32 flags, u8 data_len, u8 *data, |
1843 | u32 min_interval, u32 max_interval); |
1844 | int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance, |
1845 | u16 adv_data_len, u8 *adv_data, |
1846 | u16 scan_rsp_len, u8 *scan_rsp_data); |
1847 | int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance); |
1848 | void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired); |
1849 | u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance); |
1850 | bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance); |
1851 | |
1852 | void hci_adv_monitors_clear(struct hci_dev *hdev); |
1853 | void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); |
1854 | int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); |
1855 | int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle); |
1856 | int hci_remove_all_adv_monitor(struct hci_dev *hdev); |
1857 | bool hci_is_adv_monitoring(struct hci_dev *hdev); |
1858 | int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev); |
1859 | |
1860 | void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); |
1861 | |
1862 | void hci_init_sysfs(struct hci_dev *hdev); |
1863 | void hci_conn_init_sysfs(struct hci_conn *conn); |
1864 | void hci_conn_add_sysfs(struct hci_conn *conn); |
1865 | void hci_conn_del_sysfs(struct hci_conn *conn); |
1866 | |
1867 | #define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->dev.parent = (pdev)) |
1868 | #define GET_HCIDEV_DEV(hdev) ((hdev)->dev.parent) |
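/* Hedged sketch of how a transport driver typically uses SET_HCIDEV_DEV in
 * its probe path ("intf" and the error handling are assumed, not defined
 * here); the macro only records the sysfs parent of the new hci device:
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	SET_HCIDEV_DEV(hdev, &intf->dev);
 *	err = hci_register_dev(hdev);
 */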
1869 | |
1870 | /* ----- LMP capabilities ----- */ |
1871 | #define lmp_encrypt_capable(dev) ((dev)->features[0][0] & LMP_ENCRYPT) |
1872 | #define lmp_rswitch_capable(dev) ((dev)->features[0][0] & LMP_RSWITCH) |
1873 | #define lmp_hold_capable(dev) ((dev)->features[0][0] & LMP_HOLD) |
1874 | #define lmp_sniff_capable(dev) ((dev)->features[0][0] & LMP_SNIFF) |
1875 | #define lmp_park_capable(dev) ((dev)->features[0][1] & LMP_PARK) |
1876 | #define lmp_sco_capable(dev) ((dev)->features[0][1] & LMP_SCO) |
1877 | #define lmp_inq_rssi_capable(dev) ((dev)->features[0][3] & LMP_RSSI_INQ) |
1878 | #define lmp_esco_capable(dev) ((dev)->features[0][3] & LMP_ESCO) |
1879 | #define lmp_bredr_capable(dev) (!((dev)->features[0][4] & LMP_NO_BREDR)) |
1880 | #define lmp_le_capable(dev) ((dev)->features[0][4] & LMP_LE) |
1881 | #define lmp_sniffsubr_capable(dev) ((dev)->features[0][5] & LMP_SNIFF_SUBR) |
1882 | #define lmp_pause_enc_capable(dev) ((dev)->features[0][5] & LMP_PAUSE_ENC) |
1883 | #define lmp_esco_2m_capable(dev) ((dev)->features[0][5] & LMP_EDR_ESCO_2M) |
1884 | #define lmp_ext_inq_capable(dev) ((dev)->features[0][6] & LMP_EXT_INQ) |
1885 | #define lmp_le_br_capable(dev) (!!((dev)->features[0][6] & LMP_SIMUL_LE_BR)) |
1886 | #define lmp_ssp_capable(dev) ((dev)->features[0][6] & LMP_SIMPLE_PAIR) |
1887 | #define lmp_no_flush_capable(dev) ((dev)->features[0][6] & LMP_NO_FLUSH) |
1888 | #define lmp_lsto_capable(dev) ((dev)->features[0][7] & LMP_LSTO) |
1889 | #define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR) |
1890 | #define lmp_ext_feat_capable(dev) ((dev)->features[0][7] & LMP_EXTFEATURES) |
1891 | #define lmp_transp_capable(dev) ((dev)->features[0][2] & LMP_TRANSPARENT) |
1892 | #define lmp_edr_2m_capable(dev) ((dev)->features[0][3] & LMP_EDR_2M) |
1893 | #define lmp_edr_3m_capable(dev) ((dev)->features[0][3] & LMP_EDR_3M) |
1894 | #define lmp_edr_3slot_capable(dev) ((dev)->features[0][4] & LMP_EDR_3SLOT) |
1895 | #define lmp_edr_5slot_capable(dev) ((dev)->features[0][5] & LMP_EDR_5SLOT) |
1896 | |
1897 | /* ----- Extended LMP capabilities ----- */ |
1898 | #define lmp_cpb_central_capable(dev) ((dev)->features[2][0] & LMP_CPB_CENTRAL) |
1899 | #define lmp_cpb_peripheral_capable(dev) ((dev)->features[2][0] & LMP_CPB_PERIPHERAL) |
1900 | #define lmp_sync_train_capable(dev) ((dev)->features[2][0] & LMP_SYNC_TRAIN) |
1901 | #define lmp_sync_scan_capable(dev) ((dev)->features[2][0] & LMP_SYNC_SCAN) |
1902 | #define lmp_sc_capable(dev) ((dev)->features[2][1] & LMP_SC) |
1903 | #define lmp_ping_capable(dev) ((dev)->features[2][1] & LMP_PING) |
1904 | |
1905 | /* ----- Host capabilities ----- */ |
1906 | #define lmp_host_ssp_capable(dev) ((dev)->features[1][0] & LMP_HOST_SSP) |
1907 | #define lmp_host_sc_capable(dev) ((dev)->features[1][0] & LMP_HOST_SC) |
1908 | #define lmp_host_le_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE)) |
1909 | #define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR)) |
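/* Controller support (feature page 0) and host support (feature page 1) are
 * usually tested together; a minimal example of the common pattern:
 *
 *	if (lmp_le_capable(hdev) && lmp_host_le_capable(hdev))
 *		... LE is supported by the controller and enabled by the host ...
 */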
1910 | |
1911 | #define hdev_is_powered(dev) (test_bit(HCI_UP, &(dev)->flags) && \ |
1912 | !hci_dev_test_flag(dev, HCI_AUTO_OFF)) |
1913 | #define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \ |
1914 | hci_dev_test_flag(dev, HCI_SC_ENABLED)) |
1915 | #define rpa_valid(dev) (bacmp(&dev->rpa, BDADDR_ANY) && \ |
1916 | !hci_dev_test_flag(dev, HCI_RPA_EXPIRED)) |
1917 | #define adv_rpa_valid(adv) (bacmp(&adv->random_addr, BDADDR_ANY) && \ |
1918 | !adv->rpa_expired) |
1919 | |
1920 | #define scan_1m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_1M) || \ |
1921 | ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_1M)) |
1922 | |
1923 | #define le_2m_capable(dev) (((dev)->le_features[1] & HCI_LE_PHY_2M)) |
1924 | |
1925 | #define scan_2m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_2M) || \ |
1926 | ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_2M)) |
1927 | |
1928 | #define le_coded_capable(dev) (((dev)->le_features[1] & HCI_LE_PHY_CODED) && \ |
1929 | !test_bit(HCI_QUIRK_BROKEN_LE_CODED, \ |
1930 | &(dev)->quirks)) |
1931 | |
1932 | #define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \ |
1933 | ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED)) |
1934 | |
1935 | #define ll_privacy_capable(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY) |
1936 | |
1937 | #define privacy_mode_capable(dev) (ll_privacy_capable(dev) && \ |
1938 | ((dev)->commands[39] & 0x04)) |
1939 | |
1940 | #define read_key_size_capable(dev) \ |
1941 | ((dev)->commands[20] & 0x10 && \ |
1942 | !test_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &(dev)->quirks)) |
1943 | |
1944 | #define read_voice_setting_capable(dev) \ |
1945 | ((dev)->commands[9] & 0x04 && \ |
1946 | !test_bit(HCI_QUIRK_BROKEN_READ_VOICE_SETTING, &(dev)->quirks)) |
1947 | |
1948 | /* Use enhanced synchronous connection if command is supported and its quirk |
1949 | * has not been set. |
1950 | */ |
1951 | #define enhanced_sync_conn_capable(dev) \ |
1952 | (((dev)->commands[29] & 0x08) && \ |
1953 | !test_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &(dev)->quirks)) |
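/* A driver whose firmware mishandles HCI_Enhanced_Setup_Synchronous_Connection
 * can opt out by setting the quirk before registering the device, e.g.
 * (sketch, assuming the usual set_bit() pattern on hdev->quirks):
 *
 *	set_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &hdev->quirks);
 */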
1954 | |
1955 | /* Use ext scanning if the set ext scan param and ext scan enable commands are supported */ |
1956 | #define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \ |
1957 | ((dev)->commands[37] & 0x40) && \ |
1958 | !test_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &(dev)->quirks)) |
1959 | |
1960 | /* Use ext create connection if command is supported */ |
1961 | #define use_ext_conn(dev) (((dev)->commands[37] & 0x80) && \ |
1962 | !test_bit(HCI_QUIRK_BROKEN_EXT_CREATE_CONN, &(dev)->quirks)) |
1963 | /* Extended advertising support */ |
1964 | #define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV)) |
1965 | |
1966 | /* Maximum advertising length */ |
1967 | #define max_adv_len(dev) \ |
1968 | (ext_adv_capable(dev) ? HCI_MAX_EXT_AD_LENGTH : HCI_MAX_AD_LENGTH) |
1969 | |
1970 | /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 1789: |
1971 | * |
1972 | * C24: Mandatory if the LE Controller supports Connection State and either |
1973 | * LE Feature (LL Privacy) or LE Feature (Extended Advertising) is supported |
1974 | */ |
1975 | #define use_enhanced_conn_complete(dev) ((ll_privacy_capable(dev) || \ |
1976 | ext_adv_capable(dev)) && \ |
1977 | !test_bit(HCI_QUIRK_BROKEN_EXT_CREATE_CONN, \ |
1978 | &(dev)->quirks)) |
1979 | |
1980 | /* Periodic advertising support */ |
1981 | #define per_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_PERIODIC_ADV)) |
1982 | |
1983 | /* CIS Central/Peripheral and BIS support */ |
1984 | #define iso_capable(dev) (cis_capable(dev) || bis_capable(dev)) |
1985 | #define cis_capable(dev) \ |
1986 | (cis_central_capable(dev) || cis_peripheral_capable(dev)) |
1987 | #define cis_central_capable(dev) \ |
1988 | ((dev)->le_features[3] & HCI_LE_CIS_CENTRAL) |
1989 | #define cis_peripheral_capable(dev) \ |
1990 | ((dev)->le_features[3] & HCI_LE_CIS_PERIPHERAL) |
1991 | #define bis_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_BROADCASTER) |
1992 | #define sync_recv_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_SYNC_RECEIVER) |
1993 | |
1994 | #define mws_transport_config_capable(dev) (((dev)->commands[30] & 0x08) && \ |
1995 | (!test_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &(dev)->quirks))) |
1996 | |
1997 | /* ----- HCI protocols ----- */ |
1998 | #define HCI_PROTO_DEFER 0x01 |
1999 | |
2000 | static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, |
2001 | __u8 type, __u8 *flags) |
2002 | { |
2003 | switch (type) { |
2004 | case ACL_LINK: |
2005 | return l2cap_connect_ind(hdev, bdaddr); |
2006 | |
2007 | case SCO_LINK: |
2008 | case ESCO_LINK: |
2009 | return sco_connect_ind(hdev, bdaddr, flags); |
2010 | |
2011 | case CIS_LINK: |
2012 | case BIS_LINK: |
2013 | return iso_connect_ind(hdev, bdaddr, flags); |
2014 | |
2015 | default: |
2016 | BT_ERR("unknown link type %d", type); |
2017 | return -EINVAL; |
2018 | } |
2019 | } |
2020 | |
2021 | static inline int hci_proto_disconn_ind(struct hci_conn *conn) |
2022 | { |
2023 | if (conn->type != ACL_LINK && conn->type != LE_LINK) |
2024 | return HCI_ERROR_REMOTE_USER_TERM; |
2025 | |
2026 | return l2cap_disconn_ind(conn); |
2027 | } |
2028 | |
2029 | /* ----- HCI callbacks ----- */ |
2030 | struct hci_cb { |
2031 | struct list_head list; |
2032 | |
2033 | char *name; |
2034 | |
2035 | void (*connect_cfm) (struct hci_conn *conn, __u8 status); |
2036 | void (*disconn_cfm) (struct hci_conn *conn, __u8 status); |
2037 | void (*security_cfm) (struct hci_conn *conn, __u8 status, |
2038 | __u8 encrypt); |
2039 | void (*key_change_cfm) (struct hci_conn *conn, __u8 status); |
2040 | void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role); |
2041 | }; |
2042 | |
2043 | static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status) |
2044 | { |
2045 | struct hci_cb *cb; |
2046 | |
2047 | mutex_lock(&hci_cb_list_lock); |
2048 | list_for_each_entry(cb, &hci_cb_list, list) { |
2049 | if (cb->connect_cfm) |
2050 | cb->connect_cfm(conn, status); |
2051 | } |
2052 | mutex_unlock(&hci_cb_list_lock); |
2053 | |
2054 | if (conn->connect_cfm_cb) |
2055 | conn->connect_cfm_cb(conn, status); |
2056 | } |
2057 | |
2058 | static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason) |
2059 | { |
2060 | struct hci_cb *cb; |
2061 | |
2062 | mutex_lock(&hci_cb_list_lock); |
2063 | list_for_each_entry(cb, &hci_cb_list, list) { |
2064 | if (cb->disconn_cfm) |
2065 | cb->disconn_cfm(conn, reason); |
2066 | } |
2067 | mutex_unlock(&hci_cb_list_lock); |
2068 | |
2069 | if (conn->disconn_cfm_cb) |
2070 | conn->disconn_cfm_cb(conn, reason); |
2071 | } |
2072 | |
2073 | static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status) |
2074 | { |
2075 | struct hci_cb *cb; |
2076 | __u8 encrypt; |
2077 | |
2078 | if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) |
2079 | return; |
2080 | |
2081 | encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00; |
2082 | |
2083 | mutex_lock(&hci_cb_list_lock); |
2084 | list_for_each_entry(cb, &hci_cb_list, list) { |
2085 | if (cb->security_cfm) |
2086 | cb->security_cfm(conn, status, encrypt); |
2087 | } |
2088 | mutex_unlock(&hci_cb_list_lock); |
2089 | |
2090 | if (conn->security_cfm_cb) |
2091 | conn->security_cfm_cb(conn, status); |
2092 | } |
2093 | |
2094 | static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status) |
2095 | { |
2096 | struct hci_cb *cb; |
2097 | __u8 encrypt; |
2098 | |
2099 | if (conn->state == BT_CONFIG) { |
2100 | if (!status) |
2101 | conn->state = BT_CONNECTED; |
2102 | |
2103 | hci_connect_cfm(conn, status); |
2104 | hci_conn_drop(conn); |
2105 | return; |
2106 | } |
2107 | |
2108 | if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags)) |
2109 | encrypt = 0x00; |
2110 | else if (test_bit(HCI_CONN_AES_CCM, &conn->flags)) |
2111 | encrypt = 0x02; |
2112 | else |
2113 | encrypt = 0x01; |
2114 | |
2115 | if (!status) { |
2116 | if (conn->sec_level == BT_SECURITY_SDP) |
2117 | conn->sec_level = BT_SECURITY_LOW; |
2118 | |
2119 | if (conn->pending_sec_level > conn->sec_level) |
2120 | conn->sec_level = conn->pending_sec_level; |
2121 | } |
2122 | |
2123 | mutex_lock(&hci_cb_list_lock); |
2124 | list_for_each_entry(cb, &hci_cb_list, list) { |
2125 | if (cb->security_cfm) |
2126 | cb->security_cfm(conn, status, encrypt); |
2127 | } |
2128 | mutex_unlock(&hci_cb_list_lock); |
2129 | |
2130 | if (conn->security_cfm_cb) |
2131 | conn->security_cfm_cb(conn, status); |
2132 | } |
2133 | |
2134 | static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status) |
2135 | { |
2136 | struct hci_cb *cb; |
2137 | |
2138 | mutex_lock(&hci_cb_list_lock); |
2139 | list_for_each_entry(cb, &hci_cb_list, list) { |
2140 | if (cb->key_change_cfm) |
2141 | cb->key_change_cfm(conn, status); |
2142 | } |
2143 | mutex_unlock(&hci_cb_list_lock); |
2144 | } |
2145 | |
2146 | static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, |
2147 | __u8 role) |
2148 | { |
2149 | struct hci_cb *cb; |
2150 | |
2151 | mutex_lock(&hci_cb_list_lock); |
2152 | list_for_each_entry(cb, &hci_cb_list, list) { |
2153 | if (cb->role_switch_cfm) |
2154 | cb->role_switch_cfm(conn, status, role); |
2155 | } |
2156 | mutex_unlock(&hci_cb_list_lock); |
2157 | } |
2158 | |
2159 | static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type) |
2160 | { |
2161 | if (addr_type != ADDR_LE_DEV_RANDOM) |
2162 | return false; |
2163 | |
2164 | if ((bdaddr->b[5] & 0xc0) == 0x40) |
2165 | return true; |
2166 | |
2167 | return false; |
2168 | } |
2169 | |
2170 | static inline bool hci_is_identity_address(bdaddr_t *addr, u8 addr_type) |
2171 | { |
2172 | if (addr_type == ADDR_LE_DEV_PUBLIC) |
2173 | return true; |
2174 | |
2175 | /* Check for Random Static address type */ |
2176 | if ((addr->b[5] & 0xc0) == 0xc0) |
2177 | return true; |
2178 | |
2179 | return false; |
2180 | } |
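/* Worked example of the two address checks above, using the most significant
 * address byte (bdaddr->b[5]): 0x7B gives (0x7B & 0xc0) == 0x40, so the
 * address is treated as a resolvable private address; 0xC1 gives
 * (0xC1 & 0xc0) == 0xc0, a static random address that counts as an identity
 * address.
 */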
2181 | |
2182 | static inline struct smp_irk *hci_get_irk(struct hci_dev *hdev, |
2183 | bdaddr_t *bdaddr, u8 addr_type) |
2184 | { |
2185 | if (!hci_bdaddr_is_rpa(bdaddr, addr_type)) |
2186 | return NULL; |
2187 | |
2188 | return hci_find_irk_by_rpa(hdev, bdaddr); |
2189 | } |
2190 | |
2191 | static inline int hci_check_conn_params(u16 min, u16 max, u16 latency, |
2192 | u16 to_multiplier) |
2193 | { |
2194 | u16 max_latency; |
2195 | |
2196 | if (min > max) { |
2197 | BT_WARN("min %d > max %d", min, max); |
2198 | return -EINVAL; |
2199 | } |
2200 | |
2201 | if (min < 6) { |
2202 | BT_WARN("min %d < 6", min); |
2203 | return -EINVAL; |
2204 | } |
2205 | |
2206 | if (max > 3200) { |
2207 | BT_WARN("max %d > 3200", max); |
2208 | return -EINVAL; |
2209 | } |
2210 | |
2211 | if (to_multiplier < 10) { |
2212 | BT_WARN("to_multiplier %d < 10", to_multiplier); |
2213 | return -EINVAL; |
2214 | } |
2215 | |
2216 | if (to_multiplier > 3200) { |
2217 | BT_WARN("to_multiplier %d > 3200", to_multiplier); |
2218 | return -EINVAL; |
2219 | } |
2220 | |
2221 | if (max >= to_multiplier * 8) { |
2222 | BT_WARN("max %d >= to_multiplier %d * 8", max, to_multiplier); |
2223 | return -EINVAL; |
2224 | } |
2225 | |
2226 | max_latency = (to_multiplier * 4 / max) - 1; |
2227 | if (latency > 499) { |
2228 | BT_WARN("latency %d > 499", latency); |
2229 | return -EINVAL; |
2230 | } |
2231 | |
2232 | if (latency > max_latency) { |
2233 | BT_WARN("latency %d > max_latency %d", latency, max_latency); |
2234 | return -EINVAL; |
2235 | } |
2236 | |
2237 | return 0; |
2238 | } |
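/* Worked example for hci_check_conn_params() (units per the Core Spec:
 * min/max in 1.25 ms steps, to_multiplier in 10 ms steps): min = 24 (30 ms),
 * max = 40 (50 ms), latency = 4, to_multiplier = 500 (5 s).  The range checks
 * pass, max < to_multiplier * 8 (40 < 4000), and
 * max_latency = (500 * 4 / 40) - 1 = 49, so latency 4 is accepted and the
 * function returns 0.
 */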
2239 | |
2240 | int hci_register_cb(struct hci_cb *hcb); |
2241 | int hci_unregister_cb(struct hci_cb *hcb); |
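/* Sketch of how a protocol layer hooks the hci_cb callbacks above (the names
 * below are illustrative; L2CAP, SCO and ISO register themselves this way):
 *
 *	static void my_connect_cfm(struct hci_conn *conn, __u8 status)
 *	{
 *		...
 *	}
 *
 *	static struct hci_cb my_cb = {
 *		.name		= "my_proto",
 *		.connect_cfm	= my_connect_cfm,
 *	};
 *
 *	hci_register_cb(&my_cb);	(once at module init)
 *	hci_unregister_cb(&my_cb);	(undone at module exit)
 */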
2242 | |
2243 | int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen, |
2244 | const void *param); |
2245 | |
2246 | int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, |
2247 | const void *param); |
2248 | void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags); |
2249 | void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb); |
2250 | void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb); |
2251 | |
2252 | void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode); |
2253 | void *hci_recv_event_data(struct hci_dev *hdev, __u8 event); |
2254 | |
2255 | u32 hci_conn_get_phy(struct hci_conn *conn); |
2256 | |
2257 | /* ----- HCI Sockets ----- */ |
2258 | void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb); |
2259 | void hci_send_to_channel(unsigned short channel, struct sk_buff *skb, |
2260 | int flag, struct sock *skip_sk); |
2261 | void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb); |
2262 | void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event, |
2263 | void *data, u16 data_len, ktime_t tstamp, |
2264 | int flag, struct sock *skip_sk); |
2265 | |
2266 | void hci_sock_dev_event(struct hci_dev *hdev, int event); |
2267 | |
2268 | #define HCI_MGMT_VAR_LEN BIT(0) |
2269 | #define HCI_MGMT_NO_HDEV BIT(1) |
2270 | #define HCI_MGMT_UNTRUSTED BIT(2) |
2271 | #define HCI_MGMT_UNCONFIGURED BIT(3) |
2272 | #define HCI_MGMT_HDEV_OPTIONAL BIT(4) |
2273 | |
2274 | struct hci_mgmt_handler { |
2275 | int (*func) (struct sock *sk, struct hci_dev *hdev, void *data, |
2276 | u16 data_len); |
2277 | size_t data_len; |
2278 | unsigned long flags; |
2279 | }; |
2280 | |
2281 | struct hci_mgmt_chan { |
2282 | struct list_head list; |
2283 | unsigned short channel; |
2284 | size_t handler_count; |
2285 | const struct hci_mgmt_handler *handlers; |
2286 | void (*hdev_init) (struct sock *sk, struct hci_dev *hdev); |
2287 | }; |
2288 | |
2289 | int hci_mgmt_chan_register(struct hci_mgmt_chan *c); |
2290 | void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c); |
2291 | |
2292 | /* Management interface */ |
2293 | #define DISCOV_TYPE_BREDR (BIT(BDADDR_BREDR)) |
2294 | #define DISCOV_TYPE_LE (BIT(BDADDR_LE_PUBLIC) | \ |
2295 | BIT(BDADDR_LE_RANDOM)) |
2296 | #define DISCOV_TYPE_INTERLEAVED (BIT(BDADDR_BREDR) | \ |
2297 | BIT(BDADDR_LE_PUBLIC) | \ |
2298 | BIT(BDADDR_LE_RANDOM)) |
2299 | |
2300 | /* These LE scan and inquiry parameters were chosen according to the LE General |
2301 | * Discovery Procedure specification. |
2302 | */ |
2303 | #define DISCOV_LE_SCAN_WIN 0x0012 /* 11.25 msec */ |
2304 | #define DISCOV_LE_SCAN_INT 0x0012 /* 11.25 msec */ |
2305 | #define DISCOV_LE_SCAN_INT_FAST 0x0060 /* 60 msec */ |
2306 | #define DISCOV_LE_SCAN_WIN_FAST 0x0030 /* 30 msec */ |
2307 | #define DISCOV_LE_SCAN_INT_CONN 0x0060 /* 60 msec */ |
2308 | #define DISCOV_LE_SCAN_WIN_CONN 0x0060 /* 60 msec */ |
2309 | #define DISCOV_LE_SCAN_INT_SLOW1 0x0800 /* 1.28 sec */ |
2310 | #define DISCOV_LE_SCAN_WIN_SLOW1 0x0012 /* 11.25 msec */ |
2311 | #define DISCOV_LE_SCAN_INT_SLOW2 0x1000 /* 2.56 sec */ |
2312 | #define DISCOV_LE_SCAN_WIN_SLOW2 0x0024 /* 22.5 msec */ |
2313 | #define DISCOV_CODED_SCAN_INT_FAST 0x0120 /* 180 msec */ |
2314 | #define DISCOV_CODED_SCAN_WIN_FAST 0x0090 /* 90 msec */ |
2315 | #define DISCOV_CODED_SCAN_INT_SLOW1 0x1800 /* 3.84 sec */ |
2316 | #define DISCOV_CODED_SCAN_WIN_SLOW1 0x0036 /* 33.75 msec */ |
2317 | #define DISCOV_CODED_SCAN_INT_SLOW2 0x3000 /* 7.68 sec */ |
2318 | #define DISCOV_CODED_SCAN_WIN_SLOW2 0x006c /* 67.5 msec */ |
2319 | #define DISCOV_LE_TIMEOUT 10240 /* msec */ |
2320 | #define DISCOV_INTERLEAVED_TIMEOUT 5120 /* msec */ |
2321 | #define DISCOV_INTERLEAVED_INQUIRY_LEN 0x04 |
2322 | #define DISCOV_BREDR_INQUIRY_LEN 0x08 |
2323 | #define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */ |
2324 | #define DISCOV_LE_FAST_ADV_INT_MIN 0x00A0 /* 100 msec */ |
2325 | #define DISCOV_LE_FAST_ADV_INT_MAX 0x00F0 /* 150 msec */ |
2326 | #define DISCOV_LE_PER_ADV_INT_MIN 0x00A0 /* 200 msec */ |
2327 | #define DISCOV_LE_PER_ADV_INT_MAX 0x00A0 /* 200 msec */ |
2328 | #define DISCOV_LE_ADV_MESH_MIN 0x00A0 /* 100 msec */ |
2329 | #define DISCOV_LE_ADV_MESH_MAX 0x00A0 /* 100 msec */ |
2330 | #define INTERVAL_TO_MS(x) (((x) * 10) / 0x10) |
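/* INTERVAL_TO_MS() converts 0.625 ms interval units to milliseconds with
 * integer truncation, e.g. INTERVAL_TO_MS(0x0800) = (2048 * 10) / 16
 * = 1280 ms (the 1.28 s slow scan interval above), while
 * INTERVAL_TO_MS(0x0012) = 180 / 16 = 11 ms (11.25 ms truncated).
 */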
2331 | |
2332 | #define NAME_RESOLVE_DURATION msecs_to_jiffies(10240) /* 10.24 sec */ |
2333 | |
2334 | void mgmt_fill_version_info(void *ver); |
2335 | int mgmt_new_settings(struct hci_dev *hdev); |
2336 | void mgmt_index_added(struct hci_dev *hdev); |
2337 | void mgmt_index_removed(struct hci_dev *hdev); |
2338 | void mgmt_set_powered_failed(struct hci_dev *hdev, int err); |
2339 | void mgmt_power_on(struct hci_dev *hdev, int err); |
2340 | void __mgmt_power_off(struct hci_dev *hdev); |
2341 | void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, |
2342 | bool persistent); |
2343 | void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, |
2344 | u8 *name, u8 name_len); |
2345 | void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, |
2346 | u8 link_type, u8 addr_type, u8 reason, |
2347 | bool mgmt_connected); |
2348 | void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, |
2349 | u8 link_type, u8 addr_type, u8 status); |
2350 | void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, |
2351 | u8 status); |
2352 | void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure); |
2353 | void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, |
2354 | u8 status); |
2355 | void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, |
2356 | u8 status); |
2357 | int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr, |
2358 | u8 link_type, u8 addr_type, u32 value, |
2359 | u8 confirm_hint); |
2360 | int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, |
2361 | u8 link_type, u8 addr_type, u8 status); |
2362 | int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, |
2363 | u8 link_type, u8 addr_type, u8 status); |
2364 | int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr, |
2365 | u8 link_type, u8 addr_type); |
2366 | int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, |
2367 | u8 link_type, u8 addr_type, u8 status); |
2368 | int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, |
2369 | u8 link_type, u8 addr_type, u8 status); |
2370 | int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr, |
2371 | u8 link_type, u8 addr_type, u32 passkey, |
2372 | u8 entered); |
2373 | void mgmt_auth_failed(struct hci_conn *conn, u8 status); |
2374 | void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status); |
2375 | void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, |
2376 | u8 status); |
2377 | void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status); |
2378 | void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, |
2379 | u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, |
2380 | u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, |
2381 | u64 instant); |
2382 | void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, |
2383 | u8 addr_type, s8 rssi, u8 *name, u8 name_len); |
2384 | void mgmt_discovering(struct hci_dev *hdev, u8 discovering); |
2385 | void mgmt_suspending(struct hci_dev *hdev, u8 state); |
2386 | void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr, |
2387 | u8 addr_type); |
2388 | bool mgmt_powering_down(struct hci_dev *hdev); |
2389 | void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent); |
2390 | void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent); |
2391 | void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk, |
2392 | bool persistent); |
2393 | void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr, |
2394 | u8 bdaddr_type, u8 store_hint, u16 min_interval, |
2395 | u16 max_interval, u16 latency, u16 timeout); |
2396 | void mgmt_smp_complete(struct hci_conn *conn, bool complete); |
2397 | bool mgmt_get_connectable(struct hci_dev *hdev); |
2398 | u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev); |
2399 | void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, |
2400 | u8 instance); |
2401 | void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev, |
2402 | u8 instance); |
2403 | void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle); |
2404 | int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip); |
2405 | void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle, |
2406 | bdaddr_t *bdaddr, u8 addr_type); |
2407 | |
2408 | int hci_abort_conn(struct hci_conn *conn, u8 reason); |
2409 | u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency, |
2410 | u16 to_multiplier); |
2411 | void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand, |
2412 | __u8 ltk[16], __u8 key_size); |
2413 | |
2414 | void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, |
2415 | u8 *bdaddr_type); |
2416 | |
2417 | #define SCO_AIRMODE_MASK 0x0003 |
2418 | #define SCO_AIRMODE_CVSD 0x0000 |
2419 | #define SCO_AIRMODE_TRANSP 0x0003 |
2420 | |
2421 | #define LOCAL_CODEC_ACL_MASK BIT(0) |
2422 | #define LOCAL_CODEC_SCO_MASK BIT(1) |
2423 | |
2424 | #define TRANSPORT_TYPE_MAX 0x04 |
2425 | |
2426 | #endif /* __HCI_CORE_H */ |
2427 |