/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010 Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	22

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};

static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};

static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};

#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};

static u8 mgmt_errno_status(int err)
{
	switch (err) {
	case 0:
		return MGMT_STATUS_SUCCESS;
	case -EPERM:
		return MGMT_STATUS_REJECTED;
	case -EINVAL:
		return MGMT_STATUS_INVALID_PARAMS;
	case -EOPNOTSUPP:
		return MGMT_STATUS_NOT_SUPPORTED;
	case -EBUSY:
		return MGMT_STATUS_BUSY;
	case -ETIMEDOUT:
		return MGMT_STATUS_AUTH_FAILED;
	case -ENOMEM:
		return MGMT_STATUS_NO_RESOURCES;
	case -EISCONN:
		return MGMT_STATUS_ALREADY_CONNECTED;
	case -ENOTCONN:
		return MGMT_STATUS_DISCONNECTED;
	}

	return MGMT_STATUS_FAILED;
}

static u8 mgmt_status(int err)
{
	if (err < 0)
		return mgmt_errno_status(err);

	if (err < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[err];

	return MGMT_STATUS_FAILED;
}
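
/* Tracing the conversion above with concrete inputs: mgmt_status(-EBUSY)
 * resolves through mgmt_errno_status() to MGMT_STATUS_BUSY, while
 * mgmt_status(0x02) indexes mgmt_status_table[] ("No Connection") and
 * yields MGMT_STATUS_NOT_CONNECTED. Any HCI status beyond the end of the
 * table falls back to MGMT_STATUS_FAILED.
 */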

static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}

static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}

void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
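
/* Illustrative wire view of the Read Version reply filled in above,
 * assuming the usual packed mgmt layout of version (one octet) followed
 * by revision (__le16): MGMT_VERSION 1 and MGMT_REVISION 22 would appear
 * as the bytes 0x01 0x16 0x00.
 */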

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}

static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
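
/* Rough sketch of the Read Commands reply assembled above (layout is
 * illustrative, not normative): two __le16 counters followed by the
 * command opcodes and then the event opcodes, each written as an
 * unaligned __le16:
 *
 *	num_commands | num_events | cmd[0..num_commands-1] |
 *	ev[0..num_events-1]
 *
 * Untrusted sockets get the shorter mgmt_untrusted_* tables instead.
 */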

static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}

static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}

static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
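
/* Worked example (hypothetical controller): with HCI_QUIRK_INVALID_BDADDR
 * set and public_addr still BDADDR_ANY, get_missing_options() reports
 * MGMT_OPTION_PUBLIC_ADDRESS as missing and is_configured() returns
 * false until user space provides an address (e.g. via Set Public
 * Address).
 */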

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}

static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}

static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}

static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}
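
/* BR/EDR 1M 1-slot and LE 1M TX/RX are treated as the always-on baseline:
 * get_configurable_phys() masks them out of the supported set, so Set PHY
 * Configuration cannot disable them; every other PHY reported by
 * get_supported_phys() remains configurable.
 */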

static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev))
			settings |= MGMT_SETTING_SSP;

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}

static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then whether
	 * the address is actually in use decides if the flag is set.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
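	/* Example: on an LE-only controller bdaddr stays BDADDR_ANY, so a
	 * static_addr configured via Set Static Address is enough for the
	 * check below to report MGMT_SETTING_STATIC_ADDRESS.
	 */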
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	return settings;
}

static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}

bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}

static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}

static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data);

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);

static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}

static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}

static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}

static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}

static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it.
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}

static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}

static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
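
/* The eir_append_* helpers emit standard EIR/AD structures, i.e.
 * length | type | data triplets where the length octet covers the type
 * and the data. A complete local name of "BlueZ", for example, would be
 * appended as 0x06 0x09 'B' 'l' 'u' 'e' 'Z' (0x09 being
 * EIR_NAME_COMPLETE).
 */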

static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}

static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for the AUTO_OFF case where the controller might
		 * not "really" have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}

static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_settings() for power on, as power off is
		 * deferred to hdev->power_off work which does call
		 * hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}

static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}

static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!cp->val) {
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
					      MGMT_STATUS_BUSY);
			goto failed;
		}
	}

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	if (cmd->cmd_complete) {
		u8 *status = data;

		cmd->cmd_complete(cmd, *status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}

static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}

static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);

		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}

static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);
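	/* Valid combinations, for reference: val 0x00 (disable) requires
	 * timeout 0, val 0x01 (general discoverable) takes an optional
	 * timeout, and val 0x02 (limited discoverable) requires a
	 * non-zero timeout.
	 */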
1582
1583 hci_dev_lock(hdev);
1584
1585 if (!hdev_is_powered(hdev) && timeout > 0) {
1586 err = mgmt_cmd_status(sk, index: hdev->id, MGMT_OP_SET_DISCOVERABLE,
1587 MGMT_STATUS_NOT_POWERED);
1588 goto failed;
1589 }
1590
1591 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1592 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1593 err = mgmt_cmd_status(sk, index: hdev->id, MGMT_OP_SET_DISCOVERABLE,
1594 MGMT_STATUS_BUSY);
1595 goto failed;
1596 }
1597
1598 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1599 err = mgmt_cmd_status(sk, index: hdev->id, MGMT_OP_SET_DISCOVERABLE,
1600 MGMT_STATUS_REJECTED);
1601 goto failed;
1602 }
1603
1604 if (hdev->advertising_paused) {
1605 err = mgmt_cmd_status(sk, index: hdev->id, MGMT_OP_SET_DISCOVERABLE,
1606 MGMT_STATUS_BUSY);
1607 goto failed;
1608 }
1609
1610 if (!hdev_is_powered(hdev)) {
1611 bool changed = false;
1612
1613 /* Setting limited discoverable when powered off is
1614 * not a valid operation since it requires a timeout
1615 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1616 */
1617 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1618 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1619 changed = true;
1620 }
1621
1622 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1623 if (err < 0)
1624 goto failed;
1625
1626 if (changed)
1627 err = new_settings(hdev, skip: sk);
1628
1629 goto failed;
1630 }
1631
1632 /* If the current mode is the same, then just update the timeout
1633 * value with the new value. And if only the timeout gets updated,
1634 * then no need for any HCI transactions.
1635 */
1636 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1637 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1638 HCI_LIMITED_DISCOVERABLE)) {
1639 cancel_delayed_work(dwork: &hdev->discov_off);
1640 hdev->discov_timeout = timeout;
1641
1642 if (cp->val && hdev->discov_timeout > 0) {
1643 int to = msecs_to_jiffies(m: hdev->discov_timeout * 1000);
1644 queue_delayed_work(wq: hdev->req_workqueue,
1645 dwork: &hdev->discov_off, delay: to);
1646 }
1647
1648 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1649 goto failed;
1650 }
1651
1652 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1653 if (!cmd) {
1654 err = -ENOMEM;
1655 goto failed;
1656 }
1657
1658 /* Cancel any potential discoverable timeout that might be
1659 * still active and store new timeout value. The arming of
1660 * the timeout happens in the complete handler.
1661 */
1662 cancel_delayed_work(dwork: &hdev->discov_off);
1663 hdev->discov_timeout = timeout;
1664
1665 if (cp->val)
1666 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1667 else
1668 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1669
1670 /* Limited discoverable mode */
1671 if (cp->val == 0x02)
1672 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1673 else
1674 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1675
1676 err = hci_cmd_sync_queue(hdev, func: set_discoverable_sync, data: cmd,
1677 destroy: mgmt_set_discoverable_complete);
1678
1679 if (err < 0)
1680 mgmt_pending_remove(cmd);
1681
1682failed:
1683 hci_dev_unlock(hdev);
1684 return err;
1685}
1686
1687static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1688 int err)
1689{
1690 struct mgmt_pending_cmd *cmd = data;
1691
1692 bt_dev_dbg(hdev, "err %d", err);
1693
1694 /* Make sure cmd still outstanding. */
1695 if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1696 return;
1697
1698 hci_dev_lock(hdev);
1699
1700 if (err) {
1701 u8 mgmt_err = mgmt_status(err);
1702 mgmt_cmd_status(sk: cmd->sk, index: cmd->index, cmd: cmd->opcode, status: mgmt_err);
1703 goto done;
1704 }
1705
1706 send_settings_rsp(sk: cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1707 new_settings(hdev, skip: cmd->sk);
1708
1709done:
1710 mgmt_pending_remove(cmd);
1711
1712 hci_dev_unlock(hdev);
1713}
1714
1715static int set_connectable_update_settings(struct hci_dev *hdev,
1716 struct sock *sk, u8 val)
1717{
1718 bool changed = false;
1719 int err;
1720
1721 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1722 changed = true;
1723
1724 if (val) {
1725 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1726 } else {
1727 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1728 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1729 }
1730
1731 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1732 if (err < 0)
1733 return err;
1734
1735 if (changed) {
1736 hci_update_scan(hdev);
1737 hci_update_passive_scan(hdev);
1738 return new_settings(hdev, skip: sk);
1739 }
1740
1741 return 0;
1742}
1743
1744static int set_connectable_sync(struct hci_dev *hdev, void *data)
1745{
1746 BT_DBG("%s", hdev->name);
1747
1748 return hci_update_connectable_sync(hdev);
1749}
1750
1751static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1752 u16 len)
1753{
1754 struct mgmt_mode *cp = data;
1755 struct mgmt_pending_cmd *cmd;
1756 int err;
1757
1758 bt_dev_dbg(hdev, "sock %p", sk);
1759
1760 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1761 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1762 return mgmt_cmd_status(sk, index: hdev->id, MGMT_OP_SET_CONNECTABLE,
1763 MGMT_STATUS_REJECTED);
1764
1765 if (cp->val != 0x00 && cp->val != 0x01)
1766 return mgmt_cmd_status(sk, index: hdev->id, MGMT_OP_SET_CONNECTABLE,
1767 MGMT_STATUS_INVALID_PARAMS);
1768
1769 hci_dev_lock(hdev);
1770
1771 if (!hdev_is_powered(hdev)) {
1772 err = set_connectable_update_settings(hdev, sk, val: cp->val);
1773 goto failed;
1774 }
1775
1776 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1777 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1778 err = mgmt_cmd_status(sk, index: hdev->id, MGMT_OP_SET_CONNECTABLE,
1779 MGMT_STATUS_BUSY);
1780 goto failed;
1781 }
1782
1783 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1784 if (!cmd) {
1785 err = -ENOMEM;
1786 goto failed;
1787 }
1788
1789 if (cp->val) {
1790 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1791 } else {
1792 if (hdev->discov_timeout > 0)
1793 cancel_delayed_work(dwork: &hdev->discov_off);
1794
1795 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1796 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1797 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1798 }
1799
1800 err = hci_cmd_sync_queue(hdev, func: set_connectable_sync, data: cmd,
1801 destroy: mgmt_set_connectable_complete);
1802
1803 if (err < 0)
1804 mgmt_pending_remove(cmd);
1805
1806failed:
1807 hci_dev_unlock(hdev);
1808 return err;
1809}
1810
1811static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1812 u16 len)
1813{
1814 struct mgmt_mode *cp = data;
1815 bool changed;
1816 int err;
1817
1818 bt_dev_dbg(hdev, "sock %p", sk);
1819
1820 if (cp->val != 0x00 && cp->val != 0x01)
1821 return mgmt_cmd_status(sk, index: hdev->id, MGMT_OP_SET_BONDABLE,
1822 MGMT_STATUS_INVALID_PARAMS);
1823
1824 hci_dev_lock(hdev);
1825
1826 if (cp->val)
1827 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1828 else
1829 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1830
1831 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1832 if (err < 0)
1833 goto unlock;
1834
1835 if (changed) {
1836 /* In limited privacy mode the change of bondable mode
1837 * may affect the local advertising address.
1838 */
1839 hci_update_discoverable(hdev);
1840
1841 err = new_settings(hdev, skip: sk);
1842 }
1843
1844unlock:
1845 hci_dev_unlock(hdev);
1846 return err;
1847}
1848
1849static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1850 u16 len)
1851{
1852 struct mgmt_mode *cp = data;
1853 struct mgmt_pending_cmd *cmd;
1854 u8 val, status;
1855 int err;
1856
1857 bt_dev_dbg(hdev, "sock %p", sk);
1858
1859 status = mgmt_bredr_support(hdev);
1860 if (status)
1861 return mgmt_cmd_status(sk, index: hdev->id, MGMT_OP_SET_LINK_SECURITY,
1862 status);
1863
1864 if (cp->val != 0x00 && cp->val != 0x01)
1865 return mgmt_cmd_status(sk, index: hdev->id, MGMT_OP_SET_LINK_SECURITY,
1866 MGMT_STATUS_INVALID_PARAMS);
1867
1868 hci_dev_lock(hdev);
1869
1870 if (!hdev_is_powered(hdev)) {
1871 bool changed = false;
1872
1873 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1874 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1875 changed = true;
1876 }
1877
1878 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1879 if (err < 0)
1880 goto failed;
1881
1882 if (changed)
1883 err = new_settings(hdev, skip: sk);
1884
1885 goto failed;
1886 }
1887
1888 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1889 err = mgmt_cmd_status(sk, index: hdev->id, MGMT_OP_SET_LINK_SECURITY,
1890 MGMT_STATUS_BUSY);
1891 goto failed;
1892 }
1893
1894 val = !!cp->val;
1895
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}

static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}

static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	bt_dev_dbg(hdev, "sock %p", sk);

	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
			       MGMT_STATUS_NOT_SUPPORTED);
}

static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}

static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	struct sock *sk = cmd->sk;

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	mgmt_pending_remove(cmd);
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
}

static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_mesh *cp = cmd->param;
	size_t len = cmd->param_len;

	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

	if (cp->enable)
		hci_dev_set_flag(hdev, HCI_MESH);
	else
		hci_dev_clear_flag(hdev, HCI_MESH);

	len -= sizeof(*cp);

	/* If filters don't fit, forward all adv pkts */
	if (len <= sizeof(hdev->mesh_ad_types))
		memcpy(hdev->mesh_ad_types, cp->ad_types, len);

	hci_update_passive_scan_sync(hdev);
	return 0;
}

static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}

static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	unsigned long mesh_send_interval;
	u8 mgmt_err = mgmt_status(err);

	/* Report any errors here, but don't report completion */

	if (mgmt_err) {
		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
		/* Send Complete Error Code for handle */
		mesh_send_complete(hdev, mesh_tx, false);
		return;
	}

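	/* Budget roughly 25 ms per requested transmission before running
	 * the mesh_send_done work that finalizes this packet.
	 */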
	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
			   mesh_send_interval);
}

static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

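	/* Advertise the packet for send->cnt advertising events; the
	 * duration converts that count into milliseconds using the
	 * configured maximum advertising interval.
	 */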
	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other is
		 * pending, or let it be picked up naturally from the queue
		 * since advertising is already in progress.
		 */
		instance = 0;
	}

	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}

static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
{
	struct mgmt_rp_mesh_read_features *rp = data;

	if (rp->used_handles >= rp->max_handles)
		return;

	rp->handles[rp->used_handles++] = mesh_tx->handle;
}

static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

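	/* Trim the reply to the handles actually in use: sizeof(rp)
	 * accounts for all MESH_HANDLES_MAX one-byte handle slots, so
	 * the unused tail is subtracted again.
	 */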
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}

static int send_cancel(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
	struct mgmt_mesh_tx *mesh_tx;

	if (!cancel->handle) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, false);
		} while (mesh_tx);
	} else {
		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);

		if (mesh_tx && mesh_tx->sk == cmd->sk)
			mesh_send_complete(hdev, mesh_tx, false);
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
			  0, NULL, 0);
	mgmt_pending_free(cmd);

	return 0;
}

static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);
	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}

static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct mgmt_cp_mesh_send *send = data;
	struct mgmt_rp_mesh_read_features rp;
	bool sending;
	int err = 0;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_NOT_SUPPORTED);
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    len <= MGMT_MESH_SEND_SIZE ||
	    len > (MGMT_MESH_SEND_SIZE + 31))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.max_handles = MESH_HANDLES_MAX;

	mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	if (rp.max_handles <= rp.used_handles) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);

	if (!mesh_tx)
		err = -ENOMEM;
	else if (!sending)
		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
					 mesh_send_start_complete);

	if (err < 0) {
		bt_dev_err(hdev, "Send Mesh Failed %d", err);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_FAILED);

		if (mesh_tx) {
			if (sending)
				mgmt_mesh_remove(mesh_tx);
		}
	} else {
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);

		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
				  &mesh_tx->handle, 1);
	}

done:
	hci_dev_unlock(hdev);
	return err;
}

static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single-mode LE-only controllers, or dual-mode
	 * controllers configured as LE-only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result in rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

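	/* lmp_host_le_capable() reflects the "LE Supported (Host)" bit in
	 * the controller's extended features, i.e. whether the host side
	 * of LE support is currently enabled in the controller.
	 */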
	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}

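/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order. UUIDs that match its last 12 stored bytes
 * can be compressed to a 16- or 32-bit short form, which is what
 * get_uuid_size() below checks for.
 */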
static const u8 bluetooth_base_uuid[] = {
	0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
	0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

static u8 get_uuid_size(const u8 *uuid)
{
	u32 val;

	if (memcmp(uuid, bluetooth_base_uuid, 12))
		return 128;

	val = get_unaligned_le32(&uuid[12]);
	if (val > 0xffff)
		return 32;

	return 16;
}

static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	mgmt_pending_free(cmd);
}

static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);
	if (err)
		return err;

	return hci_update_eir_sync(hdev);
}

static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

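	/* Setting HCI_SERVICE_CACHE defers the class/EIR update to the
	 * delayed service_cache work, so back-to-back UUID operations do
	 * not each trigger separate HCI traffic.
	 */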
	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
		return true;
	}

	return false;
}

static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);
	if (err)
		return err;

	return hci_update_eir_sync(hdev);
}

static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_class_sync(struct hci_dev *hdev, void *data)
{
	int err = 0;

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		cancel_delayed_work_sync(&hdev->service_cache);
		err = hci_update_eir_sync(hdev);
	}

	if (err)
		return err;

	return hci_update_class_sync(hdev);
}

static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

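	/* Link key types 0x00-0x08 are the values defined by the Core
	 * specification; anything above that range is rejected below.
	 */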
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Considering SMP over BREDR/LE, there is no need to check addr_type */
		if (key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}

static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}

static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	if (!err)
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, err);
	mgmt_pending_free(cmd);
}

static int unpair_device_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct hci_conn *conn;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn)
		return 0;

	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
}

static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}

	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a re-pairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

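/* Translate an HCI link type and address type into the mgmt API's
 * BDADDR_* address type constants.
 */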
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	switch (link_type) {
	case ISO_LINK:
	case LE_LINK:
		switch (addr_type) {
		case ADDR_LE_DEV_PUBLIC:
			return BDADDR_LE_PUBLIC;

		default:
			/* Fallback to LE Random address type */
			return BDADDR_LE_RANDOM;
		}

	default:
		/* Fallback to BR/EDR type */
		return BDADDR_BREDR;
	}
}

static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}

static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}

static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
			continue;

		if (cmd->user_data != conn)
			continue;

		return cmd;
	}

	return NULL;
}

static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}

void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
	struct mgmt_pending_cmd *cmd;

	cmd = find_pairing(conn);
	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE,
				       HCI_ACL_CONN_TIMEOUT);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		/* hci_conn_params_add() may fail on allocation; guard the
		 * dereference instead of assuming success.
		 */
		if (p && p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = hci_conn_get(conn);

	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}

static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}

static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}

static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}

static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}

static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}

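/* If the current advertising instance carries one of the given flags,
 * expire it and schedule the next instance so that refreshed data
 * (e.g. a new local name or appearance) gets advertised.
 */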
static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return 0;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return 0;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return 0;

	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);

	return 0;
}

static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}

static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}

static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data, so there is no
	 * need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}

static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}

static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != appearance) {
		hdev->appearance = appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}

static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}

int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}

static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

3937 if (!status) {
3938 if (!skb)
3939 status = MGMT_STATUS_FAILED;
3940 else if (IS_ERR(ptr: skb))
3941 status = mgmt_status(err: PTR_ERR(ptr: skb));
3942 else
3943 status = mgmt_status(err: skb->data[0]);
3944 }
3945
3946 bt_dev_dbg(hdev, "status %d", status);
3947
3948 if (status) {
3949 mgmt_cmd_status(sk: cmd->sk, index: hdev->id,
3950 MGMT_OP_SET_PHY_CONFIGURATION, status);
3951 } else {
3952 mgmt_cmd_complete(sk: cmd->sk, index: hdev->id,
3953 MGMT_OP_SET_PHY_CONFIGURATION, status: 0,
3954 NULL, rp_len: 0);
3955
3956 mgmt_phy_configuration_changed(hdev, skip: cmd->sk);
3957 }
3958
3959 if (skb && !IS_ERR(ptr: skb))
3960 kfree_skb(skb);
3961
3962 mgmt_pending_remove(cmd);
3963}
3964
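/* Translate the MGMT PHY selection into an HCI LE Set Default PHY command.
 * When no TX (or RX) PHY bit is selected, the matching all_phys bit is set
 * to tell the controller that the host has no preference for that
 * direction.
 */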
static int set_default_phy_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
	struct hci_cp_le_set_default_phy cp_phy;
	u32 selected_phys = __le32_to_cpu(cp->selected_phys);

	memset(&cp_phy, 0, sizeof(cp_phy));

	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
				  sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);

	return 0;
}

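/* MGMT_OP_SET_PHY_CONFIGURATION handler. The BR/EDR bits are applied
 * immediately by recomputing hdev->pkt_type; note that the EDR cases are
 * inverted because the HCI_2DHx/HCI_3DHx packet-type bits have "shall not
 * use" semantics. The LE bits require an HCI LE Set Default PHY command,
 * so they are handed off to the cmd_sync queue with a pending command
 * tracking completion.
 */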
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

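/* MGMT_OP_SET_BLOCKED_KEYS handler. Replaces the current blocked-key list
 * with the one supplied by userspace; entries are consulted elsewhere in
 * the stack to reject the use of known-compromised link keys, LTKs and
 * IRKs.
 */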
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				 err, NULL, 0);
}

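/* MGMT_OP_SET_WIDEBAND_SPEECH handler. Only toggles the setting flag;
 * support is advertised by the driver via
 * HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, and changing the value while the
 * controller is powered is rejected.
 */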
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

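/* MGMT_OP_READ_CONTROLLER_CAP handler. The response is a sequence of
 * EIR-style TLVs: the security flags, the maximum BR/EDR and SMP
 * encryption key sizes and, when the controller can report it, the
 * min/max LE TX power range.
 */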
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported,
	 * remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, the
	 * maximum encryption key size is also provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * them from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}

#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};

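/* MGMT_OP_READ_EXP_FEATURES_INFO handler. Builds the feature list either
 * for the non-controller index (hdev == NULL, debug feature only) or for
 * a specific controller. Each entry is a 16-byte UUID plus a 32-bit flags
 * word where BIT(0) means "enabled" and BIT(1), where used, means that
 * toggling the feature also changes the supported settings.
 */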
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_exp_features_info *rp;
	size_t len;
	u16 idx = 0;
	u32 flags;
	int status;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enough space for 7 features */
	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
	rp = kzalloc(len, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_enabled() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && lmp_le_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				   MGMT_OP_READ_EXP_FEATURES_INFO,
				   0, rp, sizeof(*rp) + (20 * idx));

	kfree(rp);
	return status;
}

static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));

	/* Do we need to be atomic with the conn_flags? */
	if (enabled && privacy_mode_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
	else
		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}

static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
			       bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, uuid, 16);
	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}

#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}

/* The zero key uuid is special. Multiple exp features are set through it. */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}

#ifdef CONFIG_BT_FEATURE_DEBUG
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif

static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	} else {
		hci_dev_clear_flag(hdev, HCI_MESH);
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);

	return err;
}

static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when the controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}

static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}

static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);

	return err;
}

static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	if (!hci_dev_le_state_simultaneous(hdev)) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);

	return err;
}

#ifdef CONFIG_BT_LE
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed = false;
	int err;

	/* Command requires the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	if (val)
		err = iso_init();
	else
		err = iso_exit();

	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, val, sk);

	return err;
}
#endif

static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};

static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	size_t i = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	for (i = 0; exp_features[i].uuid; i++) {
		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
			return exp_features[i].set_func(sk, hdev, cp, data_len);
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}

static u32 get_params_flags(struct hci_dev *hdev,
			    struct hci_conn_params *params)
{
	u32 flags = hdev->conn_flags;

	/* Devices using RPAs can only be programmed into the accept list if
	 * LL Privacy has been enabled; otherwise they cannot be marked with
	 * HCI_CONN_FLAG_REMOTE_WAKEUP.
	 */
	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;

	return flags;
}

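/* MGMT_OP_GET_DEVICE_FLAGS handler. BR/EDR devices are looked up in the
 * accept list while LE devices use their connection parameter entry; for
 * LE the reported supported flags may be narrowed by get_params_flags().
 */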
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		supported_flags = get_params_flags(hdev, params);
		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				 &rp, sizeof(rp));
}

static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = bdaddr_type;
	ev.supported_flags = cpu_to_le32(supported_flags);
	ev.current_flags = cpu_to_le32(current_flags);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}

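/* MGMT_OP_SET_DEVICE_FLAGS handler. Validates the requested flags against
 * the supported mask, stores them in the matching accept list entry
 * (BR/EDR) or connection parameters (LE), and emits Device Flags Changed
 * to the other sockets on success.
 */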
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* We should take hci_dev_lock() early; conn_flags can change */
	supported_flags = hdev->conn_flags;

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	supported_flags = get_params_flags(hdev, params);

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}

static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added ev;

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}

static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}

static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}

static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	return hci_add_adv_monitor(hdev, monitor);
}

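/* Common tail of the Add Adv Patterns Monitor commands: takes ownership
 * of the monitor, refuses to run while a conflicting monitor or SET_LE
 * operation is pending, and queues the actual registration on the
 * cmd_sync queue. On any early failure the monitor is freed here.
 */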
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	if (status)
		goto unlock;

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}

static void parse_adv_monitor_rssi(struct adv_monitor *m,
				   struct mgmt_adv_rssi_thresholds *rssi)
{
	if (rssi) {
		m->rssi.low_threshold = rssi->low_threshold;
		m->rssi.low_threshold_timeout =
		    __le16_to_cpu(rssi->low_threshold_timeout);
		m->rssi.high_threshold = rssi->high_threshold;
		m->rssi.high_threshold_timeout =
		    __le16_to_cpu(rssi->high_threshold_timeout);
		m->rssi.sampling_period = rssi->sampling_period;
	} else {
		/* Default values. These are the least constraining
		 * parameters for the MSFT API to work, so it behaves as if
		 * there were no RSSI parameters to consider. May need to be
		 * changed if other APIs are to be supported.
		 */
		m->rssi.low_threshold = -127;
		m->rssi.low_threshold_timeout = 60;
		m->rssi.high_threshold = -127;
		m->rssi.high_threshold_timeout = 0;
		m->rssi.sampling_period = 0;
	}
}

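/* Copy and validate the advertisement patterns supplied by userspace.
 * Each pattern must fit entirely within the maximum extended advertising
 * data length, both its offset and offset + length.
 */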
static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
				    struct mgmt_adv_pattern *patterns)
{
	u8 offset = 0, length = 0;
	struct adv_pattern *p = NULL;
	int i;

	for (i = 0; i < pattern_count; i++) {
		offset = patterns[i].offset;
		length = patterns[i].length;
		if (offset >= HCI_MAX_EXT_AD_LENGTH ||
		    length > HCI_MAX_EXT_AD_LENGTH ||
		    (offset + length) > HCI_MAX_EXT_AD_LENGTH)
			return MGMT_STATUS_INVALID_PARAMS;

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return MGMT_STATUS_NO_RESOURCES;

		p->ad_type = patterns[i].ad_type;
		p->offset = patterns[i].offset;
		p->length = patterns[i].length;
		memcpy(p->value, patterns[i].value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	return MGMT_STATUS_SUCCESS;
}

static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}

static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}

static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;

	hci_dev_lock(hdev);

	rp.monitor_handle = cp->monitor_handle;

	if (!status)
		hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}

static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
	u16 handle = __le16_to_cpu(cp->monitor_handle);

	if (!handle)
		return hci_remove_all_adv_monitor(hdev);

	return hci_remove_single_adv_monitor(hdev, handle);
}

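/* MGMT_OP_REMOVE_ADV_MONITOR handler. A monitor_handle of 0 removes all
 * monitors (see mgmt_remove_adv_monitor_sync); any other value removes
 * only the matching monitor.
 */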
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
				 mgmt_remove_adv_monitor_complete);

	if (err) {
		mgmt_pending_remove(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}

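/* Completion handler for the Read Local OOB Data HCI command. Depending
 * on whether BR/EDR Secure Connections is enabled, the controller reply
 * carries either only the P-192 or both the P-192 and P-256
 * hash/randomizer pairs, and the MGMT response size is trimmed
 * accordingly.
 */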
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}

static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;

	if (bredr_sc_enabled(hdev))
		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
	else
		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);

	if (IS_ERR(cmd->skb))
		return PTR_ERR(cmd->skb);
	else
		return 0;
}

static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

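/* MGMT_OP_ADD_REMOTE_OOB_DATA handler. Accepts either the legacy layout
 * with P-192 values only or the extended layout carrying both P-192 and
 * P-256 values; a zero-filled hash/randomizer pair disables OOB data for
 * the corresponding curve.
 */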
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}

void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (!cmd)
		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}

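/* Check that the requested discovery type is actually usable on this
 * controller, mapping any failure to a MGMT status code. Interleaved
 * discovery requires both LE and BR/EDR support.
 */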
5794static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5795 uint8_t *mgmt_status)
5796{
5797 switch (type) {
5798 case DISCOV_TYPE_LE:
5799 *mgmt_status = mgmt_le_support(hdev);
5800 if (*mgmt_status)
5801 return false;
5802 break;
5803 case DISCOV_TYPE_INTERLEAVED:
5804 *mgmt_status = mgmt_le_support(hdev);
5805 if (*mgmt_status)
5806 return false;
5807 fallthrough;
5808 case DISCOV_TYPE_BREDR:
5809 *mgmt_status = mgmt_bredr_support(hdev);
5810 if (*mgmt_status)
5811 return false;
5812 break;
5813 default:
5814 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5815 return false;
5816 }
5817
5818 return true;
5819}
5820
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
					    DISCOVERY_FINDING);
}

static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}

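/* Common handler for Start Discovery, Start Limited Discovery and
 * Start Service Discovery; the op parameter carries the specific
 * opcode so responses go out with the right one.
 */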
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}

static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}

static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}

void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}

static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}

static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}

static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_FAILED;
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}

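/* Set Device ID source values: 0x0000 disables the Device ID, 0x0001
 * selects a Bluetooth SIG assigned identifier and 0x0002 a USB
 * Implementer's Forum assigned identifier; anything else is invalid.
 */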
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	int err;
	__u16 source;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}

static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising %d", err);
	else
		bt_dev_dbg(hdev, "status %d", err);
}

static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}

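/* Runs from the hci_sync queue: a value of 0x02 selects connectable
 * advertising, any non-zero value (re)enables advertising on instance
 * 0x00 and zero disables it.
 */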
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}

static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

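/* LE scan interval and window are given in units of 0.625 ms; both
 * must lie within 0x0004-0x4000 and the window may not exceed the
 * interval.
 */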
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);

	return err;
}

static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}

static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	return hci_write_fast_connectable_sync(hdev, cp->val);
}

static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}

static int set_bredr_sync(struct hci_dev *hdev, void *data)
{
	int status;

	status = hci_write_fast_connectable_sync(hdev, false);

	if (!status)
		status = hci_update_scan_sync(hdev);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	if (!status)
		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);

	return status;
}

static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as their identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions apply when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
}

static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Force write of val */
	hci_dev_set_flag(hdev, HCI_SC_ENABLED);

	return hci_write_sc_support_sync(hdev, val);
}

static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

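/* Debug keys: 0x01 keeps debug keys around when link keys are loaded,
 * 0x02 additionally puts the controller into SSP debug key mode.
 */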
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

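/* Privacy 0x01 enables the use of resolvable private addresses with
 * the supplied IRK; 0x02 additionally sets the limited privacy flag.
 */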
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

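/* An IRK may only be loaded for a public identity address or for a
 * static random address, i.e. one with the two most significant bits
 * set.
 */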
static bool irk_is_valid(struct mgmt_irk_info *irk)
{
	switch (irk->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
}

static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type = le_addr_type(irk->addr.type);

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		if (irk->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    addr_type, irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
	if (key->initiator != 0x00 && key->initiator != 0x01)
		return false;

	switch (key->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
}

static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;
		u8 addr_type = le_addr_type(key->addr.type);

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			fallthrough;
		default:
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		if (key->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    addr_type, type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
				NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}

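/* Runs from the hci_sync queue: refresh the RSSI and, where not yet
 * known, the (maximum) TX power of the connection.
 */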
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16 handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}

static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid the client trying to guess when to poll again for
	 * information, calculate the conn info age as a random value
	 * between the min/max set in hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query controller to refresh cached values if they are too old or
	 * were never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}

static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_read_clock_sync(hdev, &hci_cp);

	/* Make sure connection still exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	cmd->user_data = conn;
	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */

	return hci_read_clock_sync(hdev, &hci_cp);
}

static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

/* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	hci_pend_le_list_del_init(params);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		else
			hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}

static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;
	ev.action = action;

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}

static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}

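/* Add Device actions: 0x00 = background scan and report only,
 * 0x01 = allow incoming connection, 0x02 = auto-connect. For BR/EDR
 * addresses only action 0x01 is supported.
 */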
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}

static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* The kernel internally uses conn_params with resolvable
		 * private addresses, but Remove Device allows only identity
		 * addresses. Make sure that is enforced before calling
		 * hci_conn_params_lookup().
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_free(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			hci_conn_params_free(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}

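/* Illustrative usage of the wildcard branch above (a userspace sketch,
 * not kernel code): every accept-list entry and LE connection parameter
 * can be cleared in one call by issuing Remove Device with BDADDR_ANY
 * and address type 0. The mgmt_send() helper below is hypothetical; the
 * command structure is the real struct mgmt_cp_remove_device.
 *
 *	struct mgmt_cp_remove_device cp;
 *
 *	memset(&cp, 0, sizeof(cp));	// bdaddr == BDADDR_ANY, type == 0
 *	mgmt_send(MGMT_OP_REMOVE_DEVICE, hdev_id, &cp, sizeof(cp));
 */
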
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}

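/* Worked example of the length check above, assuming the usual packed
 * wire layout of the mgmt structures (a 2 byte param_count header and
 * 15 bytes per struct mgmt_conn_param): a command carrying two entries
 * must be exactly struct_size(cp, params, 2) == 2 + 2 * 15 == 32 bytes.
 * Any other length is rejected with MGMT_STATUS_INVALID_PARAMS before
 * the parameters are touched.
 */
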
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

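/* A note on the transition test above: HCI_UNCONFIGURED and
 * is_configured() normally disagree, so the comparison is true exactly
 * when the configuration state has just flipped. For example, if an
 * unconfigured controller lacked only its external configuration and
 * this command supplied it, is_configured() now returns true while
 * HCI_UNCONFIGURED is still set; the index is then re-announced as a
 * configured one and power-on is scheduled.
 */
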
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

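	/* How the eir_len budgets above break down: every EIR field costs
	 * one length byte plus one type byte, so Class of Device is
	 * 1 + 1 + 3 == 5 bytes and each 16-byte SSP hash or randomizer is
	 * 1 + 1 + 16 == 18 bytes. A reply carrying one C/R pair therefore
	 * needs 5 + 18 + 18 bytes, and one carrying both the P-192 and
	 * P-256 pairs needs 5 + 18 + 18 + 18 + 18.
	 */
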
	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}

static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
				  struct mgmt_cp_read_local_oob_ext_data *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
			       cp, sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
				 read_local_oob_ext_data_complete);

	if (err < 0) {
		mgmt_pending_remove(cmd);
		return err;
	}

	return 0;
}

static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		     bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}

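/* The LE buffer budget chosen above follows the same one length byte
 * plus one type byte accounting used for the BR/EDR reply: the LE
 * Bluetooth Device Address field is 1 + 1 + 7 == 9 bytes (6 address
 * bytes plus an address-type byte), LE Role is 1 + 1 + 1 == 3, each
 * 16-byte SC confirmation/random value is 18, and the Flags field is 3,
 * giving 9 + 3 + 18 + 18 + 3 in total.
 */
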
static u32 get_supported_adv_flags(struct hci_dev *hdev)
{
	u32 flags = 0;

	flags |= MGMT_ADV_FLAG_CONNECTABLE;
	flags |= MGMT_ADV_FLAG_DISCOV;
	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
	flags |= MGMT_ADV_FLAG_APPEARANCE;
	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
	flags |= MGMT_ADV_PARAM_DURATION;
	flags |= MGMT_ADV_PARAM_TIMEOUT;
	flags |= MGMT_ADV_PARAM_INTERVALS;
	flags |= MGMT_ADV_PARAM_TX_POWER;
	flags |= MGMT_ADV_PARAM_SCAN_RSP;

	/* With extended advertising, the TX_POWER returned from Set Adv
	 * Param will always be valid.
	 */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
		flags |= MGMT_ADV_FLAG_TX_POWER;

	if (ext_adv_capable(hdev)) {
		flags |= MGMT_ADV_FLAG_SEC_1M;
		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;

		if (le_2m_capable(hdev))
			flags |= MGMT_ADV_FLAG_SEC_2M;

		if (le_coded_capable(hdev))
			flags |= MGMT_ADV_FLAG_SEC_CODED;
	}

	return flags;
}

static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = max_adv_len(hdev);
	rp->max_scan_rsp_len = max_adv_len(hdev);
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}

static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */

	return eir_append_local_name(hdev, buf, 0);
}

static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
			   bool is_adv_data)
{
	u8 max_len = max_adv_len(hdev);

	if (is_adv_data) {
		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
				 MGMT_ADV_FLAG_LIMITED_DISCOV |
				 MGMT_ADV_FLAG_MANAGED_FLAGS))
			max_len -= 3;

		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
			max_len -= 3;
	} else {
		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
			max_len -= calculate_name_len(hdev);

		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
			max_len -= 4;
	}

	return max_len;
}

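/* Worked example of the accounting above, assuming a legacy controller
 * where max_adv_len() is 31: requesting MGMT_ADV_FLAG_DISCOV (a 3 byte
 * kernel-managed Flags field) together with MGMT_ADV_FLAG_TX_POWER (a
 * 3 byte TX Power field) leaves 31 - 3 - 3 == 25 bytes of advertising
 * data for the caller. On the scan response side, an Appearance field
 * costs 4 bytes (1 length + 1 type + 2 value).
 */
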
static bool flags_managed(u32 adv_flags)
{
	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
			    MGMT_ADV_FLAG_LIMITED_DISCOV |
			    MGMT_ADV_FLAG_MANAGED_FLAGS);
}

static bool tx_power_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
}

static bool name_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
}

static bool appearance_managed(u32 adv_flags)
{
	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
}

static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		if (!cur_len)
			continue;

		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;

		/* If the current field length would exceed the total data
		 * length, then it's invalid.
		 */
		if (i + cur_len >= len)
			return false;
	}

	return true;
}

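/* Example of the TLV walk above: the buffer { 0x03, 0x03, 0x0d, 0x18 }
 * is one complete field. cur_len is 3, the type byte 0x03
 * (EIR_UUID16_ALL) is not kernel-managed, the field ends at offset
 * i + cur_len == 3 which is within len == 4, and the next iteration
 * starts at i == 4, terminating the loop. By contrast, { 0x05, 0x03 }
 * fails the i + cur_len >= len check because the declared field length
 * runs past the end of the buffer.
 */
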
static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
{
	u32 supported_flags, phy_flags;

	/* The current implementation only supports a subset of the specified
	 * flags. Also need to check mutual exclusiveness of sec flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
	if (adv_flags & ~supported_flags ||
	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
		return false;

	return true;
}

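/* The bit trick above rejects requests that select more than one
 * secondary PHY: phy_flags & -phy_flags isolates the lowest set bit, so
 * the XOR is non-zero exactly when a second bit remains. For example,
 * MGMT_ADV_FLAG_SEC_1M alone XORs to 0 and is accepted, while
 * MGMT_ADV_FLAG_SEC_1M | MGMT_ADV_FLAG_SEC_2M leaves the 2M bit set
 * after the XOR and is rejected as invalid parameters.
 */
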
static bool adv_busy(struct hci_dev *hdev)
{
	return pending_find(MGMT_OP_SET_LE, hdev);
}

static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
			     int err)
{
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance;

		if (!adv->pending)
			continue;

		if (!err) {
			adv->pending = false;
			continue;
		}

		instance = adv->instance;

		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(sk, hdev, instance);
	}

	hci_dev_unlock(hdev);
}

static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;
	struct mgmt_rp_add_advertising rp;

	memset(&rp, 0, sizeof(rp));

	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));

	add_adv_complete(hdev, cmd->sk, cp->instance, err);

	mgmt_pending_free(cmd);
}

static int add_advertising_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}

static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure.
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}

static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;

	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
}

static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we only verify that we
	 * have the bare minimum structure known when the interface was
	 * defined. Any extra parameters we don't know about will be ignored
	 * in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In the new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

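/* Illustrative two-step usage of the extended interface (a userspace
 * sketch; mgmt_send() is a hypothetical helper, while the command
 * structures and opcodes are the real ones): parameters are registered
 * first, then the payload is attached to the same instance.
 *
 *	struct mgmt_cp_add_ext_adv_params params = {
 *		.instance = 1,
 *		.flags = cpu_to_le32(MGMT_ADV_FLAG_CONNECTABLE),
 *	};
 *
 *	mgmt_send(MGMT_OP_ADD_EXT_ADV_PARAMS, idx, &params, sizeof(params));
 *	// ...then MGMT_OP_ADD_EXT_ADV_DATA with the same .instance carries
 *	// the actual advertising and scan response TLVs.
 */
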
static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
	struct mgmt_rp_add_advertising rp;

	add_adv_complete(hdev, cmd->sk, cp->instance, err);

	memset(&rp, 0, sizeof(rp));

	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}

static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
	int err;

	if (ext_adv_capable(hdev)) {
		err = hci_update_adv_data_sync(hdev, cp->instance);
		if (err)
			return err;

		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
		if (err)
			return err;

		return hci_enable_ext_advertising_sync(hdev, cp->instance);
	}

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}

static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In the new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, the instance is
	 * removed again via add_adv_complete() when the completion runs.
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void remove_advertising_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_advertising *cp = cmd->param;
	struct mgmt_rp_remove_advertising rp;

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	rp.instance = cp->instance;

	if (err)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}

static int remove_advertising_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_advertising *cp = cmd->param;
	int err;

	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
	if (err)
		return err;

	if (list_empty(&hdev->adv_instances))
		err = hci_disable_advertising_sync(hdev);

	return err;
}

static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 flags, supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	/* The current implementation only supports a subset of the specified
	 * flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (flags & ~supported_flags)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}

static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version, MGMT_READ_VERSION_SIZE,
		HCI_MGMT_NO_HDEV |
		HCI_MGMT_UNTRUSTED },
	{ read_commands, MGMT_READ_COMMANDS_SIZE,
		HCI_MGMT_NO_HDEV |
		HCI_MGMT_UNTRUSTED },
	{ read_index_list, MGMT_READ_INDEX_LIST_SIZE,
		HCI_MGMT_NO_HDEV |
		HCI_MGMT_UNTRUSTED },
	{ read_controller_info, MGMT_READ_INFO_SIZE,
		HCI_MGMT_UNTRUSTED },
	{ set_powered, MGMT_SETTING_SIZE },
	{ set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable, MGMT_SETTING_SIZE },
	{ set_fast_connectable, MGMT_SETTING_SIZE },
	{ set_bondable, MGMT_SETTING_SIZE },
	{ set_link_security, MGMT_SETTING_SIZE },
	{ set_ssp, MGMT_SETTING_SIZE },
	{ set_hs, MGMT_SETTING_SIZE },
	{ set_le, MGMT_SETTING_SIZE },
	{ set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid, MGMT_ADD_UUID_SIZE },
	{ remove_uuid, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
		HCI_MGMT_VAR_LEN },
	{ load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
		HCI_MGMT_VAR_LEN },
	{ disconnect, MGMT_DISCONNECT_SIZE },
	{ get_connections, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
		HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name, MGMT_CONFIRM_NAME_SIZE },
	{ block_device, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising, MGMT_SETTING_SIZE },
	{ set_bredr, MGMT_SETTING_SIZE },
	{ set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn, MGMT_SETTING_SIZE },
	{ set_debug_keys, MGMT_SETTING_SIZE },
	{ set_privacy, MGMT_SET_PRIVACY_SIZE },
	{ load_irks, MGMT_LOAD_IRKS_SIZE,
		HCI_MGMT_VAR_LEN },
	{ get_conn_info, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device, MGMT_ADD_DEVICE_SIZE },
	{ remove_device, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
		HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
		HCI_MGMT_NO_HDEV |
		HCI_MGMT_UNTRUSTED },
	{ read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
		HCI_MGMT_UNCONFIGURED |
		HCI_MGMT_UNTRUSTED },
	{ set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
		HCI_MGMT_UNCONFIGURED },
	{ set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
		HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
		HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
		HCI_MGMT_NO_HDEV |
		HCI_MGMT_UNTRUSTED },
	{ read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising, MGMT_ADD_ADVERTISING_SIZE,
		HCI_MGMT_VAR_LEN },
	{ remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info, MGMT_READ_EXT_INFO_SIZE,
		HCI_MGMT_UNTRUSTED },
	{ set_appearance, MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
		HCI_MGMT_VAR_LEN },
	{ set_wideband_speech, MGMT_SETTING_SIZE },
	{ read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
		HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
		HCI_MGMT_UNTRUSTED |
		HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
		HCI_MGMT_VAR_LEN |
		HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
		HCI_MGMT_UNTRUSTED },
	{ set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
		HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
		HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
		HCI_MGMT_VAR_LEN },
	{ get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor, MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
		HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
		HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
		HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
		MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
		HCI_MGMT_VAR_LEN },
	{ set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
		HCI_MGMT_VAR_LEN },
	{ mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send, MGMT_MESH_SEND_SIZE,
		HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
};

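/* The table above is indexed directly by management opcode: the entry at
 * index N handles opcode N, which is why slot 0x0000 is a NULL
 * placeholder. For example, MGMT_OP_SET_POWERED (0x0005) dispatches to
 * set_powered(). Commands marked HCI_MGMT_VAR_LEN carry variable-length
 * payloads, so their declared size acts as a minimum rather than an
 * exact match and the handler validates the rest itself.
 */
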
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}

void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}

void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}

void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let us use
	 * the appropriate INVALID_INDEX status. Otherwise use NOT_POWERED.
	 * We cover both scenarios here since later in mgmt_index_removed()
	 * any hci_conn callbacks will have already been triggered,
	 * potentially causing misleading DISCONNECTED status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
	struct mgmt_pending_cmd *cmd;
	u8 status;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return;

	if (err == -ERFKILL)
		status = MGMT_STATUS_RFKILLED;
	else
		status = MGMT_STATUS_FAILED;

	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

	mgmt_pending_remove(cmd);
}

void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}

static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
	switch (ltk->type) {
	case SMP_LTK:
	case SMP_LTK_RESPONDER:
		if (ltk->authenticated)
			return MGMT_LTK_AUTHENTICATED;
		return MGMT_LTK_UNAUTHENTICATED;
	case SMP_LTK_P256:
		if (ltk->authenticated)
			return MGMT_LTK_P256_AUTH;
		return MGMT_LTK_P256_UNAUTH;
	case SMP_LTK_P256_DEBUG:
		return MGMT_LTK_P256_DEBUG;
	}

	return MGMT_LTK_UNAUTHENTICATED;
}

void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't need their
	 * long term keys stored. Their addresses will change the next
	 * time around.
	 *
	 * Only make sure the long term key is stored when a remote device
	 * provides an identity address. If the remote identity is known,
	 * the long term keys are internally mapped to the identity
	 * address. So allow static random and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

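/* Example for the store_hint test above: bdaddr_t is stored in
 * little-endian order, so b[5] is the most significant address byte.
 * Static random addresses have the two top bits set (0b11), e.g.
 * C4:xx:xx:xx:xx:xx, and keep the caller's persistence hint. A
 * resolvable private address such as 4x:xx:xx:xx:xx:xx (top bits 0b01)
 * will change at the next rotation, so the key is flagged as not worth
 * storing.
 */
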
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * signature resolving keys to be stored. Their addresses will
	 * change on the next connection.
	 *
	 * Only when a remote device provides an identity address
	 * should the signature resolving key be stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}

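/* Forward a new set of LE connection parameters to user space. The
 * intervals are in units of 1.25 ms, the supervision timeout in units
 * of 10 ms, and the latency is the number of connection events the
 * peripheral may skip.
 */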
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_connected *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		return;

	/* Allocate a buffer for LE advertising data or BR/EDR EIR data */
	if (conn->le_adv_data_len > 0)
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + conn->le_adv_data_len);
	else
		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
				     eir_precalc_len(sizeof(conn->dev_class)));

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name)
			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);

		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
						    conn->dev_class, sizeof(conn->dev_class));
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event_skb(skb, NULL);
}

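/* Complete a pending Disconnect command and remember its socket so the
 * caller can skip it when sending the Device Disconnected event; that
 * socket already receives the command response. The caller is
 * responsible for dropping the reference taken here.
 */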
static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct sock **sk = data;

	cmd->cmd_complete(cmd, 0);

	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}

static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}

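/* Return true if the adapter is in the process of powering down, either
 * because HCI_POWERING_DOWN is set or because a Set Powered(off)
 * command is still pending.
 */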
bool mgmt_powering_down(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
		return true;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

	cp = cmd->param;
	if (!cp->val)
		return true;

	return false;
}

void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}

void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}

int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}

static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}

int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}

int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}

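/* Notify user space that authentication failed and complete any pending
 * Pair Device command; the command owner's socket is skipped when
 * sending the event since it already gets a dedicated command status.
 */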
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}

void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

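/* Take a reference to the socket of the first pending command found so
 * that it can be skipped when sending events and released by the
 * caller once done.
 */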
static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}

void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}

void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is an HCI command related to powering the
		 * HCI dev on or off, don't send any mgmt signals.
		 */
		if (hci_dev_test_flag(hdev, HCI_POWERING_DOWN))
			return;

		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}

static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}

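/* Walk the EIR/advertising data, which is a sequence of length-prefixed
 * fields (the length byte counts the type byte plus the data), and
 * check whether any advertised UUID matches the filter list. 16-bit and
 * 32-bit UUIDs are expanded to 128-bit form by patching them, in
 * little-endian order, into bytes 12-15 of the Bluetooth Base UUID
 * before comparing.
 */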
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}

static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If an RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * an RSSI smaller than the RSSI threshold will be dropped. If the
	 * quirk is set, let it through for further processing, as we might
	 * need to restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in the filter, results with
		 * no matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}

void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}

static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}

static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}

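/* Forward an advertisement to the mesh layer, but only if it carries at
 * least one of the AD types configured in hdev->mesh_ad_types; a zero
 * first entry means no filtering is requested and everything is
 * accepted.
 */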
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}

void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE the only exception is when we have pend_le_reports > 0, in
	 * which case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for the limited discoverable bit: CoD bit 13 for
		 * BR/EDR, or the LE_AD_LIMITED flag in the Flags AD type
		 * for LE.
		 */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);

			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}

void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}

void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		memset(&ev.addr, 0, sizeof(ev.addr));
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}

static struct hci_mgmt_chan chan = {
	.channel = HCI_CHANNEL_CONTROL,
	.handler_count = ARRAY_SIZE(mgmt_handlers),
	.handlers = mgmt_handlers,
	.hdev_init = mgmt_init_hdev,
};

int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}

void mgmt_cleanup(struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, true);
		} while (mesh_tx);
	}

	read_unlock(&hci_dev_list_lock);
}