1 | /* |
2 | BlueZ - Bluetooth protocol stack for Linux |
3 | Copyright (C) 2000-2001 Qualcomm Incorporated |
4 | Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org> |
5 | Copyright (C) 2010 Google Inc. |
6 | Copyright (C) 2011 ProFUSION Embedded Systems |
7 | Copyright (c) 2012 Code Aurora Forum. All rights reserved. |
8 | |
9 | Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> |
10 | |
11 | This program is free software; you can redistribute it and/or modify |
12 | it under the terms of the GNU General Public License version 2 as |
13 | published by the Free Software Foundation; |
14 | |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
16 | OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. |
18 | IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY |
19 | CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES |
20 | WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
21 | ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |
22 | OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
23 | |
24 | ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, |
25 | COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS |
26 | SOFTWARE IS DISCLAIMED. |
27 | */ |
28 | |
29 | /* Bluetooth L2CAP core. */ |
30 | |
31 | #include <linux/module.h> |
32 | |
33 | #include <linux/debugfs.h> |
34 | #include <linux/crc16.h> |
35 | #include <linux/filter.h> |
36 | |
37 | #include <net/bluetooth/bluetooth.h> |
38 | #include <net/bluetooth/hci_core.h> |
39 | #include <net/bluetooth/l2cap.h> |
40 | |
41 | #include "smp.h" |
42 | |
43 | #define LE_FLOWCTL_MAX_CREDITS 65535 |
44 | |
45 | bool disable_ertm; |
46 | bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED); |
47 | |
48 | static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD; |
49 | |
50 | static LIST_HEAD(chan_list); |
51 | static DEFINE_RWLOCK(chan_list_lock); |
52 | |
53 | static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, |
54 | u8 code, u8 ident, u16 dlen, void *data); |
55 | static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, |
56 | void *data); |
57 | static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size); |
58 | static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err); |
59 | |
60 | static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control, |
61 | struct sk_buff_head *skbs, u8 event); |
62 | static void l2cap_retrans_timeout(struct work_struct *work); |
63 | static void l2cap_monitor_timeout(struct work_struct *work); |
64 | static void l2cap_ack_timeout(struct work_struct *work); |
65 | |
66 | static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type) |
67 | { |
68 | if (link_type == LE_LINK) { |
69 | if (bdaddr_type == ADDR_LE_DEV_PUBLIC) |
70 | return BDADDR_LE_PUBLIC; |
71 | else |
72 | return BDADDR_LE_RANDOM; |
73 | } |
74 | |
75 | return BDADDR_BREDR; |
76 | } |
77 | |
78 | static inline u8 bdaddr_src_type(struct hci_conn *hcon) |
79 | { |
80 | return bdaddr_type(hcon->type, hcon->src_type); |
81 | } |
82 | |
83 | static inline u8 bdaddr_dst_type(struct hci_conn *hcon) |
84 | { |
85 | return bdaddr_type(hcon->type, hcon->dst_type); |
86 | } |
87 | |
88 | /* ---- L2CAP channels ---- */ |
89 | |
90 | static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, |
91 | u16 cid) |
92 | { |
93 | struct l2cap_chan *c; |
94 | |
95 | list_for_each_entry(c, &conn->chan_l, list) { |
96 | if (c->dcid == cid) |
97 | return c; |
98 | } |
99 | return NULL; |
100 | } |
101 | |
102 | static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, |
103 | u16 cid) |
104 | { |
105 | struct l2cap_chan *c; |
106 | |
107 | list_for_each_entry(c, &conn->chan_l, list) { |
108 | if (c->scid == cid) |
109 | return c; |
110 | } |
111 | return NULL; |
112 | } |
113 | |
114 | /* Find channel with given SCID. |
115 | * Returns a reference locked channel. |
116 | */ |
117 | static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, |
118 | u16 cid) |
119 | { |
120 | struct l2cap_chan *c; |
121 | |
122 | c = __l2cap_get_chan_by_scid(conn, cid); |
123 | if (c) { |
124 | /* Only lock if chan reference is not 0 */ |
125 | c = l2cap_chan_hold_unless_zero(c); |
126 | if (c) |
127 | l2cap_chan_lock(c); |
128 | } |
129 | |
130 | return c; |
131 | } |
132 | |
133 | /* Find channel with given DCID. |
134 | * Returns a reference locked channel. |
135 | */ |
136 | static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn, |
137 | u16 cid) |
138 | { |
139 | struct l2cap_chan *c; |
140 | |
141 | c = __l2cap_get_chan_by_dcid(conn, cid); |
142 | if (c) { |
143 | /* Only lock if chan reference is not 0 */ |
144 | c = l2cap_chan_hold_unless_zero(c); |
145 | if (c) |
146 | l2cap_chan_lock(c); |
147 | } |
148 | |
149 | return c; |
150 | } |
151 | |
152 | static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, |
153 | u8 ident) |
154 | { |
155 | struct l2cap_chan *c; |
156 | |
157 | list_for_each_entry(c, &conn->chan_l, list) { |
158 | if (c->ident == ident) |
159 | return c; |
160 | } |
161 | return NULL; |
162 | } |
163 | |
164 | static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src, |
165 | u8 src_type) |
166 | { |
167 | struct l2cap_chan *c; |
168 | |
169 | list_for_each_entry(c, &chan_list, global_l) { |
170 | if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR) |
171 | continue; |
172 | |
173 | if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR) |
174 | continue; |
175 | |
176 | if (c->sport == psm && !bacmp(&c->src, src)) |
177 | return c; |
178 | } |
179 | return NULL; |
180 | } |
181 | |
182 | int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm) |
183 | { |
184 | int err; |
185 | |
186 | write_lock(&chan_list_lock); |
187 | |
188 | if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) { |
189 | err = -EADDRINUSE; |
190 | goto done; |
191 | } |
192 | |
193 | if (psm) { |
194 | chan->psm = psm; |
195 | chan->sport = psm; |
196 | err = 0; |
197 | } else { |
198 | u16 p, start, end, incr; |
199 | |
200 | if (chan->src_type == BDADDR_BREDR) { |
201 | start = L2CAP_PSM_DYN_START; |
202 | end = L2CAP_PSM_AUTO_END; |
203 | incr = 2; |
204 | } else { |
205 | start = L2CAP_PSM_LE_DYN_START; |
206 | end = L2CAP_PSM_LE_DYN_END; |
207 | incr = 1; |
208 | } |
209 | |
210 | err = -EINVAL; |
211 | for (p = start; p <= end; p += incr) |
212 | if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src, |
213 | chan->src_type)) { |
214 | chan->psm = cpu_to_le16(p); |
215 | chan->sport = cpu_to_le16(p); |
216 | err = 0; |
217 | break; |
218 | } |
219 | } |
220 | |
221 | done: |
222 | write_unlock(&chan_list_lock); |
223 | return err; |
224 | } |
225 | EXPORT_SYMBOL_GPL(l2cap_add_psm); |
226 | |
227 | int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid) |
228 | { |
229 | write_lock(&chan_list_lock); |
230 | |
231 | /* Override the defaults (which are for conn-oriented) */ |
232 | chan->omtu = L2CAP_DEFAULT_MTU; |
233 | chan->chan_type = L2CAP_CHAN_FIXED; |
234 | |
235 | chan->scid = scid; |
236 | |
237 | write_unlock(&chan_list_lock); |
238 | |
239 | return 0; |
240 | } |
241 | |
242 | static u16 l2cap_alloc_cid(struct l2cap_conn *conn) |
243 | { |
244 | u16 cid, dyn_end; |
245 | |
246 | if (conn->hcon->type == LE_LINK) |
247 | dyn_end = L2CAP_CID_LE_DYN_END; |
248 | else |
249 | dyn_end = L2CAP_CID_DYN_END; |
250 | |
251 | for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) { |
252 | if (!__l2cap_get_chan_by_scid(conn, cid)) |
253 | return cid; |
254 | } |
255 | |
256 | return 0; |
257 | } |
258 | |
259 | static void l2cap_state_change(struct l2cap_chan *chan, int state) |
260 | { |
261 | BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state), |
262 | state_to_string(state)); |
263 | |
264 | chan->state = state; |
265 | chan->ops->state_change(chan, state, 0); |
266 | } |
267 | |
268 | static inline void l2cap_state_change_and_error(struct l2cap_chan *chan, |
269 | int state, int err) |
270 | { |
271 | chan->state = state; |
272 | chan->ops->state_change(chan, chan->state, err); |
273 | } |
274 | |
275 | static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err) |
276 | { |
277 | chan->ops->state_change(chan, chan->state, err); |
278 | } |
279 | |
280 | static void __set_retrans_timer(struct l2cap_chan *chan) |
281 | { |
282 | if (!delayed_work_pending(&chan->monitor_timer) && |
283 | chan->retrans_timeout) { |
284 | l2cap_set_timer(chan, &chan->retrans_timer, |
285 | secs_to_jiffies(chan->retrans_timeout)); |
286 | } |
287 | } |
288 | |
289 | static void __set_monitor_timer(struct l2cap_chan *chan) |
290 | { |
291 | __clear_retrans_timer(chan); |
292 | if (chan->monitor_timeout) { |
293 | l2cap_set_timer(chan, &chan->monitor_timer, |
294 | secs_to_jiffies(chan->monitor_timeout)); |
295 | } |
296 | } |
297 | |
298 | static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head, |
299 | u16 seq) |
300 | { |
301 | struct sk_buff *skb; |
302 | |
303 | skb_queue_walk(head, skb) { |
304 | if (bt_cb(skb)->l2cap.txseq == seq) |
305 | return skb; |
306 | } |
307 | |
308 | return NULL; |
309 | } |
310 | |
311 | /* ---- L2CAP sequence number lists ---- */ |
312 | |
313 | /* For ERTM, ordered lists of sequence numbers must be tracked for |
314 | * SREJ requests that are received and for frames that are to be |
315 | * retransmitted. These seq_list functions implement a singly-linked |
316 | * list in an array, where membership in the list can also be checked |
317 | * in constant time. Items can also be added to the tail of the list |
318 | * and removed from the head in constant time, without further memory |
319 | * allocs or frees. |
320 | */ |
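/* Illustrative sketch (not from the original source): how the array-backed
 * list behaves for a few hypothetical sequence numbers, assuming a
 * negotiated transmit window of 8, so the backing array has 8 slots and
 * mask == 7:
 *
 *	l2cap_seq_list_init(&list, 8);     head == tail == CLEAR
 *	l2cap_seq_list_append(&list, 4);   head == tail == 4, list[4] == TAIL
 *	l2cap_seq_list_append(&list, 9);   list[4] == 9, tail == 9, list[1] == TAIL
 *	l2cap_seq_list_contains(&list, 9); true, since list[9 & 7] != CLEAR
 *	l2cap_seq_list_pop(&list);         returns 4, head becomes 9
 */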
321 | |
322 | static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size) |
323 | { |
324 | size_t alloc_size, i; |
325 | |
326 | /* Allocated size is a power of 2 to map sequence numbers |
327 | * (which may be up to 14 bits) into a smaller array that is |
328 | * sized for the negotiated ERTM transmit windows. |
329 | */ |
330 | alloc_size = roundup_pow_of_two(size); |
331 | |
332 | seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL); |
333 | if (!seq_list->list) |
334 | return -ENOMEM; |
335 | |
336 | seq_list->mask = alloc_size - 1; |
337 | seq_list->head = L2CAP_SEQ_LIST_CLEAR; |
338 | seq_list->tail = L2CAP_SEQ_LIST_CLEAR; |
339 | for (i = 0; i < alloc_size; i++) |
340 | seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR; |
341 | |
342 | return 0; |
343 | } |
344 | |
345 | static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list) |
346 | { |
347 | kfree(seq_list->list); |
348 | } |
349 | |
350 | static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list, |
351 | u16 seq) |
352 | { |
353 | /* Constant-time check for list membership */ |
354 | return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR; |
355 | } |
356 | |
357 | static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list) |
358 | { |
359 | u16 seq = seq_list->head; |
360 | u16 mask = seq_list->mask; |
361 | |
362 | seq_list->head = seq_list->list[seq & mask]; |
363 | seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR; |
364 | |
365 | if (seq_list->head == L2CAP_SEQ_LIST_TAIL) { |
366 | seq_list->head = L2CAP_SEQ_LIST_CLEAR; |
367 | seq_list->tail = L2CAP_SEQ_LIST_CLEAR; |
368 | } |
369 | |
370 | return seq; |
371 | } |
372 | |
373 | static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list) |
374 | { |
375 | u16 i; |
376 | |
377 | if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) |
378 | return; |
379 | |
380 | for (i = 0; i <= seq_list->mask; i++) |
381 | seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR; |
382 | |
383 | seq_list->head = L2CAP_SEQ_LIST_CLEAR; |
384 | seq_list->tail = L2CAP_SEQ_LIST_CLEAR; |
385 | } |
386 | |
387 | static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq) |
388 | { |
389 | u16 mask = seq_list->mask; |
390 | |
391 | /* All appends happen in constant time */ |
392 | |
393 | if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR) |
394 | return; |
395 | |
396 | if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR) |
397 | seq_list->head = seq; |
398 | else |
399 | seq_list->list[seq_list->tail & mask] = seq; |
400 | |
401 | seq_list->tail = seq; |
402 | seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL; |
403 | } |
404 | |
405 | static void l2cap_chan_timeout(struct work_struct *work) |
406 | { |
407 | struct l2cap_chan *chan = container_of(work, struct l2cap_chan, |
408 | chan_timer.work); |
409 | struct l2cap_conn *conn = chan->conn; |
410 | int reason; |
411 | |
412 | BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); |
413 | |
414 | if (!conn) |
415 | return; |
416 | |
417 | mutex_lock(&conn->lock); |
418 | /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling |
419 | * this work. No need to call l2cap_chan_hold(chan) here again. |
420 | */ |
421 | l2cap_chan_lock(chan); |
422 | |
423 | if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG) |
424 | reason = ECONNREFUSED; |
425 | else if (chan->state == BT_CONNECT && |
426 | chan->sec_level != BT_SECURITY_SDP) |
427 | reason = ECONNREFUSED; |
428 | else |
429 | reason = ETIMEDOUT; |
430 | |
431 | l2cap_chan_close(chan, reason); |
432 | |
433 | chan->ops->close(chan); |
434 | |
435 | l2cap_chan_unlock(chan); |
436 | l2cap_chan_put(chan); |
437 | |
438 | mutex_unlock(&conn->lock); |
439 | } |
440 | |
441 | struct l2cap_chan *l2cap_chan_create(void) |
442 | { |
443 | struct l2cap_chan *chan; |
444 | |
445 | chan = kzalloc(sizeof(*chan), GFP_ATOMIC); |
446 | if (!chan) |
447 | return NULL; |
448 | |
449 | skb_queue_head_init(&chan->tx_q); |
450 | skb_queue_head_init(&chan->srej_q); |
451 | mutex_init(&chan->lock); |
452 | |
453 | /* Set default lock nesting level */ |
454 | atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL); |
455 | |
456 | /* Available receive buffer space is initially unknown */ |
457 | chan->rx_avail = -1; |
458 | |
459 | write_lock(&chan_list_lock); |
460 | list_add(&chan->global_l, &chan_list); |
461 | write_unlock(&chan_list_lock); |
462 | |
463 | INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout); |
464 | INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout); |
465 | INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout); |
466 | INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout); |
467 | |
468 | chan->state = BT_OPEN; |
469 | |
470 | kref_init(&chan->kref); |
471 | |
472 | /* This flag is cleared in l2cap_chan_ready() */ |
473 | set_bit(CONF_NOT_COMPLETE, &chan->conf_state); |
474 | |
475 | BT_DBG("chan %p", chan); |
476 | |
477 | return chan; |
478 | } |
479 | EXPORT_SYMBOL_GPL(l2cap_chan_create); |
480 | |
481 | static void l2cap_chan_destroy(struct kref *kref) |
482 | { |
483 | struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref); |
484 | |
485 | BT_DBG("chan %p", chan); |
486 | |
487 | write_lock(&chan_list_lock); |
488 | list_del(&chan->global_l); |
489 | write_unlock(&chan_list_lock); |
490 | |
491 | kfree(chan); |
492 | } |
493 | |
494 | void l2cap_chan_hold(struct l2cap_chan *c) |
495 | { |
496 | BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref)); |
497 | |
498 | kref_get(&c->kref); |
499 | } |
500 | |
501 | struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c) |
502 | { |
503 | BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref)); |
504 | |
505 | if (!kref_get_unless_zero(&c->kref)) |
506 | return NULL; |
507 | |
508 | return c; |
509 | } |
510 | |
511 | void l2cap_chan_put(struct l2cap_chan *c) |
512 | { |
513 | BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref)); |
514 | |
515 | kref_put(&c->kref, l2cap_chan_destroy); |
516 | } |
517 | EXPORT_SYMBOL_GPL(l2cap_chan_put); |
518 | |
519 | void l2cap_chan_set_defaults(struct l2cap_chan *chan) |
520 | { |
521 | chan->fcs = L2CAP_FCS_CRC16; |
522 | chan->max_tx = L2CAP_DEFAULT_MAX_TX; |
523 | chan->tx_win = L2CAP_DEFAULT_TX_WINDOW; |
524 | chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; |
525 | chan->remote_max_tx = chan->max_tx; |
526 | chan->remote_tx_win = chan->tx_win; |
527 | chan->ack_win = L2CAP_DEFAULT_TX_WINDOW; |
528 | chan->sec_level = BT_SECURITY_LOW; |
529 | chan->flush_to = L2CAP_DEFAULT_FLUSH_TO; |
530 | chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO; |
531 | chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO; |
532 | |
533 | chan->conf_state = 0; |
534 | set_bit(CONF_NOT_COMPLETE, &chan->conf_state); |
535 | |
536 | set_bit(FLAG_FORCE_ACTIVE, &chan->flags); |
537 | } |
538 | EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults); |
539 | |
540 | static __u16 l2cap_le_rx_credits(struct l2cap_chan *chan) |
541 | { |
542 | size_t sdu_len = chan->sdu ? chan->sdu->len : 0; |
543 | |
544 | if (chan->mps == 0) |
545 | return 0; |
546 | |
547 | /* If we don't know the available space in the receiver buffer, give |
548 | * enough credits for a full packet. |
549 | */ |
550 | if (chan->rx_avail == -1) |
551 | return (chan->imtu / chan->mps) + 1; |
552 | |
553 | /* If we know how much space is available in the receive buffer, give |
554 | * out as many credits as would fill the buffer. |
555 | */ |
556 | if (chan->rx_avail <= sdu_len) |
557 | return 0; |
558 | |
559 | return DIV_ROUND_UP(chan->rx_avail - sdu_len, chan->mps); |
560 | } |
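/* Worked example (hypothetical numbers, not from the original source):
 * with imtu == 672, mps == 230 and rx_avail unknown (-1), the function
 * returns 672 / 230 + 1 == 3 credits, enough for one full SDU. If the
 * receive buffer reports rx_avail == 1000 while 100 bytes of a partial
 * SDU are already queued, it returns DIV_ROUND_UP(900, 230) == 4.
 */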
561 | |
562 | static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits) |
563 | { |
564 | chan->sdu = NULL; |
565 | chan->sdu_last_frag = NULL; |
566 | chan->sdu_len = 0; |
567 | chan->tx_credits = tx_credits; |
568 | /* Derive MPS from connection MTU to stop HCI fragmentation */ |
569 | chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE); |
570 | chan->rx_credits = l2cap_le_rx_credits(chan); |
571 | |
572 | skb_queue_head_init(&chan->tx_q); |
573 | } |
574 | |
575 | static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits) |
576 | { |
577 | l2cap_le_flowctl_init(chan, tx_credits); |
578 | |
579 | /* L2CAP implementations shall support a minimum MPS of 64 octets */ |
580 | if (chan->mps < L2CAP_ECRED_MIN_MPS) { |
581 | chan->mps = L2CAP_ECRED_MIN_MPS; |
582 | chan->rx_credits = l2cap_le_rx_credits(chan); |
583 | } |
584 | } |
585 | |
586 | void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) |
587 | { |
588 | BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, |
589 | __le16_to_cpu(chan->psm), chan->dcid); |
590 | |
591 | conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM; |
592 | |
593 | chan->conn = conn; |
594 | |
595 | switch (chan->chan_type) { |
596 | case L2CAP_CHAN_CONN_ORIENTED: |
597 | /* Alloc CID for connection-oriented socket */ |
598 | chan->scid = l2cap_alloc_cid(conn); |
599 | if (conn->hcon->type == ACL_LINK) |
600 | chan->omtu = L2CAP_DEFAULT_MTU; |
601 | break; |
602 | |
603 | case L2CAP_CHAN_CONN_LESS: |
604 | /* Connectionless socket */ |
605 | chan->scid = L2CAP_CID_CONN_LESS; |
606 | chan->dcid = L2CAP_CID_CONN_LESS; |
607 | chan->omtu = L2CAP_DEFAULT_MTU; |
608 | break; |
609 | |
610 | case L2CAP_CHAN_FIXED: |
611 | /* Caller will set CID and CID specific MTU values */ |
612 | break; |
613 | |
614 | default: |
615 | /* Raw socket can send/recv signalling messages only */ |
616 | chan->scid = L2CAP_CID_SIGNALING; |
617 | chan->dcid = L2CAP_CID_SIGNALING; |
618 | chan->omtu = L2CAP_DEFAULT_MTU; |
619 | } |
620 | |
621 | chan->local_id = L2CAP_BESTEFFORT_ID; |
622 | chan->local_stype = L2CAP_SERV_BESTEFFORT; |
623 | chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE; |
624 | chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME; |
625 | chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT; |
626 | chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO; |
627 | |
628 | l2cap_chan_hold(chan); |
629 | |
630 | /* Only keep a reference for fixed channels if they requested it */ |
631 | if (chan->chan_type != L2CAP_CHAN_FIXED || |
632 | test_bit(FLAG_HOLD_HCI_CONN, &chan->flags)) |
633 | hci_conn_hold(conn->hcon); |
634 | |
635 | /* Append to the list since the order matters for ECRED */ |
636 | list_add_tail(&chan->list, &conn->chan_l); |
637 | } |
638 | |
639 | void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) |
640 | { |
641 | mutex_lock(&conn->lock); |
642 | __l2cap_chan_add(conn, chan); |
643 | mutex_unlock(&conn->lock); |
644 | } |
645 | |
646 | void l2cap_chan_del(struct l2cap_chan *chan, int err) |
647 | { |
648 | struct l2cap_conn *conn = chan->conn; |
649 | |
650 | __clear_chan_timer(chan); |
651 | |
652 | BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err, |
653 | state_to_string(chan->state)); |
654 | |
655 | chan->ops->teardown(chan, err); |
656 | |
657 | if (conn) { |
658 | /* Delete from channel list */ |
659 | list_del(&chan->list); |
660 | |
661 | l2cap_chan_put(chan); |
662 | |
663 | chan->conn = NULL; |
664 | |
665 | /* Reference was only held for non-fixed channels or |
666 | * fixed channels that explicitly requested it using the |
667 | * FLAG_HOLD_HCI_CONN flag. |
668 | */ |
669 | if (chan->chan_type != L2CAP_CHAN_FIXED || |
670 | test_bit(FLAG_HOLD_HCI_CONN, &chan->flags)) |
671 | hci_conn_drop(conn->hcon); |
672 | } |
673 | |
674 | if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state)) |
675 | return; |
676 | |
677 | switch (chan->mode) { |
678 | case L2CAP_MODE_BASIC: |
679 | break; |
680 | |
681 | case L2CAP_MODE_LE_FLOWCTL: |
682 | case L2CAP_MODE_EXT_FLOWCTL: |
683 | skb_queue_purge(&chan->tx_q); |
684 | break; |
685 | |
686 | case L2CAP_MODE_ERTM: |
687 | __clear_retrans_timer(chan); |
688 | __clear_monitor_timer(chan); |
689 | __clear_ack_timer(chan); |
690 | |
691 | skb_queue_purge(&chan->srej_q); |
692 | |
693 | l2cap_seq_list_free(&chan->srej_list); |
694 | l2cap_seq_list_free(&chan->retrans_list); |
695 | fallthrough; |
696 | |
697 | case L2CAP_MODE_STREAMING: |
698 | skb_queue_purge(&chan->tx_q); |
699 | break; |
700 | } |
701 | } |
702 | EXPORT_SYMBOL_GPL(l2cap_chan_del); |
703 | |
704 | static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id, |
705 | l2cap_chan_func_t func, void *data) |
706 | { |
707 | struct l2cap_chan *chan, *l; |
708 | |
709 | list_for_each_entry_safe(chan, l, &conn->chan_l, list) { |
710 | if (chan->ident == id) |
711 | func(chan, data); |
712 | } |
713 | } |
714 | |
715 | static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func, |
716 | void *data) |
717 | { |
718 | struct l2cap_chan *chan; |
719 | |
720 | list_for_each_entry(chan, &conn->chan_l, list) { |
721 | func(chan, data); |
722 | } |
723 | } |
724 | |
725 | void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func, |
726 | void *data) |
727 | { |
728 | if (!conn) |
729 | return; |
730 | |
731 | mutex_lock(&conn->lock); |
732 | __l2cap_chan_list(conn, func, data); |
733 | mutex_unlock(&conn->lock); |
734 | } |
735 | |
736 | EXPORT_SYMBOL_GPL(l2cap_chan_list); |
737 | |
738 | static void l2cap_conn_update_id_addr(struct work_struct *work) |
739 | { |
740 | struct l2cap_conn *conn = container_of(work, struct l2cap_conn, |
741 | id_addr_timer.work); |
742 | struct hci_conn *hcon = conn->hcon; |
743 | struct l2cap_chan *chan; |
744 | |
745 | mutex_lock(&conn->lock); |
746 | |
747 | list_for_each_entry(chan, &conn->chan_l, list) { |
748 | l2cap_chan_lock(chan); |
749 | bacpy(&chan->dst, &hcon->dst); |
750 | chan->dst_type = bdaddr_dst_type(hcon); |
751 | l2cap_chan_unlock(chan); |
752 | } |
753 | |
754 | mutex_unlock(&conn->lock); |
755 | } |
756 | |
757 | static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan) |
758 | { |
759 | struct l2cap_conn *conn = chan->conn; |
760 | struct l2cap_le_conn_rsp rsp; |
761 | u16 result; |
762 | |
763 | if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) |
764 | result = L2CAP_CR_LE_AUTHORIZATION; |
765 | else |
766 | result = L2CAP_CR_LE_BAD_PSM; |
767 | |
768 | l2cap_state_change(chan, BT_DISCONN); |
769 | |
770 | rsp.dcid = cpu_to_le16(chan->scid); |
771 | rsp.mtu = cpu_to_le16(chan->imtu); |
772 | rsp.mps = cpu_to_le16(chan->mps); |
773 | rsp.credits = cpu_to_le16(chan->rx_credits); |
774 | rsp.result = cpu_to_le16(result); |
775 | |
776 | l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), |
777 | &rsp); |
778 | } |
779 | |
780 | static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan) |
781 | { |
782 | l2cap_state_change(chan, BT_DISCONN); |
783 | |
784 | __l2cap_ecred_conn_rsp_defer(chan); |
785 | } |
786 | |
787 | static void l2cap_chan_connect_reject(struct l2cap_chan *chan) |
788 | { |
789 | struct l2cap_conn *conn = chan->conn; |
790 | struct l2cap_conn_rsp rsp; |
791 | u16 result; |
792 | |
793 | if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) |
794 | result = L2CAP_CR_SEC_BLOCK; |
795 | else |
796 | result = L2CAP_CR_BAD_PSM; |
797 | |
798 | l2cap_state_change(chan, BT_DISCONN); |
799 | |
800 | rsp.scid = cpu_to_le16(chan->dcid); |
801 | rsp.dcid = cpu_to_le16(chan->scid); |
802 | rsp.result = cpu_to_le16(result); |
803 | rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); |
804 | |
805 | l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); |
806 | } |
807 | |
808 | void l2cap_chan_close(struct l2cap_chan *chan, int reason) |
809 | { |
810 | struct l2cap_conn *conn = chan->conn; |
811 | |
812 | BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); |
813 | |
814 | switch (chan->state) { |
815 | case BT_LISTEN: |
816 | chan->ops->teardown(chan, 0); |
817 | break; |
818 | |
819 | case BT_CONNECTED: |
820 | case BT_CONFIG: |
821 | if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) { |
822 | __set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); |
823 | l2cap_send_disconn_req(chan, reason); |
824 | } else |
825 | l2cap_chan_del(chan, reason); |
826 | break; |
827 | |
828 | case BT_CONNECT2: |
829 | if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) { |
830 | if (conn->hcon->type == ACL_LINK) |
831 | l2cap_chan_connect_reject(chan); |
832 | else if (conn->hcon->type == LE_LINK) { |
833 | switch (chan->mode) { |
834 | case L2CAP_MODE_LE_FLOWCTL: |
835 | l2cap_chan_le_connect_reject(chan); |
836 | break; |
837 | case L2CAP_MODE_EXT_FLOWCTL: |
838 | l2cap_chan_ecred_connect_reject(chan); |
839 | return; |
840 | } |
841 | } |
842 | } |
843 | |
844 | l2cap_chan_del(chan, reason); |
845 | break; |
846 | |
847 | case BT_CONNECT: |
848 | case BT_DISCONN: |
849 | l2cap_chan_del(chan, reason); |
850 | break; |
851 | |
852 | default: |
853 | chan->ops->teardown(chan, 0); |
854 | break; |
855 | } |
856 | } |
857 | EXPORT_SYMBOL(l2cap_chan_close); |
858 | |
859 | static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan) |
860 | { |
861 | switch (chan->chan_type) { |
862 | case L2CAP_CHAN_RAW: |
863 | switch (chan->sec_level) { |
864 | case BT_SECURITY_HIGH: |
865 | case BT_SECURITY_FIPS: |
866 | return HCI_AT_DEDICATED_BONDING_MITM; |
867 | case BT_SECURITY_MEDIUM: |
868 | return HCI_AT_DEDICATED_BONDING; |
869 | default: |
870 | return HCI_AT_NO_BONDING; |
871 | } |
872 | break; |
873 | case L2CAP_CHAN_CONN_LESS: |
874 | if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) { |
875 | if (chan->sec_level == BT_SECURITY_LOW) |
876 | chan->sec_level = BT_SECURITY_SDP; |
877 | } |
878 | if (chan->sec_level == BT_SECURITY_HIGH || |
879 | chan->sec_level == BT_SECURITY_FIPS) |
880 | return HCI_AT_NO_BONDING_MITM; |
881 | else |
882 | return HCI_AT_NO_BONDING; |
883 | break; |
884 | case L2CAP_CHAN_CONN_ORIENTED: |
885 | if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) { |
886 | if (chan->sec_level == BT_SECURITY_LOW) |
887 | chan->sec_level = BT_SECURITY_SDP; |
888 | |
889 | if (chan->sec_level == BT_SECURITY_HIGH || |
890 | chan->sec_level == BT_SECURITY_FIPS) |
891 | return HCI_AT_NO_BONDING_MITM; |
892 | else |
893 | return HCI_AT_NO_BONDING; |
894 | } |
895 | fallthrough; |
896 | |
897 | default: |
898 | switch (chan->sec_level) { |
899 | case BT_SECURITY_HIGH: |
900 | case BT_SECURITY_FIPS: |
901 | return HCI_AT_GENERAL_BONDING_MITM; |
902 | case BT_SECURITY_MEDIUM: |
903 | return HCI_AT_GENERAL_BONDING; |
904 | default: |
905 | return HCI_AT_NO_BONDING; |
906 | } |
907 | break; |
908 | } |
909 | } |
910 | |
911 | /* Service level security */ |
912 | int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator) |
913 | { |
914 | struct l2cap_conn *conn = chan->conn; |
915 | __u8 auth_type; |
916 | |
917 | if (conn->hcon->type == LE_LINK) |
918 | return smp_conn_security(conn->hcon, chan->sec_level); |
919 | |
920 | auth_type = l2cap_get_auth_type(chan); |
921 | |
922 | return hci_conn_security(conn->hcon, chan->sec_level, auth_type, |
923 | initiator); |
924 | } |
925 | |
926 | static u8 l2cap_get_ident(struct l2cap_conn *conn) |
927 | { |
928 | u8 id; |
929 | |
930 | /* Get next available identifier. |
931 | * 1 - 128 are used by kernel. |
932 | * 129 - 199 are reserved. |
933 | * 200 - 254 are used by utilities like l2ping, etc. |
934 | */ |
935 | |
936 | mutex_lock(&conn->ident_lock); |
937 | |
938 | if (++conn->tx_ident > 128) |
939 | conn->tx_ident = 1; |
940 | |
941 | id = conn->tx_ident; |
942 | |
943 | mutex_unlock(&conn->ident_lock); |
944 | |
945 | return id; |
946 | } |
947 | |
948 | static void l2cap_send_acl(struct l2cap_conn *conn, struct sk_buff *skb, |
949 | u8 flags) |
950 | { |
951 | /* Check if the hcon is still valid before attempting to send */ |
952 | if (hci_conn_valid(conn->hcon->hdev, conn->hcon)) |
953 | hci_send_acl(conn->hchan, skb, flags); |
954 | else |
955 | kfree_skb(skb); |
956 | } |
957 | |
958 | static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, |
959 | void *data) |
960 | { |
961 | struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data); |
962 | u8 flags; |
963 | |
964 | BT_DBG("code 0x%2.2x", code); |
965 | |
966 | if (!skb) |
967 | return; |
968 | |
969 | /* Use NO_FLUSH if supported or we have an LE link (which does |
970 | * not support auto-flushing packets) */ |
971 | if (lmp_no_flush_capable(conn->hcon->hdev) || |
972 | conn->hcon->type == LE_LINK) |
973 | flags = ACL_START_NO_FLUSH; |
974 | else |
975 | flags = ACL_START; |
976 | |
977 | bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON; |
978 | skb->priority = HCI_PRIO_MAX; |
979 | |
980 | l2cap_send_acl(conn, skb, flags); |
981 | } |
982 | |
983 | static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb) |
984 | { |
985 | struct hci_conn *hcon = chan->conn->hcon; |
986 | u16 flags; |
987 | |
988 | BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len, |
989 | skb->priority); |
990 | |
991 | /* Use NO_FLUSH for LE links (where this is the only option) or |
992 | * if the BR/EDR link supports it and flushing has not been |
993 | * explicitly requested (through FLAG_FLUSHABLE). |
994 | */ |
995 | if (hcon->type == LE_LINK || |
996 | (!test_bit(FLAG_FLUSHABLE, &chan->flags) && |
997 | lmp_no_flush_capable(hcon->hdev))) |
998 | flags = ACL_START_NO_FLUSH; |
999 | else |
1000 | flags = ACL_START; |
1001 | |
1002 | bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags); |
1003 | hci_send_acl(chan->conn->hchan, skb, flags); |
1004 | } |
1005 | |
1006 | static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control) |
1007 | { |
1008 | control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT; |
1009 | control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT; |
1010 | |
1011 | if (enh & L2CAP_CTRL_FRAME_TYPE) { |
1012 | /* S-Frame */ |
1013 | control->sframe = 1; |
1014 | control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT; |
1015 | control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT; |
1016 | |
1017 | control->sar = 0; |
1018 | control->txseq = 0; |
1019 | } else { |
1020 | /* I-Frame */ |
1021 | control->sframe = 0; |
1022 | control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT; |
1023 | control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT; |
1024 | |
1025 | control->poll = 0; |
1026 | control->super = 0; |
1027 | } |
1028 | } |
1029 | |
1030 | static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control) |
1031 | { |
1032 | control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT; |
1033 | control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT; |
1034 | |
1035 | if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) { |
1036 | /* S-Frame */ |
1037 | control->sframe = 1; |
1038 | control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT; |
1039 | control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT; |
1040 | |
1041 | control->sar = 0; |
1042 | control->txseq = 0; |
1043 | } else { |
1044 | /* I-Frame */ |
1045 | control->sframe = 0; |
1046 | control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT; |
1047 | control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT; |
1048 | |
1049 | control->poll = 0; |
1050 | control->super = 0; |
1051 | } |
1052 | } |
1053 | |
1054 | static inline void __unpack_control(struct l2cap_chan *chan, |
1055 | struct sk_buff *skb) |
1056 | { |
1057 | if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { |
1058 | __unpack_extended_control(get_unaligned_le32(skb->data), |
1059 | &bt_cb(skb)->l2cap); |
1060 | skb_pull(skb, L2CAP_EXT_CTRL_SIZE); |
1061 | } else { |
1062 | __unpack_enhanced_control(get_unaligned_le16(skb->data), |
1063 | &bt_cb(skb)->l2cap); |
1064 | skb_pull(skb, L2CAP_ENH_CTRL_SIZE); |
1065 | } |
1066 | } |
1067 | |
1068 | static u32 __pack_extended_control(struct l2cap_ctrl *control) |
1069 | { |
1070 | u32 packed; |
1071 | |
1072 | packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT; |
1073 | packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT; |
1074 | |
1075 | if (control->sframe) { |
1076 | packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT; |
1077 | packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT; |
1078 | packed |= L2CAP_EXT_CTRL_FRAME_TYPE; |
1079 | } else { |
1080 | packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT; |
1081 | packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT; |
1082 | } |
1083 | |
1084 | return packed; |
1085 | } |
1086 | |
1087 | static u16 __pack_enhanced_control(struct l2cap_ctrl *control) |
1088 | { |
1089 | u16 packed; |
1090 | |
1091 | packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT; |
1092 | packed |= control->final << L2CAP_CTRL_FINAL_SHIFT; |
1093 | |
1094 | if (control->sframe) { |
1095 | packed |= control->poll << L2CAP_CTRL_POLL_SHIFT; |
1096 | packed |= control->super << L2CAP_CTRL_SUPER_SHIFT; |
1097 | packed |= L2CAP_CTRL_FRAME_TYPE; |
1098 | } else { |
1099 | packed |= control->sar << L2CAP_CTRL_SAR_SHIFT; |
1100 | packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT; |
1101 | } |
1102 | |
1103 | return packed; |
1104 | } |
1105 | |
1106 | static inline void __pack_control(struct l2cap_chan *chan, |
1107 | struct l2cap_ctrl *control, |
1108 | struct sk_buff *skb) |
1109 | { |
1110 | if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { |
1111 | put_unaligned_le32(__pack_extended_control(control), |
1112 | skb->data + L2CAP_HDR_SIZE); |
1113 | } else { |
1114 | put_unaligned_le16(__pack_enhanced_control(control), |
1115 | skb->data + L2CAP_HDR_SIZE); |
1116 | } |
1117 | } |
1118 | |
1119 | static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan) |
1120 | { |
1121 | if (test_bit(FLAG_EXT_CTRL, &chan->flags)) |
1122 | return L2CAP_EXT_HDR_SIZE; |
1123 | else |
1124 | return L2CAP_ENH_HDR_SIZE; |
1125 | } |
1126 | |
1127 | static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan, |
1128 | u32 control) |
1129 | { |
1130 | struct sk_buff *skb; |
1131 | struct l2cap_hdr *lh; |
1132 | int hlen = __ertm_hdr_size(chan); |
1133 | |
1134 | if (chan->fcs == L2CAP_FCS_CRC16) |
1135 | hlen += L2CAP_FCS_SIZE; |
1136 | |
1137 | skb = bt_skb_alloc(hlen, GFP_KERNEL); |
1138 | |
1139 | if (!skb) |
1140 | return ERR_PTR(-ENOMEM); |
1141 | |
1142 | lh = skb_put(skb, L2CAP_HDR_SIZE); |
1143 | lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE); |
1144 | lh->cid = cpu_to_le16(chan->dcid); |
1145 | |
1146 | if (test_bit(FLAG_EXT_CTRL, &chan->flags)) |
1147 | put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE)); |
1148 | else |
1149 | put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE)); |
1150 | |
1151 | if (chan->fcs == L2CAP_FCS_CRC16) { |
1152 | u16 fcs = crc16(0, (u8 *)skb->data, skb->len); |
1153 | put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); |
1154 | } |
1155 | |
1156 | skb->priority = HCI_PRIO_MAX; |
1157 | return skb; |
1158 | } |
1159 | |
1160 | static void l2cap_send_sframe(struct l2cap_chan *chan, |
1161 | struct l2cap_ctrl *control) |
1162 | { |
1163 | struct sk_buff *skb; |
1164 | u32 control_field; |
1165 | |
1166 | BT_DBG("chan %p, control %p", chan, control); |
1167 | |
1168 | if (!control->sframe) |
1169 | return; |
1170 | |
1171 | if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) && |
1172 | !control->poll) |
1173 | control->final = 1; |
1174 | |
1175 | if (control->super == L2CAP_SUPER_RR) |
1176 | clear_bit(CONN_RNR_SENT, &chan->conn_state); |
1177 | else if (control->super == L2CAP_SUPER_RNR) |
1178 | set_bit(CONN_RNR_SENT, &chan->conn_state); |
1179 | |
1180 | if (control->super != L2CAP_SUPER_SREJ) { |
1181 | chan->last_acked_seq = control->reqseq; |
1182 | __clear_ack_timer(chan); |
1183 | } |
1184 | |
1185 | BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq, |
1186 | control->final, control->poll, control->super); |
1187 | |
1188 | if (test_bit(FLAG_EXT_CTRL, &chan->flags)) |
1189 | control_field = __pack_extended_control(control); |
1190 | else |
1191 | control_field = __pack_enhanced_control(control); |
1192 | |
1193 | skb = l2cap_create_sframe_pdu(chan, control_field); |
1194 | if (!IS_ERR(skb)) |
1195 | l2cap_do_send(chan, skb); |
1196 | } |
1197 | |
1198 | static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll) |
1199 | { |
1200 | struct l2cap_ctrl control; |
1201 | |
1202 | BT_DBG("chan %p, poll %d", chan, poll); |
1203 | |
1204 | memset(&control, 0, sizeof(control)); |
1205 | control.sframe = 1; |
1206 | control.poll = poll; |
1207 | |
1208 | if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) |
1209 | control.super = L2CAP_SUPER_RNR; |
1210 | else |
1211 | control.super = L2CAP_SUPER_RR; |
1212 | |
1213 | control.reqseq = chan->buffer_seq; |
1214 | l2cap_send_sframe(chan, &control); |
1215 | } |
1216 | |
1217 | static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan) |
1218 | { |
1219 | if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) |
1220 | return true; |
1221 | |
1222 | return !test_bit(CONF_CONNECT_PEND, &chan->conf_state); |
1223 | } |
1224 | |
1225 | void l2cap_send_conn_req(struct l2cap_chan *chan) |
1226 | { |
1227 | struct l2cap_conn *conn = chan->conn; |
1228 | struct l2cap_conn_req req; |
1229 | |
1230 | req.scid = cpu_to_le16(chan->scid); |
1231 | req.psm = chan->psm; |
1232 | |
1233 | chan->ident = l2cap_get_ident(conn); |
1234 | |
1235 | set_bit(CONF_CONNECT_PEND, &chan->conf_state); |
1236 | |
1237 | l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req); |
1238 | } |
1239 | |
1240 | static void l2cap_chan_ready(struct l2cap_chan *chan) |
1241 | { |
1242 | /* The channel may have already been flagged as connected in |
1243 | * case of receiving data before the L2CAP info req/rsp |
1244 | * procedure is complete. |
1245 | */ |
1246 | if (chan->state == BT_CONNECTED) |
1247 | return; |
1248 | |
1249 | /* This clears all conf flags, including CONF_NOT_COMPLETE */ |
1250 | chan->conf_state = 0; |
1251 | __clear_chan_timer(chan); |
1252 | |
1253 | switch (chan->mode) { |
1254 | case L2CAP_MODE_LE_FLOWCTL: |
1255 | case L2CAP_MODE_EXT_FLOWCTL: |
1256 | if (!chan->tx_credits) |
1257 | chan->ops->suspend(chan); |
1258 | break; |
1259 | } |
1260 | |
1261 | chan->state = BT_CONNECTED; |
1262 | |
1263 | chan->ops->ready(chan); |
1264 | } |
1265 | |
1266 | static void l2cap_le_connect(struct l2cap_chan *chan) |
1267 | { |
1268 | struct l2cap_conn *conn = chan->conn; |
1269 | struct l2cap_le_conn_req req; |
1270 | |
1271 | if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags)) |
1272 | return; |
1273 | |
1274 | if (!chan->imtu) |
1275 | chan->imtu = chan->conn->mtu; |
1276 | |
1277 | l2cap_le_flowctl_init(chan, 0); |
1278 | |
1279 | memset(&req, 0, sizeof(req)); |
1280 | req.psm = chan->psm; |
1281 | req.scid = cpu_to_le16(chan->scid); |
1282 | req.mtu = cpu_to_le16(chan->imtu); |
1283 | req.mps = cpu_to_le16(chan->mps); |
1284 | req.credits = cpu_to_le16(chan->rx_credits); |
1285 | |
1286 | chan->ident = l2cap_get_ident(conn); |
1287 | |
1288 | l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ, |
1289 | sizeof(req), &req); |
1290 | } |
1291 | |
1292 | struct l2cap_ecred_conn_data { |
1293 | struct { |
1294 | struct l2cap_ecred_conn_req_hdr req; |
1295 | __le16 scid[5]; |
1296 | } __packed pdu; |
1297 | struct l2cap_chan *chan; |
1298 | struct pid *pid; |
1299 | int count; |
1300 | }; |
1301 | |
1302 | static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data) |
1303 | { |
1304 | struct l2cap_ecred_conn_data *conn = data; |
1305 | struct pid *pid; |
1306 | |
1307 | if (chan == conn->chan) |
1308 | return; |
1309 | |
1310 | if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags)) |
1311 | return; |
1312 | |
1313 | pid = chan->ops->get_peer_pid(chan); |
1314 | |
1315 | /* Only add deferred channels with the same PID/PSM */ |
1316 | if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident || |
1317 | chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT) |
1318 | return; |
1319 | |
1320 | if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags)) |
1321 | return; |
1322 | |
1323 | l2cap_ecred_init(chan, 0); |
1324 | |
1325 | /* Set the same ident so we can match on the rsp */ |
1326 | chan->ident = conn->chan->ident; |
1327 | |
1328 | /* Include all channels deferred */ |
1329 | conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid); |
1330 | |
1331 | conn->count++; |
1332 | } |
1333 | |
1334 | static void l2cap_ecred_connect(struct l2cap_chan *chan) |
1335 | { |
1336 | struct l2cap_conn *conn = chan->conn; |
1337 | struct l2cap_ecred_conn_data data; |
1338 | |
1339 | if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) |
1340 | return; |
1341 | |
1342 | if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags)) |
1343 | return; |
1344 | |
1345 | l2cap_ecred_init(chan, 0); |
1346 | |
1347 | memset(&data, 0, sizeof(data)); |
1348 | data.pdu.req.psm = chan->psm; |
1349 | data.pdu.req.mtu = cpu_to_le16(chan->imtu); |
1350 | data.pdu.req.mps = cpu_to_le16(chan->mps); |
1351 | data.pdu.req.credits = cpu_to_le16(chan->rx_credits); |
1352 | data.pdu.scid[0] = cpu_to_le16(chan->scid); |
1353 | |
1354 | chan->ident = l2cap_get_ident(conn); |
1355 | |
1356 | data.count = 1; |
1357 | data.chan = chan; |
1358 | data.pid = chan->ops->get_peer_pid(chan); |
1359 | |
1360 | __l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data); |
1361 | |
1362 | l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ, |
1363 | sizeof(data.pdu.req) + data.count * sizeof(__le16), |
1364 | &data.pdu); |
1365 | } |
1366 | |
1367 | static void l2cap_le_start(struct l2cap_chan *chan) |
1368 | { |
1369 | struct l2cap_conn *conn = chan->conn; |
1370 | |
1371 | if (!smp_conn_security(conn->hcon, chan->sec_level)) |
1372 | return; |
1373 | |
1374 | if (!chan->psm) { |
1375 | l2cap_chan_ready(chan); |
1376 | return; |
1377 | } |
1378 | |
1379 | if (chan->state == BT_CONNECT) { |
1380 | if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) |
1381 | l2cap_ecred_connect(chan); |
1382 | else |
1383 | l2cap_le_connect(chan); |
1384 | } |
1385 | } |
1386 | |
1387 | static void l2cap_start_connection(struct l2cap_chan *chan) |
1388 | { |
1389 | if (chan->conn->hcon->type == LE_LINK) { |
1390 | l2cap_le_start(chan); |
1391 | } else { |
1392 | l2cap_send_conn_req(chan); |
1393 | } |
1394 | } |
1395 | |
1396 | static void l2cap_request_info(struct l2cap_conn *conn) |
1397 | { |
1398 | struct l2cap_info_req req; |
1399 | |
1400 | if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) |
1401 | return; |
1402 | |
1403 | req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); |
1404 | |
1405 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; |
1406 | conn->info_ident = l2cap_get_ident(conn); |
1407 | |
1408 | schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT); |
1409 | |
1410 | l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ, |
1411 | sizeof(req), &req); |
1412 | } |
1413 | |
1414 | static bool l2cap_check_enc_key_size(struct hci_conn *hcon, |
1415 | struct l2cap_chan *chan) |
1416 | { |
1417 | /* The minimum encryption key size needs to be enforced by the |
1418 | * host stack before establishing any L2CAP connections. The |
1419 | * specification in theory allows a minimum of 1, but to align |
1420 | * BR/EDR and LE transports, a minimum of 7 is chosen. |
1421 | * |
1422 | * This check might also be called for unencrypted connections |
1423 | * that have no key size requirements. Ensure that the link is |
1424 | * actually encrypted before enforcing a key size. |
1425 | */ |
1426 | int min_key_size = hcon->hdev->min_enc_key_size; |
1427 | |
1428 | /* On FIPS security level, key size must be 16 bytes */ |
1429 | if (chan->sec_level == BT_SECURITY_FIPS) |
1430 | min_key_size = 16; |
1431 | |
1432 | return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) || |
1433 | hcon->enc_key_size >= min_key_size); |
1434 | } |
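/* Example outcomes (illustrative, assuming the default minimum of 7 noted
 * in the comment above): an encrypted ACL link reporting enc_key_size == 5
 * fails the check, one reporting 7 or more passes, and a link that is not
 * yet encrypted (HCI_CONN_ENCRYPT clear) passes since there is no key to
 * enforce. A BT_SECURITY_FIPS channel only accepts a full 16-byte key.
 */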
1435 | |
1436 | static void l2cap_do_start(struct l2cap_chan *chan) |
1437 | { |
1438 | struct l2cap_conn *conn = chan->conn; |
1439 | |
1440 | if (conn->hcon->type == LE_LINK) { |
1441 | l2cap_le_start(chan); |
1442 | return; |
1443 | } |
1444 | |
1445 | if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) { |
1446 | l2cap_request_info(conn); |
1447 | return; |
1448 | } |
1449 | |
1450 | if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) |
1451 | return; |
1452 | |
1453 | if (!l2cap_chan_check_security(chan, true) || |
1454 | !__l2cap_no_conn_pending(chan)) |
1455 | return; |
1456 | |
1457 | if (l2cap_check_enc_key_size(conn->hcon, chan)) |
1458 | l2cap_start_connection(chan); |
1459 | else |
1460 | __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); |
1461 | } |
1462 | |
1463 | static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask) |
1464 | { |
1465 | u32 local_feat_mask = l2cap_feat_mask; |
1466 | if (!disable_ertm) |
1467 | local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING; |
1468 | |
1469 | switch (mode) { |
1470 | case L2CAP_MODE_ERTM: |
1471 | return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask; |
1472 | case L2CAP_MODE_STREAMING: |
1473 | return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask; |
1474 | default: |
1475 | return 0x00; |
1476 | } |
1477 | } |
1478 | |
1479 | static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err) |
1480 | { |
1481 | struct l2cap_conn *conn = chan->conn; |
1482 | struct l2cap_disconn_req req; |
1483 | |
1484 | if (!conn) |
1485 | return; |
1486 | |
1487 | if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) { |
1488 | __clear_retrans_timer(chan); |
1489 | __clear_monitor_timer(chan); |
1490 | __clear_ack_timer(chan); |
1491 | } |
1492 | |
1493 | req.dcid = cpu_to_le16(chan->dcid); |
1494 | req.scid = cpu_to_le16(chan->scid); |
1495 | l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ, |
1496 | sizeof(req), &req); |
1497 | |
1498 | l2cap_state_change_and_error(chan, BT_DISCONN, err); |
1499 | } |
1500 | |
1501 | /* ---- L2CAP connections ---- */ |
1502 | static void l2cap_conn_start(struct l2cap_conn *conn) |
1503 | { |
1504 | struct l2cap_chan *chan, *tmp; |
1505 | |
1506 | BT_DBG("conn %p", conn); |
1507 | |
1508 | list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) { |
1509 | l2cap_chan_lock(chan); |
1510 | |
1511 | if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { |
1512 | l2cap_chan_ready(chan); |
1513 | l2cap_chan_unlock(chan); |
1514 | continue; |
1515 | } |
1516 | |
1517 | if (chan->state == BT_CONNECT) { |
1518 | if (!l2cap_chan_check_security(chan, true) || |
1519 | !__l2cap_no_conn_pending(chan)) { |
1520 | l2cap_chan_unlock(chan); |
1521 | continue; |
1522 | } |
1523 | |
1524 | if (!l2cap_mode_supported(chan->mode, conn->feat_mask) |
1525 | && test_bit(CONF_STATE2_DEVICE, |
1526 | &chan->conf_state)) { |
1527 | l2cap_chan_close(chan, ECONNRESET); |
1528 | l2cap_chan_unlock(chan); |
1529 | continue; |
1530 | } |
1531 | |
1532 | if (l2cap_check_enc_key_size(conn->hcon, chan)) |
1533 | l2cap_start_connection(chan); |
1534 | else |
1535 | l2cap_chan_close(chan, ECONNREFUSED); |
1536 | |
1537 | } else if (chan->state == BT_CONNECT2) { |
1538 | struct l2cap_conn_rsp rsp; |
1539 | char buf[128]; |
1540 | rsp.scid = cpu_to_le16(chan->dcid); |
1541 | rsp.dcid = cpu_to_le16(chan->scid); |
1542 | |
1543 | if (l2cap_chan_check_security(chan, false)) { |
1544 | if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { |
1545 | rsp.result = cpu_to_le16(L2CAP_CR_PEND); |
1546 | rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND); |
1547 | chan->ops->defer(chan); |
1548 | |
1549 | } else { |
1550 | l2cap_state_change(chan, BT_CONFIG); |
1551 | rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); |
1552 | rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); |
1553 | } |
1554 | } else { |
1555 | rsp.result = cpu_to_le16(L2CAP_CR_PEND); |
1556 | rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND); |
1557 | } |
1558 | |
1559 | l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, |
1560 | sizeof(rsp), &rsp); |
1561 | |
1562 | if (test_bit(CONF_REQ_SENT, &chan->conf_state) || |
1563 | rsp.result != L2CAP_CR_SUCCESS) { |
1564 | l2cap_chan_unlock(chan); |
1565 | continue; |
1566 | } |
1567 | |
1568 | set_bit(CONF_REQ_SENT, &chan->conf_state); |
1569 | l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, |
1570 | l2cap_build_conf_req(chan, buf, sizeof(buf)), buf); |
1571 | chan->num_conf_req++; |
1572 | } |
1573 | |
1574 | l2cap_chan_unlock(chan); |
1575 | } |
1576 | } |
1577 | |
1578 | static void l2cap_le_conn_ready(struct l2cap_conn *conn) |
1579 | { |
1580 | struct hci_conn *hcon = conn->hcon; |
1581 | struct hci_dev *hdev = hcon->hdev; |
1582 | |
1583 | BT_DBG("%s conn %p", hdev->name, conn); |
1584 | |
1585 | /* For outgoing pairing which doesn't necessarily have an |
1586 | * associated socket (e.g. mgmt_pair_device). |
1587 | */ |
1588 | if (hcon->out) |
1589 | smp_conn_security(hcon, hcon->pending_sec_level); |
1590 | |
1591 | /* For LE peripheral connections, make sure the connection interval |
1592 | * is in the range of the minimum and maximum interval that has |
1593 | * been configured for this connection. If not, then trigger |
1594 | * the connection update procedure. |
1595 | */ |
1596 | if (hcon->role == HCI_ROLE_SLAVE && |
1597 | (hcon->le_conn_interval < hcon->le_conn_min_interval || |
1598 | hcon->le_conn_interval > hcon->le_conn_max_interval)) { |
1599 | struct l2cap_conn_param_update_req req; |
1600 | |
1601 | req.min = cpu_to_le16(hcon->le_conn_min_interval); |
1602 | req.max = cpu_to_le16(hcon->le_conn_max_interval); |
1603 | req.latency = cpu_to_le16(hcon->le_conn_latency); |
1604 | req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout); |
1605 | |
1606 | l2cap_send_cmd(conn, l2cap_get_ident(conn), |
1607 | L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req); |
1608 | } |
1609 | } |
1610 | |
1611 | static void l2cap_conn_ready(struct l2cap_conn *conn) |
1612 | { |
1613 | struct l2cap_chan *chan; |
1614 | struct hci_conn *hcon = conn->hcon; |
1615 | |
1616 | BT_DBG("conn %p", conn); |
1617 | |
1618 | if (hcon->type == ACL_LINK) |
1619 | l2cap_request_info(conn); |
1620 | |
1621 | mutex_lock(&conn->lock); |
1622 | |
1623 | list_for_each_entry(chan, &conn->chan_l, list) { |
1624 | |
1625 | l2cap_chan_lock(chan); |
1626 | |
1627 | if (hcon->type == LE_LINK) { |
1628 | l2cap_le_start(chan); |
1629 | } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { |
1630 | if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) |
1631 | l2cap_chan_ready(chan); |
1632 | } else if (chan->state == BT_CONNECT) { |
1633 | l2cap_do_start(chan); |
1634 | } |
1635 | |
1636 | l2cap_chan_unlock(chan); |
1637 | } |
1638 | |
1639 | mutex_unlock(&conn->lock); |
1640 | |
1641 | if (hcon->type == LE_LINK) |
1642 | l2cap_le_conn_ready(conn); |
1643 | |
1644 | queue_work(hcon->hdev->workqueue, &conn->pending_rx_work); |
1645 | } |
1646 | |
1647 | /* Notify sockets that we cannot guarantee reliability anymore */ |
1648 | static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err) |
1649 | { |
1650 | struct l2cap_chan *chan; |
1651 | |
1652 | BT_DBG("conn %p", conn); |
1653 | |
1654 | list_for_each_entry(chan, &conn->chan_l, list) { |
1655 | if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags)) |
1656 | l2cap_chan_set_err(chan, err); |
1657 | } |
1658 | } |
1659 | |
1660 | static void l2cap_info_timeout(struct work_struct *work) |
1661 | { |
1662 | struct l2cap_conn *conn = container_of(work, struct l2cap_conn, |
1663 | info_timer.work); |
1664 | |
1665 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; |
1666 | conn->info_ident = 0; |
1667 | |
1668 | mutex_lock(&conn->lock); |
1669 | l2cap_conn_start(conn); |
1670 | mutex_unlock(&conn->lock); |
1671 | } |
1672 | |
1673 | /* |
1674 | * l2cap_user |
1675 | * External modules can register l2cap_user objects on l2cap_conn. The ->probe |
1676 | * callback is called during registration. The ->remove callback is called |
1677 | * during unregistration. |
1678 | * An l2cap_user object can either be explicitly unregistered or when the |
1679 | * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon, |
1680 | * l2cap->hchan, .. are valid as long as the remove callback hasn't been called. |
1681 | * External modules must own a reference to the l2cap_conn object if they intend |
1682 | * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at |
1683 | * any time if they don't. |
1684 | */ |
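/* Minimal usage sketch (illustrative only, not part of the original code),
 * assuming a module that already holds a reference to the l2cap_conn:
 *
 *	static int my_probe(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		return 0;	// conn->hcon/hchan stay valid until ->remove
 *	}
 *
 *	static void my_remove(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *	}
 *
 *	struct l2cap_user user = { .probe = my_probe, .remove = my_remove };
 *
 *	INIT_LIST_HEAD(&user.list);
 *	l2cap_conn_get(conn);
 *	err = l2cap_register_user(conn, &user);
 *	...
 *	l2cap_unregister_user(conn, &user);
 *	l2cap_conn_put(conn);
 */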
1685 | |
1686 | int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user) |
1687 | { |
1688 | struct hci_dev *hdev = conn->hcon->hdev; |
1689 | int ret; |
1690 | |
1691 | /* We need to check whether l2cap_conn is registered. If it is not, we |
1692 | must not register the l2cap_user. l2cap_conn_del() unregisters |
1693 | * l2cap_conn objects, but doesn't provide its own locking. Instead, it |
1694 | * relies on the parent hci_conn object to be locked. This itself relies |
1695 | * on the hci_dev object to be locked. So we must lock the hci device |
1696 | * here, too. */ |
1697 | |
1698 | hci_dev_lock(hdev); |
1699 | |
1700 | if (!list_empty(&user->list)) { |
1701 | ret = -EINVAL; |
1702 | goto out_unlock; |
1703 | } |
1704 | |
1705 | /* conn->hchan is NULL after l2cap_conn_del() was called */ |
1706 | if (!conn->hchan) { |
1707 | ret = -ENODEV; |
1708 | goto out_unlock; |
1709 | } |
1710 | |
1711 | ret = user->probe(conn, user); |
1712 | if (ret) |
1713 | goto out_unlock; |
1714 | |
1715 | list_add(&user->list, &conn->users); |
1716 | ret = 0; |
1717 | |
1718 | out_unlock: |
1719 | hci_dev_unlock(hdev); |
1720 | return ret; |
1721 | } |
1722 | EXPORT_SYMBOL(l2cap_register_user); |
1723 | |
1724 | void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user) |
1725 | { |
1726 | struct hci_dev *hdev = conn->hcon->hdev; |
1727 | |
1728 | hci_dev_lock(hdev); |
1729 | |
1730 | if (list_empty(&user->list)) |
1731 | goto out_unlock; |
1732 | |
1733 | list_del_init(&user->list); |
1734 | user->remove(conn, user); |
1735 | |
1736 | out_unlock: |
1737 | hci_dev_unlock(hdev); |
1738 | } |
1739 | EXPORT_SYMBOL(l2cap_unregister_user); |
1740 | |
1741 | static void l2cap_unregister_all_users(struct l2cap_conn *conn) |
1742 | { |
1743 | struct l2cap_user *user; |
1744 | |
1745 | while (!list_empty(head: &conn->users)) { |
1746 | user = list_first_entry(&conn->users, struct l2cap_user, list); |
1747 | list_del_init(entry: &user->list); |
1748 | user->remove(conn, user); |
1749 | } |
1750 | } |
1751 | |
1752 | static void l2cap_conn_del(struct hci_conn *hcon, int err) |
1753 | { |
1754 | struct l2cap_conn *conn = hcon->l2cap_data; |
1755 | struct l2cap_chan *chan, *l; |
1756 | |
1757 | if (!conn) |
1758 | return; |
1759 | |
1760 | BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); |
1761 | |
1762 | mutex_lock(&conn->lock); |
1763 | |
1764 | kfree_skb(skb: conn->rx_skb); |
1765 | |
1766 | skb_queue_purge(list: &conn->pending_rx); |
1767 | |
1768 | /* We cannot call flush_work(&conn->pending_rx_work) here since we |
1769 | * might block if we are running on a worker from the same workqueue |
1770 | * that pending_rx_work is queued on. |
1771 | */ |
1772 | if (work_pending(&conn->pending_rx_work)) |
1773 | cancel_work_sync(work: &conn->pending_rx_work); |
1774 | |
1775 | cancel_delayed_work_sync(dwork: &conn->id_addr_timer); |
1776 | |
1777 | l2cap_unregister_all_users(conn); |
1778 | |
1779 | /* Force the connection to be immediately dropped */ |
1780 | hcon->disc_timeout = 0; |
1781 | |
1782 | /* Kill channels */ |
1783 | list_for_each_entry_safe(chan, l, &conn->chan_l, list) { |
1784 | l2cap_chan_hold(c: chan); |
1785 | l2cap_chan_lock(chan); |
1786 | |
1787 | l2cap_chan_del(chan, err); |
1788 | |
1789 | chan->ops->close(chan); |
1790 | |
1791 | l2cap_chan_unlock(chan); |
1792 | l2cap_chan_put(chan); |
1793 | } |
1794 | |
1795 | if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) |
1796 | cancel_delayed_work_sync(dwork: &conn->info_timer); |
1797 | |
1798 | hci_chan_del(chan: conn->hchan); |
1799 | conn->hchan = NULL; |
1800 | |
1801 | hcon->l2cap_data = NULL; |
1802 | mutex_unlock(lock: &conn->lock); |
1803 | l2cap_conn_put(conn); |
1804 | } |
1805 | |
1806 | static void l2cap_conn_free(struct kref *ref) |
1807 | { |
1808 | struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref); |
1809 | |
1810 | hci_conn_put(conn: conn->hcon); |
1811 | kfree(objp: conn); |
1812 | } |
1813 | |
1814 | struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn) |
1815 | { |
1816 | kref_get(kref: &conn->ref); |
1817 | return conn; |
1818 | } |
1819 | EXPORT_SYMBOL(l2cap_conn_get); |
1820 | |
1821 | void l2cap_conn_put(struct l2cap_conn *conn) |
1822 | { |
1823 | kref_put(kref: &conn->ref, release: l2cap_conn_free); |
1824 | } |
1825 | EXPORT_SYMBOL(l2cap_conn_put); |
1826 | |
1827 | /* ---- Socket interface ---- */ |
1828 | |
1829 | /* Find socket with psm and source / destination bdaddr. |
1830 | * Returns closest match. |
1831 | */ |
1832 | static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, |
1833 | bdaddr_t *src, |
1834 | bdaddr_t *dst, |
1835 | u8 link_type) |
1836 | { |
1837 | struct l2cap_chan *c, *tmp, *c1 = NULL; |
1838 | |
1839 | read_lock(&chan_list_lock); |
1840 | |
1841 | list_for_each_entry_safe(c, tmp, &chan_list, global_l) { |
1842 | if (state && c->state != state) |
1843 | continue; |
1844 | |
1845 | if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR) |
1846 | continue; |
1847 | |
1848 | if (link_type == LE_LINK && c->src_type == BDADDR_BREDR) |
1849 | continue; |
1850 | |
1851 | if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) { |
1852 | int src_match, dst_match; |
1853 | int src_any, dst_any; |
1854 | |
1855 | /* Exact match. */ |
1856 | src_match = !bacmp(ba1: &c->src, ba2: src); |
1857 | dst_match = !bacmp(ba1: &c->dst, ba2: dst); |
1858 | if (src_match && dst_match) { |
1859 | if (!l2cap_chan_hold_unless_zero(c)) |
1860 | continue; |
1861 | |
1862 | read_unlock(&chan_list_lock); |
1863 | return c; |
1864 | } |
1865 | |
1866 | /* Closest match */ |
1867 | src_any = !bacmp(ba1: &c->src, BDADDR_ANY); |
1868 | dst_any = !bacmp(ba1: &c->dst, BDADDR_ANY); |
1869 | if ((src_match && dst_any) || (src_any && dst_match) || |
1870 | (src_any && dst_any)) |
1871 | c1 = c; |
1872 | } |
1873 | } |
1874 | |
1875 | if (c1) |
1876 | c1 = l2cap_chan_hold_unless_zero(c: c1); |
1877 | |
1878 | read_unlock(&chan_list_lock); |
1879 | |
1880 | return c1; |
1881 | } |
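/*
 * Example: with one channel bound to (src, dst) and another bound to
 * (BDADDR_ANY, dst), a lookup for src/dst returns the fully specified
 * channel as the exact match; the wildcard channel is only remembered
 * in c1 as the closest-match fallback.
 */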
1882 | |
1883 | static void l2cap_monitor_timeout(struct work_struct *work) |
1884 | { |
1885 | struct l2cap_chan *chan = container_of(work, struct l2cap_chan, |
1886 | monitor_timer.work); |
1887 | |
1888 | BT_DBG("chan %p", chan); |
1889 | |
1890 | l2cap_chan_lock(chan); |
1891 | |
1892 | if (!chan->conn) { |
1893 | l2cap_chan_unlock(chan); |
1894 | l2cap_chan_put(chan); |
1895 | return; |
1896 | } |
1897 | |
1898 | l2cap_tx(chan, NULL, NULL, event: L2CAP_EV_MONITOR_TO); |
1899 | |
1900 | l2cap_chan_unlock(chan); |
1901 | l2cap_chan_put(chan); |
1902 | } |
1903 | |
1904 | static void l2cap_retrans_timeout(struct work_struct *work) |
1905 | { |
1906 | struct l2cap_chan *chan = container_of(work, struct l2cap_chan, |
1907 | retrans_timer.work); |
1908 | |
1909 | BT_DBG("chan %p", chan); |
1910 | |
1911 | l2cap_chan_lock(chan); |
1912 | |
1913 | if (!chan->conn) { |
1914 | l2cap_chan_unlock(chan); |
1915 | l2cap_chan_put(chan); |
1916 | return; |
1917 | } |
1918 | |
1919 | l2cap_tx(chan, NULL, NULL, event: L2CAP_EV_RETRANS_TO); |
1920 | l2cap_chan_unlock(chan); |
1921 | l2cap_chan_put(chan); |
1922 | } |
1923 | |
1924 | static void l2cap_streaming_send(struct l2cap_chan *chan, |
1925 | struct sk_buff_head *skbs) |
1926 | { |
1927 | struct sk_buff *skb; |
1928 | struct l2cap_ctrl *control; |
1929 | |
1930 | BT_DBG("chan %p, skbs %p", chan, skbs); |
1931 | |
1932 | skb_queue_splice_tail_init(list: skbs, head: &chan->tx_q); |
1933 | |
1934 | while (!skb_queue_empty(list: &chan->tx_q)) { |
1935 | |
1936 | skb = skb_dequeue(list: &chan->tx_q); |
1937 | |
1938 | bt_cb(skb)->l2cap.retries = 1; |
1939 | control = &bt_cb(skb)->l2cap; |
1940 | |
1941 | control->reqseq = 0; |
1942 | control->txseq = chan->next_tx_seq; |
1943 | |
1944 | __pack_control(chan, control, skb); |
1945 | |
1946 | if (chan->fcs == L2CAP_FCS_CRC16) { |
1947 | u16 fcs = crc16(crc: 0, p: (u8 *) skb->data, len: skb->len); |
1948 | put_unaligned_le16(val: fcs, p: skb_put(skb, L2CAP_FCS_SIZE)); |
1949 | } |
1950 | |
1951 | l2cap_do_send(chan, skb); |
1952 | |
1953 | BT_DBG("Sent txseq %u", control->txseq); |
1954 | |
1955 | chan->next_tx_seq = __next_seq(chan, seq: chan->next_tx_seq); |
1956 | chan->frames_sent++; |
1957 | } |
1958 | } |
1959 | |
1960 | static int l2cap_ertm_send(struct l2cap_chan *chan) |
1961 | { |
1962 | struct sk_buff *skb, *tx_skb; |
1963 | struct l2cap_ctrl *control; |
1964 | int sent = 0; |
1965 | |
1966 | BT_DBG("chan %p", chan); |
1967 | |
1968 | if (chan->state != BT_CONNECTED) |
1969 | return -ENOTCONN; |
1970 | |
1971 | if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) |
1972 | return 0; |
1973 | |
1974 | while (chan->tx_send_head && |
1975 | chan->unacked_frames < chan->remote_tx_win && |
1976 | chan->tx_state == L2CAP_TX_STATE_XMIT) { |
1977 | |
1978 | skb = chan->tx_send_head; |
1979 | |
1980 | bt_cb(skb)->l2cap.retries = 1; |
1981 | control = &bt_cb(skb)->l2cap; |
1982 | |
1983 | if (test_and_clear_bit(nr: CONN_SEND_FBIT, addr: &chan->conn_state)) |
1984 | control->final = 1; |
1985 | |
1986 | control->reqseq = chan->buffer_seq; |
1987 | chan->last_acked_seq = chan->buffer_seq; |
1988 | control->txseq = chan->next_tx_seq; |
1989 | |
1990 | __pack_control(chan, control, skb); |
1991 | |
1992 | if (chan->fcs == L2CAP_FCS_CRC16) { |
1993 | u16 fcs = crc16(crc: 0, p: (u8 *) skb->data, len: skb->len); |
1994 | put_unaligned_le16(val: fcs, p: skb_put(skb, L2CAP_FCS_SIZE)); |
1995 | } |
1996 | |
1997 | /* Clone after data has been modified. Data is assumed to be |
1998 | read-only (for locking purposes) on cloned sk_buffs. |
1999 | */ |
2000 | tx_skb = skb_clone(skb, GFP_KERNEL); |
2001 | |
2002 | if (!tx_skb) |
2003 | break; |
2004 | |
2005 | __set_retrans_timer(chan); |
2006 | |
2007 | chan->next_tx_seq = __next_seq(chan, seq: chan->next_tx_seq); |
2008 | chan->unacked_frames++; |
2009 | chan->frames_sent++; |
2010 | sent++; |
2011 | |
2012 | if (skb_queue_is_last(list: &chan->tx_q, skb)) |
2013 | chan->tx_send_head = NULL; |
2014 | else |
2015 | chan->tx_send_head = skb_queue_next(list: &chan->tx_q, skb); |
2016 | |
2017 | l2cap_do_send(chan, skb: tx_skb); |
2018 | BT_DBG("Sent txseq %u", control->txseq); |
2019 | } |
2020 | |
2021 | BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent, |
2022 | chan->unacked_frames, skb_queue_len(&chan->tx_q)); |
2023 | |
2024 | return sent; |
2025 | } |
2026 | |
2027 | static void l2cap_ertm_resend(struct l2cap_chan *chan) |
2028 | { |
2029 | struct l2cap_ctrl control; |
2030 | struct sk_buff *skb; |
2031 | struct sk_buff *tx_skb; |
2032 | u16 seq; |
2033 | |
2034 | BT_DBG("chan %p", chan); |
2035 | |
2036 | if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) |
2037 | return; |
2038 | |
2039 | while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) { |
2040 | seq = l2cap_seq_list_pop(seq_list: &chan->retrans_list); |
2041 | |
2042 | skb = l2cap_ertm_seq_in_queue(head: &chan->tx_q, seq); |
2043 | if (!skb) { |
2044 | BT_DBG("Error: Can't retransmit seq %d, frame missing", |
2045 | seq); |
2046 | continue; |
2047 | } |
2048 | |
2049 | bt_cb(skb)->l2cap.retries++; |
2050 | control = bt_cb(skb)->l2cap; |
2051 | |
2052 | if (chan->max_tx != 0 && |
2053 | bt_cb(skb)->l2cap.retries > chan->max_tx) { |
2054 | BT_DBG("Retry limit exceeded (%d)", chan->max_tx); |
2055 | l2cap_send_disconn_req(chan, ECONNRESET); |
2056 | l2cap_seq_list_clear(seq_list: &chan->retrans_list); |
2057 | break; |
2058 | } |
2059 | |
2060 | control.reqseq = chan->buffer_seq; |
2061 | if (test_and_clear_bit(nr: CONN_SEND_FBIT, addr: &chan->conn_state)) |
2062 | control.final = 1; |
2063 | else |
2064 | control.final = 0; |
2065 | |
2066 | if (skb_cloned(skb)) { |
2067 | /* Cloned sk_buffs are read-only, so we need a |
2068 | * writeable copy |
2069 | */ |
2070 | tx_skb = skb_copy(skb, GFP_KERNEL); |
2071 | } else { |
2072 | tx_skb = skb_clone(skb, GFP_KERNEL); |
2073 | } |
2074 | |
2075 | if (!tx_skb) { |
2076 | l2cap_seq_list_clear(seq_list: &chan->retrans_list); |
2077 | break; |
2078 | } |
2079 | |
2080 | /* Update skb contents */ |
2081 | if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { |
2082 | put_unaligned_le32(val: __pack_extended_control(control: &control), |
2083 | p: tx_skb->data + L2CAP_HDR_SIZE); |
2084 | } else { |
2085 | put_unaligned_le16(val: __pack_enhanced_control(control: &control), |
2086 | p: tx_skb->data + L2CAP_HDR_SIZE); |
2087 | } |
2088 | |
2089 | /* Update FCS */ |
2090 | if (chan->fcs == L2CAP_FCS_CRC16) { |
2091 | u16 fcs = crc16(crc: 0, p: (u8 *) tx_skb->data, |
2092 | len: tx_skb->len - L2CAP_FCS_SIZE); |
2093 | put_unaligned_le16(val: fcs, p: skb_tail_pointer(skb: tx_skb) - |
2094 | L2CAP_FCS_SIZE); |
2095 | } |
2096 | |
2097 | l2cap_do_send(chan, skb: tx_skb); |
2098 | |
2099 | BT_DBG("Resent txseq %d", control.txseq); |
2100 | |
2101 | chan->last_acked_seq = chan->buffer_seq; |
2102 | } |
2103 | } |
2104 | |
2105 | static void l2cap_retransmit(struct l2cap_chan *chan, |
2106 | struct l2cap_ctrl *control) |
2107 | { |
2108 | BT_DBG("chan %p, control %p", chan, control); |
2109 | |
2110 | l2cap_seq_list_append(seq_list: &chan->retrans_list, seq: control->reqseq); |
2111 | l2cap_ertm_resend(chan); |
2112 | } |
2113 | |
2114 | static void l2cap_retransmit_all(struct l2cap_chan *chan, |
2115 | struct l2cap_ctrl *control) |
2116 | { |
2117 | struct sk_buff *skb; |
2118 | |
2119 | BT_DBG("chan %p, control %p", chan, control); |
2120 | |
2121 | if (control->poll) |
2122 | set_bit(nr: CONN_SEND_FBIT, addr: &chan->conn_state); |
2123 | |
2124 | l2cap_seq_list_clear(seq_list: &chan->retrans_list); |
2125 | |
2126 | if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) |
2127 | return; |
2128 | |
2129 | if (chan->unacked_frames) { |
2130 | skb_queue_walk(&chan->tx_q, skb) { |
2131 | if (bt_cb(skb)->l2cap.txseq == control->reqseq || |
2132 | skb == chan->tx_send_head) |
2133 | break; |
2134 | } |
2135 | |
2136 | skb_queue_walk_from(&chan->tx_q, skb) { |
2137 | if (skb == chan->tx_send_head) |
2138 | break; |
2139 | |
2140 | l2cap_seq_list_append(seq_list: &chan->retrans_list, |
2141 | bt_cb(skb)->l2cap.txseq); |
2142 | } |
2143 | |
2144 | l2cap_ertm_resend(chan); |
2145 | } |
2146 | } |
2147 | |
2148 | static void l2cap_send_ack(struct l2cap_chan *chan) |
2149 | { |
2150 | struct l2cap_ctrl control; |
2151 | u16 frames_to_ack = __seq_offset(chan, seq1: chan->buffer_seq, |
2152 | seq2: chan->last_acked_seq); |
2153 | int threshold; |
2154 | |
2155 | BT_DBG("chan %p last_acked_seq %d buffer_seq %d", |
2156 | chan, chan->last_acked_seq, chan->buffer_seq); |
2157 | |
2158 | memset(&control, 0, sizeof(control)); |
2159 | control.sframe = 1; |
2160 | |
2161 | if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && |
2162 | chan->rx_state == L2CAP_RX_STATE_RECV) { |
2163 | __clear_ack_timer(chan); |
2164 | control.super = L2CAP_SUPER_RNR; |
2165 | control.reqseq = chan->buffer_seq; |
2166 | l2cap_send_sframe(chan, control: &control); |
2167 | } else { |
2168 | if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) { |
2169 | l2cap_ertm_send(chan); |
2170 | /* If any i-frames were sent, they included an ack */ |
2171 | if (chan->buffer_seq == chan->last_acked_seq) |
2172 | frames_to_ack = 0; |
2173 | } |
2174 | |
2175 | /* Ack now if the window is 3/4ths full. |
2176 | * Calculate without mul or div |
2177 | */ |
2178 | threshold = chan->ack_win; |
2179 | threshold += threshold << 1; |
2180 | threshold >>= 2; |
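/* e.g. ack_win = 10: 10 + (10 << 1) = 30, 30 >> 2 = 7, roughly 3/4 of the window */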
2181 | |
2182 | BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack, |
2183 | threshold); |
2184 | |
2185 | if (frames_to_ack >= threshold) { |
2186 | __clear_ack_timer(chan); |
2187 | control.super = L2CAP_SUPER_RR; |
2188 | control.reqseq = chan->buffer_seq; |
2189 | l2cap_send_sframe(chan, control: &control); |
2190 | frames_to_ack = 0; |
2191 | } |
2192 | |
2193 | if (frames_to_ack) |
2194 | __set_ack_timer(chan); |
2195 | } |
2196 | } |
2197 | |
2198 | static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, |
2199 | struct msghdr *msg, int len, |
2200 | int count, struct sk_buff *skb) |
2201 | { |
2202 | struct l2cap_conn *conn = chan->conn; |
2203 | struct sk_buff **frag; |
2204 | int sent = 0; |
2205 | |
2206 | if (!copy_from_iter_full(addr: skb_put(skb, len: count), bytes: count, i: &msg->msg_iter)) |
2207 | return -EFAULT; |
2208 | |
2209 | sent += count; |
2210 | len -= count; |
2211 | |
2212 | /* Continuation fragments (no L2CAP header) */ |
2213 | frag = &skb_shinfo(skb)->frag_list; |
2214 | while (len) { |
2215 | struct sk_buff *tmp; |
2216 | |
2217 | count = min_t(unsigned int, conn->mtu, len); |
2218 | |
2219 | tmp = chan->ops->alloc_skb(chan, 0, count, |
2220 | msg->msg_flags & MSG_DONTWAIT); |
2221 | if (IS_ERR(ptr: tmp)) |
2222 | return PTR_ERR(ptr: tmp); |
2223 | |
2224 | *frag = tmp; |
2225 | |
2226 | if (!copy_from_iter_full(addr: skb_put(skb: *frag, len: count), bytes: count, |
2227 | i: &msg->msg_iter)) |
2228 | return -EFAULT; |
2229 | |
2230 | sent += count; |
2231 | len -= count; |
2232 | |
2233 | skb->len += (*frag)->len; |
2234 | skb->data_len += (*frag)->len; |
2235 | |
2236 | frag = &(*frag)->next; |
2237 | } |
2238 | |
2239 | return sent; |
2240 | } |
2241 | |
2242 | static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, |
2243 | struct msghdr *msg, size_t len) |
2244 | { |
2245 | struct l2cap_conn *conn = chan->conn; |
2246 | struct sk_buff *skb; |
2247 | int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE; |
2248 | struct l2cap_hdr *lh; |
2249 | |
2250 | BT_DBG("chan %p psm 0x%2.2x len %zu", chan, |
2251 | __le16_to_cpu(chan->psm), len); |
2252 | |
2253 | count = min_t(unsigned int, (conn->mtu - hlen), len); |
2254 | |
2255 | skb = chan->ops->alloc_skb(chan, hlen, count, |
2256 | msg->msg_flags & MSG_DONTWAIT); |
2257 | if (IS_ERR(ptr: skb)) |
2258 | return skb; |
2259 | |
2260 | /* Create L2CAP header */ |
2261 | lh = skb_put(skb, L2CAP_HDR_SIZE); |
2262 | lh->cid = cpu_to_le16(chan->dcid); |
2263 | lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE); |
2264 | put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE)); |
2265 | |
2266 | err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb); |
2267 | if (unlikely(err < 0)) { |
2268 | kfree_skb(skb); |
2269 | return ERR_PTR(error: err); |
2270 | } |
2271 | return skb; |
2272 | } |
2273 | |
2274 | static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, |
2275 | struct msghdr *msg, size_t len) |
2276 | { |
2277 | struct l2cap_conn *conn = chan->conn; |
2278 | struct sk_buff *skb; |
2279 | int err, count; |
2280 | struct l2cap_hdr *lh; |
2281 | |
2282 | BT_DBG("chan %p len %zu", chan, len); |
2283 | |
2284 | count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len); |
2285 | |
2286 | skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count, |
2287 | msg->msg_flags & MSG_DONTWAIT); |
2288 | if (IS_ERR(ptr: skb)) |
2289 | return skb; |
2290 | |
2291 | /* Create L2CAP header */ |
2292 | lh = skb_put(skb, L2CAP_HDR_SIZE); |
2293 | lh->cid = cpu_to_le16(chan->dcid); |
2294 | lh->len = cpu_to_le16(len); |
2295 | |
2296 | err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb); |
2297 | if (unlikely(err < 0)) { |
2298 | kfree_skb(skb); |
2299 | return ERR_PTR(error: err); |
2300 | } |
2301 | return skb; |
2302 | } |
2303 | |
2304 | static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, |
2305 | struct msghdr *msg, size_t len, |
2306 | u16 sdulen) |
2307 | { |
2308 | struct l2cap_conn *conn = chan->conn; |
2309 | struct sk_buff *skb; |
2310 | int err, count, hlen; |
2311 | struct l2cap_hdr *lh; |
2312 | |
2313 | BT_DBG("chan %p len %zu", chan, len); |
2314 | |
2315 | if (!conn) |
2316 | return ERR_PTR(error: -ENOTCONN); |
2317 | |
2318 | hlen = __ertm_hdr_size(chan); |
2319 | |
2320 | if (sdulen) |
2321 | hlen += L2CAP_SDULEN_SIZE; |
2322 | |
2323 | if (chan->fcs == L2CAP_FCS_CRC16) |
2324 | hlen += L2CAP_FCS_SIZE; |
2325 | |
2326 | count = min_t(unsigned int, (conn->mtu - hlen), len); |
2327 | |
2328 | skb = chan->ops->alloc_skb(chan, hlen, count, |
2329 | msg->msg_flags & MSG_DONTWAIT); |
2330 | if (IS_ERR(ptr: skb)) |
2331 | return skb; |
2332 | |
2333 | /* Create L2CAP header */ |
2334 | lh = skb_put(skb, L2CAP_HDR_SIZE); |
2335 | lh->cid = cpu_to_le16(chan->dcid); |
2336 | lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); |
2337 | |
2338 | /* Control header is populated later */ |
2339 | if (test_bit(FLAG_EXT_CTRL, &chan->flags)) |
2340 | put_unaligned_le32(val: 0, p: skb_put(skb, L2CAP_EXT_CTRL_SIZE)); |
2341 | else |
2342 | put_unaligned_le16(val: 0, p: skb_put(skb, L2CAP_ENH_CTRL_SIZE)); |
2343 | |
2344 | if (sdulen) |
2345 | put_unaligned_le16(val: sdulen, p: skb_put(skb, L2CAP_SDULEN_SIZE)); |
2346 | |
2347 | err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb); |
2348 | if (unlikely(err < 0)) { |
2349 | kfree_skb(skb); |
2350 | return ERR_PTR(error: err); |
2351 | } |
2352 | |
2353 | bt_cb(skb)->l2cap.fcs = chan->fcs; |
2354 | bt_cb(skb)->l2cap.retries = 0; |
2355 | return skb; |
2356 | } |
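/*
 * Resulting I-frame layout (sketch):
 *
 *   [ L2CAP hdr: 4 ][ control: 2 or 4 ][ SDU len: 2, START only ][ payload ][ FCS: 2, CRC16 only ]
 *
 * The control field is left as zero here and is only filled in by
 * __pack_control() when the frame is actually sent.
 */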
2357 | |
2358 | static int l2cap_segment_sdu(struct l2cap_chan *chan, |
2359 | struct sk_buff_head *seg_queue, |
2360 | struct msghdr *msg, size_t len) |
2361 | { |
2362 | struct sk_buff *skb; |
2363 | u16 sdu_len; |
2364 | size_t pdu_len; |
2365 | u8 sar; |
2366 | |
2367 | BT_DBG("chan %p, msg %p, len %zu", chan, msg, len); |
2368 | |
2369 | /* It is critical that ERTM PDUs fit in a single HCI fragment, |
2370 | * so fragmented skbs are not used. The HCI layer's handling |
2371 | * of fragmented skbs is not compatible with ERTM's queueing. |
2372 | */ |
2373 | |
2374 | /* PDU size is derived from the HCI MTU */ |
2375 | pdu_len = chan->conn->mtu; |
2376 | |
2377 | /* Constrain PDU size for BR/EDR connections */ |
2378 | pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD); |
2379 | |
2380 | /* Adjust for largest possible L2CAP overhead. */ |
2381 | if (chan->fcs) |
2382 | pdu_len -= L2CAP_FCS_SIZE; |
2383 | |
2384 | pdu_len -= __ertm_hdr_size(chan); |
2385 | |
2386 | /* Remote device may have requested smaller PDUs */ |
2387 | pdu_len = min_t(size_t, pdu_len, chan->remote_mps); |
2388 | |
2389 | if (len <= pdu_len) { |
2390 | sar = L2CAP_SAR_UNSEGMENTED; |
2391 | sdu_len = 0; |
2392 | pdu_len = len; |
2393 | } else { |
2394 | sar = L2CAP_SAR_START; |
2395 | sdu_len = len; |
2396 | } |
2397 | |
2398 | while (len > 0) { |
2399 | skb = l2cap_create_iframe_pdu(chan, msg, len: pdu_len, sdulen: sdu_len); |
2400 | |
2401 | if (IS_ERR(ptr: skb)) { |
2402 | __skb_queue_purge(list: seg_queue); |
2403 | return PTR_ERR(ptr: skb); |
2404 | } |
2405 | |
2406 | bt_cb(skb)->l2cap.sar = sar; |
2407 | __skb_queue_tail(list: seg_queue, newsk: skb); |
2408 | |
2409 | len -= pdu_len; |
2410 | if (sdu_len) |
2411 | sdu_len = 0; |
2412 | |
2413 | if (len <= pdu_len) { |
2414 | sar = L2CAP_SAR_END; |
2415 | pdu_len = len; |
2416 | } else { |
2417 | sar = L2CAP_SAR_CONTINUE; |
2418 | } |
2419 | } |
2420 | |
2421 | return 0; |
2422 | } |
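/*
 * Worked example (hypothetical numbers): an SDU of 1000 bytes with an
 * effective pdu_len of 300 is queued as four I-frames:
 *   SAR_START (sdu_len = 1000, 300 bytes), SAR_CONTINUE (300),
 *   SAR_CONTINUE (300), SAR_END (100).
 */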
2423 | |
2424 | static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan, |
2425 | struct msghdr *msg, |
2426 | size_t len, u16 sdulen) |
2427 | { |
2428 | struct l2cap_conn *conn = chan->conn; |
2429 | struct sk_buff *skb; |
2430 | int err, count, hlen; |
2431 | struct l2cap_hdr *lh; |
2432 | |
2433 | BT_DBG("chan %p len %zu", chan, len); |
2434 | |
2435 | if (!conn) |
2436 | return ERR_PTR(error: -ENOTCONN); |
2437 | |
2438 | hlen = L2CAP_HDR_SIZE; |
2439 | |
2440 | if (sdulen) |
2441 | hlen += L2CAP_SDULEN_SIZE; |
2442 | |
2443 | count = min_t(unsigned int, (conn->mtu - hlen), len); |
2444 | |
2445 | skb = chan->ops->alloc_skb(chan, hlen, count, |
2446 | msg->msg_flags & MSG_DONTWAIT); |
2447 | if (IS_ERR(ptr: skb)) |
2448 | return skb; |
2449 | |
2450 | /* Create L2CAP header */ |
2451 | lh = skb_put(skb, L2CAP_HDR_SIZE); |
2452 | lh->cid = cpu_to_le16(chan->dcid); |
2453 | lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); |
2454 | |
2455 | if (sdulen) |
2456 | put_unaligned_le16(val: sdulen, p: skb_put(skb, L2CAP_SDULEN_SIZE)); |
2457 | |
2458 | err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb); |
2459 | if (unlikely(err < 0)) { |
2460 | kfree_skb(skb); |
2461 | return ERR_PTR(error: err); |
2462 | } |
2463 | |
2464 | return skb; |
2465 | } |
2466 | |
2467 | static int l2cap_segment_le_sdu(struct l2cap_chan *chan, |
2468 | struct sk_buff_head *seg_queue, |
2469 | struct msghdr *msg, size_t len) |
2470 | { |
2471 | struct sk_buff *skb; |
2472 | size_t pdu_len; |
2473 | u16 sdu_len; |
2474 | |
2475 | BT_DBG("chan %p, msg %p, len %zu", chan, msg, len); |
2476 | |
2477 | sdu_len = len; |
2478 | pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE; |
2479 | |
2480 | while (len > 0) { |
2481 | if (len <= pdu_len) |
2482 | pdu_len = len; |
2483 | |
2484 | skb = l2cap_create_le_flowctl_pdu(chan, msg, len: pdu_len, sdulen: sdu_len); |
2485 | if (IS_ERR(ptr: skb)) { |
2486 | __skb_queue_purge(list: seg_queue); |
2487 | return PTR_ERR(ptr: skb); |
2488 | } |
2489 | |
2490 | __skb_queue_tail(list: seg_queue, newsk: skb); |
2491 | |
2492 | len -= pdu_len; |
2493 | |
2494 | if (sdu_len) { |
2495 | sdu_len = 0; |
2496 | pdu_len += L2CAP_SDULEN_SIZE; |
2497 | } |
2498 | } |
2499 | |
2500 | return 0; |
2501 | } |
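/*
 * Worked example (hypothetical numbers): remote_mps = 100, SDU = 250 bytes.
 * The first K-frame carries the 2-byte SDU length plus 98 bytes of data;
 * later frames may use the full MPS: 98 + 100 + 52 = 250 bytes in three PDUs.
 */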
2502 | |
2503 | static void l2cap_le_flowctl_send(struct l2cap_chan *chan) |
2504 | { |
2505 | int sent = 0; |
2506 | |
2507 | BT_DBG("chan %p", chan); |
2508 | |
2509 | while (chan->tx_credits && !skb_queue_empty(list: &chan->tx_q)) { |
2510 | l2cap_do_send(chan, skb: skb_dequeue(list: &chan->tx_q)); |
2511 | chan->tx_credits--; |
2512 | sent++; |
2513 | } |
2514 | |
2515 | BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits, |
2516 | skb_queue_len(&chan->tx_q)); |
2517 | } |
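/* Each K-frame sent by l2cap_le_flowctl_send() consumes one credit; once
 * tx_credits reaches zero the remaining frames stay on tx_q until the peer
 * grants more credits.
 */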
2518 | |
2519 | static void l2cap_tx_timestamp(struct sk_buff *skb, |
2520 | const struct sockcm_cookie *sockc, |
2521 | size_t len) |
2522 | { |
2523 | struct sock *sk = skb ? skb->sk : NULL; |
2524 | |
2525 | if (sk && sk->sk_type == SOCK_STREAM) |
2526 | hci_setup_tx_timestamp(skb, key_offset: len, sockc); |
2527 | else |
2528 | hci_setup_tx_timestamp(skb, key_offset: 1, sockc); |
2529 | } |
2530 | |
2531 | static void l2cap_tx_timestamp_seg(struct sk_buff_head *queue, |
2532 | const struct sockcm_cookie *sockc, |
2533 | size_t len) |
2534 | { |
2535 | struct sk_buff *skb = skb_peek(list_: queue); |
2536 | struct sock *sk = skb ? skb->sk : NULL; |
2537 | |
2538 | if (sk && sk->sk_type == SOCK_STREAM) |
2539 | l2cap_tx_timestamp(skb: skb_peek_tail(list_: queue), sockc, len); |
2540 | else |
2541 | l2cap_tx_timestamp(skb, sockc, len); |
2542 | } |
2543 | |
2544 | int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, |
2545 | const struct sockcm_cookie *sockc) |
2546 | { |
2547 | struct sk_buff *skb; |
2548 | int err; |
2549 | struct sk_buff_head seg_queue; |
2550 | |
2551 | if (!chan->conn) |
2552 | return -ENOTCONN; |
2553 | |
2554 | /* Connectionless channel */ |
2555 | if (chan->chan_type == L2CAP_CHAN_CONN_LESS) { |
2556 | skb = l2cap_create_connless_pdu(chan, msg, len); |
2557 | if (IS_ERR(ptr: skb)) |
2558 | return PTR_ERR(ptr: skb); |
2559 | |
2560 | l2cap_tx_timestamp(skb, sockc, len); |
2561 | |
2562 | l2cap_do_send(chan, skb); |
2563 | return len; |
2564 | } |
2565 | |
2566 | switch (chan->mode) { |
2567 | case L2CAP_MODE_LE_FLOWCTL: |
2568 | case L2CAP_MODE_EXT_FLOWCTL: |
2569 | /* Check outgoing MTU */ |
2570 | if (len > chan->omtu) |
2571 | return -EMSGSIZE; |
2572 | |
2573 | __skb_queue_head_init(list: &seg_queue); |
2574 | |
2575 | err = l2cap_segment_le_sdu(chan, seg_queue: &seg_queue, msg, len); |
2576 | |
2577 | if (chan->state != BT_CONNECTED) { |
2578 | __skb_queue_purge(list: &seg_queue); |
2579 | err = -ENOTCONN; |
2580 | } |
2581 | |
2582 | if (err) |
2583 | return err; |
2584 | |
2585 | l2cap_tx_timestamp_seg(queue: &seg_queue, sockc, len); |
2586 | |
2587 | skb_queue_splice_tail_init(list: &seg_queue, head: &chan->tx_q); |
2588 | |
2589 | l2cap_le_flowctl_send(chan); |
2590 | |
2591 | if (!chan->tx_credits) |
2592 | chan->ops->suspend(chan); |
2593 | |
2594 | err = len; |
2595 | |
2596 | break; |
2597 | |
2598 | case L2CAP_MODE_BASIC: |
2599 | /* Check outgoing MTU */ |
2600 | if (len > chan->omtu) |
2601 | return -EMSGSIZE; |
2602 | |
2603 | /* Create a basic PDU */ |
2604 | skb = l2cap_create_basic_pdu(chan, msg, len); |
2605 | if (IS_ERR(ptr: skb)) |
2606 | return PTR_ERR(ptr: skb); |
2607 | |
2608 | l2cap_tx_timestamp(skb, sockc, len); |
2609 | |
2610 | l2cap_do_send(chan, skb); |
2611 | err = len; |
2612 | break; |
2613 | |
2614 | case L2CAP_MODE_ERTM: |
2615 | case L2CAP_MODE_STREAMING: |
2616 | /* Check outgoing MTU */ |
2617 | if (len > chan->omtu) { |
2618 | err = -EMSGSIZE; |
2619 | break; |
2620 | } |
2621 | |
2622 | __skb_queue_head_init(list: &seg_queue); |
2623 | |
2624 | /* Do segmentation before calling into the state machine, |
2625 | * since it's possible to block while waiting for memory |
2626 | * allocation. |
2627 | */ |
2628 | err = l2cap_segment_sdu(chan, seg_queue: &seg_queue, msg, len); |
2629 | |
2630 | if (err) |
2631 | break; |
2632 | |
2633 | if (chan->mode == L2CAP_MODE_ERTM) { |
2634 | /* TODO: ERTM mode timestamping */ |
2635 | l2cap_tx(chan, NULL, skbs: &seg_queue, event: L2CAP_EV_DATA_REQUEST); |
2636 | } else { |
2637 | l2cap_tx_timestamp_seg(queue: &seg_queue, sockc, len); |
2638 | l2cap_streaming_send(chan, skbs: &seg_queue); |
2639 | } |
2640 | |
2641 | err = len; |
2642 | |
2643 | /* If the skbs were not queued for sending, they'll still be in |
2644 | * seg_queue and need to be purged. |
2645 | */ |
2646 | __skb_queue_purge(list: &seg_queue); |
2647 | break; |
2648 | |
2649 | default: |
2650 | BT_DBG("bad mode %1.1x", chan->mode); |
2651 | err = -EBADFD; |
2652 | } |
2653 | |
2654 | return err; |
2655 | } |
2656 | EXPORT_SYMBOL_GPL(l2cap_chan_send); |
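/* Illustrative sketch only (not part of the original file): how an in-kernel
 * user that owns a connected channel might push a flat buffer through
 * l2cap_chan_send(). The example_* name is hypothetical; passing a NULL
 * sockcm cookie is assumed to be acceptable for callers without a socket.
 */
static int example_chan_write(struct l2cap_chan *chan, void *buf, size_t len)
{
	struct msghdr msg = {};
	struct kvec iv = { .iov_base = buf, .iov_len = len };

	/* Wrap the buffer in an iov_iter, since l2cap_chan_send() consumes a msghdr */
	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iv, 1, len);

	return l2cap_chan_send(chan, &msg, len, NULL);
}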
2657 | |
2658 | static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq) |
2659 | { |
2660 | struct l2cap_ctrl control; |
2661 | u16 seq; |
2662 | |
2663 | BT_DBG("chan %p, txseq %u", chan, txseq); |
2664 | |
2665 | memset(&control, 0, sizeof(control)); |
2666 | control.sframe = 1; |
2667 | control.super = L2CAP_SUPER_SREJ; |
2668 | |
2669 | for (seq = chan->expected_tx_seq; seq != txseq; |
2670 | seq = __next_seq(chan, seq)) { |
2671 | if (!l2cap_ertm_seq_in_queue(head: &chan->srej_q, seq)) { |
2672 | control.reqseq = seq; |
2673 | l2cap_send_sframe(chan, control: &control); |
2674 | l2cap_seq_list_append(seq_list: &chan->srej_list, seq); |
2675 | } |
2676 | } |
2677 | |
2678 | chan->expected_tx_seq = __next_seq(chan, seq: txseq); |
2679 | } |
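/*
 * Example: with expected_tx_seq = 5, receiving txseq = 8 sends SREJ frames
 * for 5, 6 and 7 (unless already buffered in srej_q), records them in
 * srej_list and advances expected_tx_seq to 9.
 */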
2680 | |
2681 | static void l2cap_send_srej_tail(struct l2cap_chan *chan) |
2682 | { |
2683 | struct l2cap_ctrl control; |
2684 | |
2685 | BT_DBG("chan %p", chan); |
2686 | |
2687 | if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR) |
2688 | return; |
2689 | |
2690 | memset(&control, 0, sizeof(control)); |
2691 | control.sframe = 1; |
2692 | control.super = L2CAP_SUPER_SREJ; |
2693 | control.reqseq = chan->srej_list.tail; |
2694 | l2cap_send_sframe(chan, control: &control); |
2695 | } |
2696 | |
2697 | static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq) |
2698 | { |
2699 | struct l2cap_ctrl control; |
2700 | u16 initial_head; |
2701 | u16 seq; |
2702 | |
2703 | BT_DBG("chan %p, txseq %u", chan, txseq); |
2704 | |
2705 | memset(&control, 0, sizeof(control)); |
2706 | control.sframe = 1; |
2707 | control.super = L2CAP_SUPER_SREJ; |
2708 | |
2709 | /* Capture initial list head to allow only one pass through the list. */ |
2710 | initial_head = chan->srej_list.head; |
2711 | |
2712 | do { |
2713 | seq = l2cap_seq_list_pop(seq_list: &chan->srej_list); |
2714 | if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR) |
2715 | break; |
2716 | |
2717 | control.reqseq = seq; |
2718 | l2cap_send_sframe(chan, control: &control); |
2719 | l2cap_seq_list_append(seq_list: &chan->srej_list, seq); |
2720 | } while (chan->srej_list.head != initial_head); |
2721 | } |
2722 | |
2723 | static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq) |
2724 | { |
2725 | struct sk_buff *acked_skb; |
2726 | u16 ackseq; |
2727 | |
2728 | BT_DBG("chan %p, reqseq %u", chan, reqseq); |
2729 | |
2730 | if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq) |
2731 | return; |
2732 | |
2733 | BT_DBG("expected_ack_seq %u, unacked_frames %u", |
2734 | chan->expected_ack_seq, chan->unacked_frames); |
2735 | |
2736 | for (ackseq = chan->expected_ack_seq; ackseq != reqseq; |
2737 | ackseq = __next_seq(chan, seq: ackseq)) { |
2738 | |
2739 | acked_skb = l2cap_ertm_seq_in_queue(head: &chan->tx_q, seq: ackseq); |
2740 | if (acked_skb) { |
2741 | skb_unlink(skb: acked_skb, list: &chan->tx_q); |
2742 | kfree_skb(skb: acked_skb); |
2743 | chan->unacked_frames--; |
2744 | } |
2745 | } |
2746 | |
2747 | chan->expected_ack_seq = reqseq; |
2748 | |
2749 | if (chan->unacked_frames == 0) |
2750 | __clear_retrans_timer(chan); |
2751 | |
2752 | BT_DBG("unacked_frames %u", chan->unacked_frames); |
2753 | } |
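/*
 * Example: expected_ack_seq = 2, unacked_frames = 4, and an ack with
 * reqseq = 5 arrives: frames 2, 3 and 4 are unlinked from tx_q and freed,
 * leaving unacked_frames = 1 and expected_ack_seq = 5.
 */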
2754 | |
2755 | static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan) |
2756 | { |
2757 | BT_DBG("chan %p", chan); |
2758 | |
2759 | chan->expected_tx_seq = chan->buffer_seq; |
2760 | l2cap_seq_list_clear(seq_list: &chan->srej_list); |
2761 | skb_queue_purge(list: &chan->srej_q); |
2762 | chan->rx_state = L2CAP_RX_STATE_RECV; |
2763 | } |
2764 | |
2765 | static void l2cap_tx_state_xmit(struct l2cap_chan *chan, |
2766 | struct l2cap_ctrl *control, |
2767 | struct sk_buff_head *skbs, u8 event) |
2768 | { |
2769 | BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs, |
2770 | event); |
2771 | |
2772 | switch (event) { |
2773 | case L2CAP_EV_DATA_REQUEST: |
2774 | if (chan->tx_send_head == NULL) |
2775 | chan->tx_send_head = skb_peek(list_: skbs); |
2776 | |
2777 | skb_queue_splice_tail_init(list: skbs, head: &chan->tx_q); |
2778 | l2cap_ertm_send(chan); |
2779 | break; |
2780 | case L2CAP_EV_LOCAL_BUSY_DETECTED: |
2781 | BT_DBG("Enter LOCAL_BUSY"); |
2782 | set_bit(nr: CONN_LOCAL_BUSY, addr: &chan->conn_state); |
2783 | |
2784 | if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { |
2785 | /* The SREJ_SENT state must be aborted if we are to |
2786 | * enter the LOCAL_BUSY state. |
2787 | */ |
2788 | l2cap_abort_rx_srej_sent(chan); |
2789 | } |
2790 | |
2791 | l2cap_send_ack(chan); |
2792 | |
2793 | break; |
2794 | case L2CAP_EV_LOCAL_BUSY_CLEAR: |
2795 | BT_DBG("Exit LOCAL_BUSY"); |
2796 | clear_bit(nr: CONN_LOCAL_BUSY, addr: &chan->conn_state); |
2797 | |
2798 | if (test_bit(CONN_RNR_SENT, &chan->conn_state)) { |
2799 | struct l2cap_ctrl local_control; |
2800 | |
2801 | memset(&local_control, 0, sizeof(local_control)); |
2802 | local_control.sframe = 1; |
2803 | local_control.super = L2CAP_SUPER_RR; |
2804 | local_control.poll = 1; |
2805 | local_control.reqseq = chan->buffer_seq; |
2806 | l2cap_send_sframe(chan, control: &local_control); |
2807 | |
2808 | chan->retry_count = 1; |
2809 | __set_monitor_timer(chan); |
2810 | chan->tx_state = L2CAP_TX_STATE_WAIT_F; |
2811 | } |
2812 | break; |
2813 | case L2CAP_EV_RECV_REQSEQ_AND_FBIT: |
2814 | l2cap_process_reqseq(chan, reqseq: control->reqseq); |
2815 | break; |
2816 | case L2CAP_EV_EXPLICIT_POLL: |
2817 | l2cap_send_rr_or_rnr(chan, poll: 1); |
2818 | chan->retry_count = 1; |
2819 | __set_monitor_timer(chan); |
2820 | __clear_ack_timer(chan); |
2821 | chan->tx_state = L2CAP_TX_STATE_WAIT_F; |
2822 | break; |
2823 | case L2CAP_EV_RETRANS_TO: |
2824 | l2cap_send_rr_or_rnr(chan, poll: 1); |
2825 | chan->retry_count = 1; |
2826 | __set_monitor_timer(chan); |
2827 | chan->tx_state = L2CAP_TX_STATE_WAIT_F; |
2828 | break; |
2829 | case L2CAP_EV_RECV_FBIT: |
2830 | /* Nothing to process */ |
2831 | break; |
2832 | default: |
2833 | break; |
2834 | } |
2835 | } |
2836 | |
2837 | static void l2cap_tx_state_wait_f(struct l2cap_chan *chan, |
2838 | struct l2cap_ctrl *control, |
2839 | struct sk_buff_head *skbs, u8 event) |
2840 | { |
2841 | BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs, |
2842 | event); |
2843 | |
2844 | switch (event) { |
2845 | case L2CAP_EV_DATA_REQUEST: |
2846 | if (chan->tx_send_head == NULL) |
2847 | chan->tx_send_head = skb_peek(list_: skbs); |
2848 | /* Queue data, but don't send. */ |
2849 | skb_queue_splice_tail_init(list: skbs, head: &chan->tx_q); |
2850 | break; |
2851 | case L2CAP_EV_LOCAL_BUSY_DETECTED: |
2852 | BT_DBG("Enter LOCAL_BUSY"); |
2853 | set_bit(nr: CONN_LOCAL_BUSY, addr: &chan->conn_state); |
2854 | |
2855 | if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { |
2856 | /* The SREJ_SENT state must be aborted if we are to |
2857 | * enter the LOCAL_BUSY state. |
2858 | */ |
2859 | l2cap_abort_rx_srej_sent(chan); |
2860 | } |
2861 | |
2862 | l2cap_send_ack(chan); |
2863 | |
2864 | break; |
2865 | case L2CAP_EV_LOCAL_BUSY_CLEAR: |
2866 | BT_DBG("Exit LOCAL_BUSY"); |
2867 | clear_bit(nr: CONN_LOCAL_BUSY, addr: &chan->conn_state); |
2868 | |
2869 | if (test_bit(CONN_RNR_SENT, &chan->conn_state)) { |
2870 | struct l2cap_ctrl local_control; |
2871 | memset(&local_control, 0, sizeof(local_control)); |
2872 | local_control.sframe = 1; |
2873 | local_control.super = L2CAP_SUPER_RR; |
2874 | local_control.poll = 1; |
2875 | local_control.reqseq = chan->buffer_seq; |
2876 | l2cap_send_sframe(chan, control: &local_control); |
2877 | |
2878 | chan->retry_count = 1; |
2879 | __set_monitor_timer(chan); |
2880 | chan->tx_state = L2CAP_TX_STATE_WAIT_F; |
2881 | } |
2882 | break; |
2883 | case L2CAP_EV_RECV_REQSEQ_AND_FBIT: |
2884 | l2cap_process_reqseq(chan, reqseq: control->reqseq); |
2885 | fallthrough; |
2886 | |
2887 | case L2CAP_EV_RECV_FBIT: |
2888 | if (control && control->final) { |
2889 | __clear_monitor_timer(chan); |
2890 | if (chan->unacked_frames > 0) |
2891 | __set_retrans_timer(chan); |
2892 | chan->retry_count = 0; |
2893 | chan->tx_state = L2CAP_TX_STATE_XMIT; |
2894 | BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state); |
2895 | } |
2896 | break; |
2897 | case L2CAP_EV_EXPLICIT_POLL: |
2898 | /* Ignore */ |
2899 | break; |
2900 | case L2CAP_EV_MONITOR_TO: |
2901 | if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) { |
2902 | l2cap_send_rr_or_rnr(chan, poll: 1); |
2903 | __set_monitor_timer(chan); |
2904 | chan->retry_count++; |
2905 | } else { |
2906 | l2cap_send_disconn_req(chan, ECONNABORTED); |
2907 | } |
2908 | break; |
2909 | default: |
2910 | break; |
2911 | } |
2912 | } |
2913 | |
2914 | static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control, |
2915 | struct sk_buff_head *skbs, u8 event) |
2916 | { |
2917 | BT_DBG("chan %p, control %p, skbs %p, event %d, state %d", |
2918 | chan, control, skbs, event, chan->tx_state); |
2919 | |
2920 | switch (chan->tx_state) { |
2921 | case L2CAP_TX_STATE_XMIT: |
2922 | l2cap_tx_state_xmit(chan, control, skbs, event); |
2923 | break; |
2924 | case L2CAP_TX_STATE_WAIT_F: |
2925 | l2cap_tx_state_wait_f(chan, control, skbs, event); |
2926 | break; |
2927 | default: |
2928 | /* Ignore event */ |
2929 | break; |
2930 | } |
2931 | } |
2932 | |
2933 | static void l2cap_pass_to_tx(struct l2cap_chan *chan, |
2934 | struct l2cap_ctrl *control) |
2935 | { |
2936 | BT_DBG("chan %p, control %p", chan, control); |
2937 | l2cap_tx(chan, control, NULL, event: L2CAP_EV_RECV_REQSEQ_AND_FBIT); |
2938 | } |
2939 | |
2940 | static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan, |
2941 | struct l2cap_ctrl *control) |
2942 | { |
2943 | BT_DBG("chan %p, control %p", chan, control); |
2944 | l2cap_tx(chan, control, NULL, event: L2CAP_EV_RECV_FBIT); |
2945 | } |
2946 | |
2947 | /* Copy frame to all raw sockets on that connection */ |
2948 | static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) |
2949 | { |
2950 | struct sk_buff *nskb; |
2951 | struct l2cap_chan *chan; |
2952 | |
2953 | BT_DBG("conn %p", conn); |
2954 | |
2955 | list_for_each_entry(chan, &conn->chan_l, list) { |
2956 | if (chan->chan_type != L2CAP_CHAN_RAW) |
2957 | continue; |
2958 | |
2959 | /* Don't send frame to the channel it came from */ |
2960 | if (bt_cb(skb)->l2cap.chan == chan) |
2961 | continue; |
2962 | |
2963 | nskb = skb_clone(skb, GFP_KERNEL); |
2964 | if (!nskb) |
2965 | continue; |
2966 | if (chan->ops->recv(chan, nskb)) |
2967 | kfree_skb(skb: nskb); |
2968 | } |
2969 | } |
2970 | |
2971 | /* ---- L2CAP signalling commands ---- */ |
2972 | static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code, |
2973 | u8 ident, u16 dlen, void *data) |
2974 | { |
2975 | struct sk_buff *skb, **frag; |
2976 | struct l2cap_cmd_hdr *cmd; |
2977 | struct l2cap_hdr *lh; |
2978 | int len, count; |
2979 | |
2980 | BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u", |
2981 | conn, code, ident, dlen); |
2982 | |
2983 | if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE) |
2984 | return NULL; |
2985 | |
2986 | len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen; |
2987 | count = min_t(unsigned int, conn->mtu, len); |
2988 | |
2989 | skb = bt_skb_alloc(len: count, GFP_KERNEL); |
2990 | if (!skb) |
2991 | return NULL; |
2992 | |
2993 | lh = skb_put(skb, L2CAP_HDR_SIZE); |
2994 | lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen); |
2995 | |
2996 | if (conn->hcon->type == LE_LINK) |
2997 | lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING); |
2998 | else |
2999 | lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING); |
3000 | |
3001 | cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE); |
3002 | cmd->code = code; |
3003 | cmd->ident = ident; |
3004 | cmd->len = cpu_to_le16(dlen); |
3005 | |
3006 | if (dlen) { |
3007 | count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE; |
3008 | skb_put_data(skb, data, len: count); |
3009 | data += count; |
3010 | } |
3011 | |
3012 | len -= skb->len; |
3013 | |
3014 | /* Continuation fragments (no L2CAP header) */ |
3015 | frag = &skb_shinfo(skb)->frag_list; |
3016 | while (len) { |
3017 | count = min_t(unsigned int, conn->mtu, len); |
3018 | |
3019 | *frag = bt_skb_alloc(len: count, GFP_KERNEL); |
3020 | if (!*frag) |
3021 | goto fail; |
3022 | |
3023 | skb_put_data(skb: *frag, data, len: count); |
3024 | |
3025 | len -= count; |
3026 | data += count; |
3027 | |
3028 | frag = &(*frag)->next; |
3029 | } |
3030 | |
3031 | return skb; |
3032 | |
3033 | fail: |
3034 | kfree_skb(skb); |
3035 | return NULL; |
3036 | } |
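/*
 * Resulting signalling PDU (sketch):
 *
 *   [ L2CAP hdr: len + CID 0x0001 (ACL) / 0x0005 (LE) ][ code: 1 ][ ident: 1 ][ len: 2 ][ dlen bytes of data ]
 *
 * Command data that does not fit into conn->mtu is carried in plain
 * continuation fragments chained on the skb's frag_list.
 */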
3037 | |
3038 | static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, |
3039 | unsigned long *val) |
3040 | { |
3041 | struct l2cap_conf_opt *opt = *ptr; |
3042 | int len; |
3043 | |
3044 | len = L2CAP_CONF_OPT_SIZE + opt->len; |
3045 | *ptr += len; |
3046 | |
3047 | *type = opt->type; |
3048 | *olen = opt->len; |
3049 | |
3050 | switch (opt->len) { |
3051 | case 1: |
3052 | *val = *((u8 *) opt->val); |
3053 | break; |
3054 | |
3055 | case 2: |
3056 | *val = get_unaligned_le16(p: opt->val); |
3057 | break; |
3058 | |
3059 | case 4: |
3060 | *val = get_unaligned_le32(p: opt->val); |
3061 | break; |
3062 | |
3063 | default: |
3064 | *val = (unsigned long) opt->val; |
3065 | break; |
3066 | } |
3067 | |
3068 | BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val); |
3069 | return len; |
3070 | } |
3071 | |
3072 | static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size) |
3073 | { |
3074 | struct l2cap_conf_opt *opt = *ptr; |
3075 | |
3076 | BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val); |
3077 | |
3078 | if (size < L2CAP_CONF_OPT_SIZE + len) |
3079 | return; |
3080 | |
3081 | opt->type = type; |
3082 | opt->len = len; |
3083 | |
3084 | switch (len) { |
3085 | case 1: |
3086 | *((u8 *) opt->val) = val; |
3087 | break; |
3088 | |
3089 | case 2: |
3090 | put_unaligned_le16(val, p: opt->val); |
3091 | break; |
3092 | |
3093 | case 4: |
3094 | put_unaligned_le32(val, p: opt->val); |
3095 | break; |
3096 | |
3097 | default: |
3098 | memcpy(opt->val, (void *) val, len); |
3099 | break; |
3100 | } |
3101 | |
3102 | *ptr += L2CAP_CONF_OPT_SIZE + len; |
3103 | } |
3104 | |
3105 | static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size) |
3106 | { |
3107 | struct l2cap_conf_efs efs; |
3108 | |
3109 | switch (chan->mode) { |
3110 | case L2CAP_MODE_ERTM: |
3111 | efs.id = chan->local_id; |
3112 | efs.stype = chan->local_stype; |
3113 | efs.msdu = cpu_to_le16(chan->local_msdu); |
3114 | efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); |
3115 | efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT); |
3116 | efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO); |
3117 | break; |
3118 | |
3119 | case L2CAP_MODE_STREAMING: |
3120 | efs.id = 1; |
3121 | efs.stype = L2CAP_SERV_BESTEFFORT; |
3122 | efs.msdu = cpu_to_le16(chan->local_msdu); |
3123 | efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); |
3124 | efs.acc_lat = 0; |
3125 | efs.flush_to = 0; |
3126 | break; |
3127 | |
3128 | default: |
3129 | return; |
3130 | } |
3131 | |
3132 | l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, len: sizeof(efs), |
3133 | val: (unsigned long) &efs, size); |
3134 | } |
3135 | |
3136 | static void l2cap_ack_timeout(struct work_struct *work) |
3137 | { |
3138 | struct l2cap_chan *chan = container_of(work, struct l2cap_chan, |
3139 | ack_timer.work); |
3140 | u16 frames_to_ack; |
3141 | |
3142 | BT_DBG("chan %p", chan); |
3143 | |
3144 | l2cap_chan_lock(chan); |
3145 | |
3146 | frames_to_ack = __seq_offset(chan, seq1: chan->buffer_seq, |
3147 | seq2: chan->last_acked_seq); |
3148 | |
3149 | if (frames_to_ack) |
3150 | l2cap_send_rr_or_rnr(chan, poll: 0); |
3151 | |
3152 | l2cap_chan_unlock(chan); |
3153 | l2cap_chan_put(chan); |
3154 | } |
3155 | |
3156 | int l2cap_ertm_init(struct l2cap_chan *chan) |
3157 | { |
3158 | int err; |
3159 | |
3160 | chan->next_tx_seq = 0; |
3161 | chan->expected_tx_seq = 0; |
3162 | chan->expected_ack_seq = 0; |
3163 | chan->unacked_frames = 0; |
3164 | chan->buffer_seq = 0; |
3165 | chan->frames_sent = 0; |
3166 | chan->last_acked_seq = 0; |
3167 | chan->sdu = NULL; |
3168 | chan->sdu_last_frag = NULL; |
3169 | chan->sdu_len = 0; |
3170 | |
3171 | skb_queue_head_init(list: &chan->tx_q); |
3172 | |
3173 | if (chan->mode != L2CAP_MODE_ERTM) |
3174 | return 0; |
3175 | |
3176 | chan->rx_state = L2CAP_RX_STATE_RECV; |
3177 | chan->tx_state = L2CAP_TX_STATE_XMIT; |
3178 | |
3179 | skb_queue_head_init(list: &chan->srej_q); |
3180 | |
3181 | err = l2cap_seq_list_init(seq_list: &chan->srej_list, size: chan->tx_win); |
3182 | if (err < 0) |
3183 | return err; |
3184 | |
3185 | err = l2cap_seq_list_init(seq_list: &chan->retrans_list, size: chan->remote_tx_win); |
3186 | if (err < 0) |
3187 | l2cap_seq_list_free(seq_list: &chan->srej_list); |
3188 | |
3189 | return err; |
3190 | } |
3191 | |
3192 | static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) |
3193 | { |
3194 | switch (mode) { |
3195 | case L2CAP_MODE_STREAMING: |
3196 | case L2CAP_MODE_ERTM: |
3197 | if (l2cap_mode_supported(mode, feat_mask: remote_feat_mask)) |
3198 | return mode; |
3199 | fallthrough; |
3200 | default: |
3201 | return L2CAP_MODE_BASIC; |
3202 | } |
3203 | } |
3204 | |
3205 | static inline bool __l2cap_ews_supported(struct l2cap_conn *conn) |
3206 | { |
3207 | return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW); |
3208 | } |
3209 | |
3210 | static inline bool __l2cap_efs_supported(struct l2cap_conn *conn) |
3211 | { |
3212 | return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW); |
3213 | } |
3214 | |
3215 | static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan, |
3216 | struct l2cap_conf_rfc *rfc) |
3217 | { |
3218 | rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO); |
3219 | rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO); |
3220 | } |
3221 | |
3222 | static inline void l2cap_txwin_setup(struct l2cap_chan *chan) |
3223 | { |
3224 | if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW && |
3225 | __l2cap_ews_supported(conn: chan->conn)) { |
3226 | /* use extended control field */ |
3227 | set_bit(nr: FLAG_EXT_CTRL, addr: &chan->flags); |
3228 | chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW; |
3229 | } else { |
3230 | chan->tx_win = min_t(u16, chan->tx_win, |
3231 | L2CAP_DEFAULT_TX_WINDOW); |
3232 | chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; |
3233 | } |
3234 | chan->ack_win = chan->tx_win; |
3235 | } |
3236 | |
3237 | static void l2cap_mtu_auto(struct l2cap_chan *chan) |
3238 | { |
3239 | struct hci_conn *conn = chan->conn->hcon; |
3240 | |
3241 | chan->imtu = L2CAP_DEFAULT_MIN_MTU; |
3242 | |
3243 | /* The 2-DH1 packet has between 2 and 56 information bytes |
3244 | * (including the 2-byte payload header) |
3245 | */ |
3246 | if (!(conn->pkt_type & HCI_2DH1)) |
3247 | chan->imtu = 54; |
3248 | |
3249 | /* The 3-DH1 packet has between 2 and 85 information bytes |
3250 | * (including the 2-byte payload header) |
3251 | */ |
3252 | if (!(conn->pkt_type & HCI_3DH1)) |
3253 | chan->imtu = 83; |
3254 | |
3255 | /* The 2-DH3 packet has between 2 and 369 information bytes |
3256 | * (including the 2-byte payload header) |
3257 | */ |
3258 | if (!(conn->pkt_type & HCI_2DH3)) |
3259 | chan->imtu = 367; |
3260 | |
3261 | /* The 3-DH3 packet has between 2 and 554 information bytes |
3262 | * (including the 2-byte payload header) |
3263 | */ |
3264 | if (!(conn->pkt_type & HCI_3DH3)) |
3265 | chan->imtu = 552; |
3266 | |
3267 | /* The 2-DH5 packet has between 2 and 681 information bytes |
3268 | * (including the 2-byte payload header) |
3269 | */ |
3270 | if (!(conn->pkt_type & HCI_2DH5)) |
3271 | chan->imtu = 679; |
3272 | |
3273 | /* The 3-DH5 packet has between 2 and 1023 information bytes |
3274 | * (including the 2-byte payload header) |
3275 | */ |
3276 | if (!(conn->pkt_type & HCI_3DH5)) |
3277 | chan->imtu = 1021; |
3278 | } |
3279 | |
3280 | static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size) |
3281 | { |
3282 | struct l2cap_conf_req *req = data; |
3283 | struct l2cap_conf_rfc rfc = { .mode = chan->mode }; |
3284 | void *ptr = req->data; |
3285 | void *endptr = data + data_size; |
3286 | u16 size; |
3287 | |
3288 | BT_DBG("chan %p", chan); |
3289 | |
3290 | if (chan->num_conf_req || chan->num_conf_rsp) |
3291 | goto done; |
3292 | |
3293 | switch (chan->mode) { |
3294 | case L2CAP_MODE_STREAMING: |
3295 | case L2CAP_MODE_ERTM: |
3296 | if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) |
3297 | break; |
3298 | |
3299 | if (__l2cap_efs_supported(conn: chan->conn)) |
3300 | set_bit(nr: FLAG_EFS_ENABLE, addr: &chan->flags); |
3301 | |
3302 | fallthrough; |
3303 | default: |
3304 | chan->mode = l2cap_select_mode(mode: rfc.mode, remote_feat_mask: chan->conn->feat_mask); |
3305 | break; |
3306 | } |
3307 | |
3308 | done: |
3309 | if (chan->imtu != L2CAP_DEFAULT_MTU) { |
3310 | if (!chan->imtu) |
3311 | l2cap_mtu_auto(chan); |
3312 | l2cap_add_conf_opt(ptr: &ptr, L2CAP_CONF_MTU, len: 2, val: chan->imtu, |
3313 | size: endptr - ptr); |
3314 | } |
3315 | |
3316 | switch (chan->mode) { |
3317 | case L2CAP_MODE_BASIC: |
3318 | if (disable_ertm) |
3319 | break; |
3320 | |
3321 | if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) && |
3322 | !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING)) |
3323 | break; |
3324 | |
3325 | rfc.mode = L2CAP_MODE_BASIC; |
3326 | rfc.txwin_size = 0; |
3327 | rfc.max_transmit = 0; |
3328 | rfc.retrans_timeout = 0; |
3329 | rfc.monitor_timeout = 0; |
3330 | rfc.max_pdu_size = 0; |
3331 | |
3332 | l2cap_add_conf_opt(ptr: &ptr, L2CAP_CONF_RFC, len: sizeof(rfc), |
3333 | val: (unsigned long) &rfc, size: endptr - ptr); |
3334 | break; |
3335 | |
3336 | case L2CAP_MODE_ERTM: |
3337 | rfc.mode = L2CAP_MODE_ERTM; |
3338 | rfc.max_transmit = chan->max_tx; |
3339 | |
3340 | __l2cap_set_ertm_timeouts(chan, rfc: &rfc); |
3341 | |
3342 | size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - |
3343 | L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE - |
3344 | L2CAP_FCS_SIZE); |
3345 | rfc.max_pdu_size = cpu_to_le16(size); |
3346 | |
3347 | l2cap_txwin_setup(chan); |
3348 | |
3349 | rfc.txwin_size = min_t(u16, chan->tx_win, |
3350 | L2CAP_DEFAULT_TX_WINDOW); |
3351 | |
3352 | l2cap_add_conf_opt(ptr: &ptr, L2CAP_CONF_RFC, len: sizeof(rfc), |
3353 | val: (unsigned long) &rfc, size: endptr - ptr); |
3354 | |
3355 | if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) |
3356 | l2cap_add_opt_efs(ptr: &ptr, chan, size: endptr - ptr); |
3357 | |
3358 | if (test_bit(FLAG_EXT_CTRL, &chan->flags)) |
3359 | l2cap_add_conf_opt(ptr: &ptr, L2CAP_CONF_EWS, len: 2, |
3360 | val: chan->tx_win, size: endptr - ptr); |
3361 | |
3362 | if (chan->conn->feat_mask & L2CAP_FEAT_FCS) |
3363 | if (chan->fcs == L2CAP_FCS_NONE || |
3364 | test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) { |
3365 | chan->fcs = L2CAP_FCS_NONE; |
3366 | l2cap_add_conf_opt(ptr: &ptr, L2CAP_CONF_FCS, len: 1, |
3367 | val: chan->fcs, size: endptr - ptr); |
3368 | } |
3369 | break; |
3370 | |
3371 | case L2CAP_MODE_STREAMING: |
3372 | l2cap_txwin_setup(chan); |
3373 | rfc.mode = L2CAP_MODE_STREAMING; |
3374 | rfc.txwin_size = 0; |
3375 | rfc.max_transmit = 0; |
3376 | rfc.retrans_timeout = 0; |
3377 | rfc.monitor_timeout = 0; |
3378 | |
3379 | size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - |
3380 | L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE - |
3381 | L2CAP_FCS_SIZE); |
3382 | rfc.max_pdu_size = cpu_to_le16(size); |
3383 | |
3384 | l2cap_add_conf_opt(ptr: &ptr, L2CAP_CONF_RFC, len: sizeof(rfc), |
3385 | val: (unsigned long) &rfc, size: endptr - ptr); |
3386 | |
3387 | if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) |
3388 | l2cap_add_opt_efs(ptr: &ptr, chan, size: endptr - ptr); |
3389 | |
3390 | if (chan->conn->feat_mask & L2CAP_FEAT_FCS) |
3391 | if (chan->fcs == L2CAP_FCS_NONE || |
3392 | test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) { |
3393 | chan->fcs = L2CAP_FCS_NONE; |
3394 | l2cap_add_conf_opt(ptr: &ptr, L2CAP_CONF_FCS, len: 1, |
3395 | val: chan->fcs, size: endptr - ptr); |
3396 | } |
3397 | break; |
3398 | } |
3399 | |
3400 | req->dcid = cpu_to_le16(chan->dcid); |
3401 | req->flags = cpu_to_le16(0); |
3402 | |
3403 | return ptr - data; |
3404 | } |
3405 | |
3406 | static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size) |
3407 | { |
3408 | struct l2cap_conf_rsp *rsp = data; |
3409 | void *ptr = rsp->data; |
3410 | void *endptr = data + data_size; |
3411 | void *req = chan->conf_req; |
3412 | int len = chan->conf_len; |
3413 | int type, hint, olen; |
3414 | unsigned long val; |
3415 | struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; |
3416 | struct l2cap_conf_efs efs; |
3417 | u8 remote_efs = 0; |
3418 | u16 mtu = L2CAP_DEFAULT_MTU; |
3419 | u16 result = L2CAP_CONF_SUCCESS; |
3420 | u16 size; |
3421 | |
3422 | BT_DBG("chan %p", chan); |
3423 | |
3424 | while (len >= L2CAP_CONF_OPT_SIZE) { |
3425 | len -= l2cap_get_conf_opt(ptr: &req, type: &type, olen: &olen, val: &val); |
3426 | if (len < 0) |
3427 | break; |
3428 | |
3429 | hint = type & L2CAP_CONF_HINT; |
3430 | type &= L2CAP_CONF_MASK; |
3431 | |
3432 | switch (type) { |
3433 | case L2CAP_CONF_MTU: |
3434 | if (olen != 2) |
3435 | break; |
3436 | mtu = val; |
3437 | break; |
3438 | |
3439 | case L2CAP_CONF_FLUSH_TO: |
3440 | if (olen != 2) |
3441 | break; |
3442 | chan->flush_to = val; |
3443 | break; |
3444 | |
3445 | case L2CAP_CONF_QOS: |
3446 | break; |
3447 | |
3448 | case L2CAP_CONF_RFC: |
3449 | if (olen != sizeof(rfc)) |
3450 | break; |
3451 | memcpy(&rfc, (void *) val, olen); |
3452 | break; |
3453 | |
3454 | case L2CAP_CONF_FCS: |
3455 | if (olen != 1) |
3456 | break; |
3457 | if (val == L2CAP_FCS_NONE) |
3458 | set_bit(nr: CONF_RECV_NO_FCS, addr: &chan->conf_state); |
3459 | break; |
3460 | |
3461 | case L2CAP_CONF_EFS: |
3462 | if (olen != sizeof(efs)) |
3463 | break; |
3464 | remote_efs = 1; |
3465 | memcpy(&efs, (void *) val, olen); |
3466 | break; |
3467 | |
3468 | case L2CAP_CONF_EWS: |
3469 | if (olen != 2) |
3470 | break; |
3471 | return -ECONNREFUSED; |
3472 | |
3473 | default: |
3474 | if (hint) |
3475 | break; |
3476 | result = L2CAP_CONF_UNKNOWN; |
3477 | l2cap_add_conf_opt(ptr: &ptr, type: (u8)type, len: sizeof(u8), val: type, size: endptr - ptr); |
3478 | break; |
3479 | } |
3480 | } |
3481 | |
3482 | if (chan->num_conf_rsp || chan->num_conf_req > 1) |
3483 | goto done; |
3484 | |
3485 | switch (chan->mode) { |
3486 | case L2CAP_MODE_STREAMING: |
3487 | case L2CAP_MODE_ERTM: |
3488 | if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) { |
3489 | chan->mode = l2cap_select_mode(mode: rfc.mode, |
3490 | remote_feat_mask: chan->conn->feat_mask); |
3491 | break; |
3492 | } |
3493 | |
3494 | if (remote_efs) { |
3495 | if (__l2cap_efs_supported(conn: chan->conn)) |
3496 | set_bit(nr: FLAG_EFS_ENABLE, addr: &chan->flags); |
3497 | else |
3498 | return -ECONNREFUSED; |
3499 | } |
3500 | |
3501 | if (chan->mode != rfc.mode) |
3502 | return -ECONNREFUSED; |
3503 | |
3504 | break; |
3505 | } |
3506 | |
3507 | done: |
3508 | if (chan->mode != rfc.mode) { |
3509 | result = L2CAP_CONF_UNACCEPT; |
3510 | rfc.mode = chan->mode; |
3511 | |
3512 | if (chan->num_conf_rsp == 1) |
3513 | return -ECONNREFUSED; |
3514 | |
3515 | l2cap_add_conf_opt(ptr: &ptr, L2CAP_CONF_RFC, len: sizeof(rfc), |
3516 | val: (unsigned long) &rfc, size: endptr - ptr); |
3517 | } |
3518 | |
3519 | if (result == L2CAP_CONF_SUCCESS) { |
3520 | /* Configure output options and let the other side know |
3521 | * which ones we don't like. */ |
3522 | |
3523 | if (mtu < L2CAP_DEFAULT_MIN_MTU) |
3524 | result = L2CAP_CONF_UNACCEPT; |
3525 | else { |
3526 | chan->omtu = mtu; |
3527 | set_bit(nr: CONF_MTU_DONE, addr: &chan->conf_state); |
3528 | } |
3529 | l2cap_add_conf_opt(ptr: &ptr, L2CAP_CONF_MTU, len: 2, val: chan->omtu, size: endptr - ptr); |
3530 | |
3531 | if (remote_efs) { |
3532 | if (chan->local_stype != L2CAP_SERV_NOTRAFIC && |
3533 | efs.stype != L2CAP_SERV_NOTRAFIC && |
3534 | efs.stype != chan->local_stype) { |
3535 | |
3536 | result = L2CAP_CONF_UNACCEPT; |
3537 | |
3538 | if (chan->num_conf_req >= 1) |
3539 | return -ECONNREFUSED; |
3540 | |
3541 | l2cap_add_conf_opt(ptr: &ptr, L2CAP_CONF_EFS, |
3542 | len: sizeof(efs), |
3543 | val: (unsigned long) &efs, size: endptr - ptr); |
3544 | } else { |
3545 | /* Send PENDING Conf Rsp */ |
3546 | result = L2CAP_CONF_PENDING; |
3547 | set_bit(nr: CONF_LOC_CONF_PEND, addr: &chan->conf_state); |
3548 | } |
3549 | } |
3550 | |
3551 | switch (rfc.mode) { |
3552 | case L2CAP_MODE_BASIC: |
3553 | chan->fcs = L2CAP_FCS_NONE; |
3554 | set_bit(nr: CONF_MODE_DONE, addr: &chan->conf_state); |
3555 | break; |
3556 | |
3557 | case L2CAP_MODE_ERTM: |
3558 | if (!test_bit(CONF_EWS_RECV, &chan->conf_state)) |
3559 | chan->remote_tx_win = rfc.txwin_size; |
3560 | else |
3561 | rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW; |
3562 | |
3563 | chan->remote_max_tx = rfc.max_transmit; |
3564 | |
3565 | size = min_t(u16, le16_to_cpu(rfc.max_pdu_size), |
3566 | chan->conn->mtu - L2CAP_EXT_HDR_SIZE - |
3567 | L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE); |
3568 | rfc.max_pdu_size = cpu_to_le16(size); |
3569 | chan->remote_mps = size; |
3570 | |
3571 | __l2cap_set_ertm_timeouts(chan, rfc: &rfc); |
3572 | |
3573 | set_bit(nr: CONF_MODE_DONE, addr: &chan->conf_state); |
3574 | |
3575 | l2cap_add_conf_opt(ptr: &ptr, L2CAP_CONF_RFC, |
3576 | len: sizeof(rfc), val: (unsigned long) &rfc, size: endptr - ptr); |
3577 | |
3578 | if (remote_efs && |
3579 | test_bit(FLAG_EFS_ENABLE, &chan->flags)) { |
3580 | chan->remote_id = efs.id; |
3581 | chan->remote_stype = efs.stype; |
3582 | chan->remote_msdu = le16_to_cpu(efs.msdu); |
3583 | chan->remote_flush_to = |
3584 | le32_to_cpu(efs.flush_to); |
3585 | chan->remote_acc_lat = |
3586 | le32_to_cpu(efs.acc_lat); |
3587 | chan->remote_sdu_itime = |
3588 | le32_to_cpu(efs.sdu_itime); |
3589 | l2cap_add_conf_opt(ptr: &ptr, L2CAP_CONF_EFS, |
3590 | len: sizeof(efs), |
3591 | val: (unsigned long) &efs, size: endptr - ptr); |
3592 | } |
3593 | break; |
3594 | |
3595 | case L2CAP_MODE_STREAMING: |
3596 | size = min_t(u16, le16_to_cpu(rfc.max_pdu_size), |
3597 | chan->conn->mtu - L2CAP_EXT_HDR_SIZE - |
3598 | L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE); |
3599 | rfc.max_pdu_size = cpu_to_le16(size); |
3600 | chan->remote_mps = size; |
3601 | |
3602 | set_bit(nr: CONF_MODE_DONE, addr: &chan->conf_state); |
3603 | |
3604 | l2cap_add_conf_opt(ptr: &ptr, L2CAP_CONF_RFC, len: sizeof(rfc), |
3605 | val: (unsigned long) &rfc, size: endptr - ptr); |
3606 | |
3607 | break; |
3608 | |
3609 | default: |
3610 | result = L2CAP_CONF_UNACCEPT; |
3611 | |
3612 | memset(&rfc, 0, sizeof(rfc)); |
3613 | rfc.mode = chan->mode; |
3614 | } |
3615 | |
3616 | if (result == L2CAP_CONF_SUCCESS) |
3617 | set_bit(nr: CONF_OUTPUT_DONE, addr: &chan->conf_state); |
3618 | } |
3619 | rsp->scid = cpu_to_le16(chan->dcid); |
3620 | rsp->result = cpu_to_le16(result); |
3621 | rsp->flags = cpu_to_le16(0); |
3622 | |
3623 | return ptr - data; |
3624 | } |
3625 | |
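/* Parse the options carried in a Configure Response and build the next
 * Configure Request in @data. Negotiated MTU, flush timeout, RFC and EFS
 * values are stored in the channel; unacceptable parameters return
 * -ECONNREFUSED, otherwise the length of the new request is returned.
 */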
3626 | static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, |
3627 | void *data, size_t size, u16 *result) |
3628 | { |
3629 | struct l2cap_conf_req *req = data; |
3630 | void *ptr = req->data; |
3631 | void *endptr = data + size; |
3632 | int type, olen; |
3633 | unsigned long val; |
3634 | struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; |
3635 | struct l2cap_conf_efs efs; |
3636 | |
3637 | BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data); |
3638 | |
3639 | while (len >= L2CAP_CONF_OPT_SIZE) { |
3640 | len -= l2cap_get_conf_opt(ptr: &rsp, type: &type, olen: &olen, val: &val); |
3641 | if (len < 0) |
3642 | break; |
3643 | |
3644 | switch (type) { |
3645 | case L2CAP_CONF_MTU: |
3646 | if (olen != 2) |
3647 | break; |
3648 | if (val < L2CAP_DEFAULT_MIN_MTU) { |
3649 | *result = L2CAP_CONF_UNACCEPT; |
3650 | chan->imtu = L2CAP_DEFAULT_MIN_MTU; |
3651 | } else |
3652 | chan->imtu = val; |
3653 | l2cap_add_conf_opt(ptr: &ptr, L2CAP_CONF_MTU, len: 2, val: chan->imtu, |
3654 | size: endptr - ptr); |
3655 | break; |
3656 | |
3657 | case L2CAP_CONF_FLUSH_TO: |
3658 | if (olen != 2) |
3659 | break; |
3660 | chan->flush_to = val; |
3661 | l2cap_add_conf_opt(ptr: &ptr, L2CAP_CONF_FLUSH_TO, len: 2, |
3662 | val: chan->flush_to, size: endptr - ptr); |
3663 | break; |
3664 | |
3665 | case L2CAP_CONF_RFC: |
3666 | if (olen != sizeof(rfc)) |
3667 | break; |
3668 | memcpy(&rfc, (void *)val, olen); |
3669 | if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) && |
3670 | rfc.mode != chan->mode) |
3671 | return -ECONNREFUSED; |
3672 | chan->fcs = 0; |
3673 | l2cap_add_conf_opt(ptr: &ptr, L2CAP_CONF_RFC, len: sizeof(rfc), |
3674 | val: (unsigned long) &rfc, size: endptr - ptr); |
3675 | break; |
3676 | |
3677 | case L2CAP_CONF_EWS: |
3678 | if (olen != 2) |
3679 | break; |
3680 | chan->ack_win = min_t(u16, val, chan->ack_win); |
3681 | l2cap_add_conf_opt(ptr: &ptr, L2CAP_CONF_EWS, len: 2, |
3682 | val: chan->tx_win, size: endptr - ptr); |
3683 | break; |
3684 | |
3685 | case L2CAP_CONF_EFS: |
3686 | if (olen != sizeof(efs)) |
3687 | break; |
3688 | memcpy(&efs, (void *)val, olen); |
3689 | if (chan->local_stype != L2CAP_SERV_NOTRAFIC && |
3690 | efs.stype != L2CAP_SERV_NOTRAFIC && |
3691 | efs.stype != chan->local_stype) |
3692 | return -ECONNREFUSED; |
3693 | l2cap_add_conf_opt(ptr: &ptr, L2CAP_CONF_EFS, len: sizeof(efs), |
3694 | val: (unsigned long) &efs, size: endptr - ptr); |
3695 | break; |
3696 | |
3697 | case L2CAP_CONF_FCS: |
3698 | if (olen != 1) |
3699 | break; |
3700 | if (*result == L2CAP_CONF_PENDING) |
3701 | if (val == L2CAP_FCS_NONE) |
3702 | set_bit(nr: CONF_RECV_NO_FCS, |
3703 | addr: &chan->conf_state); |
3704 | break; |
3705 | } |
3706 | } |
3707 | |
3708 | if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode) |
3709 | return -ECONNREFUSED; |
3710 | |
3711 | chan->mode = rfc.mode; |
3712 | |
3713 | if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) { |
3714 | switch (rfc.mode) { |
3715 | case L2CAP_MODE_ERTM: |
3716 | chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout); |
3717 | chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout); |
3718 | chan->mps = le16_to_cpu(rfc.max_pdu_size); |
3719 | if (!test_bit(FLAG_EXT_CTRL, &chan->flags)) |
3720 | chan->ack_win = min_t(u16, chan->ack_win, |
3721 | rfc.txwin_size); |
3722 | |
3723 | if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) { |
3724 | chan->local_msdu = le16_to_cpu(efs.msdu); |
3725 | chan->local_sdu_itime = |
3726 | le32_to_cpu(efs.sdu_itime); |
3727 | chan->local_acc_lat = le32_to_cpu(efs.acc_lat); |
3728 | chan->local_flush_to = |
3729 | le32_to_cpu(efs.flush_to); |
3730 | } |
3731 | break; |
3732 | |
3733 | case L2CAP_MODE_STREAMING: |
3734 | chan->mps = le16_to_cpu(rfc.max_pdu_size); |
3735 | } |
3736 | } |
3737 | |
3738 | req->dcid = cpu_to_le16(chan->dcid); |
3739 | req->flags = cpu_to_le16(0); |
3740 | |
3741 | return ptr - data; |
3742 | } |
3743 | |
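/* Build a minimal Configure Response (header only, no options) with the
 * given result and flags; returns the number of bytes written to @data.
 */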
3744 | static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, |
3745 | u16 result, u16 flags) |
3746 | { |
3747 | struct l2cap_conf_rsp *rsp = data; |
3748 | void *ptr = rsp->data; |
3749 | |
3750 | BT_DBG("chan %p", chan); |
3751 | |
3752 | rsp->scid = cpu_to_le16(chan->dcid); |
3753 | rsp->result = cpu_to_le16(result); |
3754 | rsp->flags = cpu_to_le16(flags); |
3755 | |
3756 | return ptr - data; |
3757 | } |
3758 | |
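/* Send the LE Credit Based Connection Response that was deferred while
 * the channel waited to be accepted by the socket layer.
 */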
3759 | void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan) |
3760 | { |
3761 | struct l2cap_le_conn_rsp rsp; |
3762 | struct l2cap_conn *conn = chan->conn; |
3763 | |
3764 | BT_DBG("chan %p", chan); |
3765 | |
3766 | rsp.dcid = cpu_to_le16(chan->scid); |
3767 | rsp.mtu = cpu_to_le16(chan->imtu); |
3768 | rsp.mps = cpu_to_le16(chan->mps); |
3769 | rsp.credits = cpu_to_le16(chan->rx_credits); |
3770 | rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS); |
3771 | |
3772 | l2cap_send_cmd(conn, ident: chan->ident, L2CAP_LE_CONN_RSP, len: sizeof(rsp), |
3773 | data: &rsp); |
3774 | } |
3775 | |
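/* Channel-list callback for deferred enhanced credit based connections:
 * count channels still pending accept (BT_CONNECT2) and mark the whole
 * request as refused if any channel ended up in another state.
 */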
3776 | static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data) |
3777 | { |
3778 | int *result = data; |
3779 | |
3780 | if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags)) |
3781 | return; |
3782 | |
3783 | switch (chan->state) { |
3784 | case BT_CONNECT2: |
3785 | /* If the channel is still pending accept, add it to the result */ |
3786 | (*result)++; |
3787 | return; |
3788 | case BT_CONNECTED: |
3789 | return; |
3790 | default: |
3791 | /* If it is not connected or pending accept, it has been refused */ |
3792 | *result = -ECONNREFUSED; |
3793 | return; |
3794 | } |
3795 | } |
3796 | |
3797 | struct l2cap_ecred_rsp_data { |
3798 | struct { |
3799 | struct l2cap_ecred_conn_rsp_hdr rsp; |
3800 | __le16 scid[L2CAP_ECRED_MAX_CID]; |
3801 | } __packed pdu; |
3802 | int count; |
3803 | }; |
3804 | |
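/* Channel-list callback that collects the local CID of every deferred
 * channel into the pending L2CAP_ECRED_CONN_RSP, or deletes the channel
 * if the response carries an error result.
 */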
3805 | static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data) |
3806 | { |
3807 | struct l2cap_ecred_rsp_data *rsp = data; |
3808 | struct l2cap_ecred_conn_rsp *rsp_flex = |
3809 | container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr); |
3810 | |
3811 | /* Check if the channel is for an outgoing connection or if it was not |
3812 | * deferred, since in either of those cases it must be skipped. |
3813 | */ |
3814 | if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) || |
3815 | !test_and_clear_bit(nr: FLAG_DEFER_SETUP, addr: &chan->flags)) |
3816 | return; |
3817 | |
3818 | /* Reset ident so only one response is sent */ |
3819 | chan->ident = 0; |
3820 | |
3821 | /* Include all channels pending with the same ident */ |
3822 | if (!rsp->pdu.rsp.result) |
3823 | rsp_flex->dcid[rsp->count++] = cpu_to_le16(chan->scid); |
3824 | else |
3825 | l2cap_chan_del(chan, ECONNRESET); |
3826 | } |
3827 | |
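/* Send the deferred response for an enhanced credit based connection
 * request: once every channel sharing the same ident has been accepted
 * or refused, build and send a single L2CAP_ECRED_CONN_RSP covering all
 * of them.
 */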
3828 | void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan) |
3829 | { |
3830 | struct l2cap_conn *conn = chan->conn; |
3831 | struct l2cap_ecred_rsp_data data; |
3832 | u16 id = chan->ident; |
3833 | int result = 0; |
3834 | |
3835 | if (!id) |
3836 | return; |
3837 | |
3838 | BT_DBG("chan %p id %d", chan, id); |
3839 | |
3840 | memset(&data, 0, sizeof(data)); |
3841 | |
3842 | data.pdu.rsp.mtu = cpu_to_le16(chan->imtu); |
3843 | data.pdu.rsp.mps = cpu_to_le16(chan->mps); |
3844 | data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits); |
3845 | data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS); |
3846 | |
3847 | /* Verify that all channels are ready */ |
3848 | __l2cap_chan_list_id(conn, id, func: l2cap_ecred_list_defer, data: &result); |
3849 | |
3850 | if (result > 0) |
3851 | return; |
3852 | |
3853 | if (result < 0) |
3854 | data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION); |
3855 | |
3856 | /* Build response */ |
3857 | __l2cap_chan_list_id(conn, id, func: l2cap_ecred_rsp_defer, data: &data); |
3858 | |
3859 | l2cap_send_cmd(conn, ident: id, L2CAP_ECRED_CONN_RSP, |
3860 | len: sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)), |
3861 | data: &data.pdu); |
3862 | } |
3863 | |
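/* Send the deferred BR/EDR Connection Response and, if it has not been
 * sent yet, the first Configure Request.
 */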
3864 | void __l2cap_connect_rsp_defer(struct l2cap_chan *chan) |
3865 | { |
3866 | struct l2cap_conn_rsp rsp; |
3867 | struct l2cap_conn *conn = chan->conn; |
3868 | u8 buf[128]; |
3869 | u8 rsp_code; |
3870 | |
3871 | rsp.scid = cpu_to_le16(chan->dcid); |
3872 | rsp.dcid = cpu_to_le16(chan->scid); |
3873 | rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); |
3874 | rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); |
3875 | rsp_code = L2CAP_CONN_RSP; |
3876 | |
3877 | BT_DBG("chan %p rsp_code %u", chan, rsp_code); |
3878 | |
3879 | l2cap_send_cmd(conn, ident: chan->ident, code: rsp_code, len: sizeof(rsp), data: &rsp); |
3880 | |
3881 | if (test_and_set_bit(nr: CONF_REQ_SENT, addr: &chan->conf_state)) |
3882 | return; |
3883 | |
3884 | l2cap_send_cmd(conn, ident: l2cap_get_ident(conn), L2CAP_CONF_REQ, |
3885 | len: l2cap_build_conf_req(chan, data: buf, data_size: sizeof(buf)), data: buf); |
3886 | chan->num_conf_req++; |
3887 | } |
3888 | |
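/* Extract the RFC and extended window size options from a final
 * Configure Response and apply them to the channel (ERTM timeouts, MPS
 * and ack window).
 */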
3889 | static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len) |
3890 | { |
3891 | int type, olen; |
3892 | unsigned long val; |
3893 | /* Use sane default values in case a misbehaving remote device |
3894 | * did not send an RFC or extended window size option. |
3895 | */ |
3896 | u16 txwin_ext = chan->ack_win; |
3897 | struct l2cap_conf_rfc rfc = { |
3898 | .mode = chan->mode, |
3899 | .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO), |
3900 | .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO), |
3901 | .max_pdu_size = cpu_to_le16(chan->imtu), |
3902 | .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW), |
3903 | }; |
3904 | |
3905 | BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len); |
3906 | |
3907 | if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING)) |
3908 | return; |
3909 | |
3910 | while (len >= L2CAP_CONF_OPT_SIZE) { |
3911 | len -= l2cap_get_conf_opt(ptr: &rsp, type: &type, olen: &olen, val: &val); |
3912 | if (len < 0) |
3913 | break; |
3914 | |
3915 | switch (type) { |
3916 | case L2CAP_CONF_RFC: |
3917 | if (olen != sizeof(rfc)) |
3918 | break; |
3919 | memcpy(&rfc, (void *)val, olen); |
3920 | break; |
3921 | case L2CAP_CONF_EWS: |
3922 | if (olen != 2) |
3923 | break; |
3924 | txwin_ext = val; |
3925 | break; |
3926 | } |
3927 | } |
3928 | |
3929 | switch (rfc.mode) { |
3930 | case L2CAP_MODE_ERTM: |
3931 | chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout); |
3932 | chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout); |
3933 | chan->mps = le16_to_cpu(rfc.max_pdu_size); |
3934 | if (test_bit(FLAG_EXT_CTRL, &chan->flags)) |
3935 | chan->ack_win = min_t(u16, chan->ack_win, txwin_ext); |
3936 | else |
3937 | chan->ack_win = min_t(u16, chan->ack_win, |
3938 | rfc.txwin_size); |
3939 | break; |
3940 | case L2CAP_MODE_STREAMING: |
3941 | chan->mps = le16_to_cpu(rfc.max_pdu_size); |
3942 | } |
3943 | } |
3944 | |
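/* Handle an L2CAP Command Reject. A "command not understood" reject for
 * our Information Request ends the feature mask exchange and lets
 * pending channels proceed.
 */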
3945 | static inline int l2cap_command_rej(struct l2cap_conn *conn, |
3946 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, |
3947 | u8 *data) |
3948 | { |
3949 | struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data; |
3950 | |
3951 | if (cmd_len < sizeof(*rej)) |
3952 | return -EPROTO; |
3953 | |
3954 | if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD) |
3955 | return 0; |
3956 | |
3957 | if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) && |
3958 | cmd->ident == conn->info_ident) { |
3959 | cancel_delayed_work(dwork: &conn->info_timer); |
3960 | |
3961 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; |
3962 | conn->info_ident = 0; |
3963 | |
3964 | l2cap_conn_start(conn); |
3965 | } |
3966 | |
3967 | return 0; |
3968 | } |
3969 | |
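/* Handle a BR/EDR Connection Request: look up a listening channel for
 * the PSM, enforce link security, allocate the new channel and send a
 * Connection Response (success, pending or an error result).
 */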
3970 | static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, |
3971 | u8 *data, u8 rsp_code) |
3972 | { |
3973 | struct l2cap_conn_req *req = (struct l2cap_conn_req *) data; |
3974 | struct l2cap_conn_rsp rsp; |
3975 | struct l2cap_chan *chan = NULL, *pchan = NULL; |
3976 | int result, status = L2CAP_CS_NO_INFO; |
3977 | |
3978 | u16 dcid = 0, scid = __le16_to_cpu(req->scid); |
3979 | __le16 psm = req->psm; |
3980 | |
3981 | BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid); |
3982 | |
3983 | /* Check if we have socket listening on psm */ |
3984 | pchan = l2cap_global_chan_by_psm(state: BT_LISTEN, psm, src: &conn->hcon->src, |
3985 | dst: &conn->hcon->dst, ACL_LINK); |
3986 | if (!pchan) { |
3987 | result = L2CAP_CR_BAD_PSM; |
3988 | goto response; |
3989 | } |
3990 | |
3991 | l2cap_chan_lock(chan: pchan); |
3992 | |
3993 | /* Check if the ACL is secure enough (if not SDP) */ |
3994 | if (psm != cpu_to_le16(L2CAP_PSM_SDP) && |
3995 | (!hci_conn_check_link_mode(conn: conn->hcon) || |
3996 | !l2cap_check_enc_key_size(hcon: conn->hcon, chan: pchan))) { |
3997 | conn->disc_reason = HCI_ERROR_AUTH_FAILURE; |
3998 | result = L2CAP_CR_SEC_BLOCK; |
3999 | goto response; |
4000 | } |
4001 | |
4002 | result = L2CAP_CR_NO_MEM; |
4003 | |
4004 | /* Check for valid dynamic CID range (as per Erratum 3253) */ |
4005 | if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) { |
4006 | result = L2CAP_CR_INVALID_SCID; |
4007 | goto response; |
4008 | } |
4009 | |
4010 | /* Check if we already have channel with that dcid */ |
4011 | if (__l2cap_get_chan_by_dcid(conn, cid: scid)) { |
4012 | result = L2CAP_CR_SCID_IN_USE; |
4013 | goto response; |
4014 | } |
4015 | |
4016 | chan = pchan->ops->new_connection(pchan); |
4017 | if (!chan) |
4018 | goto response; |
4019 | |
4020 | /* For certain devices (ex: HID mouse), support for authentication, |
4021 | * pairing and bonding is optional. For such devices, in order to avoid |
4022 | * keeping the ACL alive for too long after L2CAP disconnection, reset |
4023 | * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect. |
4024 | */ |
4025 | conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT; |
4026 | |
4027 | bacpy(dst: &chan->src, src: &conn->hcon->src); |
4028 | bacpy(dst: &chan->dst, src: &conn->hcon->dst); |
4029 | chan->src_type = bdaddr_src_type(hcon: conn->hcon); |
4030 | chan->dst_type = bdaddr_dst_type(hcon: conn->hcon); |
4031 | chan->psm = psm; |
4032 | chan->dcid = scid; |
4033 | |
4034 | __l2cap_chan_add(conn, chan); |
4035 | |
4036 | dcid = chan->scid; |
4037 | |
4038 | __set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); |
4039 | |
4040 | chan->ident = cmd->ident; |
4041 | |
4042 | if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) { |
4043 | if (l2cap_chan_check_security(chan, initiator: false)) { |
4044 | if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { |
4045 | l2cap_state_change(chan, state: BT_CONNECT2); |
4046 | result = L2CAP_CR_PEND; |
4047 | status = L2CAP_CS_AUTHOR_PEND; |
4048 | chan->ops->defer(chan); |
4049 | } else { |
4050 | l2cap_state_change(chan, state: BT_CONFIG); |
4051 | result = L2CAP_CR_SUCCESS; |
4052 | status = L2CAP_CS_NO_INFO; |
4053 | } |
4054 | } else { |
4055 | l2cap_state_change(chan, state: BT_CONNECT2); |
4056 | result = L2CAP_CR_PEND; |
4057 | status = L2CAP_CS_AUTHEN_PEND; |
4058 | } |
4059 | } else { |
4060 | l2cap_state_change(chan, state: BT_CONNECT2); |
4061 | result = L2CAP_CR_PEND; |
4062 | status = L2CAP_CS_NO_INFO; |
4063 | } |
4064 | |
4065 | response: |
4066 | rsp.scid = cpu_to_le16(scid); |
4067 | rsp.dcid = cpu_to_le16(dcid); |
4068 | rsp.result = cpu_to_le16(result); |
4069 | rsp.status = cpu_to_le16(status); |
4070 | l2cap_send_cmd(conn, ident: cmd->ident, code: rsp_code, len: sizeof(rsp), data: &rsp); |
4071 | |
4072 | if (!pchan) |
4073 | return; |
4074 | |
4075 | if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) { |
4076 | struct l2cap_info_req info; |
4077 | info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); |
4078 | |
4079 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; |
4080 | conn->info_ident = l2cap_get_ident(conn); |
4081 | |
4082 | schedule_delayed_work(dwork: &conn->info_timer, L2CAP_INFO_TIMEOUT); |
4083 | |
4084 | l2cap_send_cmd(conn, ident: conn->info_ident, L2CAP_INFO_REQ, |
4085 | len: sizeof(info), data: &info); |
4086 | } |
4087 | |
4088 | if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) && |
4089 | result == L2CAP_CR_SUCCESS) { |
4090 | u8 buf[128]; |
4091 | set_bit(nr: CONF_REQ_SENT, addr: &chan->conf_state); |
4092 | l2cap_send_cmd(conn, ident: l2cap_get_ident(conn), L2CAP_CONF_REQ, |
4093 | len: l2cap_build_conf_req(chan, data: buf, data_size: sizeof(buf)), data: buf); |
4094 | chan->num_conf_req++; |
4095 | } |
4096 | |
4097 | l2cap_chan_unlock(chan: pchan); |
4098 | l2cap_chan_put(pchan); |
4099 | } |
4100 | |
4101 | static int l2cap_connect_req(struct l2cap_conn *conn, |
4102 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) |
4103 | { |
4104 | if (cmd_len < sizeof(struct l2cap_conn_req)) |
4105 | return -EPROTO; |
4106 | |
4107 | l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP); |
4108 | return 0; |
4109 | } |
4110 | |
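/* Handle a BR/EDR Connection Response: on success move the channel to
 * BT_CONFIG and send the first Configure Request, on a pending result
 * just mark the channel, otherwise tear it down.
 */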
4111 | static int l2cap_connect_create_rsp(struct l2cap_conn *conn, |
4112 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, |
4113 | u8 *data) |
4114 | { |
4115 | struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data; |
4116 | u16 scid, dcid, result, status; |
4117 | struct l2cap_chan *chan; |
4118 | u8 req[128]; |
4119 | int err; |
4120 | |
4121 | if (cmd_len < sizeof(*rsp)) |
4122 | return -EPROTO; |
4123 | |
4124 | scid = __le16_to_cpu(rsp->scid); |
4125 | dcid = __le16_to_cpu(rsp->dcid); |
4126 | result = __le16_to_cpu(rsp->result); |
4127 | status = __le16_to_cpu(rsp->status); |
4128 | |
4129 | if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START || |
4130 | dcid > L2CAP_CID_DYN_END)) |
4131 | return -EPROTO; |
4132 | |
4133 | BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", |
4134 | dcid, scid, result, status); |
4135 | |
4136 | if (scid) { |
4137 | chan = __l2cap_get_chan_by_scid(conn, cid: scid); |
4138 | if (!chan) |
4139 | return -EBADSLT; |
4140 | } else { |
4141 | chan = __l2cap_get_chan_by_ident(conn, ident: cmd->ident); |
4142 | if (!chan) |
4143 | return -EBADSLT; |
4144 | } |
4145 | |
4146 | chan = l2cap_chan_hold_unless_zero(c: chan); |
4147 | if (!chan) |
4148 | return -EBADSLT; |
4149 | |
4150 | err = 0; |
4151 | |
4152 | l2cap_chan_lock(chan); |
4153 | |
4154 | switch (result) { |
4155 | case L2CAP_CR_SUCCESS: |
4156 | if (__l2cap_get_chan_by_dcid(conn, cid: dcid)) { |
4157 | err = -EBADSLT; |
4158 | break; |
4159 | } |
4160 | |
4161 | l2cap_state_change(chan, state: BT_CONFIG); |
4162 | chan->ident = 0; |
4163 | chan->dcid = dcid; |
4164 | clear_bit(nr: CONF_CONNECT_PEND, addr: &chan->conf_state); |
4165 | |
4166 | if (test_and_set_bit(nr: CONF_REQ_SENT, addr: &chan->conf_state)) |
4167 | break; |
4168 | |
4169 | l2cap_send_cmd(conn, ident: l2cap_get_ident(conn), L2CAP_CONF_REQ, |
4170 | len: l2cap_build_conf_req(chan, data: req, data_size: sizeof(req)), data: req); |
4171 | chan->num_conf_req++; |
4172 | break; |
4173 | |
4174 | case L2CAP_CR_PEND: |
4175 | set_bit(nr: CONF_CONNECT_PEND, addr: &chan->conf_state); |
4176 | break; |
4177 | |
4178 | default: |
4179 | l2cap_chan_del(chan, ECONNREFUSED); |
4180 | break; |
4181 | } |
4182 | |
4183 | l2cap_chan_unlock(chan); |
4184 | l2cap_chan_put(chan); |
4185 | |
4186 | return err; |
4187 | } |
4188 | |
4189 | static inline void set_default_fcs(struct l2cap_chan *chan) |
4190 | { |
4191 | /* FCS is enabled only in ERTM or streaming mode, if one or both |
4192 | * sides request it. |
4193 | */ |
4194 | if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING) |
4195 | chan->fcs = L2CAP_FCS_NONE; |
4196 | else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) |
4197 | chan->fcs = L2CAP_FCS_CRC16; |
4198 | } |
4199 | |
4200 | static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data, |
4201 | u8 ident, u16 flags) |
4202 | { |
4203 | struct l2cap_conn *conn = chan->conn; |
4204 | |
4205 | BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident, |
4206 | flags); |
4207 | |
4208 | clear_bit(nr: CONF_LOC_CONF_PEND, addr: &chan->conf_state); |
4209 | set_bit(nr: CONF_OUTPUT_DONE, addr: &chan->conf_state); |
4210 | |
4211 | l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP, |
4212 | len: l2cap_build_conf_rsp(chan, data, |
4213 | L2CAP_CONF_SUCCESS, flags), data); |
4214 | } |
4215 | |
4216 | static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident, |
4217 | u16 scid, u16 dcid) |
4218 | { |
4219 | struct l2cap_cmd_rej_cid rej; |
4220 | |
4221 | rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID); |
4222 | rej.scid = __cpu_to_le16(scid); |
4223 | rej.dcid = __cpu_to_le16(dcid); |
4224 | |
4225 | l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, len: sizeof(rej), data: &rej); |
4226 | } |
4227 | |
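/* Handle a Configure Request: accumulate option data across
 * continuation packets, then parse the complete set, send our Configure
 * Response and, once both directions are configured, bring the channel
 * up.
 */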
4228 | static inline int l2cap_config_req(struct l2cap_conn *conn, |
4229 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, |
4230 | u8 *data) |
4231 | { |
4232 | struct l2cap_conf_req *req = (struct l2cap_conf_req *) data; |
4233 | u16 dcid, flags; |
4234 | u8 rsp[64]; |
4235 | struct l2cap_chan *chan; |
4236 | int len, err = 0; |
4237 | |
4238 | if (cmd_len < sizeof(*req)) |
4239 | return -EPROTO; |
4240 | |
4241 | dcid = __le16_to_cpu(req->dcid); |
4242 | flags = __le16_to_cpu(req->flags); |
4243 | |
4244 | BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags); |
4245 | |
4246 | chan = l2cap_get_chan_by_scid(conn, cid: dcid); |
4247 | if (!chan) { |
4248 | cmd_reject_invalid_cid(conn, ident: cmd->ident, scid: dcid, dcid: 0); |
4249 | return 0; |
4250 | } |
4251 | |
4252 | if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 && |
4253 | chan->state != BT_CONNECTED) { |
4254 | cmd_reject_invalid_cid(conn, ident: cmd->ident, scid: chan->scid, |
4255 | dcid: chan->dcid); |
4256 | goto unlock; |
4257 | } |
4258 | |
4259 | /* Reject if config buffer is too small. */ |
4260 | len = cmd_len - sizeof(*req); |
4261 | if (chan->conf_len + len > sizeof(chan->conf_req)) { |
4262 | l2cap_send_cmd(conn, ident: cmd->ident, L2CAP_CONF_RSP, |
4263 | len: l2cap_build_conf_rsp(chan, data: rsp, |
4264 | L2CAP_CONF_REJECT, flags), data: rsp); |
4265 | goto unlock; |
4266 | } |
4267 | |
4268 | /* Store config. */ |
4269 | memcpy(chan->conf_req + chan->conf_len, req->data, len); |
4270 | chan->conf_len += len; |
4271 | |
4272 | if (flags & L2CAP_CONF_FLAG_CONTINUATION) { |
4273 | /* Incomplete config. Send empty response. */ |
4274 | l2cap_send_cmd(conn, ident: cmd->ident, L2CAP_CONF_RSP, |
4275 | len: l2cap_build_conf_rsp(chan, data: rsp, |
4276 | L2CAP_CONF_SUCCESS, flags), data: rsp); |
4277 | goto unlock; |
4278 | } |
4279 | |
4280 | /* Complete config. */ |
4281 | len = l2cap_parse_conf_req(chan, data: rsp, data_size: sizeof(rsp)); |
4282 | if (len < 0) { |
4283 | l2cap_send_disconn_req(chan, ECONNRESET); |
4284 | goto unlock; |
4285 | } |
4286 | |
4287 | chan->ident = cmd->ident; |
4288 | l2cap_send_cmd(conn, ident: cmd->ident, L2CAP_CONF_RSP, len, data: rsp); |
4289 | if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP) |
4290 | chan->num_conf_rsp++; |
4291 | |
4292 | /* Reset config buffer. */ |
4293 | chan->conf_len = 0; |
4294 | |
4295 | if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) |
4296 | goto unlock; |
4297 | |
4298 | if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) { |
4299 | set_default_fcs(chan); |
4300 | |
4301 | if (chan->mode == L2CAP_MODE_ERTM || |
4302 | chan->mode == L2CAP_MODE_STREAMING) |
4303 | err = l2cap_ertm_init(chan); |
4304 | |
4305 | if (err < 0) |
4306 | l2cap_send_disconn_req(chan, err: -err); |
4307 | else |
4308 | l2cap_chan_ready(chan); |
4309 | |
4310 | goto unlock; |
4311 | } |
4312 | |
4313 | if (!test_and_set_bit(nr: CONF_REQ_SENT, addr: &chan->conf_state)) { |
4314 | u8 buf[64]; |
4315 | l2cap_send_cmd(conn, ident: l2cap_get_ident(conn), L2CAP_CONF_REQ, |
4316 | len: l2cap_build_conf_req(chan, data: buf, data_size: sizeof(buf)), data: buf); |
4317 | chan->num_conf_req++; |
4318 | } |
4319 | |
4320 | /* Got Conf Rsp PENDING from remote side and assume we sent |
4321 | * Conf Rsp PENDING in the code above */ |
4322 | if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) && |
4323 | test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) { |
4324 | |
4325 | /* check compatibility */ |
4326 | |
4327 | /* Send rsp for BR/EDR channel */ |
4328 | l2cap_send_efs_conf_rsp(chan, data: rsp, ident: cmd->ident, flags); |
4329 | } |
4330 | |
4331 | unlock: |
4332 | l2cap_chan_unlock(chan); |
4333 | l2cap_chan_put(chan); |
4334 | return err; |
4335 | } |
4336 | |
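/* Handle a Configure Response: apply accepted options, renegotiate on
 * unknown or unacceptable results, and bring the channel up once both
 * input and output configuration are done.
 */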
4337 | static inline int l2cap_config_rsp(struct l2cap_conn *conn, |
4338 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, |
4339 | u8 *data) |
4340 | { |
4341 | struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data; |
4342 | u16 scid, flags, result; |
4343 | struct l2cap_chan *chan; |
4344 | int len = cmd_len - sizeof(*rsp); |
4345 | int err = 0; |
4346 | |
4347 | if (cmd_len < sizeof(*rsp)) |
4348 | return -EPROTO; |
4349 | |
4350 | scid = __le16_to_cpu(rsp->scid); |
4351 | flags = __le16_to_cpu(rsp->flags); |
4352 | result = __le16_to_cpu(rsp->result); |
4353 | |
4354 | BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags, |
4355 | result, len); |
4356 | |
4357 | chan = l2cap_get_chan_by_scid(conn, cid: scid); |
4358 | if (!chan) |
4359 | return 0; |
4360 | |
4361 | switch (result) { |
4362 | case L2CAP_CONF_SUCCESS: |
4363 | l2cap_conf_rfc_get(chan, rsp: rsp->data, len); |
4364 | clear_bit(nr: CONF_REM_CONF_PEND, addr: &chan->conf_state); |
4365 | break; |
4366 | |
4367 | case L2CAP_CONF_PENDING: |
4368 | set_bit(nr: CONF_REM_CONF_PEND, addr: &chan->conf_state); |
4369 | |
4370 | if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) { |
4371 | char buf[64]; |
4372 | |
4373 | len = l2cap_parse_conf_rsp(chan, rsp: rsp->data, len, |
4374 | data: buf, size: sizeof(buf), result: &result); |
4375 | if (len < 0) { |
4376 | l2cap_send_disconn_req(chan, ECONNRESET); |
4377 | goto done; |
4378 | } |
4379 | |
4380 | l2cap_send_efs_conf_rsp(chan, data: buf, ident: cmd->ident, flags: 0); |
4381 | } |
4382 | goto done; |
4383 | |
4384 | case L2CAP_CONF_UNKNOWN: |
4385 | case L2CAP_CONF_UNACCEPT: |
4386 | if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) { |
4387 | char req[64]; |
4388 | |
4389 | if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) { |
4390 | l2cap_send_disconn_req(chan, ECONNRESET); |
4391 | goto done; |
4392 | } |
4393 | |
4394 | /* throw out any old stored conf requests */ |
4395 | result = L2CAP_CONF_SUCCESS; |
4396 | len = l2cap_parse_conf_rsp(chan, rsp: rsp->data, len, |
4397 | data: req, size: sizeof(req), result: &result); |
4398 | if (len < 0) { |
4399 | l2cap_send_disconn_req(chan, ECONNRESET); |
4400 | goto done; |
4401 | } |
4402 | |
4403 | l2cap_send_cmd(conn, ident: l2cap_get_ident(conn), |
4404 | L2CAP_CONF_REQ, len, data: req); |
4405 | chan->num_conf_req++; |
4406 | if (result != L2CAP_CONF_SUCCESS) |
4407 | goto done; |
4408 | break; |
4409 | } |
4410 | fallthrough; |
4411 | |
4412 | default: |
4413 | l2cap_chan_set_err(chan, ECONNRESET); |
4414 | |
4415 | __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT); |
4416 | l2cap_send_disconn_req(chan, ECONNRESET); |
4417 | goto done; |
4418 | } |
4419 | |
4420 | if (flags & L2CAP_CONF_FLAG_CONTINUATION) |
4421 | goto done; |
4422 | |
4423 | set_bit(nr: CONF_INPUT_DONE, addr: &chan->conf_state); |
4424 | |
4425 | if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) { |
4426 | set_default_fcs(chan); |
4427 | |
4428 | if (chan->mode == L2CAP_MODE_ERTM || |
4429 | chan->mode == L2CAP_MODE_STREAMING) |
4430 | err = l2cap_ertm_init(chan); |
4431 | |
4432 | if (err < 0) |
4433 | l2cap_send_disconn_req(chan, err: -err); |
4434 | else |
4435 | l2cap_chan_ready(chan); |
4436 | } |
4437 | |
4438 | done: |
4439 | l2cap_chan_unlock(chan); |
4440 | l2cap_chan_put(chan); |
4441 | return err; |
4442 | } |
4443 | |
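/* Handle a Disconnection Request: acknowledge it with a Disconnection
 * Response and tear the channel down.
 */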
4444 | static inline int l2cap_disconnect_req(struct l2cap_conn *conn, |
4445 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, |
4446 | u8 *data) |
4447 | { |
4448 | struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data; |
4449 | struct l2cap_disconn_rsp rsp; |
4450 | u16 dcid, scid; |
4451 | struct l2cap_chan *chan; |
4452 | |
4453 | if (cmd_len != sizeof(*req)) |
4454 | return -EPROTO; |
4455 | |
4456 | scid = __le16_to_cpu(req->scid); |
4457 | dcid = __le16_to_cpu(req->dcid); |
4458 | |
4459 | BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid); |
4460 | |
4461 | chan = l2cap_get_chan_by_scid(conn, cid: dcid); |
4462 | if (!chan) { |
4463 | cmd_reject_invalid_cid(conn, ident: cmd->ident, scid: dcid, dcid: scid); |
4464 | return 0; |
4465 | } |
4466 | |
4467 | rsp.dcid = cpu_to_le16(chan->scid); |
4468 | rsp.scid = cpu_to_le16(chan->dcid); |
4469 | l2cap_send_cmd(conn, ident: cmd->ident, L2CAP_DISCONN_RSP, len: sizeof(rsp), data: &rsp); |
4470 | |
4471 | chan->ops->set_shutdown(chan); |
4472 | |
4473 | l2cap_chan_del(chan, ECONNRESET); |
4474 | |
4475 | chan->ops->close(chan); |
4476 | |
4477 | l2cap_chan_unlock(chan); |
4478 | l2cap_chan_put(chan); |
4479 | |
4480 | return 0; |
4481 | } |
4482 | |
4483 | static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, |
4484 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, |
4485 | u8 *data) |
4486 | { |
4487 | struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data; |
4488 | u16 dcid, scid; |
4489 | struct l2cap_chan *chan; |
4490 | |
4491 | if (cmd_len != sizeof(*rsp)) |
4492 | return -EPROTO; |
4493 | |
4494 | scid = __le16_to_cpu(rsp->scid); |
4495 | dcid = __le16_to_cpu(rsp->dcid); |
4496 | |
4497 | BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid); |
4498 | |
4499 | chan = l2cap_get_chan_by_scid(conn, cid: scid); |
4500 | if (!chan) { |
4501 | return 0; |
4502 | } |
4503 | |
4504 | if (chan->state != BT_DISCONN) { |
4505 | l2cap_chan_unlock(chan); |
4506 | l2cap_chan_put(chan); |
4507 | return 0; |
4508 | } |
4509 | |
4510 | l2cap_chan_del(chan, 0); |
4511 | |
4512 | chan->ops->close(chan); |
4513 | |
4514 | l2cap_chan_unlock(chan); |
4515 | l2cap_chan_put(chan); |
4516 | |
4517 | return 0; |
4518 | } |
4519 | |
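/* Handle an Information Request by returning the local feature mask or
 * fixed channel map, or "not supported" for any other type.
 */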
4520 | static inline int l2cap_information_req(struct l2cap_conn *conn, |
4521 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, |
4522 | u8 *data) |
4523 | { |
4524 | struct l2cap_info_req *req = (struct l2cap_info_req *) data; |
4525 | u16 type; |
4526 | |
4527 | if (cmd_len != sizeof(*req)) |
4528 | return -EPROTO; |
4529 | |
4530 | type = __le16_to_cpu(req->type); |
4531 | |
4532 | BT_DBG("type 0x%4.4x", type); |
4533 | |
4534 | if (type == L2CAP_IT_FEAT_MASK) { |
4535 | u8 buf[8]; |
4536 | u32 feat_mask = l2cap_feat_mask; |
4537 | struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; |
4538 | rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK); |
4539 | rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); |
4540 | if (!disable_ertm) |
4541 | feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING |
4542 | | L2CAP_FEAT_FCS; |
4543 | |
4544 | put_unaligned_le32(val: feat_mask, p: rsp->data); |
4545 | l2cap_send_cmd(conn, ident: cmd->ident, L2CAP_INFO_RSP, len: sizeof(buf), |
4546 | data: buf); |
4547 | } else if (type == L2CAP_IT_FIXED_CHAN) { |
4548 | u8 buf[12]; |
4549 | struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; |
4550 | |
4551 | rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); |
4552 | rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); |
4553 | rsp->data[0] = conn->local_fixed_chan; |
4554 | memset(rsp->data + 1, 0, 7); |
4555 | l2cap_send_cmd(conn, ident: cmd->ident, L2CAP_INFO_RSP, len: sizeof(buf), |
4556 | data: buf); |
4557 | } else { |
4558 | struct l2cap_info_rsp rsp; |
4559 | rsp.type = cpu_to_le16(type); |
4560 | rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP); |
4561 | l2cap_send_cmd(conn, ident: cmd->ident, L2CAP_INFO_RSP, len: sizeof(rsp), |
4562 | data: &rsp); |
4563 | } |
4564 | |
4565 | return 0; |
4566 | } |
4567 | |
4568 | static inline int l2cap_information_rsp(struct l2cap_conn *conn, |
4569 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, |
4570 | u8 *data) |
4571 | { |
4572 | struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data; |
4573 | u16 type, result; |
4574 | |
4575 | if (cmd_len < sizeof(*rsp)) |
4576 | return -EPROTO; |
4577 | |
4578 | type = __le16_to_cpu(rsp->type); |
4579 | result = __le16_to_cpu(rsp->result); |
4580 | |
4581 | BT_DBG("type 0x%4.4x result 0x%2.2x", type, result); |
4582 | |
4583 | /* L2CAP Info req/rsp are unbound to channels, add extra checks */ |
4584 | if (cmd->ident != conn->info_ident || |
4585 | conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) |
4586 | return 0; |
4587 | |
4588 | cancel_delayed_work(dwork: &conn->info_timer); |
4589 | |
4590 | if (result != L2CAP_IR_SUCCESS) { |
4591 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; |
4592 | conn->info_ident = 0; |
4593 | |
4594 | l2cap_conn_start(conn); |
4595 | |
4596 | return 0; |
4597 | } |
4598 | |
4599 | switch (type) { |
4600 | case L2CAP_IT_FEAT_MASK: |
4601 | conn->feat_mask = get_unaligned_le32(p: rsp->data); |
4602 | |
4603 | if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) { |
4604 | struct l2cap_info_req req; |
4605 | req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); |
4606 | |
4607 | conn->info_ident = l2cap_get_ident(conn); |
4608 | |
4609 | l2cap_send_cmd(conn, ident: conn->info_ident, |
4610 | L2CAP_INFO_REQ, len: sizeof(req), data: &req); |
4611 | } else { |
4612 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; |
4613 | conn->info_ident = 0; |
4614 | |
4615 | l2cap_conn_start(conn); |
4616 | } |
4617 | break; |
4618 | |
4619 | case L2CAP_IT_FIXED_CHAN: |
4620 | conn->remote_fixed_chan = rsp->data[0]; |
4621 | conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; |
4622 | conn->info_ident = 0; |
4623 | |
4624 | l2cap_conn_start(conn); |
4625 | break; |
4626 | } |
4627 | |
4628 | return 0; |
4629 | } |
4630 | |
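/* Handle an LE Connection Parameter Update Request (central role only):
 * validate the requested parameters, send the response and, if they
 * were accepted, pass them on to the controller.
 */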
4631 | static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn, |
4632 | struct l2cap_cmd_hdr *cmd, |
4633 | u16 cmd_len, u8 *data) |
4634 | { |
4635 | struct hci_conn *hcon = conn->hcon; |
4636 | struct l2cap_conn_param_update_req *req; |
4637 | struct l2cap_conn_param_update_rsp rsp; |
4638 | u16 min, max, latency, to_multiplier; |
4639 | int err; |
4640 | |
4641 | if (hcon->role != HCI_ROLE_MASTER) |
4642 | return -EINVAL; |
4643 | |
4644 | if (cmd_len != sizeof(struct l2cap_conn_param_update_req)) |
4645 | return -EPROTO; |
4646 | |
4647 | req = (struct l2cap_conn_param_update_req *) data; |
4648 | min = __le16_to_cpu(req->min); |
4649 | max = __le16_to_cpu(req->max); |
4650 | latency = __le16_to_cpu(req->latency); |
4651 | to_multiplier = __le16_to_cpu(req->to_multiplier); |
4652 | |
4653 | BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x", |
4654 | min, max, latency, to_multiplier); |
4655 | |
4656 | memset(&rsp, 0, sizeof(rsp)); |
4657 | |
4658 | err = hci_check_conn_params(min, max, latency, to_multiplier); |
4659 | if (err) |
4660 | rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED); |
4661 | else |
4662 | rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED); |
4663 | |
4664 | l2cap_send_cmd(conn, ident: cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP, |
4665 | len: sizeof(rsp), data: &rsp); |
4666 | |
4667 | if (!err) { |
4668 | u8 store_hint; |
4669 | |
4670 | store_hint = hci_le_conn_update(conn: hcon, min, max, latency, |
4671 | to_multiplier); |
4672 | mgmt_new_conn_param(hdev: hcon->hdev, bdaddr: &hcon->dst, bdaddr_type: hcon->dst_type, |
4673 | store_hint, min_interval: min, max_interval: max, latency, |
4674 | timeout: to_multiplier); |
4675 | |
4676 | } |
4677 | |
4678 | return 0; |
4679 | } |
4680 | |
4681 | static int l2cap_le_connect_rsp(struct l2cap_conn *conn, |
4682 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, |
4683 | u8 *data) |
4684 | { |
4685 | struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data; |
4686 | struct hci_conn *hcon = conn->hcon; |
4687 | u16 dcid, mtu, mps, credits, result; |
4688 | struct l2cap_chan *chan; |
4689 | int err, sec_level; |
4690 | |
4691 | if (cmd_len < sizeof(*rsp)) |
4692 | return -EPROTO; |
4693 | |
4694 | dcid = __le16_to_cpu(rsp->dcid); |
4695 | mtu = __le16_to_cpu(rsp->mtu); |
4696 | mps = __le16_to_cpu(rsp->mps); |
4697 | credits = __le16_to_cpu(rsp->credits); |
4698 | result = __le16_to_cpu(rsp->result); |
4699 | |
4700 | if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 || |
4701 | dcid < L2CAP_CID_DYN_START || |
4702 | dcid > L2CAP_CID_LE_DYN_END)) |
4703 | return -EPROTO; |
4704 | |
4705 | BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x", |
4706 | dcid, mtu, mps, credits, result); |
4707 | |
4708 | chan = __l2cap_get_chan_by_ident(conn, ident: cmd->ident); |
4709 | if (!chan) |
4710 | return -EBADSLT; |
4711 | |
4712 | err = 0; |
4713 | |
4714 | l2cap_chan_lock(chan); |
4715 | |
4716 | switch (result) { |
4717 | case L2CAP_CR_LE_SUCCESS: |
4718 | if (__l2cap_get_chan_by_dcid(conn, cid: dcid)) { |
4719 | err = -EBADSLT; |
4720 | break; |
4721 | } |
4722 | |
4723 | chan->ident = 0; |
4724 | chan->dcid = dcid; |
4725 | chan->omtu = mtu; |
4726 | chan->remote_mps = mps; |
4727 | chan->tx_credits = credits; |
4728 | l2cap_chan_ready(chan); |
4729 | break; |
4730 | |
4731 | case L2CAP_CR_LE_AUTHENTICATION: |
4732 | case L2CAP_CR_LE_ENCRYPTION: |
4733 | /* If we already have MITM protection we can't do |
4734 | * anything. |
4735 | */ |
4736 | if (hcon->sec_level > BT_SECURITY_MEDIUM) { |
4737 | l2cap_chan_del(chan, ECONNREFUSED); |
4738 | break; |
4739 | } |
4740 | |
4741 | sec_level = hcon->sec_level + 1; |
4742 | if (chan->sec_level < sec_level) |
4743 | chan->sec_level = sec_level; |
4744 | |
4745 | /* We'll need to send a new Connect Request */ |
4746 | clear_bit(nr: FLAG_LE_CONN_REQ_SENT, addr: &chan->flags); |
4747 | |
4748 | smp_conn_security(hcon, sec_level: chan->sec_level); |
4749 | break; |
4750 | |
4751 | default: |
4752 | l2cap_chan_del(chan, ECONNREFUSED); |
4753 | break; |
4754 | } |
4755 | |
4756 | l2cap_chan_unlock(chan); |
4757 | |
4758 | return err; |
4759 | } |
4760 | |
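/* Dispatch a single BR/EDR signaling command to its handler. */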
4761 | static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn, |
4762 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, |
4763 | u8 *data) |
4764 | { |
4765 | int err = 0; |
4766 | |
4767 | switch (cmd->code) { |
4768 | case L2CAP_COMMAND_REJ: |
4769 | l2cap_command_rej(conn, cmd, cmd_len, data); |
4770 | break; |
4771 | |
4772 | case L2CAP_CONN_REQ: |
4773 | err = l2cap_connect_req(conn, cmd, cmd_len, data); |
4774 | break; |
4775 | |
4776 | case L2CAP_CONN_RSP: |
4777 | l2cap_connect_create_rsp(conn, cmd, cmd_len, data); |
4778 | break; |
4779 | |
4780 | case L2CAP_CONF_REQ: |
4781 | err = l2cap_config_req(conn, cmd, cmd_len, data); |
4782 | break; |
4783 | |
4784 | case L2CAP_CONF_RSP: |
4785 | l2cap_config_rsp(conn, cmd, cmd_len, data); |
4786 | break; |
4787 | |
4788 | case L2CAP_DISCONN_REQ: |
4789 | err = l2cap_disconnect_req(conn, cmd, cmd_len, data); |
4790 | break; |
4791 | |
4792 | case L2CAP_DISCONN_RSP: |
4793 | l2cap_disconnect_rsp(conn, cmd, cmd_len, data); |
4794 | break; |
4795 | |
4796 | case L2CAP_ECHO_REQ: |
4797 | l2cap_send_cmd(conn, ident: cmd->ident, L2CAP_ECHO_RSP, len: cmd_len, data); |
4798 | break; |
4799 | |
4800 | case L2CAP_ECHO_RSP: |
4801 | break; |
4802 | |
4803 | case L2CAP_INFO_REQ: |
4804 | err = l2cap_information_req(conn, cmd, cmd_len, data); |
4805 | break; |
4806 | |
4807 | case L2CAP_INFO_RSP: |
4808 | l2cap_information_rsp(conn, cmd, cmd_len, data); |
4809 | break; |
4810 | |
4811 | default: |
4812 | BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code); |
4813 | err = -EINVAL; |
4814 | break; |
4815 | } |
4816 | |
4817 | return err; |
4818 | } |
4819 | |
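/* Handle an LE Credit Based Connection Request: validate the SPSM and
 * source CID, check security, create the channel and either respond
 * immediately or defer the response to the socket layer.
 */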
4820 | static int l2cap_le_connect_req(struct l2cap_conn *conn, |
4821 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, |
4822 | u8 *data) |
4823 | { |
4824 | struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data; |
4825 | struct l2cap_le_conn_rsp rsp; |
4826 | struct l2cap_chan *chan, *pchan; |
4827 | u16 dcid, scid, credits, mtu, mps; |
4828 | __le16 psm; |
4829 | u8 result; |
4830 | |
4831 | if (cmd_len != sizeof(*req)) |
4832 | return -EPROTO; |
4833 | |
4834 | scid = __le16_to_cpu(req->scid); |
4835 | mtu = __le16_to_cpu(req->mtu); |
4836 | mps = __le16_to_cpu(req->mps); |
4837 | psm = req->psm; |
4838 | dcid = 0; |
4839 | credits = 0; |
4840 | |
4841 | if (mtu < 23 || mps < 23) |
4842 | return -EPROTO; |
4843 | |
4844 | BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm), |
4845 | scid, mtu, mps); |
4846 | |
4847 | /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A |
4848 | * page 1059: |
4849 | * |
4850 | * Valid range: 0x0001-0x00ff |
4851 | * |
4852 | * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges |
4853 | */ |
4854 | if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) { |
4855 | result = L2CAP_CR_LE_BAD_PSM; |
4856 | chan = NULL; |
4857 | goto response; |
4858 | } |
4859 | |
4860 | /* Check if we have socket listening on psm */ |
4861 | pchan = l2cap_global_chan_by_psm(state: BT_LISTEN, psm, src: &conn->hcon->src, |
4862 | dst: &conn->hcon->dst, LE_LINK); |
4863 | if (!pchan) { |
4864 | result = L2CAP_CR_LE_BAD_PSM; |
4865 | chan = NULL; |
4866 | goto response; |
4867 | } |
4868 | |
4869 | l2cap_chan_lock(chan: pchan); |
4870 | |
4871 | if (!smp_sufficient_security(hcon: conn->hcon, sec_level: pchan->sec_level, |
4872 | key_pref: SMP_ALLOW_STK)) { |
4873 | result = pchan->sec_level == BT_SECURITY_MEDIUM ? |
4874 | L2CAP_CR_LE_ENCRYPTION : L2CAP_CR_LE_AUTHENTICATION; |
4875 | chan = NULL; |
4876 | goto response_unlock; |
4877 | } |
4878 | |
4879 | /* Check for valid dynamic CID range */ |
4880 | if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) { |
4881 | result = L2CAP_CR_LE_INVALID_SCID; |
4882 | chan = NULL; |
4883 | goto response_unlock; |
4884 | } |
4885 | |
4886 | /* Check if we already have channel with that dcid */ |
4887 | if (__l2cap_get_chan_by_dcid(conn, cid: scid)) { |
4888 | result = L2CAP_CR_LE_SCID_IN_USE; |
4889 | chan = NULL; |
4890 | goto response_unlock; |
4891 | } |
4892 | |
4893 | chan = pchan->ops->new_connection(pchan); |
4894 | if (!chan) { |
4895 | result = L2CAP_CR_LE_NO_MEM; |
4896 | goto response_unlock; |
4897 | } |
4898 | |
4899 | bacpy(dst: &chan->src, src: &conn->hcon->src); |
4900 | bacpy(dst: &chan->dst, src: &conn->hcon->dst); |
4901 | chan->src_type = bdaddr_src_type(hcon: conn->hcon); |
4902 | chan->dst_type = bdaddr_dst_type(hcon: conn->hcon); |
4903 | chan->psm = psm; |
4904 | chan->dcid = scid; |
4905 | chan->omtu = mtu; |
4906 | chan->remote_mps = mps; |
4907 | |
4908 | __l2cap_chan_add(conn, chan); |
4909 | |
4910 | l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits)); |
4911 | |
4912 | dcid = chan->scid; |
4913 | credits = chan->rx_credits; |
4914 | |
4915 | __set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); |
4916 | |
4917 | chan->ident = cmd->ident; |
4918 | |
4919 | if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { |
4920 | l2cap_state_change(chan, state: BT_CONNECT2); |
4921 | /* The following result value is actually not defined |
4922 | * for LE CoC but we use it to let the function know |
4923 | * that it should bail out after doing its cleanup |
4924 | * instead of sending a response. |
4925 | */ |
4926 | result = L2CAP_CR_PEND; |
4927 | chan->ops->defer(chan); |
4928 | } else { |
4929 | l2cap_chan_ready(chan); |
4930 | result = L2CAP_CR_LE_SUCCESS; |
4931 | } |
4932 | |
4933 | response_unlock: |
4934 | l2cap_chan_unlock(chan: pchan); |
4935 | l2cap_chan_put(pchan); |
4936 | |
4937 | if (result == L2CAP_CR_PEND) |
4938 | return 0; |
4939 | |
4940 | response: |
4941 | if (chan) { |
4942 | rsp.mtu = cpu_to_le16(chan->imtu); |
4943 | rsp.mps = cpu_to_le16(chan->mps); |
4944 | } else { |
4945 | rsp.mtu = 0; |
4946 | rsp.mps = 0; |
4947 | } |
4948 | |
4949 | rsp.dcid = cpu_to_le16(dcid); |
4950 | rsp.credits = cpu_to_le16(credits); |
4951 | rsp.result = cpu_to_le16(result); |
4952 | |
4953 | l2cap_send_cmd(conn, ident: cmd->ident, L2CAP_LE_CONN_RSP, len: sizeof(rsp), data: &rsp); |
4954 | |
4955 | return 0; |
4956 | } |
4957 | |
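/* Handle an L2CAP_LE_CREDITS packet: add the returned credits to the
 * channel, guarding against overflow past LE_FLOWCTL_MAX_CREDITS, and
 * resume transmission.
 */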
4958 | static inline int l2cap_le_credits(struct l2cap_conn *conn, |
4959 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, |
4960 | u8 *data) |
4961 | { |
4962 | struct l2cap_le_credits *pkt; |
4963 | struct l2cap_chan *chan; |
4964 | u16 cid, credits, max_credits; |
4965 | |
4966 | if (cmd_len != sizeof(*pkt)) |
4967 | return -EPROTO; |
4968 | |
4969 | pkt = (struct l2cap_le_credits *) data; |
4970 | cid = __le16_to_cpu(pkt->cid); |
4971 | credits = __le16_to_cpu(pkt->credits); |
4972 | |
4973 | BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits); |
4974 | |
4975 | chan = l2cap_get_chan_by_dcid(conn, cid); |
4976 | if (!chan) |
4977 | return -EBADSLT; |
4978 | |
4979 | max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits; |
4980 | if (credits > max_credits) { |
4981 | BT_ERR("LE credits overflow"); |
4982 | l2cap_send_disconn_req(chan, ECONNRESET); |
4983 | |
4984 | /* Return 0 so that we don't trigger an unnecessary |
4985 | * command reject packet. |
4986 | */ |
4987 | goto unlock; |
4988 | } |
4989 | |
4990 | chan->tx_credits += credits; |
4991 | |
4992 | /* Resume sending */ |
4993 | l2cap_le_flowctl_send(chan); |
4994 | |
4995 | if (chan->tx_credits) |
4996 | chan->ops->resume(chan); |
4997 | |
4998 | unlock: |
4999 | l2cap_chan_unlock(chan); |
5000 | l2cap_chan_put(chan); |
5001 | |
5002 | return 0; |
5003 | } |
5004 | |
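/* Handle an Enhanced Credit Based Connection Request, which may open up
 * to L2CAP_ECRED_MAX_CID channels at once; per-channel failures are
 * reported through the DCID list of the response.
 */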
5005 | static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn, |
5006 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, |
5007 | u8 *data) |
5008 | { |
5009 | struct l2cap_ecred_conn_req *req = (void *) data; |
5010 | DEFINE_RAW_FLEX(struct l2cap_ecred_conn_rsp, pdu, dcid, L2CAP_ECRED_MAX_CID); |
5011 | struct l2cap_chan *chan, *pchan; |
5012 | u16 mtu, mps; |
5013 | __le16 psm; |
5014 | u8 result, len = 0; |
5015 | int i, num_scid; |
5016 | bool defer = false; |
5017 | |
5018 | if (!enable_ecred) |
5019 | return -EINVAL; |
5020 | |
5021 | if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) { |
5022 | result = L2CAP_CR_LE_INVALID_PARAMS; |
5023 | goto response; |
5024 | } |
5025 | |
5026 | cmd_len -= sizeof(*req); |
5027 | num_scid = cmd_len / sizeof(u16); |
5028 | |
5029 | if (num_scid > L2CAP_ECRED_MAX_CID) { |
5030 | result = L2CAP_CR_LE_INVALID_PARAMS; |
5031 | goto response; |
5032 | } |
5033 | |
5034 | mtu = __le16_to_cpu(req->mtu); |
5035 | mps = __le16_to_cpu(req->mps); |
5036 | |
5037 | if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) { |
5038 | result = L2CAP_CR_LE_UNACCEPT_PARAMS; |
5039 | goto response; |
5040 | } |
5041 | |
5042 | psm = req->psm; |
5043 | |
5044 | /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A |
5045 | * page 1059: |
5046 | * |
5047 | * Valid range: 0x0001-0x00ff |
5048 | * |
5049 | * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges |
5050 | */ |
5051 | if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) { |
5052 | result = L2CAP_CR_LE_BAD_PSM; |
5053 | goto response; |
5054 | } |
5055 | |
5056 | BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps); |
5057 | |
5058 | memset(pdu, 0, sizeof(*pdu)); |
5059 | |
5060 | /* Check if we have socket listening on psm */ |
5061 | pchan = l2cap_global_chan_by_psm(state: BT_LISTEN, psm, src: &conn->hcon->src, |
5062 | dst: &conn->hcon->dst, LE_LINK); |
5063 | if (!pchan) { |
5064 | result = L2CAP_CR_LE_BAD_PSM; |
5065 | goto response; |
5066 | } |
5067 | |
5068 | l2cap_chan_lock(chan: pchan); |
5069 | |
5070 | if (!smp_sufficient_security(hcon: conn->hcon, sec_level: pchan->sec_level, |
5071 | key_pref: SMP_ALLOW_STK)) { |
5072 | result = L2CAP_CR_LE_AUTHENTICATION; |
5073 | goto unlock; |
5074 | } |
5075 | |
5076 | result = L2CAP_CR_LE_SUCCESS; |
5077 | |
5078 | for (i = 0; i < num_scid; i++) { |
5079 | u16 scid = __le16_to_cpu(req->scid[i]); |
5080 | |
5081 | BT_DBG("scid[%d] 0x%4.4x", i, scid); |
5082 | |
5083 | pdu->dcid[i] = 0x0000; |
5084 | len += sizeof(*pdu->dcid); |
5085 | |
5086 | /* Check for valid dynamic CID range */ |
5087 | if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) { |
5088 | result = L2CAP_CR_LE_INVALID_SCID; |
5089 | continue; |
5090 | } |
5091 | |
5092 | /* Check if we already have channel with that dcid */ |
5093 | if (__l2cap_get_chan_by_dcid(conn, cid: scid)) { |
5094 | result = L2CAP_CR_LE_SCID_IN_USE; |
5095 | continue; |
5096 | } |
5097 | |
5098 | chan = pchan->ops->new_connection(pchan); |
5099 | if (!chan) { |
5100 | result = L2CAP_CR_LE_NO_MEM; |
5101 | continue; |
5102 | } |
5103 | |
5104 | bacpy(dst: &chan->src, src: &conn->hcon->src); |
5105 | bacpy(dst: &chan->dst, src: &conn->hcon->dst); |
5106 | chan->src_type = bdaddr_src_type(hcon: conn->hcon); |
5107 | chan->dst_type = bdaddr_dst_type(hcon: conn->hcon); |
5108 | chan->psm = psm; |
5109 | chan->dcid = scid; |
5110 | chan->omtu = mtu; |
5111 | chan->remote_mps = mps; |
5112 | |
5113 | __l2cap_chan_add(conn, chan); |
5114 | |
5115 | l2cap_ecred_init(chan, __le16_to_cpu(req->credits)); |
5116 | |
5117 | /* Init response */ |
5118 | if (!pdu->credits) { |
5119 | pdu->mtu = cpu_to_le16(chan->imtu); |
5120 | pdu->mps = cpu_to_le16(chan->mps); |
5121 | pdu->credits = cpu_to_le16(chan->rx_credits); |
5122 | } |
5123 | |
5124 | pdu->dcid[i] = cpu_to_le16(chan->scid); |
5125 | |
5126 | __set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); |
5127 | |
5128 | chan->ident = cmd->ident; |
5129 | chan->mode = L2CAP_MODE_EXT_FLOWCTL; |
5130 | |
5131 | if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { |
5132 | l2cap_state_change(chan, state: BT_CONNECT2); |
5133 | defer = true; |
5134 | chan->ops->defer(chan); |
5135 | } else { |
5136 | l2cap_chan_ready(chan); |
5137 | } |
5138 | } |
5139 | |
5140 | unlock: |
5141 | l2cap_chan_unlock(chan: pchan); |
5142 | l2cap_chan_put(pchan); |
5143 | |
5144 | response: |
5145 | pdu->result = cpu_to_le16(result); |
5146 | |
5147 | if (defer) |
5148 | return 0; |
5149 | |
5150 | l2cap_send_cmd(conn, ident: cmd->ident, L2CAP_ECRED_CONN_RSP, |
5151 | len: sizeof(*pdu) + len, data: pdu); |
5152 | |
5153 | return 0; |
5154 | } |
5155 | |
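/* Handle an Enhanced Credit Based Connection Response: match every
 * pending channel with the same ident against the returned DCID list
 * and either complete it or tear it down.
 */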
5156 | static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn, |
5157 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, |
5158 | u8 *data) |
5159 | { |
5160 | struct l2cap_ecred_conn_rsp *rsp = (void *) data; |
5161 | struct hci_conn *hcon = conn->hcon; |
5162 | u16 mtu, mps, credits, result; |
5163 | struct l2cap_chan *chan, *tmp; |
5164 | int err = 0, sec_level; |
5165 | int i = 0; |
5166 | |
5167 | if (cmd_len < sizeof(*rsp)) |
5168 | return -EPROTO; |
5169 | |
5170 | mtu = __le16_to_cpu(rsp->mtu); |
5171 | mps = __le16_to_cpu(rsp->mps); |
5172 | credits = __le16_to_cpu(rsp->credits); |
5173 | result = __le16_to_cpu(rsp->result); |
5174 | |
5175 | BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits, |
5176 | result); |
5177 | |
5178 | cmd_len -= sizeof(*rsp); |
5179 | |
5180 | list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) { |
5181 | u16 dcid; |
5182 | |
5183 | if (chan->ident != cmd->ident || |
5184 | chan->mode != L2CAP_MODE_EXT_FLOWCTL || |
5185 | chan->state == BT_CONNECTED) |
5186 | continue; |
5187 | |
5188 | l2cap_chan_lock(chan); |
5189 | |
5190 | /* Check that there is a dcid for each pending channel */ |
5191 | if (cmd_len < sizeof(dcid)) { |
5192 | l2cap_chan_del(chan, ECONNREFUSED); |
5193 | l2cap_chan_unlock(chan); |
5194 | continue; |
5195 | } |
5196 | |
5197 | dcid = __le16_to_cpu(rsp->dcid[i++]); |
5198 | cmd_len -= sizeof(u16); |
5199 | |
5200 | BT_DBG("dcid[%d] 0x%4.4x", i, dcid); |
5201 | |
5202 | /* Check if dcid is already in use */ |
5203 | if (dcid && __l2cap_get_chan_by_dcid(conn, cid: dcid)) { |
5204 | /* If a device receives a |
5205 | * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an |
5206 | * already-assigned Destination CID, then both the |
5207 | * original channel and the new channel shall be |
5208 | * immediately discarded and not used. |
5209 | */ |
5210 | l2cap_chan_del(chan, ECONNREFUSED); |
5211 | l2cap_chan_unlock(chan); |
5212 | chan = __l2cap_get_chan_by_dcid(conn, cid: dcid); |
5213 | l2cap_chan_lock(chan); |
5214 | l2cap_chan_del(chan, ECONNRESET); |
5215 | l2cap_chan_unlock(chan); |
5216 | continue; |
5217 | } |
5218 | |
5219 | switch (result) { |
5220 | case L2CAP_CR_LE_AUTHENTICATION: |
5221 | case L2CAP_CR_LE_ENCRYPTION: |
5222 | /* If we already have MITM protection we can't do |
5223 | * anything. |
5224 | */ |
5225 | if (hcon->sec_level > BT_SECURITY_MEDIUM) { |
5226 | l2cap_chan_del(chan, ECONNREFUSED); |
5227 | break; |
5228 | } |
5229 | |
5230 | sec_level = hcon->sec_level + 1; |
5231 | if (chan->sec_level < sec_level) |
5232 | chan->sec_level = sec_level; |
5233 | |
5234 | /* We'll need to send a new Connect Request */ |
5235 | clear_bit(nr: FLAG_ECRED_CONN_REQ_SENT, addr: &chan->flags); |
5236 | |
5237 | smp_conn_security(hcon, sec_level: chan->sec_level); |
5238 | break; |
5239 | |
5240 | case L2CAP_CR_LE_BAD_PSM: |
5241 | l2cap_chan_del(chan, ECONNREFUSED); |
5242 | break; |
5243 | |
5244 | default: |
5245 | /* If dcid was not set it means the channel was refused */ |
5246 | if (!dcid) { |
5247 | l2cap_chan_del(chan, ECONNREFUSED); |
5248 | break; |
5249 | } |
5250 | |
5251 | chan->ident = 0; |
5252 | chan->dcid = dcid; |
5253 | chan->omtu = mtu; |
5254 | chan->remote_mps = mps; |
5255 | chan->tx_credits = credits; |
5256 | l2cap_chan_ready(chan); |
5257 | break; |
5258 | } |
5259 | |
5260 | l2cap_chan_unlock(chan); |
5261 | } |
5262 | |
5263 | return err; |
5264 | } |
5265 | |
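/* Handle a Credit Based Reconfigure Request: validate the new MTU and
 * MPS and apply them to every listed channel; an MTU decrease is
 * reported as invalid.
 */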
5266 | static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn, |
5267 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, |
5268 | u8 *data) |
5269 | { |
5270 | struct l2cap_ecred_reconf_req *req = (void *) data; |
5271 | struct l2cap_ecred_reconf_rsp rsp; |
5272 | u16 mtu, mps, result; |
5273 | struct l2cap_chan *chan; |
5274 | int i, num_scid; |
5275 | |
5276 | if (!enable_ecred) |
5277 | return -EINVAL; |
5278 | |
5279 | if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) { |
5280 | result = L2CAP_CR_LE_INVALID_PARAMS; |
5281 | goto respond; |
5282 | } |
5283 | |
5284 | mtu = __le16_to_cpu(req->mtu); |
5285 | mps = __le16_to_cpu(req->mps); |
5286 | |
5287 | BT_DBG("mtu %u mps %u", mtu, mps); |
5288 | |
5289 | if (mtu < L2CAP_ECRED_MIN_MTU) { |
5290 | result = L2CAP_RECONF_INVALID_MTU; |
5291 | goto respond; |
5292 | } |
5293 | |
5294 | if (mps < L2CAP_ECRED_MIN_MPS) { |
5295 | result = L2CAP_RECONF_INVALID_MPS; |
5296 | goto respond; |
5297 | } |
5298 | |
5299 | cmd_len -= sizeof(*req); |
5300 | num_scid = cmd_len / sizeof(u16); |
5301 | result = L2CAP_RECONF_SUCCESS; |
5302 | |
5303 | for (i = 0; i < num_scid; i++) { |
5304 | u16 scid; |
5305 | |
5306 | scid = __le16_to_cpu(req->scid[i]); |
5307 | if (!scid) |
5308 | return -EPROTO; |
5309 | |
5310 | chan = __l2cap_get_chan_by_dcid(conn, cid: scid); |
5311 | if (!chan) |
5312 | continue; |
5313 | |
5314 | /* If the MTU value is decreased for any of the included |
5315 | * channels, then the receiver shall disconnect all |
5316 | * included channels. |
5317 | */ |
5318 | if (chan->omtu > mtu) { |
5319 | BT_ERR("chan %p decreased MTU %u -> %u", chan, |
5320 | chan->omtu, mtu); |
5321 | result = L2CAP_RECONF_INVALID_MTU; |
5322 | } |
5323 | |
5324 | chan->omtu = mtu; |
5325 | chan->remote_mps = mps; |
5326 | } |
5327 | |
5328 | respond: |
5329 | rsp.result = cpu_to_le16(result); |
5330 | |
5331 | l2cap_send_cmd(conn, ident: cmd->ident, L2CAP_ECRED_RECONF_RSP, len: sizeof(rsp), |
5332 | data: &rsp); |
5333 | |
5334 | return 0; |
5335 | } |
5336 | |
5337 | static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn, |
5338 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, |
5339 | u8 *data) |
5340 | { |
5341 | struct l2cap_chan *chan, *tmp; |
5342 | struct l2cap_ecred_conn_rsp *rsp = (void *) data; |
5343 | u16 result; |
5344 | |
5345 | if (cmd_len < sizeof(*rsp)) |
5346 | return -EPROTO; |
5347 | |
5348 | result = __le16_to_cpu(rsp->result); |
5349 | |
	BT_DBG("result 0x%4.4x", result);
5351 | |
5352 | if (!result) |
5353 | return 0; |
5354 | |
5355 | list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) { |
5356 | if (chan->ident != cmd->ident) |
5357 | continue; |
5358 | |
5359 | l2cap_chan_del(chan, ECONNRESET); |
5360 | } |
5361 | |
5362 | return 0; |
5363 | } |
5364 | |
5365 | static inline int l2cap_le_command_rej(struct l2cap_conn *conn, |
5366 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, |
5367 | u8 *data) |
5368 | { |
5369 | struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data; |
5370 | struct l2cap_chan *chan; |
5371 | |
5372 | if (cmd_len < sizeof(*rej)) |
5373 | return -EPROTO; |
5374 | |
5375 | chan = __l2cap_get_chan_by_ident(conn, ident: cmd->ident); |
5376 | if (!chan) |
5377 | goto done; |
5378 | |
5379 | chan = l2cap_chan_hold_unless_zero(c: chan); |
5380 | if (!chan) |
5381 | goto done; |
5382 | |
5383 | l2cap_chan_lock(chan); |
5384 | l2cap_chan_del(chan, ECONNREFUSED); |
5385 | l2cap_chan_unlock(chan); |
5386 | l2cap_chan_put(chan); |
5387 | |
5388 | done: |
5389 | return 0; |
5390 | } |
5391 | |
5392 | static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn, |
5393 | struct l2cap_cmd_hdr *cmd, u16 cmd_len, |
5394 | u8 *data) |
5395 | { |
5396 | int err = 0; |
5397 | |
5398 | switch (cmd->code) { |
5399 | case L2CAP_COMMAND_REJ: |
5400 | l2cap_le_command_rej(conn, cmd, cmd_len, data); |
5401 | break; |
5402 | |
5403 | case L2CAP_CONN_PARAM_UPDATE_REQ: |
5404 | err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data); |
5405 | break; |
5406 | |
5407 | case L2CAP_CONN_PARAM_UPDATE_RSP: |
5408 | break; |
5409 | |
5410 | case L2CAP_LE_CONN_RSP: |
5411 | l2cap_le_connect_rsp(conn, cmd, cmd_len, data); |
5412 | break; |
5413 | |
5414 | case L2CAP_LE_CONN_REQ: |
5415 | err = l2cap_le_connect_req(conn, cmd, cmd_len, data); |
5416 | break; |
5417 | |
5418 | case L2CAP_LE_CREDITS: |
5419 | err = l2cap_le_credits(conn, cmd, cmd_len, data); |
5420 | break; |
5421 | |
5422 | case L2CAP_ECRED_CONN_REQ: |
5423 | err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data); |
5424 | break; |
5425 | |
5426 | case L2CAP_ECRED_CONN_RSP: |
5427 | err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data); |
5428 | break; |
5429 | |
5430 | case L2CAP_ECRED_RECONF_REQ: |
5431 | err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data); |
5432 | break; |
5433 | |
5434 | case L2CAP_ECRED_RECONF_RSP: |
5435 | err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data); |
5436 | break; |
5437 | |
5438 | case L2CAP_DISCONN_REQ: |
5439 | err = l2cap_disconnect_req(conn, cmd, cmd_len, data); |
5440 | break; |
5441 | |
5442 | case L2CAP_DISCONN_RSP: |
5443 | l2cap_disconnect_rsp(conn, cmd, cmd_len, data); |
5444 | break; |
5445 | |
5446 | default: |
5447 | BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code); |
5448 | err = -EINVAL; |
5449 | break; |
5450 | } |
5451 | |
5452 | return err; |
5453 | } |
5454 | |
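/* LE signaling channel (CID 0x0005): unlike BR/EDR, each LE signaling
 * PDU carries exactly one command, so the length in the command header
 * must match the remaining frame exactly; anything else is treated as a
 * corrupted command and the frame is dropped.
 */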
5455 | static inline void l2cap_le_sig_channel(struct l2cap_conn *conn, |
5456 | struct sk_buff *skb) |
5457 | { |
5458 | struct hci_conn *hcon = conn->hcon; |
5459 | struct l2cap_cmd_hdr *cmd; |
5460 | u16 len; |
5461 | int err; |
5462 | |
5463 | if (hcon->type != LE_LINK) |
5464 | goto drop; |
5465 | |
5466 | if (skb->len < L2CAP_CMD_HDR_SIZE) |
5467 | goto drop; |
5468 | |
5469 | cmd = (void *) skb->data; |
5470 | skb_pull(skb, L2CAP_CMD_HDR_SIZE); |
5471 | |
5472 | len = le16_to_cpu(cmd->len); |
5473 | |
5474 | BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident); |
5475 | |
5476 | if (len != skb->len || !cmd->ident) { |
5477 | BT_DBG("corrupted command"); |
5478 | goto drop; |
5479 | } |
5480 | |
5481 | err = l2cap_le_sig_cmd(conn, cmd, cmd_len: len, data: skb->data); |
5482 | if (err) { |
5483 | struct l2cap_cmd_rej_unk rej; |
5484 | |
5485 | BT_ERR("Wrong link type (%d)", err); |
5486 | |
5487 | rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); |
5488 | l2cap_send_cmd(conn, ident: cmd->ident, L2CAP_COMMAND_REJ, |
5489 | len: sizeof(rej), data: &rej); |
5490 | } |
5491 | |
5492 | drop: |
5493 | kfree_skb(skb); |
5494 | } |
5495 | |
5496 | static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident) |
5497 | { |
5498 | struct l2cap_cmd_rej_unk rej; |
5499 | |
5500 | rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); |
5501 | l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, len: sizeof(rej), data: &rej); |
5502 | } |
5503 | |
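/* BR/EDR signaling channel (CID 0x0001): a single C-frame may carry
 * several concatenated commands, so the parser walks the buffer one
 * command header at a time. A malformed or oversized command gets an
 * L2CAP Command Reject ("not understood") and parsing continues with
 * the next command, if any remains.
 */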
5504 | static inline void l2cap_sig_channel(struct l2cap_conn *conn, |
5505 | struct sk_buff *skb) |
5506 | { |
5507 | struct hci_conn *hcon = conn->hcon; |
5508 | struct l2cap_cmd_hdr *cmd; |
5509 | int err; |
5510 | |
5511 | l2cap_raw_recv(conn, skb); |
5512 | |
5513 | if (hcon->type != ACL_LINK) |
5514 | goto drop; |
5515 | |
5516 | while (skb->len >= L2CAP_CMD_HDR_SIZE) { |
5517 | u16 len; |
5518 | |
5519 | cmd = (void *) skb->data; |
5520 | skb_pull(skb, L2CAP_CMD_HDR_SIZE); |
5521 | |
5522 | len = le16_to_cpu(cmd->len); |
5523 | |
5524 | BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, |
5525 | cmd->ident); |
5526 | |
5527 | if (len > skb->len || !cmd->ident) { |
5528 | BT_DBG("corrupted command"); |
5529 | l2cap_sig_send_rej(conn, ident: cmd->ident); |
5530 | skb_pull(skb, len: len > skb->len ? skb->len : len); |
5531 | continue; |
5532 | } |
5533 | |
5534 | err = l2cap_bredr_sig_cmd(conn, cmd, cmd_len: len, data: skb->data); |
5535 | if (err) { |
5536 | BT_ERR("Wrong link type (%d)", err); |
5537 | l2cap_sig_send_rej(conn, ident: cmd->ident); |
5538 | } |
5539 | |
5540 | skb_pull(skb, len); |
5541 | } |
5542 | |
5543 | if (skb->len > 0) { |
5544 | BT_DBG("corrupted command"); |
5545 | l2cap_sig_send_rej(conn, ident: 0); |
5546 | } |
5547 | |
5548 | drop: |
5549 | kfree_skb(skb); |
5550 | } |
5551 | |
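/* When FCS is enabled (ERTM/Streaming), the last two bytes of every PDU
 * carry a CRC-16 computed over the whole PDU, i.e. the basic L2CAP
 * header plus the enhanced or extended control field and the payload.
 * skb->data already points past that header here, hence the "- hdr_size"
 * offset when recomputing the checksum. The FCS bytes themselves are
 * trimmed off before the frame is passed further up.
 */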
5552 | static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb) |
5553 | { |
5554 | u16 our_fcs, rcv_fcs; |
5555 | int hdr_size; |
5556 | |
5557 | if (test_bit(FLAG_EXT_CTRL, &chan->flags)) |
5558 | hdr_size = L2CAP_EXT_HDR_SIZE; |
5559 | else |
5560 | hdr_size = L2CAP_ENH_HDR_SIZE; |
5561 | |
5562 | if (chan->fcs == L2CAP_FCS_CRC16) { |
5563 | skb_trim(skb, len: skb->len - L2CAP_FCS_SIZE); |
5564 | rcv_fcs = get_unaligned_le16(p: skb->data + skb->len); |
5565 | our_fcs = crc16(crc: 0, p: skb->data - hdr_size, len: skb->len + hdr_size); |
5566 | |
5567 | if (our_fcs != rcv_fcs) |
5568 | return -EBADMSG; |
5569 | } |
5570 | return 0; |
5571 | } |
5572 | |
5573 | static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) |
5574 | { |
5575 | struct l2cap_ctrl control; |
5576 | |
5577 | BT_DBG("chan %p", chan); |
5578 | |
5579 | memset(&control, 0, sizeof(control)); |
5580 | control.sframe = 1; |
5581 | control.final = 1; |
5582 | control.reqseq = chan->buffer_seq; |
5583 | set_bit(nr: CONN_SEND_FBIT, addr: &chan->conn_state); |
5584 | |
5585 | if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { |
5586 | control.super = L2CAP_SUPER_RNR; |
5587 | l2cap_send_sframe(chan, control: &control); |
5588 | } |
5589 | |
5590 | if (test_and_clear_bit(nr: CONN_REMOTE_BUSY, addr: &chan->conn_state) && |
5591 | chan->unacked_frames > 0) |
5592 | __set_retrans_timer(chan); |
5593 | |
5594 | /* Send pending iframes */ |
5595 | l2cap_ertm_send(chan); |
5596 | |
5597 | if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && |
5598 | test_bit(CONN_SEND_FBIT, &chan->conn_state)) { |
5599 | /* F-bit wasn't sent in an s-frame or i-frame yet, so |
5600 | * send it now. |
5601 | */ |
5602 | control.super = L2CAP_SUPER_RR; |
5603 | l2cap_send_sframe(chan, control: &control); |
5604 | } |
5605 | } |
5606 | |
5607 | static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag, |
5608 | struct sk_buff **last_frag) |
5609 | { |
5610 | /* skb->len reflects data in skb as well as all fragments |
5611 | * skb->data_len reflects only data in fragments |
5612 | */ |
5613 | if (!skb_has_frag_list(skb)) |
5614 | skb_shinfo(skb)->frag_list = new_frag; |
5615 | |
5616 | new_frag->next = NULL; |
5617 | |
5618 | (*last_frag)->next = new_frag; |
5619 | *last_frag = new_frag; |
5620 | |
5621 | skb->len += new_frag->len; |
5622 | skb->data_len += new_frag->len; |
5623 | skb->truesize += new_frag->truesize; |
5624 | } |
5625 | |
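/* Segmentation and Reassembly (SAR) for ERTM/Streaming: an SDU that does
 * not fit in a single PDU arrives as a Start fragment (carrying the total
 * SDU length in its first two bytes), zero or more Continuation fragments
 * and an End fragment. The Start fragment is kept in chan->sdu and later
 * fragments are chained onto its frag_list until chan->sdu_len bytes have
 * been collected, at which point the SDU is handed to chan->ops->recv().
 * Any inconsistency (oversized SDU, unexpected SAR value, short or
 * overlong reassembly) drops the partial SDU.
 */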
5626 | static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, |
5627 | struct l2cap_ctrl *control) |
5628 | { |
5629 | int err = -EINVAL; |
5630 | |
5631 | switch (control->sar) { |
5632 | case L2CAP_SAR_UNSEGMENTED: |
5633 | if (chan->sdu) |
5634 | break; |
5635 | |
5636 | err = chan->ops->recv(chan, skb); |
5637 | break; |
5638 | |
5639 | case L2CAP_SAR_START: |
5640 | if (chan->sdu) |
5641 | break; |
5642 | |
5643 | if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE)) |
5644 | break; |
5645 | |
5646 | chan->sdu_len = get_unaligned_le16(p: skb->data); |
5647 | skb_pull(skb, L2CAP_SDULEN_SIZE); |
5648 | |
5649 | if (chan->sdu_len > chan->imtu) { |
5650 | err = -EMSGSIZE; |
5651 | break; |
5652 | } |
5653 | |
5654 | if (skb->len >= chan->sdu_len) |
5655 | break; |
5656 | |
5657 | chan->sdu = skb; |
5658 | chan->sdu_last_frag = skb; |
5659 | |
5660 | skb = NULL; |
5661 | err = 0; |
5662 | break; |
5663 | |
5664 | case L2CAP_SAR_CONTINUE: |
5665 | if (!chan->sdu) |
5666 | break; |
5667 | |
5668 | append_skb_frag(skb: chan->sdu, new_frag: skb, |
5669 | last_frag: &chan->sdu_last_frag); |
5670 | skb = NULL; |
5671 | |
5672 | if (chan->sdu->len >= chan->sdu_len) |
5673 | break; |
5674 | |
5675 | err = 0; |
5676 | break; |
5677 | |
5678 | case L2CAP_SAR_END: |
5679 | if (!chan->sdu) |
5680 | break; |
5681 | |
5682 | append_skb_frag(skb: chan->sdu, new_frag: skb, |
5683 | last_frag: &chan->sdu_last_frag); |
5684 | skb = NULL; |
5685 | |
5686 | if (chan->sdu->len != chan->sdu_len) |
5687 | break; |
5688 | |
5689 | err = chan->ops->recv(chan, chan->sdu); |
5690 | |
5691 | if (!err) { |
5692 | /* Reassembly complete */ |
5693 | chan->sdu = NULL; |
5694 | chan->sdu_last_frag = NULL; |
5695 | chan->sdu_len = 0; |
5696 | } |
5697 | break; |
5698 | } |
5699 | |
5700 | if (err) { |
5701 | kfree_skb(skb); |
5702 | kfree_skb(skb: chan->sdu); |
5703 | chan->sdu = NULL; |
5704 | chan->sdu_last_frag = NULL; |
5705 | chan->sdu_len = 0; |
5706 | } |
5707 | |
5708 | return err; |
5709 | } |
5710 | |
5711 | static int l2cap_resegment(struct l2cap_chan *chan) |
5712 | { |
5713 | /* Placeholder */ |
5714 | return 0; |
5715 | } |
5716 | |
5717 | void l2cap_chan_busy(struct l2cap_chan *chan, int busy) |
5718 | { |
5719 | u8 event; |
5720 | |
5721 | if (chan->mode != L2CAP_MODE_ERTM) |
5722 | return; |
5723 | |
5724 | event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR; |
5725 | l2cap_tx(chan, NULL, NULL, event); |
5726 | } |
5727 | |
5728 | static int l2cap_rx_queued_iframes(struct l2cap_chan *chan) |
5729 | { |
5730 | int err = 0; |
5731 | /* Pass sequential frames to l2cap_reassemble_sdu() |
5732 | * until a gap is encountered. |
5733 | */ |
5734 | |
5735 | BT_DBG("chan %p", chan); |
5736 | |
5737 | while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { |
5738 | struct sk_buff *skb; |
5739 | BT_DBG("Searching for skb with txseq %d (queue len %d)", |
5740 | chan->buffer_seq, skb_queue_len(&chan->srej_q)); |
5741 | |
5742 | skb = l2cap_ertm_seq_in_queue(head: &chan->srej_q, seq: chan->buffer_seq); |
5743 | |
5744 | if (!skb) |
5745 | break; |
5746 | |
5747 | skb_unlink(skb, list: &chan->srej_q); |
5748 | chan->buffer_seq = __next_seq(chan, seq: chan->buffer_seq); |
5749 | err = l2cap_reassemble_sdu(chan, skb, control: &bt_cb(skb)->l2cap); |
5750 | if (err) |
5751 | break; |
5752 | } |
5753 | |
5754 | if (skb_queue_empty(list: &chan->srej_q)) { |
5755 | chan->rx_state = L2CAP_RX_STATE_RECV; |
5756 | l2cap_send_ack(chan); |
5757 | } |
5758 | |
5759 | return err; |
5760 | } |
5761 | |
5762 | static void l2cap_handle_srej(struct l2cap_chan *chan, |
5763 | struct l2cap_ctrl *control) |
5764 | { |
5765 | struct sk_buff *skb; |
5766 | |
5767 | BT_DBG("chan %p, control %p", chan, control); |
5768 | |
5769 | if (control->reqseq == chan->next_tx_seq) { |
5770 | BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq); |
5771 | l2cap_send_disconn_req(chan, ECONNRESET); |
5772 | return; |
5773 | } |
5774 | |
5775 | skb = l2cap_ertm_seq_in_queue(head: &chan->tx_q, seq: control->reqseq); |
5776 | |
5777 | if (skb == NULL) { |
5778 | BT_DBG("Seq %d not available for retransmission", |
5779 | control->reqseq); |
5780 | return; |
5781 | } |
5782 | |
5783 | if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) { |
5784 | BT_DBG("Retry limit exceeded (%d)", chan->max_tx); |
5785 | l2cap_send_disconn_req(chan, ECONNRESET); |
5786 | return; |
5787 | } |
5788 | |
5789 | clear_bit(nr: CONN_REMOTE_BUSY, addr: &chan->conn_state); |
5790 | |
5791 | if (control->poll) { |
5792 | l2cap_pass_to_tx(chan, control); |
5793 | |
5794 | set_bit(nr: CONN_SEND_FBIT, addr: &chan->conn_state); |
5795 | l2cap_retransmit(chan, control); |
5796 | l2cap_ertm_send(chan); |
5797 | |
5798 | if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) { |
5799 | set_bit(nr: CONN_SREJ_ACT, addr: &chan->conn_state); |
5800 | chan->srej_save_reqseq = control->reqseq; |
5801 | } |
5802 | } else { |
5803 | l2cap_pass_to_tx_fbit(chan, control); |
5804 | |
5805 | if (control->final) { |
5806 | if (chan->srej_save_reqseq != control->reqseq || |
5807 | !test_and_clear_bit(nr: CONN_SREJ_ACT, |
5808 | addr: &chan->conn_state)) |
5809 | l2cap_retransmit(chan, control); |
5810 | } else { |
5811 | l2cap_retransmit(chan, control); |
5812 | if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) { |
5813 | set_bit(nr: CONN_SREJ_ACT, addr: &chan->conn_state); |
5814 | chan->srej_save_reqseq = control->reqseq; |
5815 | } |
5816 | } |
5817 | } |
5818 | } |
5819 | |
5820 | static void l2cap_handle_rej(struct l2cap_chan *chan, |
5821 | struct l2cap_ctrl *control) |
5822 | { |
5823 | struct sk_buff *skb; |
5824 | |
5825 | BT_DBG("chan %p, control %p", chan, control); |
5826 | |
5827 | if (control->reqseq == chan->next_tx_seq) { |
5828 | BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq); |
5829 | l2cap_send_disconn_req(chan, ECONNRESET); |
5830 | return; |
5831 | } |
5832 | |
5833 | skb = l2cap_ertm_seq_in_queue(head: &chan->tx_q, seq: control->reqseq); |
5834 | |
5835 | if (chan->max_tx && skb && |
5836 | bt_cb(skb)->l2cap.retries >= chan->max_tx) { |
5837 | BT_DBG("Retry limit exceeded (%d)", chan->max_tx); |
5838 | l2cap_send_disconn_req(chan, ECONNRESET); |
5839 | return; |
5840 | } |
5841 | |
5842 | clear_bit(nr: CONN_REMOTE_BUSY, addr: &chan->conn_state); |
5843 | |
5844 | l2cap_pass_to_tx(chan, control); |
5845 | |
5846 | if (control->final) { |
5847 | if (!test_and_clear_bit(nr: CONN_REJ_ACT, addr: &chan->conn_state)) |
5848 | l2cap_retransmit_all(chan, control); |
5849 | } else { |
5850 | l2cap_retransmit_all(chan, control); |
5851 | l2cap_ertm_send(chan); |
5852 | if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) |
5853 | set_bit(nr: CONN_REJ_ACT, addr: &chan->conn_state); |
5854 | } |
5855 | } |
5856 | |
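/* Classify the TxSeq of a received I-frame relative to the receive
 * window. The usual outcomes are EXPECTED (in sequence), UNEXPECTED (a
 * gap, which triggers SREJ recovery), DUPLICATE (already received) and
 * INVALID/INVALID_IGNORE (outside the permitted tx window). While in the
 * SREJ_SENT state the additional *_SREJ classes distinguish frames that
 * answer an outstanding SREJ from duplicates and from frames that were
 * never requested.
 */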
5857 | static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq) |
5858 | { |
5859 | BT_DBG("chan %p, txseq %d", chan, txseq); |
5860 | |
5861 | BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq, |
5862 | chan->expected_tx_seq); |
5863 | |
5864 | if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { |
5865 | if (__seq_offset(chan, seq1: txseq, seq2: chan->last_acked_seq) >= |
5866 | chan->tx_win) { |
5867 | /* See notes below regarding "double poll" and |
5868 | * invalid packets. |
5869 | */ |
5870 | if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) { |
5871 | BT_DBG("Invalid/Ignore - after SREJ"); |
5872 | return L2CAP_TXSEQ_INVALID_IGNORE; |
5873 | } else { |
5874 | BT_DBG("Invalid - in window after SREJ sent"); |
5875 | return L2CAP_TXSEQ_INVALID; |
5876 | } |
5877 | } |
5878 | |
5879 | if (chan->srej_list.head == txseq) { |
5880 | BT_DBG("Expected SREJ"); |
5881 | return L2CAP_TXSEQ_EXPECTED_SREJ; |
5882 | } |
5883 | |
5884 | if (l2cap_ertm_seq_in_queue(head: &chan->srej_q, seq: txseq)) { |
5885 | BT_DBG("Duplicate SREJ - txseq already stored"); |
5886 | return L2CAP_TXSEQ_DUPLICATE_SREJ; |
5887 | } |
5888 | |
5889 | if (l2cap_seq_list_contains(seq_list: &chan->srej_list, seq: txseq)) { |
5890 | BT_DBG("Unexpected SREJ - not requested"); |
5891 | return L2CAP_TXSEQ_UNEXPECTED_SREJ; |
5892 | } |
5893 | } |
5894 | |
5895 | if (chan->expected_tx_seq == txseq) { |
5896 | if (__seq_offset(chan, seq1: txseq, seq2: chan->last_acked_seq) >= |
5897 | chan->tx_win) { |
5898 | BT_DBG("Invalid - txseq outside tx window"); |
5899 | return L2CAP_TXSEQ_INVALID; |
5900 | } else { |
5901 | BT_DBG("Expected"); |
5902 | return L2CAP_TXSEQ_EXPECTED; |
5903 | } |
5904 | } |
5905 | |
5906 | if (__seq_offset(chan, seq1: txseq, seq2: chan->last_acked_seq) < |
5907 | __seq_offset(chan, seq1: chan->expected_tx_seq, seq2: chan->last_acked_seq)) { |
5908 | BT_DBG("Duplicate - expected_tx_seq later than txseq"); |
5909 | return L2CAP_TXSEQ_DUPLICATE; |
5910 | } |
5911 | |
5912 | if (__seq_offset(chan, seq1: txseq, seq2: chan->last_acked_seq) >= chan->tx_win) { |
5913 | /* A source of invalid packets is a "double poll" condition, |
5914 | * where delays cause us to send multiple poll packets. If |
5915 | * the remote stack receives and processes both polls, |
5916 | * sequence numbers can wrap around in such a way that a |
5917 | * resent frame has a sequence number that looks like new data |
5918 | * with a sequence gap. This would trigger an erroneous SREJ |
5919 | * request. |
5920 | * |
		 * Fortunately, this is impossible with a tx window no larger
		 * than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
5924 | * |
5925 | * With tx window sizes greater than half of the tx window |
5926 | * maximum, the frame is invalid and cannot be ignored. This |
5927 | * causes a disconnect. |
5928 | */ |
5929 | |
5930 | if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) { |
5931 | BT_DBG("Invalid/Ignore - txseq outside tx window"); |
5932 | return L2CAP_TXSEQ_INVALID_IGNORE; |
5933 | } else { |
5934 | BT_DBG("Invalid - txseq outside tx window"); |
5935 | return L2CAP_TXSEQ_INVALID; |
5936 | } |
5937 | } else { |
5938 | BT_DBG("Unexpected - txseq indicates missing frames"); |
5939 | return L2CAP_TXSEQ_UNEXPECTED; |
5940 | } |
5941 | } |
5942 | |
5943 | static int l2cap_rx_state_recv(struct l2cap_chan *chan, |
5944 | struct l2cap_ctrl *control, |
5945 | struct sk_buff *skb, u8 event) |
5946 | { |
5947 | struct l2cap_ctrl local_control; |
5948 | int err = 0; |
5949 | bool skb_in_use = false; |
5950 | |
5951 | BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb, |
5952 | event); |
5953 | |
5954 | switch (event) { |
5955 | case L2CAP_EV_RECV_IFRAME: |
5956 | switch (l2cap_classify_txseq(chan, txseq: control->txseq)) { |
5957 | case L2CAP_TXSEQ_EXPECTED: |
5958 | l2cap_pass_to_tx(chan, control); |
5959 | |
5960 | if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { |
5961 | BT_DBG("Busy, discarding expected seq %d", |
5962 | control->txseq); |
5963 | break; |
5964 | } |
5965 | |
5966 | chan->expected_tx_seq = __next_seq(chan, |
5967 | seq: control->txseq); |
5968 | |
5969 | chan->buffer_seq = chan->expected_tx_seq; |
5970 | skb_in_use = true; |
5971 | |
5972 | /* l2cap_reassemble_sdu may free skb, hence invalidate |
5973 | * control, so make a copy in advance to use it after |
5974 | * l2cap_reassemble_sdu returns and to avoid the race |
5975 | * condition, for example: |
5976 | * |
5977 | * The current thread calls: |
5978 | * l2cap_reassemble_sdu |
5979 | * chan->ops->recv == l2cap_sock_recv_cb |
5980 | * __sock_queue_rcv_skb |
5981 | * Another thread calls: |
5982 | * bt_sock_recvmsg |
5983 | * skb_recv_datagram |
5984 | * skb_free_datagram |
5985 | * Then the current thread tries to access control, but |
5986 | * it was freed by skb_free_datagram. |
5987 | */ |
5988 | local_control = *control; |
5989 | err = l2cap_reassemble_sdu(chan, skb, control); |
5990 | if (err) |
5991 | break; |
5992 | |
5993 | if (local_control.final) { |
5994 | if (!test_and_clear_bit(nr: CONN_REJ_ACT, |
5995 | addr: &chan->conn_state)) { |
5996 | local_control.final = 0; |
5997 | l2cap_retransmit_all(chan, control: &local_control); |
5998 | l2cap_ertm_send(chan); |
5999 | } |
6000 | } |
6001 | |
6002 | if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) |
6003 | l2cap_send_ack(chan); |
6004 | break; |
6005 | case L2CAP_TXSEQ_UNEXPECTED: |
6006 | l2cap_pass_to_tx(chan, control); |
6007 | |
6008 | /* Can't issue SREJ frames in the local busy state. |
6009 | * Drop this frame, it will be seen as missing |
6010 | * when local busy is exited. |
6011 | */ |
6012 | if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { |
6013 | BT_DBG("Busy, discarding unexpected seq %d", |
6014 | control->txseq); |
6015 | break; |
6016 | } |
6017 | |
6018 | /* There was a gap in the sequence, so an SREJ |
6019 | * must be sent for each missing frame. The |
6020 | * current frame is stored for later use. |
6021 | */ |
6022 | skb_queue_tail(list: &chan->srej_q, newsk: skb); |
6023 | skb_in_use = true; |
6024 | BT_DBG("Queued %p (queue len %d)", skb, |
6025 | skb_queue_len(&chan->srej_q)); |
6026 | |
6027 | clear_bit(nr: CONN_SREJ_ACT, addr: &chan->conn_state); |
6028 | l2cap_seq_list_clear(seq_list: &chan->srej_list); |
6029 | l2cap_send_srej(chan, txseq: control->txseq); |
6030 | |
6031 | chan->rx_state = L2CAP_RX_STATE_SREJ_SENT; |
6032 | break; |
6033 | case L2CAP_TXSEQ_DUPLICATE: |
6034 | l2cap_pass_to_tx(chan, control); |
6035 | break; |
6036 | case L2CAP_TXSEQ_INVALID_IGNORE: |
6037 | break; |
6038 | case L2CAP_TXSEQ_INVALID: |
6039 | default: |
6040 | l2cap_send_disconn_req(chan, ECONNRESET); |
6041 | break; |
6042 | } |
6043 | break; |
6044 | case L2CAP_EV_RECV_RR: |
6045 | l2cap_pass_to_tx(chan, control); |
6046 | if (control->final) { |
6047 | clear_bit(nr: CONN_REMOTE_BUSY, addr: &chan->conn_state); |
6048 | |
6049 | if (!test_and_clear_bit(nr: CONN_REJ_ACT, |
6050 | addr: &chan->conn_state)) { |
6051 | control->final = 0; |
6052 | l2cap_retransmit_all(chan, control); |
6053 | } |
6054 | |
6055 | l2cap_ertm_send(chan); |
6056 | } else if (control->poll) { |
6057 | l2cap_send_i_or_rr_or_rnr(chan); |
6058 | } else { |
6059 | if (test_and_clear_bit(nr: CONN_REMOTE_BUSY, |
6060 | addr: &chan->conn_state) && |
6061 | chan->unacked_frames) |
6062 | __set_retrans_timer(chan); |
6063 | |
6064 | l2cap_ertm_send(chan); |
6065 | } |
6066 | break; |
6067 | case L2CAP_EV_RECV_RNR: |
6068 | set_bit(nr: CONN_REMOTE_BUSY, addr: &chan->conn_state); |
6069 | l2cap_pass_to_tx(chan, control); |
6070 | if (control && control->poll) { |
6071 | set_bit(nr: CONN_SEND_FBIT, addr: &chan->conn_state); |
6072 | l2cap_send_rr_or_rnr(chan, poll: 0); |
6073 | } |
6074 | __clear_retrans_timer(chan); |
6075 | l2cap_seq_list_clear(seq_list: &chan->retrans_list); |
6076 | break; |
6077 | case L2CAP_EV_RECV_REJ: |
6078 | l2cap_handle_rej(chan, control); |
6079 | break; |
6080 | case L2CAP_EV_RECV_SREJ: |
6081 | l2cap_handle_srej(chan, control); |
6082 | break; |
6083 | default: |
6084 | break; |
6085 | } |
6086 | |
6087 | if (skb && !skb_in_use) { |
6088 | BT_DBG("Freeing %p", skb); |
6089 | kfree_skb(skb); |
6090 | } |
6091 | |
6092 | return err; |
6093 | } |
6094 | |
6095 | static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan, |
6096 | struct l2cap_ctrl *control, |
6097 | struct sk_buff *skb, u8 event) |
6098 | { |
6099 | int err = 0; |
6100 | u16 txseq = control->txseq; |
6101 | bool skb_in_use = false; |
6102 | |
6103 | BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb, |
6104 | event); |
6105 | |
6106 | switch (event) { |
6107 | case L2CAP_EV_RECV_IFRAME: |
6108 | switch (l2cap_classify_txseq(chan, txseq)) { |
6109 | case L2CAP_TXSEQ_EXPECTED: |
6110 | /* Keep frame for reassembly later */ |
6111 | l2cap_pass_to_tx(chan, control); |
6112 | skb_queue_tail(list: &chan->srej_q, newsk: skb); |
6113 | skb_in_use = true; |
6114 | BT_DBG("Queued %p (queue len %d)", skb, |
6115 | skb_queue_len(&chan->srej_q)); |
6116 | |
6117 | chan->expected_tx_seq = __next_seq(chan, seq: txseq); |
6118 | break; |
6119 | case L2CAP_TXSEQ_EXPECTED_SREJ: |
6120 | l2cap_seq_list_pop(seq_list: &chan->srej_list); |
6121 | |
6122 | l2cap_pass_to_tx(chan, control); |
6123 | skb_queue_tail(list: &chan->srej_q, newsk: skb); |
6124 | skb_in_use = true; |
6125 | BT_DBG("Queued %p (queue len %d)", skb, |
6126 | skb_queue_len(&chan->srej_q)); |
6127 | |
6128 | err = l2cap_rx_queued_iframes(chan); |
6129 | if (err) |
6130 | break; |
6131 | |
6132 | break; |
6133 | case L2CAP_TXSEQ_UNEXPECTED: |
6134 | /* Got a frame that can't be reassembled yet. |
6135 | * Save it for later, and send SREJs to cover |
6136 | * the missing frames. |
6137 | */ |
6138 | skb_queue_tail(list: &chan->srej_q, newsk: skb); |
6139 | skb_in_use = true; |
6140 | BT_DBG("Queued %p (queue len %d)", skb, |
6141 | skb_queue_len(&chan->srej_q)); |
6142 | |
6143 | l2cap_pass_to_tx(chan, control); |
6144 | l2cap_send_srej(chan, txseq: control->txseq); |
6145 | break; |
6146 | case L2CAP_TXSEQ_UNEXPECTED_SREJ: |
6147 | /* This frame was requested with an SREJ, but |
6148 | * some expected retransmitted frames are |
6149 | * missing. Request retransmission of missing |
6150 | * SREJ'd frames. |
6151 | */ |
6152 | skb_queue_tail(list: &chan->srej_q, newsk: skb); |
6153 | skb_in_use = true; |
6154 | BT_DBG("Queued %p (queue len %d)", skb, |
6155 | skb_queue_len(&chan->srej_q)); |
6156 | |
6157 | l2cap_pass_to_tx(chan, control); |
6158 | l2cap_send_srej_list(chan, txseq: control->txseq); |
6159 | break; |
6160 | case L2CAP_TXSEQ_DUPLICATE_SREJ: |
6161 | /* We've already queued this frame. Drop this copy. */ |
6162 | l2cap_pass_to_tx(chan, control); |
6163 | break; |
6164 | case L2CAP_TXSEQ_DUPLICATE: |
6165 | /* Expecting a later sequence number, so this frame |
6166 | * was already received. Ignore it completely. |
6167 | */ |
6168 | break; |
6169 | case L2CAP_TXSEQ_INVALID_IGNORE: |
6170 | break; |
6171 | case L2CAP_TXSEQ_INVALID: |
6172 | default: |
6173 | l2cap_send_disconn_req(chan, ECONNRESET); |
6174 | break; |
6175 | } |
6176 | break; |
6177 | case L2CAP_EV_RECV_RR: |
6178 | l2cap_pass_to_tx(chan, control); |
6179 | if (control->final) { |
6180 | clear_bit(nr: CONN_REMOTE_BUSY, addr: &chan->conn_state); |
6181 | |
6182 | if (!test_and_clear_bit(nr: CONN_REJ_ACT, |
6183 | addr: &chan->conn_state)) { |
6184 | control->final = 0; |
6185 | l2cap_retransmit_all(chan, control); |
6186 | } |
6187 | |
6188 | l2cap_ertm_send(chan); |
6189 | } else if (control->poll) { |
6190 | if (test_and_clear_bit(nr: CONN_REMOTE_BUSY, |
6191 | addr: &chan->conn_state) && |
6192 | chan->unacked_frames) { |
6193 | __set_retrans_timer(chan); |
6194 | } |
6195 | |
6196 | set_bit(nr: CONN_SEND_FBIT, addr: &chan->conn_state); |
6197 | l2cap_send_srej_tail(chan); |
6198 | } else { |
6199 | if (test_and_clear_bit(nr: CONN_REMOTE_BUSY, |
6200 | addr: &chan->conn_state) && |
6201 | chan->unacked_frames) |
6202 | __set_retrans_timer(chan); |
6203 | |
6204 | l2cap_send_ack(chan); |
6205 | } |
6206 | break; |
6207 | case L2CAP_EV_RECV_RNR: |
6208 | set_bit(nr: CONN_REMOTE_BUSY, addr: &chan->conn_state); |
6209 | l2cap_pass_to_tx(chan, control); |
6210 | if (control->poll) { |
6211 | l2cap_send_srej_tail(chan); |
6212 | } else { |
6213 | struct l2cap_ctrl rr_control; |
6214 | memset(&rr_control, 0, sizeof(rr_control)); |
6215 | rr_control.sframe = 1; |
6216 | rr_control.super = L2CAP_SUPER_RR; |
6217 | rr_control.reqseq = chan->buffer_seq; |
6218 | l2cap_send_sframe(chan, control: &rr_control); |
6219 | } |
6220 | |
6221 | break; |
6222 | case L2CAP_EV_RECV_REJ: |
6223 | l2cap_handle_rej(chan, control); |
6224 | break; |
6225 | case L2CAP_EV_RECV_SREJ: |
6226 | l2cap_handle_srej(chan, control); |
6227 | break; |
6228 | } |
6229 | |
6230 | if (skb && !skb_in_use) { |
6231 | BT_DBG("Freeing %p", skb); |
6232 | kfree_skb(skb); |
6233 | } |
6234 | |
6235 | return err; |
6236 | } |
6237 | |
6238 | static int l2cap_finish_move(struct l2cap_chan *chan) |
6239 | { |
6240 | BT_DBG("chan %p", chan); |
6241 | |
6242 | chan->rx_state = L2CAP_RX_STATE_RECV; |
6243 | chan->conn->mtu = chan->conn->hcon->mtu; |
6244 | |
6245 | return l2cap_resegment(chan); |
6246 | } |
6247 | |
6248 | static int l2cap_rx_state_wait_p(struct l2cap_chan *chan, |
6249 | struct l2cap_ctrl *control, |
6250 | struct sk_buff *skb, u8 event) |
6251 | { |
6252 | int err; |
6253 | |
6254 | BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb, |
6255 | event); |
6256 | |
6257 | if (!control->poll) |
6258 | return -EPROTO; |
6259 | |
6260 | l2cap_process_reqseq(chan, reqseq: control->reqseq); |
6261 | |
6262 | if (!skb_queue_empty(list: &chan->tx_q)) |
6263 | chan->tx_send_head = skb_peek(list_: &chan->tx_q); |
6264 | else |
6265 | chan->tx_send_head = NULL; |
6266 | |
6267 | /* Rewind next_tx_seq to the point expected |
6268 | * by the receiver. |
6269 | */ |
6270 | chan->next_tx_seq = control->reqseq; |
6271 | chan->unacked_frames = 0; |
6272 | |
6273 | err = l2cap_finish_move(chan); |
6274 | if (err) |
6275 | return err; |
6276 | |
6277 | set_bit(nr: CONN_SEND_FBIT, addr: &chan->conn_state); |
6278 | l2cap_send_i_or_rr_or_rnr(chan); |
6279 | |
6280 | if (event == L2CAP_EV_RECV_IFRAME) |
6281 | return -EPROTO; |
6282 | |
6283 | return l2cap_rx_state_recv(chan, control, NULL, event); |
6284 | } |
6285 | |
6286 | static int l2cap_rx_state_wait_f(struct l2cap_chan *chan, |
6287 | struct l2cap_ctrl *control, |
6288 | struct sk_buff *skb, u8 event) |
6289 | { |
6290 | int err; |
6291 | |
6292 | if (!control->final) |
6293 | return -EPROTO; |
6294 | |
6295 | clear_bit(nr: CONN_REMOTE_BUSY, addr: &chan->conn_state); |
6296 | |
6297 | chan->rx_state = L2CAP_RX_STATE_RECV; |
6298 | l2cap_process_reqseq(chan, reqseq: control->reqseq); |
6299 | |
6300 | if (!skb_queue_empty(list: &chan->tx_q)) |
6301 | chan->tx_send_head = skb_peek(list_: &chan->tx_q); |
6302 | else |
6303 | chan->tx_send_head = NULL; |
6304 | |
6305 | /* Rewind next_tx_seq to the point expected |
6306 | * by the receiver. |
6307 | */ |
6308 | chan->next_tx_seq = control->reqseq; |
6309 | chan->unacked_frames = 0; |
6310 | chan->conn->mtu = chan->conn->hcon->mtu; |
6311 | |
6312 | err = l2cap_resegment(chan); |
6313 | |
6314 | if (!err) |
6315 | err = l2cap_rx_state_recv(chan, control, skb, event); |
6316 | |
6317 | return err; |
6318 | } |
6319 | |
6320 | static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq) |
6321 | { |
6322 | /* Make sure reqseq is for a packet that has been sent but not acked */ |
6323 | u16 unacked; |
6324 | |
6325 | unacked = __seq_offset(chan, seq1: chan->next_tx_seq, seq2: chan->expected_ack_seq); |
6326 | return __seq_offset(chan, seq1: chan->next_tx_seq, seq2: reqseq) <= unacked; |
6327 | } |
6328 | |
6329 | static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, |
6330 | struct sk_buff *skb, u8 event) |
6331 | { |
6332 | int err = 0; |
6333 | |
6334 | BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan, |
6335 | control, skb, event, chan->rx_state); |
6336 | |
6337 | if (__valid_reqseq(chan, reqseq: control->reqseq)) { |
6338 | switch (chan->rx_state) { |
6339 | case L2CAP_RX_STATE_RECV: |
6340 | err = l2cap_rx_state_recv(chan, control, skb, event); |
6341 | break; |
6342 | case L2CAP_RX_STATE_SREJ_SENT: |
6343 | err = l2cap_rx_state_srej_sent(chan, control, skb, |
6344 | event); |
6345 | break; |
6346 | case L2CAP_RX_STATE_WAIT_P: |
6347 | err = l2cap_rx_state_wait_p(chan, control, skb, event); |
6348 | break; |
6349 | case L2CAP_RX_STATE_WAIT_F: |
6350 | err = l2cap_rx_state_wait_f(chan, control, skb, event); |
6351 | break; |
6352 | default: |
6353 | /* shut it down */ |
6354 | break; |
6355 | } |
6356 | } else { |
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
6358 | control->reqseq, chan->next_tx_seq, |
6359 | chan->expected_ack_seq); |
6360 | l2cap_send_disconn_req(chan, ECONNRESET); |
6361 | } |
6362 | |
6363 | return err; |
6364 | } |
6365 | |
6366 | static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, |
6367 | struct sk_buff *skb) |
6368 | { |
6369 | /* l2cap_reassemble_sdu may free skb, hence invalidate control, so store |
6370 | * the txseq field in advance to use it after l2cap_reassemble_sdu |
6371 | * returns and to avoid the race condition, for example: |
6372 | * |
6373 | * The current thread calls: |
6374 | * l2cap_reassemble_sdu |
6375 | * chan->ops->recv == l2cap_sock_recv_cb |
6376 | * __sock_queue_rcv_skb |
6377 | * Another thread calls: |
6378 | * bt_sock_recvmsg |
6379 | * skb_recv_datagram |
6380 | * skb_free_datagram |
6381 | * Then the current thread tries to access control, but it was freed by |
6382 | * skb_free_datagram. |
6383 | */ |
6384 | u16 txseq = control->txseq; |
6385 | |
6386 | BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb, |
6387 | chan->rx_state); |
6388 | |
6389 | if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) { |
6390 | l2cap_pass_to_tx(chan, control); |
6391 | |
6392 | BT_DBG("buffer_seq %u->%u", chan->buffer_seq, |
6393 | __next_seq(chan, chan->buffer_seq)); |
6394 | |
6395 | chan->buffer_seq = __next_seq(chan, seq: chan->buffer_seq); |
6396 | |
6397 | l2cap_reassemble_sdu(chan, skb, control); |
6398 | } else { |
6399 | if (chan->sdu) { |
6400 | kfree_skb(skb: chan->sdu); |
6401 | chan->sdu = NULL; |
6402 | } |
6403 | chan->sdu_last_frag = NULL; |
6404 | chan->sdu_len = 0; |
6405 | |
6406 | if (skb) { |
6407 | BT_DBG("Freeing %p", skb); |
6408 | kfree_skb(skb); |
6409 | } |
6410 | } |
6411 | |
6412 | chan->last_acked_seq = txseq; |
6413 | chan->expected_tx_seq = __next_seq(chan, seq: txseq); |
6414 | |
6415 | return 0; |
6416 | } |
6417 | |
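/* Entry point for ERTM and Streaming mode PDUs on a connected channel.
 * The control field is unpacked, the FCS (if any) is verified, and the
 * payload length is checked against the negotiated MPS once the SDU
 * length and FCS overhead have been accounted for. I-frames are then fed
 * into the receive state machine (or the simpler streaming receiver),
 * while S-frames are mapped onto RR/REJ/RNR/SREJ events. Most protocol
 * violations end up disconnecting the channel with ECONNRESET.
 */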
6418 | static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) |
6419 | { |
6420 | struct l2cap_ctrl *control = &bt_cb(skb)->l2cap; |
6421 | u16 len; |
6422 | u8 event; |
6423 | |
6424 | __unpack_control(chan, skb); |
6425 | |
6426 | len = skb->len; |
6427 | |
6428 | /* |
6429 | * We can just drop the corrupted I-frame here. |
6430 | * Receiver will miss it and start proper recovery |
6431 | * procedures and ask for retransmission. |
6432 | */ |
6433 | if (l2cap_check_fcs(chan, skb)) |
6434 | goto drop; |
6435 | |
6436 | if (!control->sframe && control->sar == L2CAP_SAR_START) |
6437 | len -= L2CAP_SDULEN_SIZE; |
6438 | |
6439 | if (chan->fcs == L2CAP_FCS_CRC16) |
6440 | len -= L2CAP_FCS_SIZE; |
6441 | |
6442 | if (len > chan->mps) { |
6443 | l2cap_send_disconn_req(chan, ECONNRESET); |
6444 | goto drop; |
6445 | } |
6446 | |
6447 | if (chan->ops->filter) { |
6448 | if (chan->ops->filter(chan, skb)) |
6449 | goto drop; |
6450 | } |
6451 | |
6452 | if (!control->sframe) { |
6453 | int err; |
6454 | |
6455 | BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d", |
6456 | control->sar, control->reqseq, control->final, |
6457 | control->txseq); |
6458 | |
6459 | /* Validate F-bit - F=0 always valid, F=1 only |
6460 | * valid in TX WAIT_F |
6461 | */ |
6462 | if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F) |
6463 | goto drop; |
6464 | |
6465 | if (chan->mode != L2CAP_MODE_STREAMING) { |
6466 | event = L2CAP_EV_RECV_IFRAME; |
6467 | err = l2cap_rx(chan, control, skb, event); |
6468 | } else { |
6469 | err = l2cap_stream_rx(chan, control, skb); |
6470 | } |
6471 | |
6472 | if (err) |
6473 | l2cap_send_disconn_req(chan, ECONNRESET); |
6474 | } else { |
6475 | const u8 rx_func_to_event[4] = { |
6476 | L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ, |
6477 | L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ |
6478 | }; |
6479 | |
6480 | /* Only I-frames are expected in streaming mode */ |
6481 | if (chan->mode == L2CAP_MODE_STREAMING) |
6482 | goto drop; |
6483 | |
6484 | BT_DBG("sframe reqseq %d, final %d, poll %d, super %d", |
6485 | control->reqseq, control->final, control->poll, |
6486 | control->super); |
6487 | |
6488 | if (len != 0) { |
6489 | BT_ERR("Trailing bytes: %d in sframe", len); |
6490 | l2cap_send_disconn_req(chan, ECONNRESET); |
6491 | goto drop; |
6492 | } |
6493 | |
6494 | /* Validate F and P bits */ |
6495 | if (control->final && (control->poll || |
6496 | chan->tx_state != L2CAP_TX_STATE_WAIT_F)) |
6497 | goto drop; |
6498 | |
6499 | event = rx_func_to_event[control->super]; |
6500 | if (l2cap_rx(chan, control, skb, event)) |
6501 | l2cap_send_disconn_req(chan, ECONNRESET); |
6502 | } |
6503 | |
6504 | return 0; |
6505 | |
6506 | drop: |
6507 | kfree_skb(skb); |
6508 | return 0; |
6509 | } |
6510 | |
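/* LE/enhanced credit based flow control: every K-frame the remote sends
 * consumes one of the credits we granted it. This helper tops the remote
 * back up by comparing the credits it still holds (chan->rx_credits)
 * with the amount we are currently willing to accept, as computed by
 * l2cap_le_rx_credits() (typically derived from the receive space
 * advertised via l2cap_chan_rx_avail()), and returning the difference in
 * an L2CAP_LE_CREDITS packet.
 */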
6511 | static void l2cap_chan_le_send_credits(struct l2cap_chan *chan) |
6512 | { |
6513 | struct l2cap_conn *conn = chan->conn; |
6514 | struct l2cap_le_credits pkt; |
6515 | u16 return_credits = l2cap_le_rx_credits(chan); |
6516 | |
6517 | if (chan->rx_credits >= return_credits) |
6518 | return; |
6519 | |
6520 | return_credits -= chan->rx_credits; |
6521 | |
6522 | BT_DBG("chan %p returning %u credits to sender", chan, return_credits); |
6523 | |
6524 | chan->rx_credits += return_credits; |
6525 | |
6526 | pkt.cid = cpu_to_le16(chan->scid); |
6527 | pkt.credits = cpu_to_le16(return_credits); |
6528 | |
6529 | chan->ident = l2cap_get_ident(conn); |
6530 | |
6531 | l2cap_send_cmd(conn, ident: chan->ident, L2CAP_LE_CREDITS, len: sizeof(pkt), data: &pkt); |
6532 | } |
6533 | |
6534 | void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail) |
6535 | { |
6536 | if (chan->rx_avail == rx_avail) |
6537 | return; |
6538 | |
6539 | BT_DBG("chan %p has %zd bytes avail for rx", chan, rx_avail); |
6540 | |
6541 | chan->rx_avail = rx_avail; |
6542 | |
6543 | if (chan->state == BT_CONNECTED) |
6544 | l2cap_chan_le_send_credits(chan); |
6545 | } |
6546 | |
6547 | static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb) |
6548 | { |
6549 | int err; |
6550 | |
6551 | BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len); |
6552 | |
	/* Wait for recv to confirm reception before updating the credits */
6554 | err = chan->ops->recv(chan, skb); |
6555 | |
6556 | if (err < 0 && chan->rx_avail != -1) { |
6557 | BT_ERR("Queueing received LE L2CAP data failed"); |
6558 | l2cap_send_disconn_req(chan, ECONNRESET); |
6559 | return err; |
6560 | } |
6561 | |
6562 | /* Update credits whenever an SDU is received */ |
6563 | l2cap_chan_le_send_credits(chan); |
6564 | |
6565 | return err; |
6566 | } |
6567 | |
6568 | static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) |
6569 | { |
6570 | int err; |
6571 | |
6572 | if (!chan->rx_credits) { |
6573 | BT_ERR("No credits to receive LE L2CAP data"); |
6574 | l2cap_send_disconn_req(chan, ECONNRESET); |
6575 | return -ENOBUFS; |
6576 | } |
6577 | |
6578 | if (chan->imtu < skb->len) { |
6579 | BT_ERR("Too big LE L2CAP PDU"); |
6580 | return -ENOBUFS; |
6581 | } |
6582 | |
6583 | chan->rx_credits--; |
6584 | BT_DBG("chan %p: rx_credits %u -> %u", |
6585 | chan, chan->rx_credits + 1, chan->rx_credits); |
6586 | |
	/* Update if the remote has run out of credits; this should only
	 * happen if the remote is not using the entire MPS.
	 */
6590 | if (!chan->rx_credits) |
6591 | l2cap_chan_le_send_credits(chan); |
6592 | |
6593 | err = 0; |
6594 | |
6595 | if (!chan->sdu) { |
6596 | u16 sdu_len; |
6597 | |
6598 | sdu_len = get_unaligned_le16(p: skb->data); |
6599 | skb_pull(skb, L2CAP_SDULEN_SIZE); |
6600 | |
6601 | BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u", |
6602 | sdu_len, skb->len, chan->imtu); |
6603 | |
6604 | if (sdu_len > chan->imtu) { |
6605 | BT_ERR("Too big LE L2CAP SDU length received"); |
6606 | err = -EMSGSIZE; |
6607 | goto failed; |
6608 | } |
6609 | |
6610 | if (skb->len > sdu_len) { |
6611 | BT_ERR("Too much LE L2CAP data received"); |
6612 | err = -EINVAL; |
6613 | goto failed; |
6614 | } |
6615 | |
6616 | if (skb->len == sdu_len) |
6617 | return l2cap_ecred_recv(chan, skb); |
6618 | |
6619 | chan->sdu = skb; |
6620 | chan->sdu_len = sdu_len; |
6621 | chan->sdu_last_frag = skb; |
6622 | |
6623 | /* Detect if remote is not able to use the selected MPS */ |
6624 | if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) { |
6625 | u16 mps_len = skb->len + L2CAP_SDULEN_SIZE; |
6626 | |
6627 | /* Adjust the number of credits */ |
6628 | BT_DBG("chan->mps %u -> %u", chan->mps, mps_len); |
6629 | chan->mps = mps_len; |
6630 | l2cap_chan_le_send_credits(chan); |
6631 | } |
6632 | |
6633 | return 0; |
6634 | } |
6635 | |
6636 | BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u", |
6637 | chan->sdu->len, skb->len, chan->sdu_len); |
6638 | |
6639 | if (chan->sdu->len + skb->len > chan->sdu_len) { |
6640 | BT_ERR("Too much LE L2CAP data received"); |
6641 | err = -EINVAL; |
6642 | goto failed; |
6643 | } |
6644 | |
6645 | append_skb_frag(skb: chan->sdu, new_frag: skb, last_frag: &chan->sdu_last_frag); |
6646 | skb = NULL; |
6647 | |
6648 | if (chan->sdu->len == chan->sdu_len) { |
6649 | err = l2cap_ecred_recv(chan, skb: chan->sdu); |
6650 | if (!err) { |
6651 | chan->sdu = NULL; |
6652 | chan->sdu_last_frag = NULL; |
6653 | chan->sdu_len = 0; |
6654 | } |
6655 | } |
6656 | |
6657 | failed: |
6658 | if (err) { |
6659 | kfree_skb(skb); |
6660 | kfree_skb(skb: chan->sdu); |
6661 | chan->sdu = NULL; |
6662 | chan->sdu_last_frag = NULL; |
6663 | chan->sdu_len = 0; |
6664 | } |
6665 | |
6666 | /* We can't return an error here since we took care of the skb |
6667 | * freeing internally. An error return would cause the caller to |
6668 | * do a double-free of the skb. |
6669 | */ |
6670 | return 0; |
6671 | } |
6672 | |
6673 | static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid, |
6674 | struct sk_buff *skb) |
6675 | { |
6676 | struct l2cap_chan *chan; |
6677 | |
6678 | chan = l2cap_get_chan_by_scid(conn, cid); |
6679 | if (!chan) { |
6680 | BT_DBG("unknown cid 0x%4.4x", cid); |
6681 | /* Drop packet and return */ |
6682 | kfree_skb(skb); |
6683 | return; |
6684 | } |
6685 | |
6686 | BT_DBG("chan %p, len %d", chan, skb->len); |
6687 | |
	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done, simply assume that the channel is supported
	 * and mark it as ready.
	 */
6692 | if (chan->chan_type == L2CAP_CHAN_FIXED) |
6693 | l2cap_chan_ready(chan); |
6694 | |
6695 | if (chan->state != BT_CONNECTED) |
6696 | goto drop; |
6697 | |
6698 | switch (chan->mode) { |
6699 | case L2CAP_MODE_LE_FLOWCTL: |
6700 | case L2CAP_MODE_EXT_FLOWCTL: |
6701 | if (l2cap_ecred_data_rcv(chan, skb) < 0) |
6702 | goto drop; |
6703 | |
6704 | goto done; |
6705 | |
6706 | case L2CAP_MODE_BASIC: |
		/* If the socket receive buffer overflows we drop data here,
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice: L2CAP doesn't
		 * provide a flow control mechanism. */
6711 | |
6712 | if (chan->imtu < skb->len) { |
6713 | BT_ERR("Dropping L2CAP data: receive buffer overflow"); |
6714 | goto drop; |
6715 | } |
6716 | |
6717 | if (!chan->ops->recv(chan, skb)) |
6718 | goto done; |
6719 | break; |
6720 | |
6721 | case L2CAP_MODE_ERTM: |
6722 | case L2CAP_MODE_STREAMING: |
6723 | l2cap_data_rcv(chan, skb); |
6724 | goto done; |
6725 | |
6726 | default: |
6727 | BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode); |
6728 | break; |
6729 | } |
6730 | |
6731 | drop: |
6732 | kfree_skb(skb); |
6733 | |
6734 | done: |
6735 | l2cap_chan_unlock(chan); |
6736 | l2cap_chan_put(chan); |
6737 | } |
6738 | |
6739 | static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, |
6740 | struct sk_buff *skb) |
6741 | { |
6742 | struct hci_conn *hcon = conn->hcon; |
6743 | struct l2cap_chan *chan; |
6744 | |
6745 | if (hcon->type != ACL_LINK) |
6746 | goto free_skb; |
6747 | |
6748 | chan = l2cap_global_chan_by_psm(state: 0, psm, src: &hcon->src, dst: &hcon->dst, |
6749 | ACL_LINK); |
6750 | if (!chan) |
6751 | goto free_skb; |
6752 | |
6753 | BT_DBG("chan %p, len %d", chan, skb->len); |
6754 | |
6755 | l2cap_chan_lock(chan); |
6756 | |
6757 | if (chan->state != BT_BOUND && chan->state != BT_CONNECTED) |
6758 | goto drop; |
6759 | |
6760 | if (chan->imtu < skb->len) |
6761 | goto drop; |
6762 | |
6763 | /* Store remote BD_ADDR and PSM for msg_name */ |
6764 | bacpy(dst: &bt_cb(skb)->l2cap.bdaddr, src: &hcon->dst); |
6765 | bt_cb(skb)->l2cap.psm = psm; |
6766 | |
6767 | if (!chan->ops->recv(chan, skb)) { |
6768 | l2cap_chan_unlock(chan); |
6769 | l2cap_chan_put(chan); |
6770 | return; |
6771 | } |
6772 | |
6773 | drop: |
6774 | l2cap_chan_unlock(chan); |
6775 | l2cap_chan_put(chan); |
6776 | free_skb: |
6777 | kfree_skb(skb); |
6778 | } |
6779 | |
6780 | static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb) |
6781 | { |
6782 | struct l2cap_hdr *lh = (void *) skb->data; |
6783 | struct hci_conn *hcon = conn->hcon; |
6784 | u16 cid, len; |
6785 | __le16 psm; |
6786 | |
6787 | if (hcon->state != BT_CONNECTED) { |
6788 | BT_DBG("queueing pending rx skb"); |
6789 | skb_queue_tail(list: &conn->pending_rx, newsk: skb); |
6790 | return; |
6791 | } |
6792 | |
6793 | skb_pull(skb, L2CAP_HDR_SIZE); |
6794 | cid = __le16_to_cpu(lh->cid); |
6795 | len = __le16_to_cpu(lh->len); |
6796 | |
6797 | if (len != skb->len) { |
6798 | kfree_skb(skb); |
6799 | return; |
6800 | } |
6801 | |
6802 | /* Since we can't actively block incoming LE connections we must |
6803 | * at least ensure that we ignore incoming data from them. |
6804 | */ |
6805 | if (hcon->type == LE_LINK && |
6806 | hci_bdaddr_list_lookup(list: &hcon->hdev->reject_list, bdaddr: &hcon->dst, |
6807 | type: bdaddr_dst_type(hcon))) { |
6808 | kfree_skb(skb); |
6809 | return; |
6810 | } |
6811 | |
6812 | BT_DBG("len %d, cid 0x%4.4x", len, cid); |
6813 | |
6814 | switch (cid) { |
6815 | case L2CAP_CID_SIGNALING: |
6816 | l2cap_sig_channel(conn, skb); |
6817 | break; |
6818 | |
6819 | case L2CAP_CID_CONN_LESS: |
6820 | psm = get_unaligned((__le16 *) skb->data); |
6821 | skb_pull(skb, L2CAP_PSMLEN_SIZE); |
6822 | l2cap_conless_channel(conn, psm, skb); |
6823 | break; |
6824 | |
6825 | case L2CAP_CID_LE_SIGNALING: |
6826 | l2cap_le_sig_channel(conn, skb); |
6827 | break; |
6828 | |
6829 | default: |
6830 | l2cap_data_channel(conn, cid, skb); |
6831 | break; |
6832 | } |
6833 | } |
6834 | |
6835 | static void process_pending_rx(struct work_struct *work) |
6836 | { |
6837 | struct l2cap_conn *conn = container_of(work, struct l2cap_conn, |
6838 | pending_rx_work); |
6839 | struct sk_buff *skb; |
6840 | |
6841 | BT_DBG(""); |
6842 | |
6843 | mutex_lock(&conn->lock); |
6844 | |
6845 | while ((skb = skb_dequeue(list: &conn->pending_rx))) |
6846 | l2cap_recv_frame(conn, skb); |
6847 | |
6848 | mutex_unlock(lock: &conn->lock); |
6849 | } |
6850 | |
6851 | static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon) |
6852 | { |
6853 | struct l2cap_conn *conn = hcon->l2cap_data; |
6854 | struct hci_chan *hchan; |
6855 | |
6856 | if (conn) |
6857 | return conn; |
6858 | |
6859 | hchan = hci_chan_create(conn: hcon); |
6860 | if (!hchan) |
6861 | return NULL; |
6862 | |
6863 | conn = kzalloc(sizeof(*conn), GFP_KERNEL); |
6864 | if (!conn) { |
6865 | hci_chan_del(chan: hchan); |
6866 | return NULL; |
6867 | } |
6868 | |
6869 | kref_init(kref: &conn->ref); |
6870 | hcon->l2cap_data = conn; |
6871 | conn->hcon = hci_conn_get(conn: hcon); |
6872 | conn->hchan = hchan; |
6873 | |
6874 | BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan); |
6875 | |
6876 | conn->mtu = hcon->mtu; |
6877 | conn->feat_mask = 0; |
6878 | |
6879 | conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS; |
6880 | |
6881 | if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) && |
6882 | (bredr_sc_enabled(hcon->hdev) || |
6883 | hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP))) |
6884 | conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR; |
6885 | |
6886 | mutex_init(&conn->ident_lock); |
6887 | mutex_init(&conn->lock); |
6888 | |
6889 | INIT_LIST_HEAD(list: &conn->chan_l); |
6890 | INIT_LIST_HEAD(list: &conn->users); |
6891 | |
6892 | INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout); |
6893 | |
6894 | skb_queue_head_init(list: &conn->pending_rx); |
6895 | INIT_WORK(&conn->pending_rx_work, process_pending_rx); |
6896 | INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr); |
6897 | |
6898 | conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM; |
6899 | |
6900 | return conn; |
6901 | } |
6902 | |
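/* PSM sanity check before connecting. LE connection oriented channels
 * use the SPSM space, which is limited to 0x0001-0x00ff here. For BR/EDR
 * the spec requires the least significant bit of the least significant
 * octet to be 1 and the least significant bit of the most significant
 * octet to be 0, which is what the (psm & 0x0101) == 0x0001 test encodes:
 * 0x0001 or 0x1001 pass, 0x0002 or 0x0101 do not.
 */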
6903 | static bool is_valid_psm(u16 psm, u8 dst_type) |
6904 | { |
6905 | if (!psm) |
6906 | return false; |
6907 | |
6908 | if (bdaddr_type_is_le(type: dst_type)) |
6909 | return (psm <= 0x00ff); |
6910 | |
6911 | /* PSM must be odd and lsb of upper byte must be 0 */ |
6912 | return ((psm & 0x0101) == 0x0001); |
6913 | } |
6914 | |
6915 | struct l2cap_chan_data { |
6916 | struct l2cap_chan *chan; |
6917 | struct pid *pid; |
6918 | int count; |
6919 | }; |
6920 | |
6921 | static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data) |
6922 | { |
6923 | struct l2cap_chan_data *d = data; |
6924 | struct pid *pid; |
6925 | |
6926 | if (chan == d->chan) |
6927 | return; |
6928 | |
6929 | if (!test_bit(FLAG_DEFER_SETUP, &chan->flags)) |
6930 | return; |
6931 | |
6932 | pid = chan->ops->get_peer_pid(chan); |
6933 | |
6934 | /* Only count deferred channels with the same PID/PSM */ |
6935 | if (d->pid != pid || chan->psm != d->chan->psm || chan->ident || |
6936 | chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT) |
6937 | return; |
6938 | |
6939 | d->count++; |
6940 | } |
6941 | |
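/* Connect a channel to a remote device. The caller supplies either a PSM
 * (connection oriented channels) or a fixed CID, plus the destination
 * address and its type. The function validates the PSM/CID/mode
 * combination, creates or reuses the underlying ACL or LE link and its
 * l2cap_conn, attaches the channel, and then either starts the L2CAP
 * connect procedure right away (if the link is already up) or defers it
 * until the HCI link setup completes.
 */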
6942 | int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, |
6943 | bdaddr_t *dst, u8 dst_type, u16 timeout) |
6944 | { |
6945 | struct l2cap_conn *conn; |
6946 | struct hci_conn *hcon; |
6947 | struct hci_dev *hdev; |
6948 | int err; |
6949 | |
6950 | BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src, |
6951 | dst, dst_type, __le16_to_cpu(psm), chan->mode); |
6952 | |
6953 | hdev = hci_get_route(dst, src: &chan->src, src_type: chan->src_type); |
6954 | if (!hdev) |
6955 | return -EHOSTUNREACH; |
6956 | |
6957 | hci_dev_lock(hdev); |
6958 | |
6959 | if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid && |
6960 | chan->chan_type != L2CAP_CHAN_RAW) { |
6961 | err = -EINVAL; |
6962 | goto done; |
6963 | } |
6964 | |
6965 | if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) { |
6966 | err = -EINVAL; |
6967 | goto done; |
6968 | } |
6969 | |
6970 | if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) { |
6971 | err = -EINVAL; |
6972 | goto done; |
6973 | } |
6974 | |
6975 | switch (chan->mode) { |
6976 | case L2CAP_MODE_BASIC: |
6977 | break; |
6978 | case L2CAP_MODE_LE_FLOWCTL: |
6979 | break; |
6980 | case L2CAP_MODE_EXT_FLOWCTL: |
6981 | if (!enable_ecred) { |
6982 | err = -EOPNOTSUPP; |
6983 | goto done; |
6984 | } |
6985 | break; |
6986 | case L2CAP_MODE_ERTM: |
6987 | case L2CAP_MODE_STREAMING: |
6988 | if (!disable_ertm) |
6989 | break; |
6990 | fallthrough; |
6991 | default: |
6992 | err = -EOPNOTSUPP; |
6993 | goto done; |
6994 | } |
6995 | |
6996 | switch (chan->state) { |
6997 | case BT_CONNECT: |
6998 | case BT_CONNECT2: |
6999 | case BT_CONFIG: |
7000 | /* Already connecting */ |
7001 | err = 0; |
7002 | goto done; |
7003 | |
7004 | case BT_CONNECTED: |
7005 | /* Already connected */ |
7006 | err = -EISCONN; |
7007 | goto done; |
7008 | |
7009 | case BT_OPEN: |
7010 | case BT_BOUND: |
7011 | /* Can connect */ |
7012 | break; |
7013 | |
7014 | default: |
7015 | err = -EBADFD; |
7016 | goto done; |
7017 | } |
7018 | |
7019 | /* Set destination address and psm */ |
7020 | bacpy(dst: &chan->dst, src: dst); |
7021 | chan->dst_type = dst_type; |
7022 | |
7023 | chan->psm = psm; |
7024 | chan->dcid = cid; |
7025 | |
7026 | if (bdaddr_type_is_le(type: dst_type)) { |
7027 | /* Convert from L2CAP channel address type to HCI address type |
7028 | */ |
7029 | if (dst_type == BDADDR_LE_PUBLIC) |
7030 | dst_type = ADDR_LE_DEV_PUBLIC; |
7031 | else |
7032 | dst_type = ADDR_LE_DEV_RANDOM; |
7033 | |
7034 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) |
7035 | hcon = hci_connect_le(hdev, dst, dst_type, dst_resolved: false, |
7036 | sec_level: chan->sec_level, conn_timeout: timeout, |
7037 | HCI_ROLE_SLAVE, phy: 0, sec_phy: 0); |
7038 | else |
7039 | hcon = hci_connect_le_scan(hdev, dst, dst_type, |
7040 | sec_level: chan->sec_level, conn_timeout: timeout, |
7041 | conn_reason: CONN_REASON_L2CAP_CHAN); |
7042 | |
7043 | } else { |
7044 | u8 auth_type = l2cap_get_auth_type(chan); |
7045 | hcon = hci_connect_acl(hdev, dst, sec_level: chan->sec_level, auth_type, |
7046 | conn_reason: CONN_REASON_L2CAP_CHAN, timeout); |
7047 | } |
7048 | |
7049 | if (IS_ERR(ptr: hcon)) { |
7050 | err = PTR_ERR(ptr: hcon); |
7051 | goto done; |
7052 | } |
7053 | |
7054 | conn = l2cap_conn_add(hcon); |
7055 | if (!conn) { |
7056 | hci_conn_drop(conn: hcon); |
7057 | err = -ENOMEM; |
7058 | goto done; |
7059 | } |
7060 | |
7061 | if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) { |
7062 | struct l2cap_chan_data data; |
7063 | |
7064 | data.chan = chan; |
7065 | data.pid = chan->ops->get_peer_pid(chan); |
7066 | data.count = 1; |
7067 | |
7068 | l2cap_chan_list(conn, l2cap_chan_by_pid, &data); |
7069 | |
		/* Check that there aren't too many channels being connected */
7071 | if (data.count > L2CAP_ECRED_CONN_SCID_MAX) { |
7072 | hci_conn_drop(conn: hcon); |
7073 | err = -EPROTO; |
7074 | goto done; |
7075 | } |
7076 | } |
7077 | |
7078 | mutex_lock(&conn->lock); |
7079 | l2cap_chan_lock(chan); |
7080 | |
7081 | if (cid && __l2cap_get_chan_by_dcid(conn, cid)) { |
7082 | hci_conn_drop(conn: hcon); |
7083 | err = -EBUSY; |
7084 | goto chan_unlock; |
7085 | } |
7086 | |
7087 | /* Update source addr of the socket */ |
7088 | bacpy(dst: &chan->src, src: &hcon->src); |
7089 | chan->src_type = bdaddr_src_type(hcon); |
7090 | |
7091 | __l2cap_chan_add(conn, chan); |
7092 | |
7093 | /* l2cap_chan_add takes its own ref so we can drop this one */ |
7094 | hci_conn_drop(conn: hcon); |
7095 | |
7096 | l2cap_state_change(chan, state: BT_CONNECT); |
7097 | __set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); |
7098 | |
7099 | /* Release chan->sport so that it can be reused by other |
7100 | * sockets (as it's only used for listening sockets). |
7101 | */ |
7102 | write_lock(&chan_list_lock); |
7103 | chan->sport = 0; |
7104 | write_unlock(&chan_list_lock); |
7105 | |
7106 | if (hcon->state == BT_CONNECTED) { |
7107 | if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { |
7108 | __clear_chan_timer(chan); |
7109 | if (l2cap_chan_check_security(chan, initiator: true)) |
7110 | l2cap_state_change(chan, state: BT_CONNECTED); |
7111 | } else |
7112 | l2cap_do_start(chan); |
7113 | } |
7114 | |
7115 | err = 0; |
7116 | |
7117 | chan_unlock: |
7118 | l2cap_chan_unlock(chan); |
7119 | mutex_unlock(lock: &conn->lock); |
7120 | done: |
7121 | hci_dev_unlock(hdev); |
7122 | hci_dev_put(d: hdev); |
7123 | return err; |
7124 | } |
7125 | EXPORT_SYMBOL_GPL(l2cap_chan_connect); |
7126 | |
7127 | static void l2cap_ecred_reconfigure(struct l2cap_chan *chan) |
7128 | { |
7129 | struct l2cap_conn *conn = chan->conn; |
7130 | DEFINE_RAW_FLEX(struct l2cap_ecred_reconf_req, pdu, scid, 1); |
7131 | |
7132 | pdu->mtu = cpu_to_le16(chan->imtu); |
7133 | pdu->mps = cpu_to_le16(chan->mps); |
7134 | pdu->scid[0] = cpu_to_le16(chan->scid); |
7135 | |
7136 | chan->ident = l2cap_get_ident(conn); |
7137 | |
	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
		       struct_size(pdu, scid, 1), pdu);
7140 | } |
7141 | |
7142 | int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu) |
7143 | { |
7144 | if (chan->imtu > mtu) |
7145 | return -EINVAL; |
7146 | |
7147 | BT_DBG("chan %p mtu 0x%4.4x", chan, mtu); |
7148 | |
7149 | chan->imtu = mtu; |
7150 | |
7151 | l2cap_ecred_reconfigure(chan); |
7152 | |
7153 | return 0; |
7154 | } |
7155 | |
7156 | /* ---- L2CAP interface with lower layer (HCI) ---- */ |
7157 | |
7158 | int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) |
7159 | { |
7160 | int exact = 0, lm1 = 0, lm2 = 0; |
7161 | struct l2cap_chan *c; |
7162 | |
7163 | BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr); |
7164 | |
7165 | /* Find listening sockets and check their link_mode */ |
7166 | read_lock(&chan_list_lock); |
7167 | list_for_each_entry(c, &chan_list, global_l) { |
7168 | if (c->state != BT_LISTEN) |
7169 | continue; |
7170 | |
7171 | if (!bacmp(&c->src, &hdev->bdaddr)) {
7172 | lm1 |= HCI_LM_ACCEPT; |
7173 | if (test_bit(FLAG_ROLE_SWITCH, &c->flags)) |
7174 | lm1 |= HCI_LM_MASTER; |
7175 | exact++; |
7176 | } else if (!bacmp(&c->src, BDADDR_ANY)) {
7177 | lm2 |= HCI_LM_ACCEPT; |
7178 | if (test_bit(FLAG_ROLE_SWITCH, &c->flags)) |
7179 | lm2 |= HCI_LM_MASTER; |
7180 | } |
7181 | } |
7182 | read_unlock(&chan_list_lock); |
7183 | |
7184 | return exact ? lm1 : lm2; |
7185 | } |
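A hedged sketch of how the returned link-mode bits can be interpreted; accept_acl_connection() is made up for illustration, and only the meaning of the bits comes from l2cap_connect_ind() above:

/* Hypothetical caller sketch: a non-zero result means at least one
 * listener will accept the incoming ACL link; HCI_LM_MASTER additionally
 * asks for a role switch.
 */
static void example_on_conn_request(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int lm = l2cap_connect_ind(hdev, bdaddr);

	if (lm & HCI_LM_ACCEPT)
		accept_acl_connection(hdev, bdaddr,
				      lm & HCI_LM_MASTER /* role switch wanted */);
}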
7186 | |
7187 | /* Find the next fixed channel in BT_LISTEN state, continuing the
7188 | * iteration either from an existing channel in the list or from the
7189 | * beginning of the global list (by passing NULL as the first parameter).
7190 | */
7191 | static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c, |
7192 | struct hci_conn *hcon) |
7193 | { |
7194 | u8 src_type = bdaddr_src_type(hcon); |
7195 | |
7196 | read_lock(&chan_list_lock); |
7197 | |
7198 | if (c) |
7199 | c = list_next_entry(c, global_l); |
7200 | else |
7201 | c = list_entry(chan_list.next, typeof(*c), global_l); |
7202 | |
7203 | list_for_each_entry_from(c, &chan_list, global_l) { |
7204 | if (c->chan_type != L2CAP_CHAN_FIXED) |
7205 | continue; |
7206 | if (c->state != BT_LISTEN) |
7207 | continue; |
7208 | if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
7209 | continue; |
7210 | if (src_type != c->src_type) |
7211 | continue; |
7212 | |
7213 | c = l2cap_chan_hold_unless_zero(c); |
7214 | read_unlock(&chan_list_lock); |
7215 | return c; |
7216 | } |
7217 | |
7218 | read_unlock(&chan_list_lock); |
7219 | |
7220 | return NULL; |
7221 | } |
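The returned channel is held via l2cap_chan_hold_unless_zero(), so a caller must fetch the next entry before dropping the current reference. A minimal sketch of the iteration pattern, mirroring the loop in l2cap_connect_cfm() below (the per-channel work is elided):

/* Illustrative only: walk every listening fixed channel that matches
 * hcon, taking the next reference before releasing the current one.
 */
static void example_walk_fixed_chans(struct hci_conn *hcon)
{
	struct l2cap_chan *pchan = l2cap_global_fixed_chan(NULL, hcon);

	while (pchan) {
		struct l2cap_chan *next;

		/* ... use pchan ... */

		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}
}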
7222 | |
7223 | static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status) |
7224 | { |
7225 | struct hci_dev *hdev = hcon->hdev; |
7226 | struct l2cap_conn *conn; |
7227 | struct l2cap_chan *pchan; |
7228 | u8 dst_type; |
7229 | |
7230 | if (hcon->type != ACL_LINK && hcon->type != LE_LINK) |
7231 | return; |
7232 | |
7233 | BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status); |
7234 | |
7235 | if (status) { |
7236 | l2cap_conn_del(hcon, bt_to_errno(status));
7237 | return; |
7238 | } |
7239 | |
7240 | conn = l2cap_conn_add(hcon); |
7241 | if (!conn) |
7242 | return; |
7243 | |
7244 | dst_type = bdaddr_dst_type(hcon); |
7245 | |
7246 | /* If device is blocked, do not create channels for it */ |
7247 | if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
7248 | return; |
7249 | |
7250 | /* Find fixed channels and notify them of the new connection. We |
7251 | * use multiple individual lookups, continuing each time where |
7252 | * we left off, because the list lock would prevent calling the |
7253 | * potentially sleeping l2cap_chan_lock() function. |
7254 | */ |
7255 | pchan = l2cap_global_fixed_chan(NULL, hcon); |
7256 | while (pchan) { |
7257 | struct l2cap_chan *chan, *next; |
7258 | |
7259 | /* Client fixed channels should override server ones */ |
7260 | if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7261 | goto next; |
7262 | |
7263 | l2cap_chan_lock(pchan);
7264 | chan = pchan->ops->new_connection(pchan); |
7265 | if (chan) { |
7266 | bacpy(&chan->src, &hcon->src);
7267 | bacpy(&chan->dst, &hcon->dst);
7268 | chan->src_type = bdaddr_src_type(hcon); |
7269 | chan->dst_type = dst_type; |
7270 | |
7271 | __l2cap_chan_add(conn, chan); |
7272 | } |
7273 | |
7274 | l2cap_chan_unlock(pchan);
7275 | next:
7276 | next = l2cap_global_fixed_chan(pchan, hcon);
7277 | l2cap_chan_put(pchan); |
7278 | pchan = next; |
7279 | } |
7280 | |
7281 | l2cap_conn_ready(conn); |
7282 | } |
7283 | |
7284 | int l2cap_disconn_ind(struct hci_conn *hcon) |
7285 | { |
7286 | struct l2cap_conn *conn = hcon->l2cap_data; |
7287 | |
7288 | BT_DBG("hcon %p", hcon); |
7289 | |
7290 | if (!conn) |
7291 | return HCI_ERROR_REMOTE_USER_TERM; |
7292 | return conn->disc_reason; |
7293 | } |
7294 | |
7295 | static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason) |
7296 | { |
7297 | if (hcon->type != ACL_LINK && hcon->type != LE_LINK) |
7298 | return; |
7299 | |
7300 | BT_DBG("hcon %p reason %d", hcon, reason); |
7301 | |
7302 | l2cap_conn_del(hcon, bt_to_errno(reason));
7303 | } |
7304 | |
7305 | static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt) |
7306 | { |
7307 | if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) |
7308 | return; |
7309 | |
7310 | if (encrypt == 0x00) { |
7311 | if (chan->sec_level == BT_SECURITY_MEDIUM) { |
7312 | __set_chan_timer(chan, L2CAP_ENC_TIMEOUT); |
7313 | } else if (chan->sec_level == BT_SECURITY_HIGH || |
7314 | chan->sec_level == BT_SECURITY_FIPS) |
7315 | l2cap_chan_close(chan, ECONNREFUSED); |
7316 | } else { |
7317 | if (chan->sec_level == BT_SECURITY_MEDIUM) |
7318 | __clear_chan_timer(chan); |
7319 | } |
7320 | } |
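For reference, a compact restatement of the branches above as an illustrative comment (connection-oriented channels only, as the early return enforces):

/*
 * Encryption-change policy summary:
 *   encryption lost, sec_level == BT_SECURITY_MEDIUM        -> arm L2CAP_ENC_TIMEOUT
 *   encryption lost, sec_level HIGH or FIPS                 -> close channel (ECONNREFUSED)
 *   encryption (re)enabled, sec_level == BT_SECURITY_MEDIUM -> clear the channel timer
 */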
7321 | |
7322 | static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) |
7323 | { |
7324 | struct l2cap_conn *conn = hcon->l2cap_data; |
7325 | struct l2cap_chan *chan; |
7326 | |
7327 | if (!conn) |
7328 | return; |
7329 | |
7330 | BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt); |
7331 | |
7332 | mutex_lock(&conn->lock); |
7333 | |
7334 | list_for_each_entry(chan, &conn->chan_l, list) { |
7335 | l2cap_chan_lock(chan); |
7336 | |
7337 | BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid, |
7338 | state_to_string(chan->state)); |
7339 | |
7340 | if (!status && encrypt) |
7341 | chan->sec_level = hcon->sec_level; |
7342 | |
7343 | if (!__l2cap_no_conn_pending(chan)) { |
7344 | l2cap_chan_unlock(chan); |
7345 | continue; |
7346 | } |
7347 | |
7348 | if (!status && (chan->state == BT_CONNECTED || |
7349 | chan->state == BT_CONFIG)) { |
7350 | chan->ops->resume(chan); |
7351 | l2cap_check_encryption(chan, encrypt); |
7352 | l2cap_chan_unlock(chan); |
7353 | continue; |
7354 | } |
7355 | |
7356 | if (chan->state == BT_CONNECT) { |
7357 | if (!status && l2cap_check_enc_key_size(hcon, chan)) |
7358 | l2cap_start_connection(chan); |
7359 | else |
7360 | __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); |
7361 | } else if (chan->state == BT_CONNECT2 && |
7362 | !(chan->mode == L2CAP_MODE_EXT_FLOWCTL || |
7363 | chan->mode == L2CAP_MODE_LE_FLOWCTL)) { |
7364 | struct l2cap_conn_rsp rsp; |
7365 | __u16 res, stat; |
7366 | |
7367 | if (!status && l2cap_check_enc_key_size(hcon, chan)) { |
7368 | if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { |
7369 | res = L2CAP_CR_PEND; |
7370 | stat = L2CAP_CS_AUTHOR_PEND; |
7371 | chan->ops->defer(chan); |
7372 | } else { |
7373 | l2cap_state_change(chan, BT_CONFIG);
7374 | res = L2CAP_CR_SUCCESS; |
7375 | stat = L2CAP_CS_NO_INFO; |
7376 | } |
7377 | } else { |
7378 | l2cap_state_change(chan, BT_DISCONN);
7379 | __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); |
7380 | res = L2CAP_CR_SEC_BLOCK; |
7381 | stat = L2CAP_CS_NO_INFO; |
7382 | } |
7383 | |
7384 | rsp.scid = cpu_to_le16(chan->dcid); |
7385 | rsp.dcid = cpu_to_le16(chan->scid); |
7386 | rsp.result = cpu_to_le16(res); |
7387 | rsp.status = cpu_to_le16(stat); |
7388 | l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
7389 | sizeof(rsp), &rsp);
7390 | |
7391 | if (!test_bit(CONF_REQ_SENT, &chan->conf_state) && |
7392 | res == L2CAP_CR_SUCCESS) { |
7393 | char buf[128]; |
7394 | set_bit(CONF_REQ_SENT, &chan->conf_state);
7395 | l2cap_send_cmd(conn, l2cap_get_ident(conn),
7396 | L2CAP_CONF_REQ,
7397 | l2cap_build_conf_req(chan, buf, sizeof(buf)),
7398 | buf);
7399 | chan->num_conf_req++; |
7400 | } |
7401 | } |
7402 | |
7403 | l2cap_chan_unlock(chan); |
7404 | } |
7405 | |
7406 | mutex_unlock(&conn->lock);
7407 | } |
7408 | |
7409 | /* Append fragment into frame respecting the maximum len of rx_skb */ |
7410 | static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb, |
7411 | u16 len) |
7412 | { |
7413 | if (!conn->rx_skb) { |
7414 | /* Allocate skb for the complete frame (with header) */ |
7415 | conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL); |
7416 | if (!conn->rx_skb) |
7417 | return -ENOMEM; |
7418 | /* Init rx_len */ |
7419 | conn->rx_len = len; |
7420 | |
7421 | skb_set_delivery_time(conn->rx_skb, skb->tstamp,
7422 | skb->tstamp_type);
7423 | } |
7424 | |
7425 | /* Copy as much as the rx_skb can hold */ |
7426 | len = min_t(u16, len, skb->len); |
7427 | skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
7428 | skb_pull(skb, len); |
7429 | conn->rx_len -= len; |
7430 | |
7431 | return len; |
7432 | } |
7433 | |
7434 | static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb) |
7435 | { |
7436 | struct sk_buff *rx_skb; |
7437 | int len; |
7438 | |
7439 | /* Append just enough to complete the header */ |
7440 | len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len); |
7441 | |
7442 | /* If the header could not be read, just continue */
7443 | if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE) |
7444 | return len; |
7445 | |
7446 | rx_skb = conn->rx_skb; |
7447 | len = get_unaligned_le16(rx_skb->data);
7448 | |
7449 | /* Check if rx_skb has enough space to receive all fragments */
7450 | if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
7451 | /* Update expected len */ |
7452 | conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE); |
7453 | return L2CAP_LEN_SIZE; |
7454 | } |
7455 | |
7456 | /* Reset conn->rx_skb since it will need to be reallocated in order to |
7457 | * fit all fragments. |
7458 | */ |
7459 | conn->rx_skb = NULL; |
7460 | |
7461 | /* Reallocate rx_skb using the exact expected length */
7462 | len = l2cap_recv_frag(conn, rx_skb,
7463 | len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
7464 | kfree_skb(rx_skb);
7465 | |
7466 | return len; |
7467 | } |
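A small kernel-flavored sketch of the header arithmetic used above, reusing this file's helpers. Once the 2-byte little-endian length field of the L2CAP basic header has been read, the bytes still expected are the remaining header (the 2-byte CID, i.e. L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) plus the payload itself; a length field of 672, for example, means 674 more bytes. The helper name is made up:

/* Illustrative only: bytes still expected once the length field is known */
static u32 example_remaining_after_len(const u8 *len_field)
{
	u32 payload = get_unaligned_le16(len_field);

	return payload + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
}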
7468 | |
7469 | static void l2cap_recv_reset(struct l2cap_conn *conn) |
7470 | { |
7471 | kfree_skb(conn->rx_skb);
7472 | conn->rx_skb = NULL; |
7473 | conn->rx_len = 0; |
7474 | } |
7475 | |
7476 | struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *c) |
7477 | { |
7478 | if (!c) |
7479 | return NULL; |
7480 | |
7481 | BT_DBG("conn %p orig refcnt %u", c, kref_read(&c->ref)); |
7482 | |
7483 | if (!kref_get_unless_zero(&c->ref))
7484 | return NULL; |
7485 | |
7486 | return c; |
7487 | } |
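A minimal sketch of the intended usage, mirroring l2cap_recv_acldata() below; the hci_dev locking that the real caller takes around reading hcon->l2cap_data is omitted here, and the helper name is made up:

/* Illustrative only: proceed only if a reference could actually be taken,
 * and balance it with l2cap_conn_put() when done.
 */
static void example_use_conn(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = l2cap_conn_hold_unless_zero(hcon->l2cap_data);

	if (!conn)
		return;		/* connection already going away */

	/* ... safe to use conn here ... */

	l2cap_conn_put(conn);
}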
7488 | |
7489 | void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) |
7490 | { |
7491 | struct l2cap_conn *conn; |
7492 | int len; |
7493 | |
7494 | /* Lock hdev to access l2cap_data to avoid race with l2cap_conn_del */ |
7495 | hci_dev_lock(hcon->hdev); |
7496 | |
7497 | conn = hcon->l2cap_data; |
7498 | |
7499 | if (!conn) |
7500 | conn = l2cap_conn_add(hcon); |
7501 | |
7502 | conn = l2cap_conn_hold_unless_zero(conn);
7503 | |
7504 | hci_dev_unlock(hcon->hdev); |
7505 | |
7506 | if (!conn) { |
7507 | kfree_skb(skb); |
7508 | return; |
7509 | } |
7510 | |
7511 | BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags); |
7512 | |
7513 | mutex_lock(&conn->lock); |
7514 | |
7515 | switch (flags) { |
7516 | case ACL_START: |
7517 | case ACL_START_NO_FLUSH: |
7518 | case ACL_COMPLETE: |
7519 | if (conn->rx_skb) { |
7520 | BT_ERR("Unexpected start frame (len %d)", skb->len); |
7521 | l2cap_recv_reset(conn); |
7522 | l2cap_conn_unreliable(conn, ECOMM); |
7523 | } |
7524 | |
7525 | /* The start fragment may not contain the full L2CAP length field,
7526 | * so just copy whatever bytes did arrive and use conn->mtu as the
7527 | * expected length until the header can be completed.
7528 | */
7529 | if (skb->len < L2CAP_LEN_SIZE) { |
7530 | l2cap_recv_frag(conn, skb, conn->mtu);
7531 | break; |
7532 | } |
7533 | |
7534 | len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;
7535 | |
7536 | if (len == skb->len) { |
7537 | /* Complete frame received */ |
7538 | l2cap_recv_frame(conn, skb); |
7539 | goto unlock; |
7540 | } |
7541 | |
7542 | BT_DBG("Start: total len %d, frag len %u", len, skb->len); |
7543 | |
7544 | if (skb->len > len) { |
7545 | BT_ERR("Frame is too long (len %u, expected len %d)", |
7546 | skb->len, len); |
7547 | /* PTS test cases L2CAP/COS/CED/BI-14-C and BI-15-C |
7548 | * (Multiple Signaling Command in one PDU, Data |
7549 | * Truncated, BR/EDR) send a C-frame to the IUT with |
7550 | * PDU Length set to 8 and Channel ID set to the |
7551 | * correct signaling channel for the logical link. |
7552 | * The Information payload contains one L2CAP_ECHO_REQ
7553 | * packet with Data Length set to 0 (0 octets of echo
7554 | * data) and one invalid command packet whose data is
7555 | * truncated in the PDU but still present in the HCI packet.
7556 | *
7557 | * Shorten the socket buffer to the PDU length so that the
7558 | * valid commands in the PDU can be processed before the
7559 | * connection is marked unreliable.
7560 | */ |
7561 | skb->len = len; |
7562 | l2cap_recv_frame(conn, skb); |
7563 | l2cap_conn_unreliable(conn, ECOMM); |
7564 | goto unlock; |
7565 | } |
7566 | |
7567 | /* Append fragment into frame (with header) */ |
7568 | if (l2cap_recv_frag(conn, skb, len) < 0) |
7569 | goto drop; |
7570 | |
7571 | break; |
7572 | |
7573 | case ACL_CONT: |
7574 | BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len); |
7575 | |
7576 | if (!conn->rx_skb) { |
7577 | BT_ERR("Unexpected continuation frame (len %d)", skb->len); |
7578 | l2cap_conn_unreliable(conn, ECOMM); |
7579 | goto drop; |
7580 | } |
7581 | |
7582 | /* Complete the L2CAP length if it has not been read */ |
7583 | if (conn->rx_skb->len < L2CAP_LEN_SIZE) { |
7584 | if (l2cap_recv_len(conn, skb) < 0) { |
7585 | l2cap_conn_unreliable(conn, ECOMM); |
7586 | goto drop; |
7587 | } |
7588 | |
7589 | /* If the header still could not be read, just continue */
7590 | if (conn->rx_skb->len < L2CAP_LEN_SIZE) |
7591 | break; |
7592 | } |
7593 | |
7594 | if (skb->len > conn->rx_len) { |
7595 | BT_ERR("Fragment is too long (len %u, expected %u)", |
7596 | skb->len, conn->rx_len); |
7597 | l2cap_recv_reset(conn); |
7598 | l2cap_conn_unreliable(conn, ECOMM); |
7599 | goto drop; |
7600 | } |
7601 | |
7602 | /* Append fragment into frame (with header) */ |
7603 | l2cap_recv_frag(conn, skb, skb->len);
7604 | |
7605 | if (!conn->rx_len) { |
7606 | /* Complete frame received. l2cap_recv_frame |
7607 | * takes ownership of the skb so set the global |
7608 | * rx_skb pointer to NULL first. |
7609 | */ |
7610 | struct sk_buff *rx_skb = conn->rx_skb; |
7611 | conn->rx_skb = NULL; |
7612 | l2cap_recv_frame(conn, rx_skb);
7613 | } |
7614 | break; |
7615 | } |
7616 | |
7617 | drop: |
7618 | kfree_skb(skb); |
7619 | unlock: |
7620 | mutex_unlock(&conn->lock);
7621 | l2cap_conn_put(conn); |
7622 | } |
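A worked example of the reassembly bookkeeping above, with made-up sizes; the 4-byte figure is the L2CAP basic header (2-byte length plus 2-byte CID) used throughout this file:

/* Illustrative numbers: a frame with a 100-byte payload is 104 bytes on
 * the wire (4-byte header + payload). If the ACL_START fragment carries
 * 27 bytes, conn->rx_len is 104 - 27 = 77 afterwards; ACL_CONT fragments
 * of 26, 26 and 25 bytes then bring rx_len to 0 and the reassembled frame
 * is handed to l2cap_recv_frame().
 */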
7623 | |
7624 | static struct hci_cb l2cap_cb = { |
7625 | .name = "L2CAP", |
7626 | .connect_cfm = l2cap_connect_cfm, |
7627 | .disconn_cfm = l2cap_disconn_cfm, |
7628 | .security_cfm = l2cap_security_cfm, |
7629 | }; |
7630 | |
7631 | static int l2cap_debugfs_show(struct seq_file *f, void *p) |
7632 | { |
7633 | struct l2cap_chan *c; |
7634 | |
7635 | read_lock(&chan_list_lock); |
7636 | |
7637 | list_for_each_entry(c, &chan_list, global_l) { |
7638 | seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7639 | &c->src, c->src_type, &c->dst, c->dst_type, |
7640 | c->state, __le16_to_cpu(c->psm), |
7641 | c->scid, c->dcid, c->imtu, c->omtu, |
7642 | c->sec_level, c->mode); |
7643 | } |
7644 | |
7645 | read_unlock(&chan_list_lock); |
7646 | |
7647 | return 0; |
7648 | } |
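Each registered channel is printed as one line, in the order src (src_type) dst (dst_type) state psm scid dcid imtu omtu sec_level mode; the file typically appears as /sys/kernel/debug/bluetooth/l2cap when bt_debugfs is available. A made-up sample line, all values illustrative only:

00:1A:7D:DA:71:13 (0) 00:02:72:C9:11:22 (0) 1 4097 0x0040 0x0041 672 672 1 0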
7649 | |
7650 | DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs); |
7651 | |
7652 | static struct dentry *l2cap_debugfs; |
7653 | |
7654 | int __init l2cap_init(void) |
7655 | { |
7656 | int err; |
7657 | |
7658 | err = l2cap_init_sockets(); |
7659 | if (err < 0) |
7660 | return err; |
7661 | |
7662 | hci_register_cb(&l2cap_cb);
7663 | |
7664 | if (IS_ERR_OR_NULL(bt_debugfs))
7665 | return 0; |
7666 | |
7667 | l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs, |
7668 | NULL, &l2cap_debugfs_fops); |
7669 | |
7670 | return 0; |
7671 | } |
7672 | |
7673 | void l2cap_exit(void) |
7674 | { |
7675 | debugfs_remove(l2cap_debugfs);
7676 | hci_unregister_cb(&l2cap_cb);
7677 | l2cap_cleanup_sockets(); |
7678 | } |
7679 | |
7680 | module_param(disable_ertm, bool, 0644); |
7681 | MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode"); |
7682 | |
7683 | module_param(enable_ecred, bool, 0644); |
7684 | MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode"); |
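Both parameters are registered with mode 0644, so in addition to being set at module load time they can typically be toggled at runtime via /sys/module/bluetooth/parameters/disable_ertm and /sys/module/bluetooth/parameters/enable_ecred (this file is built into the bluetooth module).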
7685 |