1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * IEEE 802.15.4 scanning management |
4 | * |
5 | * Copyright (C) 2021 Qorvo US, Inc |
6 | * Authors: |
7 | * - David Girault <david.girault@qorvo.com> |
8 | * - Miquel Raynal <miquel.raynal@bootlin.com> |
9 | */ |
10 | |
11 | #include <linux/module.h> |
12 | #include <linux/rtnetlink.h> |
13 | #include <net/mac802154.h> |
14 | |
15 | #include "ieee802154_i.h" |
16 | #include "driver-ops.h" |
17 | #include "../ieee802154/nl802154.h" |
18 | |
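/* Worst-case MLME frame sizes used when allocating skbs below: a beacon MHR
 * with a single extended source address (13 bytes) plus the mandatory beacon
 * fields (4 bytes), and a MAC command MHR with both addresses in extended
 * form (23 bytes) plus the command frame identifier (1 byte).
 */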
19 | #define IEEE802154_BEACON_MHR_SZ 13 |
20 | #define IEEE802154_BEACON_PL_SZ 4 |
21 | #define IEEE802154_MAC_CMD_MHR_SZ 23 |
22 | #define IEEE802154_MAC_CMD_PL_SZ 1 |
23 | #define IEEE802154_BEACON_SKB_SZ (IEEE802154_BEACON_MHR_SZ + \ |
24 | IEEE802154_BEACON_PL_SZ) |
25 | #define IEEE802154_MAC_CMD_SKB_SZ (IEEE802154_MAC_CMD_MHR_SZ + \ |
26 | IEEE802154_MAC_CMD_PL_SZ) |
27 | |
28 | /* mac802154_scan_cleanup_locked() must be called upon scan completion or abort. |
29 | * - Completions are asynchronous, not locked by the rtnl and decided by the |
30 | * scan worker. |
31 | * - Aborts are decided by userspace, and locked by the rtnl. |
32 | * |
33 | * Concurrent modifications to the PHY, the interfaces or the hardware are in |
34 | * general prevented by the rtnl. So in most cases we don't need additional |
35 | * protection. |
36 | * |
37 | * However, the scan worker gets triggered without anybody noticing and thus we |
38 | * must ensure the presence of the devices as well as data consistency: |
39 | * - The sub-interface and the device driver module both get their reference |
40 | * counters incremented whenever we start a scan, so they cannot disappear |
41 | * during operation. |
42 | * - Data consistency is achieved by the use of RCU-protected pointers. |
43 | */ |
44 | static int mac802154_scan_cleanup_locked(struct ieee802154_local *local, |
45 | struct ieee802154_sub_if_data *sdata, |
46 | bool aborted) |
47 | { |
48 | struct wpan_dev *wpan_dev = &sdata->wpan_dev; |
49 | struct wpan_phy *wpan_phy = local->phy; |
50 | struct cfg802154_scan_request *request; |
51 | u8 arg; |
52 | |
53 | /* Prevent any further use of the scan request */ |
54 | clear_bit(IEEE802154_IS_SCANNING, &local->ongoing); |
55 | cancel_delayed_work(&local->scan_work); |
56 | request = rcu_replace_pointer(local->scan_req, NULL, 1); |
57 | if (!request) |
58 | return 0; |
59 | kvfree_rcu_mightsleep(request); |
60 | |
61 | /* Advertise first, while we know the devices cannot be removed */ |
62 | if (aborted) |
63 | arg = NL802154_SCAN_DONE_REASON_ABORTED; |
64 | else |
65 | arg = NL802154_SCAN_DONE_REASON_FINISHED; |
66 | nl802154_scan_done(wpan_phy, wpan_dev, arg); |
67 | |
68 | /* Cleanup software stack */ |
69 | ieee802154_mlme_op_post(local); |
70 | |
71 | /* Set the hardware back in its original state */ |
72 | drv_set_channel(local, wpan_phy->current_page, |
73 | wpan_phy->current_channel); |
74 | ieee802154_configure_durations(wpan_phy, wpan_phy->current_page, |
75 | wpan_phy->current_channel); |
76 | drv_stop(local); |
77 | synchronize_net(); |
78 | sdata->required_filtering = sdata->iface_default_filtering; |
79 | drv_start(local, sdata->required_filtering, &local->addr_filt); |
80 | |
81 | return 0; |
82 | } |
83 | |
84 | int mac802154_abort_scan_locked(struct ieee802154_local *local, |
85 | struct ieee802154_sub_if_data *sdata) |
86 | { |
87 | ASSERT_RTNL(); |
88 | |
89 | if (!mac802154_is_scanning(local)) |
90 | return -ESRCH; |
91 | |
92 | return mac802154_scan_cleanup_locked(local, sdata, true); |
93 | } |
94 | |
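/* Per-channel scan time, following the IEEE 802.15.4 formula
 * aBaseSuperframeDuration * (2^duration_order + 1) symbols, converted to
 * jiffies using the PHY symbol duration.
 */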
95 | static unsigned int mac802154_scan_get_channel_time(u8 duration_order, |
96 | u8 symbol_duration) |
97 | { |
98 | u64 base_super_frame_duration = (u64)symbol_duration * |
99 | IEEE802154_SUPERFRAME_PERIOD * IEEE802154_SLOT_PERIOD; |
100 | |
101 | return usecs_to_jiffies(base_super_frame_duration * |
102 | (BIT(duration_order) + 1)); |
103 | } |
104 | |
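/* Drop the beacons still queued for processing: after a channel change they
 * can no longer be attributed to a channel with certainty.
 */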
105 | static void mac802154_flush_queued_beacons(struct ieee802154_local *local) |
106 | { |
107 | struct cfg802154_mac_pkt *mac_pkt, *tmp; |
108 | |
109 | list_for_each_entry_safe(mac_pkt, tmp, &local->rx_beacon_list, node) { |
110 | list_del(&mac_pkt->node); |
111 | kfree_skb(mac_pkt->skb); |
112 | kfree(mac_pkt); |
113 | } |
114 | } |
115 | |
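/* Advance to the next channel set in the scan request bitmap. find_next_bit()
 * returns a value above IEEE802154_MAX_CHANNEL once the bitmap is exhausted,
 * which mac802154_scan_find_next_chan() turns into an error.
 */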
116 | static void |
117 | mac802154_scan_get_next_channel(struct ieee802154_local *local, |
118 | struct cfg802154_scan_request *scan_req, |
119 | u8 *channel) |
120 | { |
121 | (*channel)++; |
122 | *channel = find_next_bit((const unsigned long *)&scan_req->channels, |
123 | IEEE802154_MAX_CHANNEL + 1, |
124 | *channel); |
125 | } |
126 | |
127 | static int mac802154_scan_find_next_chan(struct ieee802154_local *local, |
128 | struct cfg802154_scan_request *scan_req, |
129 | u8 page, u8 *channel) |
130 | { |
131 | mac802154_scan_get_next_channel(local, scan_req, channel); |
132 | if (*channel > IEEE802154_MAX_CHANNEL) |
133 | return -EINVAL; |
134 | |
135 | return 0; |
136 | } |
137 | |
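/* Prepare the beacon request command used by active scans: a broadcast MAC
 * command frame (destination PAN ID and short address both set to the
 * broadcast value) carrying the BEACON_REQ command identifier.
 */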
138 | static int mac802154_scan_prepare_beacon_req(struct ieee802154_local *local) |
139 | { |
140 | memset(&local->scan_beacon_req, 0, sizeof(local->scan_beacon_req)); |
141 | local->scan_beacon_req.mhr.fc.type = IEEE802154_FC_TYPE_MAC_CMD; |
142 | local->scan_beacon_req.mhr.fc.dest_addr_mode = IEEE802154_SHORT_ADDRESSING; |
143 | local->scan_beacon_req.mhr.fc.version = IEEE802154_2003_STD; |
144 | local->scan_beacon_req.mhr.fc.source_addr_mode = IEEE802154_NO_ADDRESSING; |
145 | local->scan_beacon_req.mhr.dest.mode = IEEE802154_ADDR_SHORT; |
146 | local->scan_beacon_req.mhr.dest.pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST); |
147 | local->scan_beacon_req.mhr.dest.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST); |
148 | local->scan_beacon_req.mac_pl.cmd_id = IEEE802154_CMD_BEACON_REQ; |
149 | |
150 | return 0; |
151 | } |
152 | |
153 | static int mac802154_transmit_beacon_req(struct ieee802154_local *local, |
154 | struct ieee802154_sub_if_data *sdata) |
155 | { |
156 | struct sk_buff *skb; |
157 | int ret; |
158 | |
159 | skb = alloc_skb(IEEE802154_MAC_CMD_SKB_SZ, GFP_KERNEL); |
160 | if (!skb) |
161 | return -ENOBUFS; |
162 | |
163 | skb->dev = sdata->dev; |
164 | |
165 | ret = ieee802154_mac_cmd_push(skb, &local->scan_beacon_req, NULL, 0); |
166 | if (ret) { |
167 | kfree_skb(skb); |
168 | return ret; |
169 | } |
170 | |
171 | return ieee802154_mlme_tx(local, sdata, skb); |
172 | } |
173 | |
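/* Scan worker, run once per channel: turn the receiver off, flush stale
 * beacons, hop to the next valid channel, optionally send a beacon request
 * (active scan) and re-arm the work for the duration of one channel scan.
 * When no channel is left, clean up and notify userspace.
 */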
174 | void mac802154_scan_worker(struct work_struct *work) |
175 | { |
176 | struct ieee802154_local *local = |
177 | container_of(work, struct ieee802154_local, scan_work.work); |
178 | struct cfg802154_scan_request *scan_req; |
179 | enum nl802154_scan_types scan_req_type; |
180 | struct ieee802154_sub_if_data *sdata; |
181 | unsigned int scan_duration = 0; |
182 | struct wpan_phy *wpan_phy; |
183 | u8 scan_req_duration; |
184 | u8 page, channel; |
185 | int ret; |
186 | |
187 | /* Ensure the device receiver is turned off when changing channels |
188 | * because there is no atomic way to change the channel and know on |
189 | * which one a beacon might have been received. |
190 | */ |
191 | drv_stop(local); |
192 | synchronize_net(); |
193 | mac802154_flush_queued_beacons(local); |
194 | |
195 | rcu_read_lock(); |
196 | scan_req = rcu_dereference(local->scan_req); |
197 | if (unlikely(!scan_req)) { |
198 | rcu_read_unlock(); |
199 | return; |
200 | } |
201 | |
202 | sdata = IEEE802154_WPAN_DEV_TO_SUB_IF(scan_req->wpan_dev); |
203 | |
204 | /* Wait an arbitrary amount of time in case we cannot use the device */ |
205 | if (local->suspended || !ieee802154_sdata_running(sdata)) { |
206 | rcu_read_unlock(); |
207 | queue_delayed_work(local->mac_wq, &local->scan_work, |
208 | msecs_to_jiffies(1000)); |
209 | return; |
210 | } |
211 | |
212 | wpan_phy = scan_req->wpan_phy; |
213 | scan_req_type = scan_req->type; |
214 | scan_req_duration = scan_req->duration; |
215 | |
216 | /* Look for the next valid chan */ |
217 | page = local->scan_page; |
218 | channel = local->scan_channel; |
219 | do { |
220 | ret = mac802154_scan_find_next_chan(local, scan_req, page, &channel); |
221 | if (ret) { |
222 | rcu_read_unlock(); |
223 | goto end_scan; |
224 | } |
225 | } while (!ieee802154_chan_is_valid(scan_req->wpan_phy, page, channel)); |
226 | |
227 | rcu_read_unlock(); |
228 | |
229 | /* Bypass the stack on purpose when changing the channel */ |
230 | rtnl_lock(); |
231 | ret = drv_set_channel(local, page, channel); |
232 | rtnl_unlock(); |
233 | if (ret) { |
234 | dev_err(&sdata->dev->dev, |
235 | "Channel change failure during scan, aborting (%d)\n" , ret); |
236 | goto end_scan; |
237 | } |
238 | |
239 | local->scan_page = page; |
240 | local->scan_channel = channel; |
241 | |
242 | rtnl_lock(); |
243 | ret = drv_start(local, IEEE802154_FILTERING_3_SCAN, &local->addr_filt); |
244 | rtnl_unlock(); |
245 | if (ret) { |
246 | dev_err(&sdata->dev->dev, |
247 | "Restarting failure after channel change, aborting (%d)\n" , ret); |
248 | goto end_scan; |
249 | } |
250 | |
251 | if (scan_req_type == NL802154_SCAN_ACTIVE) { |
252 | ret = mac802154_transmit_beacon_req(local, sdata); |
253 | if (ret) |
254 | dev_err(&sdata->dev->dev, |
255 | "Error when transmitting beacon request (%d)\n" , ret); |
256 | } |
257 | |
258 | ieee802154_configure_durations(wpan_phy, page, channel); |
259 | scan_duration = mac802154_scan_get_channel_time(scan_req_duration, |
260 | wpan_phy->symbol_duration); |
261 | dev_dbg(&sdata->dev->dev, |
262 | "Scan page %u channel %u for %ums\n", |
263 | page, channel, jiffies_to_msecs(scan_duration)); |
264 | queue_delayed_work(local->mac_wq, &local->scan_work, scan_duration); |
265 | return; |
266 | |
267 | end_scan: |
268 | rtnl_lock(); |
269 | mac802154_scan_cleanup_locked(local, sdata, false); |
270 | rtnl_unlock(); |
271 | } |
272 | |
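/* Start a scan: store the request, pause the data path (scanning requires
 * promiscuous mode), reset the channel iterator and kick the scan worker
 * immediately.
 */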
273 | int mac802154_trigger_scan_locked(struct ieee802154_sub_if_data *sdata, |
274 | struct cfg802154_scan_request *request) |
275 | { |
276 | struct ieee802154_local *local = sdata->local; |
277 | |
278 | ASSERT_RTNL(); |
279 | |
280 | if (mac802154_is_scanning(local)) |
281 | return -EBUSY; |
282 | |
283 | if (request->type != NL802154_SCAN_PASSIVE && |
284 | request->type != NL802154_SCAN_ACTIVE) |
285 | return -EOPNOTSUPP; |
286 | |
287 | /* Store scanning parameters */ |
288 | rcu_assign_pointer(local->scan_req, request); |
289 | |
290 | /* Software scanning requires setting promiscuous mode, so we need to |
291 | * pause the Tx queue during the entire operation. |
292 | */ |
293 | ieee802154_mlme_op_pre(local); |
294 | |
295 | sdata->required_filtering = IEEE802154_FILTERING_3_SCAN; |
296 | local->scan_page = request->page; |
297 | local->scan_channel = -1; |
298 | set_bit(IEEE802154_IS_SCANNING, &local->ongoing); |
299 | if (request->type == NL802154_SCAN_ACTIVE) |
300 | mac802154_scan_prepare_beacon_req(local); |
301 | |
302 | nl802154_scan_started(request->wpan_phy, request->wpan_dev); |
303 | |
304 | queue_delayed_work(local->mac_wq, &local->scan_work, 0); |
305 | |
306 | return 0; |
307 | } |
308 | |
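/* Rx path handler for beacons received while scanning: validate the frame,
 * build a coordinator descriptor (source address, page/channel, LQI,
 * superframe specification) and report it to userspace through nl802154.
 */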
309 | int mac802154_process_beacon(struct ieee802154_local *local, |
310 | struct sk_buff *skb, |
311 | u8 page, u8 channel) |
312 | { |
313 | struct ieee802154_beacon_hdr *bh = (void *)skb->data; |
314 | struct ieee802154_addr *src = &mac_cb(skb)->source; |
315 | struct cfg802154_scan_request *scan_req; |
316 | struct ieee802154_coord_desc desc; |
317 | |
318 | if (skb->len != sizeof(*bh)) |
319 | return -EINVAL; |
320 | |
321 | if (unlikely(src->mode == IEEE802154_ADDR_NONE)) |
322 | return -EINVAL; |
323 | |
324 | dev_dbg(&skb->dev->dev, |
325 | "BEACON received on page %u channel %u\n" , |
326 | page, channel); |
327 | |
328 | memcpy(&desc.addr, src, sizeof(desc.addr)); |
329 | desc.page = page; |
330 | desc.channel = channel; |
331 | desc.link_quality = mac_cb(skb)->lqi; |
332 | desc.superframe_spec = get_unaligned_le16(skb->data); |
333 | desc.gts_permit = bh->gts_permit; |
334 | |
335 | trace_802154_scan_event(&desc); |
336 | |
337 | rcu_read_lock(); |
338 | scan_req = rcu_dereference(local->scan_req); |
339 | if (likely(scan_req)) |
340 | nl802154_scan_event(scan_req->wpan_phy, scan_req->wpan_dev, &desc); |
341 | rcu_read_unlock(); |
342 | |
343 | return 0; |
344 | } |
345 | |
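/* Build one beacon from the parameters cached in local->beacon, bump the
 * beacon sequence number and hand the skb to the MLME transmit helper (see
 * the rationale at the end of the function).
 */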
346 | static int mac802154_transmit_beacon(struct ieee802154_local *local, |
347 | struct wpan_dev *wpan_dev) |
348 | { |
349 | struct cfg802154_beacon_request *beacon_req; |
350 | struct ieee802154_sub_if_data *sdata; |
351 | struct sk_buff *skb; |
352 | int ret; |
353 | |
354 | /* Update the sequence number */ |
355 | local->beacon.mhr.seq = atomic_inc_return(&wpan_dev->bsn) & 0xFF; |
356 | |
357 | skb = alloc_skb(IEEE802154_BEACON_SKB_SZ, GFP_KERNEL); |
358 | if (!skb) |
359 | return -ENOBUFS; |
360 | |
361 | rcu_read_lock(); |
362 | beacon_req = rcu_dereference(local->beacon_req); |
363 | if (unlikely(!beacon_req)) { |
364 | rcu_read_unlock(); |
365 | kfree_skb(skb); |
366 | return -EINVAL; |
367 | } |
368 | |
369 | sdata = IEEE802154_WPAN_DEV_TO_SUB_IF(beacon_req->wpan_dev); |
370 | skb->dev = sdata->dev; |
371 | |
372 | rcu_read_unlock(); |
373 | |
374 | ret = ieee802154_beacon_push(skb, &local->beacon); |
375 | if (ret) { |
376 | kfree_skb(skb); |
377 | return ret; |
378 | } |
379 | |
380 | /* Using the MLME transmission helper for sending beacons is a bit |
381 | * overkill because we do not really care about the final outcome. |
382 | * |
383 | * Even so, going through the whole net stack with a regular |
384 | * dev_queue_xmit() is not relevant either because we want beacons to be |
385 | * sent "now" rather than go through the whole net stack scheduling |
386 | * (qdisc & co). |
387 | * |
388 | * Finally, using ieee802154_subif_start_xmit() would only be an option |
389 | * if we had a generic transmit helper which would acquire the |
390 | * HARD_TX_LOCK() to prevent buffer handling conflicts with regular |
391 | * packets. |
392 | * |
393 | * So for now we keep it simple and send beacons with our MLME helper, |
394 | * even if it stops the ieee802154 queue entirely during these |
395 | * transmissions, which anyway does not have a huge impact on the |
396 | * performance given the current design of the stack. |
397 | */ |
398 | return ieee802154_mlme_tx(local, sdata, skb); |
399 | } |
400 | |
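/* Beacon worker: send one beacon, then re-arm the work after the configured
 * beacon interval, except when beacons are only sent as answers to beacon
 * requests.
 */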
401 | void mac802154_beacon_worker(struct work_struct *work) |
402 | { |
403 | struct ieee802154_local *local = |
404 | container_of(work, struct ieee802154_local, beacon_work.work); |
405 | struct cfg802154_beacon_request *beacon_req; |
406 | struct ieee802154_sub_if_data *sdata; |
407 | struct wpan_dev *wpan_dev; |
408 | u8 interval; |
409 | int ret; |
410 | |
411 | rcu_read_lock(); |
412 | beacon_req = rcu_dereference(local->beacon_req); |
413 | if (unlikely(!beacon_req)) { |
414 | rcu_read_unlock(); |
415 | return; |
416 | } |
417 | |
418 | sdata = IEEE802154_WPAN_DEV_TO_SUB_IF(beacon_req->wpan_dev); |
419 | |
420 | /* Wait an arbitrary amount of time in case we cannot use the device */ |
421 | if (local->suspended || !ieee802154_sdata_running(sdata)) { |
422 | rcu_read_unlock(); |
423 | queue_delayed_work(local->mac_wq, &local->beacon_work, |
424 | msecs_to_jiffies(1000)); |
425 | return; |
426 | } |
427 | |
428 | wpan_dev = beacon_req->wpan_dev; |
429 | interval = beacon_req->interval; |
430 | |
431 | rcu_read_unlock(); |
432 | |
433 | dev_dbg(&sdata->dev->dev, "Sending beacon\n" ); |
434 | ret = mac802154_transmit_beacon(local, wpan_dev); |
435 | if (ret) |
436 | dev_err(&sdata->dev->dev, |
437 | "Beacon could not be transmitted (%d)\n" , ret); |
438 | |
439 | if (interval < IEEE802154_ACTIVE_SCAN_DURATION) |
440 | queue_delayed_work(local->mac_wq, &local->beacon_work, |
441 | local->beacon_interval); |
442 | } |
443 | |
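/* Stop an ongoing beaconing operation: clear the flag, cancel the delayed
 * work, free the request (RCU-delayed) and notify userspace.
 */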
444 | int mac802154_stop_beacons_locked(struct ieee802154_local *local, |
445 | struct ieee802154_sub_if_data *sdata) |
446 | { |
447 | struct wpan_dev *wpan_dev = &sdata->wpan_dev; |
448 | struct cfg802154_beacon_request *request; |
449 | |
450 | ASSERT_RTNL(); |
451 | |
452 | if (!mac802154_is_beaconing(local)) |
453 | return -ESRCH; |
454 | |
455 | clear_bit(IEEE802154_IS_BEACONING, &local->ongoing); |
456 | cancel_delayed_work(&local->beacon_work); |
457 | request = rcu_replace_pointer(local->beacon_req, NULL, 1); |
458 | if (!request) |
459 | return 0; |
460 | kvfree_rcu_mightsleep(request); |
461 | |
462 | nl802154_beaconing_done(wpan_dev); |
463 | |
464 | return 0; |
465 | } |
466 | |
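/* Start (or restart) beaconing: store the request, fill in the constant part
 * of the beacon frame from the wpan_dev parameters and kick the beacon
 * worker, unless beacons are only to be sent as answers to beacon requests.
 */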
467 | int mac802154_send_beacons_locked(struct ieee802154_sub_if_data *sdata, |
468 | struct cfg802154_beacon_request *request) |
469 | { |
470 | struct ieee802154_local *local = sdata->local; |
471 | struct wpan_dev *wpan_dev = &sdata->wpan_dev; |
472 | |
473 | ASSERT_RTNL(); |
474 | |
475 | if (mac802154_is_beaconing(local)) |
476 | mac802154_stop_beacons_locked(local, sdata); |
477 | |
478 | /* Store beaconing parameters */ |
479 | rcu_assign_pointer(local->beacon_req, request); |
480 | |
481 | set_bit(IEEE802154_IS_BEACONING, &local->ongoing); |
482 | |
483 | memset(&local->beacon, 0, sizeof(local->beacon)); |
484 | local->beacon.mhr.fc.type = IEEE802154_FC_TYPE_BEACON; |
485 | local->beacon.mhr.fc.security_enabled = 0; |
486 | local->beacon.mhr.fc.frame_pending = 0; |
487 | local->beacon.mhr.fc.ack_request = 0; |
488 | local->beacon.mhr.fc.intra_pan = 0; |
489 | local->beacon.mhr.fc.dest_addr_mode = IEEE802154_NO_ADDRESSING; |
490 | local->beacon.mhr.fc.version = IEEE802154_2003_STD; |
491 | local->beacon.mhr.fc.source_addr_mode = IEEE802154_EXTENDED_ADDRESSING; |
492 | atomic_set(&request->wpan_dev->bsn, -1); |
493 | local->beacon.mhr.source.mode = IEEE802154_ADDR_LONG; |
494 | local->beacon.mhr.source.pan_id = request->wpan_dev->pan_id; |
495 | local->beacon.mhr.source.extended_addr = request->wpan_dev->extended_addr; |
496 | local->beacon.mac_pl.beacon_order = request->interval; |
497 | if (request->interval <= IEEE802154_MAX_SCAN_DURATION) |
498 | local->beacon.mac_pl.superframe_order = request->interval; |
499 | local->beacon.mac_pl.final_cap_slot = 0xf; |
500 | local->beacon.mac_pl.battery_life_ext = 0; |
501 | local->beacon.mac_pl.pan_coordinator = !wpan_dev->parent; |
502 | local->beacon.mac_pl.assoc_permit = 1; |
503 | |
504 | if (request->interval == IEEE802154_ACTIVE_SCAN_DURATION) |
505 | return 0; |
506 | |
507 | /* Start the beacon work */ |
508 | local->beacon_interval = |
509 | mac802154_scan_get_channel_time(request->interval, |
510 | request->wpan_phy->symbol_duration); |
511 | queue_delayed_work(local->mac_wq, &local->beacon_work, 0); |
512 | |
513 | return 0; |
514 | } |
515 | |
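/* Associate with a coordinator: send an acknowledged ASSOCIATION REQUEST,
 * then wait (up to 10s) for mac802154_process_association_resp() to complete
 * local->assoc_done with the status and the short address allocated by the
 * coordinator.
 */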
516 | int mac802154_perform_association(struct ieee802154_sub_if_data *sdata, |
517 | struct ieee802154_pan_device *coord, |
518 | __le16 *short_addr) |
519 | { |
520 | u64 ceaddr = swab64((__force u64)coord->extended_addr); |
521 | struct ieee802154_association_req_frame frame = {}; |
522 | struct ieee802154_local *local = sdata->local; |
523 | struct wpan_dev *wpan_dev = &sdata->wpan_dev; |
524 | struct sk_buff *skb; |
525 | int ret; |
526 | |
527 | frame.mhr.fc.type = IEEE802154_FC_TYPE_MAC_CMD; |
528 | frame.mhr.fc.security_enabled = 0; |
529 | frame.mhr.fc.frame_pending = 0; |
530 | frame.mhr.fc.ack_request = 1; /* We always expect an ack here */ |
531 | frame.mhr.fc.intra_pan = 0; |
532 | frame.mhr.fc.dest_addr_mode = (coord->mode == IEEE802154_ADDR_LONG) ? |
533 | IEEE802154_EXTENDED_ADDRESSING : IEEE802154_SHORT_ADDRESSING; |
534 | frame.mhr.fc.version = IEEE802154_2003_STD; |
535 | frame.mhr.fc.source_addr_mode = IEEE802154_EXTENDED_ADDRESSING; |
536 | frame.mhr.source.mode = IEEE802154_ADDR_LONG; |
537 | frame.mhr.source.pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST); |
538 | frame.mhr.source.extended_addr = wpan_dev->extended_addr; |
539 | frame.mhr.dest.mode = coord->mode; |
540 | frame.mhr.dest.pan_id = coord->pan_id; |
541 | if (coord->mode == IEEE802154_ADDR_LONG) |
542 | frame.mhr.dest.extended_addr = coord->extended_addr; |
543 | else |
544 | frame.mhr.dest.short_addr = coord->short_addr; |
545 | frame.mhr.seq = atomic_inc_return(&wpan_dev->dsn) & 0xFF; |
546 | frame.mac_pl.cmd_id = IEEE802154_CMD_ASSOCIATION_REQ; |
547 | frame.assoc_req_pl.device_type = 1; |
548 | frame.assoc_req_pl.power_source = 1; |
549 | frame.assoc_req_pl.rx_on_when_idle = 1; |
550 | frame.assoc_req_pl.alloc_addr = 1; |
551 | |
552 | skb = alloc_skb(IEEE802154_MAC_CMD_SKB_SZ + sizeof(frame.assoc_req_pl), |
553 | GFP_KERNEL); |
554 | if (!skb) |
555 | return -ENOBUFS; |
556 | |
557 | skb->dev = sdata->dev; |
558 | |
559 | ret = ieee802154_mac_cmd_push(skb, &frame, &frame.assoc_req_pl, |
560 | sizeof(frame.assoc_req_pl)); |
561 | if (ret) { |
562 | kfree_skb(skb); |
563 | return ret; |
564 | } |
565 | |
566 | local->assoc_dev = coord; |
567 | reinit_completion(&local->assoc_done); |
568 | set_bit(IEEE802154_IS_ASSOCIATING, &local->ongoing); |
569 | |
570 | ret = ieee802154_mlme_tx_one_locked(local, sdata, skb); |
571 | if (ret) { |
572 | if (ret > 0) |
573 | ret = (ret == IEEE802154_NO_ACK) ? -EREMOTEIO : -EIO; |
574 | dev_warn(&sdata->dev->dev, |
575 | "No ASSOC REQ ACK received from %8phC\n" , &ceaddr); |
576 | goto clear_assoc; |
577 | } |
578 | |
579 | ret = wait_for_completion_killable_timeout(&local->assoc_done, 10 * HZ); |
580 | if (ret <= 0) { |
581 | dev_warn(&sdata->dev->dev, |
582 | "No ASSOC RESP received from %8phC\n" , &ceaddr); |
583 | ret = -ETIMEDOUT; |
584 | goto clear_assoc; |
585 | } |
586 | |
587 | if (local->assoc_status != IEEE802154_ASSOCIATION_SUCCESSFUL) { |
588 | if (local->assoc_status == IEEE802154_PAN_AT_CAPACITY) |
589 | ret = -ERANGE; |
590 | else |
591 | ret = -EPERM; |
592 | |
593 | dev_warn(&sdata->dev->dev, |
594 | "Negative ASSOC RESP received from %8phC: %s\n" , &ceaddr, |
595 | local->assoc_status == IEEE802154_PAN_AT_CAPACITY ? |
596 | "PAN at capacity" : "access denied" ); |
597 | } |
598 | |
599 | ret = 0; |
600 | *short_addr = local->assoc_addr; |
601 | |
602 | clear_assoc: |
603 | clear_bit(IEEE802154_IS_ASSOCIATING, &local->ongoing); |
604 | local->assoc_dev = NULL; |
605 | |
606 | return ret; |
607 | } |
608 | |
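/* Rx handler for ASSOCIATION RESPONSE frames: check that the frame comes from
 * the coordinator we are associating with, record the returned status and
 * short address, and wake up mac802154_perform_association().
 */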
609 | int mac802154_process_association_resp(struct ieee802154_sub_if_data *sdata, |
610 | struct sk_buff *skb) |
611 | { |
612 | struct ieee802154_addr *src = &mac_cb(skb)->source; |
613 | struct ieee802154_addr *dest = &mac_cb(skb)->dest; |
614 | u64 deaddr = swab64((__force u64)dest->extended_addr); |
615 | struct ieee802154_local *local = sdata->local; |
616 | struct wpan_dev *wpan_dev = &sdata->wpan_dev; |
617 | struct ieee802154_assoc_resp_pl resp_pl = {}; |
618 | |
619 | if (skb->len != sizeof(resp_pl)) |
620 | return -EINVAL; |
621 | |
622 | if (unlikely(src->mode != IEEE802154_EXTENDED_ADDRESSING || |
623 | dest->mode != IEEE802154_EXTENDED_ADDRESSING)) |
624 | return -EINVAL; |
625 | |
626 | if (unlikely(dest->extended_addr != wpan_dev->extended_addr || |
627 | src->extended_addr != local->assoc_dev->extended_addr)) |
628 | return -ENODEV; |
629 | |
630 | memcpy(&resp_pl, skb->data, sizeof(resp_pl)); |
631 | local->assoc_addr = resp_pl.short_addr; |
632 | local->assoc_status = resp_pl.status; |
633 | |
634 | dev_dbg(&skb->dev->dev, |
635 | "ASSOC RESP 0x%x received from %8phC, getting short address %04x\n" , |
636 | local->assoc_status, &deaddr, local->assoc_addr); |
637 | |
638 | complete(&local->assoc_done); |
639 | |
640 | return 0; |
641 | } |
642 | |
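/* Send an acknowledged DISASSOCIATION NOTIFICATION command carrying the
 * disassociation reason to the target device (our parent coordinator or one
 * of our children).
 */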
643 | int mac802154_send_disassociation_notif(struct ieee802154_sub_if_data *sdata, |
644 | struct ieee802154_pan_device *target, |
645 | u8 reason) |
646 | { |
647 | struct ieee802154_disassociation_notif_frame frame = {}; |
648 | u64 teaddr = swab64((__force u64)target->extended_addr); |
649 | struct ieee802154_local *local = sdata->local; |
650 | struct wpan_dev *wpan_dev = &sdata->wpan_dev; |
651 | struct sk_buff *skb; |
652 | int ret; |
653 | |
654 | frame.mhr.fc.type = IEEE802154_FC_TYPE_MAC_CMD; |
655 | frame.mhr.fc.security_enabled = 0; |
656 | frame.mhr.fc.frame_pending = 0; |
657 | frame.mhr.fc.ack_request = 1; |
658 | frame.mhr.fc.intra_pan = 1; |
659 | frame.mhr.fc.dest_addr_mode = (target->mode == IEEE802154_ADDR_LONG) ? |
660 | IEEE802154_EXTENDED_ADDRESSING : IEEE802154_SHORT_ADDRESSING; |
661 | frame.mhr.fc.version = IEEE802154_2003_STD; |
662 | frame.mhr.fc.source_addr_mode = IEEE802154_EXTENDED_ADDRESSING; |
663 | frame.mhr.source.mode = IEEE802154_ADDR_LONG; |
664 | frame.mhr.source.pan_id = wpan_dev->pan_id; |
665 | frame.mhr.source.extended_addr = wpan_dev->extended_addr; |
666 | frame.mhr.dest.mode = target->mode; |
667 | frame.mhr.dest.pan_id = wpan_dev->pan_id; |
668 | if (target->mode == IEEE802154_ADDR_LONG) |
669 | frame.mhr.dest.extended_addr = target->extended_addr; |
670 | else |
671 | frame.mhr.dest.short_addr = target->short_addr; |
672 | frame.mhr.seq = atomic_inc_return(&wpan_dev->dsn) & 0xFF; |
673 | frame.mac_pl.cmd_id = IEEE802154_CMD_DISASSOCIATION_NOTIFY; |
674 | frame.disassoc_pl = reason; |
675 | |
676 | skb = alloc_skb(IEEE802154_MAC_CMD_SKB_SZ + sizeof(frame.disassoc_pl), |
677 | GFP_KERNEL); |
678 | if (!skb) |
679 | return -ENOBUFS; |
680 | |
681 | skb->dev = sdata->dev; |
682 | |
683 | ret = ieee802154_mac_cmd_push(skb, &frame, &frame.disassoc_pl, |
684 | sizeof(frame.disassoc_pl)); |
685 | if (ret) { |
686 | kfree_skb(skb); |
687 | return ret; |
688 | } |
689 | |
690 | ret = ieee802154_mlme_tx_one_locked(local, sdata, skb); |
691 | if (ret) { |
692 | dev_warn(&sdata->dev->dev, |
693 | "No DISASSOC ACK received from %8phC\n" , &teaddr); |
694 | if (ret > 0) |
695 | ret = (ret == IEEE802154_NO_ACK) ? -EREMOTEIO : -EIO; |
696 | return ret; |
697 | } |
698 | |
699 | dev_dbg(&sdata->dev->dev, "DISASSOC ACK received from %8phC\n" , &teaddr); |
700 | return 0; |
701 | } |
702 | |
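/* Coordinator side: send the ASSOCIATION RESPONSE carrying the association
 * status and, on success, the short address allocated to the child.
 */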
703 | static int |
704 | mac802154_send_association_resp_locked(struct ieee802154_sub_if_data *sdata, |
705 | struct ieee802154_pan_device *target, |
706 | struct ieee802154_assoc_resp_pl *assoc_resp_pl) |
707 | { |
708 | u64 teaddr = swab64((__force u64)target->extended_addr); |
709 | struct ieee802154_association_resp_frame frame = {}; |
710 | struct ieee802154_local *local = sdata->local; |
711 | struct wpan_dev *wpan_dev = &sdata->wpan_dev; |
712 | struct sk_buff *skb; |
713 | int ret; |
714 | |
715 | frame.mhr.fc.type = IEEE802154_FC_TYPE_MAC_CMD; |
716 | frame.mhr.fc.security_enabled = 0; |
717 | frame.mhr.fc.frame_pending = 0; |
718 | frame.mhr.fc.ack_request = 1; /* We always expect an ack here */ |
719 | frame.mhr.fc.intra_pan = 1; |
720 | frame.mhr.fc.dest_addr_mode = IEEE802154_EXTENDED_ADDRESSING; |
721 | frame.mhr.fc.version = IEEE802154_2003_STD; |
722 | frame.mhr.fc.source_addr_mode = IEEE802154_EXTENDED_ADDRESSING; |
723 | frame.mhr.source.mode = IEEE802154_ADDR_LONG; |
724 | frame.mhr.source.extended_addr = wpan_dev->extended_addr; |
725 | frame.mhr.dest.mode = IEEE802154_ADDR_LONG; |
726 | frame.mhr.dest.pan_id = wpan_dev->pan_id; |
727 | frame.mhr.dest.extended_addr = target->extended_addr; |
728 | frame.mhr.seq = atomic_inc_return(&wpan_dev->dsn) & 0xFF; |
729 | frame.mac_pl.cmd_id = IEEE802154_CMD_ASSOCIATION_RESP; |
730 | |
731 | skb = alloc_skb(IEEE802154_MAC_CMD_SKB_SZ + sizeof(*assoc_resp_pl), |
732 | GFP_KERNEL); |
733 | if (!skb) |
734 | return -ENOBUFS; |
735 | |
736 | skb->dev = sdata->dev; |
737 | |
738 | ret = ieee802154_mac_cmd_push(skb, &frame, assoc_resp_pl, |
739 | sizeof(*assoc_resp_pl)); |
740 | if (ret) { |
741 | kfree_skb(skb); |
742 | return ret; |
743 | } |
744 | |
745 | ret = ieee802154_mlme_tx_locked(local, sdata, skb); |
746 | if (ret) { |
747 | dev_warn(&sdata->dev->dev, |
748 | "No ASSOC RESP ACK received from %8phC\n" , &teaddr); |
749 | if (ret > 0) |
750 | ret = (ret == IEEE802154_NO_ACK) ? -EREMOTEIO : -EIO; |
751 | return ret; |
752 | } |
753 | |
754 | return 0; |
755 | } |
756 | |
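/* Coordinator side Rx handler for ASSOCIATION REQUEST frames: allocate a
 * child entry, optionally assign a free short address, answer with an
 * ASSOCIATION RESPONSE and record the child, replacing a previous entry if
 * the request was a retransmission.
 */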
757 | int mac802154_process_association_req(struct ieee802154_sub_if_data *sdata, |
758 | struct sk_buff *skb) |
759 | { |
760 | struct wpan_dev *wpan_dev = &sdata->wpan_dev; |
761 | struct ieee802154_addr *src = &mac_cb(skb)->source; |
762 | struct ieee802154_addr *dest = &mac_cb(skb)->dest; |
763 | struct ieee802154_assoc_resp_pl assoc_resp_pl = {}; |
764 | struct ieee802154_assoc_req_pl assoc_req_pl; |
765 | struct ieee802154_pan_device *child, *exchild; |
766 | struct ieee802154_addr tmp = {}; |
767 | u64 ceaddr; |
768 | int ret; |
769 | |
770 | if (skb->len != sizeof(assoc_req_pl)) |
771 | return -EINVAL; |
772 | |
773 | if (unlikely(src->mode != IEEE802154_EXTENDED_ADDRESSING)) |
774 | return -EINVAL; |
775 | |
776 | if (unlikely(dest->pan_id != wpan_dev->pan_id)) |
777 | return -ENODEV; |
778 | |
779 | if (dest->mode == IEEE802154_EXTENDED_ADDRESSING && |
780 | unlikely(dest->extended_addr != wpan_dev->extended_addr)) |
781 | return -ENODEV; |
782 | else if (dest->mode == IEEE802154_SHORT_ADDRESSING && |
783 | unlikely(dest->short_addr != wpan_dev->short_addr)) |
784 | return -ENODEV; |
785 | |
786 | if (wpan_dev->parent) { |
787 | dev_dbg(&sdata->dev->dev, |
788 | "Ignoring ASSOC REQ, not the PAN coordinator\n" ); |
789 | return -ENODEV; |
790 | } |
791 | |
792 | mutex_lock(&wpan_dev->association_lock); |
793 | |
794 | memcpy(&assoc_req_pl, skb->data, sizeof(assoc_req_pl)); |
795 | if (assoc_req_pl.assoc_type) { |
796 | dev_err(&skb->dev->dev, "Fast associations not supported yet\n" ); |
797 | ret = -EOPNOTSUPP; |
798 | goto unlock; |
799 | } |
800 | |
801 | child = kzalloc(sizeof(*child), GFP_KERNEL); |
802 | if (!child) { |
803 | ret = -ENOMEM; |
804 | goto unlock; |
805 | } |
806 | |
807 | child->extended_addr = src->extended_addr; |
808 | child->mode = IEEE802154_EXTENDED_ADDRESSING; |
809 | ceaddr = swab64((__force u64)child->extended_addr); |
810 | |
811 | if (wpan_dev->nchildren >= wpan_dev->max_associations) { |
812 | if (!wpan_dev->max_associations) |
813 | assoc_resp_pl.status = IEEE802154_PAN_ACCESS_DENIED; |
814 | else |
815 | assoc_resp_pl.status = IEEE802154_PAN_AT_CAPACITY; |
816 | assoc_resp_pl.short_addr = cpu_to_le16(IEEE802154_ADDR_SHORT_BROADCAST); |
817 | dev_dbg(&sdata->dev->dev, |
818 | "Refusing ASSOC REQ from child %8phC, %s\n" , &ceaddr, |
819 | assoc_resp_pl.status == IEEE802154_PAN_ACCESS_DENIED ? |
820 | "access denied" : "too many children" ); |
821 | } else { |
822 | assoc_resp_pl.status = IEEE802154_ASSOCIATION_SUCCESSFUL; |
823 | if (assoc_req_pl.alloc_addr) { |
824 | assoc_resp_pl.short_addr = cfg802154_get_free_short_addr(wpan_dev); |
825 | child->mode = IEEE802154_SHORT_ADDRESSING; |
826 | } else { |
827 | assoc_resp_pl.short_addr = cpu_to_le16(IEEE802154_ADDR_SHORT_UNSPEC); |
828 | } |
829 | child->short_addr = assoc_resp_pl.short_addr; |
830 | dev_dbg(&sdata->dev->dev, |
831 | "Accepting ASSOC REQ from child %8phC, providing short address 0x%04x\n" , |
832 | &ceaddr, le16_to_cpu(child->short_addr)); |
833 | } |
834 | |
835 | ret = mac802154_send_association_resp_locked(sdata, child, &assoc_resp_pl); |
836 | if (ret || assoc_resp_pl.status != IEEE802154_ASSOCIATION_SUCCESSFUL) { |
837 | kfree(child); |
838 | goto unlock; |
839 | } |
840 | |
841 | dev_dbg(&sdata->dev->dev, |
842 | "Successful association with new child %8phC\n" , &ceaddr); |
843 | |
844 | /* Ensure this child is not already associated (might happen due to |
845 | * retransmissions); in that case drop the previous entry. |
846 | */ |
847 | tmp.mode = child->mode; |
848 | tmp.extended_addr = child->extended_addr; |
849 | exchild = cfg802154_device_is_child(wpan_dev, &tmp); |
850 | if (exchild) { |
851 | dev_dbg(&sdata->dev->dev, |
852 | "Child %8phC was already known\n" , &ceaddr); |
853 | list_del(entry: &exchild->node); |
854 | } |
855 | |
856 | list_add(&child->node, &wpan_dev->children); |
857 | wpan_dev->nchildren++; |
858 | |
859 | unlock: |
860 | mutex_unlock(&wpan_dev->association_lock); |
861 | return ret; |
862 | } |
863 | |
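/* Rx handler for DISASSOCIATION NOTIFICATION frames: if the sender is our
 * parent coordinator or one of our children, drop the corresponding
 * association.
 */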
864 | int mac802154_process_disassociation_notif(struct ieee802154_sub_if_data *sdata, |
865 | struct sk_buff *skb) |
866 | { |
867 | struct ieee802154_addr *src = &mac_cb(skb)->source; |
868 | struct ieee802154_addr *dest = &mac_cb(skb)->dest; |
869 | struct wpan_dev *wpan_dev = &sdata->wpan_dev; |
870 | struct ieee802154_pan_device *child = NULL; |
871 | struct ieee802154_addr target; |
872 | bool parent; |
873 | u64 teaddr; |
874 | |
875 | if (skb->len != sizeof(u8)) |
876 | return -EINVAL; |
877 | |
878 | if (unlikely(src->mode != IEEE802154_EXTENDED_ADDRESSING)) |
879 | return -EINVAL; |
880 | |
881 | if (dest->mode == IEEE802154_EXTENDED_ADDRESSING && |
882 | unlikely(dest->extended_addr != wpan_dev->extended_addr)) |
883 | return -ENODEV; |
884 | else if (dest->mode == IEEE802154_SHORT_ADDRESSING && |
885 | unlikely(dest->short_addr != wpan_dev->short_addr)) |
886 | return -ENODEV; |
887 | |
888 | if (dest->pan_id != wpan_dev->pan_id) |
889 | return -ENODEV; |
890 | |
891 | target.mode = IEEE802154_EXTENDED_ADDRESSING; |
892 | target.extended_addr = src->extended_addr; |
893 | teaddr = swab64((__force u64)target.extended_addr); |
894 | dev_dbg(&skb->dev->dev, "Processing DISASSOC NOTIF from %8phC\n" , &teaddr); |
895 | |
896 | mutex_lock(&wpan_dev->association_lock); |
897 | parent = cfg802154_device_is_parent(wpan_dev, &target); |
898 | if (!parent) |
899 | child = cfg802154_device_is_child(wpan_dev, &target); |
900 | if (!parent && !child) { |
901 | mutex_unlock(&wpan_dev->association_lock); |
902 | return -EINVAL; |
903 | } |
904 | |
905 | if (parent) { |
906 | kfree(wpan_dev->parent); |
907 | wpan_dev->parent = NULL; |
908 | } else { |
909 | list_del(&child->node); |
910 | kfree(child); |
911 | wpan_dev->nchildren--; |
912 | } |
913 | |
914 | mutex_unlock(&wpan_dev->association_lock); |
915 | |
916 | return 0; |
917 | } |
918 | |