// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/* af_can.c - Protocol family CAN core module
 *            (used by different CAN protocol modules)
 *
 * Copyright (c) 2002-2017 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/can/can-ml.h>
#include <linux/ratelimit.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#include "af_can.h"

MODULE_DESCRIPTION("Controller Area Network PF_CAN core");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>, "
	      "Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");

MODULE_ALIAS_NETPROTO(PF_CAN);

static int stats_timer __read_mostly = 1;
module_param(stats_timer, int, 0444);
MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");

static struct kmem_cache *rcv_cache __read_mostly;

/* table of registered CAN protocols */
static const struct can_proto __rcu *proto_tab[CAN_NPROTO] __read_mostly;
static DEFINE_MUTEX(proto_tab_lock);

static atomic_t skbcounter = ATOMIC_INIT(0);

/* af_can socket functions */

void can_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_error_queue);
}
EXPORT_SYMBOL(can_sock_destruct);

static const struct can_proto *can_get_proto(int protocol)
{
	const struct can_proto *cp;

	rcu_read_lock();
	cp = rcu_dereference(proto_tab[protocol]);
	if (cp && !try_module_get(cp->prot->owner))
		cp = NULL;
	rcu_read_unlock();

	return cp;
}

static inline void can_put_proto(const struct can_proto *cp)
{
	module_put(cp->prot->owner);
}

static int can_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	const struct can_proto *cp;
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (protocol < 0 || protocol >= CAN_NPROTO)
		return -EINVAL;

	cp = can_get_proto(protocol);

#ifdef CONFIG_MODULES
	if (!cp) {
		/* try to load protocol module if kernel is modular */

		err = request_module("can-proto-%d", protocol);

		/* In case of error we only print a message but don't
		 * return the error code immediately. Below we will
		 * return -EPROTONOSUPPORT
		 */
		if (err)
			pr_err_ratelimited("can: request_module (can-proto-%d) failed.\n",
					   protocol);

		cp = can_get_proto(protocol);
	}
#endif

	/* check for available protocol and correct usage */

	if (!cp)
		return -EPROTONOSUPPORT;

	if (cp->type != sock->type) {
		err = -EPROTOTYPE;
		goto errout;
	}

	sock->ops = cp->ops;

	sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot, kern);
	if (!sk) {
		err = -ENOMEM;
		goto errout;
	}

	sock_init_data(sock, sk);
	sk->sk_destruct = can_sock_destruct;

	if (sk->sk_prot->init)
		err = sk->sk_prot->init(sk);

	if (err) {
		/* release sk on errors */
		sock_orphan(sk);
		sock_put(sk);
		sock->sk = NULL;
	} else {
		sock_prot_inuse_add(net, sk->sk_prot, 1);
	}

 errout:
	can_put_proto(cp);
	return err;
}

/* af_can tx path */

/**
 * can_send - transmit a CAN frame (optionally with local loopback)
 * @skb: pointer to socket buffer with CAN frame in data section
 * @loop: loopback for listeners on local CAN sockets (recommended default!)
 *
 * Due to the loopback this routine must not be called from hardirq context.
 *
 * Return:
 *  0 on success
 *  -ENETDOWN when the selected interface is down
 *  -ENOBUFS on full driver queue (see net_xmit_errno())
 *  -ENOMEM when local loopback failed at calling skb_clone()
 *  -EPERM when trying to send on a non-CAN interface
 *  -EMSGSIZE CAN frame size is bigger than CAN interface MTU
 *  -EINVAL when the skb->data does not contain a valid CAN frame
 */
int can_send(struct sk_buff *skb, int loop)
{
	struct sk_buff *newskb = NULL;
	struct can_pkg_stats *pkg_stats = dev_net(skb->dev)->can.pkg_stats;
	int err = -EINVAL;

	if (can_is_canxl_skb(skb)) {
		skb->protocol = htons(ETH_P_CANXL);
	} else if (can_is_can_skb(skb)) {
		skb->protocol = htons(ETH_P_CAN);
	} else if (can_is_canfd_skb(skb)) {
		struct canfd_frame *cfd = (struct canfd_frame *)skb->data;

		skb->protocol = htons(ETH_P_CANFD);

		/* set CAN FD flag for CAN FD frames by default */
		cfd->flags |= CANFD_FDF;
	} else {
		goto inval_skb;
	}

	/* Make sure the CAN frame can pass the selected CAN netdevice. */
	if (unlikely(skb->len > skb->dev->mtu)) {
		err = -EMSGSIZE;
		goto inval_skb;
	}

	if (unlikely(skb->dev->type != ARPHRD_CAN)) {
		err = -EPERM;
		goto inval_skb;
	}

	if (unlikely(!(skb->dev->flags & IFF_UP))) {
		err = -ENETDOWN;
		goto inval_skb;
	}

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	if (loop) {
		/* local loopback of sent CAN frames */

		/* indication for the CAN driver: do loopback */
		skb->pkt_type = PACKET_LOOPBACK;

		/* The reference to the originating sock may be required
		 * by the receiving socket to check whether the frame is
		 * its own. Example: can_raw sockopt CAN_RAW_RECV_OWN_MSGS
		 * Therefore we have to ensure that skb->sk remains the
		 * reference to the originating sock by restoring skb->sk
		 * after each skb_clone() or skb_orphan() usage.
		 */

		if (!(skb->dev->flags & IFF_ECHO)) {
			/* If the interface is not capable to do loopback
			 * itself, we do it here.
			 */
			newskb = skb_clone(skb, GFP_ATOMIC);
			if (!newskb) {
				kfree_skb(skb);
				return -ENOMEM;
			}

			can_skb_set_owner(newskb, skb->sk);
			newskb->ip_summed = CHECKSUM_UNNECESSARY;
			newskb->pkt_type = PACKET_BROADCAST;
		}
	} else {
		/* indication for the CAN driver: no loopback required */
		skb->pkt_type = PACKET_HOST;
	}

	/* send to netdevice */
	err = dev_queue_xmit(skb);
	if (err > 0)
		err = net_xmit_errno(err);

	if (err) {
		kfree_skb(newskb);
		return err;
	}

	if (newskb)
		netif_rx(newskb);

	/* update statistics */
	atomic_long_inc(&pkg_stats->tx_frames);
	atomic_long_inc(&pkg_stats->tx_frames_delta);

	return 0;

inval_skb:
	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(can_send);
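
/* Illustrative sketch, not part of af_can.c: how a CAN protocol module
 * typically builds an skb around a Classical CAN frame and hands it to
 * can_send(). The function name example_send() and its arguments are made
 * up for illustration; real users (e.g. raw_sendmsg() in net/can/raw.c)
 * additionally set priority, timestamping and do more careful error handling.
 */
static int example_send(struct sock *sk, struct net_device *dev,
			const struct can_frame *cf, int loopback)
{
	struct sk_buff *skb;

	/* reserve room for the CAN skb private area in front of the frame */
	skb = alloc_skb(sizeof(*cf) + sizeof(struct can_skb_priv), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;

	/* skb->len must equal CAN_MTU and cf->len must be <= 8 so that
	 * can_is_can_skb() accepts the buffer inside can_send()
	 */
	skb_put_data(skb, cf, sizeof(*cf));

	skb->dev = dev;
	can_skb_set_owner(skb, sk);	/* keep originating sock for RECV_OWN_MSGS */

	/* can_send() consumes the skb on both success and failure */
	return can_send(skb, loopback);
}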

/* af_can rx path */

static struct can_dev_rcv_lists *can_dev_rcv_lists_find(struct net *net,
							 struct net_device *dev)
{
	if (dev) {
		struct can_ml_priv *can_ml = can_get_ml_priv(dev);

		return &can_ml->dev_rcv_lists;
	} else {
		return net->can.rx_alldev_list;
	}
}

/**
 * effhash - hash function for 29 bit CAN identifier reduction
 * @can_id: 29 bit CAN identifier
 *
 * Description:
 *  To reduce the linear traversal in one linked list of _single_ EFF CAN
 *  frame subscriptions the 29 bit identifier is mapped to 10 bits.
 *  (see CAN_EFF_RCV_HASH_BITS definition)
 *
 * Return:
 *  Hash value from 0x000 - 0x3FF ( enforced by CAN_EFF_RCV_HASH_BITS mask )
 */
static unsigned int effhash(canid_t can_id)
{
	unsigned int hash;

	hash = can_id;
	hash ^= can_id >> CAN_EFF_RCV_HASH_BITS;
	hash ^= can_id >> (2 * CAN_EFF_RCV_HASH_BITS);

	return hash & ((1 << CAN_EFF_RCV_HASH_BITS) - 1);
}
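
/* Illustrative worked example, not part of af_can.c, assuming
 * CAN_EFF_RCV_HASH_BITS == 10 as documented above:
 *
 *   can_id           = 0x1FFFF123
 *   can_id >> 10     = 0x0007FFFC
 *   can_id >> 20     = 0x000001FF
 *   XOR of all three = 0x1FF80F20
 *   masked to 10 bit = 0x320
 *
 * so a single-EFF subscription for 0x1FFFF123 lands in rx_eff[0x320] and the
 * receive path only walks that one hash bucket instead of one long list.
 */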

/**
 * can_rcv_list_find - determine optimal filterlist inside device filter struct
 * @can_id: pointer to CAN identifier of a given can_filter
 * @mask: pointer to CAN mask of a given can_filter
 * @dev_rcv_lists: pointer to the device filter struct
 *
 * Description:
 *  Returns the optimal filterlist to reduce the filter handling in the
 *  receive path. This function is called by service functions that need
 *  to register or unregister a can_filter in the filter lists.
 *
 *  A filter matches in general, when
 *
 *          <received_can_id> & mask == can_id & mask
 *
 *  so every bit set in the mask (even CAN_EFF_FLAG, CAN_RTR_FLAG) describes
 *  relevant bits for the filter.
 *
 *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
 *  filter for error messages (CAN_ERR_FLAG bit set in mask). For error msg
 *  frames there is a special filterlist and a special rx path filter handling.
 *
 * Return:
 *  Pointer to optimal filterlist for the given can_id/mask pair.
 *  Consistency checked mask.
 *  Reduced can_id to have a preprocessed filter compare value.
 */
static struct hlist_head *can_rcv_list_find(canid_t *can_id, canid_t *mask,
					    struct can_dev_rcv_lists *dev_rcv_lists)
{
	canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */

	/* filter for error message frames in extra filterlist */
	if (*mask & CAN_ERR_FLAG) {
		/* clear CAN_ERR_FLAG in filter entry */
		*mask &= CAN_ERR_MASK;
		return &dev_rcv_lists->rx[RX_ERR];
	}

	/* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */

#define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG)

	/* ensure valid values in can_mask for 'SFF only' frame filtering */
	if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG))
		*mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS);

	/* reduce condition testing at receive time */
	*can_id &= *mask;

	/* inverse can_id/can_mask filter */
	if (inv)
		return &dev_rcv_lists->rx[RX_INV];

	/* mask == 0 => no condition testing at receive time */
	if (!(*mask))
		return &dev_rcv_lists->rx[RX_ALL];

	/* extra filterlists for the subscription of a single non-RTR can_id */
	if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) &&
	    !(*can_id & CAN_RTR_FLAG)) {
		if (*can_id & CAN_EFF_FLAG) {
			if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS))
				return &dev_rcv_lists->rx_eff[effhash(*can_id)];
		} else {
			if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
				return &dev_rcv_lists->rx_sff[*can_id];
		}
	}

	/* default: filter via can_id/can_mask */
	return &dev_rcv_lists->rx[RX_FIL];
}
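
/* Illustrative sketch, not part of af_can.c: where a few typical can_filter
 * settings end up according to the rules above. The example values are
 * hypothetical; the constants come from linux/can.h.
 *
 *   { .can_id = 0x123, .can_mask = CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG }
 *       -> rx_sff[0x123]        (single non-RTR SFF id, matched by array index)
 *
 *   { .can_id = 0x123 | CAN_EFF_FLAG,
 *     .can_mask = CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG }
 *       -> rx_eff[effhash(...)] (single non-RTR EFF id, matched per hash bucket)
 *
 *   { .can_id = 0, .can_mask = 0 }
 *       -> rx[RX_ALL]           (no compare at receive time)
 *
 *   { .can_id = 0x200 | CAN_INV_FILTER, .can_mask = 0x700 }
 *       -> rx[RX_INV]           (inverted can_id/can_mask filter)
 *
 *   { .can_id = 0, .can_mask = CAN_ERR_FLAG | CAN_ERR_MASK }
 *       -> rx[RX_ERR]           (error message frame subscriptions)
 */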

/**
 * can_rx_register - subscribe CAN frames from a specific interface
 * @net: the applicable net namespace
 * @dev: pointer to netdevice (NULL => subscribe from 'all' CAN devices list)
 * @can_id: CAN identifier (see description)
 * @mask: CAN mask (see description)
 * @func: callback function on filter match
 * @data: returned parameter for callback function
 * @ident: string for calling module identification
 * @sk: socket pointer (might be NULL)
 *
 * Description:
 *  Invokes the callback function with the received sk_buff and the given
 *  parameter 'data' on a matching receive filter. A filter matches, when
 *
 *          <received_can_id> & mask == can_id & mask
 *
 *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
 *  filter for error message frames (CAN_ERR_FLAG bit set in mask).
 *
 *  The provided pointer to the sk_buff is guaranteed to be valid as long as
 *  the callback function is running. The callback function must *not* free
 *  the given sk_buff while processing its task. When the given sk_buff is
 *  needed after the end of the callback function it must be cloned inside
 *  the callback function with skb_clone().
 *
 * Return:
 *  0 on success
 *  -ENOMEM on missing cache mem to create subscription entry
 *  -ENODEV unknown device
 */
int can_rx_register(struct net *net, struct net_device *dev, canid_t can_id,
		    canid_t mask, void (*func)(struct sk_buff *, void *),
		    void *data, char *ident, struct sock *sk)
{
	struct receiver *rcv;
	struct hlist_head *rcv_list;
	struct can_dev_rcv_lists *dev_rcv_lists;
	struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;

	/* insert new receiver  (dev,canid,mask) -> (func,data) */

	if (dev && (dev->type != ARPHRD_CAN || !can_get_ml_priv(dev)))
		return -ENODEV;

	if (dev && !net_eq(net, dev_net(dev)))
		return -ENODEV;

	rcv = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
	if (!rcv)
		return -ENOMEM;

	spin_lock_bh(&net->can.rcvlists_lock);

	dev_rcv_lists = can_dev_rcv_lists_find(net, dev);
	rcv_list = can_rcv_list_find(&can_id, &mask, dev_rcv_lists);

	rcv->can_id = can_id;
	rcv->mask = mask;
	rcv->matches = 0;
	rcv->func = func;
	rcv->data = data;
	rcv->ident = ident;
	rcv->sk = sk;

	hlist_add_head_rcu(&rcv->list, rcv_list);
	dev_rcv_lists->entries++;

	rcv_lists_stats->rcv_entries++;
	rcv_lists_stats->rcv_entries_max = max(rcv_lists_stats->rcv_entries_max,
					       rcv_lists_stats->rcv_entries);
	spin_unlock_bh(&net->can.rcvlists_lock);

	return 0;
}
EXPORT_SYMBOL(can_rx_register);
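
/* Illustrative sketch, not part of af_can.c: how a protocol module registers
 * a receive callback and later removes it again. example_rcv(), example_bind()
 * and example_unbind() are hypothetical names; a real user of this API is
 * raw_enable_filters() in net/can/raw.c.
 */
static void example_rcv(struct sk_buff *skb, void *data)
{
	struct sock *sk = data;
	struct sk_buff *clone;

	/* the skb is only valid while this callback runs, so clone it
	 * before handing it over to the socket receive queue
	 */
	clone = skb_clone(skb, GFP_ATOMIC);
	if (!clone)
		return;

	if (sock_queue_rcv_skb(sk, clone) < 0)
		kfree_skb(clone);
}

static int example_bind(struct net *net, struct net_device *dev, struct sock *sk)
{
	/* subscribe to the single non-RTR SFF id 0x123 on this device */
	return can_rx_register(net, dev, 0x123,
			       CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG,
			       example_rcv, sk, "example", sk);
}

static void example_unbind(struct net *net, struct net_device *dev, struct sock *sk)
{
	/* must mirror the can_id/mask/func/data used at registration time */
	can_rx_unregister(net, dev, 0x123,
			  CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG,
			  example_rcv, sk);
}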

/* can_rx_delete_receiver - rcu callback for single receiver entry removal */
static void can_rx_delete_receiver(struct rcu_head *rp)
{
	struct receiver *rcv = container_of(rp, struct receiver, rcu);
	struct sock *sk = rcv->sk;

	kmem_cache_free(rcv_cache, rcv);
	if (sk)
		sock_put(sk);
}

/**
 * can_rx_unregister - unsubscribe CAN frames from a specific interface
 * @net: the applicable net namespace
 * @dev: pointer to netdevice (NULL => unsubscribe from 'all' CAN devices list)
 * @can_id: CAN identifier
 * @mask: CAN mask
 * @func: callback function on filter match
 * @data: returned parameter for callback function
 *
 * Description:
 *  Removes subscription entry depending on given (subscription) values.
 */
void can_rx_unregister(struct net *net, struct net_device *dev, canid_t can_id,
		       canid_t mask, void (*func)(struct sk_buff *, void *),
		       void *data)
{
	struct receiver *rcv = NULL;
	struct hlist_head *rcv_list;
	struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats;
	struct can_dev_rcv_lists *dev_rcv_lists;

	if (dev && dev->type != ARPHRD_CAN)
		return;

	if (dev && !net_eq(net, dev_net(dev)))
		return;

	spin_lock_bh(&net->can.rcvlists_lock);

	dev_rcv_lists = can_dev_rcv_lists_find(net, dev);
	rcv_list = can_rcv_list_find(&can_id, &mask, dev_rcv_lists);

	/* Search the receiver list for the item to delete. This should
	 * exist, since no receiver may be unregistered that hasn't
	 * been registered before.
	 */
	hlist_for_each_entry_rcu(rcv, rcv_list, list) {
		if (rcv->can_id == can_id && rcv->mask == mask &&
		    rcv->func == func && rcv->data == data)
			break;
	}

	/* Check for bugs in CAN protocol implementations using af_can.c:
	 * 'rcv' will be NULL if no matching list item was found for removal.
	 * As this case may potentially happen when closing a socket while
	 * the notifier for removing the CAN netdev is running we just print
	 * a warning here.
	 */
	if (!rcv) {
		pr_warn("can: receive list entry not found for dev %s, id %03X, mask %03X\n",
			DNAME(dev), can_id, mask);
		goto out;
	}

	hlist_del_rcu(&rcv->list);
	dev_rcv_lists->entries--;

	if (rcv_lists_stats->rcv_entries > 0)
		rcv_lists_stats->rcv_entries--;

 out:
	spin_unlock_bh(&net->can.rcvlists_lock);

	/* schedule the receiver item for deletion */
	if (rcv) {
		if (rcv->sk)
			sock_hold(rcv->sk);
		call_rcu(&rcv->rcu, can_rx_delete_receiver);
	}
}
EXPORT_SYMBOL(can_rx_unregister);

static inline void deliver(struct sk_buff *skb, struct receiver *rcv)
{
	rcv->func(skb, rcv->data);
	rcv->matches++;
}

static int can_rcv_filter(struct can_dev_rcv_lists *dev_rcv_lists, struct sk_buff *skb)
{
	struct receiver *rcv;
	int matches = 0;
	struct can_frame *cf = (struct can_frame *)skb->data;
	canid_t can_id = cf->can_id;

	if (dev_rcv_lists->entries == 0)
		return 0;

	if (can_id & CAN_ERR_FLAG) {
		/* check for error message frame entries only */
		hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_ERR], list) {
			if (can_id & rcv->mask) {
				deliver(skb, rcv);
				matches++;
			}
		}
		return matches;
	}

	/* check for unfiltered entries */
	hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_ALL], list) {
		deliver(skb, rcv);
		matches++;
	}

	/* check for can_id/mask entries */
	hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_FIL], list) {
		if ((can_id & rcv->mask) == rcv->can_id) {
			deliver(skb, rcv);
			matches++;
		}
	}

	/* check for inverted can_id/mask entries */
	hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx[RX_INV], list) {
		if ((can_id & rcv->mask) != rcv->can_id) {
			deliver(skb, rcv);
			matches++;
		}
	}

	/* check filterlists for single non-RTR can_ids */
	if (can_id & CAN_RTR_FLAG)
		return matches;

	if (can_id & CAN_EFF_FLAG) {
		hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx_eff[effhash(can_id)], list) {
			if (rcv->can_id == can_id) {
				deliver(skb, rcv);
				matches++;
			}
		}
	} else {
		can_id &= CAN_SFF_MASK;
		hlist_for_each_entry_rcu(rcv, &dev_rcv_lists->rx_sff[can_id], list) {
			deliver(skb, rcv);
			matches++;
		}
	}

	return matches;
}

static void can_receive(struct sk_buff *skb, struct net_device *dev)
{
	struct can_dev_rcv_lists *dev_rcv_lists;
	struct net *net = dev_net(dev);
	struct can_pkg_stats *pkg_stats = net->can.pkg_stats;
	int matches;

	/* update statistics */
	atomic_long_inc(&pkg_stats->rx_frames);
	atomic_long_inc(&pkg_stats->rx_frames_delta);

	/* create non-zero unique skb identifier together with *skb */
	while (!(can_skb_prv(skb)->skbcnt))
		can_skb_prv(skb)->skbcnt = atomic_inc_return(&skbcounter);

	rcu_read_lock();

	/* deliver the packet to sockets listening on all devices */
	matches = can_rcv_filter(net->can.rx_alldev_list, skb);

	/* find receive list for this device */
	dev_rcv_lists = can_dev_rcv_lists_find(net, dev);
	matches += can_rcv_filter(dev_rcv_lists, skb);

	rcu_read_unlock();

	/* consume the skbuff allocated by the netdevice driver */
	consume_skb(skb);

	if (matches > 0) {
		atomic_long_inc(&pkg_stats->matches);
		atomic_long_inc(&pkg_stats->matches_delta);
	}
}

static int can_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	if (unlikely(dev->type != ARPHRD_CAN || !can_get_ml_priv(dev) || !can_is_can_skb(skb))) {
		pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d\n",
			     dev->type, skb->len);

		kfree_skb(skb);
		return NET_RX_DROP;
	}

	can_receive(skb, dev);
	return NET_RX_SUCCESS;
}

static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
		     struct packet_type *pt, struct net_device *orig_dev)
{
	if (unlikely(dev->type != ARPHRD_CAN || !can_get_ml_priv(dev) || !can_is_canfd_skb(skb))) {
		pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d\n",
			     dev->type, skb->len);

		kfree_skb(skb);
		return NET_RX_DROP;
	}

	can_receive(skb, dev);
	return NET_RX_SUCCESS;
}

static int canxl_rcv(struct sk_buff *skb, struct net_device *dev,
		     struct packet_type *pt, struct net_device *orig_dev)
{
	if (unlikely(dev->type != ARPHRD_CAN || !can_get_ml_priv(dev) || !can_is_canxl_skb(skb))) {
		pr_warn_once("PF_CAN: dropped non conform CAN XL skbuff: dev type %d, len %d\n",
			     dev->type, skb->len);

		kfree_skb(skb);
		return NET_RX_DROP;
	}

	can_receive(skb, dev);
	return NET_RX_SUCCESS;
}

/* af_can protocol functions */

/**
 * can_proto_register - register CAN transport protocol
 * @cp: pointer to CAN protocol structure
 *
 * Return:
 *  0 on success
 *  -EINVAL invalid (out of range) protocol number
 *  -EBUSY  protocol already in use
 *  -ENOBUFS if proto_register() fails
 */
int can_proto_register(const struct can_proto *cp)
{
	int proto = cp->protocol;
	int err = 0;

	if (proto < 0 || proto >= CAN_NPROTO) {
		pr_err("can: protocol number %d out of range\n", proto);
		return -EINVAL;
	}

	err = proto_register(cp->prot, 0);
	if (err < 0)
		return err;

	mutex_lock(&proto_tab_lock);

	if (rcu_access_pointer(proto_tab[proto])) {
		pr_err("can: protocol %d already registered\n", proto);
		err = -EBUSY;
	} else {
		RCU_INIT_POINTER(proto_tab[proto], cp);
	}

	mutex_unlock(&proto_tab_lock);

	if (err < 0)
		proto_unregister(cp->prot);

	return err;
}
EXPORT_SYMBOL(can_proto_register);
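
/* Illustrative sketch, not part of af_can.c: the registration pattern a CAN
 * transport protocol module follows, modeled on net/can/raw.c. All example_*
 * names are placeholders; a real protocol provides its own CAN_* protocol
 * number, fills in the proto_ops callbacks and uses a per-socket structure
 * that embeds struct sock.
 */
static struct proto example_proto __read_mostly = {
	.name     = "EXAMPLE_CAN",
	.owner    = THIS_MODULE,
	.obj_size = sizeof(struct sock),
};

static const struct proto_ops example_ops = {
	.family = PF_CAN,
	.owner  = THIS_MODULE,
	/* ... release, bind, sendmsg, recvmsg, ... */
};

static const struct can_proto example_can_proto = {
	.type     = SOCK_RAW,
	.protocol = CAN_RAW,	/* placeholder: each module owns one protocol slot */
	.ops      = &example_ops,
	.prot     = &example_proto,
};

static __init int example_module_init(void)
{
	/* makes socket(PF_CAN, SOCK_RAW, <protocol>) resolve to this module;
	 * a MODULE_ALIAS("can-proto-<n>") additionally lets can_create()
	 * autoload it via request_module("can-proto-%d")
	 */
	return can_proto_register(&example_can_proto);
}

static __exit void example_module_exit(void)
{
	can_proto_unregister(&example_can_proto);
}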

/**
 * can_proto_unregister - unregister CAN transport protocol
 * @cp: pointer to CAN protocol structure
 */
void can_proto_unregister(const struct can_proto *cp)
{
	int proto = cp->protocol;

	mutex_lock(&proto_tab_lock);
	BUG_ON(rcu_access_pointer(proto_tab[proto]) != cp);
	RCU_INIT_POINTER(proto_tab[proto], NULL);
	mutex_unlock(&proto_tab_lock);

	synchronize_rcu();

	proto_unregister(cp->prot);
}
EXPORT_SYMBOL(can_proto_unregister);

static int can_pernet_init(struct net *net)
{
	spin_lock_init(&net->can.rcvlists_lock);
	net->can.rx_alldev_list =
		kzalloc(sizeof(*net->can.rx_alldev_list), GFP_KERNEL);
	if (!net->can.rx_alldev_list)
		goto out;
	net->can.pkg_stats = kzalloc(sizeof(*net->can.pkg_stats), GFP_KERNEL);
	if (!net->can.pkg_stats)
		goto out_free_rx_alldev_list;
	net->can.rcv_lists_stats = kzalloc(sizeof(*net->can.rcv_lists_stats), GFP_KERNEL);
	if (!net->can.rcv_lists_stats)
		goto out_free_pkg_stats;

	if (IS_ENABLED(CONFIG_PROC_FS)) {
		/* the statistics are updated every second (timer triggered) */
		if (stats_timer) {
			timer_setup(&net->can.stattimer, can_stat_update,
				    0);
			mod_timer(&net->can.stattimer,
				  round_jiffies(jiffies + HZ));
		}
		net->can.pkg_stats->jiffies_init = jiffies;
		can_init_proc(net);
	}

	return 0;

 out_free_pkg_stats:
	kfree(net->can.pkg_stats);
 out_free_rx_alldev_list:
	kfree(net->can.rx_alldev_list);
 out:
	return -ENOMEM;
}

static void can_pernet_exit(struct net *net)
{
	if (IS_ENABLED(CONFIG_PROC_FS)) {
		can_remove_proc(net);
		if (stats_timer)
			timer_delete_sync(&net->can.stattimer);
	}

	kfree(net->can.rx_alldev_list);
	kfree(net->can.pkg_stats);
	kfree(net->can.rcv_lists_stats);
}

/* af_can module init/exit functions */

static struct packet_type can_packet __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAN),
	.func = can_rcv,
};

static struct packet_type canfd_packet __read_mostly = {
	.type = cpu_to_be16(ETH_P_CANFD),
	.func = canfd_rcv,
};

static struct packet_type canxl_packet __read_mostly = {
	.type = cpu_to_be16(ETH_P_CANXL),
	.func = canxl_rcv,
};

static const struct net_proto_family can_family_ops = {
	.family = PF_CAN,
	.create = can_create,
	.owner = THIS_MODULE,
};

static struct pernet_operations can_pernet_ops __read_mostly = {
	.init = can_pernet_init,
	.exit = can_pernet_exit,
};

static __init int can_init(void)
{
	int err;

	/* check for correct padding to be able to use the structs similarly */
	BUILD_BUG_ON(offsetof(struct can_frame, len) !=
		     offsetof(struct canfd_frame, len) ||
		     offsetof(struct can_frame, len) !=
		     offsetof(struct canxl_frame, flags) ||
		     offsetof(struct can_frame, data) !=
		     offsetof(struct canfd_frame, data));

	pr_info("can: controller area network core\n");

	rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver),
				      0, 0, NULL);
	if (!rcv_cache)
		return -ENOMEM;

	err = register_pernet_subsys(&can_pernet_ops);
	if (err)
		goto out_pernet;

	/* protocol register */
	err = sock_register(&can_family_ops);
	if (err)
		goto out_sock;

	dev_add_pack(&can_packet);
	dev_add_pack(&canfd_packet);
	dev_add_pack(&canxl_packet);

	return 0;

out_sock:
	unregister_pernet_subsys(&can_pernet_ops);
out_pernet:
	kmem_cache_destroy(rcv_cache);

	return err;
}

static __exit void can_exit(void)
{
	/* protocol unregister */
	dev_remove_pack(&canxl_packet);
	dev_remove_pack(&canfd_packet);
	dev_remove_pack(&can_packet);
	sock_unregister(PF_CAN);

	unregister_pernet_subsys(&can_pernet_ops);

	rcu_barrier(); /* Wait for completion of call_rcu()'s */

	kmem_cache_destroy(rcv_cache);
}

module_init(can_init);
module_exit(can_exit);

source code of linux/net/can/af_can.c