// SPDX-License-Identifier: GPL-2.0-only
/*
 * CAIF Interface registration.
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland
 *
 * Borrowed heavily from file: pn_dev.c. Thanks to Remi Denis-Courmont
 * and Sakari Ailus <sakari.ailus@nokia.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <net/netns/generic.h>
#include <net/net_namespace.h>
#include <net/pkt_sched.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_dev.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
#include <net/caif/cfserl.h>

MODULE_DESCRIPTION("ST-Ericsson CAIF modem protocol support");
MODULE_LICENSE("GPL");

/* Used for local tracking of the CAIF net devices */
struct caif_device_entry {
	struct cflayer layer;
	struct list_head list;
	struct net_device *netdev;
	int __percpu *pcpu_refcnt;
	spinlock_t flow_lock;
	struct sk_buff *xoff_skb;
	void (*xoff_skb_dtor)(struct sk_buff *skb);
	bool xoff;
};

struct caif_device_entry_list {
	struct list_head list;
	/* Protects simultaneous deletes in the list */
	struct mutex lock;
};

struct caif_net {
	struct cfcnfg *cfg;
	struct caif_device_entry_list caifdevs;
};

static unsigned int caif_net_id;
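/*
 * TX-queue fill level, in percent of dev->tx_queue_len, above which
 * transmit() below signals flow-off to the layers above.
 */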
static int q_high = 50; /* Percent */

struct cfcnfg *get_cfcnfg(struct net *net)
{
	struct caif_net *caifn;
	caifn = net_generic(net, caif_net_id);
	return caifn->cfg;
}
EXPORT_SYMBOL(get_cfcnfg);

static struct caif_device_entry_list *caif_device_list(struct net *net)
{
	struct caif_net *caifn;
	caifn = net_generic(net, caif_net_id);
	return &caifn->caifdevs;
}

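/*
 * Device entries are reference counted with a per-CPU counter rather than
 * an atomic_t: caifd_hold()/caifd_put() only touch the local CPU's counter,
 * and caifd_refcnt_read() sums all CPUs to decide, under the caifdevs
 * mutex, whether an entry can be freed.
 */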
static void caifd_put(struct caif_device_entry *e)
{
	this_cpu_dec(*e->pcpu_refcnt);
}

static void caifd_hold(struct caif_device_entry *e)
{
	this_cpu_inc(*e->pcpu_refcnt);
}

static int caifd_refcnt_read(struct caif_device_entry *e)
{
	int i, refcnt = 0;
	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
	return refcnt;
}

/* Allocate new CAIF device. */
static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
{
	struct caif_device_entry *caifd;

	caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
	if (!caifd)
		return NULL;
	caifd->pcpu_refcnt = alloc_percpu(int);
	if (!caifd->pcpu_refcnt) {
		kfree(caifd);
		return NULL;
	}
	caifd->netdev = dev;
	dev_hold(dev);
	return caifd;
}

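/*
 * Look up the CAIF device entry for a net_device. Callers must hold either
 * rcu_read_lock() or the RTNL, as checked by the list traversal below.
 */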
static struct caif_device_entry *caif_get(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs =
		caif_device_list(dev_net(dev));
	struct caif_device_entry *caifd;

	list_for_each_entry_rcu(caifd, &caifdevs->list, list,
				lockdep_rtnl_is_held()) {
		if (caifd->netdev == dev)
			return caifd;
	}
	return NULL;
}

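/*
 * skb destructor installed by transmit() when the TX queue backs up. It runs
 * once the device has consumed the packet: it restores and calls the original
 * destructor and, if flow was turned off, signals FLOW_ON back up the stack.
 */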
static void caif_flow_cb(struct sk_buff *skb)
{
	struct caif_device_entry *caifd;
	void (*dtor)(struct sk_buff *skb) = NULL;
	bool send_xoff;

	WARN_ON(skb->dev == NULL);

	rcu_read_lock();
	caifd = caif_get(skb->dev);

	WARN_ON(caifd == NULL);
	if (!caifd) {
		rcu_read_unlock();
		return;
	}

	caifd_hold(caifd);
	rcu_read_unlock();

	spin_lock_bh(&caifd->flow_lock);
	send_xoff = caifd->xoff;
	caifd->xoff = false;
	dtor = caifd->xoff_skb_dtor;

	if (WARN_ON(caifd->xoff_skb != skb))
		skb = NULL;

	caifd->xoff_skb = NULL;
	caifd->xoff_skb_dtor = NULL;

	spin_unlock_bh(&caifd->flow_lock);

	if (dtor && skb)
		dtor(skb);

	if (send_xoff)
		caifd->layer.up->
			ctrlcmd(caifd->layer.up,
				_CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
				caifd->layer.id);
	caifd_put(caifd);
}

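/*
 * Transmit a CAIF packet on the underlying net_device. If the device has a
 * real TX queue and it is stopped or filled beyond q_high percent, the skb's
 * destructor is temporarily replaced with caif_flow_cb() and FLOW_OFF is
 * signalled upwards until that packet has been consumed by the device.
 */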
static int transmit(struct cflayer *layer, struct cfpkt *pkt)
{
	int err, high = 0, qlen = 0;
	struct caif_device_entry *caifd =
		container_of(layer, struct caif_device_entry, layer);
	struct sk_buff *skb;
	struct netdev_queue *txq;

	rcu_read_lock_bh();

	skb = cfpkt_tonative(pkt);
	skb->dev = caifd->netdev;
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_CAIF);

	/* Check if we need to handle xoff */
	if (likely(caifd->netdev->priv_flags & IFF_NO_QUEUE))
		goto noxoff;

	if (unlikely(caifd->xoff))
		goto noxoff;

	if (likely(!netif_queue_stopped(caifd->netdev))) {
		struct Qdisc *sch;

		/* If we run with a TX queue, check if the queue is too long */
		txq = netdev_get_tx_queue(skb->dev, 0);
		sch = rcu_dereference_bh(txq->qdisc);
		if (likely(qdisc_is_empty(sch)))
			goto noxoff;

		/* can check for explicit qdisc len value only !NOLOCK,
		 * always set flow off otherwise
		 */
		high = (caifd->netdev->tx_queue_len * q_high) / 100;
		if (!(sch->flags & TCQ_F_NOLOCK) && likely(sch->q.qlen < high))
			goto noxoff;
	}

	/* Hold lock while accessing xoff */
	spin_lock_bh(&caifd->flow_lock);
	if (caifd->xoff) {
		spin_unlock_bh(&caifd->flow_lock);
		goto noxoff;
	}

	/*
	 * Handle flow off: we do this by temporarily hijacking this
	 * skb's destructor function and replacing it with our own
	 * flow-on callback. The callback will set flow-on and call
	 * the original destructor.
	 */

	pr_debug("queue has stopped(%d) or is full (%d > %d)\n",
			netif_queue_stopped(caifd->netdev),
			qlen, high);
	caifd->xoff = true;
	caifd->xoff_skb = skb;
	caifd->xoff_skb_dtor = skb->destructor;
	skb->destructor = caif_flow_cb;
	spin_unlock_bh(&caifd->flow_lock);

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
noxoff:
	rcu_read_unlock_bh();

	err = dev_queue_xmit(skb);
	if (err > 0)
		err = -EIO;

	return err;
}

/*
 * Stuff received packets into the CAIF stack.
 * On error, returns non-zero and releases the skb.
 */
static int receive(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pkttype, struct net_device *orig_dev)
{
	struct cfpkt *pkt;
	struct caif_device_entry *caifd;
	int err;

	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);

	rcu_read_lock();
	caifd = caif_get(dev);

	if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
			!netif_oper_up(caifd->netdev)) {
		rcu_read_unlock();
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* Hold reference to netdevice while using CAIF stack */
	caifd_hold(caifd);
	rcu_read_unlock();

	err = caifd->layer.up->receive(caifd->layer.up, pkt);

	/* For -EILSEQ the packet is not freed so free it now */
	if (err == -EILSEQ)
		cfpkt_destroy(pkt);

	/* Release reference to stack upwards */
	caifd_put(caifd);

	if (err != 0)
		err = NET_RX_DROP;
	return err;
}

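/*
 * receive() is hooked into the core network stack for ETH_P_CAIF frames via
 * dev_add_pack() in caif_device_init() below.
 */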
static struct packet_type caif_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAIF),
	.func = receive,
};

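/*
 * Flow-control hook handed to the link driver via caif_dev_common.flowctrl
 * (see NETDEV_REGISTER below); translates the driver's on/off notification
 * into a FLOW_ON/FLOW_OFF indication to the layer above.
 */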
static void dev_flowctrl(struct net_device *dev, int on)
{
	struct caif_device_entry *caifd;

	rcu_read_lock();

	caifd = caif_get(dev);
	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
		rcu_read_unlock();
		return;
	}

	caifd_hold(caifd);
	rcu_read_unlock();

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 on ?
				 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
	caifd_put(caifd);
}

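/*
 * caif_enroll_dev() - register a net_device as a CAIF physical interface.
 * Allocates a tracking entry (taking a reference on @dev), adds it to the
 * per-namespace device list and plugs it into the CAIF configuration as a
 * physical layer. On success *@layer points at the new layer, whose transmit
 * hook is transmit() above, and, if @rcv_func is non-NULL, *@rcv_func is set
 * to receive() so the caller can feed incoming frames into the stack.
 */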
int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
		    struct cflayer *link_support, int head_room,
		    struct cflayer **layer,
		    int (**rcv_func)(struct sk_buff *, struct net_device *,
				     struct packet_type *,
				     struct net_device *))
{
	struct caif_device_entry *caifd;
	enum cfcnfg_phy_preference pref;
	struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
	struct caif_device_entry_list *caifdevs;
	int res;

	caifdevs = caif_device_list(dev_net(dev));
	caifd = caif_device_alloc(dev);
	if (!caifd)
		return -ENOMEM;
	*layer = &caifd->layer;
	spin_lock_init(&caifd->flow_lock);

	switch (caifdev->link_select) {
	case CAIF_LINK_HIGH_BANDW:
		pref = CFPHYPREF_HIGH_BW;
		break;
	case CAIF_LINK_LOW_LATENCY:
		pref = CFPHYPREF_LOW_LAT;
		break;
	default:
		pref = CFPHYPREF_HIGH_BW;
		break;
	}
	mutex_lock(&caifdevs->lock);
	list_add_rcu(&caifd->list, &caifdevs->list);

	strscpy(caifd->layer.name, dev->name,
		sizeof(caifd->layer.name));
	caifd->layer.transmit = transmit;
	res = cfcnfg_add_phy_layer(cfg,
				   dev,
				   &caifd->layer,
				   pref,
				   link_support,
				   caifdev->use_fcs,
				   head_room);
	mutex_unlock(&caifdevs->lock);
	if (rcv_func)
		*rcv_func = receive;
	return res;
}
EXPORT_SYMBOL(caif_enroll_dev);
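
/*
 * Illustrative sketch only (not part of this file): a CAIF link driver is
 * expected to embed struct caif_dev_common at the start of its netdev
 * private data (the notifier below reads it via netdev_priv()) and register
 * a device of type ARPHRD_CAIF, so that it is enrolled on NETDEV_REGISTER
 * and handed the dev_flowctrl() callback. Setting use_frag requests cfserl
 * framing, as handled in the NETDEV_REGISTER case below. The my_caif_priv
 * and my_caif_setup names are hypothetical:
 *
 *	struct my_caif_priv {
 *		struct caif_dev_common common;
 *	};
 *
 *	static void my_caif_setup(struct net_device *dev)
 *	{
 *		struct my_caif_priv *priv = netdev_priv(dev);
 *
 *		dev->type = ARPHRD_CAIF;
 *		priv->common.link_select = CAIF_LINK_LOW_LATENCY;
 *		priv->common.use_frag = true;
 *		priv->common.use_stx = true;
 *		priv->common.use_fcs = true;
 *	}
 */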

/* notify Caif of device events */
static int caif_device_notify(struct notifier_block *me, unsigned long what,
			      void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct caif_device_entry *caifd = NULL;
	struct caif_dev_common *caifdev;
	struct cfcnfg *cfg;
	struct cflayer *layer, *link_support;
	int head_room = 0;
	struct caif_device_entry_list *caifdevs;
	int res;

	cfg = get_cfcnfg(dev_net(dev));
	caifdevs = caif_device_list(dev_net(dev));

	caifd = caif_get(dev);
	if (caifd == NULL && dev->type != ARPHRD_CAIF)
		return 0;

	switch (what) {
	case NETDEV_REGISTER:
		if (caifd != NULL)
			break;

		caifdev = netdev_priv(dev);

		link_support = NULL;
		if (caifdev->use_frag) {
			head_room = 1;
			link_support = cfserl_create(dev->ifindex,
						     caifdev->use_stx);
			if (!link_support) {
				pr_warn("Out of memory\n");
				break;
			}
		}
		res = caif_enroll_dev(dev, caifdev, link_support, head_room,
				      &layer, NULL);
		if (res)
			cfserl_release(link_support);
		caifdev->flowctrl = dev_flowctrl;
		break;

	case NETDEV_UP:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (caifd == NULL) {
			rcu_read_unlock();
			break;
		}

		caifd->xoff = false;
		cfcnfg_set_phy_state(cfg, &caifd->layer, true);
		rcu_read_unlock();

		break;

	case NETDEV_DOWN:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
			rcu_read_unlock();
			return -EINVAL;
		}

		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
		caifd_hold(caifd);
		rcu_read_unlock();

		caifd->layer.up->ctrlcmd(caifd->layer.up,
					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
					 caifd->layer.id);

		spin_lock_bh(&caifd->flow_lock);

		/*
		 * Replace our xoff-destructor with the original destructor.
		 * We trust that skb->destructor is always called before
		 * the skb reference becomes invalid. The hijacked SKB
		 * destructor takes the flow_lock, so manipulating
		 * skb->destructor here should be safe.
		 */
		if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
			caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;

		caifd->xoff = false;
		caifd->xoff_skb_dtor = NULL;
		caifd->xoff_skb = NULL;

		spin_unlock_bh(&caifd->flow_lock);
		caifd_put(caifd);
		break;

	case NETDEV_UNREGISTER:
		mutex_lock(&caifdevs->lock);

		caifd = caif_get(dev);
		if (caifd == NULL) {
			mutex_unlock(&caifdevs->lock);
			break;
		}
		list_del_rcu(&caifd->list);

		/*
		 * NETDEV_UNREGISTER is called repeatedly until all reference
		 * counts for the net-device are released. If references to
		 * caifd are taken, simply ignore NETDEV_UNREGISTER and wait
		 * for the next call to NETDEV_UNREGISTER.
		 *
		 * If any packets are in flight down the CAIF stack,
		 * cfcnfg_del_phy_layer will return nonzero.
		 * If no packets are in flight, the CAIF stack associated
		 * with the net-device being unregistered is freed.
		 */

		if (caifd_refcnt_read(caifd) != 0 ||
			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {

			pr_info("Wait for device inuse\n");
			/* Enroll the device again if the CAIF stack is still in use */
			list_add_rcu(&caifd->list, &caifdevs->list);
			mutex_unlock(&caifdevs->lock);
			break;
		}

		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);

		mutex_unlock(&caifdevs->lock);
		break;
	}
	return 0;
}

static struct notifier_block caif_device_notifier = {
	.notifier_call = caif_device_notify,
	.priority = 0,
};

/* Per-namespace Caif devices handling */
static int caif_init_net(struct net *net)
{
	struct caif_net *caifn = net_generic(net, caif_net_id);
	INIT_LIST_HEAD(&caifn->caifdevs.list);
	mutex_init(&caifn->caifdevs.lock);

	caifn->cfg = cfcnfg_create();
	if (!caifn->cfg)
		return -ENOMEM;

	return 0;
}

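/*
 * Per-namespace teardown: unlink every remaining device entry, mark its
 * physical layer down, and wait (bounded to roughly 10 * 250 ms per device)
 * for in-flight references and packets to drain before freeing it.
 */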
static void caif_exit_net(struct net *net)
{
	struct caif_device_entry *caifd, *tmp;
	struct caif_device_entry_list *caifdevs =
		caif_device_list(net);
	struct cfcnfg *cfg = get_cfcnfg(net);

	rtnl_lock();
	mutex_lock(&caifdevs->lock);

	list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
		int i = 0;
		list_del_rcu(&caifd->list);
		cfcnfg_set_phy_state(cfg, &caifd->layer, false);

		while (i < 10 &&
			(caifd_refcnt_read(caifd) != 0 ||
			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {

			pr_info("Wait for device inuse\n");
			msleep(250);
			i++;
		}
		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);
	}
	cfcnfg_remove(cfg);

	mutex_unlock(&caifdevs->lock);
	rtnl_unlock();
}

static struct pernet_operations caif_net_ops = {
	.init = caif_init_net,
	.exit = caif_exit_net,
	.id = &caif_net_id,
	.size = sizeof(struct caif_net),
};

/* Initialize Caif devices list */
static int __init caif_device_init(void)
{
	int result;

	result = register_pernet_subsys(&caif_net_ops);

	if (result)
		return result;

	register_netdevice_notifier(&caif_device_notifier);
	dev_add_pack(&caif_packet_type);

	return result;
}

static void __exit caif_device_exit(void)
{
	unregister_netdevice_notifier(&caif_device_notifier);
	dev_remove_pack(&caif_packet_type);
	unregister_pernet_subsys(&caif_net_ops);
}

module_init(caif_device_init);
module_exit(caif_device_exit);
| 587 | |