| 1 | // SPDX-License-Identifier: GPL-2.0-only |
| 2 | /* |
| 3 | * Copyright (c) 2008, Intel Corporation. |
| 4 | * |
| 5 | * Author: Alexander Duyck <alexander.h.duyck@intel.com> |
| 6 | */ |
| 7 | |
| 8 | #include <linux/module.h> |
| 9 | #include <linux/init.h> |
| 10 | #include <linux/kernel.h> |
| 11 | #include <linux/skbuff.h> |
| 12 | #include <linux/rtnetlink.h> |
| 13 | #include <net/netlink.h> |
| 14 | #include <net/pkt_sched.h> |
| 15 | #include <net/ip.h> |
| 16 | #include <net/ipv6.h> |
| 17 | #include <net/dsfield.h> |
| 18 | #include <net/pkt_cls.h> |
| 19 | #include <net/tc_wrapper.h> |
| 20 | |
| 21 | #include <linux/tc_act/tc_skbedit.h> |
| 22 | #include <net/tc_act/tc_skbedit.h> |
| 23 | |
| 24 | static struct tc_action_ops act_skbedit_ops; |
| 25 | |
| 26 | static u16 tcf_skbedit_hash(struct tcf_skbedit_params *params, |
| 27 | struct sk_buff *skb) |
| 28 | { |
| 29 | u16 queue_mapping = params->queue_mapping; |
| 30 | |
| 31 | if (params->flags & SKBEDIT_F_TXQ_SKBHASH) { |
| 32 | u32 hash = skb_get_hash(skb); |
| 33 | |
| 34 | queue_mapping += hash % params->mapping_mod; |
| 35 | } |
| 36 | |
| 37 | return netdev_cap_txqueue(dev: skb->dev, queue_index: queue_mapping); |
| 38 | } |
| 39 | |
/* Per-packet action handler: apply the configured skbedit edits to @skb.
 *
 * Runs in the datapath under RCU-BH protection; the parameter block is
 * read via rcu_dereference_bh() so a concurrent replace in the init
 * path is safe.  Returns the configured action verdict, or TC_ACT_SHOT
 * if a required IP header could not be pulled into the linear area.
 */
TC_INDIRECT_SCOPE int tcf_skbedit_act(struct sk_buff *skb,
				      const struct tc_action *a,
				      struct tcf_result *res)
{
	struct tcf_skbedit *d = to_skbedit(a);
	struct tcf_skbedit_params *params;

	/* Refresh last-use time stamp and per-cpu byte/packet counters. */
	tcf_lastuse_update(tm: &d->tcf_tm);
	bstats_update(this_cpu_ptr(d->common.cpu_bstats), skb);

	params = rcu_dereference_bh(d->params);

	if (params->flags & SKBEDIT_F_PRIORITY)
		skb->priority = params->priority;
	if (params->flags & SKBEDIT_F_INHERITDSFIELD) {
		/* Derive skb->priority from the packet's DS field instead.
		 * This runs after the SKBEDIT_F_PRIORITY assignment above,
		 * so for IPv4/IPv6 packets it wins when both flags are set.
		 */
		int wlen = skb_network_offset(skb);

		switch (skb_protocol(skb, skip_vlan: true)) {
		case htons(ETH_P_IP):
			wlen += sizeof(struct iphdr);
			if (!pskb_may_pull(skb, len: wlen))
				goto err;
			/* DSCP value: DS field with the two ECN bits shifted out */
			skb->priority = ipv4_get_dsfield(iph: ip_hdr(skb)) >> 2;
			break;

		case htons(ETH_P_IPV6):
			wlen += sizeof(struct ipv6hdr);
			if (!pskb_may_pull(skb, len: wlen))
				goto err;
			skb->priority = ipv6_get_dsfield(ipv6h: ipv6_hdr(skb)) >> 2;
			break;
		}
	}
	/* Only redirect to a queue the device actually exposes. */
	if (params->flags & SKBEDIT_F_QUEUE_MAPPING &&
	    skb->dev->real_num_tx_queues > params->queue_mapping) {
#ifdef CONFIG_NET_EGRESS
		/* Tell the egress path not to re-select a TX queue. */
		netdev_xmit_skip_txqueue(skip: true);
#endif
		skb_set_queue_mapping(skb, queue_mapping: tcf_skbedit_hash(params, skb));
	}
	if (params->flags & SKBEDIT_F_MARK) {
		/* Rewrite only the mark bits selected by the mask. */
		skb->mark &= ~params->mask;
		skb->mark |= params->mark & params->mask;
	}
	if (params->flags & SKBEDIT_F_PTYPE)
		skb->pkt_type = params->ptype;
	return params->action;

err:
	qstats_drop_inc(this_cpu_ptr(d->common.cpu_qstats));
	return TC_ACT_SHOT;
}
| 92 | |
| 93 | static void tcf_skbedit_stats_update(struct tc_action *a, u64 bytes, |
| 94 | u64 packets, u64 drops, |
| 95 | u64 lastuse, bool hw) |
| 96 | { |
| 97 | struct tcf_skbedit *d = to_skbedit(a); |
| 98 | struct tcf_t *tm = &d->tcf_tm; |
| 99 | |
| 100 | tcf_action_update_stats(a, bytes, packets, drops, hw); |
| 101 | tm->lastuse = max_t(u64, tm->lastuse, lastuse); |
| 102 | } |
| 103 | |
/* Netlink attribute policy: minimum payload lengths for each
 * TCA_SKBEDIT_* option accepted by tcf_skbedit_init().
 */
static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
	[TCA_SKBEDIT_PARMS]		= { .len = sizeof(struct tc_skbedit) },
	[TCA_SKBEDIT_PRIORITY]		= { .len = sizeof(u32) },
	[TCA_SKBEDIT_QUEUE_MAPPING]	= { .len = sizeof(u16) },
	[TCA_SKBEDIT_MARK]		= { .len = sizeof(u32) },
	[TCA_SKBEDIT_PTYPE]		= { .len = sizeof(u16) },
	[TCA_SKBEDIT_MASK]		= { .len = sizeof(u32) },
	[TCA_SKBEDIT_FLAGS]		= { .len = sizeof(u64) },
	[TCA_SKBEDIT_QUEUE_MAPPING_MAX]	= { .len = sizeof(u16) },
};
| 114 | |
/* ->init hook: create or update a skbedit action from netlink attributes.
 *
 * Parses the TCA_SKBEDIT_* options into a freshly allocated
 * tcf_skbedit_params block, then swaps it into the action under
 * tcf_lock; the old block (if any) is freed only after an RCU grace
 * period, so tcf_skbedit_act() readers are never disturbed.
 *
 * Returns ACT_P_CREATED for a new action, ACT_P_BOUND when binding to
 * an existing one, 0 on a successful replace, or a negative errno.
 */
static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
			    struct nlattr *est, struct tc_action **a,
			    struct tcf_proto *tp, u32 act_flags,
			    struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, id: act_skbedit_ops.net_id);
	bool bind = act_flags & TCA_ACT_FLAGS_BIND;
	struct tcf_skbedit_params *params_new;
	struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_skbedit *parm;
	struct tcf_skbedit *d;
	u32 flags = 0, *priority = NULL, *mark = NULL, *mask = NULL;
	u16 *queue_mapping = NULL, *ptype = NULL;
	u16 mapping_mod = 1;
	bool exists = false;
	int ret = 0, err;
	u32 index;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_SKBEDIT_MAX, nla,
					  policy: skbedit_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_SKBEDIT_PARMS] == NULL)
		return -EINVAL;

	/* Collect the requested edits: each option sets its SKBEDIT_F_*
	 * bit and keeps a pointer into the parsed attribute payload.
	 */
	if (tb[TCA_SKBEDIT_PRIORITY] != NULL) {
		flags |= SKBEDIT_F_PRIORITY;
		priority = nla_data(nla: tb[TCA_SKBEDIT_PRIORITY]);
	}

	if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) {
		/* On the receive side, queue_mapping only makes sense as
		 * a hardware offload, so require skip_sw there.
		 */
		if (is_tcf_skbedit_ingress(flags: act_flags) &&
		    !(act_flags & TCA_ACT_FLAGS_SKIP_SW)) {
			NL_SET_ERR_MSG_MOD(extack, "\"queue_mapping\" option on receive side is hardware only, use skip_sw" );
			return -EOPNOTSUPP;
		}
		flags |= SKBEDIT_F_QUEUE_MAPPING;
		queue_mapping = nla_data(nla: tb[TCA_SKBEDIT_QUEUE_MAPPING]);
	}

	if (tb[TCA_SKBEDIT_PTYPE] != NULL) {
		ptype = nla_data(nla: tb[TCA_SKBEDIT_PTYPE]);
		if (!skb_pkt_type_ok(ptype: *ptype))
			return -EINVAL;
		flags |= SKBEDIT_F_PTYPE;
	}

	if (tb[TCA_SKBEDIT_MARK] != NULL) {
		flags |= SKBEDIT_F_MARK;
		mark = nla_data(nla: tb[TCA_SKBEDIT_MARK]);
	}

	if (tb[TCA_SKBEDIT_MASK] != NULL) {
		flags |= SKBEDIT_F_MASK;
		mask = nla_data(nla: tb[TCA_SKBEDIT_MASK]);
	}

	if (tb[TCA_SKBEDIT_FLAGS] != NULL) {
		u64 *pure_flags = nla_data(nla: tb[TCA_SKBEDIT_FLAGS]);

		if (*pure_flags & SKBEDIT_F_TXQ_SKBHASH) {
			u16 *queue_mapping_max;

			/* Hash-based queue spreading needs a [min, max]
			 * range: both attributes must be present.
			 */
			if (!tb[TCA_SKBEDIT_QUEUE_MAPPING] ||
			    !tb[TCA_SKBEDIT_QUEUE_MAPPING_MAX]) {
				NL_SET_ERR_MSG_MOD(extack, "Missing required range of queue_mapping." );
				return -EINVAL;
			}

			queue_mapping_max =
				nla_data(nla: tb[TCA_SKBEDIT_QUEUE_MAPPING_MAX]);
			if (*queue_mapping_max < *queue_mapping) {
				NL_SET_ERR_MSG_MOD(extack, "The range of queue_mapping is invalid, max < min." );
				return -EINVAL;
			}

			/* Modulus used by tcf_skbedit_hash(): size of the
			 * inclusive queue range.
			 */
			mapping_mod = *queue_mapping_max - *queue_mapping + 1;
			flags |= SKBEDIT_F_TXQ_SKBHASH;
		}
		if (*pure_flags & SKBEDIT_F_INHERITDSFIELD)
			flags |= SKBEDIT_F_INHERITDSFIELD;
	}

	parm = nla_data(nla: tb[TCA_SKBEDIT_PARMS]);
	index = parm->index;
	/* Look up an existing action by index or reserve a new one. */
	err = tcf_idr_check_alloc(tn, index: &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return ACT_P_BOUND;

	/* At least one edit option must have been supplied. */
	if (!flags) {
		if (exists)
			tcf_idr_release(a: *a, bind);
		else
			tcf_idr_cleanup(tn, index);
		return -EINVAL;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, index, est, a,
				     ops: &act_skbedit_ops, bind, cpustats: true, flags: act_flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		d = to_skbedit(*a);
		ret = ACT_P_CREATED;
	} else {
		d = to_skbedit(*a);
		/* Refuse to overwrite an existing action unless the
		 * caller explicitly requested a replace.
		 */
		if (!(act_flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(a: *a, bind);
			return -EEXIST;
		}
	}
	err = tcf_action_check_ctrlact(action: parm->action, tp, handle: &goto_ch, newchain: extack);
	if (err < 0)
		goto release_idr;

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		err = -ENOMEM;
		goto put_chain;
	}

	params_new->flags = flags;
	if (flags & SKBEDIT_F_PRIORITY)
		params_new->priority = *priority;
	if (flags & SKBEDIT_F_QUEUE_MAPPING) {
		params_new->queue_mapping = *queue_mapping;
		params_new->mapping_mod = mapping_mod;
	}
	if (flags & SKBEDIT_F_MARK)
		params_new->mark = *mark;
	if (flags & SKBEDIT_F_PTYPE)
		params_new->ptype = *ptype;
	/* default behaviour is to use all the bits */
	params_new->mask = 0xffffffff;
	if (flags & SKBEDIT_F_MASK)
		params_new->mask = *mask;

	params_new->action = parm->action;
	/* Publish the new parameter block under tcf_lock; datapath
	 * readers switch over via RCU, the old block is freed after a
	 * grace period.
	 */
	spin_lock_bh(lock: &d->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(a: *a, action: parm->action, newchain: goto_ch);
	params_new = rcu_replace_pointer(d->params, params_new,
					 lockdep_is_held(&d->tcf_lock));
	spin_unlock_bh(lock: &d->tcf_lock);
	if (params_new)
		kfree_rcu(params_new, rcu);
	if (goto_ch)
		tcf_chain_put_by_act(chain: goto_ch);

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(chain: goto_ch);
release_idr:
	tcf_idr_release(a: *a, bind);
	return err;
}
| 282 | |
/* ->dump hook: emit the action's configuration as TCA_SKBEDIT_*
 * netlink attributes into @skb.
 *
 * The parameter block is read under rcu_read_lock() so a concurrent
 * replace is safe.  Returns skb->len on success; on attribute-space
 * exhaustion, trims everything written since entry and returns -1.
 */
static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
			    int bind, int ref)
{
	const struct tcf_skbedit *d = to_skbedit(a);
	/* Remember the tail so a failed dump can be rolled back. */
	unsigned char *b = skb_tail_pointer(skb);
	const struct tcf_skbedit_params *params;
	struct tc_skbedit opt = {
		.index   = d->tcf_index,
		.refcnt  = refcount_read(r: &d->tcf_refcnt) - ref,
		.bindcnt = atomic_read(v: &d->tcf_bindcnt) - bind,
	};
	u64 pure_flags = 0;
	struct tcf_t t;

	rcu_read_lock();
	params = rcu_dereference(d->params);
	opt.action = params->action;

	if (nla_put(skb, attrtype: TCA_SKBEDIT_PARMS, attrlen: sizeof(opt), data: &opt))
		goto nla_put_failure;
	/* Only dump the options that are actually configured. */
	if ((params->flags & SKBEDIT_F_PRIORITY) &&
	    nla_put_u32(skb, attrtype: TCA_SKBEDIT_PRIORITY, value: params->priority))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_QUEUE_MAPPING) &&
	    nla_put_u16(skb, attrtype: TCA_SKBEDIT_QUEUE_MAPPING, value: params->queue_mapping))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_MARK) &&
	    nla_put_u32(skb, attrtype: TCA_SKBEDIT_MARK, value: params->mark))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_PTYPE) &&
	    nla_put_u16(skb, attrtype: TCA_SKBEDIT_PTYPE, value: params->ptype))
		goto nla_put_failure;
	if ((params->flags & SKBEDIT_F_MASK) &&
	    nla_put_u32(skb, attrtype: TCA_SKBEDIT_MASK, value: params->mask))
		goto nla_put_failure;
	if (params->flags & SKBEDIT_F_INHERITDSFIELD)
		pure_flags |= SKBEDIT_F_INHERITDSFIELD;
	if (params->flags & SKBEDIT_F_TXQ_SKBHASH) {
		/* Reconstruct the range upper bound from base + modulus. */
		if (nla_put_u16(skb, attrtype: TCA_SKBEDIT_QUEUE_MAPPING_MAX,
				value: params->queue_mapping + params->mapping_mod - 1))
			goto nla_put_failure;

		pure_flags |= SKBEDIT_F_TXQ_SKBHASH;
	}
	if (pure_flags != 0 &&
	    nla_put(skb, attrtype: TCA_SKBEDIT_FLAGS, attrlen: sizeof(pure_flags), data: &pure_flags))
		goto nla_put_failure;

	tcf_tm_dump(dtm: &t, stm: &d->tcf_tm);
	if (nla_put_64bit(skb, attrtype: TCA_SKBEDIT_TM, attrlen: sizeof(t), data: &t, padattr: TCA_SKBEDIT_PAD))
		goto nla_put_failure;
	rcu_read_unlock();

	return skb->len;

nla_put_failure:
	rcu_read_unlock();
	nlmsg_trim(skb, mark: b);
	return -1;
}
| 343 | |
| 344 | static void tcf_skbedit_cleanup(struct tc_action *a) |
| 345 | { |
| 346 | struct tcf_skbedit *d = to_skbedit(a); |
| 347 | struct tcf_skbedit_params *params; |
| 348 | |
| 349 | params = rcu_dereference_protected(d->params, 1); |
| 350 | if (params) |
| 351 | kfree_rcu(params, rcu); |
| 352 | } |
| 353 | |
| 354 | static size_t tcf_skbedit_get_fill_size(const struct tc_action *act) |
| 355 | { |
| 356 | return nla_total_size(payload: sizeof(struct tc_skbedit)) |
| 357 | + nla_total_size(payload: sizeof(u32)) /* TCA_SKBEDIT_PRIORITY */ |
| 358 | + nla_total_size(payload: sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING */ |
| 359 | + nla_total_size(payload: sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING_MAX */ |
| 360 | + nla_total_size(payload: sizeof(u32)) /* TCA_SKBEDIT_MARK */ |
| 361 | + nla_total_size(payload: sizeof(u16)) /* TCA_SKBEDIT_PTYPE */ |
| 362 | + nla_total_size(payload: sizeof(u32)) /* TCA_SKBEDIT_MASK */ |
| 363 | + nla_total_size_64bit(payload: sizeof(u64)); /* TCA_SKBEDIT_FLAGS */ |
| 364 | } |
| 365 | |
| 366 | static int tcf_skbedit_offload_act_setup(struct tc_action *act, void *entry_data, |
| 367 | u32 *index_inc, bool bind, |
| 368 | struct netlink_ext_ack *extack) |
| 369 | { |
| 370 | if (bind) { |
| 371 | struct flow_action_entry *entry = entry_data; |
| 372 | |
| 373 | if (is_tcf_skbedit_mark(a: act)) { |
| 374 | entry->id = FLOW_ACTION_MARK; |
| 375 | entry->mark = tcf_skbedit_mark(a: act); |
| 376 | } else if (is_tcf_skbedit_ptype(a: act)) { |
| 377 | entry->id = FLOW_ACTION_PTYPE; |
| 378 | entry->ptype = tcf_skbedit_ptype(a: act); |
| 379 | } else if (is_tcf_skbedit_priority(a: act)) { |
| 380 | entry->id = FLOW_ACTION_PRIORITY; |
| 381 | entry->priority = tcf_skbedit_priority(a: act); |
| 382 | } else if (is_tcf_skbedit_tx_queue_mapping(a: act)) { |
| 383 | NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"queue_mapping\" option is used on transmit side" ); |
| 384 | return -EOPNOTSUPP; |
| 385 | } else if (is_tcf_skbedit_rx_queue_mapping(a: act)) { |
| 386 | entry->id = FLOW_ACTION_RX_QUEUE_MAPPING; |
| 387 | entry->rx_queue = tcf_skbedit_rx_queue_mapping(a: act); |
| 388 | } else if (is_tcf_skbedit_inheritdsfield(a: act)) { |
| 389 | NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"inheritdsfield\" option is used" ); |
| 390 | return -EOPNOTSUPP; |
| 391 | } else { |
| 392 | NL_SET_ERR_MSG_MOD(extack, "Unsupported skbedit option offload" ); |
| 393 | return -EOPNOTSUPP; |
| 394 | } |
| 395 | *index_inc = 1; |
| 396 | } else { |
| 397 | struct flow_offload_action *fl_action = entry_data; |
| 398 | |
| 399 | if (is_tcf_skbedit_mark(a: act)) |
| 400 | fl_action->id = FLOW_ACTION_MARK; |
| 401 | else if (is_tcf_skbedit_ptype(a: act)) |
| 402 | fl_action->id = FLOW_ACTION_PTYPE; |
| 403 | else if (is_tcf_skbedit_priority(a: act)) |
| 404 | fl_action->id = FLOW_ACTION_PRIORITY; |
| 405 | else if (is_tcf_skbedit_rx_queue_mapping(a: act)) |
| 406 | fl_action->id = FLOW_ACTION_RX_QUEUE_MAPPING; |
| 407 | else |
| 408 | return -EOPNOTSUPP; |
| 409 | } |
| 410 | |
| 411 | return 0; |
| 412 | } |
| 413 | |
/* Ops vtable registered with the tc action subsystem for "skbedit". */
static struct tc_action_ops act_skbedit_ops = {
	.kind		=	"skbedit" ,
	.id		=	TCA_ID_SKBEDIT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_skbedit_act,
	.stats_update	=	tcf_skbedit_stats_update,
	.dump		=	tcf_skbedit_dump,
	.init		=	tcf_skbedit_init,
	.cleanup	=	tcf_skbedit_cleanup,
	.get_fill_size	=	tcf_skbedit_get_fill_size,
	.offload_act_setup =	tcf_skbedit_offload_act_setup,
	.size		=	sizeof(struct tcf_skbedit),
};
MODULE_ALIAS_NET_ACT("skbedit" );
| 428 | |
| 429 | static __net_init int skbedit_init_net(struct net *net) |
| 430 | { |
| 431 | struct tc_action_net *tn = net_generic(net, id: act_skbedit_ops.net_id); |
| 432 | |
| 433 | return tc_action_net_init(net, tn, ops: &act_skbedit_ops); |
| 434 | } |
| 435 | |
/* Per-netns teardown: release the skbedit action tables for the batch
 * of exiting network namespaces on @net_list.
 */
static void __net_exit skbedit_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, id: act_skbedit_ops.net_id);
}
| 440 | |
/* Per-network-namespace registration; .id receives the generic-netdata
 * slot used by the init/exit callbacks above.
 */
static struct pernet_operations skbedit_net_ops = {
	.init = skbedit_init_net,
	.exit_batch = skbedit_exit_net,
	.id   = &act_skbedit_ops.net_id,
	.size = sizeof(struct tc_action_net),
};
| 447 | |
/* Module metadata. */
MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>" );
MODULE_DESCRIPTION("SKB Editing" );
MODULE_LICENSE("GPL" );
| 451 | |
/* Module entry point: register the skbedit action and its per-netns ops. */
static int __init skbedit_init_module(void)
{
	return tcf_register_action(a: &act_skbedit_ops, ops: &skbedit_net_ops);
}
| 456 | |
/* Module exit point: unregister the skbedit action and per-netns ops. */
static void __exit skbedit_cleanup_module(void)
{
	tcf_unregister_action(a: &act_skbedit_ops, ops: &skbedit_net_ops);
}

module_init(skbedit_init_module);
module_exit(skbedit_cleanup_module);
| 464 | |