// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 */
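
/*
 * This action recomputes L3/L4 checksums (IPv4 header, ICMP, IGMP, TCP,
 * UDP, UDPLITE, SCTP) on packets matched by a classifier, e.g. after the
 * packet was mangled by another action. Illustrative iproute2 usage
 * (exact syntax depends on the tc version):
 *
 *   tc filter add dev eth0 parent 1: protocol ip u32 \
 *       match ip protocol 17 0xff flowid 1:1 \
 *       action csum udp
 */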

#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include <net/sctp/checksum.h>

#include <net/act_api.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_wrapper.h>

static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};

static struct tc_action_ops act_csum_ops;
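
/* Parse the netlink attributes and create a new action instance or update
 * an existing one. The parameters are swapped in under tcf_lock and freed
 * via RCU so the datapath never sees a half-updated state.
 */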
static int tcf_csum_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a,
			 struct tcf_proto *tp,
			 u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_csum_ops.net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct tcf_csum_params *params_new;
	struct nlattr *tb[TCA_CSUM_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_csum *parm;
	struct tcf_csum *p;
	int ret = 0, err;
	u32 index;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_CSUM_MAX, nla, csum_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CSUM_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_CSUM_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (!err) {
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_csum_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (err > 0) {
		if (bind) /* don't override defaults */
			return ACT_P_BOUND;
		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	} else {
		return err;
	}

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	p = to_tcf_csum(*a);

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		err = -ENOMEM;
		goto put_chain;
	}
	params_new->update_flags = parm->update_flags;
	params_new->action = parm->action;

	spin_lock_bh(&p->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params_new = rcu_replace_pointer(p->params, params_new,
					 lockdep_is_held(&p->tcf_lock));
	spin_unlock_bh(&p->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params_new)
		kfree_rcu(params_new, rcu);

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: length of the headers summed so far
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check that the expected next layer is available in the specified sk_buff
 * and can be written to. Return a pointer to the next layer on success,
 * NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
				    unsigned int ihl, unsigned int ipl,
				    unsigned int jhl)
{
	int ntkoff = skb_network_offset(skb);
	int hl = ihl + jhl;

	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
	    skb_try_make_writable(skb, hl + ntkoff))
		return NULL;
	else
		return (void *)(skb_network_header(skb) + ihl);
}
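
/* The per-protocol helpers below share one pattern: locate the header via
 * tcf_csum_skb_nextlayer(), zero the checksum field, sum the payload with
 * csum_partial() and fold the result, adding a pseudo-header where the
 * protocol requires one.
 */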
static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmphdr *icmph;

	icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
	if (icmph == NULL)
		return 0;

	icmph->checksum = 0;
	skb->csum = csum_partial(icmph, ipl - ihl, 0);
	icmph->checksum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct igmphdr *igmph;

	igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
	if (igmph == NULL)
		return 0;

	igmph->csum = 0;
	skb->csum = csum_partial(igmph, ipl - ihl, 0);
	igmph->csum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}
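
/* Unlike IPv4 ICMP/IGMP above, the ICMPv6 checksum covers an IPv6
 * pseudo-header, hence the csum_ipv6_magic() step.
 */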
static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmp6hdr *icmp6h;
	const struct ipv6hdr *ip6h;

	icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
	if (icmp6h == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	icmp6h->icmp6_cksum = 0;
	skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					      ipl - ihl, IPPROTO_ICMPV6,
					      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct iphdr *iph;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	iph = ip_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = tcp_v4_check(ipl - ihl,
				   iph->saddr, iph->daddr, skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct ipv6hdr *ip6h;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ipl - ihl, IPPROTO_TCP,
				      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct iphdr *iph;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len as the datagram length without checking the protocol
	 * first: UDPLITE reuses that field for the checksum coverage.
	 * Use iph->tot_len, or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	iph = ip_hdr(skb);
	ul = ntohs(udph->len);

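	/* A zero checksum on plain UDP over IPv4 means "not computed",
	 * so in that case there is nothing to update.
	 */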
	if (udplite || udph->check) {

		udph->check = 0;

		if (udplite) {
			if (ul == 0)
				skb->csum = csum_partial(udph, ipl - ihl, 0);
			else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
				skb->csum = csum_partial(udph, ul, 0);
			else
				goto ignore_obscure_skb;
		} else {
			if (ul != ipl - ihl)
				goto ignore_obscure_skb;

			skb->csum = csum_partial(udph, ul, 0);
		}

		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						ul, iph->protocol,
						skb->csum);

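		/* Per RFC 768, a computed checksum of zero is
		 * transmitted as all ones.
		 */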
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct ipv6hdr *ip6h;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len as the datagram length without checking the protocol
	 * first: UDPLITE reuses that field for the checksum coverage.
	 * Use ip6h->payload_len + sizeof(*ip6h) ... , or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	ul = ntohs(udph->len);

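	/* Unlike IPv4, the UDP checksum is mandatory over IPv6
	 * (RFC 8200), so it is always recomputed.
	 */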
	udph->check = 0;

	if (udplite) {
		if (ul == 0)
			skb->csum = csum_partial(udph, ipl - ihl, 0);

		else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
			skb->csum = csum_partial(udph, ul, 0);

		else
			goto ignore_obscure_skb;
	} else {
		if (ul != ipl - ihl)
			goto ignore_obscure_skb;

		skb->csum = csum_partial(udph, ul, 0);
	}

	udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
				      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
				      skb->csum);

	if (!udph->check)
		udph->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
			 unsigned int ipl)
{
	struct sctphdr *sctph;

	if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
		return 1;

	sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
	if (!sctph)
		return 0;

	sctph->checksum = sctp_compute_cksum(skb,
					     skb_network_offset(skb) + ihl);
	skb_reset_csum_not_inet(skb);

	return 1;
}

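/* Dispatch on the IPv4 protocol field. For non-first fragments the
 * frag_off test below yields protocol 0, so no L4 update is attempted.
 */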
static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	const struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);

	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	case IPPROTO_SCTP:
		if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
		    !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len)))
			goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
			goto fail;

		ip_send_check(ip_hdr(skb));
	}

	return 1;

fail:
	return 0;
}

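/* Walk the hop-by-hop options looking for a jumbogram option that
 * overrides the 16-bit payload_len; fail on a malformed jumbo option.
 */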
static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl,
				 unsigned int *pl)
{
	int off, len, optlen;
	unsigned char *xh = (void *)ip6xh;

	off = sizeof(*ip6xh);
	len = ixhl - off;

	while (len > 1) {
		switch (xh[off]) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		case IPV6_TLV_JUMBO:
			optlen = xh[off + 1] + 2;
			if (optlen != 6 || len < 6 || (off & 3) != 2)
				/* wrong jumbo option length/alignment */
				return 0;
			*pl = ntohl(*(__be32 *)(xh + off + 2));
			goto done;
		default:
			optlen = xh[off + 1] + 2;
			if (optlen > len)
				/* ignore obscure options */
				goto done;
			break;
		}
		off += optlen;
		len -= optlen;
	}

done:
	return 1;
}

static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ip6xh;
	unsigned int hl, ixhl;
	unsigned int pl;
	int ntkoff;
	u8 nexthdr;

	ntkoff = skb_network_offset(skb);

	hl = sizeof(*ip6h);

	if (!pskb_may_pull(skb, hl + ntkoff))
		goto fail;

	ip6h = ipv6_hdr(skb);

	pl = ntohs(ip6h->payload_len);
	nexthdr = ip6h->nexthdr;

	do {
		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			goto ignore_skb;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			ixhl = ipv6_optlen(ip6xh);
			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			if ((nexthdr == NEXTHDR_HOP) &&
			    !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
				goto fail;
			nexthdr = ip6xh->nexthdr;
			hl += ixhl;
			break;
		case IPPROTO_ICMPV6:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
				if (!tcf_csum_ipv6_icmp(skb,
							hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_TCP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
				if (!tcf_csum_ipv6_tcp(skb,
						       hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_UDP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 0))
					goto fail;
			goto done;
		case IPPROTO_UDPLITE:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 1))
					goto fail;
			goto done;
		case IPPROTO_SCTP:
			if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
			    !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h)))
				goto fail;
			goto done;
		default:
			goto ignore_skb;
		}
	} while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
	return 1;

fail:
	return 0;
}

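/* Datapath entry point: read the parameters under RCU, strip any VLAN
 * headers to reach the IP header, update the requested checksums and
 * restore the VLAN headers before returning.
 */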
TC_INDIRECT_SCOPE int tcf_csum_act(struct sk_buff *skb,
				   const struct tc_action *a,
				   struct tcf_result *res)
{
	struct tcf_csum *p = to_tcf_csum(a);
	bool orig_vlan_tag_present = false;
	unsigned int vlan_hdr_count = 0;
	struct tcf_csum_params *params;
	u32 update_flags;
	__be16 protocol;
	int action;

	params = rcu_dereference_bh(p->params);

	tcf_lastuse_update(&p->tcf_tm);
	tcf_action_update_bstats(&p->common, skb);

	action = params->action;
	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	update_flags = params->update_flags;
	protocol = skb_protocol(skb, false);
again:
	switch (protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (!tcf_csum_ipv4(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		if (!tcf_csum_ipv6(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_8021AD):
		fallthrough;
	case cpu_to_be16(ETH_P_8021Q):
		if (skb_vlan_tag_present(skb) && !orig_vlan_tag_present) {
			protocol = skb->protocol;
			orig_vlan_tag_present = true;
		} else {
			struct vlan_hdr *vlan = (struct vlan_hdr *)skb->data;

			protocol = vlan->h_vlan_encapsulated_proto;
			skb_pull(skb, VLAN_HLEN);
			skb_reset_network_header(skb);
			vlan_hdr_count++;
		}
		goto again;
	}

out:
	/* Restore the skb for the pulled VLAN tags */
	while (vlan_hdr_count--) {
		skb_push(skb, VLAN_HLEN);
		skb_reset_network_header(skb);
	}

	return action;

drop:
	tcf_action_inc_drop_qstats(&p->common);
	action = TC_ACT_SHOT;
	goto out;
}

static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			 int ref)
{
	const struct tcf_csum *p = to_tcf_csum(a);
	unsigned char *b = skb_tail_pointer(skb);
	const struct tcf_csum_params *params;
	struct tc_csum opt = {
		.index   = p->tcf_index,
		.refcnt  = refcount_read(&p->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	rcu_read_lock();
	params = rcu_dereference(p->params);
	opt.action = params->action;
	opt.update_flags = params->update_flags;

	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &p->tcf_tm);
	if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
		goto nla_put_failure;
	rcu_read_unlock();

	return skb->len;

nla_put_failure:
	rcu_read_unlock();
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_csum_cleanup(struct tc_action *a)
{
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;

	params = rcu_dereference_protected(p->params, 1);
	if (params)
		kfree_rcu(params, rcu);
}

static size_t tcf_csum_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_csum));
}

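/* Hardware offload setup: on bind, fill in a flow_action_entry for the
 * driver; otherwise just report the action id so the core can identify
 * the action when querying it.
 */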
static int tcf_csum_offload_act_setup(struct tc_action *act, void *entry_data,
				      u32 *index_inc, bool bind,
				      struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		entry->id = FLOW_ACTION_CSUM;
		entry->csum_flags = tcf_csum_update_flags(act);
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_CSUM;
	}

	return 0;
}

static struct tc_action_ops act_csum_ops = {
	.kind		= "csum",
	.id		= TCA_ID_CSUM,
	.owner		= THIS_MODULE,
	.act		= tcf_csum_act,
	.dump		= tcf_csum_dump,
	.init		= tcf_csum_init,
	.cleanup	= tcf_csum_cleanup,
	.get_fill_size  = tcf_csum_get_fill_size,
	.offload_act_setup = tcf_csum_offload_act_setup,
	.size		= sizeof(struct tcf_csum),
};
MODULE_ALIAS_NET_ACT("csum");

static __net_init int csum_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_csum_ops.net_id);

	return tc_action_net_init(net, tn, &act_csum_ops);
}

static void __net_exit csum_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_csum_ops.net_id);
}

static struct pernet_operations csum_net_ops = {
	.init = csum_init_net,
	.exit_batch = csum_exit_net,
	.id   = &act_csum_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

static int __init csum_init_module(void)
{
	return tcf_register_action(&act_csum_ops, &csum_net_ops);
}

static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops, &csum_net_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);