/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _IPV6_FRAG_H
#define _IPV6_FRAG_H
#include <linux/icmpv6.h>
#include <linux/kernel.h>
#include <net/addrconf.h>
#include <net/ipv6.h>
#include <net/inet_frag.h>

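/*
 * Reassembly queue owners.  The conntrack users are spaced USHRT_MAX
 * apart so that a conntrack zone id can be added to the base value and
 * still yield a distinct defrag user per zone.
 */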
enum ip6_defrag_users {
	IP6_DEFRAG_LOCAL_DELIVER,
	IP6_DEFRAG_CONNTRACK_IN,
	__IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
	IP6_DEFRAG_CONNTRACK_OUT,
	__IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
	IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
	__IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
};

/*
 * Equivalent of the ipv4 struct ipq (per-datagram reassembly queue).
 */
struct frag_queue {
	struct inet_frag_queue	q;

	int			iif;
	__u16			nhoffset;
	u8			ecn;
};

#if IS_ENABLED(CONFIG_IPV6)
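/* inet_frags constructor: record the lookup key and reset the ECN state. */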
static inline void ip6frag_init(struct inet_frag_queue *q, const void *a)
{
	struct frag_queue *fq = container_of(q, struct frag_queue, q);
	const struct frag_v6_compare_key *key = a;

	q->key.v6 = *key;
	fq->ecn = 0;
}

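/* rhashtable hashfn: hash a struct frag_v6_compare_key lookup key. */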
static inline u32 ip6frag_key_hashfn(const void *data, u32 len, u32 seed)
{
	return jhash2(data,
		      sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
}

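/* rhashtable obj_hashfn: hash the key stored in an already-queued object. */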
static inline u32 ip6frag_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct inet_frag_queue *fq = data;

	return jhash2((const u32 *)&fq->key.v6,
		      sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
}

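/* rhashtable obj_cmpfn: 0 when the lookup key matches the queue's key. */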
static inline int
ip6frag_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
{
	const struct frag_v6_compare_key *key = arg->key;
	const struct inet_frag_queue *fq = ptr;

	return !!memcmp(&fq->key, key, sizeof(*key));
}

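/*
 * Reassembly timeout handler: kill the queue, bump the REASMFAILS and
 * REASMTIMEOUT counters and, if the first fragment was received, return
 * an ICMPv6 "fragment reassembly time exceeded" error to the sender.
 */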
static inline void
ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
{
	struct net_device *dev = NULL;
	struct sk_buff *head;
	int refs = 1;

	rcu_read_lock();
	/* Paired with the WRITE_ONCE() in fqdir_pre_exit(). */
	if (READ_ONCE(fq->q.fqdir->dead))
		goto out_rcu_unlock;
	spin_lock(&fq->q.lock);

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto out;

	fq->q.flags |= INET_FRAG_DROP;
	inet_frag_kill(&fq->q, &refs);

	dev = dev_get_by_index_rcu(net, fq->iif);
	if (!dev)
		goto out;

	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);

	/* Don't send error if the first segment did not arrive. */
	if (!(fq->q.flags & INET_FRAG_FIRST_IN))
		goto out;

	/* sk_buff::dev and sk_buff::rbnode are unionized. So we
	 * pull the head out of the tree in order to be able to
	 * deal with head->dev.
	 */
	head = inet_frag_pull_head(&fq->q);
	if (!head)
		goto out;

	head->dev = dev;
	spin_unlock(&fq->q.lock);

	icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
	kfree_skb_reason(head, SKB_DROP_REASON_FRAG_REASM_TIMEOUT);
	goto out_rcu_unlock;

out:
	spin_unlock(&fq->q.lock);
out_rcu_unlock:
	rcu_read_unlock();
	inet_frag_putn(&fq->q, refs);
}
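
/*
 * Illustrative sketch (not part of this header): users of the inet_frag
 * API typically invoke ip6frag_expire_frag_queue() from their fragment
 * queue timer callback, roughly along these lines:
 *
 *	static void ip6_frag_expire(struct timer_list *t)
 *	{
 *		struct inet_frag_queue *frag = from_timer(frag, t, timer);
 *		struct frag_queue *fq = container_of(frag, struct frag_queue, q);
 *
 *		ip6frag_expire_frag_queue(fq->q.fqdir->net, fq);
 *	}
 */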

/* Check if the upper layer header is truncated in the first fragment. */
static inline bool
ipv6frag_thdr_truncated(struct sk_buff *skb, int start, u8 *nexthdrp)
{
	u8 nexthdr = *nexthdrp;
	__be16 frag_off;
	int offset;

	offset = ipv6_skip_exthdr(skb, start, &nexthdr, &frag_off);
	if (offset < 0 || (frag_off & htons(IP6_OFFSET)))
		return false;
	switch (nexthdr) {
	case NEXTHDR_TCP:
		offset += sizeof(struct tcphdr);
		break;
	case NEXTHDR_UDP:
		offset += sizeof(struct udphdr);
		break;
	case NEXTHDR_ICMP:
		offset += sizeof(struct icmp6hdr);
		break;
	default:
		offset += 1;
	}
	if (offset > skb->len)
		return true;
	return false;
}

#endif
#endif