/*
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

static const struct nf_queue_handler __rcu *nf_queue_handler;

/*
 * Hook for nfnetlink_queue to register its queue handler.
 * We do this so that most of the NFQUEUE code can be modular.
 *
 * Once the queue is registered it must reinject all packets it
 * receives, no matter what.
 */

void nf_register_queue_handler(const struct nf_queue_handler *qh)
{
	/* should never happen, we only have one queueing backend in kernel */
	WARN_ON(rcu_access_pointer(nf_queue_handler));
	rcu_assign_pointer(nf_queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);
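/* A minimal sketch of how the one in-tree backend (nfnetlink_queue) hooks
 * itself up; the callback names here are illustrative, not the real ones:
 *
 *	static const struct nf_queue_handler my_qh = {
 *		.outfn		= my_enqueue_packet,
 *		.nf_hook_drop	= my_hook_drop,
 *	};
 *
 *	nf_register_queue_handler(&my_qh);
 *
 * ->outfn() receives the nf_queue_entry plus the target queue number and
 * returns 0 or a negative errno (see __nf_queue() below); ->nf_hook_drop()
 * is invoked through nf_queue_nf_hook_drop().
 */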

/* The caller must flush their queue before this */
void nf_unregister_queue_handler(void)
{
	RCU_INIT_POINTER(nf_queue_handler, NULL);
}
EXPORT_SYMBOL(nf_unregister_queue_handler);

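/* With CONFIG_INET the skb's socket may be a TIME_WAIT or request socket,
 * which plain sock_put() cannot handle; sock_gen_put() dispatches on the
 * socket type.
 */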
static void nf_queue_sock_put(struct sock *sk)
{
#ifdef CONFIG_INET
	sock_gen_put(sk);
#else
	sock_put(sk);
#endif
}

static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
	struct nf_hook_state *state = &entry->state;

	/* Release those devices we held, or Alexey will kill me. */
	dev_put(state->in);
	dev_put(state->out);
	if (state->sk)
		nf_queue_sock_put(state->sk);

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	dev_put(entry->physin);
	dev_put(entry->physout);
#endif
}

void nf_queue_entry_free(struct nf_queue_entry *entry)
{
	nf_queue_entry_release_refs(entry);
	kfree(entry);
}
EXPORT_SYMBOL_GPL(nf_queue_entry_free);
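/* nf_queue_entry_free() pairs with nf_queue_entry_get_refs() below: it runs
 * when queueing fails (see __nf_queue()) and once the queue handler has
 * processed a verdict for the packet.
 */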

static void __nf_queue_entry_init_physdevs(struct nf_queue_entry *entry)
{
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	const struct sk_buff *skb = entry->skb;

	if (nf_bridge_info_exists(skb)) {
		entry->physin = nf_bridge_get_physindev(skb, entry->state.net);
		entry->physout = nf_bridge_get_physoutdev(skb);
	} else {
		entry->physin = NULL;
		entry->physout = NULL;
	}
#endif
}
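/* The bridge ports cached above are pinned by nf_queue_entry_get_refs()
 * below, so the queue handler can keep referring to them while the packet
 * sits in the queue.
 */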

/* Bump dev refs so they don't vanish while packet is out */
bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
	struct nf_hook_state *state = &entry->state;

	if (state->sk && !refcount_inc_not_zero(&state->sk->sk_refcnt))
		return false;

	dev_hold(state->in);
	dev_hold(state->out);

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	dev_hold(entry->physin);
	dev_hold(entry->physout);
#endif
	return true;
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
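/* Besides the queueing path below, nfnetlink_queue uses this when it
 * duplicates an entry, e.g. when the segments of a GSO skb are queued
 * one by one.
 */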
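
/* Ask the queue handler to drop all entries queued for @net; called when
 * hooks are being unregistered.
 */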
void nf_queue_nf_hook_drop(struct net *net)
{
	const struct nf_queue_handler *qh;

	rcu_read_lock();
	qh = rcu_dereference(nf_queue_handler);
	if (qh)
		qh->nf_hook_drop(net);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_queue_nf_hook_drop);

static void nf_ip_saveroute(const struct sk_buff *skb,
			    struct nf_queue_entry *entry)
{
	struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->state.hook == NF_INET_LOCAL_OUT) {
		const struct iphdr *iph = ip_hdr(skb);

		rt_info->tos = iph->tos;
		rt_info->daddr = iph->daddr;
		rt_info->saddr = iph->saddr;
		rt_info->mark = skb->mark;
	}
}

static void nf_ip6_saveroute(const struct sk_buff *skb,
			     struct nf_queue_entry *entry)
{
	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->state.hook == NF_INET_LOCAL_OUT) {
		const struct ipv6hdr *iph = ipv6_hdr(skb);

		rt_info->daddr = iph->daddr;
		rt_info->saddr = iph->saddr;
		rt_info->mark = skb->mark;
	}
}
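
/* These snapshots are consulted on reinjection: if a target changed the
 * addresses, TOS or mark while the packet was queued, output routing is
 * redone (see ip_route_me_harder() and its IPv6 counterpart).
 */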

static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
		      unsigned int index, unsigned int queuenum)
{
	struct nf_queue_entry *entry = NULL;
	const struct nf_queue_handler *qh;
	unsigned int route_key_size;
	int status;

	/* QUEUE == DROP if no one is waiting, to be safe. */
	qh = rcu_dereference(nf_queue_handler);
	if (!qh)
		return -ESRCH;

	switch (state->pf) {
	case AF_INET:
		route_key_size = sizeof(struct ip_rt_info);
		break;
	case AF_INET6:
		route_key_size = sizeof(struct ip6_rt_info);
		break;
	default:
		route_key_size = 0;
		break;
	}
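	/* The reroute data sits in memory directly behind the entry and is
	 * reached via nf_queue_entry_reroute(); see the saveroute helpers
	 * above.
	 */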

	if (skb_sk_is_prefetched(skb)) {
		struct sock *sk = skb->sk;

		if (!sk_is_refcounted(sk)) {
			if (!refcount_inc_not_zero(&sk->sk_refcnt))
				return -ENOTCONN;

			/* drop refcount on skb_orphan */
			skb->destructor = sock_edemux;
		}
	}

	entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	if (skb_dst(skb) && !skb_dst_force(skb)) {
		kfree(entry);
		return -ENETDOWN;
	}

	*entry = (struct nf_queue_entry) {
		.skb = skb,
		.state = *state,
		.hook_index = index,
		.size = sizeof(*entry) + route_key_size,
	};

	__nf_queue_entry_init_physdevs(entry);

	if (!nf_queue_entry_get_refs(entry)) {
		kfree(entry);
		return -ENOTCONN;
	}

	switch (entry->state.pf) {
	case AF_INET:
		nf_ip_saveroute(skb, entry);
		break;
	case AF_INET6:
		nf_ip6_saveroute(skb, entry);
		break;
	}

	status = qh->outfn(entry, queuenum);
	if (status < 0) {
		nf_queue_entry_free(entry);
		return status;
	}

	return 0;
}

/* Packets leaving via this function must come back through nf_reinject(). */
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
	     unsigned int index, unsigned int verdict)
{
	int ret;

	ret = __nf_queue(skb, state, index, verdict >> NF_VERDICT_QBITS);
	if (ret < 0) {
		if (ret == -ESRCH &&
		    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
			return 1;
		kfree_skb(skb);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nf_queue);
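
/* A hook that wants its packets queued returns a verdict with the queue
 * number packed into the high bits, e.g. (illustrative):
 *
 *	return NF_QUEUE_NR(queue_id) | NF_VERDICT_FLAG_QUEUE_BYPASS;
 *
 * NF_QUEUE_NR() stores the queue number above NF_VERDICT_QBITS, which is
 * where nf_queue() shifts it back out; the bypass flag turns "no handler
 * registered" (-ESRCH) into letting the packet continue rather than
 * dropping it.
 */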