/* Connection tracking via netlink socket. Allows for user space
 * protocol helpers and general trouble making from userspace.
 *
 * (C) 2001 by Jay Schulist <jschlst@samba.org>
 * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
 * (C) 2003 by Patrick McHardy <kaber@trash.net>
 * (C) 2005-2012 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Initial connection tracking via netlink development funded and
 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
 *
 * Further development of this code funded by Astaro AG (http://www.astaro.com)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/rculist_nulls.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/security.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/netlink.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/siphash.h>

#include <linux/netfilter.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#if IS_ENABLED(CONFIG_NF_NAT)
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#endif

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

#include "nf_internals.h"

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("List and change connection tracking table");

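/* Dump resumption state for the dying/unconfirmed list dumps: @last holds
 * a referenced conntrack where the previous dump cycle stopped, @done
 * marks the walk as finished so later cycles return early.
 */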
struct ctnetlink_list_dump_ctx {
	struct nf_conn *last;
	unsigned int cpu;
	bool done;
};

static int ctnetlink_dump_tuples_proto(struct sk_buff *skb,
				       const struct nf_conntrack_tuple *tuple,
				       const struct nf_conntrack_l4proto *l4proto)
{
	int ret = 0;
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO);
	if (!nest_parms)
		goto nla_put_failure;
	if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum))
		goto nla_put_failure;

	if (likely(l4proto->tuple_to_nlattr))
		ret = l4proto->tuple_to_nlattr(skb, tuple);

	nla_nest_end(skb, nest_parms);

	return ret;

nla_put_failure:
	return -1;
}

static int ipv4_tuple_to_nlattr(struct sk_buff *skb,
				const struct nf_conntrack_tuple *tuple)
{
	if (nla_put_in_addr(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) ||
	    nla_put_in_addr(skb, CTA_IP_V4_DST, tuple->dst.u3.ip))
		return -EMSGSIZE;
	return 0;
}

static int ipv6_tuple_to_nlattr(struct sk_buff *skb,
				const struct nf_conntrack_tuple *tuple)
{
	if (nla_put_in6_addr(skb, CTA_IP_V6_SRC, &tuple->src.u3.in6) ||
	    nla_put_in6_addr(skb, CTA_IP_V6_DST, &tuple->dst.u3.in6))
		return -EMSGSIZE;
	return 0;
}

static int ctnetlink_dump_tuples_ip(struct sk_buff *skb,
				    const struct nf_conntrack_tuple *tuple)
{
	int ret = 0;
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_IP);
	if (!nest_parms)
		goto nla_put_failure;

	switch (tuple->src.l3num) {
	case NFPROTO_IPV4:
		ret = ipv4_tuple_to_nlattr(skb, tuple);
		break;
	case NFPROTO_IPV6:
		ret = ipv6_tuple_to_nlattr(skb, tuple);
		break;
	}

	nla_nest_end(skb, nest_parms);

	return ret;

nla_put_failure:
	return -1;
}

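/* Dump both halves of a tuple (CTA_TUPLE_IP and CTA_TUPLE_PROTO nests)
 * under rcu_read_lock so the l4proto lookup stays valid.
 */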
static int ctnetlink_dump_tuples(struct sk_buff *skb,
				 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_l4proto *l4proto;
	int ret;

	rcu_read_lock();
	ret = ctnetlink_dump_tuples_ip(skb, tuple);

	if (ret >= 0) {
		l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
		ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
	}
	rcu_read_unlock();
	return ret;
}

static int ctnetlink_dump_zone_id(struct sk_buff *skb, int attrtype,
				  const struct nf_conntrack_zone *zone, int dir)
{
	if (zone->id == NF_CT_DEFAULT_ZONE_ID || zone->dir != dir)
		return 0;
	if (nla_put_be16(skb, attrtype, htons(zone->id)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
{
	if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct,
				  bool skip_zero)
{
	long timeout;

	if (nf_ct_is_confirmed(ct))
		timeout = nf_ct_expires(ct) / HZ;
	else
		timeout = ct->timeout / HZ;

	if (skip_zero && timeout == 0)
		return 0;

	if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct,
				    bool destroy)
{
	const struct nf_conntrack_l4proto *l4proto;
	struct nlattr *nest_proto;
	int ret;

	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
	if (!l4proto->to_nlattr)
		return 0;

	nest_proto = nla_nest_start(skb, CTA_PROTOINFO);
	if (!nest_proto)
		goto nla_put_failure;

	ret = l4proto->to_nlattr(skb, nest_proto, ct, destroy);

	nla_nest_end(skb, nest_proto);

	return ret;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_helpinfo(struct sk_buff *skb,
				   const struct nf_conn *ct)
{
	struct nlattr *nest_helper;
	const struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_helper *helper;

	if (!help)
		return 0;

	rcu_read_lock();
	helper = rcu_dereference(help->helper);
	if (!helper)
		goto out;

	nest_helper = nla_nest_start(skb, CTA_HELP);
	if (!nest_helper)
		goto nla_put_failure;
	if (nla_put_string(skb, CTA_HELP_NAME, helper->name))
		goto nla_put_failure;

	if (helper->to_nlattr)
		helper->to_nlattr(skb, ct);

	nla_nest_end(skb, nest_helper);
out:
	rcu_read_unlock();
	return 0;

nla_put_failure:
	rcu_read_unlock();
	return -1;
}

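/* Dump per-direction packet/byte counters. For IPCTNL_MSG_CT_GET_CTRZERO
 * the counters are atomically swapped to zero while they are read.
 */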
static int
dump_counters(struct sk_buff *skb, struct nf_conn_acct *acct,
	      enum ip_conntrack_dir dir, int type)
{
	enum ctattr_type attr = dir ? CTA_COUNTERS_REPLY : CTA_COUNTERS_ORIG;
	struct nf_conn_counter *counter = acct->counter;
	struct nlattr *nest_count;
	u64 pkts, bytes;

	if (type == IPCTNL_MSG_CT_GET_CTRZERO) {
		pkts = atomic64_xchg(&counter[dir].packets, 0);
		bytes = atomic64_xchg(&counter[dir].bytes, 0);
	} else {
		pkts = atomic64_read(&counter[dir].packets);
		bytes = atomic64_read(&counter[dir].bytes);
	}

	nest_count = nla_nest_start(skb, attr);
	if (!nest_count)
		goto nla_put_failure;

	if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts),
			 CTA_COUNTERS_PAD) ||
	    nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes),
			 CTA_COUNTERS_PAD))
		goto nla_put_failure;

	nla_nest_end(skb, nest_count);

	return 0;

nla_put_failure:
	return -1;
}

static int
ctnetlink_dump_acct(struct sk_buff *skb, const struct nf_conn *ct, int type)
{
	struct nf_conn_acct *acct = nf_conn_acct_find(ct);

	if (!acct)
		return 0;

	if (dump_counters(skb, acct, IP_CT_DIR_ORIGINAL, type) < 0)
		return -1;
	if (dump_counters(skb, acct, IP_CT_DIR_REPLY, type) < 0)
		return -1;

	return 0;
}

static int
ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_count;
	const struct nf_conn_tstamp *tstamp;

	tstamp = nf_conn_tstamp_find(ct);
	if (!tstamp)
		return 0;

	nest_count = nla_nest_start(skb, CTA_TIMESTAMP);
	if (!nest_count)
		goto nla_put_failure;

	if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start),
			 CTA_TIMESTAMP_PAD) ||
	    (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP,
					       cpu_to_be64(tstamp->stop),
					       CTA_TIMESTAMP_PAD)))
		goto nla_put_failure;
	nla_nest_end(skb, nest_count);

	return 0;

nla_put_failure:
	return -1;
}

#ifdef CONFIG_NF_CONNTRACK_MARK
static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct,
			       bool dump)
{
	u32 mark = READ_ONCE(ct->mark);

	if (!mark && !dump)
		return 0;

	if (nla_put_be32(skb, CTA_MARK, htonl(mark)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}
#else
#define ctnetlink_dump_mark(a, b, c) (0)
#endif

#ifdef CONFIG_NF_CONNTRACK_SECMARK
static int ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_secctx;
	int len, ret;
	char *secctx;

	ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
	if (ret)
		return 0;

	ret = -1;
	nest_secctx = nla_nest_start(skb, CTA_SECCTX);
	if (!nest_secctx)
		goto nla_put_failure;

	if (nla_put_string(skb, CTA_SECCTX_NAME, secctx))
		goto nla_put_failure;
	nla_nest_end(skb, nest_secctx);

	ret = 0;
nla_put_failure:
	security_release_secctx(secctx, len);
	return ret;
}
#else
#define ctnetlink_dump_secctx(a, b) (0)
#endif

#ifdef CONFIG_NF_CONNTRACK_LABELS
static inline int ctnetlink_label_size(const struct nf_conn *ct)
{
	struct nf_conn_labels *labels = nf_ct_labels_find(ct);

	if (!labels)
		return 0;
	return nla_total_size(sizeof(labels->bits));
}

static int
ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nf_conn_labels *labels = nf_ct_labels_find(ct);
	unsigned int i;

	if (!labels)
		return 0;

	i = 0;
	do {
		if (labels->bits[i] != 0)
			return nla_put(skb, CTA_LABELS, sizeof(labels->bits),
				       labels->bits);
		i++;
	} while (i < ARRAY_SIZE(labels->bits));

	return 0;
}
#else
#define ctnetlink_dump_labels(a, b) (0)
#define ctnetlink_label_size(a) (0)
#endif

#define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)

static int ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_parms;

	if (!(ct->status & IPS_EXPECTED))
		return 0;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_MASTER);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, master_tuple(ct)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	return -1;
}

static int
dump_ct_seq_adj(struct sk_buff *skb, const struct nf_ct_seqadj *seq, int type)
{
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, type);
	if (!nest_parms)
		goto nla_put_failure;

	if (nla_put_be32(skb, CTA_SEQADJ_CORRECTION_POS,
			 htonl(seq->correction_pos)) ||
	    nla_put_be32(skb, CTA_SEQADJ_OFFSET_BEFORE,
			 htonl(seq->offset_before)) ||
	    nla_put_be32(skb, CTA_SEQADJ_OFFSET_AFTER,
			 htonl(seq->offset_after)))
		goto nla_put_failure;

	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, struct nf_conn *ct)
{
	struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
	struct nf_ct_seqadj *seq;

	if (!(ct->status & IPS_SEQ_ADJUST) || !seqadj)
		return 0;

	spin_lock_bh(&ct->lock);
	seq = &seqadj->seq[IP_CT_DIR_ORIGINAL];
	if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_ORIG) == -1)
		goto err;

	seq = &seqadj->seq[IP_CT_DIR_REPLY];
	if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_REPLY) == -1)
		goto err;

	spin_unlock_bh(&ct->lock);
	return 0;
err:
	spin_unlock_bh(&ct->lock);
	return -1;
}

static int ctnetlink_dump_ct_synproxy(struct sk_buff *skb, struct nf_conn *ct)
{
	struct nf_conn_synproxy *synproxy = nfct_synproxy(ct);
	struct nlattr *nest_parms;

	if (!synproxy)
		return 0;

	nest_parms = nla_nest_start(skb, CTA_SYNPROXY);
	if (!nest_parms)
		goto nla_put_failure;

	if (nla_put_be32(skb, CTA_SYNPROXY_ISN, htonl(synproxy->isn)) ||
	    nla_put_be32(skb, CTA_SYNPROXY_ITS, htonl(synproxy->its)) ||
	    nla_put_be32(skb, CTA_SYNPROXY_TSOFF, htonl(synproxy->tsoff)))
		goto nla_put_failure;

	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
{
	__be32 id = (__force __be32)nf_ct_get_id(ct);

	if (nla_put_be32(skb, CTA_ID, id))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
{
	if (nla_put_be32(skb, CTA_USE, htonl(refcount_read(&ct->ct_general.use))))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

/* all these functions access ct->ext. Caller must either hold a reference
 * on ct or prevent its deletion by holding either the bucket spinlock or
 * pcpu dying list lock.
 */
static int ctnetlink_dump_extinfo(struct sk_buff *skb,
				  struct nf_conn *ct, u32 type)
{
	if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
	    ctnetlink_dump_timestamp(skb, ct) < 0 ||
	    ctnetlink_dump_helpinfo(skb, ct) < 0 ||
	    ctnetlink_dump_labels(skb, ct) < 0 ||
	    ctnetlink_dump_ct_seq_adj(skb, ct) < 0 ||
	    ctnetlink_dump_ct_synproxy(skb, ct) < 0)
		return -1;

	return 0;
}

static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct)
{
	if (ctnetlink_dump_status(skb, ct) < 0 ||
	    ctnetlink_dump_mark(skb, ct, true) < 0 ||
	    ctnetlink_dump_secctx(skb, ct) < 0 ||
	    ctnetlink_dump_id(skb, ct) < 0 ||
	    ctnetlink_dump_use(skb, ct) < 0 ||
	    ctnetlink_dump_master(skb, ct) < 0)
		return -1;

	if (!test_bit(IPS_OFFLOAD_BIT, &ct->status) &&
	    (ctnetlink_dump_timeout(skb, ct, false) < 0 ||
	     ctnetlink_dump_protoinfo(skb, ct, false) < 0))
		return -1;

	return 0;
}

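/* Build one complete conntrack message: both tuples, zone information and,
 * when @extinfo is set, the extension attributes as well.
 */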
static int
ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
		    struct nf_conn *ct, bool extinfo, unsigned int flags)
{
	const struct nf_conntrack_zone *zone;
	struct nlmsghdr *nlh;
	struct nlattr *nest_parms;
	unsigned int event;

	if (portid)
		flags |= NLM_F_MULTI;
	event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_NEW);
	nlh = nfnl_msg_put(skb, portid, seq, event, flags, nf_ct_l3num(ct),
			   NFNETLINK_V0, 0);
	if (!nlh)
		goto nlmsg_failure;

	zone = nf_ct_zone(ct);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
		goto nla_put_failure;
	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
				   NF_CT_ZONE_DIR_ORIG) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
		goto nla_put_failure;
	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
				   NF_CT_ZONE_DIR_REPL) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
				   NF_CT_DEFAULT_ZONE_DIR) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_info(skb, ct) < 0)
		goto nla_put_failure;
	if (extinfo && ctnetlink_dump_extinfo(skb, ct, type) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -1;
}

static const struct nla_policy cta_ip_nla_policy[CTA_IP_MAX + 1] = {
	[CTA_IP_V4_SRC] = { .type = NLA_U32 },
	[CTA_IP_V4_DST] = { .type = NLA_U32 },
	[CTA_IP_V6_SRC] = { .len = sizeof(__be32) * 4 },
	[CTA_IP_V6_DST] = { .len = sizeof(__be32) * 4 },
};

#if defined(CONFIG_NETFILTER_NETLINK_GLUE_CT) || defined(CONFIG_NF_CONNTRACK_EVENTS)
static size_t ctnetlink_proto_size(const struct nf_conn *ct)
{
	const struct nf_conntrack_l4proto *l4proto;
	size_t len, len4 = 0;

	len = nla_policy_len(cta_ip_nla_policy, CTA_IP_MAX + 1);
	len *= 3u; /* ORIG, REPLY, MASTER */

	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
	len += l4proto->nlattr_size;
	if (l4proto->nlattr_tuple_size) {
		len4 = l4proto->nlattr_tuple_size();
		len4 *= 3u; /* ORIG, REPLY, MASTER */
	}

	return len + len4;
}
#endif

static inline size_t ctnetlink_acct_size(const struct nf_conn *ct)
{
	if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
		return 0;
	return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
	       + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
	       + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
	       ;
}

static inline int ctnetlink_secctx_size(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_SECMARK
	int len, ret;

	ret = security_secid_to_secctx(ct->secmark, NULL, &len);
	if (ret)
		return 0;

	return nla_total_size(0) /* CTA_SECCTX */
	       + nla_total_size(sizeof(char) * len); /* CTA_SECCTX_NAME */
#else
	return 0;
#endif
}

static inline size_t ctnetlink_timestamp_size(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
	if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
		return 0;
	return nla_total_size(0) + 2 * nla_total_size_64bit(sizeof(uint64_t));
#else
	return 0;
#endif
}

#ifdef CONFIG_NF_CONNTRACK_EVENTS
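/* Worst-case estimate of the netlink message size needed for one conntrack
 * event, used to size the skb before it is filled in.
 */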
static size_t ctnetlink_nlmsg_size(const struct nf_conn *ct)
{
	return NLMSG_ALIGN(sizeof(struct nfgenmsg))
	       + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
	       + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
	       + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
	       + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
	       + ctnetlink_acct_size(ct)
	       + ctnetlink_timestamp_size(ct)
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
	       + nla_total_size(0) /* CTA_PROTOINFO */
	       + nla_total_size(0) /* CTA_HELP */
	       + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
	       + ctnetlink_secctx_size(ct)
#if IS_ENABLED(CONFIG_NF_NAT)
	       + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
	       + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
#endif
#ifdef CONFIG_NF_CONNTRACK_MARK
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
#endif
#ifdef CONFIG_NF_CONNTRACK_ZONES
	       + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
#endif
	       + ctnetlink_proto_size(ct)
	       + ctnetlink_label_size(ct)
	       ;
}

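/* Event callback: translate conntrack state changes into netlink messages
 * and broadcast them to the matching NFNLGRP_CONNTRACK_* group. A failed
 * delivery is reported back to the caller as -ENOBUFS.
 */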
static int
ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
{
	const struct nf_conntrack_zone *zone;
	struct net *net;
	struct nlmsghdr *nlh;
	struct nlattr *nest_parms;
	struct nf_conn *ct = item->ct;
	struct sk_buff *skb;
	unsigned int type;
	unsigned int flags = 0, group;
	int err;

	if (events & (1 << IPCT_DESTROY)) {
		type = IPCTNL_MSG_CT_DELETE;
		group = NFNLGRP_CONNTRACK_DESTROY;
	} else if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
		type = IPCTNL_MSG_CT_NEW;
		flags = NLM_F_CREATE|NLM_F_EXCL;
		group = NFNLGRP_CONNTRACK_NEW;
	} else if (events) {
		type = IPCTNL_MSG_CT_NEW;
		group = NFNLGRP_CONNTRACK_UPDATE;
	} else
		return 0;

	net = nf_ct_net(ct);
	if (!item->report && !nfnetlink_has_listeners(net, group))
		return 0;

	skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, type);
	nlh = nfnl_msg_put(skb, item->portid, 0, type, flags, nf_ct_l3num(ct),
			   NFNETLINK_V0, 0);
	if (!nlh)
		goto nlmsg_failure;

	zone = nf_ct_zone(ct);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
		goto nla_put_failure;
	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
				   NF_CT_ZONE_DIR_ORIG) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
		goto nla_put_failure;
	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
				   NF_CT_ZONE_DIR_REPL) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
				   NF_CT_DEFAULT_ZONE_DIR) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_id(skb, ct) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_status(skb, ct) < 0)
		goto nla_put_failure;

	if (events & (1 << IPCT_DESTROY)) {
		if (ctnetlink_dump_timeout(skb, ct, true) < 0)
			goto nla_put_failure;

		if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
		    ctnetlink_dump_timestamp(skb, ct) < 0 ||
		    ctnetlink_dump_protoinfo(skb, ct, true) < 0)
			goto nla_put_failure;
	} else {
		if (ctnetlink_dump_timeout(skb, ct, false) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_PROTOINFO) &&
		    ctnetlink_dump_protoinfo(skb, ct, false) < 0)
			goto nla_put_failure;

		if ((events & (1 << IPCT_HELPER) || nfct_help(ct))
		    && ctnetlink_dump_helpinfo(skb, ct) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NF_CONNTRACK_SECMARK
		if ((events & (1 << IPCT_SECMARK) || ct->secmark)
		    && ctnetlink_dump_secctx(skb, ct) < 0)
			goto nla_put_failure;
#endif
		if (events & (1 << IPCT_LABEL) &&
		    ctnetlink_dump_labels(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_RELATED) &&
		    ctnetlink_dump_master(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_SEQADJ) &&
		    ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_SYNPROXY) &&
		    ctnetlink_dump_ct_synproxy(skb, ct) < 0)
			goto nla_put_failure;
	}

#ifdef CONFIG_NF_CONNTRACK_MARK
	if (ctnetlink_dump_mark(skb, ct, events & (1 << IPCT_MARK)))
		goto nla_put_failure;
#endif
	nlmsg_end(skb, nlh);
	err = nfnetlink_send(skb, net, item->portid, group, item->report,
			     GFP_ATOMIC);
	if (err == -ENOBUFS || err == -EAGAIN)
		return -ENOBUFS;

	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
nlmsg_failure:
	kfree_skb(skb);
errout:
	if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
		return -ENOBUFS;

	return 0;
}
#endif /* CONFIG_NF_CONNTRACK_EVENTS */

static int ctnetlink_done(struct netlink_callback *cb)
{
	if (cb->args[1])
		nf_ct_put((struct nf_conn *)cb->args[1]);
	kfree(cb->data);
	return 0;
}

struct ctnetlink_filter_u32 {
	u32 val;
	u32 mask;
};

struct ctnetlink_filter {
	u8 family;
	bool zone_filter;

	u_int32_t orig_flags;
	u_int32_t reply_flags;

	struct nf_conntrack_tuple orig;
	struct nf_conntrack_tuple reply;
	struct nf_conntrack_zone zone;

	struct ctnetlink_filter_u32 mark;
	struct ctnetlink_filter_u32 status;
};

static const struct nla_policy cta_filter_nla_policy[CTA_FILTER_MAX + 1] = {
	[CTA_FILTER_ORIG_FLAGS] = { .type = NLA_U32 },
	[CTA_FILTER_REPLY_FLAGS] = { .type = NLA_U32 },
};

static int ctnetlink_parse_filter(const struct nlattr *attr,
				  struct ctnetlink_filter *filter)
{
	struct nlattr *tb[CTA_FILTER_MAX + 1];
	int ret = 0;

	ret = nla_parse_nested(tb, CTA_FILTER_MAX, attr, cta_filter_nla_policy,
			       NULL);
	if (ret)
		return ret;

	if (tb[CTA_FILTER_ORIG_FLAGS]) {
		filter->orig_flags = nla_get_u32(tb[CTA_FILTER_ORIG_FLAGS]);
		if (filter->orig_flags & ~CTA_FILTER_F_ALL)
			return -EOPNOTSUPP;
	}

	if (tb[CTA_FILTER_REPLY_FLAGS]) {
		filter->reply_flags = nla_get_u32(tb[CTA_FILTER_REPLY_FLAGS]);
		if (filter->reply_flags & ~CTA_FILTER_F_ALL)
			return -EOPNOTSUPP;
	}

	return 0;
}

static int ctnetlink_parse_zone(const struct nlattr *attr,
				struct nf_conntrack_zone *zone);
static int ctnetlink_parse_tuple_filter(const struct nlattr * const cda[],
					struct nf_conntrack_tuple *tuple,
					u32 type, u_int8_t l3num,
					struct nf_conntrack_zone *zone,
					u_int32_t flags);

static int ctnetlink_filter_parse_mark(struct ctnetlink_filter_u32 *mark,
				       const struct nlattr * const cda[])
{
#ifdef CONFIG_NF_CONNTRACK_MARK
	if (cda[CTA_MARK]) {
		mark->val = ntohl(nla_get_be32(cda[CTA_MARK]));

		if (cda[CTA_MARK_MASK])
			mark->mask = ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
		else
			mark->mask = 0xffffffff;
	} else if (cda[CTA_MARK_MASK]) {
		return -EINVAL;
	}
#endif
	return 0;
}

static int ctnetlink_filter_parse_status(struct ctnetlink_filter_u32 *status,
					 const struct nlattr * const cda[])
{
	if (cda[CTA_STATUS]) {
		status->val = ntohl(nla_get_be32(cda[CTA_STATUS]));
		if (cda[CTA_STATUS_MASK])
			status->mask = ntohl(nla_get_be32(cda[CTA_STATUS_MASK]));
		else
			status->mask = status->val;

		/* status->val == 0? always true, else always false. */
		if (status->mask == 0)
			return -EINVAL;
	} else if (cda[CTA_STATUS_MASK]) {
		return -EINVAL;
	}

	/* CTA_STATUS is NLA_U32, if this fires UAPI needs to be extended */
	BUILD_BUG_ON(__IPS_MAX_BIT >= 32);
	return 0;
}

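/* Build a kernel-side dump/flush filter from the request attributes
 * (family, mark/mask, status/mask, zone and optional tuple filters).
 */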
static struct ctnetlink_filter *
ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
{
	struct ctnetlink_filter *filter;
	int err;

#ifndef CONFIG_NF_CONNTRACK_MARK
	if (cda[CTA_MARK] || cda[CTA_MARK_MASK])
		return ERR_PTR(-EOPNOTSUPP);
#endif

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	if (filter == NULL)
		return ERR_PTR(-ENOMEM);

	filter->family = family;

	err = ctnetlink_filter_parse_mark(&filter->mark, cda);
	if (err)
		goto err_filter;

	err = ctnetlink_filter_parse_status(&filter->status, cda);
	if (err)
		goto err_filter;

	if (cda[CTA_ZONE]) {
		err = ctnetlink_parse_zone(cda[CTA_ZONE], &filter->zone);
		if (err < 0)
			goto err_filter;
		filter->zone_filter = true;
	}

	if (!cda[CTA_FILTER])
		return filter;

	err = ctnetlink_parse_filter(cda[CTA_FILTER], filter);
	if (err < 0)
		goto err_filter;

	if (filter->orig_flags) {
		if (!cda[CTA_TUPLE_ORIG]) {
			err = -EINVAL;
			goto err_filter;
		}

		err = ctnetlink_parse_tuple_filter(cda, &filter->orig,
						   CTA_TUPLE_ORIG,
						   filter->family,
						   &filter->zone,
						   filter->orig_flags);
		if (err < 0)
			goto err_filter;
	}

	if (filter->reply_flags) {
		if (!cda[CTA_TUPLE_REPLY]) {
			err = -EINVAL;
			goto err_filter;
		}

		err = ctnetlink_parse_tuple_filter(cda, &filter->reply,
						   CTA_TUPLE_REPLY,
						   filter->family,
						   &filter->zone,
						   filter->reply_flags);
		if (err < 0)
			goto err_filter;
	}

	return filter;

err_filter:
	kfree(filter);

	return ERR_PTR(err);
}

static bool ctnetlink_needs_filter(u8 family, const struct nlattr * const *cda)
{
	return family || cda[CTA_MARK] || cda[CTA_FILTER] || cda[CTA_STATUS] || cda[CTA_ZONE];
}

static int ctnetlink_start(struct netlink_callback *cb)
{
	const struct nlattr * const *cda = cb->data;
	struct ctnetlink_filter *filter = NULL;
	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	u8 family = nfmsg->nfgen_family;

	if (ctnetlink_needs_filter(family, cda)) {
		filter = ctnetlink_alloc_filter(cda, family);
		if (IS_ERR(filter))
			return PTR_ERR(filter);
	}

	cb->data = filter;
	return 0;
}

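/* Compare a conntrack tuple against the tuple filter; only the fields
 * selected by the CTA_FILTER_FLAG() bits are checked. Returns 1 on match.
 */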
static int ctnetlink_filter_match_tuple(struct nf_conntrack_tuple *filter_tuple,
					struct nf_conntrack_tuple *ct_tuple,
					u_int32_t flags, int family)
{
	switch (family) {
	case NFPROTO_IPV4:
		if ((flags & CTA_FILTER_FLAG(CTA_IP_SRC)) &&
		    filter_tuple->src.u3.ip != ct_tuple->src.u3.ip)
			return 0;

		if ((flags & CTA_FILTER_FLAG(CTA_IP_DST)) &&
		    filter_tuple->dst.u3.ip != ct_tuple->dst.u3.ip)
			return 0;
		break;
	case NFPROTO_IPV6:
		if ((flags & CTA_FILTER_FLAG(CTA_IP_SRC)) &&
		    !ipv6_addr_cmp(&filter_tuple->src.u3.in6,
				   &ct_tuple->src.u3.in6))
			return 0;

		if ((flags & CTA_FILTER_FLAG(CTA_IP_DST)) &&
		    !ipv6_addr_cmp(&filter_tuple->dst.u3.in6,
				   &ct_tuple->dst.u3.in6))
			return 0;
		break;
	}

	if ((flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)) &&
	    filter_tuple->dst.protonum != ct_tuple->dst.protonum)
		return 0;

	switch (ct_tuple->dst.protonum) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_SRC_PORT)) &&
		    filter_tuple->src.u.tcp.port != ct_tuple->src.u.tcp.port)
			return 0;

		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_DST_PORT)) &&
		    filter_tuple->dst.u.tcp.port != ct_tuple->dst.u.tcp.port)
			return 0;
		break;
	case IPPROTO_ICMP:
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_TYPE)) &&
		    filter_tuple->dst.u.icmp.type != ct_tuple->dst.u.icmp.type)
			return 0;
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_CODE)) &&
		    filter_tuple->dst.u.icmp.code != ct_tuple->dst.u.icmp.code)
			return 0;
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_ID)) &&
		    filter_tuple->src.u.icmp.id != ct_tuple->src.u.icmp.id)
			return 0;
		break;
	case IPPROTO_ICMPV6:
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_TYPE)) &&
		    filter_tuple->dst.u.icmp.type != ct_tuple->dst.u.icmp.type)
			return 0;
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_CODE)) &&
		    filter_tuple->dst.u.icmp.code != ct_tuple->dst.u.icmp.code)
			return 0;
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_ID)) &&
		    filter_tuple->src.u.icmp.id != ct_tuple->src.u.icmp.id)
			return 0;
		break;
	}

	return 1;
}

static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
{
	struct ctnetlink_filter *filter = data;
	struct nf_conntrack_tuple *tuple;
	u32 status;

	if (filter == NULL)
		goto out;

	/* Match entries of a given L3 protocol number.
	 * If it is not specified, ie. l3proto == 0,
	 * then match everything.
	 */
	if (filter->family && nf_ct_l3num(ct) != filter->family)
		goto ignore_entry;

	if (filter->zone_filter &&
	    !nf_ct_zone_equal_any(ct, &filter->zone))
		goto ignore_entry;

	if (filter->orig_flags) {
		tuple = nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL);
		if (!ctnetlink_filter_match_tuple(&filter->orig, tuple,
						  filter->orig_flags,
						  filter->family))
			goto ignore_entry;
	}

	if (filter->reply_flags) {
		tuple = nf_ct_tuple(ct, IP_CT_DIR_REPLY);
		if (!ctnetlink_filter_match_tuple(&filter->reply, tuple,
						  filter->reply_flags,
						  filter->family))
			goto ignore_entry;
	}

#ifdef CONFIG_NF_CONNTRACK_MARK
	if ((READ_ONCE(ct->mark) & filter->mark.mask) != filter->mark.val)
		goto ignore_entry;
#endif
	status = (u32)READ_ONCE(ct->status);
	if ((status & filter->status.mask) != filter->status.val)
		goto ignore_entry;

out:
	return 1;

ignore_entry:
	return 0;
}

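/* Walk the conntrack hash table for a GET dump. cb->args[0] tracks the
 * current bucket, cb->args[1] holds a referenced conntrack to resume from
 * when the previous skb filled up.
 */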
static int
ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
	unsigned int flags = cb->data ? NLM_F_DUMP_FILTERED : 0;
	struct net *net = sock_net(skb->sk);
	struct nf_conn *ct, *last;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	struct nf_conn *nf_ct_evict[8];
	int res, i;
	spinlock_t *lockp;

	last = (struct nf_conn *)cb->args[1];
	i = 0;

	local_bh_disable();
	for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
restart:
		while (i) {
			i--;
			if (nf_ct_should_gc(nf_ct_evict[i]))
				nf_ct_kill(nf_ct_evict[i]);
			nf_ct_put(nf_ct_evict[i]);
		}

		lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS];
		nf_conntrack_lock(lockp);
		if (cb->args[0] >= nf_conntrack_htable_size) {
			spin_unlock(lockp);
			goto out;
		}
		hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[cb->args[0]],
					   hnnode) {
			ct = nf_ct_tuplehash_to_ctrack(h);
			if (nf_ct_is_expired(ct)) {
				/* need to defer nf_ct_kill() until lock is released */
				if (i < ARRAY_SIZE(nf_ct_evict) &&
				    refcount_inc_not_zero(&ct->ct_general.use))
					nf_ct_evict[i++] = ct;
				continue;
			}

			if (!net_eq(net, nf_ct_net(ct)))
				continue;

			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
				continue;

			if (cb->args[1]) {
				if (ct != last)
					continue;
				cb->args[1] = 0;
			}
			if (!ctnetlink_filter_match(ct, cb->data))
				continue;

			res =
			ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
					    ct, true, flags);
			if (res < 0) {
				nf_conntrack_get(&ct->ct_general);
				cb->args[1] = (unsigned long)ct;
				spin_unlock(lockp);
				goto out;
			}
		}
		spin_unlock(lockp);
		if (cb->args[1]) {
			cb->args[1] = 0;
			goto restart;
		}
	}
out:
	local_bh_enable();
	if (last) {
		/* nf ct hash resize happened, now clear the leftover. */
		if ((struct nf_conn *)cb->args[1] == last)
			cb->args[1] = 0;

		nf_ct_put(last);
	}

	while (i) {
		i--;
		if (nf_ct_should_gc(nf_ct_evict[i]))
			nf_ct_kill(nf_ct_evict[i]);
		nf_ct_put(nf_ct_evict[i]);
	}

	return skb->len;
}

static int ipv4_nlattr_to_tuple(struct nlattr *tb[],
				struct nf_conntrack_tuple *t,
				u_int32_t flags)
{
	if (flags & CTA_FILTER_FLAG(CTA_IP_SRC)) {
		if (!tb[CTA_IP_V4_SRC])
			return -EINVAL;

		t->src.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_SRC]);
	}

	if (flags & CTA_FILTER_FLAG(CTA_IP_DST)) {
		if (!tb[CTA_IP_V4_DST])
			return -EINVAL;

		t->dst.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_DST]);
	}

	return 0;
}

static int ipv6_nlattr_to_tuple(struct nlattr *tb[],
				struct nf_conntrack_tuple *t,
				u_int32_t flags)
{
	if (flags & CTA_FILTER_FLAG(CTA_IP_SRC)) {
		if (!tb[CTA_IP_V6_SRC])
			return -EINVAL;

		t->src.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_SRC]);
	}

	if (flags & CTA_FILTER_FLAG(CTA_IP_DST)) {
		if (!tb[CTA_IP_V6_DST])
			return -EINVAL;

		t->dst.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_DST]);
	}

	return 0;
}

static int ctnetlink_parse_tuple_ip(struct nlattr *attr,
				    struct nf_conntrack_tuple *tuple,
				    u_int32_t flags)
{
	struct nlattr *tb[CTA_IP_MAX+1];
	int ret = 0;

	ret = nla_parse_nested_deprecated(tb, CTA_IP_MAX, attr,
					  cta_ip_nla_policy, NULL);
	if (ret < 0)
		return ret;

	switch (tuple->src.l3num) {
	case NFPROTO_IPV4:
		ret = ipv4_nlattr_to_tuple(tb, tuple, flags);
		break;
	case NFPROTO_IPV6:
		ret = ipv6_nlattr_to_tuple(tb, tuple, flags);
		break;
	}

	return ret;
}

static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = {
	[CTA_PROTO_NUM] = { .type = NLA_U8 },
};

static int ctnetlink_parse_tuple_proto(struct nlattr *attr,
				       struct nf_conntrack_tuple *tuple,
				       u_int32_t flags)
{
	const struct nf_conntrack_l4proto *l4proto;
	struct nlattr *tb[CTA_PROTO_MAX+1];
	int ret = 0;

	ret = nla_parse_nested_deprecated(tb, CTA_PROTO_MAX, attr,
					  proto_nla_policy, NULL);
	if (ret < 0)
		return ret;

	if (!(flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)))
		return 0;

	if (!tb[CTA_PROTO_NUM])
		return -EINVAL;

	tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);

	rcu_read_lock();
	l4proto = nf_ct_l4proto_find(tuple->dst.protonum);

	if (likely(l4proto->nlattr_to_tuple)) {
		ret = nla_validate_nested_deprecated(attr, CTA_PROTO_MAX,
						     l4proto->nla_policy,
						     NULL);
		if (ret == 0)
			ret = l4proto->nlattr_to_tuple(tb, tuple, flags);
	}

	rcu_read_unlock();

	return ret;
}

static int
ctnetlink_parse_zone(const struct nlattr *attr,
		     struct nf_conntrack_zone *zone)
{
	nf_ct_zone_init(zone, NF_CT_DEFAULT_ZONE_ID,
			NF_CT_DEFAULT_ZONE_DIR, 0);
#ifdef CONFIG_NF_CONNTRACK_ZONES
	if (attr)
		zone->id = ntohs(nla_get_be16(attr));
#else
	if (attr)
		return -EOPNOTSUPP;
#endif
	return 0;
}

static int
ctnetlink_parse_tuple_zone(struct nlattr *attr, enum ctattr_type type,
			   struct nf_conntrack_zone *zone)
{
	int ret;

	if (zone->id != NF_CT_DEFAULT_ZONE_ID)
		return -EINVAL;

	ret = ctnetlink_parse_zone(attr, zone);
	if (ret < 0)
		return ret;

	if (type == CTA_TUPLE_REPLY)
		zone->dir = NF_CT_ZONE_DIR_REPL;
	else
		zone->dir = NF_CT_ZONE_DIR_ORIG;

	return 0;
}

static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
	[CTA_TUPLE_IP] = { .type = NLA_NESTED },
	[CTA_TUPLE_PROTO] = { .type = NLA_NESTED },
	[CTA_TUPLE_ZONE] = { .type = NLA_U16 },
};

#define CTA_FILTER_F_ALL_CTA_PROTO \
	(CTA_FILTER_F_CTA_PROTO_SRC_PORT | \
	 CTA_FILTER_F_CTA_PROTO_DST_PORT | \
	 CTA_FILTER_F_CTA_PROTO_ICMP_TYPE | \
	 CTA_FILTER_F_CTA_PROTO_ICMP_CODE | \
	 CTA_FILTER_F_CTA_PROTO_ICMP_ID | \
	 CTA_FILTER_F_CTA_PROTO_ICMPV6_TYPE | \
	 CTA_FILTER_F_CTA_PROTO_ICMPV6_CODE | \
	 CTA_FILTER_F_CTA_PROTO_ICMPV6_ID)

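/* Parse a CTA_TUPLE_* nest into a conntrack tuple; only the attributes
 * selected by @flags are required, which allows partial tuples for filters.
 */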
static int
ctnetlink_parse_tuple_filter(const struct nlattr * const cda[],
			     struct nf_conntrack_tuple *tuple, u32 type,
			     u_int8_t l3num, struct nf_conntrack_zone *zone,
			     u_int32_t flags)
{
	struct nlattr *tb[CTA_TUPLE_MAX+1];
	int err;

	memset(tuple, 0, sizeof(*tuple));

	err = nla_parse_nested_deprecated(tb, CTA_TUPLE_MAX, cda[type],
					  tuple_nla_policy, NULL);
	if (err < 0)
		return err;

	if (l3num != NFPROTO_IPV4 && l3num != NFPROTO_IPV6)
		return -EOPNOTSUPP;
	tuple->src.l3num = l3num;

	if (flags & CTA_FILTER_FLAG(CTA_IP_DST) ||
	    flags & CTA_FILTER_FLAG(CTA_IP_SRC)) {
		if (!tb[CTA_TUPLE_IP])
			return -EINVAL;

		err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple, flags);
		if (err < 0)
			return err;
	}

	if (flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)) {
		if (!tb[CTA_TUPLE_PROTO])
			return -EINVAL;

		err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple, flags);
		if (err < 0)
			return err;
	} else if (flags & CTA_FILTER_FLAG(ALL_CTA_PROTO)) {
		/* Can't manage proto flags without a protonum */
		return -EINVAL;
	}

	if ((flags & CTA_FILTER_FLAG(CTA_TUPLE_ZONE)) && tb[CTA_TUPLE_ZONE]) {
		if (!zone)
			return -EINVAL;

		err = ctnetlink_parse_tuple_zone(tb[CTA_TUPLE_ZONE],
						 type, zone);
		if (err < 0)
			return err;
	}

	/* orig and expect tuples get DIR_ORIGINAL */
	if (type == CTA_TUPLE_REPLY)
		tuple->dst.dir = IP_CT_DIR_REPLY;
	else
		tuple->dst.dir = IP_CT_DIR_ORIGINAL;

	return 0;
}

static int
ctnetlink_parse_tuple(const struct nlattr * const cda[],
		      struct nf_conntrack_tuple *tuple, u32 type,
		      u_int8_t l3num, struct nf_conntrack_zone *zone)
{
	return ctnetlink_parse_tuple_filter(cda, tuple, type, l3num, zone,
					    CTA_FILTER_FLAG(ALL));
}

static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
	[CTA_HELP_NAME] = { .type = NLA_NUL_STRING,
			    .len = NF_CT_HELPER_NAME_LEN - 1 },
};

static int ctnetlink_parse_help(const struct nlattr *attr, char **helper_name,
				struct nlattr **helpinfo)
{
	int err;
	struct nlattr *tb[CTA_HELP_MAX+1];

	err = nla_parse_nested_deprecated(tb, CTA_HELP_MAX, attr,
					  help_nla_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[CTA_HELP_NAME])
		return -EINVAL;

	*helper_name = nla_data(tb[CTA_HELP_NAME]);

	if (tb[CTA_HELP_INFO])
		*helpinfo = tb[CTA_HELP_INFO];

	return 0;
}

static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
	[CTA_TUPLE_ORIG] = { .type = NLA_NESTED },
	[CTA_TUPLE_REPLY] = { .type = NLA_NESTED },
	[CTA_STATUS] = { .type = NLA_U32 },
	[CTA_PROTOINFO] = { .type = NLA_NESTED },
	[CTA_HELP] = { .type = NLA_NESTED },
	[CTA_NAT_SRC] = { .type = NLA_NESTED },
	[CTA_TIMEOUT] = { .type = NLA_U32 },
	[CTA_MARK] = { .type = NLA_U32 },
	[CTA_ID] = { .type = NLA_U32 },
	[CTA_NAT_DST] = { .type = NLA_NESTED },
	[CTA_TUPLE_MASTER] = { .type = NLA_NESTED },
	[CTA_NAT_SEQ_ADJ_ORIG] = { .type = NLA_NESTED },
	[CTA_NAT_SEQ_ADJ_REPLY] = { .type = NLA_NESTED },
	[CTA_ZONE] = { .type = NLA_U16 },
	[CTA_MARK_MASK] = { .type = NLA_U32 },
	[CTA_LABELS] = { .type = NLA_BINARY,
			 .len = NF_CT_LABELS_MAX_SIZE },
	[CTA_LABELS_MASK] = { .type = NLA_BINARY,
			      .len = NF_CT_LABELS_MAX_SIZE },
	[CTA_FILTER] = { .type = NLA_NESTED },
	[CTA_STATUS_MASK] = { .type = NLA_U32 },
};

static int ctnetlink_flush_iterate(struct nf_conn *ct, void *data)
{
	return ctnetlink_filter_match(ct, data);
}

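/* Delete all entries matching the (optional) filter. CTA_FILTER is
 * rejected here since flushing by partial tuple is not supported.
 */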
static int ctnetlink_flush_conntrack(struct net *net,
				     const struct nlattr * const cda[],
				     u32 portid, int report, u8 family)
{
	struct ctnetlink_filter *filter = NULL;
	struct nf_ct_iter_data iter = {
		.net = net,
		.portid = portid,
		.report = report,
	};

	if (ctnetlink_needs_filter(family, cda)) {
		if (cda[CTA_FILTER])
			return -EOPNOTSUPP;

		filter = ctnetlink_alloc_filter(cda, family);
		if (IS_ERR(filter))
			return PTR_ERR(filter);

		iter.data = filter;
	}

	nf_ct_iterate_cleanup_net(ctnetlink_flush_iterate, &iter);
	kfree(filter);

	return 0;
}

static int ctnetlink_del_conntrack(struct sk_buff *skb,
				   const struct nfnl_info *info,
				   const struct nlattr * const cda[])
{
	u8 family = info->nfmsg->nfgen_family;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_zone zone;
	struct nf_conn *ct;
	int err;

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_TUPLE_ORIG])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
					    family, &zone);
	else if (cda[CTA_TUPLE_REPLY])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
					    family, &zone);
	else {
		u_int8_t u3 = info->nfmsg->version ? family : AF_UNSPEC;

		return ctnetlink_flush_conntrack(info->net, cda,
						 NETLINK_CB(skb).portid,
						 nlmsg_report(info->nlh), u3);
	}

	if (err < 0)
		return err;

	h = nf_conntrack_find_get(info->net, &zone, &tuple);
	if (!h)
		return -ENOENT;

	ct = nf_ct_tuplehash_to_ctrack(h);

	if (cda[CTA_ID]) {
		__be32 id = nla_get_be32(cda[CTA_ID]);

		if (id != (__force __be32)nf_ct_get_id(ct)) {
			nf_ct_put(ct);
			return -ENOENT;
		}
	}

	nf_ct_delete(ct, NETLINK_CB(skb).portid, nlmsg_report(info->nlh));
	nf_ct_put(ct);

	return 0;
}

static int ctnetlink_get_conntrack(struct sk_buff *skb,
				   const struct nfnl_info *info,
				   const struct nlattr * const cda[])
{
	u_int8_t u3 = info->nfmsg->nfgen_family;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_zone zone;
	struct sk_buff *skb2;
	struct nf_conn *ct;
	int err;

	if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.start = ctnetlink_start,
			.dump = ctnetlink_dump_table,
			.done = ctnetlink_done,
			.data = (void *)cda,
		};

		return netlink_dump_start(info->sk, skb, info->nlh, &c);
	}

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_TUPLE_ORIG])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
					    u3, &zone);
	else if (cda[CTA_TUPLE_REPLY])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
					    u3, &zone);
	else
		return -EINVAL;

	if (err < 0)
		return err;

	h = nf_conntrack_find_get(info->net, &zone, &tuple);
	if (!h)
		return -ENOENT;

	ct = nf_ct_tuplehash_to_ctrack(h);

	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb2) {
		nf_ct_put(ct);
		return -ENOMEM;
	}

	err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).portid,
				  info->nlh->nlmsg_seq,
				  NFNL_MSG_TYPE(info->nlh->nlmsg_type), ct,
				  true, 0);
	nf_ct_put(ct);
	if (err <= 0) {
		kfree_skb(skb2);
		return -ENOMEM;
	}

	return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
}

static int ctnetlink_done_list(struct netlink_callback *cb)
{
	struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;

	if (ctx->last)
		nf_ct_put(ctx->last);

	return 0;
}

#ifdef CONFIG_NF_CONNTRACK_EVENTS
static int ctnetlink_dump_one_entry(struct sk_buff *skb,
				    struct netlink_callback *cb,
				    struct nf_conn *ct,
				    bool dying)
{
	struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;
	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	u8 l3proto = nfmsg->nfgen_family;
	int res;

	if (l3proto && nf_ct_l3num(ct) != l3proto)
		return 0;

	if (ctx->last) {
		if (ct != ctx->last)
			return 0;

		ctx->last = NULL;
	}

	/* We can't dump extension info for the unconfirmed
	 * list because unconfirmed conntracks can have
	 * ct->ext reallocated (and thus freed).
	 *
	 * In the dying list case ct->ext can't be free'd
	 * until after we drop pcpu->lock.
	 */
	res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq,
				  NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
				  ct, dying, 0);
	if (res < 0) {
		if (!refcount_inc_not_zero(&ct->ct_general.use))
			return 0;

		ctx->last = ct;
	}

	return res;
}
#endif

static int
ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
{
	return 0;
}

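/* Dump the ecache dying list: conntracks that were removed from the main
 * table but still await delivery of their destroy event.
 */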
static int
ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;
	struct nf_conn *last = ctx->last;
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	const struct net *net = sock_net(skb->sk);
	struct nf_conntrack_net_ecache *ecache_net;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
#endif

	if (ctx->done)
		return 0;

	ctx->last = NULL;

#ifdef CONFIG_NF_CONNTRACK_EVENTS
	ecache_net = nf_conn_pernet_ecache(net);
	spin_lock_bh(&ecache_net->dying_lock);

	hlist_nulls_for_each_entry(h, n, &ecache_net->dying_list, hnnode) {
		struct nf_conn *ct;
		int res;

		ct = nf_ct_tuplehash_to_ctrack(h);
		if (last && last != ct)
			continue;

		res = ctnetlink_dump_one_entry(skb, cb, ct, true);
		if (res < 0) {
			spin_unlock_bh(&ecache_net->dying_lock);
			nf_ct_put(last);
			return skb->len;
		}

		nf_ct_put(last);
		last = NULL;
	}

	spin_unlock_bh(&ecache_net->dying_lock);
#endif
	ctx->done = true;
	nf_ct_put(last);

	return skb->len;
}

static int ctnetlink_get_ct_dying(struct sk_buff *skb,
				  const struct nfnl_info *info,
				  const struct nlattr * const cda[])
{
	if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = ctnetlink_dump_dying,
			.done = ctnetlink_done_list,
		};
		return netlink_dump_start(info->sk, skb, info->nlh, &c);
	}

	return -EOPNOTSUPP;
}

static int ctnetlink_get_ct_unconfirmed(struct sk_buff *skb,
					const struct nfnl_info *info,
					const struct nlattr * const cda[])
{
	if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = ctnetlink_dump_unconfirmed,
			.done = ctnetlink_done_list,
		};
		return netlink_dump_start(info->sk, skb, info->nlh, &c);
	}

	return -EOPNOTSUPP;
}

#if IS_ENABLED(CONFIG_NF_NAT)
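/* Hand CTA_NAT_SRC/CTA_NAT_DST attributes to the NAT core. If the NAT
 * module is not loaded yet, drop the locks, try to load it and signal
 * the caller to retry with -EAGAIN.
 */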
static int
ctnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
	__must_hold(RCU)
{
	const struct nf_nat_hook *nat_hook;
	int err;

	nat_hook = rcu_dereference(nf_nat_hook);
	if (!nat_hook) {
#ifdef CONFIG_MODULES
		rcu_read_unlock();
		nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
		if (request_module("nf-nat") < 0) {
			nfnl_lock(NFNL_SUBSYS_CTNETLINK);
			rcu_read_lock();
			return -EOPNOTSUPP;
		}
		nfnl_lock(NFNL_SUBSYS_CTNETLINK);
		rcu_read_lock();
		nat_hook = rcu_dereference(nf_nat_hook);
		if (nat_hook)
			return -EAGAIN;
#endif
		return -EOPNOTSUPP;
	}

	err = nat_hook->parse_nat_setup(ct, manip, attr);
	if (err == -EAGAIN) {
#ifdef CONFIG_MODULES
		rcu_read_unlock();
		nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
		if (request_module("nf-nat-%u", nf_ct_l3num(ct)) < 0) {
			nfnl_lock(NFNL_SUBSYS_CTNETLINK);
			rcu_read_lock();
			return -EOPNOTSUPP;
		}
		nfnl_lock(NFNL_SUBSYS_CTNETLINK);
		rcu_read_lock();
#else
		err = -EOPNOTSUPP;
#endif
	}
	return err;
}
#endif

static int
ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
{
	return nf_ct_change_status_common(ct, ntohl(nla_get_be32(cda[CTA_STATUS])));
}

static int
ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
{
#if IS_ENABLED(CONFIG_NF_NAT)
	int ret;

	if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
		return 0;

	ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST,
					cda[CTA_NAT_DST]);
	if (ret < 0)
		return ret;

	return ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_SRC,
					 cda[CTA_NAT_SRC]);
#else
	if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
		return 0;
	return -EOPNOTSUPP;
#endif
}

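/* Update or clear the helper of an existing conntrack entry. An empty
 * CTA_HELP_NAME string detaches the current helper.
 */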
static int ctnetlink_change_helper(struct nf_conn *ct,
				   const struct nlattr * const cda[])
{
	struct nf_conntrack_helper *helper;
	struct nf_conn_help *help = nfct_help(ct);
	char *helpname = NULL;
	struct nlattr *helpinfo = NULL;
	int err;

	err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
	if (err < 0)
		return err;

	/* don't change helper of sibling connections */
	if (ct->master) {
		/* If we try to change the helper to the same thing twice,
		 * treat the second attempt as a no-op instead of returning
		 * an error.
		 */
		err = -EBUSY;
		if (help) {
			rcu_read_lock();
			helper = rcu_dereference(help->helper);
			if (helper && !strcmp(helper->name, helpname))
				err = 0;
			rcu_read_unlock();
		}

		return err;
	}

	if (!strcmp(helpname, "")) {
		if (help && help->helper) {
			/* we had a helper before ... */
			nf_ct_remove_expectations(ct);
			RCU_INIT_POINTER(help->helper, NULL);
		}

		return 0;
	}

	rcu_read_lock();
	helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
					    nf_ct_protonum(ct));
	if (helper == NULL) {
		rcu_read_unlock();
		return -EOPNOTSUPP;
	}

	if (help) {
		if (rcu_access_pointer(help->helper) == helper) {
			/* update private helper data if allowed. */
			if (helper->from_nlattr)
				helper->from_nlattr(helpinfo, ct);
			err = 0;
		} else
			err = -EBUSY;
	} else {
		/* we cannot set a helper for an existing conntrack */
		err = -EOPNOTSUPP;
	}

	rcu_read_unlock();
	return err;
}

static int ctnetlink_change_timeout(struct nf_conn *ct,
				    const struct nlattr * const cda[])
{
	return __nf_ct_change_timeout(ct, (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ);
}

#if defined(CONFIG_NF_CONNTRACK_MARK)
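/* Update ct->mark from CTA_MARK, preserving the bits that fall outside
 * CTA_MARK_MASK.
 */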
2003static void ctnetlink_change_mark(struct nf_conn *ct,
2004 const struct nlattr * const cda[])
2005{
2006 u32 mark, newmark, mask = 0;
2007
2008 if (cda[CTA_MARK_MASK])
2009 mask = ~ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
2010
2011 mark = ntohl(nla_get_be32(cda[CTA_MARK]));
2012 newmark = (READ_ONCE(ct->mark) & mask) ^ mark;
2013 if (newmark != READ_ONCE(ct->mark))
2014 WRITE_ONCE(ct->mark, newmark);
2015}
2016#endif
2017
2018static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
2019 [CTA_PROTOINFO_TCP] = { .type = NLA_NESTED },
2020 [CTA_PROTOINFO_DCCP] = { .type = NLA_NESTED },
2021 [CTA_PROTOINFO_SCTP] = { .type = NLA_NESTED },
2022};
2023
2024static int ctnetlink_change_protoinfo(struct nf_conn *ct,
2025 const struct nlattr * const cda[])
2026{
2027 const struct nlattr *attr = cda[CTA_PROTOINFO];
2028 const struct nf_conntrack_l4proto *l4proto;
2029 struct nlattr *tb[CTA_PROTOINFO_MAX+1];
2030 int err = 0;
2031
2032 err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_MAX, nla: attr,
2033 policy: protoinfo_policy, NULL);
2034 if (err < 0)
2035 return err;
2036
2037 l4proto = nf_ct_l4proto_find(l4proto: nf_ct_protonum(ct));
2038 if (l4proto->from_nlattr)
2039 err = l4proto->from_nlattr(tb, ct);
2040
2041 return err;
2042}
2043
2044static const struct nla_policy seqadj_policy[CTA_SEQADJ_MAX+1] = {
2045 [CTA_SEQADJ_CORRECTION_POS] = { .type = NLA_U32 },
2046 [CTA_SEQADJ_OFFSET_BEFORE] = { .type = NLA_U32 },
2047 [CTA_SEQADJ_OFFSET_AFTER] = { .type = NLA_U32 },
2048};
2049
2050static int change_seq_adj(struct nf_ct_seqadj *seq,
2051 const struct nlattr * const attr)
2052{
2053 int err;
2054 struct nlattr *cda[CTA_SEQADJ_MAX+1];
2055
2056 err = nla_parse_nested_deprecated(tb: cda, CTA_SEQADJ_MAX, nla: attr,
2057 policy: seqadj_policy, NULL);
2058 if (err < 0)
2059 return err;
2060
2061 if (!cda[CTA_SEQADJ_CORRECTION_POS])
2062 return -EINVAL;
2063
2064 seq->correction_pos =
2065 ntohl(nla_get_be32(cda[CTA_SEQADJ_CORRECTION_POS]));
2066
2067 if (!cda[CTA_SEQADJ_OFFSET_BEFORE])
2068 return -EINVAL;
2069
2070 seq->offset_before =
2071 ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_BEFORE]));
2072
2073 if (!cda[CTA_SEQADJ_OFFSET_AFTER])
2074 return -EINVAL;
2075
2076 seq->offset_after =
2077 ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_AFTER]));
2078
2079 return 0;
2080}
2081
2082static int
2083ctnetlink_change_seq_adj(struct nf_conn *ct,
2084 const struct nlattr * const cda[])
2085{
2086 struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
2087 int ret = 0;
2088
2089 if (!seqadj)
2090 return 0;
2091
2092 spin_lock_bh(lock: &ct->lock);
2093 if (cda[CTA_SEQ_ADJ_ORIG]) {
2094 ret = change_seq_adj(seq: &seqadj->seq[IP_CT_DIR_ORIGINAL],
2095 attr: cda[CTA_SEQ_ADJ_ORIG]);
2096 if (ret < 0)
2097 goto err;
2098
2099 set_bit(nr: IPS_SEQ_ADJUST_BIT, addr: &ct->status);
2100 }
2101
2102 if (cda[CTA_SEQ_ADJ_REPLY]) {
2103 ret = change_seq_adj(seq: &seqadj->seq[IP_CT_DIR_REPLY],
2104 attr: cda[CTA_SEQ_ADJ_REPLY]);
2105 if (ret < 0)
2106 goto err;
2107
2108 set_bit(nr: IPS_SEQ_ADJUST_BIT, addr: &ct->status);
2109 }
2110
2111 spin_unlock_bh(lock: &ct->lock);
2112 return 0;
2113err:
2114 spin_unlock_bh(lock: &ct->lock);
2115 return ret;
2116}
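
/* Each direction is passed as a nested attribute and change_seq_adj() above
 * insists on all three fields, so a request looks roughly like this (layout
 * derived from the parsing code above, shown for illustration):
 *
 *	CTA_SEQ_ADJ_ORIG (nested)
 *	  CTA_SEQADJ_CORRECTION_POS
 *	  CTA_SEQADJ_OFFSET_BEFORE
 *	  CTA_SEQADJ_OFFSET_AFTER
 *	CTA_SEQ_ADJ_REPLY (nested, same layout)
 *
 * Both directions are updated under ct->lock to serialize against the
 * packet-path users of the seqadj extension.
 */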
2117
2118static const struct nla_policy synproxy_policy[CTA_SYNPROXY_MAX + 1] = {
2119 [CTA_SYNPROXY_ISN] = { .type = NLA_U32 },
2120 [CTA_SYNPROXY_ITS] = { .type = NLA_U32 },
2121 [CTA_SYNPROXY_TSOFF] = { .type = NLA_U32 },
2122};
2123
2124static int ctnetlink_change_synproxy(struct nf_conn *ct,
2125 const struct nlattr * const cda[])
2126{
2127 struct nf_conn_synproxy *synproxy = nfct_synproxy(ct);
2128 struct nlattr *tb[CTA_SYNPROXY_MAX + 1];
2129 int err;
2130
2131 if (!synproxy)
2132 return 0;
2133
2134 err = nla_parse_nested_deprecated(tb, CTA_SYNPROXY_MAX,
2135 nla: cda[CTA_SYNPROXY], policy: synproxy_policy,
2136 NULL);
2137 if (err < 0)
2138 return err;
2139
2140 if (!tb[CTA_SYNPROXY_ISN] ||
2141 !tb[CTA_SYNPROXY_ITS] ||
2142 !tb[CTA_SYNPROXY_TSOFF])
2143 return -EINVAL;
2144
2145 synproxy->isn = ntohl(nla_get_be32(tb[CTA_SYNPROXY_ISN]));
2146 synproxy->its = ntohl(nla_get_be32(tb[CTA_SYNPROXY_ITS]));
2147 synproxy->tsoff = ntohl(nla_get_be32(tb[CTA_SYNPROXY_TSOFF]));
2148
2149 return 0;
2150}
2151
2152static int
2153ctnetlink_attach_labels(struct nf_conn *ct, const struct nlattr * const cda[])
2154{
2155#ifdef CONFIG_NF_CONNTRACK_LABELS
2156 size_t len = nla_len(nla: cda[CTA_LABELS]);
2157 const void *mask = cda[CTA_LABELS_MASK];
2158
2159 if (len & (sizeof(u32)-1)) /* must be multiple of u32 */
2160 return -EINVAL;
2161
2162 if (mask) {
2163 if (nla_len(nla: cda[CTA_LABELS_MASK]) == 0 ||
2164 nla_len(nla: cda[CTA_LABELS_MASK]) != len)
2165 return -EINVAL;
2166 mask = nla_data(nla: cda[CTA_LABELS_MASK]);
2167 }
2168
2169 len /= sizeof(u32);
2170
2171 return nf_connlabels_replace(ct, data: nla_data(nla: cda[CTA_LABELS]), mask, words: len);
2172#else
2173 return -EOPNOTSUPP;
2174#endif
2175}
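
/* CTA_LABELS must be a multiple of 4 bytes and the optional CTA_LABELS_MASK
 * must have exactly the same length; the byte length is converted to 32-bit
 * words before calling nf_connlabels_replace().  Worked example: a 128-bit
 * label blob is 16 bytes, so len becomes 16 / sizeof(u32) = 4 words.
 */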
2176
2177static int
2178ctnetlink_change_conntrack(struct nf_conn *ct,
2179 const struct nlattr * const cda[])
2180{
2181 int err;
2182
2183	/* NAT changes and master assignment are only allowed for new conntracks */
2184 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST] || cda[CTA_TUPLE_MASTER])
2185 return -EOPNOTSUPP;
2186
2187 if (cda[CTA_HELP]) {
2188 err = ctnetlink_change_helper(ct, cda);
2189 if (err < 0)
2190 return err;
2191 }
2192
2193 if (cda[CTA_TIMEOUT]) {
2194 err = ctnetlink_change_timeout(ct, cda);
2195 if (err < 0)
2196 return err;
2197 }
2198
2199 if (cda[CTA_STATUS]) {
2200 err = ctnetlink_change_status(ct, cda);
2201 if (err < 0)
2202 return err;
2203 }
2204
2205 if (cda[CTA_PROTOINFO]) {
2206 err = ctnetlink_change_protoinfo(ct, cda);
2207 if (err < 0)
2208 return err;
2209 }
2210
2211#if defined(CONFIG_NF_CONNTRACK_MARK)
2212 if (cda[CTA_MARK])
2213 ctnetlink_change_mark(ct, cda);
2214#endif
2215
2216 if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) {
2217 err = ctnetlink_change_seq_adj(ct, cda);
2218 if (err < 0)
2219 return err;
2220 }
2221
2222 if (cda[CTA_SYNPROXY]) {
2223 err = ctnetlink_change_synproxy(ct, cda);
2224 if (err < 0)
2225 return err;
2226 }
2227
2228 if (cda[CTA_LABELS]) {
2229 err = ctnetlink_attach_labels(ct, cda);
2230 if (err < 0)
2231 return err;
2232 }
2233
2234 return 0;
2235}
2236
2237static struct nf_conn *
2238ctnetlink_create_conntrack(struct net *net,
2239 const struct nf_conntrack_zone *zone,
2240 const struct nlattr * const cda[],
2241 struct nf_conntrack_tuple *otuple,
2242 struct nf_conntrack_tuple *rtuple,
2243 u8 u3)
2244{
2245 struct nf_conn *ct;
2246 int err = -EINVAL;
2247 struct nf_conntrack_helper *helper;
2248 struct nf_conn_tstamp *tstamp;
2249 u64 timeout;
2250
2251 ct = nf_conntrack_alloc(net, zone, orig: otuple, repl: rtuple, GFP_ATOMIC);
2252 if (IS_ERR(ptr: ct))
2253 return ERR_PTR(error: -ENOMEM);
2254
2255 if (!cda[CTA_TIMEOUT])
2256 goto err1;
2257
2258 rcu_read_lock();
2259 if (cda[CTA_HELP]) {
2260 char *helpname = NULL;
2261 struct nlattr *helpinfo = NULL;
2262
2263 err = ctnetlink_parse_help(attr: cda[CTA_HELP], helper_name: &helpname, helpinfo: &helpinfo);
2264 if (err < 0)
2265 goto err2;
2266
2267 helper = __nf_conntrack_helper_find(name: helpname, l3num: nf_ct_l3num(ct),
2268 protonum: nf_ct_protonum(ct));
2269 if (helper == NULL) {
2270 rcu_read_unlock();
2271#ifdef CONFIG_MODULES
2272 if (request_module("nfct-helper-%s", helpname) < 0) {
2273 err = -EOPNOTSUPP;
2274 goto err1;
2275 }
2276
2277 rcu_read_lock();
2278 helper = __nf_conntrack_helper_find(name: helpname,
2279 l3num: nf_ct_l3num(ct),
2280 protonum: nf_ct_protonum(ct));
2281 if (helper) {
2282 err = -EAGAIN;
2283 goto err2;
2284 }
2285 rcu_read_unlock();
2286#endif
2287 err = -EOPNOTSUPP;
2288 goto err1;
2289 } else {
2290 struct nf_conn_help *help;
2291
2292 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
2293 if (help == NULL) {
2294 err = -ENOMEM;
2295 goto err2;
2296 }
2297 /* set private helper data if allowed. */
2298 if (helper->from_nlattr)
2299 helper->from_nlattr(helpinfo, ct);
2300
2301 /* disable helper auto-assignment for this entry */
2302 ct->status |= IPS_HELPER;
2303 RCU_INIT_POINTER(help->helper, helper);
2304 }
2305 }
2306
2307 err = ctnetlink_setup_nat(ct, cda);
2308 if (err < 0)
2309 goto err2;
2310
2311 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
2312 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
2313 nf_ct_ecache_ext_add(ct, ctmask: 0, expmask: 0, GFP_ATOMIC);
2314 nf_ct_labels_ext_add(ct);
2315 nfct_seqadj_ext_add(ct);
2316 nfct_synproxy_ext_add(ct);
2317
2318 /* we must add conntrack extensions before confirmation. */
2319 ct->status |= IPS_CONFIRMED;
2320
2321 timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;
2322 __nf_ct_set_timeout(ct, timeout);
2323
2324 if (cda[CTA_STATUS]) {
2325 err = ctnetlink_change_status(ct, cda);
2326 if (err < 0)
2327 goto err2;
2328 }
2329
2330 if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) {
2331 err = ctnetlink_change_seq_adj(ct, cda);
2332 if (err < 0)
2333 goto err2;
2334 }
2335
2336 memset(&ct->proto, 0, sizeof(ct->proto));
2337 if (cda[CTA_PROTOINFO]) {
2338 err = ctnetlink_change_protoinfo(ct, cda);
2339 if (err < 0)
2340 goto err2;
2341 }
2342
2343 if (cda[CTA_SYNPROXY]) {
2344 err = ctnetlink_change_synproxy(ct, cda);
2345 if (err < 0)
2346 goto err2;
2347 }
2348
2349#if defined(CONFIG_NF_CONNTRACK_MARK)
2350 if (cda[CTA_MARK])
2351 ctnetlink_change_mark(ct, cda);
2352#endif
2353
2354 /* setup master conntrack: this is a confirmed expectation */
2355 if (cda[CTA_TUPLE_MASTER]) {
2356 struct nf_conntrack_tuple master;
2357 struct nf_conntrack_tuple_hash *master_h;
2358 struct nf_conn *master_ct;
2359
2360 err = ctnetlink_parse_tuple(cda, tuple: &master, type: CTA_TUPLE_MASTER,
2361 l3num: u3, NULL);
2362 if (err < 0)
2363 goto err2;
2364
2365 master_h = nf_conntrack_find_get(net, zone, tuple: &master);
2366 if (master_h == NULL) {
2367 err = -ENOENT;
2368 goto err2;
2369 }
2370 master_ct = nf_ct_tuplehash_to_ctrack(hash: master_h);
2371 __set_bit(IPS_EXPECTED_BIT, &ct->status);
2372 ct->master = master_ct;
2373 }
2374 tstamp = nf_conn_tstamp_find(ct);
2375 if (tstamp)
2376 tstamp->start = ktime_get_real_ns();
2377
2378 err = nf_conntrack_hash_check_insert(ct);
2379 if (err < 0)
2380 goto err3;
2381
2382 rcu_read_unlock();
2383
2384 return ct;
2385
2386err3:
2387 if (ct->master)
2388 nf_ct_put(ct: ct->master);
2389err2:
2390 rcu_read_unlock();
2391err1:
2392 nf_conntrack_free(ct);
2393 return ERR_PTR(error: err);
2394}
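
/* Error unwinding in ctnetlink_create_conntrack(): err3 drops the master
 * reference taken for CTA_TUPLE_MASTER, err2 releases the RCU read lock and
 * err1 frees the still-unconfirmed conntrack.  When the requested helper is
 * missing, the function drops the RCU lock, loads "nfct-helper-<name>" via
 * request_module() and returns -EAGAIN if the helper then appears, the
 * intent being that nfnetlink replays the request with the module loaded
 * (behaviour of the nfnetlink core, not of this file).
 */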
2395
2396static int ctnetlink_new_conntrack(struct sk_buff *skb,
2397 const struct nfnl_info *info,
2398 const struct nlattr * const cda[])
2399{
2400 struct nf_conntrack_tuple otuple, rtuple;
2401 struct nf_conntrack_tuple_hash *h = NULL;
2402 u_int8_t u3 = info->nfmsg->nfgen_family;
2403 struct nf_conntrack_zone zone;
2404 struct nf_conn *ct;
2405 int err;
2406
2407 err = ctnetlink_parse_zone(attr: cda[CTA_ZONE], zone: &zone);
2408 if (err < 0)
2409 return err;
2410
2411 if (cda[CTA_TUPLE_ORIG]) {
2412 err = ctnetlink_parse_tuple(cda, tuple: &otuple, type: CTA_TUPLE_ORIG,
2413 l3num: u3, zone: &zone);
2414 if (err < 0)
2415 return err;
2416 }
2417
2418 if (cda[CTA_TUPLE_REPLY]) {
2419 err = ctnetlink_parse_tuple(cda, tuple: &rtuple, type: CTA_TUPLE_REPLY,
2420 l3num: u3, zone: &zone);
2421 if (err < 0)
2422 return err;
2423 }
2424
2425 if (cda[CTA_TUPLE_ORIG])
2426 h = nf_conntrack_find_get(net: info->net, zone: &zone, tuple: &otuple);
2427 else if (cda[CTA_TUPLE_REPLY])
2428 h = nf_conntrack_find_get(net: info->net, zone: &zone, tuple: &rtuple);
2429
2430 if (h == NULL) {
2431 err = -ENOENT;
2432 if (info->nlh->nlmsg_flags & NLM_F_CREATE) {
2433 enum ip_conntrack_events events;
2434
2435 if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
2436 return -EINVAL;
2437 if (otuple.dst.protonum != rtuple.dst.protonum)
2438 return -EINVAL;
2439
2440 ct = ctnetlink_create_conntrack(net: info->net, zone: &zone, cda,
2441 otuple: &otuple, rtuple: &rtuple, u3);
2442 if (IS_ERR(ptr: ct))
2443 return PTR_ERR(ptr: ct);
2444
2445 err = 0;
2446 if (test_bit(IPS_EXPECTED_BIT, &ct->status))
2447 events = 1 << IPCT_RELATED;
2448 else
2449 events = 1 << IPCT_NEW;
2450
2451 if (cda[CTA_LABELS] &&
2452 ctnetlink_attach_labels(ct, cda) == 0)
2453 events |= (1 << IPCT_LABEL);
2454
2455 nf_conntrack_eventmask_report(eventmask: (1 << IPCT_REPLY) |
2456 (1 << IPCT_ASSURED) |
2457 (1 << IPCT_HELPER) |
2458 (1 << IPCT_PROTOINFO) |
2459 (1 << IPCT_SEQADJ) |
2460 (1 << IPCT_MARK) |
2461 (1 << IPCT_SYNPROXY) |
2462 events,
2463 ct, NETLINK_CB(skb).portid,
2464 report: nlmsg_report(nlh: info->nlh));
2465 nf_ct_put(ct);
2466 }
2467
2468 return err;
2469 }
2470 /* implicit 'else' */
2471
2472 err = -EEXIST;
2473 ct = nf_ct_tuplehash_to_ctrack(hash: h);
2474 if (!(info->nlh->nlmsg_flags & NLM_F_EXCL)) {
2475 err = ctnetlink_change_conntrack(ct, cda);
2476 if (err == 0) {
2477 nf_conntrack_eventmask_report(eventmask: (1 << IPCT_REPLY) |
2478 (1 << IPCT_ASSURED) |
2479 (1 << IPCT_HELPER) |
2480 (1 << IPCT_LABEL) |
2481 (1 << IPCT_PROTOINFO) |
2482 (1 << IPCT_SEQADJ) |
2483 (1 << IPCT_MARK) |
2484 (1 << IPCT_SYNPROXY),
2485 ct, NETLINK_CB(skb).portid,
2486 report: nlmsg_report(nlh: info->nlh));
2487 }
2488 }
2489
2490 nf_ct_put(ct);
2491 return err;
2492}
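
/* For reference, a hedged userspace sketch of the create path using
 * libnetfilter_conntrack (addresses, ports and the 120s timeout are
 * illustrative assumptions; CTA_TIMEOUT is mandatory for new entries, as
 * enforced in ctnetlink_create_conntrack() above):
 *
 *	struct nfct_handle *h = nfct_open(CONNTRACK, 0);
 *	struct nf_conntrack *ct = nfct_new();
 *
 *	nfct_set_attr_u8(ct, ATTR_L3PROTO, AF_INET);
 *	nfct_set_attr_u32(ct, ATTR_IPV4_SRC, inet_addr("192.0.2.1"));
 *	nfct_set_attr_u32(ct, ATTR_IPV4_DST, inet_addr("192.0.2.2"));
 *	nfct_set_attr_u8(ct, ATTR_L4PROTO, IPPROTO_UDP);
 *	nfct_set_attr_u16(ct, ATTR_PORT_SRC, htons(12345));
 *	nfct_set_attr_u16(ct, ATTR_PORT_DST, htons(53));
 *	nfct_set_attr_u32(ct, ATTR_TIMEOUT, 120);
 *
 *	nfct_query(h, NFCT_Q_CREATE, ct);
 *	nfct_destroy(ct);
 *	nfct_close(h);
 */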
2493
2494static int
2495ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
2496 __u16 cpu, const struct ip_conntrack_stat *st)
2497{
2498 struct nlmsghdr *nlh;
2499 unsigned int flags = portid ? NLM_F_MULTI : 0, event;
2500
2501 event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK,
2502 msg_type: IPCTNL_MSG_CT_GET_STATS_CPU);
2503 nlh = nfnl_msg_put(skb, portid, seq, type: event, flags, AF_UNSPEC,
2504 NFNETLINK_V0, htons(cpu));
2505 if (!nlh)
2506 goto nlmsg_failure;
2507
2508 if (nla_put_be32(skb, attrtype: CTA_STATS_FOUND, htonl(st->found)) ||
2509 nla_put_be32(skb, attrtype: CTA_STATS_INVALID, htonl(st->invalid)) ||
2510 nla_put_be32(skb, attrtype: CTA_STATS_INSERT, htonl(st->insert)) ||
2511 nla_put_be32(skb, attrtype: CTA_STATS_INSERT_FAILED,
2512 htonl(st->insert_failed)) ||
2513 nla_put_be32(skb, attrtype: CTA_STATS_DROP, htonl(st->drop)) ||
2514 nla_put_be32(skb, attrtype: CTA_STATS_EARLY_DROP, htonl(st->early_drop)) ||
2515 nla_put_be32(skb, attrtype: CTA_STATS_ERROR, htonl(st->error)) ||
2516 nla_put_be32(skb, attrtype: CTA_STATS_SEARCH_RESTART,
2517 htonl(st->search_restart)) ||
2518 nla_put_be32(skb, attrtype: CTA_STATS_CLASH_RESOLVE,
2519 htonl(st->clash_resolve)) ||
2520 nla_put_be32(skb, attrtype: CTA_STATS_CHAIN_TOOLONG,
2521 htonl(st->chaintoolong)))
2522 goto nla_put_failure;
2523
2524 nlmsg_end(skb, nlh);
2525 return skb->len;
2526
2527nla_put_failure:
2528nlmsg_failure:
2529 nlmsg_cancel(skb, nlh);
2530 return -1;
2531}
2532
2533static int
2534ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
2535{
2536 int cpu;
2537 struct net *net = sock_net(sk: skb->sk);
2538
2539 if (cb->args[0] == nr_cpu_ids)
2540 return 0;
2541
2542 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
2543 const struct ip_conntrack_stat *st;
2544
2545 if (!cpu_possible(cpu))
2546 continue;
2547
2548 st = per_cpu_ptr(net->ct.stat, cpu);
2549 if (ctnetlink_ct_stat_cpu_fill_info(skb,
2550 NETLINK_CB(cb->skb).portid,
2551 seq: cb->nlh->nlmsg_seq,
2552 cpu, st) < 0)
2553 break;
2554 }
2555 cb->args[0] = cpu;
2556
2557 return skb->len;
2558}
2559
2560static int ctnetlink_stat_ct_cpu(struct sk_buff *skb,
2561 const struct nfnl_info *info,
2562 const struct nlattr * const cda[])
2563{
2564 if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
2565 struct netlink_dump_control c = {
2566 .dump = ctnetlink_ct_stat_cpu_dump,
2567 };
2568 return netlink_dump_start(ssk: info->sk, skb, nlh: info->nlh, control: &c);
2569 }
2570
2571 return 0;
2572}
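
/* The per-CPU statistics are only available as a dump; a plain GET request
 * falls through and returns 0.  A rough libmnl request sketch (buffer setup
 * and the receive loop are omitted and assumed, shown for illustration):
 *
 *	struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
 *	struct nfgenmsg *nfh;
 *
 *	nlh->nlmsg_type = (NFNL_SUBSYS_CTNETLINK << 8) |
 *			  IPCTNL_MSG_CT_GET_STATS_CPU;
 *	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
 *	nfh = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfh));
 *	nfh->nfgen_family = AF_UNSPEC;
 *	nfh->version = NFNETLINK_V0;
 *	mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
 */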
2573
2574static int
2575ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
2576 struct net *net)
2577{
2578 unsigned int flags = portid ? NLM_F_MULTI : 0, event;
2579 unsigned int nr_conntracks;
2580 struct nlmsghdr *nlh;
2581
2582 event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, msg_type: IPCTNL_MSG_CT_GET_STATS);
2583 nlh = nfnl_msg_put(skb, portid, seq, type: event, flags, AF_UNSPEC,
2584 NFNETLINK_V0, res_id: 0);
2585 if (!nlh)
2586 goto nlmsg_failure;
2587
2588 nr_conntracks = nf_conntrack_count(net);
2589 if (nla_put_be32(skb, attrtype: CTA_STATS_GLOBAL_ENTRIES, htonl(nr_conntracks)))
2590 goto nla_put_failure;
2591
2592 if (nla_put_be32(skb, attrtype: CTA_STATS_GLOBAL_MAX_ENTRIES, htonl(nf_conntrack_max)))
2593 goto nla_put_failure;
2594
2595 nlmsg_end(skb, nlh);
2596 return skb->len;
2597
2598nla_put_failure:
2599nlmsg_failure:
2600 nlmsg_cancel(skb, nlh);
2601 return -1;
2602}
2603
2604static int ctnetlink_stat_ct(struct sk_buff *skb, const struct nfnl_info *info,
2605 const struct nlattr * const cda[])
2606{
2607 struct sk_buff *skb2;
2608 int err;
2609
2610 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2611 if (skb2 == NULL)
2612 return -ENOMEM;
2613
2614 err = ctnetlink_stat_ct_fill_info(skb: skb2, NETLINK_CB(skb).portid,
2615 seq: info->nlh->nlmsg_seq,
2616 NFNL_MSG_TYPE(info->nlh->nlmsg_type),
2617 net: sock_net(sk: skb->sk));
2618 if (err <= 0) {
2619 kfree_skb(skb: skb2);
2620 return -ENOMEM;
2621 }
2622
2623 return nfnetlink_unicast(skb: skb2, net: info->net, NETLINK_CB(skb).portid);
2624}
2625
2626static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
2627 [CTA_EXPECT_MASTER] = { .type = NLA_NESTED },
2628 [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED },
2629 [CTA_EXPECT_MASK] = { .type = NLA_NESTED },
2630 [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 },
2631 [CTA_EXPECT_ID] = { .type = NLA_U32 },
2632 [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING,
2633 .len = NF_CT_HELPER_NAME_LEN - 1 },
2634 [CTA_EXPECT_ZONE] = { .type = NLA_U16 },
2635 [CTA_EXPECT_FLAGS] = { .type = NLA_U32 },
2636 [CTA_EXPECT_CLASS] = { .type = NLA_U32 },
2637 [CTA_EXPECT_NAT] = { .type = NLA_NESTED },
2638 [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING },
2639};
2640
2641static struct nf_conntrack_expect *
2642ctnetlink_alloc_expect(const struct nlattr *const cda[], struct nf_conn *ct,
2643 struct nf_conntrack_helper *helper,
2644 struct nf_conntrack_tuple *tuple,
2645 struct nf_conntrack_tuple *mask);
2646
2647#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
2648static size_t
2649ctnetlink_glue_build_size(const struct nf_conn *ct)
2650{
2651 return 3 * nla_total_size(payload: 0) /* CTA_TUPLE_ORIG|REPL|MASTER */
2652 + 3 * nla_total_size(payload: 0) /* CTA_TUPLE_IP */
2653 + 3 * nla_total_size(payload: 0) /* CTA_TUPLE_PROTO */
2654 + 3 * nla_total_size(payload: sizeof(u_int8_t)) /* CTA_PROTO_NUM */
2655 + nla_total_size(payload: sizeof(u_int32_t)) /* CTA_ID */
2656 + nla_total_size(payload: sizeof(u_int32_t)) /* CTA_STATUS */
2657 + nla_total_size(payload: sizeof(u_int32_t)) /* CTA_TIMEOUT */
2658 + nla_total_size(payload: 0) /* CTA_PROTOINFO */
2659 + nla_total_size(payload: 0) /* CTA_HELP */
2660 + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
2661 + ctnetlink_secctx_size(ct)
2662 + ctnetlink_acct_size(ct)
2663 + ctnetlink_timestamp_size(ct)
2664#if IS_ENABLED(CONFIG_NF_NAT)
2665 + 2 * nla_total_size(payload: 0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
2666 + 6 * nla_total_size(payload: sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
2667#endif
2668#ifdef CONFIG_NF_CONNTRACK_MARK
2669 + nla_total_size(payload: sizeof(u_int32_t)) /* CTA_MARK */
2670#endif
2671#ifdef CONFIG_NF_CONNTRACK_ZONES
2672 + nla_total_size(payload: sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
2673#endif
2674 + ctnetlink_proto_size(ct)
2675 ;
2676}
2677
2678static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
2679{
2680 const struct nf_conntrack_zone *zone;
2681 struct nlattr *nest_parms;
2682
2683 zone = nf_ct_zone(ct);
2684
2685 nest_parms = nla_nest_start(skb, attrtype: CTA_TUPLE_ORIG);
2686 if (!nest_parms)
2687 goto nla_put_failure;
2688 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
2689 goto nla_put_failure;
2690 if (ctnetlink_dump_zone_id(skb, attrtype: CTA_TUPLE_ZONE, zone,
2691 NF_CT_ZONE_DIR_ORIG) < 0)
2692 goto nla_put_failure;
2693 nla_nest_end(skb, start: nest_parms);
2694
2695 nest_parms = nla_nest_start(skb, attrtype: CTA_TUPLE_REPLY);
2696 if (!nest_parms)
2697 goto nla_put_failure;
2698 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
2699 goto nla_put_failure;
2700 if (ctnetlink_dump_zone_id(skb, attrtype: CTA_TUPLE_ZONE, zone,
2701 NF_CT_ZONE_DIR_REPL) < 0)
2702 goto nla_put_failure;
2703 nla_nest_end(skb, start: nest_parms);
2704
2705 if (ctnetlink_dump_zone_id(skb, attrtype: CTA_ZONE, zone,
2706 NF_CT_DEFAULT_ZONE_DIR) < 0)
2707 goto nla_put_failure;
2708
2709 if (ctnetlink_dump_id(skb, ct) < 0)
2710 goto nla_put_failure;
2711
2712 if (ctnetlink_dump_status(skb, ct) < 0)
2713 goto nla_put_failure;
2714
2715 if (ctnetlink_dump_timeout(skb, ct, skip_zero: false) < 0)
2716 goto nla_put_failure;
2717
2718 if (ctnetlink_dump_protoinfo(skb, ct, destroy: false) < 0)
2719 goto nla_put_failure;
2720
2721 if (ctnetlink_dump_acct(skb, ct, type: IPCTNL_MSG_CT_GET) < 0 ||
2722 ctnetlink_dump_timestamp(skb, ct) < 0)
2723 goto nla_put_failure;
2724
2725 if (ctnetlink_dump_helpinfo(skb, ct) < 0)
2726 goto nla_put_failure;
2727
2728#ifdef CONFIG_NF_CONNTRACK_SECMARK
2729 if (ct->secmark && ctnetlink_dump_secctx(skb, ct) < 0)
2730 goto nla_put_failure;
2731#endif
2732 if (ct->master && ctnetlink_dump_master(skb, ct) < 0)
2733 goto nla_put_failure;
2734
2735 if ((ct->status & IPS_SEQ_ADJUST) &&
2736 ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
2737 goto nla_put_failure;
2738
2739 if (ctnetlink_dump_ct_synproxy(skb, ct) < 0)
2740 goto nla_put_failure;
2741
2742#ifdef CONFIG_NF_CONNTRACK_MARK
2743 if (ctnetlink_dump_mark(skb, ct, dump: true) < 0)
2744 goto nla_put_failure;
2745#endif
2746 if (ctnetlink_dump_labels(skb, ct) < 0)
2747 goto nla_put_failure;
2748 return 0;
2749
2750nla_put_failure:
2751 return -ENOSPC;
2752}
2753
2754static int
2755ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct,
2756 enum ip_conntrack_info ctinfo,
2757 u_int16_t ct_attr, u_int16_t ct_info_attr)
2758{
2759 struct nlattr *nest_parms;
2760
2761 nest_parms = nla_nest_start(skb, attrtype: ct_attr);
2762 if (!nest_parms)
2763 goto nla_put_failure;
2764
2765 if (__ctnetlink_glue_build(skb, ct) < 0)
2766 goto nla_put_failure;
2767
2768 nla_nest_end(skb, start: nest_parms);
2769
2770 if (nla_put_be32(skb, attrtype: ct_info_attr, htonl(ctinfo)))
2771 goto nla_put_failure;
2772
2773 return 0;
2774
2775nla_put_failure:
2776 return -ENOSPC;
2777}
2778
2779static int
2780ctnetlink_update_status(struct nf_conn *ct, const struct nlattr * const cda[])
2781{
2782 unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS]));
2783 unsigned long d = ct->status ^ status;
2784
2785 if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
2786 /* SEEN_REPLY bit can only be set */
2787 return -EBUSY;
2788
2789 if (d & IPS_ASSURED && !(status & IPS_ASSURED))
2790 /* ASSURED bit can only be set */
2791 return -EBUSY;
2792
2793	/* This check is less strict than ctnetlink_change_status()
2794	 * because callers often flip IPS_EXPECTED bits when sending
2795	 * an NFQA_CT attribute to the kernel: silently ignore the
2796	 * unchangeable bits instead of returning an error.  User
2797	 * programs may also clear any bit that they are allowed to set.
2798	 */
2799 __nf_ct_change_status(ct, on: status, off: ~status);
2800 return 0;
2801}
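
/* d holds the bits that the request wants to flip.  SEEN_REPLY and ASSURED
 * can only be set, never cleared, through this interface.  Worked example:
 * if ct->status already has IPS_SEEN_REPLY and the new CTA_STATUS omits it,
 * then (d & IPS_SEEN_REPLY) != 0 and !(status & IPS_SEEN_REPLY), hence the
 * request fails with -EBUSY.
 */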
2802
2803static int
2804ctnetlink_glue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct)
2805{
2806 int err;
2807
2808 if (cda[CTA_TIMEOUT]) {
2809 err = ctnetlink_change_timeout(ct, cda);
2810 if (err < 0)
2811 return err;
2812 }
2813 if (cda[CTA_STATUS]) {
2814 err = ctnetlink_update_status(ct, cda);
2815 if (err < 0)
2816 return err;
2817 }
2818 if (cda[CTA_HELP]) {
2819 err = ctnetlink_change_helper(ct, cda);
2820 if (err < 0)
2821 return err;
2822 }
2823 if (cda[CTA_LABELS]) {
2824 err = ctnetlink_attach_labels(ct, cda);
2825 if (err < 0)
2826 return err;
2827 }
2828#if defined(CONFIG_NF_CONNTRACK_MARK)
2829 if (cda[CTA_MARK]) {
2830 ctnetlink_change_mark(ct, cda);
2831 }
2832#endif
2833 return 0;
2834}
2835
2836static int
2837ctnetlink_glue_parse(const struct nlattr *attr, struct nf_conn *ct)
2838{
2839 struct nlattr *cda[CTA_MAX+1];
2840 int ret;
2841
2842 ret = nla_parse_nested_deprecated(tb: cda, CTA_MAX, nla: attr, policy: ct_nla_policy,
2843 NULL);
2844 if (ret < 0)
2845 return ret;
2846
2847 return ctnetlink_glue_parse_ct(cda: (const struct nlattr **)cda, ct);
2848}
2849
2850static int ctnetlink_glue_exp_parse(const struct nlattr * const *cda,
2851 const struct nf_conn *ct,
2852 struct nf_conntrack_tuple *tuple,
2853 struct nf_conntrack_tuple *mask)
2854{
2855 int err;
2856
2857 err = ctnetlink_parse_tuple(cda, tuple, type: CTA_EXPECT_TUPLE,
2858 l3num: nf_ct_l3num(ct), NULL);
2859 if (err < 0)
2860 return err;
2861
2862 return ctnetlink_parse_tuple(cda, tuple: mask, type: CTA_EXPECT_MASK,
2863 l3num: nf_ct_l3num(ct), NULL);
2864}
2865
2866static int
2867ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
2868 u32 portid, u32 report)
2869{
2870 struct nlattr *cda[CTA_EXPECT_MAX+1];
2871 struct nf_conntrack_tuple tuple, mask;
2872 struct nf_conntrack_helper *helper = NULL;
2873 struct nf_conntrack_expect *exp;
2874 int err;
2875
2876 err = nla_parse_nested_deprecated(tb: cda, CTA_EXPECT_MAX, nla: attr,
2877 policy: exp_nla_policy, NULL);
2878 if (err < 0)
2879 return err;
2880
2881 err = ctnetlink_glue_exp_parse(cda: (const struct nlattr * const *)cda,
2882 ct, tuple: &tuple, mask: &mask);
2883 if (err < 0)
2884 return err;
2885
2886 if (cda[CTA_EXPECT_HELP_NAME]) {
2887 const char *helpname = nla_data(nla: cda[CTA_EXPECT_HELP_NAME]);
2888
2889 helper = __nf_conntrack_helper_find(name: helpname, l3num: nf_ct_l3num(ct),
2890 protonum: nf_ct_protonum(ct));
2891 if (helper == NULL)
2892 return -EOPNOTSUPP;
2893 }
2894
2895 exp = ctnetlink_alloc_expect(cda: (const struct nlattr * const *)cda, ct,
2896 helper, tuple: &tuple, mask: &mask);
2897 if (IS_ERR(ptr: exp))
2898 return PTR_ERR(ptr: exp);
2899
2900 err = nf_ct_expect_related_report(expect: exp, portid, report, flags: 0);
2901 nf_ct_expect_put(exp);
2902 return err;
2903}
2904
2905static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct,
2906 enum ip_conntrack_info ctinfo, int diff)
2907{
2908 if (!(ct->status & IPS_NAT_MASK))
2909 return;
2910
2911 nf_ct_tcp_seqadj_set(skb, ct, ctinfo, off: diff);
2912}
2913
2914static const struct nfnl_ct_hook ctnetlink_glue_hook = {
2915 .build_size = ctnetlink_glue_build_size,
2916 .build = ctnetlink_glue_build,
2917 .parse = ctnetlink_glue_parse,
2918 .attach_expect = ctnetlink_glue_attach_expect,
2919 .seq_adjust = ctnetlink_glue_seqadj,
2920};
2921#endif /* CONFIG_NETFILTER_NETLINK_GLUE_CT */
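
/* The glue hook above is what lets nfnetlink_queue and nfnetlink_log embed a
 * CTA_* dump of the conntrack entry in their own messages (e.g. the NFQA_CT
 * attribute) and accept updated conntrack attributes back with the verdict,
 * reusing the parse/build helpers of this file instead of duplicating them.
 */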
2922
2923/***********************************************************************
2924 * EXPECT
2925 ***********************************************************************/
2926
2927static int ctnetlink_exp_dump_tuple(struct sk_buff *skb,
2928 const struct nf_conntrack_tuple *tuple,
2929 u32 type)
2930{
2931 struct nlattr *nest_parms;
2932
2933 nest_parms = nla_nest_start(skb, attrtype: type);
2934 if (!nest_parms)
2935 goto nla_put_failure;
2936 if (ctnetlink_dump_tuples(skb, tuple) < 0)
2937 goto nla_put_failure;
2938 nla_nest_end(skb, start: nest_parms);
2939
2940 return 0;
2941
2942nla_put_failure:
2943 return -1;
2944}
2945
2946static int ctnetlink_exp_dump_mask(struct sk_buff *skb,
2947 const struct nf_conntrack_tuple *tuple,
2948 const struct nf_conntrack_tuple_mask *mask)
2949{
2950 const struct nf_conntrack_l4proto *l4proto;
2951 struct nf_conntrack_tuple m;
2952 struct nlattr *nest_parms;
2953 int ret;
2954
2955 memset(&m, 0xFF, sizeof(m));
2956 memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
2957 m.src.u.all = mask->src.u.all;
2958 m.src.l3num = tuple->src.l3num;
2959 m.dst.protonum = tuple->dst.protonum;
2960
2961 nest_parms = nla_nest_start(skb, attrtype: CTA_EXPECT_MASK);
2962 if (!nest_parms)
2963 goto nla_put_failure;
2964
2965 rcu_read_lock();
2966 ret = ctnetlink_dump_tuples_ip(skb, tuple: &m);
2967 if (ret >= 0) {
2968 l4proto = nf_ct_l4proto_find(l4proto: tuple->dst.protonum);
2969 ret = ctnetlink_dump_tuples_proto(skb, tuple: &m, l4proto);
2970 }
2971 rcu_read_unlock();
2972
2973 if (unlikely(ret < 0))
2974 goto nla_put_failure;
2975
2976 nla_nest_end(skb, start: nest_parms);
2977
2978 return 0;
2979
2980nla_put_failure:
2981 return -1;
2982}
2983
2984#if IS_ENABLED(CONFIG_NF_NAT)
2985static const union nf_inet_addr any_addr;
2986#endif
2987
2988static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp)
2989{
2990 static siphash_aligned_key_t exp_id_seed;
2991 unsigned long a, b, c, d;
2992
2993 net_get_random_once(&exp_id_seed, sizeof(exp_id_seed));
2994
2995 a = (unsigned long)exp;
2996 b = (unsigned long)exp->helper;
2997 c = (unsigned long)exp->master;
2998 d = (unsigned long)siphash(data: &exp->tuple, len: sizeof(exp->tuple), key: &exp_id_seed);
2999
3000#ifdef CONFIG_64BIT
3001 return (__force __be32)siphash_4u64(a: (u64)a, b: (u64)b, c: (u64)c, d: (u64)d, key: &exp_id_seed);
3002#else
3003 return (__force __be32)siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &exp_id_seed);
3004#endif
3005}
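
/* The id reported in CTA_EXPECT_ID is a siphash over the expectation, helper
 * and master pointers plus the tuple, seeded once on first use, so it is
 * stable for the lifetime of the expectation without exposing raw kernel
 * addresses.  Note that ctnetlink_get_expect() compares against this value,
 * whereas ctnetlink_del_expect() further below still matches CTA_EXPECT_ID
 * against the truncated object address (see the ntohl(id) check there).
 */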
3006
3007static int
3008ctnetlink_exp_dump_expect(struct sk_buff *skb,
3009 const struct nf_conntrack_expect *exp)
3010{
3011 struct nf_conn *master = exp->master;
3012 long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
3013 struct nf_conn_help *help;
3014#if IS_ENABLED(CONFIG_NF_NAT)
3015 struct nlattr *nest_parms;
3016 struct nf_conntrack_tuple nat_tuple = {};
3017#endif
3018 struct nf_ct_helper_expectfn *expfn;
3019
3020 if (timeout < 0)
3021 timeout = 0;
3022
3023 if (ctnetlink_exp_dump_tuple(skb, tuple: &exp->tuple, type: CTA_EXPECT_TUPLE) < 0)
3024 goto nla_put_failure;
3025 if (ctnetlink_exp_dump_mask(skb, tuple: &exp->tuple, mask: &exp->mask) < 0)
3026 goto nla_put_failure;
3027 if (ctnetlink_exp_dump_tuple(skb,
3028 tuple: &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
3029 type: CTA_EXPECT_MASTER) < 0)
3030 goto nla_put_failure;
3031
3032#if IS_ENABLED(CONFIG_NF_NAT)
3033 if (!nf_inet_addr_cmp(a1: &exp->saved_addr, a2: &any_addr) ||
3034 exp->saved_proto.all) {
3035 nest_parms = nla_nest_start(skb, attrtype: CTA_EXPECT_NAT);
3036 if (!nest_parms)
3037 goto nla_put_failure;
3038
3039 if (nla_put_be32(skb, attrtype: CTA_EXPECT_NAT_DIR, htonl(exp->dir)))
3040 goto nla_put_failure;
3041
3042 nat_tuple.src.l3num = nf_ct_l3num(ct: master);
3043 nat_tuple.src.u3 = exp->saved_addr;
3044 nat_tuple.dst.protonum = nf_ct_protonum(ct: master);
3045 nat_tuple.src.u = exp->saved_proto;
3046
3047 if (ctnetlink_exp_dump_tuple(skb, tuple: &nat_tuple,
3048 type: CTA_EXPECT_NAT_TUPLE) < 0)
3049 goto nla_put_failure;
3050 nla_nest_end(skb, start: nest_parms);
3051 }
3052#endif
3053 if (nla_put_be32(skb, attrtype: CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
3054 nla_put_be32(skb, attrtype: CTA_EXPECT_ID, value: nf_expect_get_id(exp)) ||
3055 nla_put_be32(skb, attrtype: CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
3056 nla_put_be32(skb, attrtype: CTA_EXPECT_CLASS, htonl(exp->class)))
3057 goto nla_put_failure;
3058 help = nfct_help(ct: master);
3059 if (help) {
3060 struct nf_conntrack_helper *helper;
3061
3062 helper = rcu_dereference(help->helper);
3063 if (helper &&
3064 nla_put_string(skb, attrtype: CTA_EXPECT_HELP_NAME, str: helper->name))
3065 goto nla_put_failure;
3066 }
3067 expfn = nf_ct_helper_expectfn_find_by_symbol(symbol: exp->expectfn);
3068 if (expfn != NULL &&
3069 nla_put_string(skb, attrtype: CTA_EXPECT_FN, str: expfn->name))
3070 goto nla_put_failure;
3071
3072 return 0;
3073
3074nla_put_failure:
3075 return -1;
3076}
3077
3078static int
3079ctnetlink_exp_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
3080 int event, const struct nf_conntrack_expect *exp)
3081{
3082 struct nlmsghdr *nlh;
3083 unsigned int flags = portid ? NLM_F_MULTI : 0;
3084
3085 event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, msg_type: event);
3086 nlh = nfnl_msg_put(skb, portid, seq, type: event, flags,
3087 family: exp->tuple.src.l3num, NFNETLINK_V0, res_id: 0);
3088 if (!nlh)
3089 goto nlmsg_failure;
3090
3091 if (ctnetlink_exp_dump_expect(skb, exp) < 0)
3092 goto nla_put_failure;
3093
3094 nlmsg_end(skb, nlh);
3095 return skb->len;
3096
3097nlmsg_failure:
3098nla_put_failure:
3099 nlmsg_cancel(skb, nlh);
3100 return -1;
3101}
3102
3103#ifdef CONFIG_NF_CONNTRACK_EVENTS
3104static int
3105ctnetlink_expect_event(unsigned int events, const struct nf_exp_event *item)
3106{
3107 struct nf_conntrack_expect *exp = item->exp;
3108 struct net *net = nf_ct_exp_net(exp);
3109 struct nlmsghdr *nlh;
3110 struct sk_buff *skb;
3111 unsigned int type, group;
3112 int flags = 0;
3113
3114 if (events & (1 << IPEXP_DESTROY)) {
3115 type = IPCTNL_MSG_EXP_DELETE;
3116 group = NFNLGRP_CONNTRACK_EXP_DESTROY;
3117 } else if (events & (1 << IPEXP_NEW)) {
3118 type = IPCTNL_MSG_EXP_NEW;
3119 flags = NLM_F_CREATE|NLM_F_EXCL;
3120 group = NFNLGRP_CONNTRACK_EXP_NEW;
3121 } else
3122 return 0;
3123
3124 if (!item->report && !nfnetlink_has_listeners(net, group))
3125 return 0;
3126
3127 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
3128 if (skb == NULL)
3129 goto errout;
3130
3131 type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, msg_type: type);
3132 nlh = nfnl_msg_put(skb, portid: item->portid, seq: 0, type, flags,
3133 family: exp->tuple.src.l3num, NFNETLINK_V0, res_id: 0);
3134 if (!nlh)
3135 goto nlmsg_failure;
3136
3137 if (ctnetlink_exp_dump_expect(skb, exp) < 0)
3138 goto nla_put_failure;
3139
3140 nlmsg_end(skb, nlh);
3141 nfnetlink_send(skb, net, portid: item->portid, group, echo: item->report, GFP_ATOMIC);
3142 return 0;
3143
3144nla_put_failure:
3145 nlmsg_cancel(skb, nlh);
3146nlmsg_failure:
3147 kfree_skb(skb);
3148errout:
3149 nfnetlink_set_err(net, portid: 0, group: 0, error: -ENOBUFS);
3150 return 0;
3151}
3152#endif
3153static int ctnetlink_exp_done(struct netlink_callback *cb)
3154{
3155 if (cb->args[1])
3156 nf_ct_expect_put(exp: (struct nf_conntrack_expect *)cb->args[1]);
3157 return 0;
3158}
3159
3160static int
3161ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
3162{
3163 struct net *net = sock_net(sk: skb->sk);
3164 struct nf_conntrack_expect *exp, *last;
3165 struct nfgenmsg *nfmsg = nlmsg_data(nlh: cb->nlh);
3166 u_int8_t l3proto = nfmsg->nfgen_family;
3167
3168 rcu_read_lock();
3169 last = (struct nf_conntrack_expect *)cb->args[1];
3170 for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
3171restart:
3172 hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]],
3173 hnode) {
3174 if (l3proto && exp->tuple.src.l3num != l3proto)
3175 continue;
3176
3177 if (!net_eq(net1: nf_ct_net(ct: exp->master), net2: net))
3178 continue;
3179
3180 if (cb->args[1]) {
3181 if (exp != last)
3182 continue;
3183 cb->args[1] = 0;
3184 }
3185 if (ctnetlink_exp_fill_info(skb,
3186 NETLINK_CB(cb->skb).portid,
3187 seq: cb->nlh->nlmsg_seq,
3188 event: IPCTNL_MSG_EXP_NEW,
3189 exp) < 0) {
3190 if (!refcount_inc_not_zero(r: &exp->use))
3191 continue;
3192 cb->args[1] = (unsigned long)exp;
3193 goto out;
3194 }
3195 }
3196 if (cb->args[1]) {
3197 cb->args[1] = 0;
3198 goto restart;
3199 }
3200 }
3201out:
3202 rcu_read_unlock();
3203 if (last)
3204 nf_ct_expect_put(exp: last);
3205
3206 return skb->len;
3207}
3208
3209static int
3210ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
3211{
3212 struct nf_conntrack_expect *exp, *last;
3213 struct nfgenmsg *nfmsg = nlmsg_data(nlh: cb->nlh);
3214 struct nf_conn *ct = cb->data;
3215 struct nf_conn_help *help = nfct_help(ct);
3216 u_int8_t l3proto = nfmsg->nfgen_family;
3217
3218 if (cb->args[0])
3219 return 0;
3220
3221 rcu_read_lock();
3222 last = (struct nf_conntrack_expect *)cb->args[1];
3223restart:
3224 hlist_for_each_entry_rcu(exp, &help->expectations, lnode) {
3225 if (l3proto && exp->tuple.src.l3num != l3proto)
3226 continue;
3227 if (cb->args[1]) {
3228 if (exp != last)
3229 continue;
3230 cb->args[1] = 0;
3231 }
3232 if (ctnetlink_exp_fill_info(skb, NETLINK_CB(cb->skb).portid,
3233 seq: cb->nlh->nlmsg_seq,
3234 event: IPCTNL_MSG_EXP_NEW,
3235 exp) < 0) {
3236 if (!refcount_inc_not_zero(r: &exp->use))
3237 continue;
3238 cb->args[1] = (unsigned long)exp;
3239 goto out;
3240 }
3241 }
3242 if (cb->args[1]) {
3243 cb->args[1] = 0;
3244 goto restart;
3245 }
3246 cb->args[0] = 1;
3247out:
3248 rcu_read_unlock();
3249 if (last)
3250 nf_ct_expect_put(exp: last);
3251
3252 return skb->len;
3253}
3254
3255static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl,
3256 struct sk_buff *skb,
3257 const struct nlmsghdr *nlh,
3258 const struct nlattr * const cda[],
3259 struct netlink_ext_ack *extack)
3260{
3261 int err;
3262 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
3263 u_int8_t u3 = nfmsg->nfgen_family;
3264 struct nf_conntrack_tuple tuple;
3265 struct nf_conntrack_tuple_hash *h;
3266 struct nf_conn *ct;
3267 struct nf_conntrack_zone zone;
3268 struct netlink_dump_control c = {
3269 .dump = ctnetlink_exp_ct_dump_table,
3270 .done = ctnetlink_exp_done,
3271 };
3272
3273 err = ctnetlink_parse_tuple(cda, tuple: &tuple, type: CTA_EXPECT_MASTER,
3274 l3num: u3, NULL);
3275 if (err < 0)
3276 return err;
3277
3278 err = ctnetlink_parse_zone(attr: cda[CTA_EXPECT_ZONE], zone: &zone);
3279 if (err < 0)
3280 return err;
3281
3282 h = nf_conntrack_find_get(net, zone: &zone, tuple: &tuple);
3283 if (!h)
3284 return -ENOENT;
3285
3286 ct = nf_ct_tuplehash_to_ctrack(hash: h);
3287	/* No expectations are linked to this conntrack entry. */
3288 if (!nfct_help(ct)) {
3289 nf_ct_put(ct);
3290 return 0;
3291 }
3292
3293 c.data = ct;
3294
3295 err = netlink_dump_start(ssk: ctnl, skb, nlh, control: &c);
3296 nf_ct_put(ct);
3297
3298 return err;
3299}
3300
3301static int ctnetlink_get_expect(struct sk_buff *skb,
3302 const struct nfnl_info *info,
3303 const struct nlattr * const cda[])
3304{
3305 u_int8_t u3 = info->nfmsg->nfgen_family;
3306 struct nf_conntrack_tuple tuple;
3307 struct nf_conntrack_expect *exp;
3308 struct nf_conntrack_zone zone;
3309 struct sk_buff *skb2;
3310 int err;
3311
3312 if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
3313 if (cda[CTA_EXPECT_MASTER])
3314 return ctnetlink_dump_exp_ct(net: info->net, ctnl: info->sk, skb,
3315 nlh: info->nlh, cda,
3316 extack: info->extack);
3317 else {
3318 struct netlink_dump_control c = {
3319 .dump = ctnetlink_exp_dump_table,
3320 .done = ctnetlink_exp_done,
3321 };
3322 return netlink_dump_start(ssk: info->sk, skb, nlh: info->nlh, control: &c);
3323 }
3324 }
3325
3326 err = ctnetlink_parse_zone(attr: cda[CTA_EXPECT_ZONE], zone: &zone);
3327 if (err < 0)
3328 return err;
3329
3330 if (cda[CTA_EXPECT_TUPLE])
3331 err = ctnetlink_parse_tuple(cda, tuple: &tuple, type: CTA_EXPECT_TUPLE,
3332 l3num: u3, NULL);
3333 else if (cda[CTA_EXPECT_MASTER])
3334 err = ctnetlink_parse_tuple(cda, tuple: &tuple, type: CTA_EXPECT_MASTER,
3335 l3num: u3, NULL);
3336 else
3337 return -EINVAL;
3338
3339 if (err < 0)
3340 return err;
3341
3342 exp = nf_ct_expect_find_get(net: info->net, zone: &zone, tuple: &tuple);
3343 if (!exp)
3344 return -ENOENT;
3345
3346 if (cda[CTA_EXPECT_ID]) {
3347 __be32 id = nla_get_be32(nla: cda[CTA_EXPECT_ID]);
3348
3349 if (id != nf_expect_get_id(exp)) {
3350 nf_ct_expect_put(exp);
3351 return -ENOENT;
3352 }
3353 }
3354
3355 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
3356 if (!skb2) {
3357 nf_ct_expect_put(exp);
3358 return -ENOMEM;
3359 }
3360
3361 rcu_read_lock();
3362 err = ctnetlink_exp_fill_info(skb: skb2, NETLINK_CB(skb).portid,
3363 seq: info->nlh->nlmsg_seq, event: IPCTNL_MSG_EXP_NEW,
3364 exp);
3365 rcu_read_unlock();
3366 nf_ct_expect_put(exp);
3367 if (err <= 0) {
3368 kfree_skb(skb: skb2);
3369 return -ENOMEM;
3370 }
3371
3372 return nfnetlink_unicast(skb: skb2, net: info->net, NETLINK_CB(skb).portid);
3373}
3374
3375static bool expect_iter_name(struct nf_conntrack_expect *exp, void *data)
3376{
3377 struct nf_conntrack_helper *helper;
3378 const struct nf_conn_help *m_help;
3379 const char *name = data;
3380
3381 m_help = nfct_help(ct: exp->master);
3382
3383 helper = rcu_dereference(m_help->helper);
3384 if (!helper)
3385 return false;
3386
3387 return strcmp(helper->name, name) == 0;
3388}
3389
3390static bool expect_iter_all(struct nf_conntrack_expect *exp, void *data)
3391{
3392 return true;
3393}
3394
3395static int ctnetlink_del_expect(struct sk_buff *skb,
3396 const struct nfnl_info *info,
3397 const struct nlattr * const cda[])
3398{
3399 u_int8_t u3 = info->nfmsg->nfgen_family;
3400 struct nf_conntrack_expect *exp;
3401 struct nf_conntrack_tuple tuple;
3402 struct nf_conntrack_zone zone;
3403 int err;
3404
3405 if (cda[CTA_EXPECT_TUPLE]) {
3406 /* delete a single expect by tuple */
3407 err = ctnetlink_parse_zone(attr: cda[CTA_EXPECT_ZONE], zone: &zone);
3408 if (err < 0)
3409 return err;
3410
3411 err = ctnetlink_parse_tuple(cda, tuple: &tuple, type: CTA_EXPECT_TUPLE,
3412 l3num: u3, NULL);
3413 if (err < 0)
3414 return err;
3415
3416 /* bump usage count to 2 */
3417 exp = nf_ct_expect_find_get(net: info->net, zone: &zone, tuple: &tuple);
3418 if (!exp)
3419 return -ENOENT;
3420
3421 if (cda[CTA_EXPECT_ID]) {
3422 __be32 id = nla_get_be32(nla: cda[CTA_EXPECT_ID]);
3423 if (ntohl(id) != (u32)(unsigned long)exp) {
3424 nf_ct_expect_put(exp);
3425 return -ENOENT;
3426 }
3427 }
3428
3429 /* after list removal, usage count == 1 */
3430 spin_lock_bh(lock: &nf_conntrack_expect_lock);
3431 if (del_timer(timer: &exp->timeout)) {
3432 nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).portid,
3433 report: nlmsg_report(nlh: info->nlh));
3434 nf_ct_expect_put(exp);
3435 }
3436 spin_unlock_bh(lock: &nf_conntrack_expect_lock);
3437		/* We have to put the reference we took via nf_ct_expect_find_get()
3438		 * above; after this line the usage count is 0. */
3439 nf_ct_expect_put(exp);
3440 } else if (cda[CTA_EXPECT_HELP_NAME]) {
3441 char *name = nla_data(nla: cda[CTA_EXPECT_HELP_NAME]);
3442
3443 nf_ct_expect_iterate_net(net: info->net, iter: expect_iter_name, data: name,
3444 NETLINK_CB(skb).portid,
3445 report: nlmsg_report(nlh: info->nlh));
3446 } else {
3447		/* This basically means we have to flush everything. */
3448 nf_ct_expect_iterate_net(net: info->net, iter: expect_iter_all, NULL,
3449 NETLINK_CB(skb).portid,
3450 report: nlmsg_report(nlh: info->nlh));
3451 }
3452
3453 return 0;
3454}
3455static int
3456ctnetlink_change_expect(struct nf_conntrack_expect *x,
3457 const struct nlattr * const cda[])
3458{
3459 if (cda[CTA_EXPECT_TIMEOUT]) {
3460 if (!del_timer(timer: &x->timeout))
3461 return -ETIME;
3462
3463 x->timeout.expires = jiffies +
3464 ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ;
3465 add_timer(timer: &x->timeout);
3466 }
3467 return 0;
3468}
3469
3470#if IS_ENABLED(CONFIG_NF_NAT)
3471static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
3472 [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 },
3473 [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED },
3474};
3475#endif
3476
3477static int
3478ctnetlink_parse_expect_nat(const struct nlattr *attr,
3479 struct nf_conntrack_expect *exp,
3480 u_int8_t u3)
3481{
3482#if IS_ENABLED(CONFIG_NF_NAT)
3483 struct nlattr *tb[CTA_EXPECT_NAT_MAX+1];
3484 struct nf_conntrack_tuple nat_tuple = {};
3485 int err;
3486
3487 err = nla_parse_nested_deprecated(tb, CTA_EXPECT_NAT_MAX, nla: attr,
3488 policy: exp_nat_nla_policy, NULL);
3489 if (err < 0)
3490 return err;
3491
3492 if (!tb[CTA_EXPECT_NAT_DIR] || !tb[CTA_EXPECT_NAT_TUPLE])
3493 return -EINVAL;
3494
3495 err = ctnetlink_parse_tuple(cda: (const struct nlattr * const *)tb,
3496 tuple: &nat_tuple, type: CTA_EXPECT_NAT_TUPLE,
3497 l3num: u3, NULL);
3498 if (err < 0)
3499 return err;
3500
3501 exp->saved_addr = nat_tuple.src.u3;
3502 exp->saved_proto = nat_tuple.src.u;
3503 exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR]));
3504
3505 return 0;
3506#else
3507 return -EOPNOTSUPP;
3508#endif
3509}
3510
3511static struct nf_conntrack_expect *
3512ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct,
3513 struct nf_conntrack_helper *helper,
3514 struct nf_conntrack_tuple *tuple,
3515 struct nf_conntrack_tuple *mask)
3516{
3517 u_int32_t class = 0;
3518 struct nf_conntrack_expect *exp;
3519 struct nf_conn_help *help;
3520 int err;
3521
3522 help = nfct_help(ct);
3523 if (!help)
3524 return ERR_PTR(error: -EOPNOTSUPP);
3525
3526 if (cda[CTA_EXPECT_CLASS] && helper) {
3527 class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS]));
3528 if (class > helper->expect_class_max)
3529 return ERR_PTR(error: -EINVAL);
3530 }
3531 exp = nf_ct_expect_alloc(me: ct);
3532 if (!exp)
3533 return ERR_PTR(error: -ENOMEM);
3534
3535 if (cda[CTA_EXPECT_FLAGS]) {
3536 exp->flags = ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS]));
3537 exp->flags &= ~NF_CT_EXPECT_USERSPACE;
3538 } else {
3539 exp->flags = 0;
3540 }
3541 if (cda[CTA_EXPECT_FN]) {
3542 const char *name = nla_data(nla: cda[CTA_EXPECT_FN]);
3543 struct nf_ct_helper_expectfn *expfn;
3544
3545 expfn = nf_ct_helper_expectfn_find_by_name(name);
3546 if (expfn == NULL) {
3547 err = -EINVAL;
3548 goto err_out;
3549 }
3550 exp->expectfn = expfn->expectfn;
3551 } else
3552 exp->expectfn = NULL;
3553
3554 exp->class = class;
3555 exp->master = ct;
3556 exp->helper = helper;
3557 exp->tuple = *tuple;
3558 exp->mask.src.u3 = mask->src.u3;
3559 exp->mask.src.u.all = mask->src.u.all;
3560
3561 if (cda[CTA_EXPECT_NAT]) {
3562 err = ctnetlink_parse_expect_nat(attr: cda[CTA_EXPECT_NAT],
3563 exp, u3: nf_ct_l3num(ct));
3564 if (err < 0)
3565 goto err_out;
3566 }
3567 return exp;
3568err_out:
3569 nf_ct_expect_put(exp);
3570 return ERR_PTR(error: err);
3571}
3572
3573static int
3574ctnetlink_create_expect(struct net *net,
3575 const struct nf_conntrack_zone *zone,
3576 const struct nlattr * const cda[],
3577 u_int8_t u3, u32 portid, int report)
3578{
3579 struct nf_conntrack_tuple tuple, mask, master_tuple;
3580 struct nf_conntrack_tuple_hash *h = NULL;
3581 struct nf_conntrack_helper *helper = NULL;
3582 struct nf_conntrack_expect *exp;
3583 struct nf_conn *ct;
3584 int err;
3585
3586 /* caller guarantees that those three CTA_EXPECT_* exist */
3587 err = ctnetlink_parse_tuple(cda, tuple: &tuple, type: CTA_EXPECT_TUPLE,
3588 l3num: u3, NULL);
3589 if (err < 0)
3590 return err;
3591 err = ctnetlink_parse_tuple(cda, tuple: &mask, type: CTA_EXPECT_MASK,
3592 l3num: u3, NULL);
3593 if (err < 0)
3594 return err;
3595 err = ctnetlink_parse_tuple(cda, tuple: &master_tuple, type: CTA_EXPECT_MASTER,
3596 l3num: u3, NULL);
3597 if (err < 0)
3598 return err;
3599
3600 /* Look for master conntrack of this expectation */
3601 h = nf_conntrack_find_get(net, zone, tuple: &master_tuple);
3602 if (!h)
3603 return -ENOENT;
3604 ct = nf_ct_tuplehash_to_ctrack(hash: h);
3605
3606 rcu_read_lock();
3607 if (cda[CTA_EXPECT_HELP_NAME]) {
3608 const char *helpname = nla_data(nla: cda[CTA_EXPECT_HELP_NAME]);
3609
3610 helper = __nf_conntrack_helper_find(name: helpname, l3num: u3,
3611 protonum: nf_ct_protonum(ct));
3612 if (helper == NULL) {
3613 rcu_read_unlock();
3614#ifdef CONFIG_MODULES
3615 if (request_module("nfct-helper-%s", helpname) < 0) {
3616 err = -EOPNOTSUPP;
3617 goto err_ct;
3618 }
3619 rcu_read_lock();
3620 helper = __nf_conntrack_helper_find(name: helpname, l3num: u3,
3621 protonum: nf_ct_protonum(ct));
3622 if (helper) {
3623 err = -EAGAIN;
3624 goto err_rcu;
3625 }
3626 rcu_read_unlock();
3627#endif
3628 err = -EOPNOTSUPP;
3629 goto err_ct;
3630 }
3631 }
3632
3633 exp = ctnetlink_alloc_expect(cda, ct, helper, tuple: &tuple, mask: &mask);
3634 if (IS_ERR(ptr: exp)) {
3635 err = PTR_ERR(ptr: exp);
3636 goto err_rcu;
3637 }
3638
3639 err = nf_ct_expect_related_report(expect: exp, portid, report, flags: 0);
3640 nf_ct_expect_put(exp);
3641err_rcu:
3642 rcu_read_unlock();
3643err_ct:
3644 nf_ct_put(ct);
3645 return err;
3646}
3647
3648static int ctnetlink_new_expect(struct sk_buff *skb,
3649 const struct nfnl_info *info,
3650 const struct nlattr * const cda[])
3651{
3652 u_int8_t u3 = info->nfmsg->nfgen_family;
3653 struct nf_conntrack_tuple tuple;
3654 struct nf_conntrack_expect *exp;
3655 struct nf_conntrack_zone zone;
3656 int err;
3657
3658 if (!cda[CTA_EXPECT_TUPLE]
3659 || !cda[CTA_EXPECT_MASK]
3660 || !cda[CTA_EXPECT_MASTER])
3661 return -EINVAL;
3662
3663 err = ctnetlink_parse_zone(attr: cda[CTA_EXPECT_ZONE], zone: &zone);
3664 if (err < 0)
3665 return err;
3666
3667 err = ctnetlink_parse_tuple(cda, tuple: &tuple, type: CTA_EXPECT_TUPLE,
3668 l3num: u3, NULL);
3669 if (err < 0)
3670 return err;
3671
3672 spin_lock_bh(lock: &nf_conntrack_expect_lock);
3673 exp = __nf_ct_expect_find(net: info->net, zone: &zone, tuple: &tuple);
3674 if (!exp) {
3675 spin_unlock_bh(lock: &nf_conntrack_expect_lock);
3676 err = -ENOENT;
3677 if (info->nlh->nlmsg_flags & NLM_F_CREATE) {
3678 err = ctnetlink_create_expect(net: info->net, zone: &zone, cda, u3,
3679 NETLINK_CB(skb).portid,
3680 report: nlmsg_report(nlh: info->nlh));
3681 }
3682 return err;
3683 }
3684
3685 err = -EEXIST;
3686 if (!(info->nlh->nlmsg_flags & NLM_F_EXCL))
3687 err = ctnetlink_change_expect(x: exp, cda);
3688 spin_unlock_bh(lock: &nf_conntrack_expect_lock);
3689
3690 return err;
3691}
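
/* CTA_EXPECT_TUPLE, CTA_EXPECT_MASK and CTA_EXPECT_MASTER are all mandatory
 * for a new expectation.  A hedged libnetfilter_conntrack sketch of the
 * create path (the master/expected/mask tuple objects and the 300s timeout
 * are assumptions for illustration):
 *
 *	struct nf_expect *exp = nfexp_new();
 *
 *	nfexp_set_attr(exp, ATTR_EXP_MASTER, master);
 *	nfexp_set_attr(exp, ATTR_EXP_EXPECTED, expected);
 *	nfexp_set_attr(exp, ATTR_EXP_MASK, mask);
 *	nfexp_set_attr_u32(exp, ATTR_EXP_TIMEOUT, 300);
 *
 *	nfexp_query(h, NFCT_Q_CREATE, exp);
 *	nfexp_destroy(exp);
 */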
3692
3693static int
3694ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu,
3695 const struct ip_conntrack_stat *st)
3696{
3697 struct nlmsghdr *nlh;
3698 unsigned int flags = portid ? NLM_F_MULTI : 0, event;
3699
3700 event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK,
3701 msg_type: IPCTNL_MSG_EXP_GET_STATS_CPU);
3702 nlh = nfnl_msg_put(skb, portid, seq, type: event, flags, AF_UNSPEC,
3703 NFNETLINK_V0, htons(cpu));
3704 if (!nlh)
3705 goto nlmsg_failure;
3706
3707 if (nla_put_be32(skb, attrtype: CTA_STATS_EXP_NEW, htonl(st->expect_new)) ||
3708 nla_put_be32(skb, attrtype: CTA_STATS_EXP_CREATE, htonl(st->expect_create)) ||
3709 nla_put_be32(skb, attrtype: CTA_STATS_EXP_DELETE, htonl(st->expect_delete)))
3710 goto nla_put_failure;
3711
3712 nlmsg_end(skb, nlh);
3713 return skb->len;
3714
3715nla_put_failure:
3716nlmsg_failure:
3717 nlmsg_cancel(skb, nlh);
3718 return -1;
3719}
3720
3721static int
3722ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
3723{
3724 int cpu;
3725 struct net *net = sock_net(sk: skb->sk);
3726
3727 if (cb->args[0] == nr_cpu_ids)
3728 return 0;
3729
3730 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
3731 const struct ip_conntrack_stat *st;
3732
3733 if (!cpu_possible(cpu))
3734 continue;
3735
3736 st = per_cpu_ptr(net->ct.stat, cpu);
3737 if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).portid,
3738 seq: cb->nlh->nlmsg_seq,
3739 cpu, st) < 0)
3740 break;
3741 }
3742 cb->args[0] = cpu;
3743
3744 return skb->len;
3745}
3746
3747static int ctnetlink_stat_exp_cpu(struct sk_buff *skb,
3748 const struct nfnl_info *info,
3749 const struct nlattr * const cda[])
3750{
3751 if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
3752 struct netlink_dump_control c = {
3753 .dump = ctnetlink_exp_stat_cpu_dump,
3754 };
3755 return netlink_dump_start(ssk: info->sk, skb, nlh: info->nlh, control: &c);
3756 }
3757
3758 return 0;
3759}
3760
3761#ifdef CONFIG_NF_CONNTRACK_EVENTS
3762static struct nf_ct_event_notifier ctnl_notifier = {
3763 .ct_event = ctnetlink_conntrack_event,
3764 .exp_event = ctnetlink_expect_event,
3765};
3766#endif
3767
3768static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = {
3769 [IPCTNL_MSG_CT_NEW] = {
3770 .call = ctnetlink_new_conntrack,
3771 .type = NFNL_CB_MUTEX,
3772 .attr_count = CTA_MAX,
3773 .policy = ct_nla_policy
3774 },
3775 [IPCTNL_MSG_CT_GET] = {
3776 .call = ctnetlink_get_conntrack,
3777 .type = NFNL_CB_MUTEX,
3778 .attr_count = CTA_MAX,
3779 .policy = ct_nla_policy
3780 },
3781 [IPCTNL_MSG_CT_DELETE] = {
3782 .call = ctnetlink_del_conntrack,
3783 .type = NFNL_CB_MUTEX,
3784 .attr_count = CTA_MAX,
3785 .policy = ct_nla_policy
3786 },
3787 [IPCTNL_MSG_CT_GET_CTRZERO] = {
3788 .call = ctnetlink_get_conntrack,
3789 .type = NFNL_CB_MUTEX,
3790 .attr_count = CTA_MAX,
3791 .policy = ct_nla_policy
3792 },
3793 [IPCTNL_MSG_CT_GET_STATS_CPU] = {
3794 .call = ctnetlink_stat_ct_cpu,
3795 .type = NFNL_CB_MUTEX,
3796 },
3797 [IPCTNL_MSG_CT_GET_STATS] = {
3798 .call = ctnetlink_stat_ct,
3799 .type = NFNL_CB_MUTEX,
3800 },
3801 [IPCTNL_MSG_CT_GET_DYING] = {
3802 .call = ctnetlink_get_ct_dying,
3803 .type = NFNL_CB_MUTEX,
3804 },
3805 [IPCTNL_MSG_CT_GET_UNCONFIRMED] = {
3806 .call = ctnetlink_get_ct_unconfirmed,
3807 .type = NFNL_CB_MUTEX,
3808 },
3809};
3810
3811static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
3812 [IPCTNL_MSG_EXP_GET] = {
3813 .call = ctnetlink_get_expect,
3814 .type = NFNL_CB_MUTEX,
3815 .attr_count = CTA_EXPECT_MAX,
3816 .policy = exp_nla_policy
3817 },
3818 [IPCTNL_MSG_EXP_NEW] = {
3819 .call = ctnetlink_new_expect,
3820 .type = NFNL_CB_MUTEX,
3821 .attr_count = CTA_EXPECT_MAX,
3822 .policy = exp_nla_policy
3823 },
3824 [IPCTNL_MSG_EXP_DELETE] = {
3825 .call = ctnetlink_del_expect,
3826 .type = NFNL_CB_MUTEX,
3827 .attr_count = CTA_EXPECT_MAX,
3828 .policy = exp_nla_policy
3829 },
3830 [IPCTNL_MSG_EXP_GET_STATS_CPU] = {
3831 .call = ctnetlink_stat_exp_cpu,
3832 .type = NFNL_CB_MUTEX,
3833 },
3834};
3835
3836static const struct nfnetlink_subsystem ctnl_subsys = {
3837 .name = "conntrack",
3838 .subsys_id = NFNL_SUBSYS_CTNETLINK,
3839 .cb_count = IPCTNL_MSG_MAX,
3840 .cb = ctnl_cb,
3841};
3842
3843static const struct nfnetlink_subsystem ctnl_exp_subsys = {
3844 .name = "conntrack_expect",
3845 .subsys_id = NFNL_SUBSYS_CTNETLINK_EXP,
3846 .cb_count = IPCTNL_MSG_EXP_MAX,
3847 .cb = ctnl_exp_cb,
3848};
3849
3850MODULE_ALIAS("ip_conntrack_netlink");
3851MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
3852MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
3853
3854static int __net_init ctnetlink_net_init(struct net *net)
3855{
3856#ifdef CONFIG_NF_CONNTRACK_EVENTS
3857 nf_conntrack_register_notifier(net, nb: &ctnl_notifier);
3858#endif
3859 return 0;
3860}
3861
3862static void ctnetlink_net_pre_exit(struct net *net)
3863{
3864#ifdef CONFIG_NF_CONNTRACK_EVENTS
3865 nf_conntrack_unregister_notifier(net);
3866#endif
3867}
3868
3869static struct pernet_operations ctnetlink_net_ops = {
3870 .init = ctnetlink_net_init,
3871 .pre_exit = ctnetlink_net_pre_exit,
3872};
3873
3874static int __init ctnetlink_init(void)
3875{
3876 int ret;
3877
3878 NL_ASSERT_DUMP_CTX_FITS(struct ctnetlink_list_dump_ctx);
3879
3880 ret = nfnetlink_subsys_register(n: &ctnl_subsys);
3881 if (ret < 0) {
3882 pr_err("ctnetlink_init: cannot register with nfnetlink.\n");
3883 goto err_out;
3884 }
3885
3886 ret = nfnetlink_subsys_register(n: &ctnl_exp_subsys);
3887 if (ret < 0) {
3888 pr_err("ctnetlink_init: cannot register exp with nfnetlink.\n");
3889 goto err_unreg_subsys;
3890 }
3891
3892 ret = register_pernet_subsys(&ctnetlink_net_ops);
3893 if (ret < 0) {
3894 pr_err("ctnetlink_init: cannot register pernet operations\n");
3895 goto err_unreg_exp_subsys;
3896 }
3897#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
3898 /* setup interaction between nf_queue and nf_conntrack_netlink. */
3899 RCU_INIT_POINTER(nfnl_ct_hook, &ctnetlink_glue_hook);
3900#endif
3901 return 0;
3902
3903err_unreg_exp_subsys:
3904 nfnetlink_subsys_unregister(n: &ctnl_exp_subsys);
3905err_unreg_subsys:
3906 nfnetlink_subsys_unregister(n: &ctnl_subsys);
3907err_out:
3908 return ret;
3909}
3910
3911static void __exit ctnetlink_exit(void)
3912{
3913 unregister_pernet_subsys(&ctnetlink_net_ops);
3914 nfnetlink_subsys_unregister(n: &ctnl_exp_subsys);
3915 nfnetlink_subsys_unregister(n: &ctnl_subsys);
3916#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
3917 RCU_INIT_POINTER(nfnl_ct_hook, NULL);
3918#endif
3919 synchronize_rcu();
3920}
3921
3922module_init(ctnetlink_init);
3923module_exit(ctnetlink_exit);
3924

/* Source listing: linux/net/netfilter/nf_conntrack_netlink.c */