/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the IP module.
 *
 * Version:	@(#)ip.h	1.0.2	05/07/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Changes:
 *		Mike McLagan    :       Routing by source
 */
#ifndef _IP_H
#define _IP_H

#include <linux/types.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/sockptr.h>
#include <linux/static_key.h>

#include <net/inet_sock.h>
#include <net/route.h>
#include <net/snmp.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <net/netns/hash.h>
#include <net/lwtunnel.h>

#define IPV4_MAX_PMTU		65535U		/* RFC 2675, Section 5.1 */
#define IPV4_MIN_MTU		68		/* RFC 791 */

extern unsigned int sysctl_fib_sync_mem;
extern unsigned int sysctl_fib_sync_mem_min;
extern unsigned int sysctl_fib_sync_mem_max;

struct sock;

struct inet_skb_parm {
	int			iif;
	struct ip_options	opt;		/* Compiled IP options		*/
	u16			flags;

#define IPSKB_FORWARDED		BIT(0)
#define IPSKB_XFRM_TUNNEL_SIZE	BIT(1)
#define IPSKB_XFRM_TRANSFORMED	BIT(2)
#define IPSKB_FRAG_COMPLETE	BIT(3)
#define IPSKB_REROUTED		BIT(4)
#define IPSKB_DOREDIRECT	BIT(5)
#define IPSKB_FRAG_PMTU		BIT(6)
#define IPSKB_L3SLAVE		BIT(7)
#define IPSKB_NOPOLICY		BIT(8)
#define IPSKB_MULTIPATH		BIT(9)

	u16			frag_max_size;
};

static inline bool ipv4_l3mdev_skb(u16 flags)
{
	return !!(flags & IPSKB_L3SLAVE);
}

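/*
 * iph->ihl counts 32-bit words, so this is 20 bytes for a header without
 * options and at most 60 bytes (ihl == 15).
 */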
static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
{
	return ip_hdr(skb)->ihl * 4;
}

struct ipcm_cookie {
	struct sockcm_cookie	sockc;
	__be32			addr;
	int			oif;
	struct ip_options_rcu	*opt;
	__u8			protocol;
	__u8			ttl;
	__s16			tos;
	char			priority;
	__u16			gso_size;
};

static inline void ipcm_init(struct ipcm_cookie *ipcm)
{
	*ipcm = (struct ipcm_cookie) { .tos = -1 };
}

static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
				const struct inet_sock *inet)
{
	ipcm_init(ipcm);

	ipcm->sockc.mark = READ_ONCE(inet->sk.sk_mark);
	ipcm->sockc.tsflags = READ_ONCE(inet->sk.sk_tsflags);
	ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if);
	ipcm->addr = inet->inet_saddr;
	ipcm->protocol = inet->inet_num;
}
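/*
 * Typical use in a sendmsg() path (illustrative sketch only, error handling
 * elided): fill the cookie from the socket, then let ancillary data
 * override individual fields.
 *
 *	struct ipcm_cookie ipc;
 *
 *	ipcm_init_sk(&ipc, inet_sk(sk));
 *	if (msg->msg_controllen)
 *		err = ip_cmsg_send(sk, msg, &ipc, false);
 */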

#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))

/* return enslaved device index if relevant */
static inline int inet_sdif(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return IPCB(skb)->iif;
#endif
	return 0;
}

/* Special input handler for packets caught by the router alert option.
   They are selected only by the protocol field and are then processed like
   local ones, but only if someone wants them!  Otherwise, a router that is
   not running rsvpd would kill RSVP.

   What to do with them is a user-level problem.  I have no idea how they
   would be masqueraded or NATed (it is a joke, joke :-)), but the receiver
   should be clever enough e.g. to forward mtrace requests sent to a
   multicast group in order to reach the destination's designated router.
 */

struct ip_ra_chain {
	struct ip_ra_chain __rcu *next;
	struct sock		*sk;
	union {
		void			(*destructor)(struct sock *);
		struct sock		*saved_sk;
	};
	struct rcu_head		rcu;
};

/* IP flags. */
#define IP_CE		0x8000		/* Flag: "Congestion"		*/
#define IP_DF		0x4000		/* Flag: "Don't Fragment"	*/
#define IP_MF		0x2000		/* Flag: "More Fragments"	*/
#define IP_OFFSET	0x1FFF		/* "Fragment Offset" part	*/

#define IP_FRAG_TIME	(30 * HZ)	/* fragment lifetime		*/
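/*
 * The values above describe the 16-bit frag_off field of the IPv4 header:
 * the top three bits are flags and the remaining 13 bits carry the fragment
 * offset in units of 8 bytes.  Fragments queued for reassembly are kept for
 * at most IP_FRAG_TIME.
 */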

struct msghdr;
struct net_device;
struct packet_type;
struct rtable;
struct sockaddr;

int igmp_mc_init(void);

/*
 *	Functions provided by ip.c
 */

int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr,
			  struct ip_options_rcu *opt, u8 tos);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
	   struct net_device *orig_dev);
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
		 struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int proto);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *));

struct ip_fraglist_iter {
	struct sk_buff	*frag;
	struct iphdr	*iph;
	int		offset;
	unsigned int	hlen;
};

void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
		      unsigned int hlen, struct ip_fraglist_iter *iter);
void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter);

static inline struct sk_buff *ip_fraglist_next(struct ip_fraglist_iter *iter)
{
	struct sk_buff *skb = iter->frag;

	iter->frag = skb->next;
	skb_mark_not_on_list(skb);

	return skb;
}
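/*
 * The fast path of ip_do_fragment() walks an already-built frag list
 * roughly like this (illustrative sketch, error handling elided; 'output'
 * is the caller's transmit callback):
 *
 *	ip_fraglist_init(skb, iph, hlen, &iter);
 *	for (;;) {
 *		if (iter.frag)
 *			ip_fraglist_prepare(skb, &iter);
 *		err = output(net, sk, skb);
 *		if (err || !iter.frag)
 *			break;
 *		skb = ip_fraglist_next(&iter);
 *	}
 */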

struct ip_frag_state {
	bool		DF;
	unsigned int	hlen;
	unsigned int	ll_rs;
	unsigned int	mtu;
	unsigned int	left;
	int		offset;
	int		ptr;
	__be16		not_last_frag;
};

void ip_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int ll_rs,
		  unsigned int mtu, bool DF, struct ip_frag_state *state);
struct sk_buff *ip_frag_next(struct sk_buff *skb,
			     struct ip_frag_state *state);
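/*
 * The slow path allocates each fragment instead (illustrative sketch,
 * error handling elided):
 *
 *	ip_frag_init(skb, hlen, ll_rs, mtu, df, &state);
 *	while (state.left > 0) {
 *		skb2 = ip_frag_next(skb, &state);
 *		if (IS_ERR(skb2))
 *			break;
 *		err = output(net, sk, skb2);
 *	}
 */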

void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);

int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
		    __u8 tos);
void ip_init(void);
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int len, int protolen,
		   struct ipcm_cookie *ipc,
		   struct rtable **rt,
		   unsigned int flags);
int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
		       struct sk_buff *skb);
struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork);
int ip_send_skb(struct net *net, struct sk_buff *skb);
int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
void ip_flush_pending_frames(struct sock *sk);
struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    struct inet_cork *cork, unsigned int flags);

int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);

static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
	return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}
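/*
 * ip_append_data() and friends implement the corked-send path used by UDP
 * and raw sockets: data is queued on sk->sk_write_queue, then either built
 * into one skb and transmitted (ip_finish_skb() followed by ip_send_skb(),
 * or ip_push_pending_frames(), which does both) or discarded with
 * ip_flush_pending_frames().  ip_make_skb() is the single-shot variant that
 * uses a private queue and cork instead of the socket's.
 */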

/* Get the route scope that should be used when sending a packet. */
static inline u8 ip_sendmsg_scope(const struct inet_sock *inet,
				  const struct ipcm_cookie *ipc,
				  const struct msghdr *msg)
{
	if (sock_flag(&inet->sk, SOCK_LOCALROUTE) ||
	    msg->msg_flags & MSG_DONTROUTE ||
	    (ipc->opt && ipc->opt->opt.is_strictroute))
		return RT_SCOPE_LINK;

	return RT_SCOPE_UNIVERSE;
}

static inline __u8 get_rttos(struct ipcm_cookie *ipc, struct inet_sock *inet)
{
	return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(READ_ONCE(inet->tos));
}

/* datagram.c */
int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);

void ip4_datagram_release_cb(struct sock *sk);

struct ip_reply_arg {
	struct kvec iov[1];
	int	    flags;
	__wsum	    csum;
	int	    csumoffset; /* u16 offset of csum in iov[0].iov_base */
				/* -1 if not needed */
	int	    bound_dev_if;
	u8	    tos;
	kuid_t	    uid;
};

#define IP_REPLY_ARG_NOSRCCHECK 1

static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
{
	return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}

void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len, u64 transmit_time, u32 txhash);

#define IP_INC_STATS(net, field)	SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define __IP_INC_STATS(net, field)	__SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define IP_ADD_STATS(net, field, val)	SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_ADD_STATS(net, field, val)	__SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define IP_UPD_PO_STATS(net, field, val)	SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_UPD_PO_STATS(net, field, val)	__SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define NET_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.net_statistics, field)
#define __NET_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.net_statistics, field)
#define NET_ADD_STATS(net, field, adnd)	SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
#define __NET_ADD_STATS(net, field, adnd)	__SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)

static inline u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
{
	return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
}

unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG == 32
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
			 size_t syncp_offset);
u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
#else
static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
				       size_t syncp_offset)
{
	return snmp_get_cpu_field(mib, cpu, offct);
}

static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
{
	return snmp_fold_field(mib, offt);
}
#endif

#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff64[i] += snmp_get_cpu_field64( \
					mib_statistic, \
					c, stats_list[i].entry, \
					offset); \
	} \
}

#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
{ \
	int i, c; \
	for_each_possible_cpu(c) { \
		for (i = 0; stats_list[i].name; i++) \
			buff[i] += snmp_get_cpu_field( \
					mib_statistic, \
					c, stats_list[i].entry); \
	} \
}
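/*
 * The *_batch macros fold one value per named counter across all possible
 * CPUs, e.g. roughly as the /proc/net/snmp code does (illustrative sketch;
 * snmp4_ipstats_list lives in net/ipv4/proc.c):
 *
 *	u64 buff64[IPSTATS_MIB_MAX] = { 0 };
 *
 *	snmp_get_cpu_field64_batch(buff64, snmp4_ipstats_list,
 *				   net->mib.ip_statistics,
 *				   offsetof(struct ipstats_mib, syncp));
 */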

static inline void inet_get_local_port_range(const struct net *net, int *low, int *high)
{
	u32 range = READ_ONCE(net->ipv4.ip_local_ports.range);

	*low = range & 0xffff;
	*high = range >> 16;
}
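/*
 * The local port range is stored as a single 32-bit value with the low
 * bound in the lower 16 bits and the high bound in the upper 16 bits, so
 * both ends can be read atomically.  With the conventional default of
 * 32768..60999 the stored value would be (60999 << 16) | 32768.
 */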
bool inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high);

#ifdef CONFIG_SYSCTL
static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
{
	if (!net->ipv4.sysctl_local_reserved_ports)
		return false;
	return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
}

static inline bool sysctl_dev_name_is_allowed(const char *name)
{
	return strcmp(name, "default") != 0 && strcmp(name, "all") != 0;
}

static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
	return port < READ_ONCE(net->ipv4.sysctl_ip_prot_sock);
}

#else
static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
{
	return false;
}

static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
	return port < PROT_SOCK;
}
#endif

__be32 inet_current_timestamp(void);

/* From inetpeer.c */
extern int inet_peer_threshold;
extern int inet_peer_minttl;
extern int inet_peer_maxttl;

void ipfrag_init(void);

void ip_static_sysctl_init(void);

#define IP4_REPLY_MARK(net, mark) \
	(READ_ONCE((net)->ipv4.sysctl_fwmark_reflect) ? (mark) : 0)

static inline bool ip_is_fragment(const struct iphdr *iph)
{
	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
}
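/*
 * Note that the first fragment has a zero offset but MF set, so it still
 * counts as a fragment here; only a complete, unfragmented datagram has
 * both MF clear and a zero offset.
 */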

#ifdef CONFIG_INET
#include <net/dst.h>

/* The function in 2.2 was invalid, producing wrong result for
 * check=0xFEFF. It was noticed by Arthur Skawina _year_ ago. --ANK(000625) */
static inline
int ip_decrease_ttl(struct iphdr *iph)
{
	u32 check = (__force u32)iph->check;

	check += (__force u32)htons(0x0100);
	iph->check = (__force __sum16)(check + (check >= 0xFFFF));
	return --iph->ttl;
}
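/*
 * The checksum update above is incremental (cf. RFC 1141/1624): the TTL
 * shares a 16-bit header word with the protocol field, so decrementing it
 * by one is compensated by adding htons(0x0100) to the stored
 * one's-complement checksum, with "check >= 0xFFFF" supplying the
 * end-around carry.
 */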

static inline int ip_mtu_locked(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *)dst;

	return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
}

static inline
int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
{
	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);

	return pmtudisc == IP_PMTUDISC_DO ||
	       (pmtudisc == IP_PMTUDISC_WANT &&
		!ip_mtu_locked(dst));
}

static inline bool ip_sk_accept_pmtu(const struct sock *sk)
{
	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);

	return pmtudisc != IP_PMTUDISC_INTERFACE &&
	       pmtudisc != IP_PMTUDISC_OMIT;
}

static inline bool ip_sk_use_pmtu(const struct sock *sk)
{
	return READ_ONCE(inet_sk(sk)->pmtudisc) < IP_PMTUDISC_PROBE;
}

static inline bool ip_sk_ignore_df(const struct sock *sk)
{
	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);

	return pmtudisc < IP_PMTUDISC_DO || pmtudisc == IP_PMTUDISC_OMIT;
}

static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
						    bool forwarding)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net *net = dev_net(dst->dev);
	unsigned int mtu;

	if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) ||
	    ip_mtu_locked(dst) ||
	    !forwarding) {
		mtu = rt->rt_pmtu;
		if (mtu && time_before(jiffies, rt->dst.expires))
			goto out;
	}

	/* 'forwarding = true' case should always honour route mtu */
	mtu = dst_metric_raw(dst, RTAX_MTU);
	if (mtu)
		goto out;

	mtu = READ_ONCE(dst->dev->mtu);

	if (unlikely(ip_mtu_locked(dst))) {
		if (rt->rt_uses_gateway && mtu > 576)
			mtu = 576;
	}

out:
	mtu = min_t(unsigned int, mtu, IP_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}

static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
					  const struct sk_buff *skb)
{
	unsigned int mtu;

	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
		bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;

		return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
	}

	mtu = min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
	return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
}

struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
					int fc_mx_len,
					struct netlink_ext_ack *extack);
static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
{
	if (fib_metrics != &dst_default_metrics &&
	    refcount_dec_and_test(&fib_metrics->refcnt))
		kfree(fib_metrics);
}

/* ipv4 and ipv6 both use refcounted metrics if it is not the default */
static inline
void ip_dst_init_metrics(struct dst_entry *dst, struct dst_metrics *fib_metrics)
{
	dst_init_metrics(dst, fib_metrics->metrics, true);

	if (fib_metrics != &dst_default_metrics) {
		dst->_metrics |= DST_METRICS_REFCOUNTED;
		refcount_inc(&fib_metrics->refcnt);
	}
}

static inline
void ip_dst_metrics_put(struct dst_entry *dst)
{
	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);

	if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
		kfree(p);
}

void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);

static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
					struct sock *sk, int segs)
{
	struct iphdr *iph = ip_hdr(skb);

	/* We had many attacks based on IPID, use the private
	 * generator as much as we can.
	 */
	if (sk && inet_sk(sk)->inet_daddr) {
		int val;

		/* avoid atomic operations for TCP,
		 * as we hold socket lock at this point.
		 */
		if (sk_is_tcp(sk)) {
			sock_owned_by_me(sk);
			val = atomic_read(&inet_sk(sk)->inet_id);
			atomic_set(&inet_sk(sk)->inet_id, val + segs);
		} else {
			val = atomic_add_return(segs, &inet_sk(sk)->inet_id);
		}
		iph->id = htons(val);
		return;
	}
	if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
		iph->id = 0;
	} else {
		/* Unfortunately we need the big hammer to get a suitable IPID */
		__ip_select_ident(net, iph, segs);
	}
}

static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
				   struct sock *sk)
{
	ip_select_ident_segs(net, skb, sk, 1);
}

static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
{
	return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				  skb->len, proto, 0);
}

/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
 * Equivalent to :	flow->v4addrs.src = iph->saddr;
 *			flow->v4addrs.dst = iph->daddr;
 */
static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
					    const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
		     offsetof(typeof(flow->addrs), v4addrs.src) +
		     sizeof(flow->addrs.v4addrs.src));
	memcpy(&flow->addrs.v4addrs, &iph->addrs, sizeof(flow->addrs.v4addrs));
	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}

/*
 *	Map a multicast IP onto multicast MAC for type ethernet.
 */

static inline void ip_eth_mc_map(__be32 naddr, char *buf)
{
	__u32 addr = ntohl(naddr);

	buf[0] = 0x01;
	buf[1] = 0x00;
	buf[2] = 0x5e;
	buf[5] = addr & 0xFF;
	addr >>= 8;
	buf[4] = addr & 0xFF;
	addr >>= 8;
	buf[3] = addr & 0x7F;
}
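/*
 * Per RFC 1112, only the low-order 23 bits of the group address are copied
 * into the 01:00:5e:00:00:00 block, so e.g. both 224.1.2.3 and 225.129.2.3
 * map to 01:00:5e:01:02:03.
 */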

/*
 *	Map a multicast IP onto multicast MAC for type IP-over-InfiniBand.
 *	Leave P_Key as 0 to be filled in by driver.
 */

static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	__u32 addr;
	unsigned char scope = broadcast[5] & 0xF;

	buf[0]  = 0;		/* Reserved */
	buf[1]  = 0xff;		/* Multicast QPN */
	buf[2]  = 0xff;
	buf[3]  = 0xff;
	addr    = ntohl(naddr);
	buf[4]  = 0xff;
	buf[5]  = 0x10 | scope;	/* scope from broadcast address */
	buf[6]  = 0x40;		/* IPv4 signature */
	buf[7]  = 0x1b;
	buf[8]  = broadcast[8];	/* P_Key */
	buf[9]  = broadcast[9];
	buf[10] = 0;
	buf[11] = 0;
	buf[12] = 0;
	buf[13] = 0;
	buf[14] = 0;
	buf[15] = 0;
	buf[19] = addr & 0xff;
	addr  >>= 8;
	buf[18] = addr & 0xff;
	addr  >>= 8;
	buf[17] = addr & 0xff;
	addr  >>= 8;
	buf[16] = addr & 0x0f;
}

static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
	if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
		memcpy(buf, broadcast, 4);
	else
		memcpy(buf, &naddr, sizeof(naddr));
}

#if IS_ENABLED(CONFIG_IPV6)
#include <linux/ipv6.h>
#endif

static __inline__ void inet_reset_saddr(struct sock *sk)
{
	inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == PF_INET6) {
		struct ipv6_pinfo *np = inet6_sk(sk);

		memset(&np->saddr, 0, sizeof(np->saddr));
		memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
	}
#endif
}

#endif

static inline unsigned int ipv4_addr_hash(__be32 ip)
{
	return (__force unsigned int) ip;
}

static inline u32 ipv4_portaddr_hash(const struct net *net,
				     __be32 saddr,
				     unsigned int port)
{
	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}

bool ip_call_ra_chain(struct sk_buff *skb);

/*
 *	Functions provided by ip_fragment.c
 */

enum ip_defrag_users {
	IP_DEFRAG_LOCAL_DELIVER,
	IP_DEFRAG_CALL_RA_CHAIN,
	IP_DEFRAG_CONNTRACK_IN,
	__IP_DEFRAG_CONNTRACK_IN_END	= IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_OUT,
	__IP_DEFRAG_CONNTRACK_OUT_END	= IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
	IP_DEFRAG_CONNTRACK_BRIDGE_IN,
	__IP_DEFRAG_CONNTRACK_BRIDGE_IN	= IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
	IP_DEFRAG_VS_IN,
	IP_DEFRAG_VS_OUT,
	IP_DEFRAG_VS_FWD,
	IP_DEFRAG_AF_PACKET,
	IP_DEFRAG_MACVLAN,
};

/* Return true if the value of 'user' is between 'lower_bound'
 * and 'upper_bound' inclusively.
 */
static inline bool ip_defrag_user_in_between(u32 user,
					     enum ip_defrag_users lower_bound,
					     enum ip_defrag_users upper_bound)
{
	return user >= lower_bound && user <= upper_bound;
}

int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
#ifdef CONFIG_INET
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
#else
static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	return skb;
}
#endif

/*
 *	Functions provided by ip_forward.c
 */

int ip_forward(struct sk_buff *skb);

/*
 *	Functions provided by ip_options.c
 */

void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
		      __be32 daddr, struct rtable *rt);

int __ip_options_echo(struct net *net, struct ip_options *dopt,
		      struct sk_buff *skb, const struct ip_options *sopt);
static inline int ip_options_echo(struct net *net, struct ip_options *dopt,
				  struct sk_buff *skb)
{
	return __ip_options_echo(net, dopt, skb, &IPCB(skb)->opt);
}

void ip_options_fragment(struct sk_buff *skb);
int __ip_options_compile(struct net *net, struct ip_options *opt,
			 struct sk_buff *skb, __be32 *info);
int ip_options_compile(struct net *net, struct ip_options *opt,
		       struct sk_buff *skb);
int ip_options_get(struct net *net, struct ip_options_rcu **optp,
		   sockptr_t data, int optlen);
void ip_options_undo(struct ip_options *opt);
void ip_forward_options(struct sk_buff *skb);
int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);

/*
 *	Functions provided by ip_sockglue.c
 */

void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb, bool drop_dst);
void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
			 struct sk_buff *skb, int tlen, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
		 struct ipcm_cookie *ipc, bool allow_ipv6);
DECLARE_STATIC_KEY_FALSE(ip4_min_ttl);
int do_ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		     unsigned int optlen);
int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		  unsigned int optlen);
int do_ip_getsockopt(struct sock *sk, int level, int optname,
		     sockptr_t optval, sockptr_t optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		  int __user *optlen);
int ip_ra_control(struct sock *sk, unsigned char on,
		  void (*destructor)(struct sock *));

int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
		   u32 info, u8 *payload);
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
		    u32 info);

static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
	ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
}

bool icmp_global_allow(void);
extern int sysctl_icmp_msgs_per_sec;
extern int sysctl_icmp_msgs_burst;

#ifdef CONFIG_PROC_FS
int ip_misc_proc_init(void);
#endif

int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
				struct netlink_ext_ack *extack);

static inline bool inetdev_valid_mtu(unsigned int mtu)
{
	return likely(mtu >= IPV4_MIN_MTU);
}

void ip_sock_set_freebind(struct sock *sk);
int ip_sock_set_mtu_discover(struct sock *sk, int val);
void ip_sock_set_pktinfo(struct sock *sk);
void ip_sock_set_recverr(struct sock *sk);
void ip_sock_set_tos(struct sock *sk, int val);
void __ip_sock_set_tos(struct sock *sk, int val);

#endif	/* _IP_H */