1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
2 | /* |
3 | * INET An implementation of the TCP/IP protocol suite for the LINUX |
4 | * operating system. INET is implemented using the BSD Socket |
5 | * interface as the means of communication with the user level. |
6 | * |
7 | * Definitions for the Interfaces handler. |
8 | * |
9 | * Version: @(#)dev.h 1.0.10 08/12/93 |
10 | * |
11 | * Authors: Ross Biro |
12 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
13 | * Corey Minyard <wf-rch!minyard@relay.EU.net> |
14 | * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov> |
15 | * Alan Cox, <alan@lxorguk.ukuu.org.uk> |
16 | * Bjorn Ekwall. <bj0rn@blox.se> |
17 | * Pekka Riikonen <priikone@poseidon.pspt.fi> |
18 | * |
19 | * Moved to /usr/include/linux for NET3 |
20 | */ |
21 | #ifndef _LINUX_NETDEVICE_H |
22 | #define _LINUX_NETDEVICE_H |
23 | |
24 | #include <linux/timer.h> |
25 | #include <linux/bug.h> |
26 | #include <linux/delay.h> |
27 | #include <linux/atomic.h> |
28 | #include <linux/prefetch.h> |
29 | #include <asm/cache.h> |
30 | #include <asm/byteorder.h> |
31 | #include <asm/local.h> |
32 | |
33 | #include <linux/percpu.h> |
34 | #include <linux/rculist.h> |
35 | #include <linux/workqueue.h> |
36 | #include <linux/dynamic_queue_limits.h> |
37 | |
38 | #include <net/net_namespace.h> |
39 | #ifdef CONFIG_DCB |
40 | #include <net/dcbnl.h> |
41 | #endif |
42 | #include <net/netprio_cgroup.h> |
43 | |
44 | #include <linux/netdev_features.h> |
45 | #include <linux/neighbour.h> |
46 | #include <uapi/linux/netdevice.h> |
47 | #include <uapi/linux/if_bonding.h> |
48 | #include <uapi/linux/pkt_cls.h> |
49 | #include <uapi/linux/netdev.h> |
50 | #include <linux/hashtable.h> |
51 | #include <linux/rbtree.h> |
52 | #include <net/net_trackers.h> |
53 | #include <net/net_debug.h> |
54 | #include <net/dropreason-core.h> |
55 | |
56 | struct netpoll_info; |
57 | struct device; |
58 | struct ethtool_ops; |
59 | struct kernel_hwtstamp_config; |
60 | struct phy_device; |
61 | struct dsa_port; |
62 | struct ip_tunnel_parm; |
63 | struct macsec_context; |
64 | struct macsec_ops; |
65 | struct netdev_name_node; |
66 | struct sd_flow_limit; |
67 | struct sfp_bus; |
68 | /* 802.11 specific */ |
69 | struct wireless_dev; |
70 | /* 802.15.4 specific */ |
71 | struct wpan_dev; |
72 | struct mpls_dev; |
73 | /* UDP Tunnel offloads */ |
74 | struct udp_tunnel_info; |
75 | struct udp_tunnel_nic_info; |
76 | struct udp_tunnel_nic; |
77 | struct bpf_prog; |
78 | struct xdp_buff; |
79 | struct xdp_frame; |
80 | struct xdp_metadata_ops; |
81 | struct xdp_md; |
82 | /* DPLL specific */ |
83 | struct dpll_pin; |
84 | |
85 | typedef u32 xdp_features_t; |
86 | |
87 | void synchronize_net(void); |
88 | void netdev_set_default_ethtool_ops(struct net_device *dev, |
89 | const struct ethtool_ops *ops); |
90 | void netdev_sw_irq_coalesce_default_on(struct net_device *dev); |
91 | |
92 | /* Backlog congestion levels */ |
93 | #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ |
94 | #define NET_RX_DROP 1 /* packet dropped */ |
95 | |
96 | #define MAX_NEST_DEV 8 |
97 | |
98 | /* |
99 | * Transmit return codes: transmit return codes originate from three different |
100 | * namespaces: |
101 | * |
102 | * - qdisc return codes |
103 | * - driver transmit return codes |
104 | * - errno values |
105 | * |
106 | * Drivers are allowed to return any one of those in their hard_start_xmit() |
107 | * function. Real network devices commonly used with qdiscs should only return |
108 | * the driver transmit return codes though - when qdiscs are used, the actual |
109 | * transmission happens asynchronously, so the value is not propagated to |
110 | * higher layers. Virtual network devices transmit synchronously; in this case |
111 | * the driver transmit return codes are consumed by dev_queue_xmit(), and all |
112 | * others are propagated to higher layers. |
113 | */ |
114 | |
115 | /* qdisc ->enqueue() return codes. */ |
116 | #define NET_XMIT_SUCCESS 0x00 |
117 | #define NET_XMIT_DROP 0x01 /* skb dropped */ |
118 | #define NET_XMIT_CN 0x02 /* congestion notification */ |
119 | #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */ |
120 | |
121 | /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It |
122 | * indicates that the device will soon be dropping packets, or already drops |
123 | * some packets of the same priority; prompting us to send less aggressively. */ |
124 | #define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e)) |
125 | #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0) |
126 | |
127 | /* Driver transmit return codes */ |
128 | #define NETDEV_TX_MASK 0xf0 |
129 | |
130 | enum netdev_tx { |
131 | __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */ |
132 | NETDEV_TX_OK = 0x00, /* driver took care of packet */ |
133 | NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/ |
134 | }; |
135 | typedef enum netdev_tx netdev_tx_t; |
136 | |
137 | /* |
138 | * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant; |
139 | * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed. |
140 | */ |
141 | static inline bool dev_xmit_complete(int rc) |
142 | { |
143 | /* |
144 | * Positive cases with an skb consumed by a driver: |
145 | * - successful transmission (rc == NETDEV_TX_OK) |
146 | * - error while transmitting (rc < 0) |
147 | * - error while queueing to a different device (rc & NET_XMIT_MASK) |
148 | */ |
149 | if (likely(rc < NET_XMIT_MASK)) |
150 | return true; |
151 | |
152 | return false; |
153 | } |
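
/*
 * Example: a minimal sketch (not the core's actual transmit path) of how
 * a caller consumes driver transmit return codes:
 *
 *	netdev_tx_t rc = dev->netdev_ops->ndo_start_xmit(skb, dev);
 *
 *	if (dev_xmit_complete(rc))
 *		return rc;
 *
 * When dev_xmit_complete() returns false (NETDEV_TX_BUSY), the skb is
 * still owned by the caller and must be requeued or dropped.
 */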
154 | |
155 | /* |
156 | * Compute the worst-case header length according to the protocols |
157 | * used. |
158 | */ |
159 | |
160 | #if defined(CONFIG_HYPERV_NET) |
# define LL_MAX_HEADER 128
162 | #elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25) |
163 | # if defined(CONFIG_MAC80211_MESH) |
164 | # define LL_MAX_HEADER 128 |
165 | # else |
166 | # define LL_MAX_HEADER 96 |
167 | # endif |
168 | #else |
169 | # define LL_MAX_HEADER 32 |
170 | #endif |
171 | |
172 | #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \ |
173 | !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL) |
174 | #define MAX_HEADER LL_MAX_HEADER |
175 | #else |
#define MAX_HEADER (LL_MAX_HEADER + 48)
177 | #endif |
178 | |
179 | /* |
180 | * Old network device statistics. Fields are native words |
181 | * (unsigned long) so they can be read and written atomically. |
182 | */ |
183 | |
184 | #define NET_DEV_STAT(FIELD) \ |
185 | union { \ |
186 | unsigned long FIELD; \ |
187 | atomic_long_t __##FIELD; \ |
188 | } |
189 | |
190 | struct net_device_stats { |
191 | NET_DEV_STAT(rx_packets); |
192 | NET_DEV_STAT(tx_packets); |
193 | NET_DEV_STAT(rx_bytes); |
194 | NET_DEV_STAT(tx_bytes); |
195 | NET_DEV_STAT(rx_errors); |
196 | NET_DEV_STAT(tx_errors); |
197 | NET_DEV_STAT(rx_dropped); |
198 | NET_DEV_STAT(tx_dropped); |
199 | NET_DEV_STAT(multicast); |
200 | NET_DEV_STAT(collisions); |
201 | NET_DEV_STAT(rx_length_errors); |
202 | NET_DEV_STAT(rx_over_errors); |
203 | NET_DEV_STAT(rx_crc_errors); |
204 | NET_DEV_STAT(rx_frame_errors); |
205 | NET_DEV_STAT(rx_fifo_errors); |
206 | NET_DEV_STAT(rx_missed_errors); |
207 | NET_DEV_STAT(tx_aborted_errors); |
208 | NET_DEV_STAT(tx_carrier_errors); |
209 | NET_DEV_STAT(tx_fifo_errors); |
210 | NET_DEV_STAT(tx_heartbeat_errors); |
211 | NET_DEV_STAT(tx_window_errors); |
212 | NET_DEV_STAT(rx_compressed); |
213 | NET_DEV_STAT(tx_compressed); |
214 | }; |
215 | #undef NET_DEV_STAT |
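
/*
 * The union in NET_DEV_STAT() lets the core update these legacy counters
 * atomically through the atomic_long_t alias while drivers keep using the
 * plain unsigned long fields. Both views of the same counter:
 *
 *	dev->stats.rx_dropped++;
 *	atomic_long_inc(&dev->stats.__rx_dropped);
 *
 * The second form is what the kernel's DEV_STATS_INC()/DEV_STATS_ADD()
 * helpers expand to.
 */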
216 | |
217 | /* per-cpu stats, allocated on demand. |
218 | * Try to fit them in a single cache line, for dev_get_stats() sake. |
219 | */ |
220 | struct net_device_core_stats { |
221 | unsigned long rx_dropped; |
222 | unsigned long tx_dropped; |
223 | unsigned long rx_nohandler; |
224 | unsigned long rx_otherhost_dropped; |
225 | } __aligned(4 * sizeof(unsigned long)); |
226 | |
227 | #include <linux/cache.h> |
228 | #include <linux/skbuff.h> |
229 | |
230 | #ifdef CONFIG_RPS |
231 | #include <linux/static_key.h> |
232 | extern struct static_key_false rps_needed; |
233 | extern struct static_key_false rfs_needed; |
234 | #endif |
235 | |
236 | struct neighbour; |
237 | struct neigh_parms; |
238 | struct sk_buff; |
239 | |
240 | struct netdev_hw_addr { |
241 | struct list_head list; |
242 | struct rb_node node; |
243 | unsigned char addr[MAX_ADDR_LEN]; |
244 | unsigned char type; |
245 | #define NETDEV_HW_ADDR_T_LAN 1 |
246 | #define NETDEV_HW_ADDR_T_SAN 2 |
247 | #define NETDEV_HW_ADDR_T_UNICAST 3 |
248 | #define NETDEV_HW_ADDR_T_MULTICAST 4 |
249 | bool global_use; |
250 | int sync_cnt; |
251 | int refcount; |
252 | int synced; |
253 | struct rcu_head rcu_head; |
254 | }; |
255 | |
256 | struct netdev_hw_addr_list { |
257 | struct list_head list; |
258 | int count; |
259 | |
260 | /* Auxiliary tree for faster lookup on addition and deletion */ |
261 | struct rb_root tree; |
262 | }; |
263 | |
264 | #define netdev_hw_addr_list_count(l) ((l)->count) |
265 | #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0) |
266 | #define netdev_hw_addr_list_for_each(ha, l) \ |
267 | list_for_each_entry(ha, &(l)->list, list) |
268 | |
269 | #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc) |
270 | #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc) |
271 | #define netdev_for_each_uc_addr(ha, dev) \ |
272 | netdev_hw_addr_list_for_each(ha, &(dev)->uc) |
273 | #define netdev_for_each_synced_uc_addr(_ha, _dev) \ |
274 | netdev_for_each_uc_addr((_ha), (_dev)) \ |
275 | if ((_ha)->sync_cnt) |
276 | |
277 | #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc) |
278 | #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc) |
279 | #define netdev_for_each_mc_addr(ha, dev) \ |
280 | netdev_hw_addr_list_for_each(ha, &(dev)->mc) |
281 | #define netdev_for_each_synced_mc_addr(_ha, _dev) \ |
282 | netdev_for_each_mc_addr((_ha), (_dev)) \ |
283 | if ((_ha)->sync_cnt) |
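
/*
 * Example: a minimal sketch of a driver's ndo_set_rx_mode() using the
 * iteration helpers above; the foo_hw_* functions are hypothetical:
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		struct netdev_hw_addr *ha;
 *
 *		foo_hw_clear_filters(dev);
 *		netdev_for_each_mc_addr(ha, dev)
 *			foo_hw_add_filter(dev, ha->addr);
 *	}
 */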
284 | |
285 | struct hh_cache { |
286 | unsigned int hh_len; |
287 | seqlock_t hh_lock; |
288 | |
289 | /* cached hardware header; allow for machine alignment needs. */ |
290 | #define HH_DATA_MOD 16 |
291 | #define HH_DATA_OFF(__len) \ |
292 | (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1)) |
293 | #define HH_DATA_ALIGN(__len) \ |
294 | (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1)) |
295 | unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; |
296 | }; |
297 | |
298 | /* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much. |
299 | * Alternative is: |
300 | * dev->hard_header_len ? (dev->hard_header_len + |
301 | * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0 |
302 | * |
303 | * We could use other alignment values, but we must maintain the |
304 | * relationship HH alignment <= LL alignment. |
305 | */ |
306 | #define LL_RESERVED_SPACE(dev) \ |
307 | ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \ |
308 | & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD) |
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \
	  & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
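
/*
 * Example: the usual allocation pattern, reserving link-layer headroom
 * before building a packet (a sketch):
 *
 *	skb = alloc_skb(len + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */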
312 | |
struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned int len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
	bool	(*validate)(const char *ll_header, unsigned int len);
	__be16	(*parse_protocol)(const struct sk_buff *skb);
};
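
/*
 * Ethernet is the canonical implementation of these hooks: eth_header()
 * builds the hardware header in create() and eth_header_parse() extracts
 * the source address in parse(). A device selects an implementation by
 * pointing dev->header_ops at a static instance; a sketch with a
 * hypothetical foo_ prefix:
 *
 *	static const struct header_ops foo_header_ops = {
 *		.create	= eth_header,
 *		.parse	= eth_header_parse,
 *	};
 *
 *	dev->header_ops = &foo_header_ops;
 */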
325 | |
326 | /* These flag bits are private to the generic network queueing |
327 | * layer; they may not be explicitly referenced by any other |
328 | * code. |
329 | */ |
330 | |
331 | enum netdev_state_t { |
332 | __LINK_STATE_START, |
333 | __LINK_STATE_PRESENT, |
334 | __LINK_STATE_NOCARRIER, |
335 | __LINK_STATE_LINKWATCH_PENDING, |
336 | __LINK_STATE_DORMANT, |
337 | __LINK_STATE_TESTING, |
338 | }; |
339 | |
340 | struct gro_list { |
341 | struct list_head list; |
342 | int count; |
343 | }; |
344 | |
345 | /* |
 * size of gro hash buckets, must be less than the number of bits in
 * napi_struct::gro_bitmask
348 | */ |
349 | #define GRO_HASH_BUCKETS 8 |
350 | |
351 | /* |
352 | * Structure for NAPI scheduling similar to tasklet but with weighting |
353 | */ |
354 | struct napi_struct { |
355 | /* The poll_list must only be managed by the entity which |
356 | * changes the state of the NAPI_STATE_SCHED bit. This means |
357 | * whoever atomically sets that bit can add this napi_struct |
358 | * to the per-CPU poll_list, and whoever clears that bit |
359 | * can remove from the list right before clearing the bit. |
360 | */ |
361 | struct list_head poll_list; |
362 | |
363 | unsigned long state; |
364 | int weight; |
365 | int defer_hard_irqs_count; |
366 | unsigned long gro_bitmask; |
367 | int (*poll)(struct napi_struct *, int); |
368 | #ifdef CONFIG_NETPOLL |
369 | /* CPU actively polling if netpoll is configured */ |
370 | int poll_owner; |
371 | #endif |
372 | /* CPU on which NAPI has been scheduled for processing */ |
373 | int list_owner; |
374 | struct net_device *dev; |
375 | struct gro_list gro_hash[GRO_HASH_BUCKETS]; |
376 | struct sk_buff *skb; |
377 | struct list_head rx_list; /* Pending GRO_NORMAL skbs */ |
378 | int rx_count; /* length of rx_list */ |
379 | unsigned int napi_id; |
380 | struct hrtimer timer; |
381 | struct task_struct *thread; |
382 | /* control-path-only fields follow */ |
383 | struct list_head dev_list; |
384 | struct hlist_node napi_hash_node; |
385 | }; |
386 | |
387 | enum { |
388 | NAPI_STATE_SCHED, /* Poll is scheduled */ |
389 | NAPI_STATE_MISSED, /* reschedule a napi */ |
390 | NAPI_STATE_DISABLE, /* Disable pending */ |
391 | NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ |
392 | NAPI_STATE_LISTED, /* NAPI added to system lists */ |
393 | NAPI_STATE_NO_BUSY_POLL, /* Do not add in napi_hash, no busy polling */ |
394 | NAPI_STATE_IN_BUSY_POLL, /* sk_busy_loop() owns this NAPI */ |
395 | NAPI_STATE_PREFER_BUSY_POLL, /* prefer busy-polling over softirq processing*/ |
396 | NAPI_STATE_THREADED, /* The poll is performed inside its own thread*/ |
397 | NAPI_STATE_SCHED_THREADED, /* Napi is currently scheduled in threaded mode */ |
398 | }; |
399 | |
400 | enum { |
401 | NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED), |
402 | NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED), |
403 | NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE), |
404 | NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC), |
405 | NAPIF_STATE_LISTED = BIT(NAPI_STATE_LISTED), |
406 | NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL), |
407 | NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL), |
408 | NAPIF_STATE_PREFER_BUSY_POLL = BIT(NAPI_STATE_PREFER_BUSY_POLL), |
409 | NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED), |
410 | NAPIF_STATE_SCHED_THREADED = BIT(NAPI_STATE_SCHED_THREADED), |
411 | }; |
412 | |
413 | enum gro_result { |
414 | GRO_MERGED, |
415 | GRO_MERGED_FREE, |
416 | GRO_HELD, |
417 | GRO_NORMAL, |
418 | GRO_CONSUMED, |
419 | }; |
420 | typedef enum gro_result gro_result_t; |
421 | |
422 | /* |
423 | * enum rx_handler_result - Possible return values for rx_handlers. |
424 | * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it |
425 | * further. |
426 | * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in |
427 | * case skb->dev was changed by rx_handler. |
428 | * @RX_HANDLER_EXACT: Force exact delivery, no wildcard. |
429 | * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called. |
430 | * |
431 | * rx_handlers are functions called from inside __netif_receive_skb(), to do |
432 | * special processing of the skb, prior to delivery to protocol handlers. |
433 | * |
434 | * Currently, a net_device can only have a single rx_handler registered. Trying |
435 | * to register a second rx_handler will return -EBUSY. |
436 | * |
437 | * To register a rx_handler on a net_device, use netdev_rx_handler_register(). |
438 | * To unregister a rx_handler on a net_device, use |
439 | * netdev_rx_handler_unregister(). |
440 | * |
441 | * Upon return, rx_handler is expected to tell __netif_receive_skb() what to |
442 | * do with the skb. |
443 | * |
444 | * If the rx_handler consumed the skb in some way, it should return |
445 | * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for |
446 | * the skb to be delivered in some other way. |
447 | * |
448 | * If the rx_handler changed skb->dev, to divert the skb to another |
449 | * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the |
450 | * new device will be called if it exists. |
451 | * |
452 | * If the rx_handler decides the skb should be ignored, it should return |
453 | * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that |
454 | * are registered on exact device (ptype->dev == skb->dev). |
455 | * |
456 | * If the rx_handler didn't change skb->dev, but wants the skb to be normally |
457 | * delivered, it should return RX_HANDLER_PASS. |
458 | * |
459 | * A device without a registered rx_handler will behave as if rx_handler |
460 | * returned RX_HANDLER_PASS. |
461 | */ |
462 | |
463 | enum rx_handler_result { |
464 | RX_HANDLER_CONSUMED, |
465 | RX_HANDLER_ANOTHER, |
466 | RX_HANDLER_EXACT, |
467 | RX_HANDLER_PASS, |
468 | }; |
469 | typedef enum rx_handler_result rx_handler_result_t; |
470 | typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); |
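
/*
 * Example: a minimal sketch of an rx_handler and its registration; the
 * foo_* names are hypothetical, and registration must run under rtnl:
 *
 *	static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *		struct foo_port *port =
 *			rcu_dereference(skb->dev->rx_handler_data);
 *
 *		if (!foo_port_wants(port, skb))
 *			return RX_HANDLER_PASS;
 *
 *		skb->dev = port->upper_dev;
 *		return RX_HANDLER_ANOTHER;
 *	}
 *
 *	err = netdev_rx_handler_register(dev, foo_handle_frame, port);
 */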
471 | |
472 | void __napi_schedule(struct napi_struct *n); |
473 | void __napi_schedule_irqoff(struct napi_struct *n); |
474 | |
475 | static inline bool napi_disable_pending(struct napi_struct *n) |
476 | { |
477 | return test_bit(NAPI_STATE_DISABLE, &n->state); |
478 | } |
479 | |
480 | static inline bool napi_prefer_busy_poll(struct napi_struct *n) |
481 | { |
482 | return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state); |
483 | } |
484 | |
485 | /** |
486 | * napi_is_scheduled - test if NAPI is scheduled |
487 | * @n: NAPI context |
488 | * |
 * This check is "best-effort". With no locking implemented,
 * a NAPI can be scheduled or can terminate right after this check
 * and produce imprecise results.
 *
 * NAPI_STATE_SCHED is an internal state; napi_is_scheduled
 * should not normally be used, napi_schedule should be
 * used instead.
 *
 * Use only if the driver really needs to check if a NAPI
 * is scheduled, for example in the context of a delayed timer
 * that can be skipped if a NAPI is already scheduled.
 *
 * Return true if NAPI is scheduled, false otherwise.
502 | */ |
503 | static inline bool napi_is_scheduled(struct napi_struct *n) |
504 | { |
505 | return test_bit(NAPI_STATE_SCHED, &n->state); |
506 | } |
507 | |
508 | bool napi_schedule_prep(struct napi_struct *n); |
509 | |
510 | /** |
511 | * napi_schedule - schedule NAPI poll |
512 | * @n: NAPI context |
513 | * |
514 | * Schedule NAPI poll routine to be called if it is not already |
515 | * running. |
 * Return true if we schedule a NAPI, false otherwise.
 * Refer to napi_schedule_prep() for additional reasons why
 * a NAPI might not be scheduled.
519 | */ |
520 | static inline bool napi_schedule(struct napi_struct *n) |
521 | { |
522 | if (napi_schedule_prep(n)) { |
523 | __napi_schedule(n); |
524 | return true; |
525 | } |
526 | |
527 | return false; |
528 | } |
529 | |
530 | /** |
531 | * napi_schedule_irqoff - schedule NAPI poll |
532 | * @n: NAPI context |
533 | * |
534 | * Variant of napi_schedule(), assuming hard irqs are masked. |
535 | */ |
536 | static inline void napi_schedule_irqoff(struct napi_struct *n) |
537 | { |
538 | if (napi_schedule_prep(n)) |
539 | __napi_schedule_irqoff(n); |
540 | } |
541 | |
542 | /** |
543 | * napi_complete_done - NAPI processing complete |
544 | * @n: NAPI context |
545 | * @work_done: number of packets processed |
546 | * |
547 | * Mark NAPI processing as complete. Should only be called if poll budget |
548 | * has not been completely consumed. |
549 | * Prefer over napi_complete(). |
550 | * Return false if device should avoid rearming interrupts. |
551 | */ |
552 | bool napi_complete_done(struct napi_struct *n, int work_done); |
553 | |
554 | static inline bool napi_complete(struct napi_struct *n) |
555 | { |
	return napi_complete_done(n, 0);
557 | } |
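
/*
 * Example: the canonical shape of a driver's NAPI poll routine and the
 * interrupt handler that schedules it; the foo_* helpers are hypothetical:
 *
 *	static irqreturn_t foo_isr(int irq, void *data)
 *	{
 *		struct foo_ring *ring = data;
 *
 *		foo_disable_irq(ring);
 *		napi_schedule(&ring->napi);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_ring *ring =
 *			container_of(napi, struct foo_ring, napi);
 *		int work_done = foo_clean_rx(ring, budget);
 *
 *		if (work_done < budget && napi_complete_done(napi, work_done))
 *			foo_enable_irq(ring);
 *		return work_done;
 *	}
 */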
558 | |
559 | int dev_set_threaded(struct net_device *dev, bool threaded); |
560 | |
561 | /** |
562 | * napi_disable - prevent NAPI from scheduling |
563 | * @n: NAPI context |
564 | * |
565 | * Stop NAPI from being scheduled on this context. |
566 | * Waits till any outstanding processing completes. |
567 | */ |
568 | void napi_disable(struct napi_struct *n); |
569 | |
570 | void napi_enable(struct napi_struct *n); |
571 | |
572 | /** |
573 | * napi_synchronize - wait until NAPI is not running |
574 | * @n: NAPI context |
575 | * |
576 | * Wait until NAPI is done being scheduled on this context. |
577 | * Waits till any outstanding processing completes but |
578 | * does not disable future activations. |
579 | */ |
580 | static inline void napi_synchronize(const struct napi_struct *n) |
581 | { |
582 | if (IS_ENABLED(CONFIG_SMP)) |
583 | while (test_bit(NAPI_STATE_SCHED, &n->state)) |
			msleep(1);
585 | else |
586 | barrier(); |
587 | } |
588 | |
589 | /** |
590 | * napi_if_scheduled_mark_missed - if napi is running, set the |
591 | * NAPIF_STATE_MISSED |
592 | * @n: NAPI context |
593 | * |
594 | * If napi is running, set the NAPIF_STATE_MISSED, and return true if |
595 | * NAPI is scheduled. |
 */
597 | static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n) |
598 | { |
599 | unsigned long val, new; |
600 | |
601 | val = READ_ONCE(n->state); |
602 | do { |
603 | if (val & NAPIF_STATE_DISABLE) |
604 | return true; |
605 | |
606 | if (!(val & NAPIF_STATE_SCHED)) |
607 | return false; |
608 | |
609 | new = val | NAPIF_STATE_MISSED; |
610 | } while (!try_cmpxchg(&n->state, &val, new)); |
611 | |
612 | return true; |
613 | } |
614 | |
615 | enum netdev_queue_state_t { |
616 | __QUEUE_STATE_DRV_XOFF, |
617 | __QUEUE_STATE_STACK_XOFF, |
618 | __QUEUE_STATE_FROZEN, |
619 | }; |
620 | |
621 | #define QUEUE_STATE_DRV_XOFF (1 << __QUEUE_STATE_DRV_XOFF) |
622 | #define QUEUE_STATE_STACK_XOFF (1 << __QUEUE_STATE_STACK_XOFF) |
623 | #define QUEUE_STATE_FROZEN (1 << __QUEUE_STATE_FROZEN) |
624 | |
625 | #define QUEUE_STATE_ANY_XOFF (QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF) |
626 | #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \ |
627 | QUEUE_STATE_FROZEN) |
628 | #define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \ |
629 | QUEUE_STATE_FROZEN) |
630 | |
631 | /* |
632 | * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The |
633 | * netif_tx_* functions below are used to manipulate this flag. The |
634 | * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit |
635 | * queue independently. The netif_xmit_*stopped functions below are called |
636 | * to check if the queue has been stopped by the driver or stack (either |
637 | * of the XOFF bits are set in the state). Drivers should not need to call |
 * netif_xmit_*stopped functions, they should only be using netif_tx_*.
639 | */ |
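
/*
 * Example: the usual driver pattern around a full TX ring, built on the
 * netif_tx_* helpers declared later in this header; foo_* is hypothetical:
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
 *
 *		foo_post_descriptor(dev, skb);
 *		if (foo_ring_full(dev))
 *			netif_tx_stop_queue(txq);
 *		return NETDEV_TX_OK;
 *	}
 *
 * with a matching netif_tx_wake_queue(txq) in the TX completion path once
 * descriptors are available again.
 */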
640 | |
641 | struct netdev_queue { |
642 | /* |
643 | * read-mostly part |
644 | */ |
645 | struct net_device *dev; |
646 | netdevice_tracker dev_tracker; |
647 | |
648 | struct Qdisc __rcu *qdisc; |
649 | struct Qdisc __rcu *qdisc_sleeping; |
650 | #ifdef CONFIG_SYSFS |
651 | struct kobject kobj; |
652 | #endif |
653 | #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) |
654 | int numa_node; |
655 | #endif |
656 | unsigned long tx_maxrate; |
657 | /* |
658 | * Number of TX timeouts for this queue |
659 | * (/sys/class/net/DEV/Q/trans_timeout) |
660 | */ |
661 | atomic_long_t trans_timeout; |
662 | |
663 | /* Subordinate device that the queue has been assigned to */ |
664 | struct net_device *sb_dev; |
665 | #ifdef CONFIG_XDP_SOCKETS |
666 | struct xsk_buff_pool *pool; |
667 | #endif |
668 | /* |
669 | * write-mostly part |
670 | */ |
671 | spinlock_t _xmit_lock ____cacheline_aligned_in_smp; |
672 | int xmit_lock_owner; |
673 | /* |
674 | * Time (in jiffies) of last Tx |
675 | */ |
676 | unsigned long trans_start; |
677 | |
678 | unsigned long state; |
679 | |
680 | #ifdef CONFIG_BQL |
681 | struct dql dql; |
682 | #endif |
683 | } ____cacheline_aligned_in_smp; |
684 | |
685 | extern int sysctl_fb_tunnels_only_for_init_net; |
686 | extern int sysctl_devconf_inherit_init_net; |
687 | |
688 | /* |
689 | * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns |
690 | * == 1 : For initns only |
691 | * == 2 : For none. |
692 | */ |
693 | static inline bool net_has_fallback_tunnels(const struct net *net) |
694 | { |
695 | #if IS_ENABLED(CONFIG_SYSCTL) |
696 | int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net); |
697 | |
698 | return !fb_tunnels_only_for_init_net || |
		(net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1);
700 | #else |
701 | return true; |
702 | #endif |
703 | } |
704 | |
705 | static inline int net_inherit_devconf(void) |
706 | { |
707 | #if IS_ENABLED(CONFIG_SYSCTL) |
708 | return READ_ONCE(sysctl_devconf_inherit_init_net); |
709 | #else |
710 | return 0; |
711 | #endif |
712 | } |
713 | |
714 | static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) |
715 | { |
716 | #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) |
717 | return q->numa_node; |
718 | #else |
719 | return NUMA_NO_NODE; |
720 | #endif |
721 | } |
722 | |
723 | static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node) |
724 | { |
725 | #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) |
726 | q->numa_node = node; |
727 | #endif |
728 | } |
729 | |
730 | #ifdef CONFIG_RPS |
731 | /* |
732 | * This structure holds an RPS map which can be of variable length. The |
733 | * map is an array of CPUs. |
734 | */ |
735 | struct rps_map { |
736 | unsigned int len; |
737 | struct rcu_head rcu; |
738 | u16 cpus[]; |
739 | }; |
740 | #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16))) |
741 | |
742 | /* |
743 | * The rps_dev_flow structure contains the mapping of a flow to a CPU, the |
744 | * tail pointer for that CPU's input queue at the time of last enqueue, and |
745 | * a hardware filter index. |
746 | */ |
747 | struct rps_dev_flow { |
748 | u16 cpu; |
749 | u16 filter; |
750 | unsigned int last_qtail; |
751 | }; |
752 | #define RPS_NO_FILTER 0xffff |
753 | |
754 | /* |
755 | * The rps_dev_flow_table structure contains a table of flow mappings. |
756 | */ |
757 | struct rps_dev_flow_table { |
758 | unsigned int mask; |
759 | struct rcu_head rcu; |
760 | struct rps_dev_flow flows[]; |
761 | }; |
762 | #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \ |
763 | ((_num) * sizeof(struct rps_dev_flow))) |
764 | |
765 | /* |
766 | * The rps_sock_flow_table contains mappings of flows to the last CPU |
767 | * on which they were processed by the application (set in recvmsg). |
 * Each entry is a 32-bit value. The upper part is the high-order bits
 * of the flow hash, the lower part is the CPU number.
770 | * rps_cpu_mask is used to partition the space, depending on number of |
771 | * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1 |
772 | * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f, |
773 | * meaning we use 32-6=26 bits for the hash. |
774 | */ |
775 | struct rps_sock_flow_table { |
776 | u32 mask; |
777 | |
778 | u32 ents[] ____cacheline_aligned_in_smp; |
779 | }; |
780 | #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num])) |
781 | |
782 | #define RPS_NO_CPU 0xffff |
783 | |
784 | extern u32 rps_cpu_mask; |
785 | extern struct rps_sock_flow_table __rcu *rps_sock_flow_table; |
786 | |
787 | static inline void rps_record_sock_flow(struct rps_sock_flow_table *table, |
788 | u32 hash) |
789 | { |
790 | if (table && hash) { |
791 | unsigned int index = hash & table->mask; |
792 | u32 val = hash & ~rps_cpu_mask; |
793 | |
794 | /* We only give a hint, preemption can change CPU under us */ |
795 | val |= raw_smp_processor_id(); |
796 | |
797 | /* The following WRITE_ONCE() is paired with the READ_ONCE() |
798 | * here, and another one in get_rps_cpu(). |
799 | */ |
800 | if (READ_ONCE(table->ents[index]) != val) |
801 | WRITE_ONCE(table->ents[index], val); |
802 | } |
803 | } |
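
/*
 * Example: a sketch of the hint given from a socket receive path,
 * mirroring what sock_rps_record_flow() does with a socket's rx hash:
 *
 *	struct rps_sock_flow_table *tbl;
 *
 *	rcu_read_lock();
 *	tbl = rcu_dereference(rps_sock_flow_table);
 *	if (tbl)
 *		rps_record_sock_flow(tbl, sk->sk_rxhash);
 *	rcu_read_unlock();
 */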
804 | |
805 | #ifdef CONFIG_RFS_ACCEL |
806 | bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id, |
807 | u16 filter_id); |
808 | #endif |
809 | #endif /* CONFIG_RPS */ |
810 | |
811 | /* XPS map type and offset of the xps map within net_device->xps_maps[]. */ |
812 | enum xps_map_type { |
813 | XPS_CPUS = 0, |
814 | XPS_RXQS, |
815 | XPS_MAPS_MAX, |
816 | }; |
817 | |
818 | #ifdef CONFIG_XPS |
819 | /* |
820 | * This structure holds an XPS map which can be of variable length. The |
821 | * map is an array of queues. |
822 | */ |
823 | struct xps_map { |
824 | unsigned int len; |
825 | unsigned int alloc_len; |
826 | struct rcu_head rcu; |
827 | u16 queues[]; |
828 | }; |
829 | #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16))) |
830 | #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \ |
831 | - sizeof(struct xps_map)) / sizeof(u16)) |
832 | |
833 | /* |
834 | * This structure holds all XPS maps for device. Maps are indexed by CPU. |
835 | * |
 * We keep track of the number of cpus/rxqs used when the struct is allocated,
 * in nr_ids. This helps us avoid accessing out-of-bounds memory.
 *
 * We keep track of the number of traffic classes used when the struct is
 * allocated, in num_tc. This is used when navigating the maps, to ensure we
 * do not cross their upper bound, as the original dev->num_tc can be updated
 * in the meantime.
843 | */ |
844 | struct xps_dev_maps { |
845 | struct rcu_head rcu; |
846 | unsigned int nr_ids; |
847 | s16 num_tc; |
848 | struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */ |
849 | }; |
850 | |
851 | #define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \ |
852 | (nr_cpu_ids * (_tcs) * sizeof(struct xps_map *))) |
853 | |
854 | #define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\ |
855 | (_rxqs * (_tcs) * sizeof(struct xps_map *))) |
856 | |
857 | #endif /* CONFIG_XPS */ |
858 | |
859 | #define TC_MAX_QUEUE 16 |
860 | #define TC_BITMASK 15 |
861 | /* HW offloaded queuing disciplines txq count and offset maps */ |
862 | struct netdev_tc_txq { |
863 | u16 count; |
864 | u16 offset; |
865 | }; |
866 | |
867 | #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) |
868 | /* |
869 | * This structure is to hold information about the device |
870 | * configured to run FCoE protocol stack. |
871 | */ |
872 | struct netdev_fcoe_hbainfo { |
873 | char manufacturer[64]; |
874 | char serial_number[64]; |
875 | char hardware_version[64]; |
876 | char driver_version[64]; |
877 | char optionrom_version[64]; |
878 | char firmware_version[64]; |
879 | char model[256]; |
880 | char model_description[256]; |
881 | }; |
882 | #endif |
883 | |
884 | #define MAX_PHYS_ITEM_ID_LEN 32 |
885 | |
886 | /* This structure holds a unique identifier to identify some |
887 | * physical item (port for example) used by a netdevice. |
888 | */ |
889 | struct netdev_phys_item_id { |
890 | unsigned char id[MAX_PHYS_ITEM_ID_LEN]; |
891 | unsigned char id_len; |
892 | }; |
893 | |
894 | static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a, |
895 | struct netdev_phys_item_id *b) |
896 | { |
897 | return a->id_len == b->id_len && |
	       memcmp(a->id, b->id, a->id_len) == 0;
899 | } |
900 | |
901 | typedef u16 (*select_queue_fallback_t)(struct net_device *dev, |
902 | struct sk_buff *skb, |
903 | struct net_device *sb_dev); |
904 | |
905 | enum net_device_path_type { |
906 | DEV_PATH_ETHERNET = 0, |
907 | DEV_PATH_VLAN, |
908 | DEV_PATH_BRIDGE, |
909 | DEV_PATH_PPPOE, |
910 | DEV_PATH_DSA, |
911 | DEV_PATH_MTK_WDMA, |
912 | }; |
913 | |
914 | struct net_device_path { |
915 | enum net_device_path_type type; |
916 | const struct net_device *dev; |
917 | union { |
918 | struct { |
919 | u16 id; |
920 | __be16 proto; |
921 | u8 h_dest[ETH_ALEN]; |
922 | } encap; |
923 | struct { |
924 | enum { |
925 | DEV_PATH_BR_VLAN_KEEP, |
926 | DEV_PATH_BR_VLAN_TAG, |
927 | DEV_PATH_BR_VLAN_UNTAG, |
928 | DEV_PATH_BR_VLAN_UNTAG_HW, |
929 | } vlan_mode; |
930 | u16 vlan_id; |
931 | __be16 vlan_proto; |
932 | } bridge; |
933 | struct { |
934 | int port; |
935 | u16 proto; |
936 | } dsa; |
937 | struct { |
938 | u8 wdma_idx; |
939 | u8 queue; |
940 | u16 wcid; |
941 | u8 bss; |
942 | u8 amsdu; |
943 | } mtk_wdma; |
944 | }; |
945 | }; |
946 | |
947 | #define NET_DEVICE_PATH_STACK_MAX 5 |
948 | #define NET_DEVICE_PATH_VLAN_MAX 2 |
949 | |
950 | struct net_device_path_stack { |
951 | int num_paths; |
952 | struct net_device_path path[NET_DEVICE_PATH_STACK_MAX]; |
953 | }; |
954 | |
955 | struct net_device_path_ctx { |
956 | const struct net_device *dev; |
957 | u8 daddr[ETH_ALEN]; |
958 | |
959 | int num_vlans; |
960 | struct { |
961 | u16 id; |
962 | __be16 proto; |
963 | } vlan[NET_DEVICE_PATH_VLAN_MAX]; |
964 | }; |
965 | |
966 | enum tc_setup_type { |
967 | TC_QUERY_CAPS, |
968 | TC_SETUP_QDISC_MQPRIO, |
969 | TC_SETUP_CLSU32, |
970 | TC_SETUP_CLSFLOWER, |
971 | TC_SETUP_CLSMATCHALL, |
972 | TC_SETUP_CLSBPF, |
973 | TC_SETUP_BLOCK, |
974 | TC_SETUP_QDISC_CBS, |
975 | TC_SETUP_QDISC_RED, |
976 | TC_SETUP_QDISC_PRIO, |
977 | TC_SETUP_QDISC_MQ, |
978 | TC_SETUP_QDISC_ETF, |
979 | TC_SETUP_ROOT_QDISC, |
980 | TC_SETUP_QDISC_GRED, |
981 | TC_SETUP_QDISC_TAPRIO, |
982 | TC_SETUP_FT, |
983 | TC_SETUP_QDISC_ETS, |
984 | TC_SETUP_QDISC_TBF, |
985 | TC_SETUP_QDISC_FIFO, |
986 | TC_SETUP_QDISC_HTB, |
987 | TC_SETUP_ACT, |
988 | }; |
989 | |
990 | /* These structures hold the attributes of bpf state that are being passed |
991 | * to the netdevice through the bpf op. |
992 | */ |
993 | enum bpf_netdev_command { |
994 | /* Set or clear a bpf program used in the earliest stages of packet |
995 | * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee |
996 | * is responsible for calling bpf_prog_put on any old progs that are |
997 | * stored. In case of error, the callee need not release the new prog |
998 | * reference, but on success it takes ownership and must bpf_prog_put |
999 | * when it is no longer used. |
1000 | */ |
1001 | XDP_SETUP_PROG, |
1002 | XDP_SETUP_PROG_HW, |
1003 | /* BPF program for offload callbacks, invoked at program load time. */ |
1004 | BPF_OFFLOAD_MAP_ALLOC, |
1005 | BPF_OFFLOAD_MAP_FREE, |
1006 | XDP_SETUP_XSK_POOL, |
1007 | }; |
1008 | |
1009 | struct bpf_prog_offload_ops; |
1010 | struct netlink_ext_ack; |
1011 | struct xdp_umem; |
1012 | struct xdp_dev_bulk_queue; |
1013 | struct bpf_xdp_link; |
1014 | |
1015 | enum bpf_xdp_mode { |
1016 | XDP_MODE_SKB = 0, |
1017 | XDP_MODE_DRV = 1, |
1018 | XDP_MODE_HW = 2, |
1019 | __MAX_XDP_MODE |
1020 | }; |
1021 | |
1022 | struct bpf_xdp_entity { |
1023 | struct bpf_prog *prog; |
1024 | struct bpf_xdp_link *link; |
1025 | }; |
1026 | |
1027 | struct netdev_bpf { |
1028 | enum bpf_netdev_command command; |
1029 | union { |
1030 | /* XDP_SETUP_PROG */ |
1031 | struct { |
1032 | u32 flags; |
1033 | struct bpf_prog *prog; |
1034 | struct netlink_ext_ack *extack; |
1035 | }; |
1036 | /* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */ |
1037 | struct { |
1038 | struct bpf_offloaded_map *offmap; |
1039 | }; |
1040 | /* XDP_SETUP_XSK_POOL */ |
1041 | struct { |
1042 | struct xsk_buff_pool *pool; |
1043 | u16 queue_id; |
1044 | } xsk; |
1045 | }; |
1046 | }; |
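
/*
 * Example: a minimal sketch of a driver's ndo_bpf() dispatching on the
 * command; the foo_* helpers are hypothetical:
 *
 *	static int foo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		switch (bpf->command) {
 *		case XDP_SETUP_PROG:
 *			return foo_xdp_setup(dev, bpf->prog, bpf->extack);
 *		case XDP_SETUP_XSK_POOL:
 *			return foo_xsk_setup(dev, bpf->xsk.pool,
 *					     bpf->xsk.queue_id);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */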
1047 | |
1048 | /* Flags for ndo_xsk_wakeup. */ |
1049 | #define XDP_WAKEUP_RX (1 << 0) |
1050 | #define XDP_WAKEUP_TX (1 << 1) |
1051 | |
1052 | #ifdef CONFIG_XFRM_OFFLOAD |
1053 | struct xfrmdev_ops { |
1054 | int (*xdo_dev_state_add) (struct xfrm_state *x, struct netlink_ext_ack *extack); |
1055 | void (*xdo_dev_state_delete) (struct xfrm_state *x); |
1056 | void (*xdo_dev_state_free) (struct xfrm_state *x); |
1057 | bool (*xdo_dev_offload_ok) (struct sk_buff *skb, |
1058 | struct xfrm_state *x); |
1059 | void (*xdo_dev_state_advance_esn) (struct xfrm_state *x); |
1060 | void (*xdo_dev_state_update_curlft) (struct xfrm_state *x); |
1061 | int (*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack); |
1062 | void (*xdo_dev_policy_delete) (struct xfrm_policy *x); |
1063 | void (*xdo_dev_policy_free) (struct xfrm_policy *x); |
1064 | }; |
1065 | #endif |
1066 | |
1067 | struct dev_ifalias { |
1068 | struct rcu_head rcuhead; |
1069 | char ifalias[]; |
1070 | }; |
1071 | |
1072 | struct devlink; |
1073 | struct tlsdev_ops; |
1074 | |
1075 | struct netdev_net_notifier { |
1076 | struct list_head list; |
1077 | struct notifier_block *nb; |
1078 | }; |
1079 | |
1080 | /* |
1081 | * This structure defines the management hooks for network devices. |
1082 | * The following hooks can be defined; unless noted otherwise, they are |
1083 | * optional and can be filled with a null pointer. |
1084 | * |
1085 | * int (*ndo_init)(struct net_device *dev); |
1086 | * This function is called once when a network device is registered. |
1087 | * The network device can use this for any late stage initialization |
1088 | * or semantic validation. It can fail with an error code which will |
1089 | * be propagated back to register_netdev. |
1090 | * |
1091 | * void (*ndo_uninit)(struct net_device *dev); |
1092 | * This function is called when device is unregistered or when registration |
1093 | * fails. It is not called if init fails. |
1094 | * |
1095 | * int (*ndo_open)(struct net_device *dev); |
1096 | * This function is called when a network device transitions to the up |
1097 | * state. |
1098 | * |
1099 | * int (*ndo_stop)(struct net_device *dev); |
1100 | * This function is called when a network device transitions to the down |
1101 | * state. |
1102 | * |
1103 | * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, |
1104 | * struct net_device *dev); |
1105 | * Called when a packet needs to be transmitted. |
1106 | * Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop |
1107 | * the queue before that can happen; it's for obsolete devices and weird |
1108 | * corner cases, but the stack really does a non-trivial amount |
1109 | * of useless work if you return NETDEV_TX_BUSY. |
1110 | * Required; cannot be NULL. |
1111 | * |
1112 | * netdev_features_t (*ndo_features_check)(struct sk_buff *skb, |
 *					   struct net_device *dev,
1114 | * netdev_features_t features); |
1115 | * Called by core transmit path to determine if device is capable of |
1116 | * performing offload operations on a given packet. This is to give |
1117 | * the device an opportunity to implement any restrictions that cannot |
1118 | * be otherwise expressed by feature flags. The check is called with |
1119 | * the set of features that the stack has calculated and it returns |
1120 | * those the driver believes to be appropriate. |
1121 | * |
1122 | * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, |
1123 | * struct net_device *sb_dev); |
1124 | * Called to decide which queue to use when device supports multiple |
1125 | * transmit queues. |
1126 | * |
1127 | * void (*ndo_change_rx_flags)(struct net_device *dev, int flags); |
 *	This function is called to allow the device receiver to make
 *	changes to the configuration when multicast or promiscuous mode is enabled.
1130 | * |
1131 | * void (*ndo_set_rx_mode)(struct net_device *dev); |
 *	This function is called when the device's address list filtering changes.
1133 | * If driver handles unicast address filtering, it should set |
1134 | * IFF_UNICAST_FLT in its priv_flags. |
1135 | * |
1136 | * int (*ndo_set_mac_address)(struct net_device *dev, void *addr); |
1137 | * This function is called when the Media Access Control address |
1138 | * needs to be changed. If this interface is not defined, the |
 *	MAC address cannot be changed.
1140 | * |
1141 | * int (*ndo_validate_addr)(struct net_device *dev); |
1142 | * Test if Media Access Control address is valid for the device. |
1143 | * |
1144 | * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); |
1145 | * Old-style ioctl entry point. This is used internally by the |
1146 | * appletalk and ieee802154 subsystems but is no longer called by |
1147 | * the device ioctl handler. |
1148 | * |
1149 | * int (*ndo_siocbond)(struct net_device *dev, struct ifreq *ifr, int cmd); |
1150 | * Used by the bonding driver for its device specific ioctls: |
1151 | * SIOCBONDENSLAVE, SIOCBONDRELEASE, SIOCBONDSETHWADDR, SIOCBONDCHANGEACTIVE, |
1152 | * SIOCBONDSLAVEINFOQUERY, and SIOCBONDINFOQUERY |
1153 | * |
 * int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
1155 | * Called for ethernet specific ioctls: SIOCGMIIPHY, SIOCGMIIREG, |
1156 | * SIOCSMIIREG, SIOCSHWTSTAMP and SIOCGHWTSTAMP. |
1157 | * |
1158 | * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); |
 *	Used to set a network device's bus interface parameters. This interface
1160 | * is retained for legacy reasons; new devices should use the bus |
1161 | * interface (PCI) for low level management. |
1162 | * |
1163 | * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); |
1164 | * Called when a user wants to change the Maximum Transfer Unit |
1165 | * of a device. |
1166 | * |
1167 | * void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue); |
1168 | * Callback used when the transmitter has not made any progress |
1169 | * for dev->watchdog ticks. |
1170 | * |
1171 | * void (*ndo_get_stats64)(struct net_device *dev, |
1172 | * struct rtnl_link_stats64 *storage); |
1173 | * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); |
1174 | * Called when a user wants to get the network device usage |
1175 | * statistics. Drivers must do one of the following: |
1176 | * 1. Define @ndo_get_stats64 to fill in a zero-initialised |
1177 | * rtnl_link_stats64 structure passed by the caller. |
1178 | * 2. Define @ndo_get_stats to update a net_device_stats structure |
1179 | * (which should normally be dev->stats) and return a pointer to |
1180 | * it. The structure may be changed asynchronously only if each |
1181 | * field is written atomically. |
1182 | * 3. Update dev->stats asynchronously and atomically, and define |
1183 | * neither operation. |
1184 | * |
1185 | * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id) |
1186 | * Return true if this device supports offload stats of this attr_id. |
1187 | * |
1188 | * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev, |
1189 | * void *attr_data) |
1190 | * Get statistics for offload operations by attr_id. Write it into the |
1191 | * attr_data pointer. |
1192 | * |
1193 | * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid); |
1194 | * If device supports VLAN filtering this function is called when a |
1195 | * VLAN id is registered. |
1196 | * |
1197 | * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid); |
1198 | * If device supports VLAN filtering this function is called when a |
1199 | * VLAN id is unregistered. |
1200 | * |
1201 | * void (*ndo_poll_controller)(struct net_device *dev); |
1202 | * |
1203 | * SR-IOV management functions. |
1204 | * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac); |
1205 | * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, |
1206 | * u8 qos, __be16 proto); |
1207 | * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate, |
1208 | * int max_tx_rate); |
1209 | * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); |
1210 | * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting); |
1211 | * int (*ndo_get_vf_config)(struct net_device *dev, |
1212 | * int vf, struct ifla_vf_info *ivf); |
1213 | * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state); |
1214 | * int (*ndo_set_vf_port)(struct net_device *dev, int vf, |
1215 | * struct nlattr *port[]); |
1216 | * |
1217 | * Enable or disable the VF ability to query its RSS Redirection Table and |
 *      Hash Key. This is needed since on some devices VFs share this information
 *      with the PF and querying it may introduce a theoretical security risk.
1220 | * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting); |
1221 | * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); |
1222 | * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type, |
1223 | * void *type_data); |
1224 | * Called to setup any 'tc' scheduler, classifier or action on @dev. |
1225 | * This is always called from the stack with the rtnl lock held and netif |
1226 | * tx queues stopped. This allows the netdevice to perform queue |
1227 | * management safely. |
1228 | * |
1229 | * Fiber Channel over Ethernet (FCoE) offload functions. |
1230 | * int (*ndo_fcoe_enable)(struct net_device *dev); |
1231 | * Called when the FCoE protocol stack wants to start using LLD for FCoE |
1232 | * so the underlying device can perform whatever needed configuration or |
1233 | * initialization to support acceleration of FCoE traffic. |
1234 | * |
1235 | * int (*ndo_fcoe_disable)(struct net_device *dev); |
1236 | * Called when the FCoE protocol stack wants to stop using LLD for FCoE |
1237 | * so the underlying device can perform whatever needed clean-ups to |
1238 | * stop supporting acceleration of FCoE traffic. |
1239 | * |
1240 | * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid, |
1241 | * struct scatterlist *sgl, unsigned int sgc); |
1242 | * Called when the FCoE Initiator wants to initialize an I/O that |
1243 | * is a possible candidate for Direct Data Placement (DDP). The LLD can |
1244 | * perform necessary setup and returns 1 to indicate the device is set up |
1245 | * successfully to perform DDP on this I/O, otherwise this returns 0. |
1246 | * |
1247 | * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid); |
1248 | * Called when the FCoE Initiator/Target is done with the DDPed I/O as |
1249 | * indicated by the FC exchange id 'xid', so the underlying device can |
1250 | * clean up and reuse resources for later DDP requests. |
1251 | * |
1252 | * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid, |
1253 | * struct scatterlist *sgl, unsigned int sgc); |
1254 | * Called when the FCoE Target wants to initialize an I/O that |
1255 | * is a possible candidate for Direct Data Placement (DDP). The LLD can |
1256 | * perform necessary setup and returns 1 to indicate the device is set up |
1257 | * successfully to perform DDP on this I/O, otherwise this returns 0. |
1258 | * |
1259 | * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, |
1260 | * struct netdev_fcoe_hbainfo *hbainfo); |
1261 | * Called when the FCoE Protocol stack wants information on the underlying |
1262 | * device. This information is utilized by the FCoE protocol stack to |
1263 | * register attributes with Fiber Channel management service as per the |
1264 | * FC-GS Fabric Device Management Information(FDMI) specification. |
1265 | * |
1266 | * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type); |
1267 | * Called when the underlying device wants to override default World Wide |
1268 | * Name (WWN) generation mechanism in FCoE protocol stack to pass its own |
1269 | * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE |
1270 | * protocol stack to use. |
1271 | * |
1272 | * RFS acceleration. |
1273 | * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb, |
1274 | * u16 rxq_index, u32 flow_id); |
1275 | * Set hardware filter for RFS. rxq_index is the target queue index; |
1276 | * flow_id is a flow ID to be passed to rps_may_expire_flow() later. |
1277 | * Return the filter ID on success, or a negative error code. |
1278 | * |
1279 | * Slave management functions (for bridge, bonding, etc). |
1280 | * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev); |
1281 | * Called to make another netdev an underling. |
1282 | * |
1283 | * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev); |
1284 | * Called to release previously enslaved netdev. |
1285 | * |
1286 | * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev, |
1287 | * struct sk_buff *skb, |
1288 | * bool all_slaves); |
 *	Get the xmit slave of the master device. If all_slaves is true, the
 *	function assumes all the slaves can transmit.
1291 | * |
1292 | * Feature/offload setting functions. |
1293 | * netdev_features_t (*ndo_fix_features)(struct net_device *dev, |
1294 | * netdev_features_t features); |
1295 | * Adjusts the requested feature flags according to device-specific |
1296 | * constraints, and returns the resulting flags. Must not modify |
1297 | * the device state. |
1298 | * |
1299 | * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); |
1300 | * Called to update device configuration to new features. Passed |
 *	feature set might be less than what was returned by ndo_fix_features().
1302 | * Must return >0 or -errno if it changed dev->features itself. |
1303 | * |
1304 | * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], |
1305 | * struct net_device *dev, |
1306 | * const unsigned char *addr, u16 vid, u16 flags, |
1307 | * struct netlink_ext_ack *extack); |
1308 | * Adds an FDB entry to dev for addr. |
1309 | * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], |
1310 | * struct net_device *dev, |
1311 | * const unsigned char *addr, u16 vid) |
 *	Deletes the FDB entry from dev corresponding to addr.
1313 | * int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, struct net_device *dev, |
1314 | * struct netlink_ext_ack *extack); |
1315 | * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, |
1316 | * struct net_device *dev, struct net_device *filter_dev, |
1317 | * int *idx) |
1318 | * Used to add FDB entries to dump requests. Implementers should add |
1319 | * entries to skb and update idx with the number of entries. |
1320 | * |
1321 | * int (*ndo_mdb_add)(struct net_device *dev, struct nlattr *tb[], |
1322 | * u16 nlmsg_flags, struct netlink_ext_ack *extack); |
1323 | * Adds an MDB entry to dev. |
1324 | * int (*ndo_mdb_del)(struct net_device *dev, struct nlattr *tb[], |
1325 | * struct netlink_ext_ack *extack); |
1326 | * Deletes the MDB entry from dev. |
1327 | * int (*ndo_mdb_dump)(struct net_device *dev, struct sk_buff *skb, |
1328 | * struct netlink_callback *cb); |
1329 | * Dumps MDB entries from dev. The first argument (marker) in the netlink |
1330 | * callback is used by core rtnetlink code. |
1331 | * |
1332 | * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, |
1333 | * u16 flags, struct netlink_ext_ack *extack) |
1334 | * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, |
1335 | * struct net_device *dev, u32 filter_mask, |
1336 | * int nlflags) |
1337 | * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh, |
1338 | * u16 flags); |
1339 | * |
1340 | * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); |
1341 | * Called to change device carrier. Soft-devices (like dummy, team, etc) |
1342 | * which do not represent real hardware may define this to allow their |
1343 | * userspace components to manage their virtual carrier state. Devices |
1344 | * that determine carrier state from physical hardware properties (eg |
1345 | * network cables) or protocol-dependent mechanisms (eg |
1346 | * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function. |
1347 | * |
1348 | * int (*ndo_get_phys_port_id)(struct net_device *dev, |
1349 | * struct netdev_phys_item_id *ppid); |
1350 | * Called to get ID of physical port of this device. If driver does |
1351 | * not implement this, it is assumed that the hw is not able to have |
1352 | * multiple net devices on single physical port. |
1353 | * |
1354 | * int (*ndo_get_port_parent_id)(struct net_device *dev, |
1355 | * struct netdev_phys_item_id *ppid) |
1356 | * Called to get the parent ID of the physical port of this device. |
1357 | * |
1358 | * void* (*ndo_dfwd_add_station)(struct net_device *pdev, |
1359 | * struct net_device *dev) |
1360 | * Called by upper layer devices to accelerate switching or other |
 *	station functionality into hardware. 'pdev' is the lowerdev
1362 | * to use for the offload and 'dev' is the net device that will |
1363 | * back the offload. Returns a pointer to the private structure |
1364 | * the upper layer will maintain. |
1365 | * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv) |
1366 | * Called by upper layer device to delete the station created |
1367 | * by 'ndo_dfwd_add_station'. 'pdev' is the net device backing |
1368 | * the station and priv is the structure returned by the add |
1369 | * operation. |
1370 | * int (*ndo_set_tx_maxrate)(struct net_device *dev, |
1371 | * int queue_index, u32 maxrate); |
1372 | * Called when a user wants to set a max-rate limitation of specific |
1373 | * TX queue. |
1374 | * int (*ndo_get_iflink)(const struct net_device *dev); |
1375 | * Called to get the iflink value of this device. |
1376 | * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb); |
1377 | * This function is used to get egress tunnel information for given skb. |
1378 | * This is useful for retrieving outer tunnel header parameters while |
1379 | * sampling packet. |
1380 | * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom); |
1381 | * This function is used to specify the headroom that the skb must |
 *	consider when allocating the skb during packet reception. Setting
1383 | * appropriate rx headroom value allows avoiding skb head copy on |
1384 | * forward. Setting a negative value resets the rx headroom to the |
1385 | * default value. |
1386 | * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf); |
1387 | * This function is used to set or query state related to XDP on the |
1388 | * netdevice and manage BPF offload. See definition of |
1389 | * enum bpf_netdev_command for details. |
1390 | * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp, |
1391 | * u32 flags); |
1392 | * This function is used to submit @n XDP packets for transmit on a |
 *	netdevice. Returns the number of frames successfully transmitted; frames
 *	that got dropped are freed/returned via xdp_return_frame().
 *	A negative return value means a general error occurred while invoking
 *	the ndo; in that case no frames were transmitted and the caller will
 *	free all frames.
1397 | * struct net_device *(*ndo_xdp_get_xmit_slave)(struct net_device *dev, |
1398 | * struct xdp_buff *xdp); |
1399 | * Get the xmit slave of master device based on the xdp_buff. |
1400 | * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags); |
1401 | * This function is used to wake up the softirq, ksoftirqd or kthread |
1402 | * responsible for sending and/or receiving packets on a specific |
1403 | * queue id bound to an AF_XDP socket. The flags field specifies if |
1404 | * only RX, only Tx, or both should be woken up using the flags |
1405 | * XDP_WAKEUP_RX and XDP_WAKEUP_TX. |
1406 | * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p, |
1407 | * int cmd); |
1408 | * Add, change, delete or get information on an IPv4 tunnel. |
1409 | * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev); |
1410 | * If a device is paired with a peer device, return the peer instance. |
1411 | * The caller must be under RCU read context. |
1412 | * int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path); |
1413 | * Get the forwarding path to reach the real device from the HW destination address |
1414 | * ktime_t (*ndo_get_tstamp)(struct net_device *dev, |
1415 | * const struct skb_shared_hwtstamps *hwtstamps, |
1416 | * bool cycles); |
1417 | * Get hardware timestamp based on normal/adjustable time or free running |
 *	cycle counter. This function is required if the physical clock supports a
1419 | * free running cycle counter. |
1420 | * |
1421 | * int (*ndo_hwtstamp_get)(struct net_device *dev, |
1422 | * struct kernel_hwtstamp_config *kernel_config); |
1423 | * Get the currently configured hardware timestamping parameters for the |
1424 | * NIC device. |
1425 | * |
1426 | * int (*ndo_hwtstamp_set)(struct net_device *dev, |
1427 | * struct kernel_hwtstamp_config *kernel_config, |
1428 | * struct netlink_ext_ack *extack); |
 *	Change the hardware timestamping parameters for the NIC device.
1430 | */ |
1431 | struct net_device_ops { |
1432 | int (*ndo_init)(struct net_device *dev); |
1433 | void (*ndo_uninit)(struct net_device *dev); |
1434 | int (*ndo_open)(struct net_device *dev); |
1435 | int (*ndo_stop)(struct net_device *dev); |
1436 | netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, |
1437 | struct net_device *dev); |
1438 | netdev_features_t (*ndo_features_check)(struct sk_buff *skb, |
1439 | struct net_device *dev, |
1440 | netdev_features_t features); |
1441 | u16 (*ndo_select_queue)(struct net_device *dev, |
1442 | struct sk_buff *skb, |
1443 | struct net_device *sb_dev); |
1444 | void (*ndo_change_rx_flags)(struct net_device *dev, |
1445 | int flags); |
1446 | void (*ndo_set_rx_mode)(struct net_device *dev); |
1447 | int (*ndo_set_mac_address)(struct net_device *dev, |
1448 | void *addr); |
1449 | int (*ndo_validate_addr)(struct net_device *dev); |
1450 | int (*ndo_do_ioctl)(struct net_device *dev, |
1451 | struct ifreq *ifr, int cmd); |
1452 | int (*ndo_eth_ioctl)(struct net_device *dev, |
1453 | struct ifreq *ifr, int cmd); |
1454 | int (*ndo_siocbond)(struct net_device *dev, |
1455 | struct ifreq *ifr, int cmd); |
1456 | int (*ndo_siocwandev)(struct net_device *dev, |
1457 | struct if_settings *ifs); |
1458 | int (*ndo_siocdevprivate)(struct net_device *dev, |
1459 | struct ifreq *ifr, |
1460 | void __user *data, int cmd); |
1461 | int (*ndo_set_config)(struct net_device *dev, |
1462 | struct ifmap *map); |
1463 | int (*ndo_change_mtu)(struct net_device *dev, |
1464 | int new_mtu); |
1465 | int (*ndo_neigh_setup)(struct net_device *dev, |
1466 | struct neigh_parms *); |
1467 | void (*ndo_tx_timeout) (struct net_device *dev, |
1468 | unsigned int txqueue); |
1469 | |
1470 | void (*ndo_get_stats64)(struct net_device *dev, |
1471 | struct rtnl_link_stats64 *storage); |
1472 | bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id); |
1473 | int (*ndo_get_offload_stats)(int attr_id, |
1474 | const struct net_device *dev, |
1475 | void *attr_data); |
1476 | struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); |
1477 | |
1478 | int (*ndo_vlan_rx_add_vid)(struct net_device *dev, |
1479 | __be16 proto, u16 vid); |
1480 | int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, |
1481 | __be16 proto, u16 vid); |
1482 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1483 | void (*ndo_poll_controller)(struct net_device *dev); |
1484 | int (*ndo_netpoll_setup)(struct net_device *dev, |
1485 | struct netpoll_info *info); |
1486 | void (*ndo_netpoll_cleanup)(struct net_device *dev); |
1487 | #endif |
1488 | int (*ndo_set_vf_mac)(struct net_device *dev, |
1489 | int queue, u8 *mac); |
1490 | int (*ndo_set_vf_vlan)(struct net_device *dev, |
1491 | int queue, u16 vlan, |
1492 | u8 qos, __be16 proto); |
1493 | int (*ndo_set_vf_rate)(struct net_device *dev, |
1494 | int vf, int min_tx_rate, |
1495 | int max_tx_rate); |
1496 | int (*ndo_set_vf_spoofchk)(struct net_device *dev, |
1497 | int vf, bool setting); |
1498 | int (*ndo_set_vf_trust)(struct net_device *dev, |
1499 | int vf, bool setting); |
1500 | int (*ndo_get_vf_config)(struct net_device *dev, |
1501 | int vf, |
1502 | struct ifla_vf_info *ivf); |
1503 | int (*ndo_set_vf_link_state)(struct net_device *dev, |
1504 | int vf, int link_state); |
1505 | int (*ndo_get_vf_stats)(struct net_device *dev, |
1506 | int vf, |
1507 | struct ifla_vf_stats |
1508 | *vf_stats); |
1509 | int (*ndo_set_vf_port)(struct net_device *dev, |
1510 | int vf, |
1511 | struct nlattr *port[]); |
1512 | int (*ndo_get_vf_port)(struct net_device *dev, |
1513 | int vf, struct sk_buff *skb); |
1514 | int (*ndo_get_vf_guid)(struct net_device *dev, |
1515 | int vf, |
1516 | struct ifla_vf_guid *node_guid, |
1517 | struct ifla_vf_guid *port_guid); |
1518 | int (*ndo_set_vf_guid)(struct net_device *dev, |
1519 | int vf, u64 guid, |
1520 | int guid_type); |
	int			(*ndo_set_vf_rss_query_en)(
						   struct net_device *dev,
						   int vf, bool setting);
1524 | int (*ndo_setup_tc)(struct net_device *dev, |
1525 | enum tc_setup_type type, |
1526 | void *type_data); |
1527 | #if IS_ENABLED(CONFIG_FCOE) |
1528 | int (*ndo_fcoe_enable)(struct net_device *dev); |
1529 | int (*ndo_fcoe_disable)(struct net_device *dev); |
1530 | int (*ndo_fcoe_ddp_setup)(struct net_device *dev, |
1531 | u16 xid, |
1532 | struct scatterlist *sgl, |
1533 | unsigned int sgc); |
1534 | int (*ndo_fcoe_ddp_done)(struct net_device *dev, |
1535 | u16 xid); |
1536 | int (*ndo_fcoe_ddp_target)(struct net_device *dev, |
1537 | u16 xid, |
1538 | struct scatterlist *sgl, |
1539 | unsigned int sgc); |
1540 | int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, |
1541 | struct netdev_fcoe_hbainfo *hbainfo); |
1542 | #endif |
1543 | |
1544 | #if IS_ENABLED(CONFIG_LIBFCOE) |
1545 | #define NETDEV_FCOE_WWNN 0 |
1546 | #define NETDEV_FCOE_WWPN 1 |
1547 | int (*ndo_fcoe_get_wwn)(struct net_device *dev, |
1548 | u64 *wwn, int type); |
1549 | #endif |
1550 | |
1551 | #ifdef CONFIG_RFS_ACCEL |
1552 | int (*ndo_rx_flow_steer)(struct net_device *dev, |
1553 | const struct sk_buff *skb, |
1554 | u16 rxq_index, |
1555 | u32 flow_id); |
1556 | #endif |
1557 | int (*ndo_add_slave)(struct net_device *dev, |
1558 | struct net_device *slave_dev, |
1559 | struct netlink_ext_ack *extack); |
1560 | int (*ndo_del_slave)(struct net_device *dev, |
1561 | struct net_device *slave_dev); |
1562 | struct net_device* (*ndo_get_xmit_slave)(struct net_device *dev, |
1563 | struct sk_buff *skb, |
1564 | bool all_slaves); |
1565 | struct net_device* (*ndo_sk_get_lower_dev)(struct net_device *dev, |
1566 | struct sock *sk); |
1567 | netdev_features_t (*ndo_fix_features)(struct net_device *dev, |
1568 | netdev_features_t features); |
1569 | int (*ndo_set_features)(struct net_device *dev, |
1570 | netdev_features_t features); |
1571 | int (*ndo_neigh_construct)(struct net_device *dev, |
1572 | struct neighbour *n); |
1573 | void (*ndo_neigh_destroy)(struct net_device *dev, |
1574 | struct neighbour *n); |
1575 | |
1576 | int (*ndo_fdb_add)(struct ndmsg *ndm, |
1577 | struct nlattr *tb[], |
1578 | struct net_device *dev, |
1579 | const unsigned char *addr, |
1580 | u16 vid, |
1581 | u16 flags, |
1582 | struct netlink_ext_ack *extack); |
1583 | int (*ndo_fdb_del)(struct ndmsg *ndm, |
1584 | struct nlattr *tb[], |
1585 | struct net_device *dev, |
1586 | const unsigned char *addr, |
1587 | u16 vid, struct netlink_ext_ack *extack); |
1588 | int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, |
1589 | struct net_device *dev, |
1590 | struct netlink_ext_ack *extack); |
1591 | int (*ndo_fdb_dump)(struct sk_buff *skb, |
1592 | struct netlink_callback *cb, |
1593 | struct net_device *dev, |
1594 | struct net_device *filter_dev, |
1595 | int *idx); |
1596 | int (*ndo_fdb_get)(struct sk_buff *skb, |
1597 | struct nlattr *tb[], |
1598 | struct net_device *dev, |
1599 | const unsigned char *addr, |
1600 | u16 vid, u32 portid, u32 seq, |
1601 | struct netlink_ext_ack *extack); |
1602 | int (*ndo_mdb_add)(struct net_device *dev, |
1603 | struct nlattr *tb[], |
1604 | u16 nlmsg_flags, |
1605 | struct netlink_ext_ack *extack); |
1606 | int (*ndo_mdb_del)(struct net_device *dev, |
1607 | struct nlattr *tb[], |
1608 | struct netlink_ext_ack *extack); |
1609 | int (*ndo_mdb_dump)(struct net_device *dev, |
1610 | struct sk_buff *skb, |
1611 | struct netlink_callback *cb); |
1612 | int (*ndo_mdb_get)(struct net_device *dev, |
1613 | struct nlattr *tb[], u32 portid, |
1614 | u32 seq, |
1615 | struct netlink_ext_ack *extack); |
1616 | int (*ndo_bridge_setlink)(struct net_device *dev, |
1617 | struct nlmsghdr *nlh, |
1618 | u16 flags, |
1619 | struct netlink_ext_ack *extack); |
1620 | int (*ndo_bridge_getlink)(struct sk_buff *skb, |
1621 | u32 pid, u32 seq, |
1622 | struct net_device *dev, |
1623 | u32 filter_mask, |
1624 | int nlflags); |
1625 | int (*ndo_bridge_dellink)(struct net_device *dev, |
1626 | struct nlmsghdr *nlh, |
1627 | u16 flags); |
1628 | int (*ndo_change_carrier)(struct net_device *dev, |
1629 | bool new_carrier); |
1630 | int (*ndo_get_phys_port_id)(struct net_device *dev, |
1631 | struct netdev_phys_item_id *ppid); |
1632 | int (*ndo_get_port_parent_id)(struct net_device *dev, |
1633 | struct netdev_phys_item_id *ppid); |
1634 | int (*ndo_get_phys_port_name)(struct net_device *dev, |
1635 | char *name, size_t len); |
1636 | void* (*ndo_dfwd_add_station)(struct net_device *pdev, |
1637 | struct net_device *dev); |
1638 | void (*ndo_dfwd_del_station)(struct net_device *pdev, |
1639 | void *priv); |
1640 | |
1641 | int (*ndo_set_tx_maxrate)(struct net_device *dev, |
1642 | int queue_index, |
1643 | u32 maxrate); |
1644 | int (*ndo_get_iflink)(const struct net_device *dev); |
1645 | int (*ndo_fill_metadata_dst)(struct net_device *dev, |
1646 | struct sk_buff *skb); |
1647 | void (*ndo_set_rx_headroom)(struct net_device *dev, |
1648 | int needed_headroom); |
1649 | int (*ndo_bpf)(struct net_device *dev, |
1650 | struct netdev_bpf *bpf); |
1651 | int (*ndo_xdp_xmit)(struct net_device *dev, int n, |
1652 | struct xdp_frame **xdp, |
1653 | u32 flags); |
1654 | struct net_device * (*ndo_xdp_get_xmit_slave)(struct net_device *dev, |
1655 | struct xdp_buff *xdp); |
1656 | int (*ndo_xsk_wakeup)(struct net_device *dev, |
1657 | u32 queue_id, u32 flags); |
1658 | int (*ndo_tunnel_ctl)(struct net_device *dev, |
1659 | struct ip_tunnel_parm *p, int cmd); |
1660 | struct net_device * (*ndo_get_peer_dev)(struct net_device *dev); |
1661 | int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, |
1662 | struct net_device_path *path); |
1663 | ktime_t (*ndo_get_tstamp)(struct net_device *dev, |
1664 | const struct skb_shared_hwtstamps *hwtstamps, |
1665 | bool cycles); |
1666 | int (*ndo_hwtstamp_get)(struct net_device *dev, |
1667 | struct kernel_hwtstamp_config *kernel_config); |
1668 | int (*ndo_hwtstamp_set)(struct net_device *dev, |
1669 | struct kernel_hwtstamp_config *kernel_config, |
1670 | struct netlink_ext_ack *extack); |
1671 | }; |
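
/*
 * Example (illustrative sketch, not taken from any in-tree driver): most
 * drivers populate only a handful of these callbacks and leave the rest
 * NULL. The foo_* handlers below are assumed to be defined by the driver,
 * while eth_mac_addr() and eth_validate_addr() are the stock ethernet
 * helpers.
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_open		= foo_open,
 *		.ndo_stop		= foo_stop,
 *		.ndo_start_xmit		= foo_start_xmit,
 *		.ndo_set_mac_address	= eth_mac_addr,
 *		.ndo_validate_addr	= eth_validate_addr,
 *	};
 */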
1672 | |
1673 | /** |
1674 | * enum netdev_priv_flags - &struct net_device priv_flags |
1675 | * |
 * These are the &struct net_device priv_flags; they are only set
 * internally by drivers and used in the kernel. These flags are
 * invisible to userspace; this means that the order of these flags
 * can change during any kernel release.
1680 | * |
1681 | * You should have a pretty good reason to be extending these flags. |
1682 | * |
1683 | * @IFF_802_1Q_VLAN: 802.1Q VLAN device |
1684 | * @IFF_EBRIDGE: Ethernet bridging device |
1685 | * @IFF_BONDING: bonding master or slave |
1686 | * @IFF_ISATAP: ISATAP interface (RFC4214) |
1687 | * @IFF_WAN_HDLC: WAN HDLC device |
1688 | * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to |
1689 | * release skb->dst |
1690 | * @IFF_DONT_BRIDGE: disallow bridging this ether dev |
1691 | * @IFF_DISABLE_NETPOLL: disable netpoll at run-time |
1692 | * @IFF_MACVLAN_PORT: device used as macvlan port |
1693 | * @IFF_BRIDGE_PORT: device used as bridge port |
1694 | * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port |
1695 | * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit |
1696 | * @IFF_UNICAST_FLT: Supports unicast filtering |
1697 | * @IFF_TEAM_PORT: device used as team port |
1698 | * @IFF_SUPP_NOFCS: device supports sending custom FCS |
1699 | * @IFF_LIVE_ADDR_CHANGE: device supports hardware address |
1700 | * change when it's running |
1701 | * @IFF_MACVLAN: Macvlan device |
1702 | * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account |
1703 | * underlying stacked devices |
1704 | * @IFF_L3MDEV_MASTER: device is an L3 master device |
1705 | * @IFF_NO_QUEUE: device can run without qdisc attached |
 * @IFF_OPENVSWITCH: device is an Open vSwitch master
1707 | * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device |
1708 | * @IFF_TEAM: device is a team device |
1709 | * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured |
1710 | * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external |
1711 | * entity (i.e. the master device for bridged veth) |
1712 | * @IFF_MACSEC: device is a MACsec device |
1713 | * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook |
1714 | * @IFF_FAILOVER: device is a failover master device |
1715 | * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device |
1716 | * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device |
1717 | * @IFF_NO_ADDRCONF: prevent ipv6 addrconf |
1718 | * @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with |
1719 | * skb_headlen(skb) == 0 (data starts from frag0) |
1720 | * @IFF_CHANGE_PROTO_DOWN: device supports setting carrier via IFLA_PROTO_DOWN |
1721 | * @IFF_SEE_ALL_HWTSTAMP_REQUESTS: device wants to see calls to |
1722 | * ndo_hwtstamp_set() for all timestamp requests regardless of source, |
1723 | * even if those aren't HWTSTAMP_SOURCE_NETDEV. |
1724 | */ |
1725 | enum netdev_priv_flags { |
1726 | IFF_802_1Q_VLAN = 1<<0, |
1727 | IFF_EBRIDGE = 1<<1, |
1728 | IFF_BONDING = 1<<2, |
1729 | IFF_ISATAP = 1<<3, |
1730 | IFF_WAN_HDLC = 1<<4, |
1731 | IFF_XMIT_DST_RELEASE = 1<<5, |
1732 | IFF_DONT_BRIDGE = 1<<6, |
1733 | IFF_DISABLE_NETPOLL = 1<<7, |
1734 | IFF_MACVLAN_PORT = 1<<8, |
1735 | IFF_BRIDGE_PORT = 1<<9, |
1736 | IFF_OVS_DATAPATH = 1<<10, |
1737 | IFF_TX_SKB_SHARING = 1<<11, |
1738 | IFF_UNICAST_FLT = 1<<12, |
1739 | IFF_TEAM_PORT = 1<<13, |
1740 | IFF_SUPP_NOFCS = 1<<14, |
1741 | IFF_LIVE_ADDR_CHANGE = 1<<15, |
1742 | IFF_MACVLAN = 1<<16, |
1743 | IFF_XMIT_DST_RELEASE_PERM = 1<<17, |
1744 | IFF_L3MDEV_MASTER = 1<<18, |
1745 | IFF_NO_QUEUE = 1<<19, |
1746 | IFF_OPENVSWITCH = 1<<20, |
1747 | IFF_L3MDEV_SLAVE = 1<<21, |
1748 | IFF_TEAM = 1<<22, |
1749 | IFF_RXFH_CONFIGURED = 1<<23, |
1750 | IFF_PHONY_HEADROOM = 1<<24, |
1751 | IFF_MACSEC = 1<<25, |
1752 | IFF_NO_RX_HANDLER = 1<<26, |
1753 | IFF_FAILOVER = 1<<27, |
1754 | IFF_FAILOVER_SLAVE = 1<<28, |
1755 | IFF_L3MDEV_RX_HANDLER = 1<<29, |
1756 | IFF_NO_ADDRCONF = BIT_ULL(30), |
1757 | IFF_TX_SKB_NO_LINEAR = BIT_ULL(31), |
1758 | IFF_CHANGE_PROTO_DOWN = BIT_ULL(32), |
1759 | IFF_SEE_ALL_HWTSTAMP_REQUESTS = BIT_ULL(33), |
1760 | }; |
1761 | |
1762 | #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN |
1763 | #define IFF_EBRIDGE IFF_EBRIDGE |
1764 | #define IFF_BONDING IFF_BONDING |
1765 | #define IFF_ISATAP IFF_ISATAP |
1766 | #define IFF_WAN_HDLC IFF_WAN_HDLC |
1767 | #define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE |
1768 | #define IFF_DONT_BRIDGE IFF_DONT_BRIDGE |
1769 | #define IFF_DISABLE_NETPOLL IFF_DISABLE_NETPOLL |
1770 | #define IFF_MACVLAN_PORT IFF_MACVLAN_PORT |
1771 | #define IFF_BRIDGE_PORT IFF_BRIDGE_PORT |
1772 | #define IFF_OVS_DATAPATH IFF_OVS_DATAPATH |
1773 | #define IFF_TX_SKB_SHARING IFF_TX_SKB_SHARING |
1774 | #define IFF_UNICAST_FLT IFF_UNICAST_FLT |
1775 | #define IFF_TEAM_PORT IFF_TEAM_PORT |
1776 | #define IFF_SUPP_NOFCS IFF_SUPP_NOFCS |
1777 | #define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE |
1778 | #define IFF_MACVLAN IFF_MACVLAN |
1779 | #define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM |
1780 | #define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER |
1781 | #define IFF_NO_QUEUE IFF_NO_QUEUE |
1782 | #define IFF_OPENVSWITCH IFF_OPENVSWITCH |
1783 | #define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE |
1784 | #define IFF_TEAM IFF_TEAM |
1785 | #define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED |
1786 | #define IFF_PHONY_HEADROOM IFF_PHONY_HEADROOM |
1787 | #define IFF_MACSEC IFF_MACSEC |
1788 | #define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER |
1789 | #define IFF_FAILOVER IFF_FAILOVER |
1790 | #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE |
1791 | #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER |
1792 | #define IFF_TX_SKB_NO_LINEAR IFF_TX_SKB_NO_LINEAR |
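
/*
 * Illustrative example: these flags are tested against dev->priv_flags,
 * e.g. to skip devices enslaved to an L3 master device:
 *
 *	if (dev->priv_flags & IFF_L3MDEV_SLAVE)
 *		return;
 *
 * In-tree code usually prefers helpers such as netif_is_l3_slave()
 * over open-coding the test.
 */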
1793 | |
1794 | /* Specifies the type of the struct net_device::ml_priv pointer */ |
1795 | enum netdev_ml_priv_type { |
1796 | ML_PRIV_NONE, |
1797 | ML_PRIV_CAN, |
1798 | }; |
1799 | |
1800 | /** |
1801 | * struct net_device - The DEVICE structure. |
1802 | * |
1803 | * Actually, this whole structure is a big mistake. It mixes I/O |
1804 | * data with strictly "high-level" data, and it has to know about |
1805 | * almost every data structure used in the INET module. |
1806 | * |
1807 | * @name: This is the first field of the "visible" part of this structure |
1808 | * (i.e. as seen by users in the "Space.c" file). It is the name |
1809 | * of the interface. |
1810 | * |
1811 | * @name_node: Name hashlist node |
1812 | * @ifalias: SNMP alias |
1813 | * @mem_end: Shared memory end |
1814 | * @mem_start: Shared memory start |
1815 | * @base_addr: Device I/O address |
1816 | * @irq: Device IRQ number |
1817 | * |
1818 | * @state: Generic network queuing layer state, see netdev_state_t |
1819 | * @dev_list: The global list of network devices |
1820 | * @napi_list: List entry used for polling NAPI devices |
1821 | * @unreg_list: List entry when we are unregistering the |
1822 | * device; see the function unregister_netdev |
1823 | * @close_list: List entry used when we are closing the device |
1824 | * @ptype_all: Device-specific packet handlers for all protocols |
1825 | * @ptype_specific: Device-specific, protocol-specific packet handlers |
1826 | * |
1827 | * @adj_list: Directly linked devices, like slaves for bonding |
1828 | * @features: Currently active device features |
1829 | * @hw_features: User-changeable features |
1830 | * |
1831 | * @wanted_features: User-requested features |
1832 | * @vlan_features: Mask of features inheritable by VLAN devices |
1833 | * |
1834 | * @hw_enc_features: Mask of features inherited by encapsulating devices |
1835 | * This field indicates what encapsulation |
1836 | * offloads the hardware is capable of doing, |
1837 | * and drivers will need to set them appropriately. |
1838 | * |
1839 | * @mpls_features: Mask of features inheritable by MPLS |
1840 | * @gso_partial_features: value(s) from NETIF_F_GSO\* |
1841 | * |
1842 | * @ifindex: interface index |
1843 | * @group: The group the device belongs to |
1844 | * |
 *	@stats:		Statistics struct, which was left as a legacy; use
 *			rtnl_link_stats64 instead
1847 | * |
1848 | * @core_stats: core networking counters, |
1849 | * do not use this in drivers |
1850 | * @carrier_up_count: Number of times the carrier has been up |
1851 | * @carrier_down_count: Number of times the carrier has been down |
1852 | * |
1853 | * @wireless_handlers: List of functions to handle Wireless Extensions, |
1854 | * instead of ioctl, |
1855 | * see <net/iw_handler.h> for details. |
1856 | * @wireless_data: Instance data managed by the core of wireless extensions |
1857 | * |
1858 | * @netdev_ops: Includes several pointers to callbacks, |
1859 | * if one wants to override the ndo_*() functions |
1860 | * @xdp_metadata_ops: Includes pointers to XDP metadata callbacks. |
1861 | * @ethtool_ops: Management operations |
1862 | * @l3mdev_ops: Layer 3 master device operations |
1863 | * @ndisc_ops: Includes callbacks for different IPv6 neighbour |
1864 | * discovery handling. Necessary for e.g. 6LoWPAN. |
1865 | * @xfrmdev_ops: Transformation offload operations |
1866 | * @tlsdev_ops: Transport Layer Security offload operations |
 *	@header_ops:	Includes callbacks for creating, parsing, caching, etc.
1868 | * of Layer 2 headers. |
1869 | * |
1870 | * @flags: Interface flags (a la BSD) |
1871 | * @xdp_features: XDP capability supported by the device |
1872 | * @priv_flags: Like 'flags' but invisible to userspace, |
1873 | * see if.h for the definitions |
 *	@gflags:	Global flags (kept as legacy)
1875 | * @padded: How much padding added by alloc_netdev() |
1876 | * @operstate: RFC2863 operstate |
1877 | * @link_mode: Mapping policy to operstate |
1878 | * @if_port: Selectable AUI, TP, ... |
1879 | * @dma: DMA channel |
1880 | * @mtu: Interface MTU value |
1881 | * @min_mtu: Interface Minimum MTU value |
1882 | * @max_mtu: Interface Maximum MTU value |
1883 | * @type: Interface hardware type |
1884 | * @hard_header_len: Maximum hardware header length. |
1885 | * @min_header_len: Minimum hardware header length |
1886 | * |
1887 | * @needed_headroom: Extra headroom the hardware may need, but not in all |
1888 | * cases can this be guaranteed |
1889 | * @needed_tailroom: Extra tailroom the hardware may need, but not in all |
1890 | * cases can this be guaranteed. Some cases also use |
1891 | * LL_MAX_HEADER instead to allocate the skb |
1892 | * |
1893 | * interface address info: |
1894 | * |
1895 | * @perm_addr: Permanent hw address |
1896 | * @addr_assign_type: Hw address assignment type |
1897 | * @addr_len: Hardware address length |
1898 | * @upper_level: Maximum depth level of upper devices. |
1899 | * @lower_level: Maximum depth level of lower devices. |
1900 | * @neigh_priv_len: Used in neigh_alloc() |
1901 | * @dev_id: Used to differentiate devices that share |
1902 | * the same link layer address |
1903 | * @dev_port: Used to differentiate devices that share |
1904 | * the same function |
1905 | * @addr_list_lock: XXX: need comments on this one |
1906 | * @name_assign_type: network interface name assignment type |
1907 | * @uc_promisc: Counter that indicates promiscuous mode |
1908 | * has been enabled due to the need to listen to |
1909 | * additional unicast addresses in a device that |
1910 | * does not implement ndo_set_rx_mode() |
1911 | * @uc: unicast mac addresses |
1912 | * @mc: multicast mac addresses |
1913 | * @dev_addrs: list of device hw addresses |
1914 | * @queues_kset: Group of all Kobjects in the Tx and RX queues |
1915 | * @promiscuity: Number of times the NIC is told to work in |
1916 | * promiscuous mode; if it becomes 0 the NIC will |
1917 | * exit promiscuous mode |
1918 | * @allmulti: Counter, enables or disables allmulticast mode |
1919 | * |
1920 | * @vlan_info: VLAN info |
1921 | * @dsa_ptr: dsa specific data |
1922 | * @tipc_ptr: TIPC specific data |
1923 | * @atalk_ptr: AppleTalk link |
1924 | * @ip_ptr: IPv4 specific data |
1925 | * @ip6_ptr: IPv6 specific data |
1926 | * @ax25_ptr: AX.25 specific data |
1927 | * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering |
1928 | * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network |
1929 | * device struct |
1930 | * @mpls_ptr: mpls_dev struct pointer |
1931 | * @mctp_ptr: MCTP specific data |
1932 | * |
1933 | * @dev_addr: Hw address (before bcast, |
1934 | * because most packets are unicast) |
1935 | * |
1936 | * @_rx: Array of RX queues |
1937 | * @num_rx_queues: Number of RX queues |
1938 | * allocated at register_netdev() time |
1939 | * @real_num_rx_queues: Number of RX queues currently active in device |
1940 | * @xdp_prog: XDP sockets filter program pointer |
1941 | * @gro_flush_timeout: timeout for GRO layer in NAPI |
 *	@napi_defer_hard_irqs:	If not zero, provides a counter that allows
 *			avoiding NIC hard IRQs on busy queues.
1944 | * |
1945 | * @rx_handler: handler for received packets |
1946 | * @rx_handler_data: XXX: need comments on this one |
1947 | * @tcx_ingress: BPF & clsact qdisc specific data for ingress processing |
1948 | * @ingress_queue: XXX: need comments on this one |
1949 | * @nf_hooks_ingress: netfilter hooks executed for ingress packets |
1950 | * @broadcast: hw bcast address |
1951 | * |
1952 | * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts, |
1953 | * indexed by RX queue number. Assigned by driver. |
1954 | * This must only be set if the ndo_rx_flow_steer |
1955 | * operation is defined |
1956 | * @index_hlist: Device index hash chain |
1957 | * |
1958 | * @_tx: Array of TX queues |
1959 | * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time |
1960 | * @real_num_tx_queues: Number of TX queues currently active in device |
1961 | * @qdisc: Root qdisc from userspace point of view |
1962 | * @tx_queue_len: Max frames per queue allowed |
1963 | * @tx_global_lock: XXX: need comments on this one |
1964 | * @xdp_bulkq: XDP device bulk queue |
1965 | * @xps_maps: all CPUs/RXQs maps for XPS device |
1966 | * |
1968 | * @tcx_egress: BPF & clsact qdisc specific data for egress processing |
1969 | * @nf_hooks_egress: netfilter hooks executed for egress packets |
1970 | * @qdisc_hash: qdisc hash table |
1971 | * @watchdog_timeo: Represents the timeout that is used by |
1972 | * the watchdog (see dev_watchdog()) |
1973 | * @watchdog_timer: List of timers |
1974 | * |
1975 | * @proto_down_reason: reason a netdev interface is held down |
1976 | * @pcpu_refcnt: Number of references to this device |
1977 | * @dev_refcnt: Number of references to this device |
1978 | * @refcnt_tracker: Tracker directory for tracked references to this device |
1979 | * @todo_list: Delayed register/unregister |
1980 | * @link_watch_list: XXX: need comments on this one |
1981 | * |
1982 | * @reg_state: Register/unregister state machine |
1983 | * @dismantle: Device is going to be freed |
1984 | * @rtnl_link_state: This enum represents the phases of creating |
1985 | * a new link |
1986 | * |
1987 | * @needs_free_netdev: Should unregister perform free_netdev? |
1988 | * @priv_destructor: Called from unregister |
1989 | * @npinfo: XXX: need comments on this one |
1990 | * @nd_net: Network namespace this network device is inside |
1991 | * |
1992 | * @ml_priv: Mid-layer private |
1993 | * @ml_priv_type: Mid-layer private type |
1994 | * @lstats: Loopback statistics |
1995 | * @tstats: Tunnel statistics |
1996 | * @dstats: Dummy statistics |
1997 | * @vstats: Virtual ethernet statistics |
1998 | * |
1999 | * @garp_port: GARP |
2000 | * @mrp_port: MRP |
2001 | * |
2002 | * @dm_private: Drop monitor private |
2003 | * |
2004 | * @dev: Class/net/name entry |
2005 | * @sysfs_groups: Space for optional device, statistics and wireless |
2006 | * sysfs groups |
2007 | * |
2008 | * @sysfs_rx_queue_group: Space for optional per-rx queue attributes |
2009 | * @rtnl_link_ops: Rtnl_link_ops |
2010 | * |
2011 | * @gso_max_size: Maximum size of generic segmentation offload |
2012 | * @tso_max_size: Device (as in HW) limit on the max TSO request size |
2013 | * @gso_max_segs: Maximum number of segments that can be passed to the |
2014 | * NIC for GSO |
2015 | * @tso_max_segs: Device (as in HW) limit on the max TSO segment count |
2016 | * @gso_ipv4_max_size: Maximum size of generic segmentation offload, |
2017 | * for IPv4. |
2018 | * |
2019 | * @dcbnl_ops: Data Center Bridging netlink ops |
2020 | * @num_tc: Number of traffic classes in the net device |
2021 | * @tc_to_txq: XXX: need comments on this one |
2022 | * @prio_tc_map: XXX: need comments on this one |
2023 | * |
2024 | * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp |
2025 | * |
2026 | * @priomap: XXX: need comments on this one |
2027 | * @phydev: Physical device may attach itself |
2028 | * for hardware timestamping |
2029 | * @sfp_bus: attached &struct sfp_bus structure. |
2030 | * |
2031 | * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock |
2032 | * |
2033 | * @proto_down: protocol port state information can be sent to the |
2034 | * switch driver and used to set the phys state of the |
2035 | * switch port. |
2036 | * |
2037 | * @wol_enabled: Wake-on-LAN is enabled |
2038 | * |
2039 | * @threaded: napi threaded mode is enabled |
2040 | * |
2041 | * @net_notifier_list: List of per-net netdev notifier block |
2042 | * that follow this device when it is moved |
2043 | * to another network namespace. |
2044 | * |
2045 | * @macsec_ops: MACsec offloading ops |
2046 | * |
2047 | * @udp_tunnel_nic_info: static structure describing the UDP tunnel |
2048 | * offload capabilities of the device |
2049 | * @udp_tunnel_nic: UDP tunnel offload state |
2050 | * @xdp_state: stores info on attached XDP BPF programs |
2051 | * |
2052 | * @nested_level: Used as a parameter of spin_lock_nested() of |
2053 | * dev->addr_list_lock. |
2054 | * @unlink_list: As netif_addr_lock() can be called recursively, |
2055 | * keep a list of interfaces to be deleted. |
2056 | * @gro_max_size: Maximum size of aggregated packet in generic |
2057 | * receive offload (GRO) |
2058 | * @gro_ipv4_max_size: Maximum size of aggregated packet in generic |
2059 | * receive offload (GRO), for IPv4. |
2060 | * @xdp_zc_max_segs: Maximum number of segments supported by AF_XDP |
2061 | * zero copy driver |
2062 | * |
2063 | * @dev_addr_shadow: Copy of @dev_addr to catch direct writes. |
2064 | * @linkwatch_dev_tracker: refcount tracker used by linkwatch. |
2065 | * @watchdog_dev_tracker: refcount tracker used by watchdog. |
2066 | * @dev_registered_tracker: tracker for reference held while |
2067 | * registered |
2068 | * @offload_xstats_l3: L3 HW stats for this netdevice. |
2069 | * |
2070 | * @devlink_port: Pointer to related devlink port structure. |
2071 | * Assigned by a driver before netdev registration using |
2072 | * SET_NETDEV_DEVLINK_PORT macro. This pointer is static |
2073 | * during the time netdevice is registered. |
2074 | * |
2075 | * @dpll_pin: Pointer to the SyncE source pin of a DPLL subsystem, |
2076 | * where the clock is recovered. |
2077 | * |
2078 | * FIXME: cleanup struct net_device such that network protocol info |
2079 | * moves out. |
2080 | */ |
2081 | |
2082 | struct net_device { |
2083 | char name[IFNAMSIZ]; |
2084 | struct netdev_name_node *name_node; |
2085 | struct dev_ifalias __rcu *ifalias; |
2086 | /* |
2087 | * I/O specific fields |
2088 | * FIXME: Merge these and struct ifmap into one |
2089 | */ |
2090 | unsigned long mem_end; |
2091 | unsigned long mem_start; |
2092 | unsigned long base_addr; |
2093 | |
2094 | /* |
	 * Some hardware also needs these fields (state, dev_list,
	 * napi_list, unreg_list, close_list) but they are not
	 * part of the usual set specified in Space.c.
2098 | */ |
2099 | |
2100 | unsigned long state; |
2101 | |
2102 | struct list_head dev_list; |
2103 | struct list_head napi_list; |
2104 | struct list_head unreg_list; |
2105 | struct list_head close_list; |
2106 | struct list_head ptype_all; |
2107 | struct list_head ptype_specific; |
2108 | |
2109 | struct { |
2110 | struct list_head upper; |
2111 | struct list_head lower; |
2112 | } adj_list; |
2113 | |
2114 | /* Read-mostly cache-line for fast-path access */ |
2115 | unsigned int flags; |
2116 | xdp_features_t xdp_features; |
2117 | unsigned long long priv_flags; |
2118 | const struct net_device_ops *netdev_ops; |
2119 | const struct xdp_metadata_ops *xdp_metadata_ops; |
2120 | int ifindex; |
2121 | unsigned short gflags; |
	unsigned short		hard_header_len;
2123 | |
2124 | /* Note : dev->mtu is often read without holding a lock. |
2125 | * Writers usually hold RTNL. |
2126 | * It is recommended to use READ_ONCE() to annotate the reads, |
2127 | * and to use WRITE_ONCE() to annotate the writes. |
2128 | */ |
2129 | unsigned int mtu; |
2130 | unsigned short needed_headroom; |
2131 | unsigned short needed_tailroom; |
2132 | |
2133 | netdev_features_t features; |
2134 | netdev_features_t hw_features; |
2135 | netdev_features_t wanted_features; |
2136 | netdev_features_t vlan_features; |
2137 | netdev_features_t hw_enc_features; |
2138 | netdev_features_t mpls_features; |
2139 | netdev_features_t gso_partial_features; |
2140 | |
2141 | unsigned int min_mtu; |
2142 | unsigned int max_mtu; |
2143 | unsigned short type; |
	unsigned char		min_header_len;
2145 | unsigned char name_assign_type; |
2146 | |
2147 | int group; |
2148 | |
2149 | struct net_device_stats stats; /* not used by modern drivers */ |
2150 | |
2151 | struct net_device_core_stats __percpu *core_stats; |
2152 | |
2153 | /* Stats to monitor link on/off, flapping */ |
2154 | atomic_t carrier_up_count; |
2155 | atomic_t carrier_down_count; |
2156 | |
2157 | #ifdef CONFIG_WIRELESS_EXT |
2158 | const struct iw_handler_def *wireless_handlers; |
2159 | struct iw_public_data *wireless_data; |
2160 | #endif |
2161 | const struct ethtool_ops *ethtool_ops; |
2162 | #ifdef CONFIG_NET_L3_MASTER_DEV |
2163 | const struct l3mdev_ops *l3mdev_ops; |
2164 | #endif |
2165 | #if IS_ENABLED(CONFIG_IPV6) |
2166 | const struct ndisc_ops *ndisc_ops; |
2167 | #endif |
2168 | |
2169 | #ifdef CONFIG_XFRM_OFFLOAD |
2170 | const struct xfrmdev_ops *xfrmdev_ops; |
2171 | #endif |
2172 | |
2173 | #if IS_ENABLED(CONFIG_TLS_DEVICE) |
2174 | const struct tlsdev_ops *tlsdev_ops; |
2175 | #endif |
2176 | |
	const struct header_ops *header_ops;
2178 | |
2179 | unsigned char operstate; |
2180 | unsigned char link_mode; |
2181 | |
2182 | unsigned char if_port; |
2183 | unsigned char dma; |
2184 | |
2185 | /* Interface address info. */ |
2186 | unsigned char perm_addr[MAX_ADDR_LEN]; |
2187 | unsigned char addr_assign_type; |
2188 | unsigned char addr_len; |
2189 | unsigned char upper_level; |
2190 | unsigned char lower_level; |
2191 | |
2192 | unsigned short neigh_priv_len; |
2193 | unsigned short dev_id; |
2194 | unsigned short dev_port; |
2195 | unsigned short padded; |
2196 | |
2197 | spinlock_t addr_list_lock; |
2198 | int irq; |
2199 | |
2200 | struct netdev_hw_addr_list uc; |
2201 | struct netdev_hw_addr_list mc; |
2202 | struct netdev_hw_addr_list dev_addrs; |
2203 | |
2204 | #ifdef CONFIG_SYSFS |
2205 | struct kset *queues_kset; |
2206 | #endif |
2207 | #ifdef CONFIG_LOCKDEP |
2208 | struct list_head unlink_list; |
2209 | #endif |
2210 | unsigned int promiscuity; |
2211 | unsigned int allmulti; |
2212 | bool uc_promisc; |
2213 | #ifdef CONFIG_LOCKDEP |
2214 | unsigned char nested_level; |
2215 | #endif |
2216 | |
2217 | |
2218 | /* Protocol-specific pointers */ |
2219 | |
2220 | struct in_device __rcu *ip_ptr; |
2221 | struct inet6_dev __rcu *ip6_ptr; |
2222 | #if IS_ENABLED(CONFIG_VLAN_8021Q) |
2223 | struct vlan_info __rcu *vlan_info; |
2224 | #endif |
2225 | #if IS_ENABLED(CONFIG_NET_DSA) |
2226 | struct dsa_port *dsa_ptr; |
2227 | #endif |
2228 | #if IS_ENABLED(CONFIG_TIPC) |
2229 | struct tipc_bearer __rcu *tipc_ptr; |
2230 | #endif |
2231 | #if IS_ENABLED(CONFIG_ATALK) |
2232 | void *atalk_ptr; |
2233 | #endif |
2234 | #if IS_ENABLED(CONFIG_AX25) |
2235 | void *ax25_ptr; |
2236 | #endif |
2237 | #if IS_ENABLED(CONFIG_CFG80211) |
2238 | struct wireless_dev *ieee80211_ptr; |
2239 | #endif |
2240 | #if IS_ENABLED(CONFIG_IEEE802154) || IS_ENABLED(CONFIG_6LOWPAN) |
2241 | struct wpan_dev *ieee802154_ptr; |
2242 | #endif |
2243 | #if IS_ENABLED(CONFIG_MPLS_ROUTING) |
2244 | struct mpls_dev __rcu *mpls_ptr; |
2245 | #endif |
2246 | #if IS_ENABLED(CONFIG_MCTP) |
2247 | struct mctp_dev __rcu *mctp_ptr; |
2248 | #endif |
2249 | |
2250 | /* |
2251 | * Cache lines mostly used on receive path (including eth_type_trans()) |
2252 | */ |
2253 | /* Interface address info used in eth_type_trans() */ |
2254 | const unsigned char *dev_addr; |
2255 | |
2256 | struct netdev_rx_queue *_rx; |
2257 | unsigned int num_rx_queues; |
2258 | unsigned int real_num_rx_queues; |
2259 | |
2260 | struct bpf_prog __rcu *xdp_prog; |
2261 | unsigned long gro_flush_timeout; |
2262 | int napi_defer_hard_irqs; |
2263 | #define GRO_LEGACY_MAX_SIZE 65536u |
2264 | /* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE), |
2265 | * and shinfo->gso_segs is a 16bit field. |
2266 | */ |
2267 | #define GRO_MAX_SIZE (8 * 65535u) |
2268 | unsigned int gro_max_size; |
2269 | unsigned int gro_ipv4_max_size; |
2270 | unsigned int xdp_zc_max_segs; |
2271 | rx_handler_func_t __rcu *rx_handler; |
2272 | void __rcu *rx_handler_data; |
2273 | #ifdef CONFIG_NET_XGRESS |
2274 | struct bpf_mprog_entry __rcu *tcx_ingress; |
2275 | #endif |
2276 | struct netdev_queue __rcu *ingress_queue; |
2277 | #ifdef CONFIG_NETFILTER_INGRESS |
2278 | struct nf_hook_entries __rcu *nf_hooks_ingress; |
2279 | #endif |
2280 | |
2281 | unsigned char broadcast[MAX_ADDR_LEN]; |
2282 | #ifdef CONFIG_RFS_ACCEL |
2283 | struct cpu_rmap *rx_cpu_rmap; |
2284 | #endif |
2285 | struct hlist_node index_hlist; |
2286 | |
2287 | /* |
2288 | * Cache lines mostly used on transmit path |
2289 | */ |
2290 | struct netdev_queue *_tx ____cacheline_aligned_in_smp; |
2291 | unsigned int num_tx_queues; |
2292 | unsigned int real_num_tx_queues; |
2293 | struct Qdisc __rcu *qdisc; |
2294 | unsigned int tx_queue_len; |
2295 | spinlock_t tx_global_lock; |
2296 | |
2297 | struct xdp_dev_bulk_queue __percpu *xdp_bulkq; |
2298 | |
2299 | #ifdef CONFIG_XPS |
2300 | struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX]; |
2301 | #endif |
2302 | #ifdef CONFIG_NET_XGRESS |
2303 | struct bpf_mprog_entry __rcu *tcx_egress; |
2304 | #endif |
2305 | #ifdef CONFIG_NETFILTER_EGRESS |
2306 | struct nf_hook_entries __rcu *nf_hooks_egress; |
2307 | #endif |
2308 | |
2309 | #ifdef CONFIG_NET_SCHED |
	DECLARE_HASHTABLE(qdisc_hash, 4);
2311 | #endif |
2312 | /* These may be needed for future network-power-down code. */ |
2313 | struct timer_list watchdog_timer; |
2314 | int watchdog_timeo; |
2315 | |
2316 | u32 proto_down_reason; |
2317 | |
2318 | struct list_head todo_list; |
2319 | |
2320 | #ifdef CONFIG_PCPU_DEV_REFCNT |
2321 | int __percpu *pcpu_refcnt; |
2322 | #else |
2323 | refcount_t dev_refcnt; |
2324 | #endif |
2325 | struct ref_tracker_dir refcnt_tracker; |
2326 | |
2327 | struct list_head link_watch_list; |
2328 | |
2329 | enum { NETREG_UNINITIALIZED=0, |
2330 | NETREG_REGISTERED, /* completed register_netdevice */ |
2331 | NETREG_UNREGISTERING, /* called unregister_netdevice */ |
2332 | NETREG_UNREGISTERED, /* completed unregister todo */ |
2333 | NETREG_RELEASED, /* called free_netdev */ |
2334 | NETREG_DUMMY, /* dummy device for NAPI poll */ |
2335 | } reg_state:8; |
2336 | |
2337 | bool dismantle; |
2338 | |
2339 | enum { |
2340 | RTNL_LINK_INITIALIZED, |
2341 | RTNL_LINK_INITIALIZING, |
2342 | } rtnl_link_state:16; |
2343 | |
2344 | bool needs_free_netdev; |
2345 | void (*priv_destructor)(struct net_device *dev); |
2346 | |
2347 | #ifdef CONFIG_NETPOLL |
2348 | struct netpoll_info __rcu *npinfo; |
2349 | #endif |
2350 | |
2351 | possible_net_t nd_net; |
2352 | |
2353 | /* mid-layer private */ |
2354 | void *ml_priv; |
2355 | enum netdev_ml_priv_type ml_priv_type; |
2356 | |
2357 | union { |
2358 | struct pcpu_lstats __percpu *lstats; |
2359 | struct pcpu_sw_netstats __percpu *tstats; |
2360 | struct pcpu_dstats __percpu *dstats; |
2361 | }; |
2362 | |
2363 | #if IS_ENABLED(CONFIG_GARP) |
2364 | struct garp_port __rcu *garp_port; |
2365 | #endif |
2366 | #if IS_ENABLED(CONFIG_MRP) |
2367 | struct mrp_port __rcu *mrp_port; |
2368 | #endif |
2369 | #if IS_ENABLED(CONFIG_NET_DROP_MONITOR) |
2370 | struct dm_hw_stat_delta __rcu *dm_private; |
2371 | #endif |
2372 | struct device dev; |
2373 | const struct attribute_group *sysfs_groups[4]; |
2374 | const struct attribute_group *sysfs_rx_queue_group; |
2375 | |
2376 | const struct rtnl_link_ops *rtnl_link_ops; |
2377 | |
2378 | /* for setting kernel sock attribute on TCP connection setup */ |
2379 | #define GSO_MAX_SEGS 65535u |
2380 | #define GSO_LEGACY_MAX_SIZE 65536u |
2381 | /* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE), |
2382 | * and shinfo->gso_segs is a 16bit field. |
2383 | */ |
2384 | #define GSO_MAX_SIZE (8 * GSO_MAX_SEGS) |
2385 | |
2386 | unsigned int gso_max_size; |
2387 | #define TSO_LEGACY_MAX_SIZE 65536 |
2388 | #define TSO_MAX_SIZE UINT_MAX |
2389 | unsigned int tso_max_size; |
2390 | u16 gso_max_segs; |
2391 | #define TSO_MAX_SEGS U16_MAX |
2392 | u16 tso_max_segs; |
2393 | unsigned int gso_ipv4_max_size; |
2394 | |
2395 | #ifdef CONFIG_DCB |
2396 | const struct dcbnl_rtnl_ops *dcbnl_ops; |
2397 | #endif |
2398 | s16 num_tc; |
2399 | struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; |
2400 | u8 prio_tc_map[TC_BITMASK + 1]; |
2401 | |
2402 | #if IS_ENABLED(CONFIG_FCOE) |
2403 | unsigned int fcoe_ddp_xid; |
2404 | #endif |
2405 | #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) |
2406 | struct netprio_map __rcu *priomap; |
2407 | #endif |
2408 | struct phy_device *phydev; |
2409 | struct sfp_bus *sfp_bus; |
2410 | struct lock_class_key *qdisc_tx_busylock; |
2411 | bool proto_down; |
2412 | unsigned wol_enabled:1; |
2413 | unsigned threaded:1; |
2414 | |
2415 | struct list_head net_notifier_list; |
2416 | |
2417 | #if IS_ENABLED(CONFIG_MACSEC) |
2418 | /* MACsec management functions */ |
2419 | const struct macsec_ops *macsec_ops; |
2420 | #endif |
2421 | const struct udp_tunnel_nic_info *udp_tunnel_nic_info; |
2422 | struct udp_tunnel_nic *udp_tunnel_nic; |
2423 | |
2424 | /* protected by rtnl_lock */ |
2425 | struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE]; |
2426 | |
2427 | u8 dev_addr_shadow[MAX_ADDR_LEN]; |
2428 | netdevice_tracker linkwatch_dev_tracker; |
2429 | netdevice_tracker watchdog_dev_tracker; |
2430 | netdevice_tracker dev_registered_tracker; |
2431 | struct rtnl_hw_stats64 *offload_xstats_l3; |
2432 | |
2433 | struct devlink_port *devlink_port; |
2434 | |
2435 | #if IS_ENABLED(CONFIG_DPLL) |
2436 | struct dpll_pin *dpll_pin; |
2437 | #endif |
2438 | }; |
2439 | #define to_net_dev(d) container_of(d, struct net_device, dev) |
2440 | |
2441 | /* |
2442 | * Driver should use this to assign devlink port instance to a netdevice |
2443 | * before it registers the netdevice. Therefore devlink_port is static |
2444 | * during the netdev lifetime after it is registered. |
2445 | */ |
2446 | #define SET_NETDEV_DEVLINK_PORT(dev, port) \ |
2447 | ({ \ |
2448 | WARN_ON((dev)->reg_state != NETREG_UNINITIALIZED); \ |
2449 | ((dev)->devlink_port = (port)); \ |
2450 | }) |
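
/*
 * Illustrative sketch of the intended call order (priv and its
 * devlink_port member are driver-side assumptions):
 *
 *	SET_NETDEV_DEVLINK_PORT(netdev, &priv->devlink_port);
 *	err = register_netdev(netdev);
 */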
2451 | |
2452 | static inline bool netif_elide_gro(const struct net_device *dev) |
2453 | { |
2454 | if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog) |
2455 | return true; |
2456 | return false; |
2457 | } |
2458 | |
2459 | #define NETDEV_ALIGN 32 |
2460 | |
2461 | static inline |
2462 | int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio) |
2463 | { |
2464 | return dev->prio_tc_map[prio & TC_BITMASK]; |
2465 | } |
2466 | |
2467 | static inline |
2468 | int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) |
2469 | { |
2470 | if (tc >= dev->num_tc) |
2471 | return -EINVAL; |
2472 | |
2473 | dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; |
2474 | return 0; |
2475 | } |
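
/*
 * Illustrative example: map skb priorities 0..3 to traffic class 0 and
 * 4..7 to traffic class 1 on a device already configured with two TCs:
 *
 *	int prio, err;
 *
 *	for (prio = 0; prio < 8; prio++) {
 *		err = netdev_set_prio_tc_map(dev, prio, prio < 4 ? 0 : 1);
 *		if (err)
 *			return err;
 *	}
 */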
2476 | |
2477 | int netdev_txq_to_tc(struct net_device *dev, unsigned int txq); |
2478 | void netdev_reset_tc(struct net_device *dev); |
2479 | int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset); |
2480 | int netdev_set_num_tc(struct net_device *dev, u8 num_tc); |
2481 | |
2482 | static inline |
2483 | int netdev_get_num_tc(struct net_device *dev) |
2484 | { |
2485 | return dev->num_tc; |
2486 | } |
2487 | |
2488 | static inline void net_prefetch(void *p) |
2489 | { |
2490 | prefetch(p); |
2491 | #if L1_CACHE_BYTES < 128 |
2492 | prefetch((u8 *)p + L1_CACHE_BYTES); |
2493 | #endif |
2494 | } |
2495 | |
2496 | static inline void net_prefetchw(void *p) |
2497 | { |
	prefetchw(p);
#if L1_CACHE_BYTES < 128
	prefetchw((u8 *)p + L1_CACHE_BYTES);
2501 | #endif |
2502 | } |
2503 | |
2504 | void netdev_unbind_sb_channel(struct net_device *dev, |
2505 | struct net_device *sb_dev); |
2506 | int netdev_bind_sb_channel_queue(struct net_device *dev, |
2507 | struct net_device *sb_dev, |
2508 | u8 tc, u16 count, u16 offset); |
2509 | int netdev_set_sb_channel(struct net_device *dev, u16 channel); |
2510 | static inline int netdev_get_sb_channel(struct net_device *dev) |
2511 | { |
2512 | return max_t(int, -dev->num_tc, 0); |
2513 | } |
2514 | |
2515 | static inline |
2516 | struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, |
2517 | unsigned int index) |
2518 | { |
2519 | DEBUG_NET_WARN_ON_ONCE(index >= dev->num_tx_queues); |
2520 | return &dev->_tx[index]; |
2521 | } |
2522 | |
2523 | static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev, |
2524 | const struct sk_buff *skb) |
2525 | { |
	return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2527 | } |
2528 | |
2529 | static inline void netdev_for_each_tx_queue(struct net_device *dev, |
2530 | void (*f)(struct net_device *, |
2531 | struct netdev_queue *, |
2532 | void *), |
2533 | void *arg) |
2534 | { |
2535 | unsigned int i; |
2536 | |
2537 | for (i = 0; i < dev->num_tx_queues; i++) |
2538 | f(dev, &dev->_tx[i], arg); |
2539 | } |
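
/*
 * Illustrative example: the callback is invoked once per TX queue, e.g.
 * to reset byte-queue-limit accounting (foo_reset_queue is a name
 * assumed here; netdev_tx_reset_queue() is a real helper):
 *
 *	static void foo_reset_queue(struct net_device *dev,
 *				    struct netdev_queue *txq, void *arg)
 *	{
 *		netdev_tx_reset_queue(txq);
 *	}
 *
 *	netdev_for_each_tx_queue(dev, foo_reset_queue, NULL);
 */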
2540 | |
2541 | #define netdev_lockdep_set_classes(dev) \ |
2542 | { \ |
2543 | static struct lock_class_key qdisc_tx_busylock_key; \ |
2544 | static struct lock_class_key qdisc_xmit_lock_key; \ |
2545 | static struct lock_class_key dev_addr_list_lock_key; \ |
2546 | unsigned int i; \ |
2547 | \ |
2548 | (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \ |
2549 | lockdep_set_class(&(dev)->addr_list_lock, \ |
2550 | &dev_addr_list_lock_key); \ |
2551 | for (i = 0; i < (dev)->num_tx_queues; i++) \ |
2552 | lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \ |
2553 | &qdisc_xmit_lock_key); \ |
2554 | } |
2555 | |
2556 | u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, |
2557 | struct net_device *sb_dev); |
2558 | struct netdev_queue *netdev_core_pick_tx(struct net_device *dev, |
2559 | struct sk_buff *skb, |
2560 | struct net_device *sb_dev); |
2561 | |
/* returns the headroom that the master device needs to take into account
 * when forwarding to this dev
2564 | */ |
2565 | static inline unsigned netdev_get_fwd_headroom(struct net_device *dev) |
2566 | { |
2567 | return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom; |
2568 | } |
2569 | |
2570 | static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr) |
2571 | { |
2572 | if (dev->netdev_ops->ndo_set_rx_headroom) |
2573 | dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr); |
2574 | } |
2575 | |
2576 | /* set the device rx headroom to the dev's default */ |
2577 | static inline void netdev_reset_rx_headroom(struct net_device *dev) |
2578 | { |
	netdev_set_rx_headroom(dev, -1);
2580 | } |
2581 | |
2582 | static inline void *netdev_get_ml_priv(struct net_device *dev, |
2583 | enum netdev_ml_priv_type type) |
2584 | { |
2585 | if (dev->ml_priv_type != type) |
2586 | return NULL; |
2587 | |
2588 | return dev->ml_priv; |
2589 | } |
2590 | |
2591 | static inline void netdev_set_ml_priv(struct net_device *dev, |
2592 | void *ml_priv, |
2593 | enum netdev_ml_priv_type type) |
2594 | { |
	WARN(dev->ml_priv_type && dev->ml_priv_type != type,
	     "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
	     dev->ml_priv_type, type);
	WARN(!dev->ml_priv_type && dev->ml_priv,
	     "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");
2600 | |
2601 | dev->ml_priv = ml_priv; |
2602 | dev->ml_priv_type = type; |
2603 | } |
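
/*
 * Illustrative example (the can_ml pointer is an assumption; CAN is
 * currently the only user of ml_priv_type): publish the data once, then
 * retrieve it type-safely elsewhere:
 *
 *	netdev_set_ml_priv(dev, can_ml, ML_PRIV_CAN);
 *	...
 *	struct can_ml_priv *can_ml = netdev_get_ml_priv(dev, ML_PRIV_CAN);
 *	if (!can_ml)
 *		return;
 */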
2604 | |
2605 | /* |
2606 | * Net namespace inlines |
2607 | */ |
2608 | static inline |
2609 | struct net *dev_net(const struct net_device *dev) |
2610 | { |
	return read_pnet(&dev->nd_net);
2612 | } |
2613 | |
2614 | static inline |
2615 | void dev_net_set(struct net_device *dev, struct net *net) |
2616 | { |
	write_pnet(&dev->nd_net, net);
2618 | } |
2619 | |
2620 | /** |
2621 | * netdev_priv - access network device private data |
2622 | * @dev: network device |
2623 | * |
2624 | * Get network device private data |
2625 | */ |
2626 | static inline void *netdev_priv(const struct net_device *dev) |
2627 | { |
2628 | return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN); |
2629 | } |
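
/*
 * Illustrative example: the private area is sized when the netdevice is
 * allocated and then obtained via netdev_priv(); struct foo_priv is a
 * driver-side assumption:
 *
 *	struct net_device *dev = alloc_etherdev(sizeof(struct foo_priv));
 *	struct foo_priv *priv = netdev_priv(dev);
 */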
2630 | |
/* Set the sysfs physical device reference for the network logical device.
 * If set prior to registration, it will cause a symlink to be created
 * during initialization.
2633 | */ |
2634 | #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev)) |
2635 | |
2636 | /* Set the sysfs device type for the network logical device to allow |
2637 | * fine-grained identification of different network device types. For |
2638 | * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc. |
2639 | */ |
2640 | #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype)) |
2641 | |
2642 | /* Default NAPI poll() weight |
 * Device drivers are strongly advised not to use a bigger value
2644 | */ |
2645 | #define NAPI_POLL_WEIGHT 64 |
2646 | |
2647 | void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi, |
2648 | int (*poll)(struct napi_struct *, int), int weight); |
2649 | |
2650 | /** |
2651 | * netif_napi_add() - initialize a NAPI context |
2652 | * @dev: network device |
2653 | * @napi: NAPI context |
2654 | * @poll: polling function |
2655 | * |
2656 | * netif_napi_add() must be used to initialize a NAPI context prior to calling |
2657 | * *any* of the other NAPI-related functions. |
2658 | */ |
2659 | static inline void |
2660 | netif_napi_add(struct net_device *dev, struct napi_struct *napi, |
2661 | int (*poll)(struct napi_struct *, int)) |
2662 | { |
2663 | netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT); |
2664 | } |
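
/*
 * Illustrative sketch: a driver registers its poll function at probe
 * time and enables NAPI when the interface is brought up (foo_poll and
 * priv->napi are driver-side assumptions):
 *
 *	netif_napi_add(dev, &priv->napi, foo_poll);
 *	...
 *	napi_enable(&priv->napi);
 */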
2665 | |
2666 | static inline void |
2667 | netif_napi_add_tx_weight(struct net_device *dev, |
2668 | struct napi_struct *napi, |
2669 | int (*poll)(struct napi_struct *, int), |
2670 | int weight) |
2671 | { |
	set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
2673 | netif_napi_add_weight(dev, napi, poll, weight); |
2674 | } |
2675 | |
2676 | /** |
2677 | * netif_napi_add_tx() - initialize a NAPI context to be used for Tx only |
2678 | * @dev: network device |
2679 | * @napi: NAPI context |
2680 | * @poll: polling function |
2681 | * |
2682 | * This variant of netif_napi_add() should be used from drivers using NAPI |
2683 | * to exclusively poll a TX queue. |
 * This avoids adding it into napi_hash[], thus polluting that hash table.
2685 | */ |
2686 | static inline void netif_napi_add_tx(struct net_device *dev, |
2687 | struct napi_struct *napi, |
2688 | int (*poll)(struct napi_struct *, int)) |
2689 | { |
2690 | netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT); |
2691 | } |
2692 | |
2693 | /** |
2694 | * __netif_napi_del - remove a NAPI context |
2695 | * @napi: NAPI context |
2696 | * |
2697 | * Warning: caller must observe RCU grace period before freeing memory |
2698 | * containing @napi. Drivers might want to call this helper to combine |
2699 | * all the needed RCU grace periods into a single one. |
2700 | */ |
2701 | void __netif_napi_del(struct napi_struct *napi); |
2702 | |
2703 | /** |
2704 | * netif_napi_del - remove a NAPI context |
2705 | * @napi: NAPI context |
2706 | * |
2707 | * netif_napi_del() removes a NAPI context from the network device NAPI list |
2708 | */ |
2709 | static inline void netif_napi_del(struct napi_struct *napi) |
2710 | { |
2711 | __netif_napi_del(napi); |
2712 | synchronize_net(); |
2713 | } |
2714 | |
2715 | struct packet_type { |
2716 | __be16 type; /* This is really htons(ether_type). */ |
2717 | bool ignore_outgoing; |
2718 | struct net_device *dev; /* NULL is wildcarded here */ |
2719 | netdevice_tracker dev_tracker; |
2720 | int (*func) (struct sk_buff *, |
2721 | struct net_device *, |
2722 | struct packet_type *, |
2723 | struct net_device *); |
2724 | void (*list_func) (struct list_head *, |
2725 | struct packet_type *, |
2726 | struct net_device *); |
2727 | bool (*id_match)(struct packet_type *ptype, |
2728 | struct sock *sk); |
2729 | struct net *af_packet_net; |
2730 | void *af_packet_priv; |
2731 | struct list_head list; |
2732 | }; |
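
/*
 * Illustrative example: register a handler for every incoming packet,
 * similar to what AF_PACKET taps do (foo_rcv is an assumed function
 * with the func() signature above):
 *
 *	static struct packet_type foo_packet_type __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_ALL),
 *		.func = foo_rcv,
 *	};
 *
 *	dev_add_pack(&foo_packet_type);
 */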
2733 | |
2734 | struct offload_callbacks { |
2735 | struct sk_buff *(*gso_segment)(struct sk_buff *skb, |
2736 | netdev_features_t features); |
2737 | struct sk_buff *(*gro_receive)(struct list_head *head, |
2738 | struct sk_buff *skb); |
2739 | int (*gro_complete)(struct sk_buff *skb, int nhoff); |
2740 | }; |
2741 | |
2742 | struct packet_offload { |
2743 | __be16 type; /* This is really htons(ether_type). */ |
2744 | u16 priority; |
2745 | struct offload_callbacks callbacks; |
2746 | struct list_head list; |
2747 | }; |
2748 | |
/* often modified stats are per-CPU, others are shared (netdev->stats) */
2750 | struct pcpu_sw_netstats { |
2751 | u64_stats_t rx_packets; |
2752 | u64_stats_t rx_bytes; |
2753 | u64_stats_t tx_packets; |
2754 | u64_stats_t tx_bytes; |
2755 | struct u64_stats_sync syncp; |
2756 | } __aligned(4 * sizeof(u64)); |
2757 | |
2758 | struct pcpu_lstats { |
2759 | u64_stats_t packets; |
2760 | u64_stats_t bytes; |
2761 | struct u64_stats_sync syncp; |
2762 | } __aligned(2 * sizeof(u64)); |
2763 | |
2764 | void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes); |
2765 | |
2766 | static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len) |
2767 | { |
2768 | struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); |
2769 | |
	u64_stats_update_begin(&tstats->syncp);
	u64_stats_add(&tstats->rx_bytes, len);
	u64_stats_inc(&tstats->rx_packets);
	u64_stats_update_end(&tstats->syncp);
2774 | } |
2775 | |
2776 | static inline void dev_sw_netstats_tx_add(struct net_device *dev, |
2777 | unsigned int packets, |
2778 | unsigned int len) |
2779 | { |
2780 | struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); |
2781 | |
	u64_stats_update_begin(&tstats->syncp);
	u64_stats_add(&tstats->tx_bytes, len);
	u64_stats_add(&tstats->tx_packets, packets);
	u64_stats_update_end(&tstats->syncp);
2786 | } |
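
/*
 * Illustrative example: a virtual device's ndo_start_xmit() accounts a
 * successfully queued skb before handing it off:
 *
 *	dev_sw_netstats_tx_add(dev, 1, skb->len);
 */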
2787 | |
2788 | static inline void dev_lstats_add(struct net_device *dev, unsigned int len) |
2789 | { |
2790 | struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats); |
2791 | |
	u64_stats_update_begin(&lstats->syncp);
	u64_stats_add(&lstats->bytes, len);
	u64_stats_inc(&lstats->packets);
	u64_stats_update_end(&lstats->syncp);
2796 | } |
2797 | |
2798 | #define __netdev_alloc_pcpu_stats(type, gfp) \ |
2799 | ({ \ |
2800 | typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\ |
2801 | if (pcpu_stats) { \ |
2802 | int __cpu; \ |
2803 | for_each_possible_cpu(__cpu) { \ |
2804 | typeof(type) *stat; \ |
2805 | stat = per_cpu_ptr(pcpu_stats, __cpu); \ |
2806 | u64_stats_init(&stat->syncp); \ |
2807 | } \ |
2808 | } \ |
2809 | pcpu_stats; \ |
2810 | }) |
2811 | |
2812 | #define netdev_alloc_pcpu_stats(type) \ |
2813 | __netdev_alloc_pcpu_stats(type, GFP_KERNEL) |
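
/*
 * Illustrative example: allocate the per-CPU stats in ndo_init() and
 * release them in ndo_uninit():
 *
 *	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 *	if (!dev->tstats)
 *		return -ENOMEM;
 *	...
 *	free_percpu(dev->tstats);
 */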
2814 | |
2815 | #define devm_netdev_alloc_pcpu_stats(dev, type) \ |
2816 | ({ \ |
2817 | typeof(type) __percpu *pcpu_stats = devm_alloc_percpu(dev, type);\ |
2818 | if (pcpu_stats) { \ |
2819 | int __cpu; \ |
2820 | for_each_possible_cpu(__cpu) { \ |
2821 | typeof(type) *stat; \ |
2822 | stat = per_cpu_ptr(pcpu_stats, __cpu); \ |
2823 | u64_stats_init(&stat->syncp); \ |
2824 | } \ |
2825 | } \ |
2826 | pcpu_stats; \ |
2827 | }) |
2828 | |
2829 | enum netdev_lag_tx_type { |
2830 | NETDEV_LAG_TX_TYPE_UNKNOWN, |
2831 | NETDEV_LAG_TX_TYPE_RANDOM, |
2832 | NETDEV_LAG_TX_TYPE_BROADCAST, |
2833 | NETDEV_LAG_TX_TYPE_ROUNDROBIN, |
2834 | NETDEV_LAG_TX_TYPE_ACTIVEBACKUP, |
2835 | NETDEV_LAG_TX_TYPE_HASH, |
2836 | }; |
2837 | |
2838 | enum netdev_lag_hash { |
2839 | NETDEV_LAG_HASH_NONE, |
2840 | NETDEV_LAG_HASH_L2, |
2841 | NETDEV_LAG_HASH_L34, |
2842 | NETDEV_LAG_HASH_L23, |
2843 | NETDEV_LAG_HASH_E23, |
2844 | NETDEV_LAG_HASH_E34, |
2845 | NETDEV_LAG_HASH_VLAN_SRCMAC, |
2846 | NETDEV_LAG_HASH_UNKNOWN, |
2847 | }; |
2848 | |
2849 | struct netdev_lag_upper_info { |
2850 | enum netdev_lag_tx_type tx_type; |
2851 | enum netdev_lag_hash hash_type; |
2852 | }; |
2853 | |
2854 | struct netdev_lag_lower_state_info { |
2855 | u8 link_up : 1, |
2856 | tx_enabled : 1; |
2857 | }; |
2858 | |
2859 | #include <linux/notifier.h> |
2860 | |
2861 | /* netdevice notifier chain. Please remember to update netdev_cmd_to_name() |
2862 | * and the rtnetlink notification exclusion list in rtnetlink_event() when |
2863 | * adding new types. |
2864 | */ |
2865 | enum netdev_cmd { |
2866 | NETDEV_UP = 1, /* For now you can't veto a device up/down */ |
2867 | NETDEV_DOWN, |
2868 | NETDEV_REBOOT, /* Tell a protocol stack a network interface |
2869 | detected a hardware crash and restarted |
2870 | - we can use this eg to kick tcp sessions |
2871 | once done */ |
2872 | NETDEV_CHANGE, /* Notify device state change */ |
2873 | NETDEV_REGISTER, |
2874 | NETDEV_UNREGISTER, |
2875 | NETDEV_CHANGEMTU, /* notify after mtu change happened */ |
2876 | NETDEV_CHANGEADDR, /* notify after the address change */ |
2877 | NETDEV_PRE_CHANGEADDR, /* notify before the address change */ |
2878 | NETDEV_GOING_DOWN, |
2879 | NETDEV_CHANGENAME, |
2880 | NETDEV_FEAT_CHANGE, |
2881 | NETDEV_BONDING_FAILOVER, |
2882 | NETDEV_PRE_UP, |
2883 | NETDEV_PRE_TYPE_CHANGE, |
2884 | NETDEV_POST_TYPE_CHANGE, |
2885 | NETDEV_POST_INIT, |
2886 | NETDEV_PRE_UNINIT, |
2887 | NETDEV_RELEASE, |
2888 | NETDEV_NOTIFY_PEERS, |
2889 | NETDEV_JOIN, |
2890 | NETDEV_CHANGEUPPER, |
2891 | NETDEV_RESEND_IGMP, |
2892 | NETDEV_PRECHANGEMTU, /* notify before mtu change happened */ |
2893 | NETDEV_CHANGEINFODATA, |
2894 | NETDEV_BONDING_INFO, |
2895 | NETDEV_PRECHANGEUPPER, |
2896 | NETDEV_CHANGELOWERSTATE, |
2897 | NETDEV_UDP_TUNNEL_PUSH_INFO, |
2898 | NETDEV_UDP_TUNNEL_DROP_INFO, |
2899 | NETDEV_CHANGE_TX_QUEUE_LEN, |
2900 | NETDEV_CVLAN_FILTER_PUSH_INFO, |
2901 | NETDEV_CVLAN_FILTER_DROP_INFO, |
2902 | NETDEV_SVLAN_FILTER_PUSH_INFO, |
2903 | NETDEV_SVLAN_FILTER_DROP_INFO, |
2904 | NETDEV_OFFLOAD_XSTATS_ENABLE, |
2905 | NETDEV_OFFLOAD_XSTATS_DISABLE, |
2906 | NETDEV_OFFLOAD_XSTATS_REPORT_USED, |
2907 | NETDEV_OFFLOAD_XSTATS_REPORT_DELTA, |
2908 | NETDEV_XDP_FEAT_CHANGE, |
2909 | }; |
2910 | const char *netdev_cmd_to_name(enum netdev_cmd cmd); |
2911 | |
2912 | int register_netdevice_notifier(struct notifier_block *nb); |
2913 | int unregister_netdevice_notifier(struct notifier_block *nb); |
2914 | int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb); |
2915 | int unregister_netdevice_notifier_net(struct net *net, |
2916 | struct notifier_block *nb); |
2917 | int register_netdevice_notifier_dev_net(struct net_device *dev, |
2918 | struct notifier_block *nb, |
2919 | struct netdev_net_notifier *nn); |
2920 | int unregister_netdevice_notifier_dev_net(struct net_device *dev, |
2921 | struct notifier_block *nb, |
2922 | struct netdev_net_notifier *nn); |
2923 | |
2924 | struct netdev_notifier_info { |
2925 | struct net_device *dev; |
2926 | struct netlink_ext_ack *extack; |
2927 | }; |
2928 | |
2929 | struct netdev_notifier_info_ext { |
2930 | struct netdev_notifier_info info; /* must be first */ |
2931 | union { |
2932 | u32 mtu; |
2933 | } ext; |
2934 | }; |
2935 | |
2936 | struct netdev_notifier_change_info { |
2937 | struct netdev_notifier_info info; /* must be first */ |
2938 | unsigned int flags_changed; |
2939 | }; |
2940 | |
2941 | struct netdev_notifier_changeupper_info { |
2942 | struct netdev_notifier_info info; /* must be first */ |
2943 | struct net_device *upper_dev; /* new upper dev */ |
2944 | bool master; /* is upper dev master */ |
2945 | bool linking; /* is the notification for link or unlink */ |
2946 | void *upper_info; /* upper dev info */ |
2947 | }; |
2948 | |
2949 | struct netdev_notifier_changelowerstate_info { |
2950 | struct netdev_notifier_info info; /* must be first */ |
2951 | void *lower_state_info; /* is lower dev state */ |
2952 | }; |
2953 | |
2954 | struct netdev_notifier_pre_changeaddr_info { |
2955 | struct netdev_notifier_info info; /* must be first */ |
2956 | const unsigned char *dev_addr; |
2957 | }; |
2958 | |
2959 | enum netdev_offload_xstats_type { |
2960 | NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1, |
2961 | }; |
2962 | |
2963 | struct netdev_notifier_offload_xstats_info { |
2964 | struct netdev_notifier_info info; /* must be first */ |
2965 | enum netdev_offload_xstats_type type; |
2966 | |
2967 | union { |
2968 | /* NETDEV_OFFLOAD_XSTATS_REPORT_DELTA */ |
2969 | struct netdev_notifier_offload_xstats_rd *report_delta; |
2970 | /* NETDEV_OFFLOAD_XSTATS_REPORT_USED */ |
2971 | struct netdev_notifier_offload_xstats_ru *report_used; |
2972 | }; |
2973 | }; |
2974 | |
2975 | int netdev_offload_xstats_enable(struct net_device *dev, |
2976 | enum netdev_offload_xstats_type type, |
2977 | struct netlink_ext_ack *extack); |
2978 | int netdev_offload_xstats_disable(struct net_device *dev, |
2979 | enum netdev_offload_xstats_type type); |
2980 | bool netdev_offload_xstats_enabled(const struct net_device *dev, |
2981 | enum netdev_offload_xstats_type type); |
2982 | int netdev_offload_xstats_get(struct net_device *dev, |
2983 | enum netdev_offload_xstats_type type, |
2984 | struct rtnl_hw_stats64 *stats, bool *used, |
2985 | struct netlink_ext_ack *extack); |
2986 | void |
2987 | netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *rd, |
2988 | const struct rtnl_hw_stats64 *stats); |
2989 | void |
2990 | netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *ru); |
2991 | void netdev_offload_xstats_push_delta(struct net_device *dev, |
2992 | enum netdev_offload_xstats_type type, |
2993 | const struct rtnl_hw_stats64 *stats); |
2994 | |
2995 | static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, |
2996 | struct net_device *dev) |
2997 | { |
2998 | info->dev = dev; |
2999 | info->extack = NULL; |
3000 | } |
3001 | |
3002 | static inline struct net_device * |
3003 | netdev_notifier_info_to_dev(const struct netdev_notifier_info *info) |
3004 | { |
3005 | return info->dev; |
3006 | } |
3007 | |
3008 | static inline struct netlink_ext_ack * |
3009 | netdev_notifier_info_to_extack(const struct netdev_notifier_info *info) |
3010 | { |
3011 | return info->extack; |
3012 | } |
3013 | |
3014 | int call_netdevice_notifiers(unsigned long val, struct net_device *dev); |
3015 | int call_netdevice_notifiers_info(unsigned long val, |
3016 | struct netdev_notifier_info *info); |
3017 | |
3018 | extern rwlock_t dev_base_lock; /* Device list lock */ |
3019 | |
3020 | #define for_each_netdev(net, d) \ |
3021 | list_for_each_entry(d, &(net)->dev_base_head, dev_list) |
3022 | #define for_each_netdev_reverse(net, d) \ |
3023 | list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list) |
3024 | #define for_each_netdev_rcu(net, d) \ |
3025 | list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list) |
3026 | #define for_each_netdev_safe(net, d, n) \ |
3027 | list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) |
3028 | #define for_each_netdev_continue(net, d) \ |
3029 | list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) |
3030 | #define for_each_netdev_continue_reverse(net, d) \ |
3031 | list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \ |
3032 | dev_list) |
3033 | #define for_each_netdev_continue_rcu(net, d) \ |
3034 | list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) |
3035 | #define for_each_netdev_in_bond_rcu(bond, slave) \ |
3036 | for_each_netdev_rcu(&init_net, slave) \ |
3037 | if (netdev_master_upper_dev_get_rcu(slave) == (bond)) |
3038 | #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) |
3039 | |
3040 | #define for_each_netdev_dump(net, d, ifindex) \ |
3041 | xa_for_each_start(&(net)->dev_by_index, (ifindex), (d), (ifindex)) |
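
/*
 * Iteration example (sketch; foo_handle() is a hypothetical per-device
 * hook): the plain for_each_netdev() walkers must run under RTNL, while
 * the _rcu variants only need an RCU read-side critical section:
 *
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(net, dev) {
 *		if (netif_running(dev))
 *			foo_handle(dev);
 *	}
 *	rcu_read_unlock();
 */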
3042 | |
3043 | static inline struct net_device *next_net_device(struct net_device *dev) |
3044 | { |
3045 | struct list_head *lh; |
3046 | struct net *net; |
3047 | |
3048 | net = dev_net(dev); |
3049 | lh = dev->dev_list.next; |
3050 | return lh == &net->dev_base_head ? NULL : net_device_entry(lh); |
3051 | } |
3052 | |
3053 | static inline struct net_device *next_net_device_rcu(struct net_device *dev) |
3054 | { |
3055 | struct list_head *lh; |
3056 | struct net *net; |
3057 | |
3058 | net = dev_net(dev); |
3059 | lh = rcu_dereference(list_next_rcu(&dev->dev_list)); |
3060 | return lh == &net->dev_base_head ? NULL : net_device_entry(lh); |
3061 | } |
3062 | |
3063 | static inline struct net_device *first_net_device(struct net *net) |
3064 | { |
	return list_empty(&net->dev_base_head) ? NULL :
3066 | net_device_entry(net->dev_base_head.next); |
3067 | } |
3068 | |
3069 | static inline struct net_device *first_net_device_rcu(struct net *net) |
3070 | { |
3071 | struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); |
3072 | |
3073 | return lh == &net->dev_base_head ? NULL : net_device_entry(lh); |
3074 | } |
3075 | |
3076 | int netdev_boot_setup_check(struct net_device *dev); |
3077 | struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, |
3078 | const char *hwaddr); |
3079 | struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); |
3080 | void dev_add_pack(struct packet_type *pt); |
3081 | void dev_remove_pack(struct packet_type *pt); |
3082 | void __dev_remove_pack(struct packet_type *pt); |
3083 | void dev_add_offload(struct packet_offload *po); |
3084 | void dev_remove_offload(struct packet_offload *po); |
3085 | |
3086 | int dev_get_iflink(const struct net_device *dev); |
3087 | int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb); |
3088 | int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr, |
3089 | struct net_device_path_stack *stack); |
3090 | struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, |
3091 | unsigned short mask); |
3092 | struct net_device *dev_get_by_name(struct net *net, const char *name); |
3093 | struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); |
3094 | struct net_device *__dev_get_by_name(struct net *net, const char *name); |
3095 | bool netdev_name_in_use(struct net *net, const char *name); |
3096 | int dev_alloc_name(struct net_device *dev, const char *name); |
3097 | int dev_open(struct net_device *dev, struct netlink_ext_ack *extack); |
3098 | void dev_close(struct net_device *dev); |
3099 | void dev_close_many(struct list_head *head, bool unlink); |
3100 | void dev_disable_lro(struct net_device *dev); |
3101 | int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); |
3102 | u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, |
3103 | struct net_device *sb_dev); |
3104 | u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, |
3105 | struct net_device *sb_dev); |
3106 | |
3107 | int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev); |
3108 | int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id); |
3109 | |
3110 | static inline int dev_queue_xmit(struct sk_buff *skb) |
3111 | { |
3112 | return __dev_queue_xmit(skb, NULL); |
3113 | } |
3114 | |
3115 | static inline int dev_queue_xmit_accel(struct sk_buff *skb, |
3116 | struct net_device *sb_dev) |
3117 | { |
3118 | return __dev_queue_xmit(skb, sb_dev); |
3119 | } |
3120 | |
3121 | static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id) |
3122 | { |
3123 | int ret; |
3124 | |
3125 | ret = __dev_direct_xmit(skb, queue_id); |
	if (!dev_xmit_complete(ret))
3127 | kfree_skb(skb); |
3128 | return ret; |
3129 | } |
3130 | |
3131 | int register_netdevice(struct net_device *dev); |
3132 | void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); |
3133 | void unregister_netdevice_many(struct list_head *head); |
3134 | static inline void unregister_netdevice(struct net_device *dev) |
3135 | { |
3136 | unregister_netdevice_queue(dev, NULL); |
3137 | } |
3138 | |
3139 | int netdev_refcnt_read(const struct net_device *dev); |
3140 | void free_netdev(struct net_device *dev); |
3141 | void netdev_freemem(struct net_device *dev); |
3142 | int init_dummy_netdev(struct net_device *dev); |
3143 | |
3144 | struct net_device *netdev_get_xmit_slave(struct net_device *dev, |
3145 | struct sk_buff *skb, |
3146 | bool all_slaves); |
3147 | struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev, |
3148 | struct sock *sk); |
3149 | struct net_device *dev_get_by_index(struct net *net, int ifindex); |
3150 | struct net_device *__dev_get_by_index(struct net *net, int ifindex); |
3151 | struct net_device *netdev_get_by_index(struct net *net, int ifindex, |
3152 | netdevice_tracker *tracker, gfp_t gfp); |
3153 | struct net_device *netdev_get_by_name(struct net *net, const char *name, |
3154 | netdevice_tracker *tracker, gfp_t gfp); |
3155 | struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); |
3156 | struct net_device *dev_get_by_napi_id(unsigned int napi_id); |
3157 | |
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
3159 | unsigned short type, |
3160 | const void *daddr, const void *saddr, |
3161 | unsigned int len) |
3162 | { |
3163 | if (!dev->header_ops || !dev->header_ops->create) |
3164 | return 0; |
3165 | |
3166 | return dev->header_ops->create(skb, dev, type, daddr, saddr, len); |
3167 | } |
3168 | |
static inline int dev_parse_header(const struct sk_buff *skb,
3170 | unsigned char *haddr) |
3171 | { |
3172 | const struct net_device *dev = skb->dev; |
3173 | |
3174 | if (!dev->header_ops || !dev->header_ops->parse) |
3175 | return 0; |
3176 | return dev->header_ops->parse(skb, haddr); |
3177 | } |
3178 | |
static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
3180 | { |
3181 | const struct net_device *dev = skb->dev; |
3182 | |
3183 | if (!dev->header_ops || !dev->header_ops->parse_protocol) |
3184 | return 0; |
3185 | return dev->header_ops->parse_protocol(skb); |
3186 | } |
3187 | |
3188 | /* ll_header must have at least hard_header_len allocated */ |
static inline bool dev_validate_header(const struct net_device *dev,
				       char *ll_header, int len)
3191 | { |
3192 | if (likely(len >= dev->hard_header_len)) |
3193 | return true; |
3194 | if (len < dev->min_header_len) |
3195 | return false; |
3196 | |
3197 | if (capable(CAP_SYS_RAWIO)) { |
3198 | memset(ll_header + len, 0, dev->hard_header_len - len); |
3199 | return true; |
3200 | } |
3201 | |
3202 | if (dev->header_ops && dev->header_ops->validate) |
3203 | return dev->header_ops->validate(ll_header, len); |
3204 | |
3205 | return false; |
3206 | } |
3207 | |
static inline bool dev_has_header(const struct net_device *dev)
3209 | { |
3210 | return dev->header_ops && dev->header_ops->create; |
3211 | } |
3212 | |
3213 | /* |
3214 | * Incoming packets are placed on per-CPU queues |
3215 | */ |
3216 | struct softnet_data { |
3217 | struct list_head poll_list; |
3218 | struct sk_buff_head process_queue; |
3219 | |
3220 | /* stats */ |
3221 | unsigned int processed; |
3222 | unsigned int time_squeeze; |
3223 | #ifdef CONFIG_RPS |
3224 | struct softnet_data *rps_ipi_list; |
3225 | #endif |
3226 | |
3227 | bool in_net_rx_action; |
3228 | bool in_napi_threaded_poll; |
3229 | |
3230 | #ifdef CONFIG_NET_FLOW_LIMIT |
3231 | struct sd_flow_limit __rcu *flow_limit; |
3232 | #endif |
3233 | struct Qdisc *output_queue; |
3234 | struct Qdisc **output_queue_tailp; |
3235 | struct sk_buff *completion_queue; |
3236 | #ifdef CONFIG_XFRM_OFFLOAD |
3237 | struct sk_buff_head xfrm_backlog; |
3238 | #endif |
3239 | /* written and read only by owning cpu: */ |
3240 | struct { |
3241 | u16 recursion; |
3242 | u8 more; |
3243 | #ifdef CONFIG_NET_EGRESS |
3244 | u8 skip_txqueue; |
3245 | #endif |
3246 | } xmit; |
3247 | #ifdef CONFIG_RPS |
3248 | /* input_queue_head should be written by cpu owning this struct, |
3249 | * and only read by other cpus. Worth using a cache line. |
3250 | */ |
3251 | unsigned int input_queue_head ____cacheline_aligned_in_smp; |
3252 | |
3253 | /* Elements below can be accessed between CPUs for RPS/RFS */ |
3254 | call_single_data_t csd ____cacheline_aligned_in_smp; |
3255 | struct softnet_data *rps_ipi_next; |
3256 | unsigned int cpu; |
3257 | unsigned int input_queue_tail; |
3258 | #endif |
3259 | unsigned int received_rps; |
3260 | unsigned int dropped; |
3261 | struct sk_buff_head input_pkt_queue; |
3262 | struct napi_struct backlog; |
3263 | |
3264 | /* Another possibly contended cache line */ |
3265 | spinlock_t defer_lock ____cacheline_aligned_in_smp; |
3266 | int defer_count; |
3267 | int defer_ipi_scheduled; |
3268 | struct sk_buff *defer_list; |
3269 | call_single_data_t defer_csd; |
3270 | }; |
3271 | |
3272 | static inline void input_queue_head_incr(struct softnet_data *sd) |
3273 | { |
3274 | #ifdef CONFIG_RPS |
3275 | sd->input_queue_head++; |
3276 | #endif |
3277 | } |
3278 | |
3279 | static inline void input_queue_tail_incr_save(struct softnet_data *sd, |
3280 | unsigned int *qtail) |
3281 | { |
3282 | #ifdef CONFIG_RPS |
3283 | *qtail = ++sd->input_queue_tail; |
3284 | #endif |
3285 | } |
3286 | |
3287 | DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); |
3288 | |
3289 | static inline int dev_recursion_level(void) |
3290 | { |
3291 | return this_cpu_read(softnet_data.xmit.recursion); |
3292 | } |
3293 | |
3294 | #define XMIT_RECURSION_LIMIT 8 |
3295 | static inline bool dev_xmit_recursion(void) |
3296 | { |
3297 | return unlikely(__this_cpu_read(softnet_data.xmit.recursion) > |
3298 | XMIT_RECURSION_LIMIT); |
3299 | } |
3300 | |
3301 | static inline void dev_xmit_recursion_inc(void) |
3302 | { |
3303 | __this_cpu_inc(softnet_data.xmit.recursion); |
3304 | } |
3305 | |
3306 | static inline void dev_xmit_recursion_dec(void) |
3307 | { |
3308 | __this_cpu_dec(softnet_data.xmit.recursion); |
3309 | } |
3310 | |
3311 | void __netif_schedule(struct Qdisc *q); |
3312 | void netif_schedule_queue(struct netdev_queue *txq); |
3313 | |
3314 | static inline void netif_tx_schedule_all(struct net_device *dev) |
3315 | { |
3316 | unsigned int i; |
3317 | |
3318 | for (i = 0; i < dev->num_tx_queues; i++) |
		netif_schedule_queue(netdev_get_tx_queue(dev, i));
3320 | } |
3321 | |
3322 | static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue) |
3323 | { |
	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3325 | } |
3326 | |
3327 | /** |
3328 | * netif_start_queue - allow transmit |
3329 | * @dev: network device |
3330 | * |
3331 | * Allow upper layers to call the device hard_start_xmit routine. |
3332 | */ |
3333 | static inline void netif_start_queue(struct net_device *dev) |
3334 | { |
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
3336 | } |
3337 | |
3338 | static inline void netif_tx_start_all_queues(struct net_device *dev) |
3339 | { |
3340 | unsigned int i; |
3341 | |
3342 | for (i = 0; i < dev->num_tx_queues; i++) { |
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_start_queue(txq);
3345 | } |
3346 | } |
3347 | |
3348 | void netif_tx_wake_queue(struct netdev_queue *dev_queue); |
3349 | |
3350 | /** |
3351 | * netif_wake_queue - restart transmit |
3352 | * @dev: network device |
3353 | * |
3354 | * Allow upper layers to call the device hard_start_xmit routine. |
3355 | * Used for flow control when transmit resources are available. |
3356 | */ |
3357 | static inline void netif_wake_queue(struct net_device *dev) |
3358 | { |
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
3360 | } |
3361 | |
3362 | static inline void netif_tx_wake_all_queues(struct net_device *dev) |
3363 | { |
3364 | unsigned int i; |
3365 | |
3366 | for (i = 0; i < dev->num_tx_queues; i++) { |
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_wake_queue(txq);
3369 | } |
3370 | } |
3371 | |
3372 | static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) |
3373 | { |
3374 | /* Must be an atomic op see netif_txq_try_stop() */ |
	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
3376 | } |
3377 | |
3378 | /** |
 * netif_stop_queue - stop the transmit queue
3380 | * @dev: network device |
3381 | * |
3382 | * Stop upper layers calling the device hard_start_xmit routine. |
3383 | * Used for flow control when transmit resources are unavailable. |
3384 | */ |
3385 | static inline void netif_stop_queue(struct net_device *dev) |
3386 | { |
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
3388 | } |
3389 | |
3390 | void netif_tx_stop_all_queues(struct net_device *dev); |
3391 | |
3392 | static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue) |
3393 | { |
3394 | return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); |
3395 | } |
3396 | |
3397 | /** |
 * netif_queue_stopped - test if transmit queue is flow-blocked
3399 | * @dev: network device |
3400 | * |
3401 | * Test if transmit queue on device is currently unable to send. |
3402 | */ |
3403 | static inline bool netif_queue_stopped(const struct net_device *dev) |
3404 | { |
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
3406 | } |
3407 | |
3408 | static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue) |
3409 | { |
3410 | return dev_queue->state & QUEUE_STATE_ANY_XOFF; |
3411 | } |
3412 | |
3413 | static inline bool |
3414 | netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue) |
3415 | { |
3416 | return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN; |
3417 | } |
3418 | |
3419 | static inline bool |
3420 | netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue) |
3421 | { |
3422 | return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN; |
3423 | } |
3424 | |
3425 | /** |
3426 | * netdev_queue_set_dql_min_limit - set dql minimum limit |
3427 | * @dev_queue: pointer to transmit queue |
3428 | * @min_limit: dql minimum limit |
3429 | * |
3430 | * Forces xmit_more() to return true until the minimum threshold |
3431 | * defined by @min_limit is reached (or until the tx queue is |
 * empty). Warning: use with care, as misuse will impact latency.
3434 | */ |
3435 | static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue, |
3436 | unsigned int min_limit) |
3437 | { |
3438 | #ifdef CONFIG_BQL |
3439 | dev_queue->dql.min_limit = min_limit; |
3440 | #endif |
3441 | } |
3442 | |
3443 | /** |
3444 | * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write |
3445 | * @dev_queue: pointer to transmit queue |
3446 | * |
3447 | * BQL enabled drivers might use this helper in their ndo_start_xmit(), |
3448 | * to give appropriate hint to the CPU. |
3449 | */ |
3450 | static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue) |
3451 | { |
3452 | #ifdef CONFIG_BQL |
	prefetchw(&dev_queue->dql.num_queued);
3454 | #endif |
3455 | } |
3456 | |
3457 | /** |
3458 | * netdev_txq_bql_complete_prefetchw - prefetch bql data for write |
3459 | * @dev_queue: pointer to transmit queue |
3460 | * |
3461 | * BQL enabled drivers might use this helper in their TX completion path, |
3462 | * to give appropriate hint to the CPU. |
3463 | */ |
3464 | static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue) |
3465 | { |
3466 | #ifdef CONFIG_BQL |
	prefetchw(&dev_queue->dql.limit);
3468 | #endif |
3469 | } |
3470 | |
3471 | /** |
3472 | * netdev_tx_sent_queue - report the number of bytes queued to a given tx queue |
3473 | * @dev_queue: network device queue |
3474 | * @bytes: number of bytes queued to the device queue |
3475 | * |
3476 | * Report the number of bytes queued for sending/completion to the network |
3477 | * device hardware queue. @bytes should be a good approximation and should |
3478 | * exactly match netdev_completed_queue() @bytes. |
3479 | * This is typically called once per packet, from ndo_start_xmit(). |
3480 | */ |
3481 | static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, |
3482 | unsigned int bytes) |
3483 | { |
3484 | #ifdef CONFIG_BQL |
	dql_queued(&dev_queue->dql, bytes);
3486 | |
3487 | if (likely(dql_avail(&dev_queue->dql) >= 0)) |
3488 | return; |
3489 | |
	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
3491 | |
3492 | /* |
3493 | * The XOFF flag must be set before checking the dql_avail below, |
3494 | * because in netdev_tx_completed_queue we update the dql_completed |
3495 | * before checking the XOFF flag. |
3496 | */ |
3497 | smp_mb(); |
3498 | |
3499 | /* check again in case another CPU has just made room avail */ |
3500 | if (unlikely(dql_avail(&dev_queue->dql) >= 0)) |
		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
3502 | #endif |
3503 | } |
3504 | |
3505 | /* Variant of netdev_tx_sent_queue() for drivers that are aware |
3506 | * that they should not test BQL status themselves. |
3507 | * We do want to change __QUEUE_STATE_STACK_XOFF only for the last |
3508 | * skb of a batch. |
3509 | * Returns true if the doorbell must be used to kick the NIC. |
3510 | */ |
3511 | static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue, |
3512 | unsigned int bytes, |
3513 | bool xmit_more) |
3514 | { |
3515 | if (xmit_more) { |
3516 | #ifdef CONFIG_BQL |
		dql_queued(&dev_queue->dql, bytes);
3518 | #endif |
3519 | return netif_tx_queue_stopped(dev_queue); |
3520 | } |
3521 | netdev_tx_sent_queue(dev_queue, bytes); |
3522 | return true; |
3523 | } |
3524 | |
3525 | /** |
3526 | * netdev_sent_queue - report the number of bytes queued to hardware |
3527 | * @dev: network device |
3528 | * @bytes: number of bytes queued to the hardware device queue |
3529 | * |
3530 | * Report the number of bytes queued for sending/completion to the network |
 * device hardware queue #0. @bytes should be a good approximation and should
3532 | * exactly match netdev_completed_queue() @bytes. |
3533 | * This is typically called once per packet, from ndo_start_xmit(). |
3534 | */ |
3535 | static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) |
3536 | { |
	netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
3538 | } |
3539 | |
3540 | static inline bool __netdev_sent_queue(struct net_device *dev, |
3541 | unsigned int bytes, |
3542 | bool xmit_more) |
3543 | { |
	return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes,
3545 | xmit_more); |
3546 | } |
3547 | |
3548 | /** |
3549 | * netdev_tx_completed_queue - report number of packets/bytes at TX completion. |
3550 | * @dev_queue: network device queue |
3551 | * @pkts: number of packets (currently ignored) |
3552 | * @bytes: number of bytes dequeued from the device queue |
3553 | * |
3554 | * Must be called at most once per TX completion round (and not per |
3555 | * individual packet), so that BQL can adjust its limits appropriately. |
3556 | */ |
3557 | static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, |
3558 | unsigned int pkts, unsigned int bytes) |
3559 | { |
3560 | #ifdef CONFIG_BQL |
3561 | if (unlikely(!bytes)) |
3562 | return; |
3563 | |
	dql_completed(&dev_queue->dql, bytes);
3565 | |
3566 | /* |
	 * Without the memory barrier there is a small possibility that
3568 | * netdev_tx_sent_queue will miss the update and cause the queue to |
3569 | * be stopped forever |
3570 | */ |
3571 | smp_mb(); /* NOTE: netdev_txq_completed_mb() assumes this exists */ |
3572 | |
3573 | if (unlikely(dql_avail(&dev_queue->dql) < 0)) |
3574 | return; |
3575 | |
	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
		netif_schedule_queue(dev_queue);
3578 | #endif |
3579 | } |
3580 | |
3581 | /** |
3582 | * netdev_completed_queue - report bytes and packets completed by device |
3583 | * @dev: network device |
3584 | * @pkts: actual number of packets sent over the medium |
3585 | * @bytes: actual number of bytes sent over the medium |
3586 | * |
3587 | * Report the number of bytes and packets transmitted by the network device |
 * hardware queue over the physical medium; @bytes must exactly match the
 * @bytes amount passed to netdev_sent_queue().
3590 | */ |
3591 | static inline void netdev_completed_queue(struct net_device *dev, |
3592 | unsigned int pkts, unsigned int bytes) |
3593 | { |
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
3595 | } |
3596 | |
3597 | static inline void netdev_tx_reset_queue(struct netdev_queue *q) |
3598 | { |
3599 | #ifdef CONFIG_BQL |
	clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
	dql_reset(&q->dql);
3602 | #endif |
3603 | } |
3604 | |
3605 | /** |
3606 | * netdev_reset_queue - reset the packets and bytes count of a network device |
3607 | * @dev_queue: network device |
3608 | * |
3609 | * Reset the bytes and packet count of a network device and clear the |
3610 | * software flow control OFF bit for this network device |
3611 | */ |
3612 | static inline void netdev_reset_queue(struct net_device *dev_queue) |
3613 | { |
	netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
3615 | } |
3616 | |
3617 | /** |
3618 | * netdev_cap_txqueue - check if selected tx queue exceeds device queues |
3619 | * @dev: network device |
3620 | * @queue_index: given tx queue index |
3621 | * |
3622 | * Returns 0 if given tx queue index >= number of device tx queues, |
3623 | * otherwise returns the originally passed tx queue index. |
3624 | */ |
3625 | static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index) |
3626 | { |
3627 | if (unlikely(queue_index >= dev->real_num_tx_queues)) { |
		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
3629 | dev->name, queue_index, |
3630 | dev->real_num_tx_queues); |
3631 | return 0; |
3632 | } |
3633 | |
3634 | return queue_index; |
3635 | } |
3636 | |
3637 | /** |
3638 | * netif_running - test if up |
3639 | * @dev: network device |
3640 | * |
3641 | * Test if the device has been brought up. |
3642 | */ |
3643 | static inline bool netif_running(const struct net_device *dev) |
3644 | { |
3645 | return test_bit(__LINK_STATE_START, &dev->state); |
3646 | } |
3647 | |
3648 | /* |
3649 | * Routines to manage the subqueues on a device. We only need start, |
3650 | * stop, and a check if it's stopped. All other device management is |
3651 | * done at the overall netdevice level. |
 * There is also a helper to test whether the device is multiqueue.
3653 | */ |
3654 | |
3655 | /** |
3656 | * netif_start_subqueue - allow sending packets on subqueue |
3657 | * @dev: network device |
3658 | * @queue_index: sub queue index |
3659 | * |
3660 | * Start individual transmit queue of a device with multiple transmit queues. |
3661 | */ |
3662 | static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) |
3663 | { |
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	netif_tx_start_queue(txq);
3667 | } |
3668 | |
3669 | /** |
3670 | * netif_stop_subqueue - stop sending packets on subqueue |
3671 | * @dev: network device |
3672 | * @queue_index: sub queue index |
3673 | * |
3674 | * Stop individual transmit queue of a device with multiple transmit queues. |
3675 | */ |
3676 | static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) |
3677 | { |
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
	netif_tx_stop_queue(txq);
3680 | } |
3681 | |
3682 | /** |
3683 | * __netif_subqueue_stopped - test status of subqueue |
3684 | * @dev: network device |
3685 | * @queue_index: sub queue index |
3686 | * |
3687 | * Check individual transmit queue of a device with multiple transmit queues. |
3688 | */ |
3689 | static inline bool __netif_subqueue_stopped(const struct net_device *dev, |
3690 | u16 queue_index) |
3691 | { |
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	return netif_tx_queue_stopped(txq);
3695 | } |
3696 | |
3697 | /** |
3698 | * netif_subqueue_stopped - test status of subqueue |
3699 | * @dev: network device |
3700 | * @skb: sub queue buffer pointer |
3701 | * |
3702 | * Check individual transmit queue of a device with multiple transmit queues. |
3703 | */ |
3704 | static inline bool netif_subqueue_stopped(const struct net_device *dev, |
3705 | struct sk_buff *skb) |
3706 | { |
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
3708 | } |
3709 | |
3710 | /** |
3711 | * netif_wake_subqueue - allow sending packets on subqueue |
3712 | * @dev: network device |
3713 | * @queue_index: sub queue index |
3714 | * |
3715 | * Resume individual transmit queue of a device with multiple transmit queues. |
3716 | */ |
3717 | static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) |
3718 | { |
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	netif_tx_wake_queue(txq);
3722 | } |
3723 | |
3724 | #ifdef CONFIG_XPS |
3725 | int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, |
3726 | u16 index); |
3727 | int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, |
3728 | u16 index, enum xps_map_type type); |
3729 | |
3730 | /** |
3731 | * netif_attr_test_mask - Test a CPU or Rx queue set in a mask |
3732 | * @j: CPU/Rx queue index |
3733 | * @mask: bitmask of all cpus/rx queues |
3734 | * @nr_bits: number of bits in the bitmask |
3735 | * |
3736 | * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues. |
3737 | */ |
3738 | static inline bool netif_attr_test_mask(unsigned long j, |
3739 | const unsigned long *mask, |
3740 | unsigned int nr_bits) |
3741 | { |
	cpu_max_bits_warn(j, nr_bits);
3743 | return test_bit(j, mask); |
3744 | } |
3745 | |
3746 | /** |
3747 | * netif_attr_test_online - Test for online CPU/Rx queue |
3748 | * @j: CPU/Rx queue index |
3749 | * @online_mask: bitmask for CPUs/Rx queues that are online |
3750 | * @nr_bits: number of bits in the bitmask |
3751 | * |
3752 | * Returns true if a CPU/Rx queue is online. |
3753 | */ |
3754 | static inline bool netif_attr_test_online(unsigned long j, |
3755 | const unsigned long *online_mask, |
3756 | unsigned int nr_bits) |
3757 | { |
	cpu_max_bits_warn(j, nr_bits);
3759 | |
3760 | if (online_mask) |
3761 | return test_bit(j, online_mask); |
3762 | |
3763 | return (j < nr_bits); |
3764 | } |
3765 | |
3766 | /** |
3767 | * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask |
3768 | * @n: CPU/Rx queue index |
3769 | * @srcp: the cpumask/Rx queue mask pointer |
3770 | * @nr_bits: number of bits in the bitmask |
3771 | * |
3772 | * Returns >= nr_bits if no further CPUs/Rx queues set. |
3773 | */ |
3774 | static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp, |
3775 | unsigned int nr_bits) |
3776 | { |
3777 | /* -1 is a legal arg here. */ |
3778 | if (n != -1) |
		cpu_max_bits_warn(n, nr_bits);
3780 | |
3781 | if (srcp) |
		return find_next_bit(srcp, nr_bits, n + 1);
3783 | |
3784 | return n + 1; |
3785 | } |
3786 | |
3787 | /** |
3788 | * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p |
3789 | * @n: CPU/Rx queue index |
3790 | * @src1p: the first CPUs/Rx queues mask pointer |
3791 | * @src2p: the second CPUs/Rx queues mask pointer |
3792 | * @nr_bits: number of bits in the bitmask |
3793 | * |
3794 | * Returns >= nr_bits if no further CPUs/Rx queues set in both. |
3795 | */ |
3796 | static inline int netif_attrmask_next_and(int n, const unsigned long *src1p, |
3797 | const unsigned long *src2p, |
3798 | unsigned int nr_bits) |
3799 | { |
3800 | /* -1 is a legal arg here. */ |
3801 | if (n != -1) |
		cpu_max_bits_warn(n, nr_bits);
3803 | |
3804 | if (src1p && src2p) |
		return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
	else if (src1p)
		return find_next_bit(src1p, nr_bits, n + 1);
	else if (src2p)
		return find_next_bit(src2p, nr_bits, n + 1);
3810 | |
3811 | return n + 1; |
3812 | } |
3813 | #else |
3814 | static inline int netif_set_xps_queue(struct net_device *dev, |
3815 | const struct cpumask *mask, |
3816 | u16 index) |
3817 | { |
3818 | return 0; |
3819 | } |
3820 | |
3821 | static inline int __netif_set_xps_queue(struct net_device *dev, |
3822 | const unsigned long *mask, |
3823 | u16 index, enum xps_map_type type) |
3824 | { |
3825 | return 0; |
3826 | } |
3827 | #endif |
3828 | |
3829 | /** |
3830 | * netif_is_multiqueue - test if device has multiple transmit queues |
3831 | * @dev: network device |
3832 | * |
3833 | * Check if device has multiple transmit queues |
3834 | */ |
3835 | static inline bool netif_is_multiqueue(const struct net_device *dev) |
3836 | { |
3837 | return dev->num_tx_queues > 1; |
3838 | } |
3839 | |
3840 | int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq); |
3841 | |
3842 | #ifdef CONFIG_SYSFS |
3843 | int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq); |
3844 | #else |
3845 | static inline int netif_set_real_num_rx_queues(struct net_device *dev, |
3846 | unsigned int rxqs) |
3847 | { |
3848 | dev->real_num_rx_queues = rxqs; |
3849 | return 0; |
3850 | } |
3851 | #endif |
3852 | int netif_set_real_num_queues(struct net_device *dev, |
3853 | unsigned int txq, unsigned int rxq); |
3854 | |
int netif_get_num_default_rss_queues(void);
3856 | |
3857 | void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason); |
3858 | void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason); |
3859 | |
3860 | /* |
3861 | * It is not allowed to call kfree_skb() or consume_skb() from hardware |
3862 | * interrupt context or with hardware interrupts being disabled. |
3863 | * (in_hardirq() || irqs_disabled()) |
3864 | * |
 * We provide four helpers that can be used in the following contexts:
3866 | * |
3867 | * dev_kfree_skb_irq(skb) when caller drops a packet from irq context, |
3868 | * replacing kfree_skb(skb) |
3869 | * |
3870 | * dev_consume_skb_irq(skb) when caller consumes a packet from irq context. |
3871 | * Typically used in place of consume_skb(skb) in TX completion path |
3872 | * |
3873 | * dev_kfree_skb_any(skb) when caller doesn't know its current irq context, |
3874 | * replacing kfree_skb(skb) |
3875 | * |
3876 | * dev_consume_skb_any(skb) when caller doesn't know its current irq context, |
3877 | * and consumed a packet. Used in place of consume_skb(skb) |
3878 | */ |
3879 | static inline void dev_kfree_skb_irq(struct sk_buff *skb) |
3880 | { |
	dev_kfree_skb_irq_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
3882 | } |
3883 | |
3884 | static inline void dev_consume_skb_irq(struct sk_buff *skb) |
3885 | { |
	dev_kfree_skb_irq_reason(skb, SKB_CONSUMED);
3887 | } |
3888 | |
3889 | static inline void dev_kfree_skb_any(struct sk_buff *skb) |
3890 | { |
	dev_kfree_skb_any_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
3892 | } |
3893 | |
3894 | static inline void dev_consume_skb_any(struct sk_buff *skb) |
3895 | { |
	dev_kfree_skb_any_reason(skb, SKB_CONSUMED);
3897 | } |
3898 | |
3899 | u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp, |
3900 | struct bpf_prog *xdp_prog); |
3901 | void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog); |
3902 | int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb); |
3903 | int netif_rx(struct sk_buff *skb); |
3904 | int __netif_rx(struct sk_buff *skb); |
3905 | |
3906 | int netif_receive_skb(struct sk_buff *skb); |
3907 | int netif_receive_skb_core(struct sk_buff *skb); |
3908 | void netif_receive_skb_list_internal(struct list_head *head); |
3909 | void netif_receive_skb_list(struct list_head *head); |
3910 | gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); |
3911 | void napi_gro_flush(struct napi_struct *napi, bool flush_old); |
3912 | struct sk_buff *napi_get_frags(struct napi_struct *napi); |
3913 | void napi_get_frags_check(struct napi_struct *napi); |
3914 | gro_result_t napi_gro_frags(struct napi_struct *napi); |
3915 | struct packet_offload *gro_find_receive_by_type(__be16 type); |
3916 | struct packet_offload *gro_find_complete_by_type(__be16 type); |
3917 | |
3918 | static inline void napi_free_frags(struct napi_struct *napi) |
3919 | { |
	kfree_skb(napi->skb);
3921 | napi->skb = NULL; |
3922 | } |
3923 | |
3924 | bool netdev_is_rx_handler_busy(struct net_device *dev); |
3925 | int netdev_rx_handler_register(struct net_device *dev, |
3926 | rx_handler_func_t *rx_handler, |
3927 | void *rx_handler_data); |
3928 | void netdev_rx_handler_unregister(struct net_device *dev); |
3929 | |
3930 | bool dev_valid_name(const char *name); |
3931 | static inline bool is_socket_ioctl_cmd(unsigned int cmd) |
3932 | { |
3933 | return _IOC_TYPE(cmd) == SOCK_IOC_TYPE; |
3934 | } |
3935 | int get_user_ifreq(struct ifreq *ifr, void __user **ifrdata, void __user *arg); |
3936 | int put_user_ifreq(struct ifreq *ifr, void __user *arg); |
3937 | int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, |
3938 | void __user *data, bool *need_copyout); |
3939 | int dev_ifconf(struct net *net, struct ifconf __user *ifc); |
3940 | int generic_hwtstamp_get_lower(struct net_device *dev, |
3941 | struct kernel_hwtstamp_config *kernel_cfg); |
3942 | int generic_hwtstamp_set_lower(struct net_device *dev, |
3943 | struct kernel_hwtstamp_config *kernel_cfg, |
3944 | struct netlink_ext_ack *extack); |
3945 | int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata); |
3946 | unsigned int dev_get_flags(const struct net_device *); |
3947 | int __dev_change_flags(struct net_device *dev, unsigned int flags, |
3948 | struct netlink_ext_ack *extack); |
3949 | int dev_change_flags(struct net_device *dev, unsigned int flags, |
3950 | struct netlink_ext_ack *extack); |
3951 | int dev_set_alias(struct net_device *, const char *, size_t); |
3952 | int dev_get_alias(const struct net_device *, char *, size_t); |
3953 | int __dev_change_net_namespace(struct net_device *dev, struct net *net, |
3954 | const char *pat, int new_ifindex); |
3955 | static inline |
3956 | int dev_change_net_namespace(struct net_device *dev, struct net *net, |
3957 | const char *pat) |
3958 | { |
	return __dev_change_net_namespace(dev, net, pat, 0);
3960 | } |
3961 | int __dev_set_mtu(struct net_device *, int); |
3962 | int dev_set_mtu(struct net_device *, int); |
3963 | int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, |
3964 | struct netlink_ext_ack *extack); |
3965 | int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, |
3966 | struct netlink_ext_ack *extack); |
3967 | int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, |
3968 | struct netlink_ext_ack *extack); |
3969 | int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name); |
3970 | int dev_get_port_parent_id(struct net_device *dev, |
3971 | struct netdev_phys_item_id *ppid, bool recurse); |
3972 | bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b); |
3973 | void netdev_dpll_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin); |
3974 | void netdev_dpll_pin_clear(struct net_device *dev); |
3975 | |
3976 | static inline struct dpll_pin *netdev_dpll_pin(const struct net_device *dev) |
3977 | { |
3978 | #if IS_ENABLED(CONFIG_DPLL) |
3979 | return dev->dpll_pin; |
3980 | #else |
3981 | return NULL; |
3982 | #endif |
3983 | } |
3984 | |
3985 | struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); |
3986 | struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, |
3987 | struct netdev_queue *txq, int *ret); |
3988 | |
3989 | int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); |
3990 | u8 dev_xdp_prog_count(struct net_device *dev); |
3991 | u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); |
3992 | |
3993 | int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); |
3994 | int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); |
3995 | int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb); |
3996 | bool is_skb_forwardable(const struct net_device *dev, |
3997 | const struct sk_buff *skb); |
3998 | |
3999 | static __always_inline bool __is_skb_forwardable(const struct net_device *dev, |
4000 | const struct sk_buff *skb, |
4001 | const bool check_mtu) |
4002 | { |
4003 | const u32 vlan_hdr_len = 4; /* VLAN_HLEN */ |
4004 | unsigned int len; |
4005 | |
4006 | if (!(dev->flags & IFF_UP)) |
4007 | return false; |
4008 | |
4009 | if (!check_mtu) |
4010 | return true; |
4011 | |
4012 | len = dev->mtu + dev->hard_header_len + vlan_hdr_len; |
4013 | if (skb->len <= len) |
4014 | return true; |
4015 | |
4016 | /* if TSO is enabled, we don't care about the length as the packet |
4017 | * could be forwarded without being segmented before |
4018 | */ |
4019 | if (skb_is_gso(skb)) |
4020 | return true; |
4021 | |
4022 | return false; |
4023 | } |
4024 | |
4025 | void netdev_core_stats_inc(struct net_device *dev, u32 offset); |
4026 | |
4027 | #define DEV_CORE_STATS_INC(FIELD) \ |
4028 | static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \ |
4029 | { \ |
4030 | netdev_core_stats_inc(dev, \ |
4031 | offsetof(struct net_device_core_stats, FIELD)); \ |
4032 | } |
4033 | DEV_CORE_STATS_INC(rx_dropped) |
4034 | DEV_CORE_STATS_INC(tx_dropped) |
4035 | DEV_CORE_STATS_INC(rx_nohandler) |
4036 | DEV_CORE_STATS_INC(rx_otherhost_dropped) |
4037 | #undef DEV_CORE_STATS_INC |
4038 | |
4039 | static __always_inline int ____dev_forward_skb(struct net_device *dev, |
4040 | struct sk_buff *skb, |
4041 | const bool check_mtu) |
4042 | { |
4043 | if (skb_orphan_frags(skb, GFP_ATOMIC) || |
4044 | unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) { |
4045 | dev_core_stats_rx_dropped_inc(dev); |
4046 | kfree_skb(skb); |
4047 | return NET_RX_DROP; |
4048 | } |
4049 | |
	skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));
4051 | skb->priority = 0; |
4052 | return 0; |
4053 | } |
4054 | |
4055 | bool dev_nit_active(struct net_device *dev); |
4056 | void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); |
4057 | |
4058 | static inline void __dev_put(struct net_device *dev) |
4059 | { |
4060 | if (dev) { |
4061 | #ifdef CONFIG_PCPU_DEV_REFCNT |
4062 | this_cpu_dec(*dev->pcpu_refcnt); |
4063 | #else |
4064 | refcount_dec(&dev->dev_refcnt); |
4065 | #endif |
4066 | } |
4067 | } |
4068 | |
4069 | static inline void __dev_hold(struct net_device *dev) |
4070 | { |
4071 | if (dev) { |
4072 | #ifdef CONFIG_PCPU_DEV_REFCNT |
4073 | this_cpu_inc(*dev->pcpu_refcnt); |
4074 | #else |
4075 | refcount_inc(&dev->dev_refcnt); |
4076 | #endif |
4077 | } |
4078 | } |
4079 | |
4080 | static inline void __netdev_tracker_alloc(struct net_device *dev, |
4081 | netdevice_tracker *tracker, |
4082 | gfp_t gfp) |
4083 | { |
4084 | #ifdef CONFIG_NET_DEV_REFCNT_TRACKER |
	ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp);
4086 | #endif |
4087 | } |
4088 | |
4089 | /* netdev_tracker_alloc() can upgrade a prior untracked reference |
4090 | * taken by dev_get_by_name()/dev_get_by_index() to a tracked one. |
4091 | */ |
4092 | static inline void netdev_tracker_alloc(struct net_device *dev, |
4093 | netdevice_tracker *tracker, gfp_t gfp) |
4094 | { |
4095 | #ifdef CONFIG_NET_DEV_REFCNT_TRACKER |
	refcount_dec(&dev->refcnt_tracker.no_tracker);
4097 | __netdev_tracker_alloc(dev, tracker, gfp); |
4098 | #endif |
4099 | } |
4100 | |
4101 | static inline void netdev_tracker_free(struct net_device *dev, |
4102 | netdevice_tracker *tracker) |
4103 | { |
4104 | #ifdef CONFIG_NET_DEV_REFCNT_TRACKER |
	ref_tracker_free(&dev->refcnt_tracker, tracker);
4106 | #endif |
4107 | } |
4108 | |
4109 | static inline void netdev_hold(struct net_device *dev, |
4110 | netdevice_tracker *tracker, gfp_t gfp) |
4111 | { |
4112 | if (dev) { |
4113 | __dev_hold(dev); |
4114 | __netdev_tracker_alloc(dev, tracker, gfp); |
4115 | } |
4116 | } |
4117 | |
4118 | static inline void netdev_put(struct net_device *dev, |
4119 | netdevice_tracker *tracker) |
4120 | { |
4121 | if (dev) { |
4122 | netdev_tracker_free(dev, tracker); |
4123 | __dev_put(dev); |
4124 | } |
4125 | } |
4126 | |
4127 | /** |
4128 | * dev_hold - get reference to device |
4129 | * @dev: network device |
4130 | * |
4131 | * Hold reference to device to keep it from being freed. |
4132 | * Try using netdev_hold() instead. |
4133 | */ |
4134 | static inline void dev_hold(struct net_device *dev) |
4135 | { |
4136 | netdev_hold(dev, NULL, GFP_ATOMIC); |
4137 | } |
4138 | |
4139 | /** |
4140 | * dev_put - release reference to device |
4141 | * @dev: network device |
4142 | * |
4143 | * Release reference to device to allow it to be freed. |
4144 | * Try using netdev_put() instead. |
4145 | */ |
4146 | static inline void dev_put(struct net_device *dev) |
4147 | { |
4148 | netdev_put(dev, NULL); |
4149 | } |
4150 | |
4151 | static inline void netdev_ref_replace(struct net_device *odev, |
4152 | struct net_device *ndev, |
4153 | netdevice_tracker *tracker, |
4154 | gfp_t gfp) |
4155 | { |
4156 | if (odev) |
		netdev_tracker_free(odev, tracker);
4158 | |
	__dev_hold(ndev);
	__dev_put(odev);
4161 | |
4162 | if (ndev) |
		__netdev_tracker_alloc(ndev, tracker, gfp);
4164 | } |
4165 | |
4166 | /* Carrier loss detection, dial on demand. The functions netif_carrier_on |
 * and _off may be called from IRQ context, but it is the caller
4168 | * who is responsible for serialization of these calls. |
4169 | * |
4170 | * The name carrier is inappropriate, these functions should really be |
4171 | * called netif_lowerlayer_*() because they represent the state of any |
4172 | * kind of lower layer not just hardware media. |
4173 | */ |
4174 | void linkwatch_fire_event(struct net_device *dev); |
4175 | |
4176 | /** |
4177 | * netif_carrier_ok - test if carrier present |
4178 | * @dev: network device |
4179 | * |
4180 | * Check if carrier is present on device |
4181 | */ |
4182 | static inline bool netif_carrier_ok(const struct net_device *dev) |
4183 | { |
4184 | return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); |
4185 | } |
4186 | |
4187 | unsigned long dev_trans_start(struct net_device *dev); |
4188 | |
4189 | void __netdev_watchdog_up(struct net_device *dev); |
4190 | |
4191 | void netif_carrier_on(struct net_device *dev); |
4192 | void netif_carrier_off(struct net_device *dev); |
4193 | void netif_carrier_event(struct net_device *dev); |
4194 | |
4195 | /** |
4196 | * netif_dormant_on - mark device as dormant. |
4197 | * @dev: network device |
4198 | * |
4199 | * Mark device as dormant (as per RFC2863). |
4200 | * |
4201 | * The dormant state indicates that the relevant interface is not |
4202 | * actually in a condition to pass packets (i.e., it is not 'up') but is |
4203 | * in a "pending" state, waiting for some external event. For "on- |
4204 | * demand" interfaces, this new state identifies the situation where the |
4205 | * interface is waiting for events to place it in the up state. |
4206 | */ |
4207 | static inline void netif_dormant_on(struct net_device *dev) |
4208 | { |
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
4210 | linkwatch_fire_event(dev); |
4211 | } |
4212 | |
4213 | /** |
4214 | * netif_dormant_off - set device as not dormant. |
4215 | * @dev: network device |
4216 | * |
4217 | * Device is not in dormant state. |
4218 | */ |
4219 | static inline void netif_dormant_off(struct net_device *dev) |
4220 | { |
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
4222 | linkwatch_fire_event(dev); |
4223 | } |
4224 | |
4225 | /** |
4226 | * netif_dormant - test if device is dormant |
4227 | * @dev: network device |
4228 | * |
4229 | * Check if device is dormant. |
4230 | */ |
4231 | static inline bool netif_dormant(const struct net_device *dev) |
4232 | { |
4233 | return test_bit(__LINK_STATE_DORMANT, &dev->state); |
4234 | } |
4235 | |
4236 | |
4237 | /** |
4238 | * netif_testing_on - mark device as under test. |
4239 | * @dev: network device |
4240 | * |
4241 | * Mark device as under test (as per RFC2863). |
4242 | * |
4243 | * The testing state indicates that some test(s) must be performed on |
 * the interface. After completion of the test, the interface state
4245 | * will change to up, dormant, or down, as appropriate. |
4246 | */ |
4247 | static inline void netif_testing_on(struct net_device *dev) |
4248 | { |
	if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state))
4250 | linkwatch_fire_event(dev); |
4251 | } |
4252 | |
4253 | /** |
4254 | * netif_testing_off - set device as not under test. |
4255 | * @dev: network device |
4256 | * |
4257 | * Device is not in testing state. |
4258 | */ |
4259 | static inline void netif_testing_off(struct net_device *dev) |
4260 | { |
	if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state))
4262 | linkwatch_fire_event(dev); |
4263 | } |
4264 | |
4265 | /** |
4266 | * netif_testing - test if device is under test |
4267 | * @dev: network device |
4268 | * |
4269 | * Check if device is under test |
4270 | */ |
4271 | static inline bool netif_testing(const struct net_device *dev) |
4272 | { |
4273 | return test_bit(__LINK_STATE_TESTING, &dev->state); |
4274 | } |
4275 | |
4276 | |
4277 | /** |
4278 | * netif_oper_up - test if device is operational |
4279 | * @dev: network device |
4280 | * |
4281 | * Check if carrier is operational |
4282 | */ |
4283 | static inline bool netif_oper_up(const struct net_device *dev) |
4284 | { |
4285 | return (dev->operstate == IF_OPER_UP || |
4286 | dev->operstate == IF_OPER_UNKNOWN /* backward compat */); |
4287 | } |
4288 | |
4289 | /** |
4290 | * netif_device_present - is device available or removed |
4291 | * @dev: network device |
4292 | * |
4293 | * Check if device has not been removed from system. |
4294 | */ |
4295 | static inline bool netif_device_present(const struct net_device *dev) |
4296 | { |
4297 | return test_bit(__LINK_STATE_PRESENT, &dev->state); |
4298 | } |
4299 | |
4300 | void netif_device_detach(struct net_device *dev); |
4301 | |
4302 | void netif_device_attach(struct net_device *dev); |
4303 | |
4304 | /* |
4305 | * Network interface message level settings |
4306 | */ |
4307 | |
4308 | enum { |
4309 | NETIF_MSG_DRV_BIT, |
4310 | NETIF_MSG_PROBE_BIT, |
4311 | NETIF_MSG_LINK_BIT, |
4312 | NETIF_MSG_TIMER_BIT, |
4313 | NETIF_MSG_IFDOWN_BIT, |
4314 | NETIF_MSG_IFUP_BIT, |
4315 | NETIF_MSG_RX_ERR_BIT, |
4316 | NETIF_MSG_TX_ERR_BIT, |
4317 | NETIF_MSG_TX_QUEUED_BIT, |
4318 | NETIF_MSG_INTR_BIT, |
4319 | NETIF_MSG_TX_DONE_BIT, |
4320 | NETIF_MSG_RX_STATUS_BIT, |
4321 | NETIF_MSG_PKTDATA_BIT, |
4322 | NETIF_MSG_HW_BIT, |
4323 | NETIF_MSG_WOL_BIT, |
4324 | |
4325 | /* When you add a new bit above, update netif_msg_class_names array |
4326 | * in net/ethtool/common.c |
4327 | */ |
4328 | NETIF_MSG_CLASS_COUNT, |
4329 | }; |
4330 | /* Both ethtool_ops interface and internal driver implementation use u32 */ |
4331 | static_assert(NETIF_MSG_CLASS_COUNT <= 32); |
4332 | |
4333 | #define __NETIF_MSG_BIT(bit) ((u32)1 << (bit)) |
4334 | #define __NETIF_MSG(name) __NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT) |
4335 | |
4336 | #define NETIF_MSG_DRV __NETIF_MSG(DRV) |
4337 | #define NETIF_MSG_PROBE __NETIF_MSG(PROBE) |
4338 | #define NETIF_MSG_LINK __NETIF_MSG(LINK) |
4339 | #define NETIF_MSG_TIMER __NETIF_MSG(TIMER) |
4340 | #define NETIF_MSG_IFDOWN __NETIF_MSG(IFDOWN) |
4341 | #define NETIF_MSG_IFUP __NETIF_MSG(IFUP) |
4342 | #define NETIF_MSG_RX_ERR __NETIF_MSG(RX_ERR) |
4343 | #define NETIF_MSG_TX_ERR __NETIF_MSG(TX_ERR) |
4344 | #define NETIF_MSG_TX_QUEUED __NETIF_MSG(TX_QUEUED) |
4345 | #define NETIF_MSG_INTR __NETIF_MSG(INTR) |
4346 | #define NETIF_MSG_TX_DONE __NETIF_MSG(TX_DONE) |
4347 | #define NETIF_MSG_RX_STATUS __NETIF_MSG(RX_STATUS) |
4348 | #define NETIF_MSG_PKTDATA __NETIF_MSG(PKTDATA) |
4349 | #define NETIF_MSG_HW __NETIF_MSG(HW) |
4350 | #define NETIF_MSG_WOL __NETIF_MSG(WOL) |
4351 | |
4352 | #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) |
4353 | #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) |
4354 | #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) |
4355 | #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) |
4356 | #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) |
4357 | #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) |
4358 | #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) |
4359 | #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) |
4360 | #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) |
4361 | #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) |
4362 | #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) |
4363 | #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) |
4364 | #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) |
4365 | #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) |
4366 | #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) |
4367 | |
4368 | static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) |
4369 | { |
4370 | /* use default */ |
4371 | if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) |
4372 | return default_msg_enable_bits; |
4373 | if (debug_value == 0) /* no output */ |
4374 | return 0; |
4375 | /* set low N bits */ |
4376 | return (1U << debug_value) - 1; |
4377 | } |
4378 | |
4379 | static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) |
4380 | { |
	spin_lock(&txq->_xmit_lock);
4382 | /* Pairs with READ_ONCE() in __dev_queue_xmit() */ |
4383 | WRITE_ONCE(txq->xmit_lock_owner, cpu); |
4384 | } |
4385 | |
4386 | static inline bool __netif_tx_acquire(struct netdev_queue *txq) |
4387 | { |
4388 | __acquire(&txq->_xmit_lock); |
4389 | return true; |
4390 | } |
4391 | |
4392 | static inline void __netif_tx_release(struct netdev_queue *txq) |
4393 | { |
4394 | __release(&txq->_xmit_lock); |
4395 | } |
4396 | |
4397 | static inline void __netif_tx_lock_bh(struct netdev_queue *txq) |
4398 | { |
	spin_lock_bh(&txq->_xmit_lock);
4400 | /* Pairs with READ_ONCE() in __dev_queue_xmit() */ |
4401 | WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); |
4402 | } |
4403 | |
4404 | static inline bool __netif_tx_trylock(struct netdev_queue *txq) |
4405 | { |
	bool ok = spin_trylock(&txq->_xmit_lock);
4407 | |
4408 | if (likely(ok)) { |
4409 | /* Pairs with READ_ONCE() in __dev_queue_xmit() */ |
4410 | WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id()); |
4411 | } |
4412 | return ok; |
4413 | } |
4414 | |
4415 | static inline void __netif_tx_unlock(struct netdev_queue *txq) |
4416 | { |
4417 | /* Pairs with READ_ONCE() in __dev_queue_xmit() */ |
4418 | WRITE_ONCE(txq->xmit_lock_owner, -1); |
	spin_unlock(&txq->_xmit_lock);
4420 | } |
4421 | |
4422 | static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) |
4423 | { |
4424 | /* Pairs with READ_ONCE() in __dev_queue_xmit() */ |
4425 | WRITE_ONCE(txq->xmit_lock_owner, -1); |
	spin_unlock_bh(&txq->_xmit_lock);
4427 | } |
4428 | |
4429 | /* |
4430 | * txq->trans_start can be read locklessly from dev_watchdog() |
4431 | */ |
4432 | static inline void txq_trans_update(struct netdev_queue *txq) |
4433 | { |
4434 | if (txq->xmit_lock_owner != -1) |
4435 | WRITE_ONCE(txq->trans_start, jiffies); |
4436 | } |
4437 | |
4438 | static inline void txq_trans_cond_update(struct netdev_queue *txq) |
4439 | { |
4440 | unsigned long now = jiffies; |
4441 | |
4442 | if (READ_ONCE(txq->trans_start) != now) |
4443 | WRITE_ONCE(txq->trans_start, now); |
4444 | } |
4445 | |
4446 | /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */ |
4447 | static inline void netif_trans_update(struct net_device *dev) |
4448 | { |
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
4450 | |
4451 | txq_trans_cond_update(txq); |
4452 | } |
4453 | |
4454 | /** |
4455 | * netif_tx_lock - grab network device transmit lock |
4456 | * @dev: network device |
4457 | * |
4458 | * Get network device transmit lock |
4459 | */ |
4460 | void netif_tx_lock(struct net_device *dev); |
4461 | |
4462 | static inline void netif_tx_lock_bh(struct net_device *dev) |
4463 | { |
4464 | local_bh_disable(); |
4465 | netif_tx_lock(dev); |
4466 | } |
4467 | |
4468 | void netif_tx_unlock(struct net_device *dev); |
4469 | |
4470 | static inline void netif_tx_unlock_bh(struct net_device *dev) |
4471 | { |
4472 | netif_tx_unlock(dev); |
4473 | local_bh_enable(); |
4474 | } |
4475 | |
4476 | #define HARD_TX_LOCK(dev, txq, cpu) { \ |
4477 | if ((dev->features & NETIF_F_LLTX) == 0) { \ |
4478 | __netif_tx_lock(txq, cpu); \ |
4479 | } else { \ |
4480 | __netif_tx_acquire(txq); \ |
4481 | } \ |
4482 | } |
4483 | |
4484 | #define HARD_TX_TRYLOCK(dev, txq) \ |
4485 | (((dev->features & NETIF_F_LLTX) == 0) ? \ |
4486 | __netif_tx_trylock(txq) : \ |
4487 | __netif_tx_acquire(txq)) |
4488 | |
4489 | #define HARD_TX_UNLOCK(dev, txq) { \ |
4490 | if ((dev->features & NETIF_F_LLTX) == 0) { \ |
4491 | __netif_tx_unlock(txq); \ |
4492 | } else { \ |
4493 | __netif_tx_release(txq); \ |
4494 | } \ |
4495 | } |
4496 | |
4497 | static inline void netif_tx_disable(struct net_device *dev) |
4498 | { |
4499 | unsigned int i; |
4500 | int cpu; |
4501 | |
4502 | local_bh_disable(); |
4503 | cpu = smp_processor_id(); |
	spin_lock(&dev->tx_global_lock);
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	spin_unlock(&dev->tx_global_lock);
4513 | local_bh_enable(); |
4514 | } |
4515 | |
4516 | static inline void netif_addr_lock(struct net_device *dev) |
4517 | { |
4518 | unsigned char nest_level = 0; |
4519 | |
4520 | #ifdef CONFIG_LOCKDEP |
4521 | nest_level = dev->nested_level; |
4522 | #endif |
4523 | spin_lock_nested(&dev->addr_list_lock, nest_level); |
4524 | } |
4525 | |
4526 | static inline void netif_addr_lock_bh(struct net_device *dev) |
4527 | { |
4528 | unsigned char nest_level = 0; |
4529 | |
4530 | #ifdef CONFIG_LOCKDEP |
4531 | nest_level = dev->nested_level; |
4532 | #endif |
4533 | local_bh_disable(); |
4534 | spin_lock_nested(&dev->addr_list_lock, nest_level); |
4535 | } |
4536 | |
4537 | static inline void netif_addr_unlock(struct net_device *dev) |
4538 | { |
	spin_unlock(&dev->addr_list_lock);
4540 | } |
4541 | |
4542 | static inline void netif_addr_unlock_bh(struct net_device *dev) |
4543 | { |
	spin_unlock_bh(&dev->addr_list_lock);
4545 | } |
4546 | |
4547 | /* |
4548 | * dev_addrs walker. Should be used only for read access. Call with |
4549 | * rcu_read_lock held. |
4550 | */ |
4551 | #define for_each_dev_addr(dev, ha) \ |
4552 | list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list) |
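
/*
 * Illustrative sketch of the walker above; the loop body only reads, and
 * the whole walk runs under rcu_read_lock():
 *
 *	struct netdev_hw_addr *ha;
 *
 *	rcu_read_lock();
 *	for_each_dev_addr(dev, ha)
 *		pr_debug("addr: %pM\n", ha->addr);
 *	rcu_read_unlock();
 */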
4553 | |
/* These functions live elsewhere (drivers/net/net_init.c), but are related */
4555 | |
4556 | void ether_setup(struct net_device *dev); |
4557 | |
4558 | /* Support for loadable net-drivers */ |
4559 | struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, |
4560 | unsigned char name_assign_type, |
4561 | void (*setup)(struct net_device *), |
4562 | unsigned int txqs, unsigned int rxqs); |
4563 | #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \ |
4564 | alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1) |
4565 | |
4566 | #define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \ |
4567 | alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \ |
4568 | count) |
4569 | |
4570 | int register_netdev(struct net_device *dev); |
4571 | void unregister_netdev(struct net_device *dev); |
4572 | |
4573 | int devm_register_netdev(struct device *dev, struct net_device *ndev); |
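
/*
 * Illustrative sketch (struct foo_priv is hypothetical): the usual
 * lifetime is alloc_netdev() in probe, register_netdev() once the device
 * is fully initialized, and unregister_netdev()/free_netdev() on removal.
 *
 *	ndev = alloc_netdev(sizeof(struct foo_priv), "foo%d",
 *			    NET_NAME_ENUM, ether_setup);
 *	if (!ndev)
 *		return -ENOMEM;
 *	err = register_netdev(ndev);
 *	if (err) {
 *		free_netdev(ndev);
 *		return err;
 *	}
 */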
4574 | |
4575 | /* General hardware address lists handling functions */ |
4576 | int __hw_addr_sync(struct netdev_hw_addr_list *to_list, |
4577 | struct netdev_hw_addr_list *from_list, int addr_len); |
4578 | void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, |
4579 | struct netdev_hw_addr_list *from_list, int addr_len); |
4580 | int __hw_addr_sync_dev(struct netdev_hw_addr_list *list, |
4581 | struct net_device *dev, |
4582 | int (*sync)(struct net_device *, const unsigned char *), |
4583 | int (*unsync)(struct net_device *, |
4584 | const unsigned char *)); |
4585 | int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list, |
4586 | struct net_device *dev, |
4587 | int (*sync)(struct net_device *, |
4588 | const unsigned char *, int), |
4589 | int (*unsync)(struct net_device *, |
4590 | const unsigned char *, int)); |
4591 | void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list, |
4592 | struct net_device *dev, |
4593 | int (*unsync)(struct net_device *, |
4594 | const unsigned char *, int)); |
4595 | void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, |
4596 | struct net_device *dev, |
4597 | int (*unsync)(struct net_device *, |
4598 | const unsigned char *)); |
4599 | void __hw_addr_init(struct netdev_hw_addr_list *list); |
4600 | |
4601 | /* Functions used for device addresses handling */ |
4602 | void dev_addr_mod(struct net_device *dev, unsigned int offset, |
4603 | const void *addr, size_t len); |
4604 | |
4605 | static inline void |
4606 | __dev_addr_set(struct net_device *dev, const void *addr, size_t len) |
4607 | { |
	dev_addr_mod(dev, 0, addr, len);
4609 | } |
4610 | |
4611 | static inline void dev_addr_set(struct net_device *dev, const u8 *addr) |
4612 | { |
	__dev_addr_set(dev, addr, dev->addr_len);
4614 | } |
4615 | |
4616 | int dev_addr_add(struct net_device *dev, const unsigned char *addr, |
4617 | unsigned char addr_type); |
4618 | int dev_addr_del(struct net_device *dev, const unsigned char *addr, |
4619 | unsigned char addr_type); |
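
/*
 * Illustrative sketch (foo_read_mac() is hypothetical): drivers must not
 * write dev->dev_addr directly; they set it through dev_addr_set(), e.g.
 * with an address read from hardware.
 *
 *	u8 addr[ETH_ALEN];
 *
 *	foo_read_mac(priv, addr);
 *	dev_addr_set(dev, addr);
 */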
4620 | |
4621 | /* Functions used for unicast addresses handling */ |
4622 | int dev_uc_add(struct net_device *dev, const unsigned char *addr); |
4623 | int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr); |
4624 | int dev_uc_del(struct net_device *dev, const unsigned char *addr); |
4625 | int dev_uc_sync(struct net_device *to, struct net_device *from); |
4626 | int dev_uc_sync_multiple(struct net_device *to, struct net_device *from); |
4627 | void dev_uc_unsync(struct net_device *to, struct net_device *from); |
4628 | void dev_uc_flush(struct net_device *dev); |
4629 | void dev_uc_init(struct net_device *dev); |
4630 | |
4631 | /** |
 * __dev_uc_sync - Synchronize device's unicast list
4633 | * @dev: device to sync |
4634 | * @sync: function to call if address should be added |
4635 | * @unsync: function to call if address should be removed |
4636 | * |
4637 | * Add newly added addresses to the interface, and release |
4638 | * addresses that have been deleted. |
4639 | */ |
4640 | static inline int __dev_uc_sync(struct net_device *dev, |
4641 | int (*sync)(struct net_device *, |
4642 | const unsigned char *), |
4643 | int (*unsync)(struct net_device *, |
4644 | const unsigned char *)) |
4645 | { |
	return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync);
4647 | } |
4648 | |
4649 | /** |
4650 | * __dev_uc_unsync - Remove synchronized addresses from device |
4651 | * @dev: device to sync |
4652 | * @unsync: function to call if address should be removed |
4653 | * |
4654 | * Remove all addresses that were added to the device by dev_uc_sync(). |
4655 | */ |
4656 | static inline void __dev_uc_unsync(struct net_device *dev, |
4657 | int (*unsync)(struct net_device *, |
4658 | const unsigned char *)) |
4659 | { |
	__hw_addr_unsync_dev(&dev->uc, dev, unsync);
4661 | } |
4662 | |
4663 | /* Functions used for multicast addresses handling */ |
4664 | int dev_mc_add(struct net_device *dev, const unsigned char *addr); |
4665 | int dev_mc_add_global(struct net_device *dev, const unsigned char *addr); |
4666 | int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr); |
4667 | int dev_mc_del(struct net_device *dev, const unsigned char *addr); |
4668 | int dev_mc_del_global(struct net_device *dev, const unsigned char *addr); |
4669 | int dev_mc_sync(struct net_device *to, struct net_device *from); |
4670 | int dev_mc_sync_multiple(struct net_device *to, struct net_device *from); |
4671 | void dev_mc_unsync(struct net_device *to, struct net_device *from); |
4672 | void dev_mc_flush(struct net_device *dev); |
4673 | void dev_mc_init(struct net_device *dev); |
4674 | |
4675 | /** |
 * __dev_mc_sync - Synchronize device's multicast list
4677 | * @dev: device to sync |
4678 | * @sync: function to call if address should be added |
4679 | * @unsync: function to call if address should be removed |
4680 | * |
4681 | * Add newly added addresses to the interface, and release |
4682 | * addresses that have been deleted. |
4683 | */ |
4684 | static inline int __dev_mc_sync(struct net_device *dev, |
4685 | int (*sync)(struct net_device *, |
4686 | const unsigned char *), |
4687 | int (*unsync)(struct net_device *, |
4688 | const unsigned char *)) |
4689 | { |
	return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync);
4691 | } |
4692 | |
4693 | /** |
4694 | * __dev_mc_unsync - Remove synchronized addresses from device |
4695 | * @dev: device to sync |
4696 | * @unsync: function to call if address should be removed |
4697 | * |
4698 | * Remove all addresses that were added to the device by dev_mc_sync(). |
4699 | */ |
4700 | static inline void __dev_mc_unsync(struct net_device *dev, |
4701 | int (*unsync)(struct net_device *, |
4702 | const unsigned char *)) |
4703 | { |
	__hw_addr_unsync_dev(&dev->mc, dev, unsync);
4705 | } |
4706 | |
4707 | /* Functions used for secondary unicast and multicast support */ |
4708 | void dev_set_rx_mode(struct net_device *dev); |
4709 | int dev_set_promiscuity(struct net_device *dev, int inc); |
4710 | int dev_set_allmulti(struct net_device *dev, int inc); |
4711 | void netdev_state_change(struct net_device *dev); |
4712 | void __netdev_notify_peers(struct net_device *dev); |
4713 | void netdev_notify_peers(struct net_device *dev); |
4714 | void netdev_features_change(struct net_device *dev); |
4715 | /* Load a device via the kmod */ |
4716 | void dev_load(struct net *net, const char *name); |
4717 | struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, |
4718 | struct rtnl_link_stats64 *storage); |
4719 | void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, |
4720 | const struct net_device_stats *netdev_stats); |
4721 | void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s, |
4722 | const struct pcpu_sw_netstats __percpu *netstats); |
4723 | void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s); |
4724 | |
4725 | extern int netdev_max_backlog; |
4726 | extern int dev_rx_weight; |
4727 | extern int dev_tx_weight; |
4728 | extern int gro_normal_batch; |
4729 | |
4730 | enum { |
4731 | NESTED_SYNC_IMM_BIT, |
4732 | NESTED_SYNC_TODO_BIT, |
4733 | }; |
4734 | |
4735 | #define __NESTED_SYNC_BIT(bit) ((u32)1 << (bit)) |
4736 | #define __NESTED_SYNC(name) __NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT) |
4737 | |
4738 | #define NESTED_SYNC_IMM __NESTED_SYNC(IMM) |
4739 | #define NESTED_SYNC_TODO __NESTED_SYNC(TODO) |
4740 | |
4741 | struct netdev_nested_priv { |
4742 | unsigned char flags; |
4743 | void *data; |
4744 | }; |
4745 | |
4746 | bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); |
4747 | struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, |
4748 | struct list_head **iter); |
4749 | |
4750 | /* iterate through upper list, must be called under RCU read lock */ |
4751 | #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \ |
4752 | for (iter = &(dev)->adj_list.upper, \ |
4753 | updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \ |
4754 | updev; \ |
4755 | updev = netdev_upper_get_next_dev_rcu(dev, &(iter))) |
4756 | |
4757 | int netdev_walk_all_upper_dev_rcu(struct net_device *dev, |
4758 | int (*fn)(struct net_device *upper_dev, |
4759 | struct netdev_nested_priv *priv), |
4760 | struct netdev_nested_priv *priv); |
4761 | |
4762 | bool netdev_has_upper_dev_all_rcu(struct net_device *dev, |
4763 | struct net_device *upper_dev); |
4764 | |
4765 | bool netdev_has_any_upper_dev(struct net_device *dev); |
4766 | |
4767 | void *netdev_lower_get_next_private(struct net_device *dev, |
4768 | struct list_head **iter); |
4769 | void *netdev_lower_get_next_private_rcu(struct net_device *dev, |
4770 | struct list_head **iter); |
4771 | |
4772 | #define netdev_for_each_lower_private(dev, priv, iter) \ |
4773 | for (iter = (dev)->adj_list.lower.next, \ |
4774 | priv = netdev_lower_get_next_private(dev, &(iter)); \ |
4775 | priv; \ |
4776 | priv = netdev_lower_get_next_private(dev, &(iter))) |
4777 | |
4778 | #define netdev_for_each_lower_private_rcu(dev, priv, iter) \ |
4779 | for (iter = &(dev)->adj_list.lower, \ |
4780 | priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \ |
4781 | priv; \ |
4782 | priv = netdev_lower_get_next_private_rcu(dev, &(iter))) |
4783 | |
4784 | void *netdev_lower_get_next(struct net_device *dev, |
4785 | struct list_head **iter); |
4786 | |
4787 | #define netdev_for_each_lower_dev(dev, ldev, iter) \ |
4788 | for (iter = (dev)->adj_list.lower.next, \ |
4789 | ldev = netdev_lower_get_next(dev, &(iter)); \ |
4790 | ldev; \ |
4791 | ldev = netdev_lower_get_next(dev, &(iter))) |
4792 | |
4793 | struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, |
4794 | struct list_head **iter); |
4795 | int netdev_walk_all_lower_dev(struct net_device *dev, |
4796 | int (*fn)(struct net_device *lower_dev, |
4797 | struct netdev_nested_priv *priv), |
4798 | struct netdev_nested_priv *priv); |
4799 | int netdev_walk_all_lower_dev_rcu(struct net_device *dev, |
4800 | int (*fn)(struct net_device *lower_dev, |
4801 | struct netdev_nested_priv *priv), |
4802 | struct netdev_nested_priv *priv); |
4803 | |
4804 | void *netdev_adjacent_get_private(struct list_head *adj_list); |
4805 | void *netdev_lower_get_first_private_rcu(struct net_device *dev); |
4806 | struct net_device *netdev_master_upper_dev_get(struct net_device *dev); |
4807 | struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); |
4808 | int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev, |
4809 | struct netlink_ext_ack *extack); |
4810 | int netdev_master_upper_dev_link(struct net_device *dev, |
4811 | struct net_device *upper_dev, |
4812 | void *upper_priv, void *upper_info, |
4813 | struct netlink_ext_ack *extack); |
4814 | void netdev_upper_dev_unlink(struct net_device *dev, |
4815 | struct net_device *upper_dev); |
4816 | int netdev_adjacent_change_prepare(struct net_device *old_dev, |
4817 | struct net_device *new_dev, |
4818 | struct net_device *dev, |
4819 | struct netlink_ext_ack *extack); |
4820 | void netdev_adjacent_change_commit(struct net_device *old_dev, |
4821 | struct net_device *new_dev, |
4822 | struct net_device *dev); |
4823 | void netdev_adjacent_change_abort(struct net_device *old_dev, |
4824 | struct net_device *new_dev, |
4825 | struct net_device *dev); |
4826 | void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); |
4827 | void *netdev_lower_dev_get_private(struct net_device *dev, |
4828 | struct net_device *lower_dev); |
4829 | void netdev_lower_state_changed(struct net_device *lower_dev, |
4830 | void *lower_state_info); |
4831 | |
4832 | /* RSS keys are 40 or 52 bytes long */ |
#define NETDEV_RSS_KEY_LEN 52
extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
void netdev_rss_key_fill(void *buffer, size_t len);
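
/*
 * Illustrative sketch (FOO_RSS_KEY_SIZE and foo_program_rss_key() are
 * hypothetical): a driver fills its RSS key from the host-wide random
 * key at ring setup, so all devices default to the same key.
 *
 *	u8 rss_key[FOO_RSS_KEY_SIZE];
 *
 *	netdev_rss_key_fill(rss_key, sizeof(rss_key));
 *	foo_program_rss_key(priv, rss_key);
 */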
4836 | |
4837 | int skb_checksum_help(struct sk_buff *skb); |
4838 | int skb_crc32c_csum_help(struct sk_buff *skb); |
4839 | int skb_csum_hwoffload_help(struct sk_buff *skb, |
4840 | const netdev_features_t features); |
4841 | |
4842 | struct netdev_bonding_info { |
4843 | ifslave slave; |
4844 | ifbond master; |
4845 | }; |
4846 | |
4847 | struct netdev_notifier_bonding_info { |
4848 | struct netdev_notifier_info info; /* must be first */ |
4849 | struct netdev_bonding_info bonding_info; |
4850 | }; |
4851 | |
4852 | void netdev_bonding_info_change(struct net_device *dev, |
4853 | struct netdev_bonding_info *bonding_info); |
4854 | |
4855 | #if IS_ENABLED(CONFIG_ETHTOOL_NETLINK) |
4856 | void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data); |
4857 | #else |
4858 | static inline void ethtool_notify(struct net_device *dev, unsigned int cmd, |
4859 | const void *data) |
4860 | { |
4861 | } |
4862 | #endif |
4863 | |
4864 | __be16 skb_network_protocol(struct sk_buff *skb, int *depth); |
4865 | |
4866 | static inline bool can_checksum_protocol(netdev_features_t features, |
4867 | __be16 protocol) |
4868 | { |
4869 | if (protocol == htons(ETH_P_FCOE)) |
4870 | return !!(features & NETIF_F_FCOE_CRC); |
4871 | |
4872 | /* Assume this is an IP checksum (not SCTP CRC) */ |
4873 | |
4874 | if (features & NETIF_F_HW_CSUM) { |
4875 | /* Can checksum everything */ |
4876 | return true; |
4877 | } |
4878 | |
4879 | switch (protocol) { |
4880 | case htons(ETH_P_IP): |
4881 | return !!(features & NETIF_F_IP_CSUM); |
4882 | case htons(ETH_P_IPV6): |
4883 | return !!(features & NETIF_F_IPV6_CSUM); |
4884 | default: |
4885 | return false; |
4886 | } |
4887 | } |
4888 | |
4889 | #ifdef CONFIG_BUG |
4890 | void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb); |
4891 | #else |
4892 | static inline void netdev_rx_csum_fault(struct net_device *dev, |
4893 | struct sk_buff *skb) |
4894 | { |
4895 | } |
4896 | #endif |
4897 | /* rx skb timestamps */ |
4898 | void net_enable_timestamp(void); |
4899 | void net_disable_timestamp(void); |
4900 | |
4901 | static inline ktime_t netdev_get_tstamp(struct net_device *dev, |
4902 | const struct skb_shared_hwtstamps *hwtstamps, |
4903 | bool cycles) |
4904 | { |
4905 | const struct net_device_ops *ops = dev->netdev_ops; |
4906 | |
4907 | if (ops->ndo_get_tstamp) |
4908 | return ops->ndo_get_tstamp(dev, hwtstamps, cycles); |
4909 | |
4910 | return hwtstamps->hwtstamp; |
4911 | } |
4912 | |
4913 | static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops, |
4914 | struct sk_buff *skb, struct net_device *dev, |
4915 | bool more) |
4916 | { |
4917 | __this_cpu_write(softnet_data.xmit.more, more); |
4918 | return ops->ndo_start_xmit(skb, dev); |
4919 | } |
4920 | |
4921 | static inline bool netdev_xmit_more(void) |
4922 | { |
4923 | return __this_cpu_read(softnet_data.xmit.more); |
4924 | } |
4925 | |
4926 | static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev, |
4927 | struct netdev_queue *txq, bool more) |
4928 | { |
4929 | const struct net_device_ops *ops = dev->netdev_ops; |
4930 | netdev_tx_t rc; |
4931 | |
4932 | rc = __netdev_start_xmit(ops, skb, dev, more); |
4933 | if (rc == NETDEV_TX_OK) |
4934 | txq_trans_update(txq); |
4935 | |
4936 | return rc; |
4937 | } |
4938 | |
4939 | int netdev_class_create_file_ns(const struct class_attribute *class_attr, |
4940 | const void *ns); |
4941 | void netdev_class_remove_file_ns(const struct class_attribute *class_attr, |
4942 | const void *ns); |
4943 | |
4944 | extern const struct kobj_ns_type_operations net_ns_type_operations; |
4945 | |
4946 | const char *netdev_drivername(const struct net_device *dev); |
4947 | |
4948 | static inline netdev_features_t netdev_intersect_features(netdev_features_t f1, |
4949 | netdev_features_t f2) |
4950 | { |
4951 | if ((f1 ^ f2) & NETIF_F_HW_CSUM) { |
4952 | if (f1 & NETIF_F_HW_CSUM) |
4953 | f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); |
4954 | else |
4955 | f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); |
4956 | } |
4957 | |
4958 | return f1 & f2; |
4959 | } |
4960 | |
4961 | static inline netdev_features_t netdev_get_wanted_features( |
4962 | struct net_device *dev) |
4963 | { |
4964 | return (dev->features & ~dev->hw_features) | dev->wanted_features; |
4965 | } |
4966 | netdev_features_t netdev_increment_features(netdev_features_t all, |
4967 | netdev_features_t one, netdev_features_t mask); |
4968 | |
/* Allow TSO to be used on stacked devices:
 * performing the GSO segmentation before the last device
 * is a performance improvement.
 */
4973 | static inline netdev_features_t netdev_add_tso_features(netdev_features_t features, |
4974 | netdev_features_t mask) |
4975 | { |
	return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
4977 | } |
4978 | |
4979 | int __netdev_update_features(struct net_device *dev); |
4980 | void netdev_update_features(struct net_device *dev); |
4981 | void netdev_change_features(struct net_device *dev); |
4982 | |
4983 | void netif_stacked_transfer_operstate(const struct net_device *rootdev, |
4984 | struct net_device *dev); |
4985 | |
4986 | netdev_features_t passthru_features_check(struct sk_buff *skb, |
4987 | struct net_device *dev, |
4988 | netdev_features_t features); |
4989 | netdev_features_t netif_skb_features(struct sk_buff *skb); |
4990 | void skb_warn_bad_offload(const struct sk_buff *skb); |
4991 | |
4992 | static inline bool net_gso_ok(netdev_features_t features, int gso_type) |
4993 | { |
4994 | netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT; |
4995 | |
4996 | /* check flags correspondence */ |
4997 | BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); |
4998 | BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT)); |
4999 | BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); |
5000 | BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT)); |
5001 | BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)); |
5002 | BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); |
5003 | BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT)); |
5004 | BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT)); |
5005 | BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT)); |
5006 | BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT)); |
5007 | BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); |
5008 | BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); |
5009 | BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT)); |
5010 | BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); |
5011 | BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT)); |
5012 | BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT)); |
5013 | BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT)); |
5014 | BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT)); |
5015 | BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT)); |
5016 | |
5017 | return (features & feature) == feature; |
5018 | } |
5019 | |
5020 | static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features) |
5021 | { |
5022 | return net_gso_ok(features, skb_shinfo(skb)->gso_type) && |
5023 | (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); |
5024 | } |
5025 | |
5026 | static inline bool netif_needs_gso(struct sk_buff *skb, |
5027 | netdev_features_t features) |
5028 | { |
5029 | return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || |
5030 | unlikely((skb->ip_summed != CHECKSUM_PARTIAL) && |
5031 | (skb->ip_summed != CHECKSUM_UNNECESSARY))); |
5032 | } |
5033 | |
5034 | void netif_set_tso_max_size(struct net_device *dev, unsigned int size); |
5035 | void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs); |
5036 | void netif_inherit_tso_max(struct net_device *to, |
5037 | const struct net_device *from); |
5038 | |
5039 | static inline bool netif_is_macsec(const struct net_device *dev) |
5040 | { |
5041 | return dev->priv_flags & IFF_MACSEC; |
5042 | } |
5043 | |
5044 | static inline bool netif_is_macvlan(const struct net_device *dev) |
5045 | { |
5046 | return dev->priv_flags & IFF_MACVLAN; |
5047 | } |
5048 | |
5049 | static inline bool netif_is_macvlan_port(const struct net_device *dev) |
5050 | { |
5051 | return dev->priv_flags & IFF_MACVLAN_PORT; |
5052 | } |
5053 | |
5054 | static inline bool netif_is_bond_master(const struct net_device *dev) |
5055 | { |
5056 | return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; |
5057 | } |
5058 | |
5059 | static inline bool netif_is_bond_slave(const struct net_device *dev) |
5060 | { |
5061 | return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; |
5062 | } |
5063 | |
5064 | static inline bool netif_supports_nofcs(struct net_device *dev) |
5065 | { |
5066 | return dev->priv_flags & IFF_SUPP_NOFCS; |
5067 | } |
5068 | |
5069 | static inline bool netif_has_l3_rx_handler(const struct net_device *dev) |
5070 | { |
5071 | return dev->priv_flags & IFF_L3MDEV_RX_HANDLER; |
5072 | } |
5073 | |
5074 | static inline bool netif_is_l3_master(const struct net_device *dev) |
5075 | { |
5076 | return dev->priv_flags & IFF_L3MDEV_MASTER; |
5077 | } |
5078 | |
5079 | static inline bool netif_is_l3_slave(const struct net_device *dev) |
5080 | { |
5081 | return dev->priv_flags & IFF_L3MDEV_SLAVE; |
5082 | } |
5083 | |
5084 | static inline int dev_sdif(const struct net_device *dev) |
5085 | { |
5086 | #ifdef CONFIG_NET_L3_MASTER_DEV |
5087 | if (netif_is_l3_slave(dev)) |
5088 | return dev->ifindex; |
5089 | #endif |
5090 | return 0; |
5091 | } |
5092 | |
5093 | static inline bool netif_is_bridge_master(const struct net_device *dev) |
5094 | { |
5095 | return dev->priv_flags & IFF_EBRIDGE; |
5096 | } |
5097 | |
5098 | static inline bool netif_is_bridge_port(const struct net_device *dev) |
5099 | { |
5100 | return dev->priv_flags & IFF_BRIDGE_PORT; |
5101 | } |
5102 | |
5103 | static inline bool netif_is_ovs_master(const struct net_device *dev) |
5104 | { |
5105 | return dev->priv_flags & IFF_OPENVSWITCH; |
5106 | } |
5107 | |
5108 | static inline bool netif_is_ovs_port(const struct net_device *dev) |
5109 | { |
5110 | return dev->priv_flags & IFF_OVS_DATAPATH; |
5111 | } |
5112 | |
5113 | static inline bool netif_is_any_bridge_master(const struct net_device *dev) |
5114 | { |
5115 | return netif_is_bridge_master(dev) || netif_is_ovs_master(dev); |
5116 | } |
5117 | |
5118 | static inline bool netif_is_any_bridge_port(const struct net_device *dev) |
5119 | { |
5120 | return netif_is_bridge_port(dev) || netif_is_ovs_port(dev); |
5121 | } |
5122 | |
5123 | static inline bool netif_is_team_master(const struct net_device *dev) |
5124 | { |
5125 | return dev->priv_flags & IFF_TEAM; |
5126 | } |
5127 | |
5128 | static inline bool netif_is_team_port(const struct net_device *dev) |
5129 | { |
5130 | return dev->priv_flags & IFF_TEAM_PORT; |
5131 | } |
5132 | |
5133 | static inline bool netif_is_lag_master(const struct net_device *dev) |
5134 | { |
5135 | return netif_is_bond_master(dev) || netif_is_team_master(dev); |
5136 | } |
5137 | |
5138 | static inline bool netif_is_lag_port(const struct net_device *dev) |
5139 | { |
5140 | return netif_is_bond_slave(dev) || netif_is_team_port(dev); |
5141 | } |
5142 | |
5143 | static inline bool netif_is_rxfh_configured(const struct net_device *dev) |
5144 | { |
5145 | return dev->priv_flags & IFF_RXFH_CONFIGURED; |
5146 | } |
5147 | |
5148 | static inline bool netif_is_failover(const struct net_device *dev) |
5149 | { |
5150 | return dev->priv_flags & IFF_FAILOVER; |
5151 | } |
5152 | |
5153 | static inline bool netif_is_failover_slave(const struct net_device *dev) |
5154 | { |
5155 | return dev->priv_flags & IFF_FAILOVER_SLAVE; |
5156 | } |
5157 | |
5158 | /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ |
5159 | static inline void netif_keep_dst(struct net_device *dev) |
5160 | { |
5161 | dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM); |
5162 | } |
5163 | |
5164 | /* return true if dev can't cope with mtu frames that need vlan tag insertion */ |
5165 | static inline bool netif_reduces_vlan_mtu(struct net_device *dev) |
5166 | { |
5167 | /* TODO: reserve and use an additional IFF bit, if we get more users */ |
5168 | return netif_is_macsec(dev); |
5169 | } |
5170 | |
5171 | extern struct pernet_operations __net_initdata loopback_net_ops; |
5172 | |
5173 | /* Logging, debugging and troubleshooting/diagnostic helpers. */ |
5174 | |
5175 | /* netdev_printk helpers, similar to dev_printk */ |
5176 | |
5177 | static inline const char *netdev_name(const struct net_device *dev) |
5178 | { |
5179 | if (!dev->name[0] || strchr(dev->name, '%')) |
5180 | return "(unnamed net_device)" ; |
5181 | return dev->name; |
5182 | } |
5183 | |
5184 | static inline const char *netdev_reg_state(const struct net_device *dev) |
5185 | { |
5186 | switch (dev->reg_state) { |
	case NETREG_UNINITIALIZED: return " (uninitialized)";
	case NETREG_REGISTERED: return "";
	case NETREG_UNREGISTERING: return " (unregistering)";
	case NETREG_UNREGISTERED: return " (unregistered)";
	case NETREG_RELEASED: return " (released)";
	case NETREG_DUMMY: return " (dummy)";
	}

	WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
	return " (unknown)";
5197 | } |
5198 | |
5199 | #define MODULE_ALIAS_NETDEV(device) \ |
5200 | MODULE_ALIAS("netdev-" device) |
5201 | |
5202 | /* |
5203 | * netdev_WARN() acts like dev_printk(), but with the key difference |
5204 | * of using a WARN/WARN_ON to get the message out, including the |
5205 | * file/line information and a backtrace. |
5206 | */ |
5207 | #define netdev_WARN(dev, format, args...) \ |
5208 | WARN(1, "netdevice: %s%s: " format, netdev_name(dev), \ |
5209 | netdev_reg_state(dev), ##args) |
5210 | |
5211 | #define netdev_WARN_ONCE(dev, format, args...) \ |
5212 | WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev), \ |
5213 | netdev_reg_state(dev), ##args) |
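
/*
 * Example (hypothetical message):
 *
 *	netdev_WARN(dev, "TX ring %d stalled\n", i);
 */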
5214 | |
5215 | /* |
5216 | * The list of packet types we will receive (as opposed to discard) |
5217 | * and the routines to invoke. |
5218 | * |
 * Why 16? Because with 16 the only overlap we get on a hash of the
5220 | * low nibble of the protocol value is RARP/SNAP/X.25. |
5221 | * |
5222 | * 0800 IP |
5223 | * 0001 802.3 |
5224 | * 0002 AX.25 |
5225 | * 0004 802.2 |
5226 | * 8035 RARP |
5227 | * 0005 SNAP |
5228 | * 0805 X.25 |
5229 | * 0806 ARP |
5230 | * 8137 IPX |
5231 | * 0009 Localtalk |
5232 | * 86DD IPv6 |
5233 | */ |
5234 | #define PTYPE_HASH_SIZE (16) |
5235 | #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1) |
5236 | |
5237 | extern struct list_head ptype_all __read_mostly; |
5238 | extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; |
5239 | |
5240 | extern struct net_device *blackhole_netdev; |
5241 | |
/* Note: avoid these macros in the fast path; prefer per-cpu or per-queue counters. */
5243 | #define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD) |
5244 | #define DEV_STATS_ADD(DEV, FIELD, VAL) \ |
5245 | atomic_long_add((VAL), &(DEV)->stats.__##FIELD) |
5246 | #define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD) |
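
/*
 * Illustrative use on a slow/error path:
 *
 *	DEV_STATS_INC(dev, rx_dropped);
 */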
5247 | |
5248 | #endif /* _LINUX_NETDEVICE_H */ |
5249 | |