/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * VLAN		An implementation of 802.1Q VLAN tagging.
 *
 * Authors:	Ben Greear <greearb@candelatech.com>
 */
#ifndef _LINUX_IF_VLAN_H_
#define _LINUX_IF_VLAN_H_

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/bug.h>
#include <uapi/linux/if_vlan.h>

#define VLAN_HLEN	4		/* The additional bytes required by VLAN
					 * (in addition to the Ethernet header)
					 */
#define VLAN_ETH_HLEN	18		/* Total octets in header.	 */
#define VLAN_ETH_ZLEN	64		/* Min. octets in frame sans FCS */

/*
 * According to 802.3ac, the packet can be 4 bytes longer. --Klika Jan
 */
#define VLAN_ETH_DATA_LEN	1500	/* Max. octets in payload	 */
#define VLAN_ETH_FRAME_LEN	1518	/* Max. octets in frame sans FCS */

#define VLAN_MAX_DEPTH	8		/* Max. number of nested VLAN tags parsed */

/**
 * struct vlan_hdr - vlan header
 * @h_vlan_TCI: priority and VLAN ID
 * @h_vlan_encapsulated_proto: packet type ID or len
 */
struct vlan_hdr {
	__be16	h_vlan_TCI;
	__be16	h_vlan_encapsulated_proto;
};

/**
 * struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr)
 * @h_dest: destination ethernet address
 * @h_source: source ethernet address
 * @h_vlan_proto: ethernet protocol
 * @h_vlan_TCI: priority and VLAN ID
 * @h_vlan_encapsulated_proto: packet type ID or len
 */
struct vlan_ethhdr {
	struct_group(addrs,
		unsigned char	h_dest[ETH_ALEN];
		unsigned char	h_source[ETH_ALEN];
	);
	__be16		h_vlan_proto;
	__be16		h_vlan_TCI;
	__be16		h_vlan_encapsulated_proto;
};

#include <linux/skbuff.h>

static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
{
	return (struct vlan_ethhdr *)skb_mac_header(skb);
}

/* Prefer this version in TX path, instead of
 * skb_reset_mac_header() + vlan_eth_hdr()
 */
static inline struct vlan_ethhdr *skb_vlan_eth_hdr(const struct sk_buff *skb)
{
	return (struct vlan_ethhdr *)skb->data;
}

#define VLAN_PRIO_MASK		0xe000 /* Priority Code Point */
#define VLAN_PRIO_SHIFT		13
#define VLAN_CFI_MASK		0x1000 /* Canonical Format Indicator / Drop Eligible Indicator */
#define VLAN_VID_MASK		0x0fff /* VLAN Identifier */
#define VLAN_N_VID		4096
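
/*
 * Illustrative sketch (not a kernel helper): an 802.1Q TCI packs PCP
 * (3 bits), DEI (1 bit) and VID (12 bits) into 16 bits, so composing
 * and decomposing one with the masks above looks like:
 *
 *	u16 tci = ((pcp << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK) |
 *		  (dei ? VLAN_CFI_MASK : 0) | (vid & VLAN_VID_MASK);
 *	u8  rx_pcp = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
 *	u16 rx_vid = tci & VLAN_VID_MASK;
 */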

/* found in socket.c */
extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));

static inline bool is_vlan_dev(const struct net_device *dev)
{
	return dev->priv_flags & IFF_802_1Q_VLAN;
}

#define skb_vlan_tag_present(__skb)	(!!(__skb)->vlan_all)
#define skb_vlan_tag_get(__skb)		((__skb)->vlan_tci)
#define skb_vlan_tag_get_id(__skb)	((__skb)->vlan_tci & VLAN_VID_MASK)
#define skb_vlan_tag_get_cfi(__skb)	(!!((__skb)->vlan_tci & VLAN_CFI_MASK))
#define skb_vlan_tag_get_prio(__skb)	(((__skb)->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT)

static inline int vlan_get_rx_ctag_filter_info(struct net_device *dev)
{
	ASSERT_RTNL();
	return notifier_to_errno(call_netdevice_notifiers(NETDEV_CVLAN_FILTER_PUSH_INFO, dev));
}

static inline void vlan_drop_rx_ctag_filter_info(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_CVLAN_FILTER_DROP_INFO, dev);
}

static inline int vlan_get_rx_stag_filter_info(struct net_device *dev)
{
	ASSERT_RTNL();
	return notifier_to_errno(call_netdevice_notifiers(NETDEV_SVLAN_FILTER_PUSH_INFO, dev));
}

static inline void vlan_drop_rx_stag_filter_info(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_SVLAN_FILTER_DROP_INFO, dev);
}

/**
 * struct vlan_pcpu_stats - VLAN percpu rx/tx stats
 * @rx_packets: number of received packets
 * @rx_bytes: number of received bytes
 * @rx_multicast: number of received multicast packets
 * @tx_packets: number of transmitted packets
 * @tx_bytes: number of transmitted bytes
 * @syncp: synchronization point for 64bit counters
 * @rx_errors: number of rx errors
 * @tx_dropped: number of tx drops
 */
struct vlan_pcpu_stats {
	u64_stats_t		rx_packets;
	u64_stats_t		rx_bytes;
	u64_stats_t		rx_multicast;
	u64_stats_t		tx_packets;
	u64_stats_t		tx_bytes;
	struct u64_stats_sync	syncp;
	u32			rx_errors;
	u32			tx_dropped;
};

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)

extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
						   __be16 vlan_proto, u16 vlan_id);
extern int vlan_for_each(struct net_device *dev,
			 int (*action)(struct net_device *dev, int vid,
				       void *arg), void *arg);
extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
extern u16 vlan_dev_vlan_id(const struct net_device *dev);
extern __be16 vlan_dev_vlan_proto(const struct net_device *dev);

/**
 * struct vlan_priority_tci_mapping - vlan egress priority mappings
 * @priority: skb priority
 * @vlan_qos: vlan priority: (skb->priority << 13) & 0xE000
 * @next: pointer to next struct
 */
struct vlan_priority_tci_mapping {
	u32					priority;
	u16					vlan_qos;
	struct vlan_priority_tci_mapping	*next;
};

struct proc_dir_entry;
struct netpoll;

/**
 * struct vlan_dev_priv - VLAN private device data
 * @nr_ingress_mappings: number of ingress priority mappings
 * @ingress_priority_map: ingress priority mappings
 * @nr_egress_mappings: number of egress priority mappings
 * @egress_priority_map: hash of egress priority mappings
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_id: VLAN identifier
 * @flags: device flags
 * @real_dev: underlying netdevice
 * @dev_tracker: refcount tracker for @real_dev reference
 * @real_dev_addr: address of underlying netdevice
 * @dent: proc dir entry
 * @vlan_pcpu_stats: ptr to percpu rx stats
 */
struct vlan_dev_priv {
	unsigned int				nr_ingress_mappings;
	u32					ingress_priority_map[8];
	unsigned int				nr_egress_mappings;
	struct vlan_priority_tci_mapping	*egress_priority_map[16];

	__be16					vlan_proto;
	u16					vlan_id;
	u16					flags;

	struct net_device			*real_dev;
	netdevice_tracker			dev_tracker;

	unsigned char				real_dev_addr[ETH_ALEN];

	struct proc_dir_entry			*dent;
	struct vlan_pcpu_stats __percpu		*vlan_pcpu_stats;
#ifdef CONFIG_NET_POLL_CONTROLLER
	struct netpoll				*netpoll;
#endif
};

static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u16
vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio)
{
	struct vlan_priority_tci_mapping *mp;

	smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */

	mp = vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)];
	while (mp) {
		if (mp->priority == skprio) {
			return mp->vlan_qos; /* This should already be shifted
					      * to mask correctly with the
					      * VLAN's TCI */
		}
		mp = mp->next;
	}
	return 0;
}
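
/*
 * Illustrative caller sketch (assumed, modelled on the 8021q transmit
 * path): the returned vlan_qos is already shifted into PCP position,
 * so building the outgoing TCI is a plain OR with the VLAN ID:
 *
 *	u16 tci = vlan_dev_priv(dev)->vlan_id |
 *		  vlan_dev_get_egress_qos_mask(dev, skb->priority);
 */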

extern bool vlan_do_receive(struct sk_buff **skb);

extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid);
extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid);

extern int vlan_vids_add_by_dev(struct net_device *dev,
				const struct net_device *by_dev);
extern void vlan_vids_del_by_dev(struct net_device *dev,
				 const struct net_device *by_dev);

extern bool vlan_uses_dev(const struct net_device *dev);

#else
static inline struct net_device *
__vlan_find_dev_deep_rcu(struct net_device *real_dev,
			 __be16 vlan_proto, u16 vlan_id)
{
	return NULL;
}

static inline int
vlan_for_each(struct net_device *dev,
	      int (*action)(struct net_device *dev, int vid, void *arg),
	      void *arg)
{
	return 0;
}

static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	BUG();
	return NULL;
}

static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	BUG();
	return 0;
}

static inline __be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
	BUG();
	return 0;
}

static inline u16 vlan_dev_get_egress_qos_mask(struct net_device *dev,
					       u32 skprio)
{
	return 0;
}

static inline bool vlan_do_receive(struct sk_buff **skb)
{
	return false;
}

static inline int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
	return 0;
}

static inline void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
}

static inline int vlan_vids_add_by_dev(struct net_device *dev,
				       const struct net_device *by_dev)
{
	return 0;
}

static inline void vlan_vids_del_by_dev(struct net_device *dev,
					const struct net_device *by_dev)
{
}

static inline bool vlan_uses_dev(const struct net_device *dev)
{
	return false;
}
#endif

/**
 * eth_type_vlan - check for valid vlan ether type.
 * @ethertype: ether type to check
 *
 * Returns true if the ether type is a vlan ether type.
 */
static inline bool eth_type_vlan(__be16 ethertype)
{
	switch (ethertype) {
	case htons(ETH_P_8021Q):
	case htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}

static inline bool vlan_hw_offload_capable(netdev_features_t features,
					   __be16 proto)
{
	if (proto == htons(ETH_P_8021Q) && features & NETIF_F_HW_VLAN_CTAG_TX)
		return true;
	if (proto == htons(ETH_P_8021AD) && features & NETIF_F_HW_VLAN_STAG_TX)
		return true;
	return false;
}

/**
 * __vlan_insert_inner_tag - inner VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 * @mac_len: MAC header length including outer vlan headers
 *
 * Inserts the VLAN tag into @skb as part of the payload at offset @mac_len.
 * Returns error if skb_cow_head fails.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline int __vlan_insert_inner_tag(struct sk_buff *skb,
					  __be16 vlan_proto, u16 vlan_tci,
					  unsigned int mac_len)
{
	struct vlan_ethhdr *veth;

	if (skb_cow_head(skb, VLAN_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, VLAN_HLEN);

	/* Move the mac header sans proto to the beginning of the new header. */
	if (likely(mac_len > ETH_TLEN))
		memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN);
	if (skb_mac_header_was_set(skb))
		skb->mac_header -= VLAN_HLEN;

	veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN);

	/* first, the ethernet type */
	if (likely(mac_len >= ETH_TLEN)) {
		/* h_vlan_encapsulated_proto should already be populated, and
		 * skb->data has space for h_vlan_proto
		 */
		veth->h_vlan_proto = vlan_proto;
	} else {
		/* h_vlan_encapsulated_proto should not be populated, and
		 * skb->data has no space for h_vlan_proto
		 */
		veth->h_vlan_encapsulated_proto = skb->protocol;
	}

	/* now, the TCI */
	veth->h_vlan_TCI = htons(vlan_tci);

	return 0;
}

/**
 * __vlan_insert_tag - regular VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Inserts the VLAN tag into @skb as part of the payload.
 * Returns error if skb_cow_head fails.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline int __vlan_insert_tag(struct sk_buff *skb,
				    __be16 vlan_proto, u16 vlan_tci)
{
	return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
}

/**
 * vlan_insert_inner_tag - inner VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 * @mac_len: MAC header length including outer vlan headers
 *
 * Inserts the VLAN tag into @skb as part of the payload at offset @mac_len.
 * Returns a VLAN tagged skb. This might change skb->head.
 *
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb,
						    __be16 vlan_proto,
						    u16 vlan_tci,
						    unsigned int mac_len)
{
	int err;

	err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len);
	if (err) {
		dev_kfree_skb_any(skb);
		return NULL;
	}
	return skb;
}

/**
 * vlan_insert_tag - regular VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Inserts the VLAN tag into @skb as part of the payload.
 * Returns a VLAN tagged skb. This might change skb->head.
 *
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 *
 * Does not change skb->protocol so this function can be used during receive.
 */
static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
					      __be16 vlan_proto, u16 vlan_tci)
{
	return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN);
}

/**
 * vlan_insert_tag_set_proto - regular VLAN tag inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Inserts the VLAN tag into @skb as part of the payload.
 * Returns a VLAN tagged skb. This might change skb->head.
 *
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 */
static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
							__be16 vlan_proto,
							u16 vlan_tci)
{
	skb = vlan_insert_tag(skb, vlan_proto, vlan_tci);
	if (skb)
		skb->protocol = vlan_proto;
	return skb;
}

/**
 * __vlan_hwaccel_clear_tag - clear hardware accelerated VLAN info
 * @skb: skbuff to clear
 *
 * Clears the VLAN information from @skb
 */
static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
{
	skb->vlan_all = 0;
}

/**
 * __vlan_hwaccel_copy_tag - copy hardware accelerated VLAN info from another skb
 * @dst: skbuff to copy to
 * @src: skbuff to copy from
 *
 * Copies VLAN information from @src to @dst (for branchless code)
 */
static inline void __vlan_hwaccel_copy_tag(struct sk_buff *dst, const struct sk_buff *src)
{
	dst->vlan_all = src->vlan_all;
}

/**
 * __vlan_hwaccel_push_inside - pushes vlan tag to the payload
 * @skb: skbuff to tag
 *
 * Pushes the VLAN tag from @skb->vlan_tci inside to the payload.
 *
 * Following the skb_unshare() example, in case of error, the calling function
 * doesn't have to worry about freeing the original skb.
 */
static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
{
	skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
					skb_vlan_tag_get(skb));
	if (likely(skb))
		__vlan_hwaccel_clear_tag(skb);
	return skb;
}
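
/*
 * Illustrative driver-side sketch (assumed caller, not a kernel helper):
 * a ndo_start_xmit() implementation whose hardware cannot offload the
 * tag for a given protocol can fall back to software insertion:
 *
 *	if (skb_vlan_tag_present(skb) &&
 *	    !vlan_hw_offload_capable(dev->features, skb->vlan_proto)) {
 *		skb = __vlan_hwaccel_push_inside(skb);
 *		if (!skb)
 *			return NETDEV_TX_OK;	(the skb was already freed)
 *	}
 */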

/**
 * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
 * @skb: skbuff to tag
 * @vlan_proto: VLAN encapsulation protocol
 * @vlan_tci: VLAN TCI to insert
 *
 * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
 */
static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
					  __be16 vlan_proto, u16 vlan_tci)
{
	skb->vlan_proto = vlan_proto;
	skb->vlan_tci = vlan_tci;
}

/**
 * __vlan_get_tag - get the VLAN TCI that is part of the payload
 * @skb: skbuff to query
 * @vlan_tci: buffer to store value
 *
 * Returns error if the skb is not of VLAN type
 */
static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
	struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);

	if (!eth_type_vlan(veth->h_vlan_proto))
		return -ENODATA;

	*vlan_tci = ntohs(veth->h_vlan_TCI);
	return 0;
}

/**
 * __vlan_hwaccel_get_tag - get the VLAN TCI that is in @skb->vlan_tci
 * @skb: skbuff to query
 * @vlan_tci: buffer to store value
 *
 * Returns error if @skb->vlan_tci is not set correctly
 */
static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
					 u16 *vlan_tci)
{
	if (skb_vlan_tag_present(skb)) {
		*vlan_tci = skb_vlan_tag_get(skb);
		return 0;
	} else {
		*vlan_tci = 0;
		return -ENODATA;
	}
}

/**
 * vlan_get_tag - get the VLAN TCI from the skb
 * @skb: skbuff to query
 * @vlan_tci: buffer to store value
 *
 * Returns error if the skb is not VLAN tagged
 */
static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
{
	if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
		return __vlan_hwaccel_get_tag(skb, vlan_tci);
	} else {
		return __vlan_get_tag(skb, vlan_tci);
	}
}
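
/*
 * Illustrative sketch (not a kernel helper): reading the tag without
 * caring whether it lives in the hwaccel field or in the payload:
 *
 *	u16 tci;
 *
 *	if (!vlan_get_tag(skb, &tci))
 *		pr_debug("vid %u prio %u\n",
 *			 tci & VLAN_VID_MASK,
 *			 (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
 */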

/**
 * __vlan_get_protocol - get protocol EtherType.
 * @skb: skbuff to query
 * @type: first vlan protocol
 * @depth: buffer to store length of eth and vlan tags in bytes
 *
 * Returns the EtherType of the packet, regardless of whether it is
 * vlan encapsulated (normal or hardware accelerated) or not.
 */
static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
					 int *depth)
{
	unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH;

	/* if type is 802.1Q/AD then the header should already be
	 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
	 * ETH_HLEN otherwise
	 */
	if (eth_type_vlan(type)) {
		if (vlan_depth) {
			if (WARN_ON(vlan_depth < VLAN_HLEN))
				return 0;
			vlan_depth -= VLAN_HLEN;
		} else {
			vlan_depth = ETH_HLEN;
		}
		do {
			struct vlan_hdr vhdr, *vh;

			vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr);
			if (unlikely(!vh || !--parse_depth))
				return 0;

			type = vh->h_vlan_encapsulated_proto;
			vlan_depth += VLAN_HLEN;
		} while (eth_type_vlan(type));
	}

	if (depth)
		*depth = vlan_depth;

	return type;
}

/**
 * vlan_get_protocol - get protocol EtherType.
 * @skb: skbuff to query
 *
 * Returns the EtherType of the packet, regardless of whether it is
 * vlan encapsulated (normal or hardware accelerated) or not.
 */
static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
{
	return __vlan_get_protocol(skb, skb->protocol, NULL);
}
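
/*
 * Illustrative sketch (not a kernel helper): finding the L3 EtherType
 * and the total ETH + VLAN header length of a possibly QinQ-stacked
 * frame, with the depth reported by __vlan_get_protocol():
 *
 *	int depth;
 *	__be16 proto = __vlan_get_protocol(skb, skb->protocol, &depth);
 *
 *	if (proto == htons(ETH_P_IP))
 *		handle_ipv4_at(skb->data + depth);	(hypothetical helper)
 */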

/* This version of __vlan_get_protocol() also pulls mac header in skb->head */
static inline __be16 vlan_get_protocol_and_depth(struct sk_buff *skb,
						 __be16 type, int *depth)
{
	int maclen;

	type = __vlan_get_protocol(skb, type, &maclen);

	if (type) {
		if (!pskb_may_pull(skb, maclen))
			type = 0;
		else if (depth)
			*depth = maclen;
	}
	return type;
}

/* A getter for the SKB protocol field which will handle VLAN tags consistently
 * whether VLAN acceleration is enabled or not.
 */
static inline __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan)
{
	if (!skip_vlan)
		/* VLAN acceleration strips the VLAN header from the skb and
		 * moves it to skb->vlan_proto
		 */
		return skb_vlan_tag_present(skb) ? skb->vlan_proto : skb->protocol;

	return vlan_get_protocol(skb);
}

static inline void vlan_set_encap_proto(struct sk_buff *skb,
					struct vlan_hdr *vhdr)
{
	__be16 proto;
	unsigned short *rawp;

	/*
	 * Was a VLAN packet, grab the encapsulated protocol, which the layer
	 * three protocols care about.
	 */

	proto = vhdr->h_vlan_encapsulated_proto;
	if (eth_proto_is_802_3(proto)) {
		skb->protocol = proto;
		return;
	}

	rawp = (unsigned short *)(vhdr + 1);
	if (*rawp == 0xFFFF)
		/*
		 * This is a magic hack to spot IPX packets. Older Novell
		 * breaks the protocol design and runs IPX over 802.3 without
		 * an 802.2 LLC layer. We look for FFFF which isn't a used
		 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
		 * but does for the rest.
		 */
		skb->protocol = htons(ETH_P_802_3);
	else
		/*
		 * Real 802.2 LLC
		 */
		skb->protocol = htons(ETH_P_802_2);
}

/**
 * vlan_remove_tag - remove outer VLAN tag from payload
 * @skb: skbuff to remove tag from
 * @vlan_tci: buffer to store value
 *
 * Expects the skb to contain a VLAN tag in the payload, and to have skb->data
 * pointing at the MAC header.
 *
 * Returns a new pointer to skb->data, or NULL on failure to pull.
 */
static inline void *vlan_remove_tag(struct sk_buff *skb, u16 *vlan_tci)
{
	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);

	*vlan_tci = ntohs(vhdr->h_vlan_TCI);

	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
	vlan_set_encap_proto(skb, vhdr);
	return __skb_pull(skb, VLAN_HLEN);
}
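
/*
 * Illustrative sketch (not a kernel helper): untagging a frame whose
 * skb->data points at the MAC header, after checking that an in-payload
 * tag is actually present:
 *
 *	u16 tci;
 *
 *	if (eth_type_vlan(skb_vlan_eth_hdr(skb)->h_vlan_proto))
 *		vlan_remove_tag(skb, &tci);
 */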

/**
 * skb_vlan_tagged - check if skb is vlan tagged.
 * @skb: skbuff to query
 *
 * Returns true if the skb is tagged, regardless of whether it is hardware
 * accelerated or not.
 */
static inline bool skb_vlan_tagged(const struct sk_buff *skb)
{
	if (!skb_vlan_tag_present(skb) &&
	    likely(!eth_type_vlan(skb->protocol)))
		return false;

	return true;
}

/**
 * skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers.
 * @skb: skbuff to query
 *
 * Returns true if the skb is tagged with multiple vlan headers, regardless
 * of whether it is hardware accelerated or not.
 */
static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;

	if (!skb_vlan_tag_present(skb)) {
		struct vlan_ethhdr *veh;

		if (likely(!eth_type_vlan(protocol)))
			return false;

		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
			return false;

		veh = skb_vlan_eth_hdr(skb);
		protocol = veh->h_vlan_encapsulated_proto;
	}

	if (!eth_type_vlan(protocol))
		return false;

	return true;
}

/**
 * vlan_features_check - drop unsafe features for skb with multiple tags.
 * @skb: skbuff to query
 * @features: features to be checked
 *
 * Returns features without unsafe ones if the skb has multiple tags.
 */
static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
						    netdev_features_t features)
{
	if (skb_vlan_tagged_multi(skb)) {
		/* In the case of multi-tagged packets, use a direct mask
		 * instead of using netdev_intersect_features(), to make
		 * sure that only devices supporting NETIF_F_HW_CSUM will
		 * have checksum offloading support.
		 */
		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
			    NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_STAG_TX;
	}

	return features;
}

/**
 * compare_vlan_header - Compare two vlan headers
 * @h1: Pointer to vlan header
 * @h2: Pointer to vlan header
 *
 * Compare two vlan headers, returns 0 if equal.
 *
 * Please note that alignment of h1 & h2 is only guaranteed to be 16 bits.
 */
static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1,
						const struct vlan_hdr *h2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return *(u32 *)h1 ^ *(u32 *)h2;
#else
	return ((__force u32)h1->h_vlan_TCI ^ (__force u32)h2->h_vlan_TCI) |
	       ((__force u32)h1->h_vlan_encapsulated_proto ^
		(__force u32)h2->h_vlan_encapsulated_proto);
#endif
}
#endif /* !(_LINUX_IF_VLAN_H_) */