1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
2 | /* |
3 | * VLAN An implementation of 802.1Q VLAN tagging. |
4 | * |
5 | * Authors: Ben Greear <greearb@candelatech.com> |
6 | */ |
7 | #ifndef _LINUX_IF_VLAN_H_ |
8 | #define _LINUX_IF_VLAN_H_ |
9 | |
10 | #include <linux/netdevice.h> |
11 | #include <linux/etherdevice.h> |
12 | #include <linux/rtnetlink.h> |
13 | #include <linux/bug.h> |
14 | #include <uapi/linux/if_vlan.h> |
15 | |
16 | #define VLAN_HLEN 4 /* The additional bytes required by VLAN |
17 | * (in addition to the Ethernet header) |
18 | */ |
19 | #define VLAN_ETH_HLEN 18 /* Total octets in header. */ |
20 | #define VLAN_ETH_ZLEN 64 /* Min. octets in frame sans FCS */ |
21 | |
22 | /* |
23 | * According to 802.3ac, the packet can be 4 bytes longer. --Klika Jan |
24 | */ |
25 | #define VLAN_ETH_DATA_LEN 1500 /* Max. octets in payload */ |
26 | #define VLAN_ETH_FRAME_LEN 1518 /* Max. octets in frame sans FCS */ |
27 | |
28 | #define VLAN_MAX_DEPTH 8 /* Max. number of nested VLAN tags parsed */ |
29 | |
30 | /* |
31 | * struct vlan_hdr - vlan header |
32 | * @h_vlan_TCI: priority and VLAN ID |
33 | * @h_vlan_encapsulated_proto: packet type ID or len |
34 | */ |
35 | struct vlan_hdr { |
36 | __be16 h_vlan_TCI; |
37 | __be16 h_vlan_encapsulated_proto; |
38 | }; |
39 | |
40 | /** |
41 | * struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr) |
42 | * @h_dest: destination ethernet address |
43 | * @h_source: source ethernet address |
44 | * @h_vlan_proto: ethernet protocol |
45 | * @h_vlan_TCI: priority and VLAN ID |
46 | * @h_vlan_encapsulated_proto: packet type ID or len |
47 | */ |
48 | struct vlan_ethhdr { |
49 | struct_group(addrs, |
50 | unsigned char h_dest[ETH_ALEN]; |
51 | unsigned char h_source[ETH_ALEN]; |
52 | ); |
53 | __be16 h_vlan_proto; |
54 | __be16 h_vlan_TCI; |
55 | __be16 h_vlan_encapsulated_proto; |
56 | }; |
57 | |
58 | #include <linux/skbuff.h> |
59 | |
60 | static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) |
61 | { |
62 | return (struct vlan_ethhdr *)skb_mac_header(skb); |
63 | } |
64 | |
65 | #define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */ |
66 | #define VLAN_PRIO_SHIFT 13 |
67 | #define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator / Drop Eligible Indicator */ |
68 | #define VLAN_VID_MASK 0x0fff /* VLAN Identifier */ |
69 | #define VLAN_N_VID 4096 |
70 | |
71 | /* found in socket.c */ |
72 | extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); |
73 | |
74 | static inline bool is_vlan_dev(const struct net_device *dev) |
75 | { |
76 | return dev->priv_flags & IFF_802_1Q_VLAN; |
77 | } |
78 | |
79 | #define skb_vlan_tag_present(__skb) ((__skb)->vlan_present) |
80 | #define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci) |
81 | #define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK) |
82 | #define skb_vlan_tag_get_cfi(__skb) (!!((__skb)->vlan_tci & VLAN_CFI_MASK)) |
83 | #define skb_vlan_tag_get_prio(__skb) (((__skb)->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT) |
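
/*
 * Example (illustrative sketch, not part of the API): pulling a TCI
 * apart with the masks above; "tci", "vid", "prio" and "dei" are
 * hypothetical locals.
 *
 *	if (skb_vlan_tag_present(skb)) {
 *		u16 tci = skb_vlan_tag_get(skb);
 *		u16 vid = tci & VLAN_VID_MASK;
 *		u8 prio = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
 *		bool dei = !!(tci & VLAN_CFI_MASK);
 *	}
 */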
84 | |
85 | static inline int vlan_get_rx_ctag_filter_info(struct net_device *dev) |
86 | { |
87 | ASSERT_RTNL(); |
88 | return notifier_to_errno(call_netdevice_notifiers(NETDEV_CVLAN_FILTER_PUSH_INFO, dev)); |
89 | } |
90 | |
91 | static inline void vlan_drop_rx_ctag_filter_info(struct net_device *dev) |
92 | { |
93 | ASSERT_RTNL(); |
94 | call_netdevice_notifiers(NETDEV_CVLAN_FILTER_DROP_INFO, dev); |
95 | } |
96 | |
97 | static inline int vlan_get_rx_stag_filter_info(struct net_device *dev) |
98 | { |
99 | ASSERT_RTNL(); |
100 | return notifier_to_errno(call_netdevice_notifiers(NETDEV_SVLAN_FILTER_PUSH_INFO, dev)); |
101 | } |
102 | |
103 | static inline void vlan_drop_rx_stag_filter_info(struct net_device *dev) |
104 | { |
105 | ASSERT_RTNL(); |
106 | call_netdevice_notifiers(NETDEV_SVLAN_FILTER_DROP_INFO, dev); |
107 | } |
108 | |
109 | /** |
110 | * struct vlan_pcpu_stats - VLAN percpu rx/tx stats |
111 | * @rx_packets: number of received packets |
112 | * @rx_bytes: number of received bytes |
113 | * @rx_multicast: number of received multicast packets |
114 | * @tx_packets: number of transmitted packets |
115 | * @tx_bytes: number of transmitted bytes |
116 | * @syncp: synchronization point for 64bit counters |
117 | * @rx_errors: number of rx errors |
118 | * @tx_dropped: number of tx drops |
119 | */ |
120 | struct vlan_pcpu_stats { |
121 | u64_stats_t rx_packets; |
122 | u64_stats_t rx_bytes; |
123 | u64_stats_t rx_multicast; |
124 | u64_stats_t tx_packets; |
125 | u64_stats_t tx_bytes; |
126 | struct u64_stats_sync syncp; |
127 | u32 rx_errors; |
128 | u32 tx_dropped; |
129 | }; |
130 | |
131 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) |
132 | |
133 | extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev, |
134 | __be16 vlan_proto, u16 vlan_id); |
135 | extern int vlan_for_each(struct net_device *dev, |
136 | int (*action)(struct net_device *dev, int vid, |
137 | void *arg), void *arg); |
138 | extern struct net_device *vlan_dev_real_dev(const struct net_device *dev); |
139 | extern u16 vlan_dev_vlan_id(const struct net_device *dev); |
140 | extern __be16 vlan_dev_vlan_proto(const struct net_device *dev); |
141 | |
142 | /** |
143 | * struct vlan_priority_tci_mapping - vlan egress priority mappings |
144 | * @priority: skb priority |
145 | * @vlan_qos: vlan priority: (skb->priority << 13) & 0xE000 |
146 | * @next: pointer to next struct |
147 | */ |
148 | struct vlan_priority_tci_mapping { |
149 | u32 priority; |
150 | u16 vlan_qos; |
151 | struct vlan_priority_tci_mapping *next; |
152 | }; |
153 | |
154 | struct proc_dir_entry; |
155 | struct netpoll; |
156 | |
157 | /** |
158 | * struct vlan_dev_priv - VLAN private device data |
159 | * @nr_ingress_mappings: number of ingress priority mappings |
160 | * @ingress_priority_map: ingress priority mappings |
161 | * @nr_egress_mappings: number of egress priority mappings |
162 | * @egress_priority_map: hash of egress priority mappings |
163 | * @vlan_proto: VLAN encapsulation protocol |
164 | * @vlan_id: VLAN identifier |
165 | * @flags: device flags |
166 | * @real_dev: underlying netdevice |
167 | * @dev_tracker: refcount tracker for @real_dev reference |
168 | * @real_dev_addr: address of underlying netdevice |
169 | * @dent: proc dir entry |
170 | * @vlan_pcpu_stats: ptr to percpu rx stats |
171 | */ |
172 | struct vlan_dev_priv { |
173 | unsigned int nr_ingress_mappings; |
174 | u32 ingress_priority_map[8]; |
175 | unsigned int nr_egress_mappings; |
176 | struct vlan_priority_tci_mapping *egress_priority_map[16]; |
177 | |
178 | __be16 vlan_proto; |
179 | u16 vlan_id; |
180 | u16 flags; |
181 | |
182 | struct net_device *real_dev; |
183 | netdevice_tracker dev_tracker; |
184 | |
185 | unsigned char real_dev_addr[ETH_ALEN]; |
186 | |
187 | struct proc_dir_entry *dent; |
188 | struct vlan_pcpu_stats __percpu *vlan_pcpu_stats; |
189 | #ifdef CONFIG_NET_POLL_CONTROLLER |
190 | struct netpoll *netpoll; |
191 | #endif |
192 | }; |
193 | |
194 | static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev) |
195 | { |
196 | return netdev_priv(dev); |
197 | } |
198 | |
199 | static inline u16 |
200 | vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio) |
201 | { |
202 | struct vlan_priority_tci_mapping *mp; |
203 | |
204 | smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */ |
205 | |
206 | mp = vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)]; |
207 | while (mp) { |
208 | if (mp->priority == skprio) { |
209 | return mp->vlan_qos; /* This should already be shifted |
210 | * to mask correctly with the |
211 | * VLAN's TCI */ |
212 | } |
213 | mp = mp->next; |
214 | } |
215 | return 0; |
216 | } |
217 | |
218 | extern bool vlan_do_receive(struct sk_buff **skb); |
219 | |
220 | extern int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid); |
221 | extern void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid); |
222 | |
223 | extern int vlan_vids_add_by_dev(struct net_device *dev, |
224 | const struct net_device *by_dev); |
225 | extern void vlan_vids_del_by_dev(struct net_device *dev, |
226 | const struct net_device *by_dev); |
227 | |
228 | extern bool vlan_uses_dev(const struct net_device *dev); |
229 | |
230 | #else |
231 | static inline struct net_device * |
232 | __vlan_find_dev_deep_rcu(struct net_device *real_dev, |
233 | __be16 vlan_proto, u16 vlan_id) |
234 | { |
235 | return NULL; |
236 | } |
237 | |
238 | static inline int |
239 | vlan_for_each(struct net_device *dev, |
240 | int (*action)(struct net_device *dev, int vid, void *arg), |
241 | void *arg) |
242 | { |
243 | return 0; |
244 | } |
245 | |
246 | static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev) |
247 | { |
248 | BUG(); |
249 | return NULL; |
250 | } |
251 | |
252 | static inline u16 vlan_dev_vlan_id(const struct net_device *dev) |
253 | { |
254 | BUG(); |
255 | return 0; |
256 | } |
257 | |
258 | static inline __be16 vlan_dev_vlan_proto(const struct net_device *dev) |
259 | { |
260 | BUG(); |
261 | return 0; |
262 | } |
263 | |
264 | static inline u16 vlan_dev_get_egress_qos_mask(struct net_device *dev, |
265 | u32 skprio) |
266 | { |
267 | return 0; |
268 | } |
269 | |
270 | static inline bool vlan_do_receive(struct sk_buff **skb) |
271 | { |
272 | return false; |
273 | } |
274 | |
275 | static inline int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid) |
276 | { |
277 | return 0; |
278 | } |
279 | |
280 | static inline void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid) |
281 | { |
282 | } |
283 | |
284 | static inline int vlan_vids_add_by_dev(struct net_device *dev, |
285 | const struct net_device *by_dev) |
286 | { |
287 | return 0; |
288 | } |
289 | |
290 | static inline void vlan_vids_del_by_dev(struct net_device *dev, |
291 | const struct net_device *by_dev) |
292 | { |
293 | } |
294 | |
295 | static inline bool vlan_uses_dev(const struct net_device *dev) |
296 | { |
297 | return false; |
298 | } |
299 | #endif |
300 | |
301 | /** |
302 | * eth_type_vlan - check for valid vlan ether type. |
303 | * @ethertype: ether type to check |
304 | * |
305 | * Returns true if the ether type is a vlan ether type. |
306 | */ |
307 | static inline bool eth_type_vlan(__be16 ethertype) |
308 | { |
309 | switch (ethertype) { |
310 | case htons(ETH_P_8021Q): |
311 | case htons(ETH_P_8021AD): |
312 | return true; |
313 | default: |
314 | return false; |
315 | } |
316 | } |
317 | |
318 | static inline bool vlan_hw_offload_capable(netdev_features_t features, |
319 | __be16 proto) |
320 | { |
321 | if (proto == htons(ETH_P_8021Q) && features & NETIF_F_HW_VLAN_CTAG_TX) |
322 | return true; |
323 | if (proto == htons(ETH_P_8021AD) && features & NETIF_F_HW_VLAN_STAG_TX) |
324 | return true; |
325 | return false; |
326 | } |
327 | |
328 | /** |
329 | * __vlan_insert_inner_tag - inner VLAN tag inserting |
330 | * @skb: skbuff to tag |
331 | * @vlan_proto: VLAN encapsulation protocol |
332 | * @vlan_tci: VLAN TCI to insert |
333 | * @mac_len: MAC header length including outer vlan headers |
334 | * |
335 | * Inserts the VLAN tag into @skb as part of the payload at offset mac_len |
336 | * Returns error if skb_cow_head fails. |
337 | * |
338 | * Does not change skb->protocol so this function can be used during receive. |
339 | */ |
340 | static inline int __vlan_insert_inner_tag(struct sk_buff *skb, |
341 | __be16 vlan_proto, u16 vlan_tci, |
342 | unsigned int mac_len) |
343 | { |
344 | struct vlan_ethhdr *veth; |
345 | |
346 | if (skb_cow_head(skb, VLAN_HLEN) < 0) |
347 | return -ENOMEM; |
348 | |
349 | skb_push(skb, VLAN_HLEN); |
350 | |
351 | /* Move the mac header sans proto to the beginning of the new header. */ |
352 | if (likely(mac_len > ETH_TLEN)) |
353 | memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN); |
354 | skb->mac_header -= VLAN_HLEN; |
355 | |
356 | veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN); |
357 | |
358 | /* first, the ethernet type */ |
359 | if (likely(mac_len >= ETH_TLEN)) { |
360 | /* h_vlan_encapsulated_proto should already be populated, and |
361 | * skb->data has space for h_vlan_proto |
362 | */ |
363 | veth->h_vlan_proto = vlan_proto; |
364 | } else { |
365 | /* h_vlan_encapsulated_proto should not be populated, and |
366 | * skb->data has no space for h_vlan_proto |
367 | */ |
368 | veth->h_vlan_encapsulated_proto = skb->protocol; |
369 | } |
370 | |
371 | /* now, the TCI */ |
372 | veth->h_vlan_TCI = htons(vlan_tci); |
373 | |
374 | return 0; |
375 | } |
376 | |
377 | /** |
378 | * __vlan_insert_tag - regular VLAN tag inserting |
379 | * @skb: skbuff to tag |
380 | * @vlan_proto: VLAN encapsulation protocol |
381 | * @vlan_tci: VLAN TCI to insert |
382 | * |
383 | * Inserts the VLAN tag into @skb as part of the payload |
384 | * Returns error if skb_cow_head fails. |
385 | * |
386 | * Does not change skb->protocol so this function can be used during receive. |
387 | */ |
388 | static inline int __vlan_insert_tag(struct sk_buff *skb, |
389 | __be16 vlan_proto, u16 vlan_tci) |
390 | { |
391 | return __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN); |
392 | } |
393 | |
394 | /** |
395 | * vlan_insert_inner_tag - inner VLAN tag inserting |
396 | * @skb: skbuff to tag |
397 | * @vlan_proto: VLAN encapsulation protocol |
398 | * @vlan_tci: VLAN TCI to insert |
399 | * @mac_len: MAC header length including outer vlan headers |
400 | * |
401 | * Inserts the VLAN tag into @skb as part of the payload at offset mac_len |
402 | * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. |
403 | * |
404 | * Following the skb_unshare() example, in case of error, the calling function |
405 | * doesn't have to worry about freeing the original skb. |
406 | * |
407 | * Does not change skb->protocol so this function can be used during receive. |
408 | */ |
409 | static inline struct sk_buff *vlan_insert_inner_tag(struct sk_buff *skb, |
410 | __be16 vlan_proto, |
411 | u16 vlan_tci, |
412 | unsigned int mac_len) |
413 | { |
414 | int err; |
415 | |
416 | err = __vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, mac_len); |
417 | if (err) { |
418 | dev_kfree_skb_any(skb); |
419 | return NULL; |
420 | } |
421 | return skb; |
422 | } |
423 | |
424 | /** |
425 | * vlan_insert_tag - regular VLAN tag inserting |
426 | * @skb: skbuff to tag |
427 | * @vlan_proto: VLAN encapsulation protocol |
428 | * @vlan_tci: VLAN TCI to insert |
429 | * |
430 | * Inserts the VLAN tag into @skb as part of the payload |
431 | * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. |
432 | * |
433 | * Following the skb_unshare() example, in case of error, the calling function |
434 | * doesn't have to worry about freeing the original skb. |
435 | * |
436 | * Does not change skb->protocol so this function can be used during receive. |
437 | */ |
438 | static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, |
439 | __be16 vlan_proto, u16 vlan_tci) |
440 | { |
441 | return vlan_insert_inner_tag(skb, vlan_proto, vlan_tci, ETH_HLEN); |
442 | } |
443 | |
444 | /** |
445 | * vlan_insert_tag_set_proto - regular VLAN tag inserting |
446 | * @skb: skbuff to tag |
447 | * @vlan_proto: VLAN encapsulation protocol |
448 | * @vlan_tci: VLAN TCI to insert |
449 | * |
450 | * Inserts the VLAN tag into @skb as part of the payload |
451 | * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. |
452 | * |
453 | * Following the skb_unshare() example, in case of error, the calling function |
454 | * doesn't have to worry about freeing the original skb. |
455 | */ |
456 | static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb, |
457 | __be16 vlan_proto, |
458 | u16 vlan_tci) |
459 | { |
460 | skb = vlan_insert_tag(skb, vlan_proto, vlan_tci); |
461 | if (skb) |
462 | skb->protocol = vlan_proto; |
463 | return skb; |
464 | } |
465 | |
466 | /** |
467 | * __vlan_hwaccel_clear_tag - clear hardware accelerated VLAN info |
468 | * @skb: skbuff to clear |
469 | * |
470 | * Clears the VLAN information from @skb |
471 | */ |
472 | static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb) |
473 | { |
474 | skb->vlan_present = 0; |
475 | } |
476 | |
477 | /** |
478 | * __vlan_hwaccel_copy_tag - copy hardware accelerated VLAN info from another skb |
479 | * @dst: skbuff to copy to |
480 | * @src: skbuff to copy from |
481 | * |
482 | * Copies VLAN information from @src to @dst (for branchless code) |
483 | */ |
484 | static inline void __vlan_hwaccel_copy_tag(struct sk_buff *dst, const struct sk_buff *src) |
485 | { |
486 | dst->vlan_present = src->vlan_present; |
487 | dst->vlan_proto = src->vlan_proto; |
488 | dst->vlan_tci = src->vlan_tci; |
489 | } |
490 | |
491 | /* |
492 | * __vlan_hwaccel_push_inside - pushes vlan tag to the payload |
493 | * @skb: skbuff to tag |
494 | * |
495 | * Pushes the VLAN tag from @skb->vlan_tci inside to the payload. |
496 | * |
497 | * Following the skb_unshare() example, in case of error, the calling function |
498 | * doesn't have to worry about freeing the original skb. |
499 | */ |
500 | static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb) |
501 | { |
502 | skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto, |
503 | skb_vlan_tag_get(skb)); |
504 | if (likely(skb)) |
505 | __vlan_hwaccel_clear_tag(skb); |
506 | return skb; |
507 | } |
508 | |
509 | /** |
510 | * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting |
511 | * @skb: skbuff to tag |
512 | * @vlan_proto: VLAN encapsulation protocol |
513 | * @vlan_tci: VLAN TCI to insert |
514 | * |
515 | * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest |
516 | */ |
517 | static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb, |
518 | __be16 vlan_proto, u16 vlan_tci) |
519 | { |
520 | skb->vlan_proto = vlan_proto; |
521 | skb->vlan_tci = vlan_tci; |
522 | skb->vlan_present = 1; |
523 | } |
524 | |
525 | /** |
526 | * __vlan_get_tag - get the VLAN ID that is part of the payload |
527 | * @skb: skbuff to query |
528 | * @vlan_tci: buffer to store value |
529 | * |
530 | * Returns error if the skb is not of VLAN type |
531 | */ |
532 | static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) |
533 | { |
534 | struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data; |
535 | |
536 | if (!eth_type_vlan(veth->h_vlan_proto)) |
537 | return -EINVAL; |
538 | |
539 | *vlan_tci = ntohs(veth->h_vlan_TCI); |
540 | return 0; |
541 | } |
542 | |
543 | /** |
 * __vlan_hwaccel_get_tag - get the VLAN ID that is in @skb->vlan_tci
545 | * @skb: skbuff to query |
546 | * @vlan_tci: buffer to store value |
547 | * |
548 | * Returns error if @skb->vlan_tci is not set correctly |
549 | */ |
550 | static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb, |
551 | u16 *vlan_tci) |
552 | { |
553 | if (skb_vlan_tag_present(skb)) { |
554 | *vlan_tci = skb_vlan_tag_get(skb); |
555 | return 0; |
556 | } else { |
557 | *vlan_tci = 0; |
558 | return -EINVAL; |
559 | } |
560 | } |
561 | |
562 | /** |
563 | * vlan_get_tag - get the VLAN ID from the skb |
564 | * @skb: skbuff to query |
565 | * @vlan_tci: buffer to store value |
566 | * |
567 | * Returns error if the skb is not VLAN tagged |
568 | */ |
569 | static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) |
570 | { |
571 | if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX) { |
572 | return __vlan_hwaccel_get_tag(skb, vlan_tci); |
573 | } else { |
574 | return __vlan_get_tag(skb, vlan_tci); |
575 | } |
576 | } |
577 | |
578 | /** |
 * __vlan_get_protocol - get protocol EtherType.
580 | * @skb: skbuff to query |
581 | * @type: first vlan protocol |
582 | * @depth: buffer to store length of eth and vlan tags in bytes |
583 | * |
584 | * Returns the EtherType of the packet, regardless of whether it is |
585 | * vlan encapsulated (normal or hardware accelerated) or not. |
586 | */ |
587 | static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, |
588 | int *depth) |
589 | { |
590 | unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH; |
591 | |
592 | /* if type is 802.1Q/AD then the header should already be |
593 | * present at mac_len - VLAN_HLEN (if mac_len > 0), or at |
594 | * ETH_HLEN otherwise |
595 | */ |
596 | if (eth_type_vlan(type)) { |
597 | if (vlan_depth) { |
598 | if (WARN_ON(vlan_depth < VLAN_HLEN)) |
599 | return 0; |
600 | vlan_depth -= VLAN_HLEN; |
601 | } else { |
602 | vlan_depth = ETH_HLEN; |
603 | } |
604 | do { |
605 | struct vlan_hdr vhdr, *vh; |
606 | |
607 | vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr); |
608 | if (unlikely(!vh || !--parse_depth)) |
609 | return 0; |
610 | |
611 | type = vh->h_vlan_encapsulated_proto; |
612 | vlan_depth += VLAN_HLEN; |
613 | } while (eth_type_vlan(type)); |
614 | } |
615 | |
616 | if (depth) |
617 | *depth = vlan_depth; |
618 | |
619 | return type; |
620 | } |
621 | |
622 | /** |
623 | * vlan_get_protocol - get protocol EtherType. |
624 | * @skb: skbuff to query |
625 | * |
626 | * Returns the EtherType of the packet, regardless of whether it is |
627 | * vlan encapsulated (normal or hardware accelerated) or not. |
628 | */ |
629 | static inline __be16 vlan_get_protocol(const struct sk_buff *skb) |
630 | { |
631 | return __vlan_get_protocol(skb, skb->protocol, NULL); |
632 | } |
633 | |
634 | /* A getter for the SKB protocol field which will handle VLAN tags consistently |
635 | * whether VLAN acceleration is enabled or not. |
636 | */ |
637 | static inline __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan) |
638 | { |
639 | if (!skip_vlan) |
640 | /* VLAN acceleration strips the VLAN header from the skb and |
641 | * moves it to skb->vlan_proto |
642 | */ |
643 | return skb_vlan_tag_present(skb) ? skb->vlan_proto : skb->protocol; |
644 | |
645 | return vlan_get_protocol(skb); |
646 | } |
647 | |
648 | static inline void vlan_set_encap_proto(struct sk_buff *skb, |
649 | struct vlan_hdr *vhdr) |
650 | { |
651 | __be16 proto; |
652 | unsigned short *rawp; |
653 | |
654 | /* |
655 | * Was a VLAN packet, grab the encapsulated protocol, which the layer |
656 | * three protocols care about. |
657 | */ |
658 | |
659 | proto = vhdr->h_vlan_encapsulated_proto; |
660 | if (eth_proto_is_802_3(proto)) { |
661 | skb->protocol = proto; |
662 | return; |
663 | } |
664 | |
665 | rawp = (unsigned short *)(vhdr + 1); |
666 | if (*rawp == 0xFFFF) |
667 | /* |
668 | * This is a magic hack to spot IPX packets. Older Novell |
669 | * breaks the protocol design and runs IPX over 802.3 without |
670 | * an 802.2 LLC layer. We look for FFFF which isn't a used |
671 | * 802.2 SSAP/DSAP. This won't work for fault tolerant netware |
672 | * but does for the rest. |
673 | */ |
674 | skb->protocol = htons(ETH_P_802_3); |
675 | else |
676 | /* |
677 | * Real 802.2 LLC |
678 | */ |
679 | skb->protocol = htons(ETH_P_802_2); |
680 | } |
681 | |
682 | /** |
683 | * skb_vlan_tagged - check if skb is vlan tagged. |
684 | * @skb: skbuff to query |
685 | * |
686 | * Returns true if the skb is tagged, regardless of whether it is hardware |
687 | * accelerated or not. |
688 | */ |
689 | static inline bool skb_vlan_tagged(const struct sk_buff *skb) |
690 | { |
691 | if (!skb_vlan_tag_present(skb) && |
692 | likely(!eth_type_vlan(skb->protocol))) |
693 | return false; |
694 | |
695 | return true; |
696 | } |
697 | |
698 | /** |
699 | * skb_vlan_tagged_multi - check if skb is vlan tagged with multiple headers. |
700 | * @skb: skbuff to query |
701 | * |
702 | * Returns true if the skb is tagged with multiple vlan headers, regardless |
703 | * of whether it is hardware accelerated or not. |
704 | */ |
705 | static inline bool skb_vlan_tagged_multi(struct sk_buff *skb) |
706 | { |
707 | __be16 protocol = skb->protocol; |
708 | |
709 | if (!skb_vlan_tag_present(skb)) { |
710 | struct vlan_ethhdr *veh; |
711 | |
712 | if (likely(!eth_type_vlan(protocol))) |
713 | return false; |
714 | |
715 | if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN))) |
716 | return false; |
717 | |
718 | veh = (struct vlan_ethhdr *)skb->data; |
719 | protocol = veh->h_vlan_encapsulated_proto; |
720 | } |
721 | |
722 | if (!eth_type_vlan(protocol)) |
723 | return false; |
724 | |
725 | return true; |
726 | } |
727 | |
728 | /** |
729 | * vlan_features_check - drop unsafe features for skb with multiple tags. |
730 | * @skb: skbuff to query |
731 | * @features: features to be checked |
732 | * |
733 | * Returns features without unsafe ones if the skb has multiple tags. |
734 | */ |
735 | static inline netdev_features_t vlan_features_check(struct sk_buff *skb, |
736 | netdev_features_t features) |
737 | { |
738 | if (skb_vlan_tagged_multi(skb)) { |
739 | /* In the case of multi-tagged packets, use a direct mask |
		 * instead of using netdev_intersect_features(), to make
741 | * sure that only devices supporting NETIF_F_HW_CSUM will |
742 | * have checksum offloading support. |
743 | */ |
744 | features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | |
745 | NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX | |
746 | NETIF_F_HW_VLAN_STAG_TX; |
747 | } |
748 | |
749 | return features; |
750 | } |
751 | |
752 | /** |
753 | * compare_vlan_header - Compare two vlan headers |
754 | * @h1: Pointer to vlan header |
755 | * @h2: Pointer to vlan header |
756 | * |
757 | * Compare two vlan headers, returns 0 if equal. |
758 | * |
 * Please note that alignment of h1 & h2 is only guaranteed to be 16 bits.
760 | */ |
static inline unsigned long compare_vlan_header(const struct vlan_hdr *h1,
						const struct vlan_hdr *h2)
763 | { |
764 | #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) |
765 | return *(u32 *)h1 ^ *(u32 *)h2; |
766 | #else |
767 | return ((__force u32)h1->h_vlan_TCI ^ (__force u32)h2->h_vlan_TCI) | |
768 | ((__force u32)h1->h_vlan_encapsulated_proto ^ |
769 | (__force u32)h2->h_vlan_encapsulated_proto); |
770 | #endif |
771 | } |
772 | #endif /* !(_LINUX_IF_VLAN_H_) */ |
773 | |