// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, arvid.brodin@alten.se
 *
 * This file contains device methods for creating, using and destroying
 * virtual HSR or PRP devices.
 */

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include "hsr_device.h"
#include "hsr_slave.h"
#include "hsr_framereg.h"
#include "hsr_main.h"
#include "hsr_forward.h"

static bool is_admin_up(struct net_device *dev)
{
	return dev && (dev->flags & IFF_UP);
}

static bool is_slave_up(struct net_device *dev)
{
	return dev && is_admin_up(dev) && netif_oper_up(dev);
}

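/* Reflect the slave ports' carrier state in the master's operstate:
 * IF_OPER_DOWN if the master is administratively down, IF_OPER_UP if it
 * has carrier, IF_OPER_LOWERLAYERDOWN otherwise.
 */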
static void hsr_set_operstate(struct hsr_port *master, bool has_carrier)
{
	struct net_device *dev = master->dev;

	if (!is_admin_up(dev)) {
		netdev_set_operstate(dev, IF_OPER_DOWN);
		return;
	}

	if (has_carrier)
		netdev_set_operstate(dev, IF_OPER_UP);
	else
		netdev_set_operstate(dev, IF_OPER_LOWERLAYERDOWN);
}

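/* The master has carrier as long as at least one slave port is up;
 * propagate that to the master device and report the result.
 */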
static bool hsr_check_carrier(struct hsr_port *master)
{
	struct hsr_port *port;

	ASSERT_RTNL();

	hsr_for_each_port(master->hsr, port) {
		if (port->type != HSR_PT_MASTER && is_slave_up(port->dev)) {
			netif_carrier_on(master->dev);
			return true;
		}
	}

	netif_carrier_off(master->dev);

	return false;
}

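/* (Re)start the announce timer when the master transitions to IF_OPER_UP,
 * and stop it again on the transition away from IF_OPER_UP.
 */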
static void hsr_check_announce(struct net_device *hsr_dev,
			       unsigned char old_operstate)
{
	struct hsr_priv *hsr;

	hsr = netdev_priv(hsr_dev);

	if (READ_ONCE(hsr_dev->operstate) == IF_OPER_UP &&
	    old_operstate != IF_OPER_UP) {
		/* Went up */
		hsr->announce_count = 0;
		mod_timer(&hsr->announce_timer,
			  jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
	}

	if (READ_ONCE(hsr_dev->operstate) != IF_OPER_UP &&
	    old_operstate == IF_OPER_UP)
		/* Went down */
		del_timer(&hsr->announce_timer);
}

void hsr_check_carrier_and_operstate(struct hsr_priv *hsr)
{
	struct hsr_port *master;
	unsigned char old_operstate;
	bool has_carrier;

	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	/* netif_stacked_transfer_operstate() cannot be used here since
	 * it doesn't set IF_OPER_LOWERLAYERDOWN (?)
	 */
	old_operstate = READ_ONCE(master->dev->operstate);
	has_carrier = hsr_check_carrier(master);
	hsr_set_operstate(master, has_carrier);
	hsr_check_announce(master->dev, old_operstate);
}

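/* The largest MTU the master can use: the smallest slave MTU minus the
 * room needed for the HSR tag (or PRP trailer), HSR_HLEN octets.
 */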
int hsr_get_max_mtu(struct hsr_priv *hsr)
{
	unsigned int mtu_max;
	struct hsr_port *port;

	mtu_max = ETH_DATA_LEN;
	hsr_for_each_port(hsr, port)
		if (port->type != HSR_PT_MASTER)
			mtu_max = min(port->dev->mtu, mtu_max);

	if (mtu_max < HSR_HLEN)
		return 0;
	return mtu_max - HSR_HLEN;
}

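/* ndo_change_mtu handler: reject any MTU that would not leave room for
 * the HSR tag on the slave devices.
 */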
static int hsr_dev_change_mtu(struct net_device *dev, int new_mtu)
{
	struct hsr_priv *hsr;

	hsr = netdev_priv(dev);

	if (new_mtu > hsr_get_max_mtu(hsr)) {
		netdev_info(dev, "A HSR master's MTU cannot be greater than the smallest MTU of its slaves minus the HSR Tag length (%d octets).\n",
			    HSR_HLEN);
		return -EINVAL;
	}

	dev->mtu = new_mtu;

	return 0;
}

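/* ndo_open handler: warn about any slave that is not up, since redundancy
 * only works while both slaves are operational.
 */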
static int hsr_dev_open(struct net_device *dev)
{
	struct hsr_priv *hsr;
	struct hsr_port *port;
	const char *designation = NULL;

	hsr = netdev_priv(dev);

	hsr_for_each_port(hsr, port) {
		if (port->type == HSR_PT_MASTER)
			continue;
		switch (port->type) {
		case HSR_PT_SLAVE_A:
			designation = "Slave A";
			break;
		case HSR_PT_SLAVE_B:
			designation = "Slave B";
			break;
		default:
			designation = "Unknown";
		}
		if (!is_slave_up(port->dev))
			netdev_warn(dev, "%s (%s) is not up; please bring it up to get a fully working HSR network\n",
				    designation, port->dev->name);
	}

	if (!designation)
		netdev_warn(dev, "No slave devices configured\n");

	return 0;
}

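/* ndo_stop handler: remove the unicast and multicast addresses that were
 * synced to the slave devices while the master was up.
 */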
static int hsr_dev_close(struct net_device *dev)
{
	struct hsr_port *port;
	struct hsr_priv *hsr;

	hsr = netdev_priv(dev);
	hsr_for_each_port(hsr, port) {
		if (port->type == HSR_PT_MASTER)
			continue;
		switch (port->type) {
		case HSR_PT_SLAVE_A:
		case HSR_PT_SLAVE_B:
			dev_uc_unsync(port->dev, dev);
			dev_mc_unsync(port->dev, dev);
			break;
		default:
			break;
		}
	}

	return 0;
}

static netdev_features_t hsr_features_recompute(struct hsr_priv *hsr,
						netdev_features_t features)
{
	netdev_features_t mask;
	struct hsr_port *port;

	mask = features;

	/* Mask out all features that, if supported by one device, should be
	 * enabled for all devices (see NETIF_F_ONE_FOR_ALL).
	 *
	 * Anything that's off in mask will not be enabled - so only things
	 * that were in features originally, and also is in NETIF_F_ONE_FOR_ALL,
	 * may become enabled.
	 */
	features &= ~NETIF_F_ONE_FOR_ALL;
	hsr_for_each_port(hsr, port)
		features = netdev_increment_features(features,
						     port->dev->features,
						     mask);

	return features;
}

static netdev_features_t hsr_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct hsr_priv *hsr = netdev_priv(dev);

	return hsr_features_recompute(hsr, features);
}

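/* ndo_start_xmit handler: hand the frame to the forwarding code under the
 * sequence number lock, so the copies sent on both slaves carry the same
 * sequence number.
 */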
static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct hsr_priv *hsr = netdev_priv(dev);
	struct hsr_port *master;

	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	if (master) {
		skb->dev = master->dev;
		skb_reset_mac_header(skb);
		skb_reset_mac_len(skb);
		spin_lock_bh(&hsr->seqnr_lock);
		hsr_forward_skb(skb, master);
		spin_unlock_bh(&hsr->seqnr_lock);
	} else {
		dev_core_stats_tx_dropped_inc(dev);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static const struct header_ops hsr_header_ops = {
	.create = eth_header,
	.parse = eth_header_parse,
};

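/* Allocate and prepare an skb for a supervision frame: reserve headroom and
 * tailroom for the slave device and build the Ethernet header addressed to
 * the supervision multicast address.
 */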
static struct sk_buff *hsr_init_skb(struct hsr_port *master)
{
	struct hsr_priv *hsr = master->hsr;
	struct sk_buff *skb;
	int hlen, tlen;

	hlen = LL_RESERVED_SPACE(master->dev);
	tlen = master->dev->needed_tailroom;
	/* skb size is same for PRP/HSR frames, only difference
	 * being, for PRP it is a trailer and for HSR it is a
	 * header
	 */
	skb = dev_alloc_skb(sizeof(struct hsr_sup_tag) +
			    sizeof(struct hsr_sup_payload) + hlen + tlen);

	if (!skb)
		return skb;

	skb_reserve(skb, hlen);
	skb->dev = master->dev;
	skb->priority = TC_PRIO_CONTROL;

	if (dev_hard_header(skb, skb->dev, ETH_P_PRP,
			    hsr->sup_multicast_addr,
			    skb->dev->dev_addr, skb->len) <= 0)
		goto out;

	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	return skb;
out:
	kfree_skb(skb);

	return NULL;
}

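/* Build and send an HSR supervision frame. For HSRv0, the first three frames
 * after link-up are ANNOUNCE frames; after that, LIFE_CHECK frames are sent.
 * The next timer interval is returned via *interval.
 */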
static void send_hsr_supervision_frame(struct hsr_port *master,
				       unsigned long *interval)
{
	struct hsr_priv *hsr = master->hsr;
	__u8 type = HSR_TLV_LIFE_CHECK;
	struct hsr_sup_payload *hsr_sp;
	struct hsr_sup_tag *hsr_stag;
	struct sk_buff *skb;

	*interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
	if (hsr->announce_count < 3 && hsr->prot_version == 0) {
		type = HSR_TLV_ANNOUNCE;
		*interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
		hsr->announce_count++;
	}

	skb = hsr_init_skb(master);
	if (!skb) {
		netdev_warn_once(master->dev, "HSR: Could not send supervision frame\n");
		return;
	}

	hsr_stag = skb_put(skb, sizeof(struct hsr_sup_tag));
	set_hsr_stag_path(hsr_stag, (hsr->prot_version ? 0x0 : 0xf));
	set_hsr_stag_HSR_ver(hsr_stag, hsr->prot_version);

	/* From HSRv1 on we have separate supervision sequence numbers. */
	spin_lock_bh(&hsr->seqnr_lock);
	if (hsr->prot_version > 0) {
		hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr);
		hsr->sup_sequence_nr++;
	} else {
		hsr_stag->sequence_nr = htons(hsr->sequence_nr);
		hsr->sequence_nr++;
	}

	hsr_stag->tlv.HSR_TLV_type = type;
	/* TODO: Why 12 in HSRv0? */
	hsr_stag->tlv.HSR_TLV_length = hsr->prot_version ?
				sizeof(struct hsr_sup_payload) : 12;

	/* Payload: MacAddressA */
	hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
	ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr);

	if (skb_put_padto(skb, ETH_ZLEN)) {
		spin_unlock_bh(&hsr->seqnr_lock);
		return;
	}

	hsr_forward_skb(skb, master);
	spin_unlock_bh(&hsr->seqnr_lock);
}

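/* Build and send a PRP supervision frame (LIFE_CHECK_DD) and return the
 * next timer interval via *interval.
 */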
static void send_prp_supervision_frame(struct hsr_port *master,
				       unsigned long *interval)
{
	struct hsr_priv *hsr = master->hsr;
	struct hsr_sup_payload *hsr_sp;
	struct hsr_sup_tag *hsr_stag;
	struct sk_buff *skb;

	skb = hsr_init_skb(master);
	if (!skb) {
		netdev_warn_once(master->dev, "PRP: Could not send supervision frame\n");
		return;
	}

	*interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
	hsr_stag = skb_put(skb, sizeof(struct hsr_sup_tag));
	set_hsr_stag_path(hsr_stag, (hsr->prot_version ? 0x0 : 0xf));
	set_hsr_stag_HSR_ver(hsr_stag, (hsr->prot_version ? 1 : 0));

	/* From HSRv1 on we have separate supervision sequence numbers. */
	spin_lock_bh(&hsr->seqnr_lock);
	hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr);
	hsr->sup_sequence_nr++;
	hsr_stag->tlv.HSR_TLV_type = PRP_TLV_LIFE_CHECK_DD;
	hsr_stag->tlv.HSR_TLV_length = sizeof(struct hsr_sup_payload);

	/* Payload: MacAddressA */
	hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
	ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr);

	if (skb_put_padto(skb, ETH_ZLEN)) {
		spin_unlock_bh(&hsr->seqnr_lock);
		return;
	}

	hsr_forward_skb(skb, master);
	spin_unlock_bh(&hsr->seqnr_lock);
}

/* Announce (supervision frame) timer function
 */
static void hsr_announce(struct timer_list *t)
{
	struct hsr_priv *hsr;
	struct hsr_port *master;
	unsigned long interval;

	hsr = from_timer(hsr, t, announce_timer);

	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	hsr->proto_ops->send_sv_frame(master, &interval);

	if (is_admin_up(master->dev))
		mod_timer(&hsr->announce_timer, jiffies + interval);

	rcu_read_unlock();
}

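/* Remove the slave ports, then the master port, from the HSR device. */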
void hsr_del_ports(struct hsr_priv *hsr)
{
	struct hsr_port *port;

	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port)
		hsr_del_port(port);

	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port)
		hsr_del_port(port);

	port = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	if (port)
		hsr_del_port(port);
}

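/* ndo_set_rx_mode handler: sync the master's unicast and multicast address
 * lists to both slave devices.
 */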
static void hsr_set_rx_mode(struct net_device *dev)
{
	struct hsr_port *port;
	struct hsr_priv *hsr;

	hsr = netdev_priv(dev);

	hsr_for_each_port(hsr, port) {
		if (port->type == HSR_PT_MASTER)
			continue;
		switch (port->type) {
		case HSR_PT_SLAVE_A:
		case HSR_PT_SLAVE_B:
			dev_mc_sync_multiple(port->dev, dev);
			dev_uc_sync_multiple(port->dev, dev);
			break;
		default:
			break;
		}
	}
}

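/* ndo_change_rx_flags handler: propagate IFF_ALLMULTI changes on the master
 * to both slave devices.
 */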
static void hsr_change_rx_flags(struct net_device *dev, int change)
{
	struct hsr_port *port;
	struct hsr_priv *hsr;

	hsr = netdev_priv(dev);

	hsr_for_each_port(hsr, port) {
		if (port->type == HSR_PT_MASTER)
			continue;
		switch (port->type) {
		case HSR_PT_SLAVE_A:
		case HSR_PT_SLAVE_B:
			if (change & IFF_ALLMULTI)
				dev_set_allmulti(port->dev,
						 dev->flags &
						 IFF_ALLMULTI ? 1 : -1);
			break;
		default:
			break;
		}
	}
}

static const struct net_device_ops hsr_device_ops = {
	.ndo_change_mtu = hsr_dev_change_mtu,
	.ndo_open = hsr_dev_open,
	.ndo_stop = hsr_dev_close,
	.ndo_start_xmit = hsr_dev_xmit,
	.ndo_change_rx_flags = hsr_change_rx_flags,
	.ndo_fix_features = hsr_fix_features,
	.ndo_set_rx_mode = hsr_set_rx_mode,
};

static const struct device_type hsr_type = {
	.name = "hsr",
};

static struct hsr_proto_ops hsr_ops = {
	.send_sv_frame = send_hsr_supervision_frame,
	.create_tagged_frame = hsr_create_tagged_frame,
	.get_untagged_frame = hsr_get_untagged_frame,
	.drop_frame = hsr_drop_frame,
	.fill_frame_info = hsr_fill_frame_info,
	.invalid_dan_ingress_frame = hsr_invalid_dan_ingress_frame,
};

static struct hsr_proto_ops prp_ops = {
	.send_sv_frame = send_prp_supervision_frame,
	.create_tagged_frame = prp_create_tagged_frame,
	.get_untagged_frame = prp_get_untagged_frame,
	.drop_frame = prp_drop_frame,
	.fill_frame_info = prp_fill_frame_info,
	.handle_san_frame = prp_handle_san_frame,
	.update_san_info = prp_update_san_info,
};

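/* Initialize the net_device for an HSR/PRP master: Ethernet defaults, our
 * netdev and header ops, and the feature flags the virtual device exposes.
 */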
void hsr_dev_setup(struct net_device *dev)
{
	eth_hw_addr_random(dev);

	ether_setup(dev);
	dev->min_mtu = 0;
	dev->header_ops = &hsr_header_ops;
	dev->netdev_ops = &hsr_device_ops;
	SET_NETDEV_DEVTYPE(dev, &hsr_type);
	dev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;

	dev->needs_free_netdev = true;

	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
			   NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_CTAG_TX;

	dev->features = dev->hw_features;

	/* Prevent recursive tx locking */
	dev->features |= NETIF_F_LLTX;
	/* VLAN on top of HSR needs testing and probably some work on
	 * hsr_header_create() etc.
	 */
	dev->features |= NETIF_F_VLAN_CHALLENGED;
	/* Not sure about this. Taken from bridge code. netdev_features.h says
	 * it means "Does not change network namespaces".
	 */
	dev->features |= NETIF_F_NETNS_LOCAL;
}

/* Return true if dev is a HSR master; return false otherwise.
 */
bool is_hsr_master(struct net_device *dev)
{
	return (dev->netdev_ops->ndo_start_xmit == hsr_dev_xmit);
}
EXPORT_SYMBOL(is_hsr_master);

/* Default multicast address for HSR Supervision frames */
static const unsigned char def_multicast_addr[ETH_ALEN] __aligned(2) = {
	0x01, 0x15, 0x4e, 0x00, 0x01, 0x00
};

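/* Second half of device creation (after hsr_dev_setup()): set up the node
 * table, protocol ops, timers and supervision address, then register the
 * master and attach both slave ports.
 */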
int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
		     unsigned char multicast_spec, u8 protocol_version,
		     struct netlink_ext_ack *extack)
{
	bool unregister = false;
	struct hsr_priv *hsr;
	int res;

	hsr = netdev_priv(hsr_dev);
	INIT_LIST_HEAD(&hsr->ports);
	INIT_LIST_HEAD(&hsr->node_db);
	spin_lock_init(&hsr->list_lock);

	eth_hw_addr_set(hsr_dev, slave[0]->dev_addr);

	/* initialize protocol specific functions */
	if (protocol_version == PRP_V1) {
		/* For PRP, lan_id has most significant 3 bits holding
		 * the net_id of PRP_LAN_ID
		 */
		hsr->net_id = PRP_LAN_ID << 1;
		hsr->proto_ops = &prp_ops;
	} else {
		hsr->proto_ops = &hsr_ops;
	}

	/* Make sure we recognize frames from ourselves in hsr_rcv() */
	res = hsr_create_self_node(hsr, hsr_dev->dev_addr,
				   slave[1]->dev_addr);
	if (res < 0)
		return res;

	spin_lock_init(&hsr->seqnr_lock);
	/* Overflow soon to find bugs easier: */
	hsr->sequence_nr = HSR_SEQNR_START;
	hsr->sup_sequence_nr = HSR_SUP_SEQNR_START;

	timer_setup(&hsr->announce_timer, hsr_announce, 0);
	timer_setup(&hsr->prune_timer, hsr_prune_nodes, 0);

	ether_addr_copy(hsr->sup_multicast_addr, def_multicast_addr);
	hsr->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec;

	hsr->prot_version = protocol_version;

	/* Make sure the 1st call to netif_carrier_on() gets through */
	netif_carrier_off(hsr_dev);

	res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER, extack);
	if (res)
		goto err_add_master;

	/* HSR forwarding offload supported in lower device? */
	if ((slave[0]->features & NETIF_F_HW_HSR_FWD) &&
	    (slave[1]->features & NETIF_F_HW_HSR_FWD))
		hsr->fwd_offloaded = true;

	res = register_netdevice(hsr_dev);
	if (res)
		goto err_unregister;

	unregister = true;

	res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A, extack);
	if (res)
		goto err_unregister;

	res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B, extack);
	if (res)
		goto err_unregister;

	hsr_debugfs_init(hsr, hsr_dev);
	mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD));

	return 0;

err_unregister:
	hsr_del_ports(hsr);
err_add_master:
	hsr_del_self_node(hsr);

	if (unregister)
		unregister_netdevice(hsr_dev);
	return res;
}
