// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
#include <linux/jhash.h>
#include <linux/log2.h>
#include <linux/refcount.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <linux/ptp_classify.h>

#include "spectrum.h"
#include "pci.h"
#include "core.h"
#include "core_env.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
#include "spectrum_span.h"
#include "spectrum_ptp.h"
#include "spectrum_trap.h"

#define MLXSW_SP_FWREV_MINOR 2010
#define MLXSW_SP_FWREV_SUBMINOR 1006

#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

#define MLXSW_SP1_FW_FILENAME \
	"mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP2_FWREV_MAJOR 29

static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
	.major = MLXSW_SP2_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP2_FW_FILENAME \
	"mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP3_FWREV_MAJOR 30

static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
	.major = MLXSW_SP3_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

#define MLXSW_SP3_FW_FILENAME \
	"mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
	"." __stringify(MLXSW_SP_FWREV_MINOR) \
	"." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"

#define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \
	"mellanox/lc_ini_bundle_" \
	__stringify(MLXSW_SP_FWREV_MINOR) "_" \
	__stringify(MLXSW_SP_FWREV_SUBMINOR) ".bin"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
static const char mlxsw_sp4_driver_name[] = "mlxsw_spectrum4";

static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, u64 *packets,
			      u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	int err;

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;
	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
	return 0;
}

static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}

int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				unsigned int *p_counter_index)
{
	int err;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
				     p_counter_index);
	if (err)
		return err;
	err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;
	return 0;

err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}

void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
			      const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

int
mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
				  struct mlxsw_sp_port *mlxsw_sp_port,
				  struct sk_buff *skb,
				  const struct mlxsw_tx_info *tx_info)
{
	char *txhdr;
	u16 max_fid;
	int err;

	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		err = -ENOMEM;
		goto err_skb_cow_head;
	}

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) {
		err = -EIO;
		goto err_res_valid;
	}
	max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);

	txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
	mlxsw_tx_hdr_fid_valid_set(txhdr, true);
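	/* The FID value (max_fid + tx_info->local_port - 1) presumably
	 * selects a per-port FID past the regular FID range, so that the
	 * data packet egresses only through the requested port.
	 */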
	mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
	return 0;

err_res_valid:
err_skb_cow_head:
	this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
	dev_kfree_skb_any(skb);
	return err;
}

static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
{
	unsigned int type;

	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		return false;

	type = ptp_classify_raw(skb);
	return !!ptp_parse_header(skb, type);
}

static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core,
				 struct mlxsw_sp_port *mlxsw_sp_port,
				 struct sk_buff *skb,
				 const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	/* In Spectrum-2 and Spectrum-3, PTP events that require a time stamp
	 * need special handling and cannot be transmitted as regular control
	 * packets.
	 */
	if (unlikely(mlxsw_sp_skb_requires_ts(skb)))
		return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core,
							  mlxsw_sp_port, skb,
							  tx_info);

	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	mlxsw_sp_txhdr_construct(skb, tx_info);
	return 0;
}

enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
	switch (state) {
	case BR_STATE_FORWARDING:
		return MLXSW_REG_SPMS_STATE_FORWARDING;
	case BR_STATE_LEARNING:
		return MLXSW_REG_SPMS_STATE_LEARNING;
	case BR_STATE_LISTENING:
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
		return MLXSW_REG_SPMS_STATE_DISCARDING;
	default:
		BUG();
	}
}

int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      const unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

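	/* Derive the port MAC address from the switch base MAC, using the
	 * local port number as an offset.
	 */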
	eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac,
			mlxsw_sp_port->local_port);
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port,
					  mlxsw_sp_port->dev->dev_addr);
}

static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int err;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;

	*p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
	return 0;
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];

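	/* The MTU programmed to the device also covers the Ethernet header
	 * and the Tx header prepended to every packet, so account for both.
	 */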
	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	if (mtu > mlxsw_sp_port->max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
				  u16 local_port, u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

int mlxsw_sp_port_security_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spfsr_pl[MLXSW_REG_SPFSR_LEN];
	int err;

	if (mlxsw_sp_port->security == enable)
		return 0;

	mlxsw_reg_spfsr_pack(spfsr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spfsr), spfsr_pl);
	if (err)
		return err;

	mlxsw_sp_port->security = enable;
	return 0;
}

int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
{
	switch (ethtype) {
	case ETH_P_8021Q:
		*p_sver_type = 0;
		break;
	case ETH_P_8021AD:
		*p_sver_type = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spevet_pl[MLXSW_REG_SPEVET_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid, u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
			     sver_type);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			   u16 ethtype)
{
	int err;

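	/* PVID 0 is not programmed to the device; it is used here to mean
	 * that untagged packets should be dropped rather than be assigned
	 * a VLAN.
	 */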
	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int
mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp,
				u16 local_port, char *pmlp_pl,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	bool separate_rxtx;
	u8 first_lane;
	u8 slot_index;
	u8 module;
	u8 width;
	int i;

	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
	first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);

	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n",
				local_port);
			return -EINVAL;
		}
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->slot_index = slot_index;
	port_mapping->width = width;
	port_mapping->module_width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
			      struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
					       pmlp_pl, port_mapping);
}

static int
mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
			 const struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i, err;

	mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index,
				  port_mapping->module);

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
	for (i = 0; i < port_mapping->width; i++) {
		mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i,
					      port_mapping->slot_index);
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		goto err_pmlp_write;
	return 0;

err_pmlp_write:
	mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index,
				    port_mapping->module);
	return err;
}

static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				       u8 slot_index, u8 module)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	err = mlxsw_env_module_port_up(mlxsw_sp->core,
				       mlxsw_sp_port->mapping.slot_index,
				       mlxsw_sp_port->mapping.module);
	if (err)
		return err;
	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_admin_status_set;
	netif_start_queue(dev);
	return 0;

err_port_admin_status_set:
	mlxsw_env_module_port_down(mlxsw_sp->core,
				   mlxsw_sp_port->mapping.slot_index,
				   mlxsw_sp_port->mapping.module);
	return err;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	netif_stop_queue(dev);
	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	mlxsw_env_module_port_down(mlxsw_sp->core,
				   mlxsw_sp_port->mapping.slot_index,
				   mlxsw_sp_port->mapping.module);
	return 0;
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

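	/* Zero the mlxsw-specific part of the skb control block before the
	 * skb is handed to the core for transmission.
	 */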
	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb,
				    &tx_info);
	if (err)
		return NETDEV_TX_OK;

	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	eth_hw_addr_set(dev, addr->sa_data);
	return 0;
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_hdroom orig_hdroom;
	struct mlxsw_sp_hdroom hdroom;
	int err;

	orig_hdroom = *mlxsw_sp_port->hdroom;

	hdroom = orig_hdroom;
	hdroom.mtu = mtu;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
	return err;
}

static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}

static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}

static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
					  0, ppcnt_pl);
	if (err)
		goto out;

	stats->tx_packets =
		mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
	stats->rx_packets =
		mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
	stats->tx_bytes =
		mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
	stats->rx_bytes =
		mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
	stats->multicast =
		mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);

	stats->rx_crc_errors =
		mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
	stats->rx_frame_errors =
		mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);

	stats->rx_length_errors = (
		mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
		mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));

	stats->rx_errors = (stats->rx_crc_errors +
		stats->rx_frame_errors + stats->rx_length_errors);

out:
	return err;
}

static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_CNT,
						  i, ppcnt_pl);
		if (err)
			goto tc_cnt;

		xstats->wred_drop[i] =
			mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
		xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl);

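		/* Even if the congestion counters could not be read, still
		 * try to read the per-TC counters below.
		 */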
tc_cnt:
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}

static void update_stats_cache(struct work_struct *work)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		container_of(work, struct mlxsw_sp_port,
			     periodic_hw_stats.update_dw.work);

	if (!netif_carrier_ok(mlxsw_sp_port->dev))
		/* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
		 * necessary when port goes down.
		 */
		goto out;

	mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
				   &mlxsw_sp_port->periodic_hw_stats.stats);
	mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
				    &mlxsw_sp_port->periodic_hw_stats.xstats);

out:
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			       MLXSW_HW_STATS_UPDATE_TIME);
}

/* Return the stats from a cache that is updated periodically,
 * as this function might get called in an atomic context.
 */
static void
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
}

static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					       is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}

static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	if (mlxsw_sp_port_vlan->bridge_port)
		mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	else if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
}

struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}

void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}

static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}

int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan)
		return 0;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}

static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct flow_block_offload *f)
{
	switch (f->binder_type) {
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
	case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
		return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
	case FLOW_BLOCK_BINDER_TYPE_RED_MARK:
		return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_ETS:
		return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_TBF:
		return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_FIFO:
		return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (!enable) {
		if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
		    mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
			netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
			return -EINVAL;
		}
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
	} else {
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
	}
	return 0;
}

static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

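	/* Take the port administratively down while toggling the loopback,
	 * then restore its admin state.
	 */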
	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}

typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);

static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ dev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}

static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

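	/* Run both handlers even if the first one fails, so every toggled
	 * feature is attempted; on any failure the old feature set is
	 * restored below.
	 */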
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}

static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

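	/* Per the SIOCSHWTSTAMP convention, return the (possibly adjusted)
	 * config that was actually applied back to user space.
	 */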
	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}

static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
	case SIOCGHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_eth_ioctl		= mlxsw_sp_port_ioctl,
};

static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap_masked;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	/* Set advertised speeds to speeds supported by both the driver
	 * and the device.
	 */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);
	eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_cap_masked,
			       mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
{
	const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_oper;
	int err;

	port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
	port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
					       mlxsw_sp_port->local_port, 0,
					       false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
						 &eth_proto_oper);
	*speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
	return 0;
}

int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate, u8 burst_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}

int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}

static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Set up the element hierarchy, so that each TC is linked to one
	 * subgroup, and all subgroups are members of the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
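	/* Each egress TC i is presumably paired with TC i + 8, which carries
	 * the corresponding multicast traffic when multicast-aware mode is
	 * enabled (see mlxsw_sp_port_tc_mc_mode_set()).
	 */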
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}

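/* Enable or disable multicast-aware mode, in which the higher eight traffic
 * classes are presumably used for multicast traffic (the i + 8 pairing set up
 * in mlxsw_sp_port_ets_init()).
 */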
static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}

static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 slot_index = mlxsw_sp_port->mapping.slot_index;
	u8 module = mlxsw_sp_port->mapping.module;
	u64 overheat_counter;
	int err;

	err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index,
						    module, &overheat_counter);
	if (err)
		return err;

	mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
	return 0;
}

int
mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool is_8021ad_tagged,
				      bool is_8021q_tagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvc_pl[MLXSW_REG_SPVC_LEN];

	mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
			    is_8021ad_tagged, is_8021q_tagged);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
}

static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
					u16 local_port, u8 *port_number,
					u8 *split_port_subnumber,
					u8 *slot_index)
{
	char pllp_pl[MLXSW_REG_PLLP_LEN];
	int err;

	mlxsw_reg_pllp_pack(pllp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl);
	if (err)
		return err;
	mlxsw_reg_pllp_unpack(pllp_pl, port_number,
			      split_port_subnumber, slot_index);
	return 0;
}

static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				bool split,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 lanes = port_mapping->width;
	u8 split_port_subnumber;
	struct net_device *dev;
	u8 port_number;
	u8 slot_index;
	bool splittable;
	int err;

	err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			local_port);
		return err;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number,
					   &split_port_subnumber, &slot_index);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n",
			local_port);
		goto err_port_label_info_get;
	}

	splittable = lanes > 1 && !split;
	err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index,
				   port_number, split, split_port_subnumber,
				   splittable, lanes, mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		goto err_core_port_init;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_core_port_netdev_link(mlxsw_sp->core, local_port,
				    mlxsw_sp_port, dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
							    &mlxsw_sp_port->max_speed);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
			mlxsw_sp_port->local_port);
		goto err_max_speed_get;
	}

	err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_max_mtu_get;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
				     ETH_P_8021Q);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	/* Set SPVC.et0=true and SPVC.et1=false so that the local port treats
	 * only packets with an 802.1q header as tagged.
	 */
	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
			local_port);
		goto err_port_vlan_classification_set;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;

	err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
			mlxsw_sp_port->local_port);
		goto err_port_overheat_init_val_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

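	/* Error unwind: the labels below undo the setup steps above in
	 * reverse order. Labels with no statement of their own belong to
	 * steps that need no explicit rollback.
	 */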
err_register_netdev:
err_port_overheat_init_val_set:
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
err_port_vlan_classification_set:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_max_mtu_get:
err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
err_core_port_init:
err_port_label_info_get:
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port,
				   port_mapping->slot_index,
				   port_mapping->module);
	return err;
}

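/* Teardown mirrors mlxsw_sp_port_create() in reverse. The delayed works
 * are cancelled first so that neither can run against a port that is
 * being dismantled, and unregister_netdev() stops the data path before
 * the underlying resources are released.
 */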
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	u8 slot_index = mlxsw_sp_port->mapping.slot_index;
	u8 module = mlxsw_sp_port->mapping.module;

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module);
}

static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
	if (!mlxsw_sp_port)
		return -ENOMEM;

	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;

	err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
				       mlxsw_sp_port,
				       mlxsw_sp->base_mac,
				       sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
		goto err_core_cpu_port_init;
	}

	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
	return 0;

err_core_cpu_port_init:
	kfree(mlxsw_sp_port);
	return err;
}

static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
		mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];

	mlxsw_core_cpu_port_fini(mlxsw_sp->core);
	mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
	kfree(mlxsw_sp_port);
}

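/* The CPU port has a slot in the ports array but no netdev behind it, so
 * the per-netdev port paths must skip it.
 */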
static bool mlxsw_sp_local_port_valid(u16 local_port)
{
	return local_port != MLXSW_PORT_CPU_PORT;
}

static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	if (!mlxsw_sp_local_port_valid(local_port))
		return false;
	return mlxsw_sp->ports[local_port] != NULL;
}

static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp,
					   u16 local_port, bool enable)
{
	char pmecr_pl[MLXSW_REG_PMECR_LEN];

	mlxsw_reg_pmecr_pack(pmecr_pl, local_port,
			     enable ? MLXSW_REG_PMECR_E_GENERATE_EVENT :
				      MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl);
}

struct mlxsw_sp_port_mapping_event {
	struct list_head list;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
};

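/* Port mapping (PMLPE) events arrive in a context that may not sleep, so
 * the listener below only copies the register payload onto a
 * spinlock-protected queue. This work item drains the queue and creates
 * the corresponding ports under the devlink instance lock.
 */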
static void mlxsw_sp_port_mapping_events_work(struct work_struct *work)
{
	struct mlxsw_sp_port_mapping_event *event, *next_event;
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp *mlxsw_sp;
	struct devlink *devlink;
	LIST_HEAD(event_queue);
	u16 local_port;
	int err;

	events = container_of(work, struct mlxsw_sp_port_mapping_events, work);
	mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events);
	devlink = priv_to_devlink(mlxsw_sp->core);

	spin_lock_bh(&events->queue_lock);
	list_splice_init(&events->queue, &event_queue);
	spin_unlock_bh(&events->queue_lock);

	list_for_each_entry_safe(event, next_event, &event_queue, list) {
		local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl);
		err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
						      event->pmlp_pl, &port_mapping);
		if (err)
			goto out;

		if (WARN_ON_ONCE(!port_mapping.width))
			goto out;

		devl_lock(devlink);

		if (!mlxsw_sp_port_created(mlxsw_sp, local_port))
			mlxsw_sp_port_create(mlxsw_sp, local_port,
					     false, &port_mapping);
		else
			WARN_ON_ONCE(1);

		devl_unlock(devlink);

		mlxsw_sp->port_mapping[local_port] = port_mapping;

out:
		kfree(event);
	}
}

static void
mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg,
				    char *pmlp_pl, void *priv)
{
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping_event *event;
	struct mlxsw_sp *mlxsw_sp = priv;
	u16 local_port;

	local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl);
	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
		return;

	events = &mlxsw_sp->port_mapping_events;
	event = kmalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl));
	spin_lock(&events->queue_lock);
	list_add_tail(&event->list, &events->queue);
	spin_unlock(&events->queue_lock);
	mlxsw_core_schedule_work(&events->work);
}

static void
__mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_port_mapping_event *event, *next_event;
	struct mlxsw_sp_port_mapping_events *events;

	events = &mlxsw_sp->port_mapping_events;

	/* Caller needs to make sure that no new event is going to appear. */
	cancel_work_sync(&events->work);
	list_for_each_entry_safe(event, next_event, &events->queue, list) {
		list_del(&event->list);
		kfree(event);
	}
}

static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	int i;

	for (i = 1; i < max_ports; i++)
		mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
	/* Make sure all scheduled events are processed */
	__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);

	for (i = 1; i < max_ports; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
}

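/* Remove only the ports matched by the selector callback; the CPU port
 * and the ports array itself stay in place.
 */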
static void
mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core,
			       bool (*selector)(void *priv, u16 local_port),
			       void *priv)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core);
	int i;

	for (i = 1; i < max_ports; i++)
		if (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
}

static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping *port_mapping;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	events = &mlxsw_sp->port_mapping_events;
	INIT_LIST_HEAD(&events->queue);
	spin_lock_init(&events->queue_lock);
	INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work);

	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true);
		if (err)
			goto err_event_enable;
	}

	err = mlxsw_sp_cpu_port_create(mlxsw_sp);
	if (err)
		goto err_cpu_port_create;

	for (i = 1; i < max_ports; i++) {
		port_mapping = &mlxsw_sp->port_mapping[i];
		if (!port_mapping->width)
			continue;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	i = max_ports;
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
err_event_enable:
	for (i--; i >= 1; i--)
		mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
	/* Make sure all scheduled events are processed */
	__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
	return err;
}

static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping *port_mapping;
	int i;
	int err;

	mlxsw_sp->port_mapping = kcalloc(max_ports,
					 sizeof(struct mlxsw_sp_port_mapping),
					 GFP_KERNEL);
	if (!mlxsw_sp->port_mapping)
		return -ENOMEM;

	for (i = 1; i < max_ports; i++) {
		port_mapping = &mlxsw_sp->port_mapping[i];
		err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping);
		if (err)
			goto err_port_module_info_get;
	}
	return 0;

err_port_module_info_get:
	kfree(mlxsw_sp->port_mapping);
	return err;
}

static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->port_mapping);
}

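/* Split a port's lanes evenly between the resulting sub-ports: each
 * sub-port gets width / count lanes, and the first lane advances by
 * that width from one sub-port to the next.
 */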
static int
mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_port_mapping *port_mapping,
			   unsigned int count, const char *pmtdb_pl)
{
	struct mlxsw_sp_port_mapping split_port_mapping;
	int err, i;

	split_port_mapping = *port_mapping;
	split_port_mapping.width /= count;
	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (!mlxsw_sp_local_port_valid(s_local_port))
			continue;

		err = mlxsw_sp_port_create(mlxsw_sp, s_local_port,
					   true, &split_port_mapping);
		if (err)
			goto err_port_create;
		split_port_mapping.lane += split_port_mapping.width;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}
	return err;
}

static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
					 unsigned int count,
					 const char *pmtdb_pl)
{
	struct mlxsw_sp_port_mapping *port_mapping;
	int i;

	/* Go over original unsplit ports in the gap and recreate them. */
	for (i = 0; i < count; i++) {
		u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		port_mapping = &mlxsw_sp->port_mapping[local_port];
		if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port))
			continue;
		mlxsw_sp_port_create(mlxsw_sp, local_port,
				     false, port_mapping);
	}
}

static struct mlxsw_sp_port *
mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
		return mlxsw_sp->ports[local_port];
	return NULL;
}

static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pmtdb_status status;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port is already split");
		return -EINVAL;
	}

	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	status = mlxsw_reg_pmtdb_status_get(pmtdb_pl);
	if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration");
		return -EINVAL;
	}

	port_mapping = mlxsw_sp_port->mapping;

	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping,
					 count, pmtdb_pl);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);

	return err;
}

static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	unsigned int count;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	count = mlxsw_sp_port->mapping.module_width /
		mlxsw_sp_port->mapping.width;

	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);

	return 0;
}

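/* Zero the cached per-TC backlog when the link goes down; the hardware
 * presumably flushes its queues at that point, so a stale software copy
 * would otherwise keep reporting a non-zero backlog.
 */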
static void
mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int i;

	for (i = 0; i < TC_MAX_QUEUE; i++)
		mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
}

static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u16 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);

	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
		return;
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port)
		return;

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
		mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
		mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
	}
}

static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
					  char *mtpptr_pl, bool ingress)
{
	u16 local_port;
	u8 num_rec;
	int i;

	local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
	num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
	for (i = 0; i < num_rec; i++) {
		u8 domain_number;
		u8 message_type;
		u16 sequence_id;
		u64 timestamp;

		mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
					&domain_number, &sequence_id,
					&timestamp);
		mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
					    message_type, domain_number,
					    sequence_id, timestamp);
	}
}

static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
					      char *mtpptr_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
}

static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
					      char *mtpptr_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
}

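/* Default RX path for trapped packets: attribute the skb to the ingress
 * port's netdev, account it in the per-CPU stats under the u64_stats
 * sequence counter, and hand it to the stack.
 */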
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
				       u16 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
					   void *priv)
{
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
					      u16 local_port, void *priv)
{
	skb->offload_l3_fwd_mark = 1;
	skb->offload_fwd_mark = 1;
	return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}

void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			  u16 local_port)
{
	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}

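/* Shorthand for the listener table below. The MARK variants set
 * offload_fwd_mark (and offload_l3_fwd_mark) so the stack knows the
 * packet was already forwarded in hardware; the trailing DISCARD is, as
 * far as the macro arguments suggest, the action applied while the
 * listener is not registered.
 */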
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

#define MLXSW_SP_EVENTL(_func, _trap_id)	\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)

static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
			     ROUTER_EXP, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};

static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};

static const struct mlxsw_listener mlxsw_sp2_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE),
};

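/* Bind the trap groups used above (except the event group) to CPU
 * policers so trapped traffic is rate-limited: a packet-based rate of
 * 1024 (is_bytes stays false) with a burst size of 7 in the QPCR
 * register's encoding.
 */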
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			rate = 1024;
			burst_size = 7;
			break;
		default:
			continue;
		}

		__set_bit(i, mlxsw_sp->trap->policers_usage);
		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;
		}

		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_trap *trap;
	u64 max_policers;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
		return -EIO;
	max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
	trap = kzalloc(struct_size(trap, policers_usage,
				   BITS_TO_LONGS(max_policers)), GFP_KERNEL);
	if (!trap)
		return -ENOMEM;
	trap->max_policers = max_policers;
	mlxsw_sp->trap = trap;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		goto err_cpu_policers_set;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		goto err_trap_groups_set;

	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp_listener,
					ARRAY_SIZE(mlxsw_sp_listener),
					mlxsw_sp);
	if (err)
		goto err_traps_register;

	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp->listeners,
					mlxsw_sp->listeners_count, mlxsw_sp);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
				    ARRAY_SIZE(mlxsw_sp_listener),
				    mlxsw_sp);
err_traps_register:
err_trap_groups_set:
err_cpu_policers_set:
	kfree(trap);
	return err;
}

static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp->listeners,
				    mlxsw_sp->listeners_count,
				    mlxsw_sp);
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
				    ARRAY_SIZE(mlxsw_sp_listener), mlxsw_sp);
	kfree(mlxsw_sp->trap);
}

static int mlxsw_sp_lag_pgt_init(struct mlxsw_sp *mlxsw_sp)
{
	char sgcr_pl[MLXSW_REG_SGCR_LEN];
	u16 max_lag;
	int err;

	if (mlxsw_core_lag_mode(mlxsw_sp->core) !=
	    MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW)
		return 0;

	err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
	if (err)
		return err;

	/* In DDD mode, which we use by default, each LAG entry takes up 8 PGT
	 * entries. The LAG table base address needs to be 8-aligned, and that
	 * ought to be the case, since the LAG table is allocated first.
	 */
	err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, &mlxsw_sp->lag_pgt_base,
					   max_lag * 8);
	if (err)
		return err;
	if (WARN_ON_ONCE(mlxsw_sp->lag_pgt_base % 8)) {
		err = -EINVAL;
		goto err_mid_alloc_range;
	}

	mlxsw_reg_sgcr_pack(sgcr_pl, mlxsw_sp->lag_pgt_base);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sgcr), sgcr_pl);
	if (err)
		goto err_mid_alloc_range;

	return 0;

err_mid_alloc_range:
	mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base,
				    max_lag * 8);
	return err;
}

static void mlxsw_sp_lag_pgt_fini(struct mlxsw_sp *mlxsw_sp)
{
	u16 max_lag;
	int err;

	if (mlxsw_core_lag_mode(mlxsw_sp->core) !=
	    MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW)
		return;

	err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
	if (err)
		return;

	mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base,
				    max_lag * 8);
}

#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe

static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u16 max_lag;
	u32 seed;
	int err;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	err = mlxsw_sp_lag_pgt_init(mlxsw_sp);
	if (err)
		return err;

	mlxsw_sp->lags = kcalloc(max_lag, sizeof(struct mlxsw_sp_upper),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags) {
		err = -ENOMEM;
		goto err_kcalloc;
	}

	return 0;

err_kcalloc:
	mlxsw_sp_lag_pgt_fini(mlxsw_sp);
	return err;
}

static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_lag_pgt_fini(mlxsw_sp);
	kfree(mlxsw_sp->lags);
}

static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init = mlxsw_sp1_ptp_clock_init,
	.clock_fini = mlxsw_sp1_ptp_clock_fini,
	.init = mlxsw_sp1_ptp_init,
	.fini = mlxsw_sp1_ptp_fini,
	.receive = mlxsw_sp1_ptp_receive,
	.transmitted = mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get = mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set = mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work = mlxsw_sp1_ptp_shaper_work,
	.get_ts_info = mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats = mlxsw_sp1_get_stats,
	.txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};

static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init = mlxsw_sp2_ptp_clock_init,
	.clock_fini = mlxsw_sp2_ptp_clock_fini,
	.init = mlxsw_sp2_ptp_init,
	.fini = mlxsw_sp2_ptp_fini,
	.receive = mlxsw_sp2_ptp_receive,
	.transmitted = mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work = mlxsw_sp2_ptp_shaper_work,
	.get_ts_info = mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats = mlxsw_sp2_get_stats,
	.txhdr_construct = mlxsw_sp2_ptp_txhdr_construct,
};

static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
	.clock_init = mlxsw_sp2_ptp_clock_init,
	.clock_fini = mlxsw_sp2_ptp_clock_fini,
	.init = mlxsw_sp2_ptp_init,
	.fini = mlxsw_sp2_ptp_fini,
	.receive = mlxsw_sp2_ptp_receive,
	.transmitted = mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get = mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set = mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work = mlxsw_sp2_ptp_shaper_work,
	.get_ts_info = mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats = mlxsw_sp2_get_stats,
	.txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};

struct mlxsw_sp_sample_trigger_node {
	struct mlxsw_sp_sample_trigger trigger;
	struct mlxsw_sp_sample_params params;
	struct rhash_head ht_node;
	struct rcu_head rcu;
	refcount_t refcount;
};

static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger),
	.head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_sample_trigger),
	.automatic_shrinking = true,
};

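/* Only the trigger type and local port form the hash key; the sampling
 * parameters are kept outside of it, so an existing trigger is found
 * again regardless of the parameters a new request carries.
 */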
static void
mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key,
				 const struct mlxsw_sp_sample_trigger *trigger)
{
	memset(key, 0, sizeof(*key));
	key->type = trigger->type;
	key->local_port = trigger->local_port;
}

/* RCU read lock must be held */
struct mlxsw_sp_sample_params *
mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_sp_sample_trigger *trigger)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	mlxsw_sp_sample_trigger_key_init(&key, trigger);
	trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key,
					 mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return NULL;

	return &trigger_node->params;
}

static int
mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp,
				  const struct mlxsw_sp_sample_trigger *trigger,
				  const struct mlxsw_sp_sample_params *params)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	int err;

	trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL);
	if (!trigger_node)
		return -ENOMEM;

	trigger_node->trigger = *trigger;
	trigger_node->params = *params;
	refcount_set(&trigger_node->refcount, 1);

	err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht,
				     &trigger_node->ht_node,
				     mlxsw_sp_sample_trigger_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return 0;

err_rhashtable_insert:
	kfree(trigger_node);
	return err;
}

static void
mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_sample_trigger_node *trigger_node)
{
	rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht,
			       &trigger_node->ht_node,
			       mlxsw_sp_sample_trigger_ht_params);
	kfree_rcu(trigger_node, rcu);
}

int
mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_sample_trigger *trigger,
				   const struct mlxsw_sp_sample_params *params,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	ASSERT_RTNL();

	mlxsw_sp_sample_trigger_key_init(&key, trigger);

	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
					      &key,
					      mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key,
							 params);

	if (trigger_node->trigger.local_port) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port");
		return -EINVAL;
	}

	if (trigger_node->params.psample_group != params->psample_group ||
	    trigger_node->params.truncate != params->truncate ||
	    trigger_node->params.rate != params->rate ||
	    trigger_node->params.trunc_size != params->trunc_size) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger");
		return -EINVAL;
	}

	refcount_inc(&trigger_node->refcount);

	return 0;
}

void
mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_sample_trigger *trigger)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	ASSERT_RTNL();

	mlxsw_sp_sample_trigger_key_init(&key, trigger);

	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
					      &key,
					      mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return;

	if (!refcount_dec_and_test(&trigger_node->refcount))
		return;

	mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node);
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr);

#define MLXSW_SP_DEFAULT_PARSING_DEPTH 96
#define MLXSW_SP_INCREASED_PARSING_DEPTH 128
#define MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789
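
/* The parsing depth appears to be reference-counted: it stays at the
 * default of 96 bytes until a feature that needs deeper parsing (the
 * names above suggest VxLAN) raises it to 128 bytes, and the WARN in
 * mlxsw_sp_parsing_fini() checks that every such user has let go again.
 */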
static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
{
	refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 0);
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;
	mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT;
	mutex_init(&mlxsw_sp->parsing.lock);
}

static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->parsing.lock);
	WARN_ON_ONCE(refcount_read(&mlxsw_sp->parsing.parsing_depth_ref));
}

struct mlxsw_sp_ipv6_addr_node {
	struct in6_addr key;
	struct rhash_head ht_node;
	u32 kvdl_index;
	refcount_t refcount;
};

static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key),
	.head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node),
	.key_len = sizeof(struct in6_addr),
	.automatic_shrinking = true,
};

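/* Write the IPv6 address into a freshly allocated KVDL entry via the RIPS
 * register and track it in the hash table. Identical addresses share a
 * single refcounted KVDL entry.
 */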
static int
mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6,
			u32 *p_kvdl_index)
{
	struct mlxsw_sp_ipv6_addr_node *node;
	char rips_pl[MLXSW_REG_RIPS_LEN];
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
				  MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
				  p_kvdl_index);
	if (err)
		return err;

	mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
	if (err)
		goto err_rips_write;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node) {
		err = -ENOMEM;
		goto err_node_alloc;
	}

	node->key = *addr6;
	node->kvdl_index = *p_kvdl_index;
	refcount_set(&node->refcount, 1);

	err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht,
				     &node->ht_node,
				     mlxsw_sp_ipv6_addr_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return 0;

err_rhashtable_insert:
	kfree(node);
err_node_alloc:
err_rips_write:
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   *p_kvdl_index);
	return err;
}

static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_ipv6_addr_node *node)
{
	u32 kvdl_index = node->kvdl_index;

	rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node,
			       mlxsw_sp_ipv6_addr_ht_params);
	kfree(node);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   kvdl_index);
}

int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp,
				      const struct in6_addr *addr6,
				      u32 *p_kvdl_index)
{
	struct mlxsw_sp_ipv6_addr_node *node;
	int err = 0;

	mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
	node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
				      mlxsw_sp_ipv6_addr_ht_params);
	if (node) {
		refcount_inc(&node->refcount);
		*p_kvdl_index = node->kvdl_index;
		goto out_unlock;
	}

	err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index);

out_unlock:
	mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
	return err;
}

void
mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6)
{
	struct mlxsw_sp_ipv6_addr_node *node;

	mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
	node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
				      mlxsw_sp_ipv6_addr_ht_params);
	if (WARN_ON(!node))
		goto out_unlock;

	if (!refcount_dec_and_test(&node->refcount))
		goto out_unlock;

	mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node);

out_unlock:
	mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
}

static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht,
			      &mlxsw_sp_ipv6_addr_ht_params);
	if (err)
		return err;

	mutex_init(&mlxsw_sp->ipv6_addr_ht_lock);
	return 0;
}

static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock);
	rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht);
}

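/* Common init path for all Spectrum generations. Sub-system order
 * matters; see the inline notes. On failure, everything initialized so
 * far is torn down in reverse order by the unwind labels at the bottom.
 */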
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	mlxsw_sp_parsing_init(mlxsw_sp);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_pgt_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PGT\n");
		goto err_pgt_init;
	}

	/* Initialize before FIDs so that the LAG table is at the start of PGT
	 * and 8-aligned without overallocation.
	 */
	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_fids_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fids_init;
	}

	err = mlxsw_sp_policers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
		goto err_policers_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
		goto err_devlink_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n");
		goto err_ipv6_addr_ht_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_port_range_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize port ranges\n");
		goto err_port_range_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	if (mlxsw_sp->bus_info->read_clock_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after SPAN is initialized, so that the
	 * event handler can call SPAN respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	err = rhashtable_init(&mlxsw_sp->sample_trigger_ht,
			      &mlxsw_sp_sample_trigger_ht_params);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n");
		goto err_sample_trigger_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
err_sample_trigger_init:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_port_range_fini(mlxsw_sp);
err_port_range_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
err_ipv6_addr_ht_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_policers_fini(mlxsw_sp);
err_policers_init:
	mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_pgt_fini(mlxsw_sp);
err_pgt_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
	return err;
}

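/* The per-generation init callbacks only differ in the ops tables they
 * plug into mlxsw_sp; all of them finish through the common
 * mlxsw_sp_init() above.
 */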
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp1_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
	mlxsw_sp->pgt_smpe_index_valid = true;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp4_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_family_arr = mlxsw_sp2_fid_family_arr;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}

static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
3530 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3531
3532 mlxsw_sp_ports_remove(mlxsw_sp);
3533 rhashtable_destroy(ht: &mlxsw_sp->sample_trigger_ht);
3534 mlxsw_sp_port_module_info_fini(mlxsw_sp);
3535 mlxsw_sp_dpipe_fini(mlxsw_sp);
3536 unregister_netdevice_notifier_net(net: mlxsw_sp_net(mlxsw_sp),
3537 nb: &mlxsw_sp->netdevice_nb);
3538 if (mlxsw_sp->clock) {
3539 mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
3540 mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
3541 }
3542 mlxsw_sp_router_fini(mlxsw_sp);
3543 mlxsw_sp_acl_fini(mlxsw_sp);
3544 mlxsw_sp_port_range_fini(mlxsw_sp);
3545 mlxsw_sp_nve_fini(mlxsw_sp);
3546 mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
3547 mlxsw_sp_afa_fini(mlxsw_sp);
3548 mlxsw_sp_counter_pool_fini(mlxsw_sp);
3549 mlxsw_sp_switchdev_fini(mlxsw_sp);
3550 mlxsw_sp_span_fini(mlxsw_sp);
3551 mlxsw_sp_buffers_fini(mlxsw_sp);
3552 mlxsw_sp_devlink_traps_fini(mlxsw_sp);
3553 mlxsw_sp_traps_fini(mlxsw_sp);
3554 mlxsw_sp_policers_fini(mlxsw_sp);
3555 mlxsw_sp_fids_fini(mlxsw_sp);
3556 mlxsw_sp_lag_fini(mlxsw_sp);
3557 mlxsw_sp_pgt_fini(mlxsw_sp);
3558 mlxsw_sp_kvdl_fini(mlxsw_sp);
3559 mlxsw_sp_parsing_fini(mlxsw_sp);
3560}
3561
3562static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
3563 .used_flood_mode = 1,
3564 .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
3565 .used_max_ib_mc = 1,
3566 .max_ib_mc = 0,
3567 .used_max_pkey = 1,
3568 .max_pkey = 0,
3569 .used_ubridge = 1,
3570 .ubridge = 1,
3571 .used_kvd_sizes = 1,
3572 .kvd_hash_single_parts = 59,
3573 .kvd_hash_double_parts = 41,
3574 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
3575 .swid_config = {
3576 {
3577 .used_type = 1,
3578 .type = MLXSW_PORT_SWID_TYPE_ETH,
3579 }
3580 },
3581};
3582
3583static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
3584 .used_flood_mode = 1,
3585 .flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
3586 .used_max_ib_mc = 1,
3587 .max_ib_mc = 0,
3588 .used_max_pkey = 1,
3589 .max_pkey = 0,
3590 .used_ubridge = 1,
3591 .ubridge = 1,
3592 .swid_config = {
3593 {
3594 .used_type = 1,
3595 .type = MLXSW_PORT_SWID_TYPE_ETH,
3596 }
3597 },
3598 .used_cqe_time_stamp_type = 1,
3599 .cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
3600 .lag_mode_prefer_sw = true,
3601};
3602
/* Reduce the number of LAGs from the full capacity (256) to the maximum
 * supported in Spectrum-2/3 (128), to avoid a regression in the number of
 * free entries in the PGT table.
 */
#define MLXSW_SP4_CONFIG_PROFILE_MAX_LAG 128

static const struct mlxsw_config_profile mlxsw_sp4_config_profile = {
	.used_max_lag = 1,
	.max_lag = MLXSW_SP4_CONFIG_PROFILE_MAX_LAG,
	.used_flood_mode = 1,
	.flood_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.used_ubridge = 1,
	.ubridge = 1,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.used_cqe_time_stamp_type = 1,
	.cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
	.lag_mode_prefer_sw = true,
};

static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}
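
/* Illustrative note for the sizing helper above (the numbers are made up):
 * with a 512K-entry KVD, a 32K single-hash minimum and a 16K double-hash
 * minimum, the linear partition may grow up to 512K - 32K - 16K = 464K
 * entries. Each partition's maximum is bounded the same way, leaving room
 * for the minima of the other two; all sizes are expressed in KVD entries
 * and are subject to MLXSW_SP_KVD_GRANULARITY.
 */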

static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
				     kvd_size, MLXSW_SP_RESOURCE_KVD,
				     DEVLINK_RESOURCE_ID_PARENT_TOP,
				     &kvd_size_params);
	if (err)
		return err;

	linear_size = profile->kvd_linear_size;
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
				     linear_size,
				     MLXSW_SP_RESOURCE_KVD_LINEAR,
				     MLXSW_SP_RESOURCE_KVD,
				     &linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if (err)
		return err;

	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
				     double_size,
				     MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
				     MLXSW_SP_RESOURCE_KVD,
				     &hash_double_size_params);
	if (err)
		return err;

	single_size = kvd_size - double_size - linear_size;
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
				     single_size,
				     MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
				     MLXSW_SP_RESOURCE_KVD,
				     &hash_single_size_params);
	if (err)
		return err;

	return 0;
}
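
/* The registrations above expose the KVD and its partitions to user space.
 * As an illustration (the PCI address and size are examples only), the
 * layout can be inspected and resized with the devlink tool, and a reload
 * applies the new sizes:
 *
 *	# devlink resource show pci/0000:03:00.0
 *	# devlink resource set pci/0000:03:00.0 path /kvd/linear size 98304
 *	# devlink dev reload pci/0000:03:00.0
 */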

static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
				      kvd_size, MLXSW_SP_RESOURCE_KVD,
				      DEVLINK_RESOURCE_ID_PARENT_TOP,
				      &kvd_size_params);
}

static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params span_size_params;
	u32 max_span;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
		return -EIO;

	max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
	devlink_resource_size_params_init(&span_size_params, max_span, max_span,
					  1, DEVLINK_RESOURCE_UNIT_ENTRY);

	return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
				      max_span, MLXSW_SP_RESOURCE_SPAN,
				      DEVLINK_RESOURCE_ID_PARENT_TOP,
				      &span_size_params);
}

static int
mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params size_params;
	u8 max_rif_mac_profiles;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES))
		max_rif_mac_profiles = 1;
	else
		max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core,
							  MAX_RIF_MAC_PROFILES);
	devlink_resource_size_params_init(&size_params, max_rif_mac_profiles,
					  max_rif_mac_profiles, 1,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devl_resource_register(devlink,
				      "rif_mac_profiles",
				      max_rif_mac_profiles,
				      MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
				      DEVLINK_RESOURCE_ID_PARENT_TOP,
				      &size_params);
}

static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params size_params;
	u64 max_rifs;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIFS))
		return -EIO;

	max_rifs = MLXSW_CORE_RES_GET(mlxsw_core, MAX_RIFS);
	devlink_resource_size_params_init(&size_params, max_rifs, max_rifs,
					  1, DEVLINK_RESOURCE_UNIT_ENTRY);

	return devl_resource_register(devlink, "rifs", max_rifs,
				      MLXSW_SP_RESOURCE_RIFS,
				      DEVLINK_RESOURCE_ID_PARENT_TOP,
				      &size_params);
}

static int
mlxsw_sp_resources_port_range_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params size_params;
	u64 max;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, ACL_MAX_L4_PORT_RANGE))
		return -EIO;

	max = MLXSW_CORE_RES_GET(mlxsw_core, ACL_MAX_L4_PORT_RANGE);
	devlink_resource_size_params_init(&size_params, max, max, 1,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devl_resource_register(devlink, "port_range_registers", max,
				      MLXSW_SP_RESOURCE_PORT_RANGE_REGISTERS,
				      DEVLINK_RESOURCE_ID_PARENT_TOP,
				      &size_params);
}

static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_policer_resources_register;

	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
	if (err)
		goto err_resources_rif_mac_profile_register;

	err = mlxsw_sp_resources_rifs_register(mlxsw_core);
	if (err)
		goto err_resources_rifs_register;

	err = mlxsw_sp_resources_port_range_register(mlxsw_core);
	if (err)
		goto err_resources_port_range_register;

	return 0;

err_resources_port_range_register:
err_resources_rifs_register:
err_resources_rif_mac_profile_register:
err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
	devl_resources_unregister(priv_to_devlink(mlxsw_core));
	return err;
}

static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_policer_resources_register;

	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
	if (err)
		goto err_resources_rif_mac_profile_register;

	err = mlxsw_sp_resources_rifs_register(mlxsw_core);
	if (err)
		goto err_resources_rifs_register;

	err = mlxsw_sp_resources_port_range_register(mlxsw_core);
	if (err)
		goto err_resources_port_range_register;

	return 0;

err_resources_port_range_register:
err_resources_rifs_register:
err_resources_rif_mac_profile_register:
err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
	devl_resources_unregister(priv_to_devlink(mlxsw_core));
	return err;
}

static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what is left of the KVD after the linear part.
	 * It is split into single and double sizes according to the parts
	 * ratio from the profile. Both sizes must be multiples of the
	 * granularity from the profile. If the user provided the sizes,
	 * they are obtained via devlink.
	 */
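	/* Worked example with made-up numbers: for a 512K-entry KVD, a
	 * 96K-entry linear part and a double:single parts ratio of 41:59,
	 * the hash part is 512K - 96K = 416K entries. The double part is
	 * then 416K * 41 / (41 + 59), rounded down to the granularity, and
	 * the single part takes whatever remains.
	 */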
	err = devl_resource_size_get(devlink,
				     MLXSW_SP_RESOURCE_KVD_LINEAR,
				     p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devl_resource_size_get(devlink,
				     MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
				     p_double_size);
	if (err) {
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devl_resource_size_get(devlink,
				     MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
				     p_single_size);
	if (err)
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}

static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				     struct sk_buff *skb, u16 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	skb_pull(skb, MLXSW_TXHDR_LEN);
	mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}

static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind = mlxsw_sp1_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.fw_req_rev = &mlxsw_sp1_fw_rev,
	.fw_filename = MLXSW_SP1_FW_FILENAME,
	.init = mlxsw_sp1_init,
	.fini = mlxsw_sp_fini,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp1_resources_register,
	.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp1_config_profile,
	.sdq_supports_cqe_v2 = false,
};

static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind = mlxsw_sp2_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.fw_req_rev = &mlxsw_sp2_fw_rev,
	.fw_filename = MLXSW_SP2_FW_FILENAME,
	.init = mlxsw_sp2_init,
	.fini = mlxsw_sp_fini,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.ports_remove_selected = mlxsw_sp_ports_remove_selected,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp2_resources_register,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp2_config_profile,
	.sdq_supports_cqe_v2 = true,
};

static struct mlxsw_driver mlxsw_sp3_driver = {
	.kind = mlxsw_sp3_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.fw_req_rev = &mlxsw_sp3_fw_rev,
	.fw_filename = MLXSW_SP3_FW_FILENAME,
	.init = mlxsw_sp3_init,
	.fini = mlxsw_sp_fini,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.ports_remove_selected = mlxsw_sp_ports_remove_selected,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp2_resources_register,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp2_config_profile,
	.sdq_supports_cqe_v2 = true,
};

static struct mlxsw_driver mlxsw_sp4_driver = {
	.kind = mlxsw_sp4_driver_name,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp4_init,
	.fini = mlxsw_sp_fini,
	.port_split = mlxsw_sp_port_split,
	.port_unsplit = mlxsw_sp_port_unsplit,
	.ports_remove_selected = mlxsw_sp_ports_remove_selected,
	.sb_pool_get = mlxsw_sp_sb_pool_get,
	.sb_pool_set = mlxsw_sp_sb_pool_set,
	.sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init = mlxsw_sp_trap_init,
	.trap_fini = mlxsw_sp_trap_fini,
	.trap_action_set = mlxsw_sp_trap_action_set,
	.trap_group_init = mlxsw_sp_trap_group_init,
	.trap_group_set = mlxsw_sp_trap_group_set,
	.trap_policer_init = mlxsw_sp_trap_policer_init,
	.trap_policer_fini = mlxsw_sp_trap_policer_fini,
	.trap_policer_set = mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.resources_register = mlxsw_sp2_resources_register,
	.ptp_transmitted = mlxsw_sp_ptp_transmitted,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp4_config_profile,
	.sdq_supports_cqe_v2 = true,
};

bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev,
				   struct netdev_nested_priv *priv)
{
	int ret = 0;

	if (mlxsw_sp_port_dev_check(lower_dev)) {
		priv->data = (void *)netdev_priv(lower_dev);
		ret = 1;
	}

	return ret;
}

struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = NULL,
	};

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv);

	return (struct mlxsw_sp_port *)priv.data;
}
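
/* Typical use of the lookup above (illustrative): resolve the driver port
 * behind an upper device such as a bridge or LAG:
 *
 *	struct mlxsw_sp_port *mlxsw_sp_port;
 *
 *	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(lag_dev);
 *	if (!mlxsw_sp_port)
 *		return 0;
 *
 * Note that mlxsw_sp_lower_dev_walk() returns 1 on the first match, which
 * makes netdev_walk_all_lower_dev() stop the traversal early.
 */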

struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
}

struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = NULL,
	};

	if (mlxsw_sp_port_dev_check(dev))
		return netdev_priv(dev);

	netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
				      &priv);

	return (struct mlxsw_sp_port *)priv.data;
}

int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err = 0;

	mutex_lock(&mlxsw_sp->parsing.lock);

	if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH;
	refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}

void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];

	mutex_lock(&mlxsw_sp->parsing.lock);

	if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
}
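
/* The two helpers above are reference counted: the first successful
 * mlxsw_sp_parsing_depth_inc() call bumps the hardware parsing depth via
 * the MPRS register, and every successful call must later be balanced by
 * mlxsw_sp_parsing_depth_dec(), which restores the default depth once the
 * last user is gone. A sketch of the expected calling pattern:
 *
 *	err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
 *	if (err)
 *		return err;
 *	...
 *	mlxsw_sp_parsing_depth_dec(mlxsw_sp);
 */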

int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp,
					 __be16 udp_dport)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err;

	mutex_lock(&mlxsw_sp->parsing.lock);

	mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth,
			    be16_to_cpu(udp_dport));
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}
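
/* mlxsw_sp_parsing_depth_inc(), mlxsw_sp_parsing_depth_dec() and
 * mlxsw_sp_parsing_vxlan_udp_dport_set() all serialize on parsing.lock:
 * the parsing depth and the VxLAN UDP destination port live in the same
 * MPRS register, so each write must carry the current value of the other
 * field.
 */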

static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}

static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	u16 max_lag;
	int err, i;

	err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
	if (err)
		return err;

	for (i = 0; i < max_lag; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info,
			  struct netlink_ext_ack *extack)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
		return false;
	}
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
		return false;
	}
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

static int mlxsw_sp_lag_uppers_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct net_device *lag_dev,
					   struct netlink_ext_ack *extack)
{
	struct net_device *upper_dev;
	struct net_device *master;
	struct list_head *iter;
	int done = 0;
	int err;

	master = netdev_master_upper_dev_get(lag_dev);
	if (master && netif_is_bridge_master(master)) {
		err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, lag_dev, master,
						extack);
		if (err)
			return err;
	}

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!is_vlan_dev(upper_dev))
			continue;

		master = netdev_master_upper_dev_get(upper_dev);
		if (master && netif_is_bridge_master(master)) {
			err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
							upper_dev, master,
							extack);
			if (err)
				goto err_port_bridge_join;
		}

		++done;
	}

	return 0;

err_port_bridge_join:
	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!is_vlan_dev(upper_dev))
			continue;

		master = netdev_master_upper_dev_get(upper_dev);
		if (!master || !netif_is_bridge_master(master))
			continue;

		if (!done--)
			break;

		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master);
	}

	master = netdev_master_upper_dev_get(lag_dev);
	if (master && netif_is_bridge_master(master))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master);

	return err;
}

static void
mlxsw_sp_lag_uppers_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *upper_dev;
	struct net_device *master;
	struct list_head *iter;

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!is_vlan_dev(upper_dev))
			continue;

		master = netdev_master_upper_dev_get(upper_dev);
		if (!master)
			continue;

		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master);
	}

	master = netdev_master_upper_dev_get(lag_dev);
	if (master)
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master);
}

static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;

	err = mlxsw_sp_lag_uppers_bridge_join(mlxsw_sp_port, lag_dev,
					      extack);
	if (err)
		goto err_lag_uppers_bridge_join;

	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	/* Join a router interface configured on the LAG, if one exists */
	err = mlxsw_sp_router_port_join_lag(mlxsw_sp_port, lag_dev,
					    extack);
	if (err)
		goto err_router_join;

	err = mlxsw_sp_netdevice_enslavement_replay(mlxsw_sp, lag_dev, extack);
	if (err)
		goto err_replay;

	return 0;

err_replay:
	mlxsw_sp_router_port_leave_lag(mlxsw_sp_port, lag_dev);
err_router_join:
	lag->ref_count--;
	mlxsw_sp_port->lagged = 0;
	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	mlxsw_sp_lag_uppers_bridge_leave(mlxsw_sp_port, lag_dev);
err_lag_uppers_bridge_join:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 lag_id = mlxsw_sp_port->lag_id;
	struct mlxsw_sp_upper *lag;

	if (!mlxsw_sp_port->lagged)
		return;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

	/* Any VLANs configured on the port are no longer valid */
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
	/* Make the LAG and its directly linked uppers leave the bridges
	 * they are members of
	 */
	mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

	if (lag->ref_count == 1)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;

	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
			       ETH_P_8021Q);
}

static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int
mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
					   mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	if (err)
		goto err_dist_port_add;

	return 0;

err_dist_port_add:
	mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		return err;

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
					    mlxsw_sp_port->lag_id);
	if (err)
		goto err_col_port_disable;

	return 0;

err_col_port_disable:
	mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
	return err;
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	if (info->tx_enabled)
		return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
	else
		return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
}

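/* Set the spanning tree state of all possible VLANs on the port to
 * forwarding or discarding in one shot. The SPMS payload describes the
 * whole VID space, which is why it is kmalloc()ed rather than placed on
 * the stack.
 */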
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
			      MLXSW_REG_SPMS_STATE_DISCARDING;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	for (vid = 0; vid < VLAN_N_VID; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

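/* Prepare a port for OVS: move it to virtual port (VLAN-aware) mode, force
 * STP forwarding, make it a member of all usable VLANs and disable
 * learning on them, presumably so that the OVS datapath rather than the
 * bridge FDB drives forwarding decisions.
 */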
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}

static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
					       vid, true);

	mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
			       false, false);
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
}

static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
{
	unsigned int num_vxlans = 0;
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev))
			num_vxlans++;
	}

	return num_vxlans > 1;
}

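/* Reject configurations in which two VxLAN devices under the same bridge
 * map to the same VLAN: each mapped VID is marked in a local bitmap, and
 * test_and_set_bit() flags the first duplicate.
 */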
static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
{
	DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
	struct net_device *dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		u16 pvid;
		int err;

		if (!netif_is_vxlan(dev))
			continue;

		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
		if (err || !pvid)
			continue;

		if (test_and_set_bit(pvid, vlans))
			return false;
	}

	return true;
}

static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
					   struct netlink_ext_ack *extack)
{
	if (br_multicast_enabled(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
		return false;
	}

	if (!br_vlan_enabled(br_dev) &&
	    mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
		return false;
	}

	if (br_vlan_enabled(br_dev) &&
	    !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
		return false;
	}

	return true;
}

static bool mlxsw_sp_netdev_is_master(struct net_device *upper_dev,
				      struct net_device *dev)
{
	return upper_dev == netdev_master_upper_dev_get(dev);
}

static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp,
				      unsigned long event, void *ptr,
				      bool process_foreign);

static int mlxsw_sp_netdevice_validate_uppers(struct mlxsw_sp *mlxsw_sp,
					      struct net_device *dev,
					      struct netlink_ext_ack *extack)
{
	struct net_device *upper_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_upper_dev_rcu(dev, upper_dev, iter) {
		struct netdev_notifier_changeupper_info info = {
			.info = {
				.dev = dev,
				.extack = extack,
			},
			.master = mlxsw_sp_netdev_is_master(upper_dev, dev),
			.upper_dev = upper_dev,
			.linking = true,

			/* upper_info is relevant for LAG devices. But we would
			 * only need this if LAG were a valid upper above
			 * another upper (e.g. a bridge that is a member of a
			 * LAG), and that is never a valid configuration. So we
			 * can keep this as NULL.
			 */
			.upper_info = NULL,
		};

		err = __mlxsw_sp_netdevice_event(mlxsw_sp,
						 NETDEV_PRECHANGEUPPER,
						 &info, true);
		if (err)
			return err;

		err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp, upper_dev,
							 extack);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr,
					       bool replay_deslavement)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;
	u16 proto;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp,
								 upper_dev,
								 extack);
			if (err)
				return err;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		if (netif_is_bridge_master(upper_dev)) {
			br_vlan_get_proto(upper_dev, &proto);
			if (br_vlan_enabled(upper_dev) &&
			    proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported");
				return -EOPNOTSUPP;
			}
			if (vlan_uses_dev(lower_dev) &&
			    br_vlan_enabled(upper_dev) &&
			    proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported");
				return -EOPNOTSUPP;
			}
		}
		if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) {
			struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev);

			if (br_vlan_enabled(br_dev)) {
				br_vlan_get_proto(br_dev, &proto);
				if (proto == ETH_P_8021AD) {
					NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge");
					return -EOPNOTSUPP;
				}
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		if (is_vlan_dev(upper_dev) && mlxsw_sp_port->security) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a locked port");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			} else {
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
				if (!replay_deslavement)
					break;
				mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
								      lower_dev);
			}
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev, extack);
			} else {
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
				mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
								      dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}

static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return 0;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
					 struct net_device *port_dev,
					 unsigned long event, void *ptr,
					 bool replay_deslavement)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
							   event, ptr,
							   replay_deslavement);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
							   ptr);
	}

	return 0;
}

/* Called for LAG or its upper VLAN after the per-LAG-lower processing was done,
 * to do any per-LAG / per-LAG-upper processing.
 */
static int mlxsw_sp_netdevice_post_lag_event(struct net_device *dev,
					     unsigned long event,
					     void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(dev);
	struct netdev_notifier_changeupper_info *info = ptr;

	if (!mlxsw_sp)
		return 0;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		if (info->linking)
			break;
		if (netif_is_bridge_master(info->upper_dev))
			mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, dev);
		break;
	}
	return 0;
}

static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
							    ptr, false);
			if (ret)
				return ret;
		}
	}

	return mlxsw_sp_netdevice_post_lag_event(lag_dev, event, ptr);
}
5087
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid, bool replay_deslavement)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp,
								 upper_dev,
								 extack);
			if (err)
				return err;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			} else {
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
				if (!replay_deslavement)
					break;
				mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
								      vlan_dev);
			}
		} else if (netif_is_macvlan(upper_dev)) {
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		}
		break;
	}

	return err;
}

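/* Same as mlxsw_sp_netdevice_port_vlan_event(), but for a VLAN device
 * on top of a LAG: the event is replayed on each LAG lower before the
 * per-LAG-upper processing is done.
 */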
static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
						  struct net_device *lag_dev,
						  unsigned long event,
						  void *ptr, u16 vid)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
								 event, ptr,
								 vid, false);
			if (ret)
				return ret;
		}
	}

	return mlxsw_sp_netdevice_post_lag_event(vlan_dev, event, ptr);
}

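/* Handle upper-related events for a VLAN device on top of a bridge.
 * Only macvlan and L3 master (e.g. VRF) uppers are supported here.
 */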
static int mlxsw_sp_netdevice_bridge_vlan_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *vlan_dev,
						struct net_device *br_dev,
						unsigned long event, void *ptr,
						u16 vid, bool process_foreign)
{
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!process_foreign && !mlxsw_sp_lower_get(vlan_dev))
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

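/* Dispatch events for a VLAN device according to the type of its real
 * device: front panel port, LAG or bridge.
 */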
static int mlxsw_sp_netdevice_vlan_event(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *vlan_dev,
					 unsigned long event, void *ptr,
					 bool process_foreign)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
							  event, ptr, vid,
							  true);
	else if (netif_is_lag_master(real_dev))
		return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
							      real_dev, event,
							      ptr, vid);
	else if (netif_is_bridge_master(real_dev))
		return mlxsw_sp_netdevice_bridge_vlan_event(mlxsw_sp, vlan_dev,
							    real_dev, event,
							    ptr, vid,
							    process_foreign);

	return 0;
}

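/* Validate and handle uppers of a bridge device. VLAN uppers are only
 * allowed with the 802.1q protocol and never on top of an 802.1ad
 * bridge.
 */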
static int mlxsw_sp_netdevice_bridge_event(struct mlxsw_sp *mlxsw_sp,
					   struct net_device *br_dev,
					   unsigned long event, void *ptr,
					   bool process_foreign)
{
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	u16 proto;

	if (!process_foreign && !mlxsw_sp_lower_get(br_dev))
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		if (!info->linking)
			break;
		if (br_vlan_enabled(br_dev)) {
			br_vlan_get_proto(br_dev, &proto);
			if (proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge");
				return -EOPNOTSUPP;
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}

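/* The only supported upper for a macvlan is an L3 master (e.g. VRF)
 * device.
 */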
static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
					    unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);
	upper_dev = info->upper_dev;

	if (!netif_is_l3_master(upper_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
		return -EOPNOTSUPP;
	}

	return 0;
}

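/* Handle the joining / leaving of a VxLAN device with respect to an
 * offloaded bridge, including the case where the VxLAN device is
 * brought up or down while already enslaved to the bridge.
 */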
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}

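/* Demultiplex a netdevice event to the handler matching the device
 * type. SPAN entries mirroring towards a netdevice that is being
 * unregistered are invalidated first.
 */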
static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp,
				      unsigned long event, void *ptr,
				      bool process_foreign)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_span_entry *span_entry;
	int err = 0;

	if (event == NETDEV_UNREGISTER) {
		span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
		if (span_entry)
			mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
	}

	if (netif_is_vxlan(dev))
		err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
	else if (mlxsw_sp_port_dev_check(dev))
		err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr, true);
	else if (netif_is_lag_master(dev))
		err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
	else if (is_vlan_dev(dev))
		err = mlxsw_sp_netdevice_vlan_event(mlxsw_sp, dev, event, ptr,
						    process_foreign);
	else if (netif_is_bridge_master(dev))
		err = mlxsw_sp_netdevice_bridge_event(mlxsw_sp, dev, event, ptr,
						      process_foreign);
	else if (netif_is_macvlan(dev))
		err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);

	return err;
}

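/* Notifier block callback. SPAN is respun before the event is handled,
 * as the event might invalidate the current mirroring paths.
 */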
static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct mlxsw_sp *mlxsw_sp;
	int err;

	mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
	mlxsw_sp_span_respin(mlxsw_sp);
	err = __mlxsw_sp_netdevice_event(mlxsw_sp, event, ptr, false);

	return notifier_from_errno(err);
}

static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};

static const struct pci_device_id mlxsw_sp4_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM4), 0},
	{0, },
};

static struct pci_driver mlxsw_sp4_pci_driver = {
	.name = mlxsw_sp4_driver_name,
	.id_table = mlxsw_sp4_pci_id_table,
};

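/* Register a core driver and a PCI driver for each ASIC generation.
 * Registration is unwound in reverse order on failure.
 */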
static int __init mlxsw_sp_module_init(void)
{
	int err;

	err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
	if (err)
		return err;

	err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
	if (err)
		goto err_sp2_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
	if (err)
		goto err_sp3_core_driver_register;

	err = mlxsw_core_driver_register(&mlxsw_sp4_driver);
	if (err)
		goto err_sp4_core_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
	if (err)
		goto err_sp1_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
	if (err)
		goto err_sp2_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
	if (err)
		goto err_sp3_pci_driver_register;

	err = mlxsw_pci_driver_register(&mlxsw_sp4_pci_driver);
	if (err)
		goto err_sp4_pci_driver_register;

	return 0;

err_sp4_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
err_sp3_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
err_sp2_pci_driver_register:
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
err_sp4_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
err_sp3_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_pci_driver_unregister(&mlxsw_sp4_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
	mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
	mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME);