1// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3
4#include <linux/kernel.h>
5#include <linux/module.h>
6#include <linux/device.h>
7#include <linux/export.h>
8#include <linux/err.h>
9#include <linux/if_link.h>
10#include <linux/netdevice.h>
11#include <linux/completion.h>
12#include <linux/skbuff.h>
13#include <linux/etherdevice.h>
14#include <linux/types.h>
15#include <linux/string.h>
16#include <linux/gfp.h>
17#include <linux/random.h>
18#include <linux/jiffies.h>
19#include <linux/mutex.h>
20#include <linux/rcupdate.h>
21#include <linux/slab.h>
22#include <linux/workqueue.h>
23#include <linux/firmware.h>
24#include <asm/byteorder.h>
25#include <net/devlink.h>
26#include <trace/events/devlink.h>
27
28#include "core.h"
29#include "core_env.h"
30#include "item.h"
31#include "cmd.h"
32#include "port.h"
33#include "trap.h"
34#include "emad.h"
35#include "reg.h"
36#include "resources.h"
37#include "../mlxfw/mlxfw.h"
38
39static LIST_HEAD(mlxsw_core_driver_list);
40static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
41
42static const char mlxsw_core_driver_name[] = "mlxsw_core";
43
44static struct workqueue_struct *mlxsw_wq;
45static struct workqueue_struct *mlxsw_owq;
46
/* Per-port core state, one entry per possible local port. */
struct mlxsw_core_port {
	struct devlink_port devlink_port;
	void *port_driver_priv; /* set by the port driver; NULL while unused */
	u16 local_port;
	struct mlxsw_linecard *linecard; /* owning line card, if any */
};
53
/* Return the port driver's private area attached to a core port. */
void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_port_driver_priv);
59
60static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port)
61{
62 return mlxsw_core_port->port_driver_priv != NULL;
63}
64
/* Per-device core instance. The driver's private area trails this
 * structure (driver_priv[]), so both are one allocation.
 */
struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	struct workqueue_struct *emad_wq;
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct list_head irq_event_handler_list;
	struct mutex irq_event_handler_lock; /* Locks access to handlers list */
	struct {
		atomic64_t tid; /* upper 32 bits randomized per device */
		struct list_head trans_list; /* pending EMADs; RCU readers */
		spinlock_t trans_list_lock; /* protects trans_list writes */
		bool use_emad;
		bool enable_string_tlv;
		bool enable_latency_tlv;
	} emad;
	struct {
		u16 *mapping; /* lag_id+port_index to local_port mapping */
	} lag;
	struct mlxsw_res res;
	struct mlxsw_hwmon *hwmon;
	struct mlxsw_thermal *thermal;
	struct mlxsw_linecards *linecards;
	struct mlxsw_core_port *ports; /* array of max_ports entries */
	unsigned int max_ports;
	atomic_t active_ports_count;
	bool fw_flash_in_progress; /* lengthens EMAD timeouts while set */
	struct {
		struct devlink_health_reporter *fw_fatal;
	} health;
	struct mlxsw_env *env;
	unsigned long driver_priv[];
	/* driver_priv has to be always the last item */
};
101
/* Accessor for the device's line cards state. */
struct mlxsw_linecards *mlxsw_core_linecards(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->linecards;
}
106
/* Attach the line cards state to the core instance. */
void mlxsw_core_linecards_set(struct mlxsw_core *mlxsw_core,
			      struct mlxsw_linecards *linecards)
{
	mlxsw_core->linecards = linecards;
}
112
113#define MLXSW_PORT_MAX_PORTS_DEFAULT 0x40
114
/* devlink resource occupancy getter: number of currently active ports. */
static u64 mlxsw_ports_occ_get(void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;

	return atomic_read(&mlxsw_core->active_ports_count);
}
121
/* Register the generic "ports" devlink resource. max_ports includes the
 * reserved index 0 (ports are numbered from 1), hence the -1 here.
 */
static int mlxsw_core_resources_ports_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params ports_num_params;
	u32 max_ports;

	max_ports = mlxsw_core->max_ports - 1;
	devlink_resource_size_params_init(&ports_num_params, max_ports,
					  max_ports, 1,
					  DEVLINK_RESOURCE_UNIT_ENTRY);

	return devl_resource_register(devlink,
				      DEVLINK_RESOURCE_GENERIC_NAME_PORTS,
				      max_ports, MLXSW_CORE_RESOURCE_PORTS,
				      DEVLINK_RESOURCE_ID_PARENT_TOP,
				      &ports_num_params);
}
139
/* Allocate the per-port array and hook up the "ports" devlink resource.
 * On reload the resource registration survives, so only the occupancy
 * getter is (re-)registered in that case.
 */
static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core, bool reload)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	int err;

	/* Switch ports are numbered from 1 to queried value */
	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
		mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
							   MAX_SYSTEM_PORT) + 1;
	else
		mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1;

	mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
				    sizeof(struct mlxsw_core_port), GFP_KERNEL);
	if (!mlxsw_core->ports)
		return -ENOMEM;

	if (!reload) {
		err = mlxsw_core_resources_ports_register(mlxsw_core);
		if (err)
			goto err_resources_ports_register;
	}
	atomic_set(&mlxsw_core->active_ports_count, 0);
	devl_resource_occ_get_register(devlink, MLXSW_CORE_RESOURCE_PORTS,
				       mlxsw_ports_occ_get, mlxsw_core);

	return 0;

err_resources_ports_register:
	kfree(mlxsw_core->ports);
	return err;
}
172
/* Tear down what mlxsw_ports_init() set up; devlink resources are kept
 * across a reload and only unregistered on full removal.
 */
static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core, bool reload)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);

	devl_resource_occ_get_unregister(devlink, MLXSW_CORE_RESOURCE_PORTS);
	if (!reload)
		devl_resources_unregister(priv_to_devlink(mlxsw_core));

	kfree(mlxsw_core->ports);
}
183
/* Number of local port slots (including the reserved index 0). */
unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->max_ports;
}
EXPORT_SYMBOL(mlxsw_core_max_ports);
189
/* Report the maximum number of LAGs: the driver profile's value takes
 * precedence over the firmware-queried MAX_LAG resource. Returns -EIO
 * when neither source is available.
 */
int mlxsw_core_max_lag(struct mlxsw_core *mlxsw_core, u16 *p_max_lag)
{
	struct mlxsw_driver *driver = mlxsw_core->driver;

	if (driver->profile->used_max_lag) {
		*p_max_lag = driver->profile->max_lag;
		return 0;
	}

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG))
		return -EIO;

	*p_max_lag = MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_max_lag);
206
/* Return the driver-private area that trails struct mlxsw_core. */
void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_driver_priv);
212
213bool
214mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
215 const struct mlxsw_fw_rev *req_rev)
216{
217 return rev->minor > req_rev->minor ||
218 (rev->minor == req_rev->minor &&
219 rev->subminor >= req_rev->subminor);
220}
221EXPORT_SYMBOL(mlxsw_core_fw_rev_minor_subminor_validate);
222
/* Registered RX listener, linked on mlxsw_core->rx_listener_list. */
struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;
	bool enabled; /* only enabled listeners receive packets */
};
229
/* Registered event listener, linked on mlxsw_core->event_listener_list. */
struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_event_listener el;
	void *priv;
};
236
/* Trap groups configured by the core itself: EMAD responses and core
 * events.
 */
static const u8 mlxsw_core_trap_groups[] = {
	MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
	MLXSW_REG_HTGT_TRAP_GROUP_CORE_EVENT,
};
241
/* Program the core trap groups via the HTGT register. A no-op on buses
 * without TX/RX capability, where traps cannot be delivered anyway.
 */
static int mlxsw_core_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	int err;
	int i;

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return 0;

	for (i = 0; i < ARRAY_SIZE(mlxsw_core_trap_groups); i++) {
		mlxsw_reg_htgt_pack(htgt_pl, mlxsw_core_trap_groups[i],
				    MLXSW_REG_HTGT_INVALID_POLICER,
				    MLXSW_REG_HTGT_DEFAULT_PRIORITY,
				    MLXSW_REG_HTGT_DEFAULT_TC);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}
	return 0;
}
262
263/******************
264 * EMAD processing
265 ******************/
266
267/* emad_eth_hdr_dmac
268 * Destination MAC in EMAD's Ethernet header.
269 * Must be set to 01:02:c9:00:00:01
270 */
271MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);
272
273/* emad_eth_hdr_smac
274 * Source MAC in EMAD's Ethernet header.
275 * Must be set to 00:02:c9:01:02:03
276 */
277MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);
278
279/* emad_eth_hdr_ethertype
280 * Ethertype in EMAD's Ethernet header.
281 * Must be set to 0x8932
282 */
283MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);
284
285/* emad_eth_hdr_mlx_proto
286 * Mellanox protocol.
287 * Must be set to 0x0.
288 */
289MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);
290
291/* emad_eth_hdr_ver
292 * Mellanox protocol version.
293 * Must be set to 0x0.
294 */
295MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);
296
297/* emad_op_tlv_type
298 * Type of the TLV.
299 * Must be set to 0x1 (operation TLV).
300 */
301MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);
302
303/* emad_op_tlv_len
304 * Length of the operation TLV in u32.
305 * Must be set to 0x4.
306 */
307MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);
308
309/* emad_op_tlv_dr
310 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
311 * EMAD. DR TLV must follow.
312 *
313 * Note: Currently not supported and must not be set.
314 */
315MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);
316
317/* emad_op_tlv_status
318 * Returned status in case of EMAD response. Must be set to 0 in case
319 * of EMAD request.
320 * 0x0 - success
321 * 0x1 - device is busy. Requester should retry
322 * 0x2 - Mellanox protocol version not supported
323 * 0x3 - unknown TLV
324 * 0x4 - register not supported
325 * 0x5 - operation class not supported
326 * 0x6 - EMAD method not supported
327 * 0x7 - bad parameter (e.g. port out of range)
328 * 0x8 - resource not available
329 * 0x9 - message receipt acknowledgment. Requester should retry
330 * 0x70 - internal error
331 */
332MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);
333
334/* emad_op_tlv_register_id
335 * Register ID of register within register TLV.
336 */
337MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);
338
339/* emad_op_tlv_r
340 * Response bit. Setting to 1 indicates Response, otherwise request.
341 */
342MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);
343
344/* emad_op_tlv_method
345 * EMAD method type.
346 * 0x1 - query
347 * 0x2 - write
348 * 0x3 - send (currently not supported)
349 * 0x4 - event
350 */
351MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);
352
353/* emad_op_tlv_class
354 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
355 */
356MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);
357
358/* emad_op_tlv_tid
359 * EMAD transaction ID. Used for pairing request and response EMADs.
360 */
361MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);
362
363/* emad_string_tlv_type
364 * Type of the TLV.
365 * Must be set to 0x2 (string TLV).
366 */
367MLXSW_ITEM32(emad, string_tlv, type, 0x00, 27, 5);
368
369/* emad_string_tlv_len
370 * Length of the string TLV in u32.
371 */
372MLXSW_ITEM32(emad, string_tlv, len, 0x00, 16, 11);
373
374#define MLXSW_EMAD_STRING_TLV_STRING_LEN 128
375
376/* emad_string_tlv_string
377 * String provided by the device's firmware in case of erroneous register access
378 */
379MLXSW_ITEM_BUF(emad, string_tlv, string, 0x04,
380 MLXSW_EMAD_STRING_TLV_STRING_LEN);
381
382/* emad_latency_tlv_type
383 * Type of the TLV.
384 * Must be set to 0x4 (latency TLV).
385 */
386MLXSW_ITEM32(emad, latency_tlv, type, 0x00, 27, 5);
387
388/* emad_latency_tlv_len
389 * Length of the latency TLV in u32.
390 */
391MLXSW_ITEM32(emad, latency_tlv, len, 0x00, 16, 11);
392
393/* emad_latency_tlv_latency_time
394 * EMAD latency time in units of uSec.
395 */
396MLXSW_ITEM32(emad, latency_tlv, latency_time, 0x04, 0, 32);
397
398/* emad_reg_tlv_type
399 * Type of the TLV.
400 * Must be set to 0x3 (register TLV).
401 */
402MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);
403
404/* emad_reg_tlv_len
405 * Length of the operation TLV in u32.
406 */
407MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);
408
409/* emad_end_tlv_type
410 * Type of the TLV.
411 * Must be set to 0x0 (end TLV).
412 */
413MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);
414
415/* emad_end_tlv_len
416 * Length of the end TLV in u32.
417 * Must be set to 1.
418 */
419MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
420
enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};

/* Human-readable name of a register access type, for diagnostics. */
static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
{
	if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
		return "query";
	if (type == MLXSW_CORE_REG_ACCESS_TYPE_WRITE)
		return "write";
	BUG();
}
437
/* Fill the mandatory end TLV that terminates an EMAD. */
static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}
443
/* Fill the register TLV: a one-word header (length counted in u32s,
 * including the header word itself) followed by the register payload.
 */
static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
				    const struct mlxsw_reg_info *reg,
				    char *payload)
{
	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}
452
/* Fill the string TLV header; firmware writes the string on response. */
static void mlxsw_emad_pack_string_tlv(char *string_tlv)
{
	mlxsw_emad_string_tlv_type_set(string_tlv, MLXSW_EMAD_TLV_TYPE_STRING);
	mlxsw_emad_string_tlv_len_set(string_tlv, MLXSW_EMAD_STRING_TLV_LEN);
}
458
459static void mlxsw_emad_pack_op_tlv(char *op_tlv,
460 const struct mlxsw_reg_info *reg,
461 enum mlxsw_core_reg_access_type type,
462 u64 tid)
463{
464 mlxsw_emad_op_tlv_type_set(buf: op_tlv, val: MLXSW_EMAD_TLV_TYPE_OP);
465 mlxsw_emad_op_tlv_len_set(buf: op_tlv, MLXSW_EMAD_OP_TLV_LEN);
466 mlxsw_emad_op_tlv_dr_set(buf: op_tlv, val: 0);
467 mlxsw_emad_op_tlv_status_set(buf: op_tlv, val: 0);
468 mlxsw_emad_op_tlv_register_id_set(buf: op_tlv, val: reg->id);
469 mlxsw_emad_op_tlv_r_set(buf: op_tlv, val: MLXSW_EMAD_OP_TLV_REQUEST);
470 if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
471 mlxsw_emad_op_tlv_method_set(buf: op_tlv,
472 val: MLXSW_EMAD_OP_TLV_METHOD_QUERY);
473 else
474 mlxsw_emad_op_tlv_method_set(buf: op_tlv,
475 val: MLXSW_EMAD_OP_TLV_METHOD_WRITE);
476 mlxsw_emad_op_tlv_class_set(buf: op_tlv,
477 val: MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
478 mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
479}
480
/* Fill the latency TLV header; firmware fills the latency on response. */
static void mlxsw_emad_pack_latency_tlv(char *latency_tlv)
{
	mlxsw_emad_latency_tlv_type_set(latency_tlv, MLXSW_EMAD_TLV_TYPE_LATENCY);
	mlxsw_emad_latency_tlv_len_set(latency_tlv, MLXSW_EMAD_LATENCY_TLV_LEN);
}
486
/* Prepend the fixed EMAD Ethernet header (well-known DMAC/SMAC, EMAD
 * ethertype) to @skb. Always succeeds; the int return is historical.
 */
static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

	skb_reset_mac_header(skb);

	return 0;
}
501
/* Build a complete request EMAD in @skb. TLVs are pushed in reverse
 * wire order — end, register, optional latency/string, operation —
 * and finally the EMAD Ethernet header.
 */
static void mlxsw_emad_construct(const struct mlxsw_core *mlxsw_core,
				 struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type, u64 tid)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	if (mlxsw_core->emad.enable_latency_tlv) {
		buf = skb_push(skb, MLXSW_EMAD_LATENCY_TLV_LEN * sizeof(u32));
		mlxsw_emad_pack_latency_tlv(buf);
	}

	if (mlxsw_core->emad.enable_string_tlv) {
		buf = skb_push(skb, MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32));
		mlxsw_emad_pack_string_tlv(buf);
	}

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, tid);

	mlxsw_emad_construct_eth_hdr(skb);
}
531
/* Byte offsets of the TLVs inside a received EMAD, stored in skb->cb by
 * mlxsw_emad_tlv_parse(). Zero means the optional TLV is absent.
 */
struct mlxsw_emad_tlv_offsets {
	u16 op_tlv;
	u16 string_tlv;
	u16 latency_tlv;
	u16 reg_tlv;
};
538
539static bool mlxsw_emad_tlv_is_string_tlv(const char *tlv)
540{
541 u8 tlv_type = mlxsw_emad_string_tlv_type_get(tlv);
542
543 return tlv_type == MLXSW_EMAD_TLV_TYPE_STRING;
544}
545
546static bool mlxsw_emad_tlv_is_latency_tlv(const char *tlv)
547{
548 u8 tlv_type = mlxsw_emad_latency_tlv_type_get(tlv);
549
550 return tlv_type == MLXSW_EMAD_TLV_TYPE_LATENCY;
551}
552
/* Locate the TLVs of a received EMAD and cache their offsets in skb->cb.
 * The optional string and latency TLVs, when present, sit between the
 * operation TLV and the register TLV, in that order.
 */
static void mlxsw_emad_tlv_parse(struct sk_buff *skb)
{
	struct mlxsw_emad_tlv_offsets *offsets =
		(struct mlxsw_emad_tlv_offsets *) skb->cb;

	offsets->op_tlv = MLXSW_EMAD_ETH_HDR_LEN;
	offsets->string_tlv = 0;
	offsets->latency_tlv = 0;

	offsets->reg_tlv = MLXSW_EMAD_ETH_HDR_LEN +
			   MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);

	/* If string TLV is present, it must come after the operation TLV. */
	if (mlxsw_emad_tlv_is_string_tlv(skb->data + offsets->reg_tlv)) {
		offsets->string_tlv = offsets->reg_tlv;
		offsets->reg_tlv += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
	}

	if (mlxsw_emad_tlv_is_latency_tlv(skb->data + offsets->reg_tlv)) {
		offsets->latency_tlv = offsets->reg_tlv;
		offsets->reg_tlv += MLXSW_EMAD_LATENCY_TLV_LEN * sizeof(u32);
	}
}
576
/* Pointer to the operation TLV of a parsed EMAD (see mlxsw_emad_tlv_parse). */
static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
	struct mlxsw_emad_tlv_offsets *offsets =
		(struct mlxsw_emad_tlv_offsets *) skb->cb;

	return ((char *) (skb->data + offsets->op_tlv));
}
584
/* Pointer to the string TLV of a parsed EMAD, or NULL if absent. */
static char *mlxsw_emad_string_tlv(const struct sk_buff *skb)
{
	struct mlxsw_emad_tlv_offsets *offsets =
		(struct mlxsw_emad_tlv_offsets *) skb->cb;

	if (!offsets->string_tlv)
		return NULL;

	return ((char *) (skb->data + offsets->string_tlv));
}
595
/* Pointer to the register TLV of a parsed EMAD. */
static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
	struct mlxsw_emad_tlv_offsets *offsets =
		(struct mlxsw_emad_tlv_offsets *) skb->cb;

	return ((char *) (skb->data + offsets->reg_tlv));
}
603
604static char *mlxsw_emad_reg_payload(const char *reg_tlv)
605{
606 return ((char *) (reg_tlv + sizeof(u32)));
607}
608
/* Register payload inside a command-interface mailbox: skip the
 * operation TLV plus the one-word register TLV header.
 */
static char *mlxsw_emad_reg_payload_cmd(const char *mbox)
{
	return ((char *) (mbox + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}
613
/* Extract the transaction ID from a parsed EMAD's operation TLV. */
static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return mlxsw_emad_op_tlv_tid_get(op_tlv);
}
621
/* True when a parsed EMAD has its response bit set. */
static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}
629
/* Map the operation TLV status to an errno and report the raw status
 * via @p_status. BUSY and RECEIPT_ACK yield -EAGAIN (caller retries);
 * all other non-zero statuses are terminal -EIO.
 */
static int mlxsw_emad_process_status(char *op_tlv,
				     enum mlxsw_emad_op_tlv_status *p_status)
{
	*p_status = mlxsw_emad_op_tlv_status_get(op_tlv);

	switch (*p_status) {
	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
		return 0;
	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
		return -EAGAIN;
	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
	default:
		return -EIO;
	}
}
653
/* Convenience wrapper: status of the EMAD carried by @skb. */
static int
mlxsw_emad_process_status_skb(struct sk_buff *skb,
			      enum mlxsw_emad_op_tlv_status *p_status)
{
	return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
}
660
/* In-flight EMAD register transaction. */
struct mlxsw_reg_trans {
	struct list_head list; /* on emad.trans_list, RCU-traversed on RX */
	struct list_head bulk_list; /* caller-provided bulk wait list */
	struct mlxsw_core *core;
	struct sk_buff *tx_skb; /* master copy; clones are transmitted */
	struct mlxsw_tx_info tx_info;
	struct delayed_work timeout_dw; /* response timeout / retry trigger */
	unsigned int retries;
	u64 tid;
	struct completion completion; /* signalled when transaction finishes */
	atomic_t active; /* 1 while a response or timeout is outstanding */
	mlxsw_reg_trans_cb_t *cb; /* optional payload callback on success */
	unsigned long cb_priv;
	const struct mlxsw_reg_info *reg;
	enum mlxsw_core_reg_access_type type;
	int err; /* final result, valid after completion */
	char *emad_err_string; /* firmware error string, if provided */
	enum mlxsw_emad_op_tlv_status emad_status;
	struct rcu_head rcu;
};
681
/* Copy the firmware-provided error string (if any) out of a response
 * EMAD into the transaction. Allocation failure is silently ignored —
 * the string is diagnostic only.
 */
static void mlxsw_emad_process_string_tlv(const struct sk_buff *skb,
					  struct mlxsw_reg_trans *trans)
{
	char *string_tlv;
	char *string;

	string_tlv = mlxsw_emad_string_tlv(skb);
	if (!string_tlv)
		return;

	trans->emad_err_string = kzalloc(MLXSW_EMAD_STRING_TLV_STRING_LEN,
					 GFP_ATOMIC);
	if (!trans->emad_err_string)
		return;

	string = mlxsw_emad_string_tlv_string_data(string_tlv);
	strscpy(trans->emad_err_string, string,
		MLXSW_EMAD_STRING_TLV_STRING_LEN);
}
701
#define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS	3000
#define MLXSW_EMAD_TIMEOUT_MS			200

/* Arm the response timeout for a transaction. The base timeout (longer
 * while firmware is being flashed) is doubled with each retry via the
 * shift by the retry count.
 */
static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
	unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);

	if (trans->core->fw_flash_in_progress)
		timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);

	queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw,
			   timeout << trans->retries);
}
715
/* Transmit a clone of the transaction's skb (the original is kept for
 * retries), mark the transaction active and arm its timeout.
 */
static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
			       struct mlxsw_reg_trans *trans)
{
	struct sk_buff *skb;
	int err;

	skb = skb_clone(trans->tx_skb, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
			    skb->data + mlxsw_core->driver->txhdr_len,
			    skb->len - mlxsw_core->driver->txhdr_len);

	atomic_set(&trans->active, 1);
	err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
	if (err) {
		dev_kfree_skb(skb);
		return err;
	}
	mlxsw_emad_trans_timeout_schedule(trans);
	return 0;
}
739
/* Complete a transaction: free the master skb, unlink it from the
 * pending list (under the writers' spinlock, RCU-safe for readers),
 * record the result and wake the waiter.
 */
static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
{
	struct mlxsw_core *mlxsw_core = trans->core;

	dev_kfree_skb(trans->tx_skb);
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	trans->err = err;
	complete(&trans->completion);
}
751
/* Retry a transaction if attempts remain; otherwise finish with -EIO.
 * When the retransmit itself fails, only the side that wins the
 * atomic_dec_and_test() on ->active finishes the transaction, so the
 * concurrent timeout work cannot finish it twice.
 */
static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
				      struct mlxsw_reg_trans *trans)
{
	int err;

	if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
		trans->retries++;
		err = mlxsw_emad_transmit(trans->core, trans);
		if (err == 0)
			return;

		if (!atomic_dec_and_test(&trans->active))
			return;
	} else {
		err = -EIO;
	}
	mlxsw_emad_trans_finish(trans, err);
}
770
/* Response timeout handler. The atomic_dec_and_test() on ->active
 * races with the RX path; only the winner proceeds to retry/finish.
 */
static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
{
	struct mlxsw_reg_trans *trans = container_of(work,
						     struct mlxsw_reg_trans,
						     timeout_dw.work);

	if (!atomic_dec_and_test(&trans->active))
		return;

	mlxsw_emad_transmit_retry(trans->core, trans);
}
782
/* Handle a response EMAD matched to @trans. Claims the transaction via
 * ->active (racing with the timeout work); BUSY/ACK statuses trigger a
 * retransmit, success invokes the payload callback, and errors capture
 * the firmware error string before finishing.
 */
static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
					struct mlxsw_reg_trans *trans,
					struct sk_buff *skb)
{
	int err;

	if (!atomic_dec_and_test(&trans->active))
		return;

	err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
	if (err == -EAGAIN) {
		mlxsw_emad_transmit_retry(mlxsw_core, trans);
	} else {
		if (err == 0) {
			char *reg_tlv = mlxsw_emad_reg_tlv(skb);

			if (trans->cb)
				trans->cb(mlxsw_core,
					  mlxsw_emad_reg_payload(reg_tlv),
					  trans->reg->len, trans->cb_priv);
		} else {
			mlxsw_emad_process_string_tlv(skb, trans);
		}
		mlxsw_emad_trans_finish(trans, err);
	}
}
809
/* RX handler for trapped EMAD packets; called with rcu read lock held.
 * Requests (no response bit) are dropped; responses are matched to a
 * pending transaction by TID. The skb is consumed in all cases.
 */
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u16 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;
	struct mlxsw_reg_trans *trans;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
			    skb->data, skb->len);

	mlxsw_emad_tlv_parse(skb);

	if (!mlxsw_emad_is_resp(skb))
		goto free_skb;

	list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
		if (mlxsw_emad_get_tid(skb) == trans->tid) {
			mlxsw_emad_process_response(mlxsw_core, trans, skb);
			break;
		}
	}

free_skb:
	dev_kfree_skb(skb);
}
835
/* Listener steering trapped EMAD packets to the response matcher. */
static const struct mlxsw_listener mlxsw_emad_rx_listener =
	MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
		  EMAD, DISCARD);
839
/* Query MGIR for firmware support of the optional string and latency
 * TLVs and record the result for request construction.
 */
static int mlxsw_emad_tlv_enable(struct mlxsw_core *mlxsw_core)
{
	char mgir_pl[MLXSW_REG_MGIR_LEN];
	bool string_tlv, latency_tlv;
	int err;

	mlxsw_reg_mgir_pack(mgir_pl);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), mgir_pl);
	if (err)
		return err;

	string_tlv = mlxsw_reg_mgir_fw_info_string_tlv_get(mgir_pl);
	mlxsw_core->emad.enable_string_tlv = string_tlv;

	latency_tlv = mlxsw_reg_mgir_fw_info_latency_tlv_get(mgir_pl);
	mlxsw_core->emad.enable_latency_tlv = latency_tlv;

	return 0;
}
859
/* Stop emitting the optional TLVs in new requests. */
static void mlxsw_emad_tlv_disable(struct mlxsw_core *mlxsw_core)
{
	mlxsw_core->emad.enable_latency_tlv = false;
	mlxsw_core->emad.enable_string_tlv = false;
}
865
866static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
867{
868 struct workqueue_struct *emad_wq;
869 u64 tid;
870 int err;
871
872 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
873 return 0;
874
875 emad_wq = alloc_workqueue(fmt: "mlxsw_core_emad", flags: 0, max_active: 0);
876 if (!emad_wq)
877 return -ENOMEM;
878 mlxsw_core->emad_wq = emad_wq;
879
880 /* Set the upper 32 bits of the transaction ID field to a random
881 * number. This allows us to discard EMADs addressed to other
882 * devices.
883 */
884 get_random_bytes(&tid, 4);
885 tid <<= 32;
886 atomic64_set(&mlxsw_core->emad.tid, tid);
887
888 INIT_LIST_HEAD(list: &mlxsw_core->emad.trans_list);
889 spin_lock_init(&mlxsw_core->emad.trans_list_lock);
890
891 err = mlxsw_core_trap_register(mlxsw_core, listener: &mlxsw_emad_rx_listener,
892 priv: mlxsw_core);
893 if (err)
894 goto err_trap_register;
895
896 err = mlxsw_emad_tlv_enable(mlxsw_core);
897 if (err)
898 goto err_emad_tlv_enable;
899
900 mlxsw_core->emad.use_emad = true;
901
902 return 0;
903
904err_emad_tlv_enable:
905 mlxsw_core_trap_unregister(mlxsw_core, listener: &mlxsw_emad_rx_listener,
906 priv: mlxsw_core);
907err_trap_register:
908 destroy_workqueue(wq: mlxsw_core->emad_wq);
909 return err;
910}
911
/* Tear down the EMAD machinery in reverse order of mlxsw_emad_init().
 * use_emad is cleared first so subsequent register accesses fall back
 * to the command interface.
 */
static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return;

	mlxsw_core->emad.use_emad = false;
	mlxsw_emad_tlv_disable(mlxsw_core);
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
	destroy_workqueue(mlxsw_core->emad_wq);
}
924
/* Allocate a zeroed skb sized for a full EMAD carrying a @reg_len
 * register payload (headers, mandatory and enabled optional TLVs, TX
 * header). All headroom is reserved; callers build the frame with
 * skb_push(). Returns NULL on oversize or allocation failure.
 */
static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
					u16 reg_len)
{
	struct sk_buff *skb;
	u16 emad_len;

	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
	if (mlxsw_core->emad.enable_string_tlv)
		emad_len += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
	if (mlxsw_core->emad.enable_latency_tlv)
		emad_len += MLXSW_EMAD_LATENCY_TLV_LEN * sizeof(u32);
	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
		return NULL;

	skb = netdev_alloc_skb(NULL, emad_len);
	if (!skb)
		return NULL;
	memset(skb->data, 0, emad_len);
	skb_reserve(skb, emad_len);

	return skb;
}
949
/* Issue a register access over EMAD: build the frame, initialize the
 * transaction, publish it on the RCU-read pending list and transmit.
 * On failure the transaction is fully unlinked so the caller may free
 * it immediately.
 */
static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_reg_trans *trans,
				 struct list_head *bulk_list,
				 mlxsw_reg_trans_cb_t *cb,
				 unsigned long cb_priv, u64 tid)
{
	struct sk_buff *skb;
	int err;

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
	if (!skb)
		return -ENOMEM;

	list_add_tail(&trans->bulk_list, bulk_list);
	trans->core = mlxsw_core;
	trans->tx_skb = skb;
	trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
	trans->tx_info.is_emad = true;
	INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
	trans->tid = tid;
	init_completion(&trans->completion);
	trans->cb = cb;
	trans->cb_priv = cb_priv;
	trans->reg = reg;
	trans->type = type;

	mlxsw_emad_construct(mlxsw_core, skb, reg, payload, type, trans->tid);
	mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);

	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	err = mlxsw_emad_transmit(mlxsw_core, trans);
	if (err)
		goto err_out;
	return 0;

err_out:
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del(&trans->bulk_list);
	dev_kfree_skb(trans->tx_skb);
	return err;
}
1002
1003/*****************
1004 * Core functions
1005 *****************/
1006
/* Add a device driver to the global list; always succeeds. */
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);
1015
/* Remove a device driver from the global list. */
void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);
1023
1024static struct mlxsw_driver *__driver_find(const char *kind)
1025{
1026 struct mlxsw_driver *mlxsw_driver;
1027
1028 list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
1029 if (strcmp(mlxsw_driver->kind, kind) == 0)
1030 return mlxsw_driver;
1031 }
1032 return NULL;
1033}
1034
/* Locked wrapper around __driver_find(). */
static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return mlxsw_driver;
}
1044
/* Flash firmware via mlxfw while fw_flash_in_progress is set, which
 * lengthens EMAD timeouts for the duration (see
 * mlxsw_emad_trans_timeout_schedule()).
 */
int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core,
			struct mlxfw_dev *mlxfw_dev,
			const struct firmware *firmware,
			struct netlink_ext_ack *extack)
{
	int err;

	mlxsw_core->fw_flash_in_progress = true;
	err = mlxfw_firmware_flash(mlxfw_dev, firmware, extack);
	mlxsw_core->fw_flash_in_progress = false;

	return err;
}
1058
/* Adapter embedding a mlxfw_dev so mlxfw callbacks can recover the
 * owning mlxsw_core via container_of().
 */
struct mlxsw_core_fw_info {
	struct mlxfw_dev mlxfw_dev;
	struct mlxsw_core *mlxsw_core;
};
1063
/* mlxfw callback: query a firmware component's size/alignment limits
 * via MCQI, then clamp alignment to at least 4 bytes and the write size
 * to what a single MCDA register can carry.
 */
static int mlxsw_core_fw_component_query(struct mlxfw_dev *mlxfw_dev,
					 u16 component_index, u32 *p_max_size,
					 u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcqi_pl[MLXSW_REG_MCQI_LEN];
	int err;

	mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcqi), mcqi_pl);
	if (err)
		return err;
	mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, p_max_write_size);

	*p_align_bits = max_t(u8, *p_align_bits, 2);
	*p_max_write_size = min_t(u16, *p_max_write_size, MLXSW_REG_MCDA_MAX_DATA_LEN);
	return 0;
}
1084
/* mlxfw callback: acquire the firmware update handle. Queries MCC for
 * the current FSM state, refuses with -EBUSY unless it is IDLE, then
 * issues LOCK_UPDATE_HANDLE with the handle returned by the query.
 */
static int mlxsw_core_fw_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlxsw_core_fw_info *mlxsw_core_fw_info =
		container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
	struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
	char mcc_pl[MLXSW_REG_MCC_LEN];
	u8 control_state;
	int err;

	mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
	if (err)
		return err;

	mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 0, *fwhandle, 0);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
}
1106
1107static int mlxsw_core_fw_fsm_component_update(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
1108 u16 component_index, u32 component_size)
1109{
1110 struct mlxsw_core_fw_info *mlxsw_core_fw_info =
1111 container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
1112 struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
1113 char mcc_pl[MLXSW_REG_MCC_LEN];
1114
1115 mlxsw_reg_mcc_pack(payload: mcc_pl, instr: MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
1116 component_index, update_handle: fwhandle, component_size);
1117 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), payload: mcc_pl);
1118}
1119
1120static int mlxsw_core_fw_fsm_block_download(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
1121 u8 *data, u16 size, u32 offset)
1122{
1123 struct mlxsw_core_fw_info *mlxsw_core_fw_info =
1124 container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
1125 struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
1126 char mcda_pl[MLXSW_REG_MCDA_LEN];
1127
1128 mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
1129 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcda), payload: mcda_pl);
1130}
1131
1132static int mlxsw_core_fw_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
1133 u16 component_index)
1134{
1135 struct mlxsw_core_fw_info *mlxsw_core_fw_info =
1136 container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
1137 struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
1138 char mcc_pl[MLXSW_REG_MCC_LEN];
1139
1140 mlxsw_reg_mcc_pack(payload: mcc_pl, instr: MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
1141 component_index, update_handle: fwhandle, component_size: 0);
1142 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), payload: mcc_pl);
1143}
1144
1145static int mlxsw_core_fw_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
1146{
1147 struct mlxsw_core_fw_info *mlxsw_core_fw_info =
1148 container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
1149 struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
1150 char mcc_pl[MLXSW_REG_MCC_LEN];
1151
1152 mlxsw_reg_mcc_pack(payload: mcc_pl, instr: MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, component_index: 0, update_handle: fwhandle, component_size: 0);
1153 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), payload: mcc_pl);
1154}
1155
1156static int mlxsw_core_fw_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
1157 enum mlxfw_fsm_state *fsm_state,
1158 enum mlxfw_fsm_state_err *fsm_state_err)
1159{
1160 struct mlxsw_core_fw_info *mlxsw_core_fw_info =
1161 container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
1162 struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
1163 char mcc_pl[MLXSW_REG_MCC_LEN];
1164 u8 control_state;
1165 u8 error_code;
1166 int err;
1167
1168 mlxsw_reg_mcc_pack(payload: mcc_pl, instr: 0, component_index: 0, update_handle: fwhandle, component_size: 0);
1169 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcc), payload: mcc_pl);
1170 if (err)
1171 return err;
1172
1173 mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
1174 *fsm_state = control_state;
1175 *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, MLXFW_FSM_STATE_ERR_MAX);
1176 return 0;
1177}
1178
1179static void mlxsw_core_fw_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
1180{
1181 struct mlxsw_core_fw_info *mlxsw_core_fw_info =
1182 container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
1183 struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
1184 char mcc_pl[MLXSW_REG_MCC_LEN];
1185
1186 mlxsw_reg_mcc_pack(payload: mcc_pl, instr: MLXSW_REG_MCC_INSTRUCTION_CANCEL, component_index: 0, update_handle: fwhandle, component_size: 0);
1187 mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), payload: mcc_pl);
1188}
1189
1190static void mlxsw_core_fw_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
1191{
1192 struct mlxsw_core_fw_info *mlxsw_core_fw_info =
1193 container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
1194 struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
1195 char mcc_pl[MLXSW_REG_MCC_LEN];
1196
1197 mlxsw_reg_mcc_pack(payload: mcc_pl, instr: MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, component_index: 0, update_handle: fwhandle, component_size: 0);
1198 mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), payload: mcc_pl);
1199}
1200
/* Operations handed to the generic mlxfw flashing state machine; each
 * callback translates to MCQI/MCC/MCDA register accesses above.
 */
static const struct mlxfw_dev_ops mlxsw_core_fw_mlxsw_dev_ops = {
	.component_query = mlxsw_core_fw_component_query,
	.fsm_lock = mlxsw_core_fw_fsm_lock,
	.fsm_component_update = mlxsw_core_fw_fsm_component_update,
	.fsm_block_download = mlxsw_core_fw_fsm_block_download,
	.fsm_component_verify = mlxsw_core_fw_fsm_component_verify,
	.fsm_activate = mlxsw_core_fw_fsm_activate,
	.fsm_query_state = mlxsw_core_fw_fsm_query_state,
	.fsm_cancel = mlxsw_core_fw_fsm_cancel,
	.fsm_release = mlxsw_core_fw_fsm_release,
};
1212
1213static int mlxsw_core_dev_fw_flash(struct mlxsw_core *mlxsw_core,
1214 const struct firmware *firmware,
1215 struct netlink_ext_ack *extack)
1216{
1217 struct mlxsw_core_fw_info mlxsw_core_fw_info = {
1218 .mlxfw_dev = {
1219 .ops = &mlxsw_core_fw_mlxsw_dev_ops,
1220 .psid = mlxsw_core->bus_info->psid,
1221 .psid_size = strlen(mlxsw_core->bus_info->psid),
1222 .devlink = priv_to_devlink(priv: mlxsw_core),
1223 },
1224 .mlxsw_core = mlxsw_core
1225 };
1226
1227 return mlxsw_core_fw_flash(mlxsw_core, mlxfw_dev: &mlxsw_core_fw_info.mlxfw_dev,
1228 firmware, extack);
1229}
1230
1231static int mlxsw_core_fw_rev_validate(struct mlxsw_core *mlxsw_core,
1232 const struct mlxsw_bus_info *mlxsw_bus_info,
1233 const struct mlxsw_fw_rev *req_rev,
1234 const char *filename)
1235{
1236 const struct mlxsw_fw_rev *rev = &mlxsw_bus_info->fw_rev;
1237 union devlink_param_value value;
1238 const struct firmware *firmware;
1239 int err;
1240
1241 /* Don't check if driver does not require it */
1242 if (!req_rev || !filename)
1243 return 0;
1244
1245 /* Don't check if devlink 'fw_load_policy' param is 'flash' */
1246 err = devl_param_driverinit_value_get(devlink: priv_to_devlink(priv: mlxsw_core),
1247 param_id: DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
1248 val: &value);
1249 if (err)
1250 return err;
1251 if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
1252 return 0;
1253
1254 /* Validate driver & FW are compatible */
1255 if (rev->major != req_rev->major) {
1256 WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
1257 rev->major, req_rev->major);
1258 return -EINVAL;
1259 }
1260 if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
1261 return 0;
1262
1263 dev_err(mlxsw_bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
1264 rev->major, rev->minor, rev->subminor, req_rev->major,
1265 req_rev->minor, req_rev->subminor);
1266 dev_info(mlxsw_bus_info->dev, "Flashing firmware using file %s\n", filename);
1267
1268 err = request_firmware_direct(fw: &firmware, name: filename, device: mlxsw_bus_info->dev);
1269 if (err) {
1270 dev_err(mlxsw_bus_info->dev, "Could not request firmware file %s\n", filename);
1271 return err;
1272 }
1273
1274 err = mlxsw_core_dev_fw_flash(mlxsw_core, firmware, NULL);
1275 release_firmware(fw: firmware);
1276 if (err)
1277 dev_err(mlxsw_bus_info->dev, "Could not upgrade firmware\n");
1278
1279 /* On FW flash success, tell the caller FW reset is needed
1280 * if current FW supports it.
1281 */
1282 if (rev->minor >= req_rev->can_reset_minor)
1283 return err ? err : -EAGAIN;
1284 else
1285 return 0;
1286}
1287
1288static int mlxsw_core_fw_flash_update(struct mlxsw_core *mlxsw_core,
1289 struct devlink_flash_update_params *params,
1290 struct netlink_ext_ack *extack)
1291{
1292 return mlxsw_core_dev_fw_flash(mlxsw_core, firmware: params->fw, extack);
1293}
1294
1295static int mlxsw_core_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
1296 union devlink_param_value val,
1297 struct netlink_ext_ack *extack)
1298{
1299 if (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER &&
1300 val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) {
1301 NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
1302 return -EINVAL;
1303 }
1304
1305 return 0;
1306}
1307
/* Core devlink parameters: only the generic 'fw_load_policy', exposed
 * in driverinit cmode with the validator above.
 */
static const struct devlink_param mlxsw_core_fw_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
			      mlxsw_core_devlink_param_fw_load_policy_validate),
};
1312
1313static int mlxsw_core_fw_params_register(struct mlxsw_core *mlxsw_core)
1314{
1315 struct devlink *devlink = priv_to_devlink(priv: mlxsw_core);
1316 union devlink_param_value value;
1317 int err;
1318
1319 err = devl_params_register(devlink, mlxsw_core_fw_devlink_params,
1320 ARRAY_SIZE(mlxsw_core_fw_devlink_params));
1321 if (err)
1322 return err;
1323
1324 value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
1325 devl_param_driverinit_value_set(devlink,
1326 param_id: DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
1327 init_val: value);
1328 return 0;
1329}
1330
1331static void mlxsw_core_fw_params_unregister(struct mlxsw_core *mlxsw_core)
1332{
1333 devl_params_unregister(priv_to_devlink(mlxsw_core), mlxsw_core_fw_devlink_params,
1334 ARRAY_SIZE(mlxsw_core_fw_devlink_params));
1335}
1336
/* Map a generic devlink_port back to the mlxsw_core_port embedding it. */
static void *__dl_port(struct devlink_port *devlink_port)
{
	return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}
1341
1342static int mlxsw_devlink_port_split(struct devlink *devlink,
1343 struct devlink_port *port,
1344 unsigned int count,
1345 struct netlink_ext_ack *extack)
1346{
1347 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port: port);
1348 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1349
1350 if (!mlxsw_core->driver->port_split)
1351 return -EOPNOTSUPP;
1352 return mlxsw_core->driver->port_split(mlxsw_core,
1353 mlxsw_core_port->local_port,
1354 count, extack);
1355}
1356
1357static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
1358 struct devlink_port *port,
1359 struct netlink_ext_ack *extack)
1360{
1361 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port: port);
1362 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1363
1364 if (!mlxsw_core->driver->port_unsplit)
1365 return -EOPNOTSUPP;
1366 return mlxsw_core->driver->port_unsplit(mlxsw_core,
1367 mlxsw_core_port->local_port,
1368 extack);
1369}
1370
1371static int
1372mlxsw_devlink_sb_pool_get(struct devlink *devlink,
1373 unsigned int sb_index, u16 pool_index,
1374 struct devlink_sb_pool_info *pool_info)
1375{
1376 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1377 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1378
1379 if (!mlxsw_driver->sb_pool_get)
1380 return -EOPNOTSUPP;
1381 return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
1382 pool_index, pool_info);
1383}
1384
1385static int
1386mlxsw_devlink_sb_pool_set(struct devlink *devlink,
1387 unsigned int sb_index, u16 pool_index, u32 size,
1388 enum devlink_sb_threshold_type threshold_type,
1389 struct netlink_ext_ack *extack)
1390{
1391 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1392 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1393
1394 if (!mlxsw_driver->sb_pool_set)
1395 return -EOPNOTSUPP;
1396 return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
1397 pool_index, size, threshold_type,
1398 extack);
1399}
1400
1401static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
1402 unsigned int sb_index, u16 pool_index,
1403 u32 *p_threshold)
1404{
1405 struct mlxsw_core *mlxsw_core = devlink_priv(devlink: devlink_port->devlink);
1406 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1407 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1408
1409 if (!mlxsw_driver->sb_port_pool_get ||
1410 !mlxsw_core_port_check(mlxsw_core_port))
1411 return -EOPNOTSUPP;
1412 return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
1413 pool_index, p_threshold);
1414}
1415
1416static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
1417 unsigned int sb_index, u16 pool_index,
1418 u32 threshold,
1419 struct netlink_ext_ack *extack)
1420{
1421 struct mlxsw_core *mlxsw_core = devlink_priv(devlink: devlink_port->devlink);
1422 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1423 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1424
1425 if (!mlxsw_driver->sb_port_pool_set ||
1426 !mlxsw_core_port_check(mlxsw_core_port))
1427 return -EOPNOTSUPP;
1428 return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
1429 pool_index, threshold, extack);
1430}
1431
1432static int
1433mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
1434 unsigned int sb_index, u16 tc_index,
1435 enum devlink_sb_pool_type pool_type,
1436 u16 *p_pool_index, u32 *p_threshold)
1437{
1438 struct mlxsw_core *mlxsw_core = devlink_priv(devlink: devlink_port->devlink);
1439 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1440 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1441
1442 if (!mlxsw_driver->sb_tc_pool_bind_get ||
1443 !mlxsw_core_port_check(mlxsw_core_port))
1444 return -EOPNOTSUPP;
1445 return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
1446 tc_index, pool_type,
1447 p_pool_index, p_threshold);
1448}
1449
1450static int
1451mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
1452 unsigned int sb_index, u16 tc_index,
1453 enum devlink_sb_pool_type pool_type,
1454 u16 pool_index, u32 threshold,
1455 struct netlink_ext_ack *extack)
1456{
1457 struct mlxsw_core *mlxsw_core = devlink_priv(devlink: devlink_port->devlink);
1458 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1459 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1460
1461 if (!mlxsw_driver->sb_tc_pool_bind_set ||
1462 !mlxsw_core_port_check(mlxsw_core_port))
1463 return -EOPNOTSUPP;
1464 return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
1465 tc_index, pool_type,
1466 pool_index, threshold, extack);
1467}
1468
1469static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
1470 unsigned int sb_index)
1471{
1472 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1473 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1474
1475 if (!mlxsw_driver->sb_occ_snapshot)
1476 return -EOPNOTSUPP;
1477 return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
1478}
1479
1480static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
1481 unsigned int sb_index)
1482{
1483 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1484 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1485
1486 if (!mlxsw_driver->sb_occ_max_clear)
1487 return -EOPNOTSUPP;
1488 return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
1489}
1490
1491static int
1492mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
1493 unsigned int sb_index, u16 pool_index,
1494 u32 *p_cur, u32 *p_max)
1495{
1496 struct mlxsw_core *mlxsw_core = devlink_priv(devlink: devlink_port->devlink);
1497 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1498 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1499
1500 if (!mlxsw_driver->sb_occ_port_pool_get ||
1501 !mlxsw_core_port_check(mlxsw_core_port))
1502 return -EOPNOTSUPP;
1503 return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
1504 pool_index, p_cur, p_max);
1505}
1506
1507static int
1508mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
1509 unsigned int sb_index, u16 tc_index,
1510 enum devlink_sb_pool_type pool_type,
1511 u32 *p_cur, u32 *p_max)
1512{
1513 struct mlxsw_core *mlxsw_core = devlink_priv(devlink: devlink_port->devlink);
1514 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1515 struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
1516
1517 if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
1518 !mlxsw_core_port_check(mlxsw_core_port))
1519 return -EOPNOTSUPP;
1520 return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
1521 sb_index, tc_index,
1522 pool_type, p_cur, p_max);
1523}
1524
1525static int
1526mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
1527 struct netlink_ext_ack *extack)
1528{
1529 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1530 char fw_info_psid[MLXSW_REG_MGIR_FW_INFO_PSID_SIZE];
1531 u32 hw_rev, fw_major, fw_minor, fw_sub_minor;
1532 char mgir_pl[MLXSW_REG_MGIR_LEN];
1533 char buf[32];
1534 int err;
1535
1536 mlxsw_reg_mgir_pack(payload: mgir_pl);
1537 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), payload: mgir_pl);
1538 if (err)
1539 return err;
1540 mlxsw_reg_mgir_unpack(payload: mgir_pl, hw_rev: &hw_rev, fw_info_psid, fw_major: &fw_major,
1541 fw_minor: &fw_minor, fw_sub_minor: &fw_sub_minor);
1542
1543 sprintf(buf, fmt: "%X", hw_rev);
1544 err = devlink_info_version_fixed_put(req, version_name: "hw.revision", version_value: buf);
1545 if (err)
1546 return err;
1547
1548 err = devlink_info_version_fixed_put(req,
1549 DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
1550 version_value: fw_info_psid);
1551 if (err)
1552 return err;
1553
1554 sprintf(buf, fmt: "%d.%d.%d", fw_major, fw_minor, fw_sub_minor);
1555 err = devlink_info_version_running_put(req, version_name: "fw.version", version_value: buf);
1556 if (err)
1557 return err;
1558
1559 return devlink_info_version_running_put(req,
1560 DEVLINK_INFO_VERSION_GENERIC_FW,
1561 version_value: buf);
1562}
1563
1564static int
1565mlxsw_devlink_core_bus_device_reload_down(struct devlink *devlink,
1566 bool netns_change, enum devlink_reload_action action,
1567 enum devlink_reload_limit limit,
1568 struct netlink_ext_ack *extack)
1569{
1570 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1571
1572 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_RESET))
1573 return -EOPNOTSUPP;
1574
1575 mlxsw_core_bus_device_unregister(mlxsw_core, reload: true);
1576 return 0;
1577}
1578
1579static int
1580mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink, enum devlink_reload_action action,
1581 enum devlink_reload_limit limit, u32 *actions_performed,
1582 struct netlink_ext_ack *extack)
1583{
1584 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1585 int err;
1586
1587 *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
1588 BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
1589 err = mlxsw_core_bus_device_register(mlxsw_bus_info: mlxsw_core->bus_info,
1590 mlxsw_bus: mlxsw_core->bus,
1591 bus_priv: mlxsw_core->bus_priv, reload: true,
1592 devlink, extack);
1593 return err;
1594}
1595
/* devlink .flash_update: thin adapter to the core FW flashing path. */
static int mlxsw_devlink_flash_update(struct devlink *devlink,
				      struct devlink_flash_update_params *params,
				      struct netlink_ext_ack *extack)
{
	struct mlxsw_core *core = devlink_priv(devlink);

	return mlxsw_core_fw_flash_update(core, params, extack);
}
1604
1605static int mlxsw_devlink_trap_init(struct devlink *devlink,
1606 const struct devlink_trap *trap,
1607 void *trap_ctx)
1608{
1609 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1610 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1611
1612 if (!mlxsw_driver->trap_init)
1613 return -EOPNOTSUPP;
1614 return mlxsw_driver->trap_init(mlxsw_core, trap, trap_ctx);
1615}
1616
1617static void mlxsw_devlink_trap_fini(struct devlink *devlink,
1618 const struct devlink_trap *trap,
1619 void *trap_ctx)
1620{
1621 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1622 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1623
1624 if (!mlxsw_driver->trap_fini)
1625 return;
1626 mlxsw_driver->trap_fini(mlxsw_core, trap, trap_ctx);
1627}
1628
1629static int mlxsw_devlink_trap_action_set(struct devlink *devlink,
1630 const struct devlink_trap *trap,
1631 enum devlink_trap_action action,
1632 struct netlink_ext_ack *extack)
1633{
1634 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1635 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1636
1637 if (!mlxsw_driver->trap_action_set)
1638 return -EOPNOTSUPP;
1639 return mlxsw_driver->trap_action_set(mlxsw_core, trap, action, extack);
1640}
1641
1642static int
1643mlxsw_devlink_trap_group_init(struct devlink *devlink,
1644 const struct devlink_trap_group *group)
1645{
1646 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1647 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1648
1649 if (!mlxsw_driver->trap_group_init)
1650 return -EOPNOTSUPP;
1651 return mlxsw_driver->trap_group_init(mlxsw_core, group);
1652}
1653
1654static int
1655mlxsw_devlink_trap_group_set(struct devlink *devlink,
1656 const struct devlink_trap_group *group,
1657 const struct devlink_trap_policer *policer,
1658 struct netlink_ext_ack *extack)
1659{
1660 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1661 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1662
1663 if (!mlxsw_driver->trap_group_set)
1664 return -EOPNOTSUPP;
1665 return mlxsw_driver->trap_group_set(mlxsw_core, group, policer, extack);
1666}
1667
1668static int
1669mlxsw_devlink_trap_policer_init(struct devlink *devlink,
1670 const struct devlink_trap_policer *policer)
1671{
1672 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1673 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1674
1675 if (!mlxsw_driver->trap_policer_init)
1676 return -EOPNOTSUPP;
1677 return mlxsw_driver->trap_policer_init(mlxsw_core, policer);
1678}
1679
1680static void
1681mlxsw_devlink_trap_policer_fini(struct devlink *devlink,
1682 const struct devlink_trap_policer *policer)
1683{
1684 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1685 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1686
1687 if (!mlxsw_driver->trap_policer_fini)
1688 return;
1689 mlxsw_driver->trap_policer_fini(mlxsw_core, policer);
1690}
1691
1692static int
1693mlxsw_devlink_trap_policer_set(struct devlink *devlink,
1694 const struct devlink_trap_policer *policer,
1695 u64 rate, u64 burst,
1696 struct netlink_ext_ack *extack)
1697{
1698 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1699 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1700
1701 if (!mlxsw_driver->trap_policer_set)
1702 return -EOPNOTSUPP;
1703 return mlxsw_driver->trap_policer_set(mlxsw_core, policer, rate, burst,
1704 extack);
1705}
1706
1707static int
1708mlxsw_devlink_trap_policer_counter_get(struct devlink *devlink,
1709 const struct devlink_trap_policer *policer,
1710 u64 *p_drops)
1711{
1712 struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1713 struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
1714
1715 if (!mlxsw_driver->trap_policer_counter_get)
1716 return -EOPNOTSUPP;
1717 return mlxsw_driver->trap_policer_counter_get(mlxsw_core, policer,
1718 p_drops);
1719}
1720
/* devlink operations for mlxsw devices; each callback dispatches to the
 * optional per-driver op, returning -EOPNOTSUPP when absent.
 */
static const struct devlink_ops mlxsw_devlink_ops = {
	.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
			  BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
	.reload_down = mlxsw_devlink_core_bus_device_reload_down,
	.reload_up = mlxsw_devlink_core_bus_device_reload_up,
	.port_split = mlxsw_devlink_port_split,
	.port_unsplit = mlxsw_devlink_port_unsplit,
	.sb_pool_get = mlxsw_devlink_sb_pool_get,
	.sb_pool_set = mlxsw_devlink_sb_pool_set,
	.sb_port_pool_get = mlxsw_devlink_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_devlink_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get,
	.info_get = mlxsw_devlink_info_get,
	.flash_update = mlxsw_devlink_flash_update,
	.trap_init = mlxsw_devlink_trap_init,
	.trap_fini = mlxsw_devlink_trap_fini,
	.trap_action_set = mlxsw_devlink_trap_action_set,
	.trap_group_init = mlxsw_devlink_trap_group_init,
	.trap_group_set = mlxsw_devlink_trap_group_set,
	.trap_policer_init = mlxsw_devlink_trap_policer_init,
	.trap_policer_fini = mlxsw_devlink_trap_policer_fini,
	.trap_policer_set = mlxsw_devlink_trap_policer_set,
	.trap_policer_counter_get = mlxsw_devlink_trap_policer_counter_get,
};
1750
/* Register all core-level devlink params; currently only the FW ones. */
static int mlxsw_core_params_register(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core_fw_params_register(mlxsw_core);
}
1755
/* Unregister all core-level devlink params; mirror of the register path. */
static void mlxsw_core_params_unregister(struct mlxsw_core *mlxsw_core)
{
	mlxsw_core_fw_params_unregister(mlxsw_core);
}
1760
/* Deferred FW fatal (MFDE) event: carries a copy of the register
 * payload from the atomic trap handler to process context.
 */
struct mlxsw_core_health_event {
	struct mlxsw_core *mlxsw_core;
	char mfde_pl[MLXSW_REG_MFDE_LEN];
	struct work_struct work;
};
1766
1767static void mlxsw_core_health_event_work(struct work_struct *work)
1768{
1769 struct mlxsw_core_health_event *event;
1770 struct mlxsw_core *mlxsw_core;
1771
1772 event = container_of(work, struct mlxsw_core_health_event, work);
1773 mlxsw_core = event->mlxsw_core;
1774 devlink_health_report(reporter: mlxsw_core->health.fw_fatal, msg: "FW fatal event occurred",
1775 priv_ctx: event->mfde_pl);
1776 kfree(objp: event);
1777}
1778
1779static void mlxsw_core_health_listener_func(const struct mlxsw_reg_info *reg,
1780 char *mfde_pl, void *priv)
1781{
1782 struct mlxsw_core_health_event *event;
1783 struct mlxsw_core *mlxsw_core = priv;
1784
1785 event = kmalloc(size: sizeof(*event), GFP_ATOMIC);
1786 if (!event)
1787 return;
1788 event->mlxsw_core = mlxsw_core;
1789 memcpy(to: event->mfde_pl, from: mfde_pl, len: sizeof(event->mfde_pl));
1790 INIT_WORK(&event->work, mlxsw_core_health_event_work);
1791 mlxsw_core_schedule_work(work: &event->work);
1792}
1793
/* Event listener registration for MFDE (FW fatal) events. */
static const struct mlxsw_listener mlxsw_core_health_listener =
	MLXSW_CORE_EVENTL(mlxsw_core_health_listener_func, MFDE);
1796
1797static int
1798mlxsw_core_health_fw_fatal_dump_fatal_cause(const char *mfde_pl,
1799 struct devlink_fmsg *fmsg)
1800{
1801 u32 val, tile_v;
1802 int err;
1803
1804 val = mlxsw_reg_mfde_fatal_cause_id_get(mfde_pl);
1805 err = devlink_fmsg_u32_pair_put(fmsg, name: "cause_id", value: val);
1806 if (err)
1807 return err;
1808 tile_v = mlxsw_reg_mfde_fatal_cause_tile_v_get(mfde_pl);
1809 if (tile_v) {
1810 val = mlxsw_reg_mfde_fatal_cause_tile_index_get(mfde_pl);
1811 err = devlink_fmsg_u8_pair_put(fmsg, "tile_index", val);
1812 if (err)
1813 return err;
1814 }
1815
1816 return 0;
1817}
1818
1819static int
1820mlxsw_core_health_fw_fatal_dump_fw_assert(const char *mfde_pl,
1821 struct devlink_fmsg *fmsg)
1822{
1823 u32 val, tile_v;
1824 int err;
1825
1826 val = mlxsw_reg_mfde_fw_assert_var0_get(mfde_pl);
1827 err = devlink_fmsg_u32_pair_put(fmsg, name: "var0", value: val);
1828 if (err)
1829 return err;
1830 val = mlxsw_reg_mfde_fw_assert_var1_get(mfde_pl);
1831 err = devlink_fmsg_u32_pair_put(fmsg, name: "var1", value: val);
1832 if (err)
1833 return err;
1834 val = mlxsw_reg_mfde_fw_assert_var2_get(mfde_pl);
1835 err = devlink_fmsg_u32_pair_put(fmsg, name: "var2", value: val);
1836 if (err)
1837 return err;
1838 val = mlxsw_reg_mfde_fw_assert_var3_get(mfde_pl);
1839 err = devlink_fmsg_u32_pair_put(fmsg, name: "var3", value: val);
1840 if (err)
1841 return err;
1842 val = mlxsw_reg_mfde_fw_assert_var4_get(mfde_pl);
1843 err = devlink_fmsg_u32_pair_put(fmsg, name: "var4", value: val);
1844 if (err)
1845 return err;
1846 val = mlxsw_reg_mfde_fw_assert_existptr_get(mfde_pl);
1847 err = devlink_fmsg_u32_pair_put(fmsg, name: "existptr", value: val);
1848 if (err)
1849 return err;
1850 val = mlxsw_reg_mfde_fw_assert_callra_get(mfde_pl);
1851 err = devlink_fmsg_u32_pair_put(fmsg, name: "callra", value: val);
1852 if (err)
1853 return err;
1854 val = mlxsw_reg_mfde_fw_assert_oe_get(mfde_pl);
1855 err = devlink_fmsg_bool_pair_put(fmsg, name: "old_event", value: val);
1856 if (err)
1857 return err;
1858 tile_v = mlxsw_reg_mfde_fw_assert_tile_v_get(mfde_pl);
1859 if (tile_v) {
1860 val = mlxsw_reg_mfde_fw_assert_tile_index_get(mfde_pl);
1861 err = devlink_fmsg_u8_pair_put(fmsg, "tile_index", val);
1862 if (err)
1863 return err;
1864 }
1865 val = mlxsw_reg_mfde_fw_assert_ext_synd_get(mfde_pl);
1866 err = devlink_fmsg_u32_pair_put(fmsg, name: "ext_synd", value: val);
1867 if (err)
1868 return err;
1869
1870 return 0;
1871}
1872
1873static int
1874mlxsw_core_health_fw_fatal_dump_kvd_im_stop(const char *mfde_pl,
1875 struct devlink_fmsg *fmsg)
1876{
1877 u32 val;
1878 int err;
1879
1880 val = mlxsw_reg_mfde_kvd_im_stop_oe_get(mfde_pl);
1881 err = devlink_fmsg_bool_pair_put(fmsg, name: "old_event", value: val);
1882 if (err)
1883 return err;
1884 val = mlxsw_reg_mfde_kvd_im_stop_pipes_mask_get(mfde_pl);
1885 return devlink_fmsg_u32_pair_put(fmsg, name: "pipes_mask", value: val);
1886}
1887
1888static int
1889mlxsw_core_health_fw_fatal_dump_crspace_to(const char *mfde_pl,
1890 struct devlink_fmsg *fmsg)
1891{
1892 u32 val;
1893 int err;
1894
1895 val = mlxsw_reg_mfde_crspace_to_log_address_get(mfde_pl);
1896 err = devlink_fmsg_u32_pair_put(fmsg, name: "log_address", value: val);
1897 if (err)
1898 return err;
1899 val = mlxsw_reg_mfde_crspace_to_oe_get(mfde_pl);
1900 err = devlink_fmsg_bool_pair_put(fmsg, name: "old_event", value: val);
1901 if (err)
1902 return err;
1903 val = mlxsw_reg_mfde_crspace_to_log_id_get(mfde_pl);
1904 err = devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val);
1905 if (err)
1906 return err;
1907 val = mlxsw_reg_mfde_crspace_to_log_ip_get(mfde_pl);
1908 err = devlink_fmsg_u64_pair_put(fmsg, "log_ip", val);
1909 if (err)
1910 return err;
1911
1912 return 0;
1913}
1914
1915static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *reporter,
1916 struct devlink_fmsg *fmsg, void *priv_ctx,
1917 struct netlink_ext_ack *extack)
1918{
1919 char *mfde_pl = priv_ctx;
1920 char *val_str;
1921 u8 event_id;
1922 u32 val;
1923 int err;
1924
1925 if (!priv_ctx)
1926 /* User-triggered dumps are not possible */
1927 return -EOPNOTSUPP;
1928
1929 val = mlxsw_reg_mfde_irisc_id_get(mfde_pl);
1930 err = devlink_fmsg_u8_pair_put(fmsg, "irisc_id", val);
1931 if (err)
1932 return err;
1933 err = devlink_fmsg_arr_pair_nest_start(fmsg, name: "event");
1934 if (err)
1935 return err;
1936
1937 event_id = mlxsw_reg_mfde_event_id_get(mfde_pl);
1938 err = devlink_fmsg_u32_pair_put(fmsg, "id", event_id);
1939 if (err)
1940 return err;
1941 switch (event_id) {
1942 case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO:
1943 val_str = "CR space timeout";
1944 break;
1945 case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP:
1946 val_str = "KVD insertion machine stopped";
1947 break;
1948 case MLXSW_REG_MFDE_EVENT_ID_TEST:
1949 val_str = "Test";
1950 break;
1951 case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT:
1952 val_str = "FW assert";
1953 break;
1954 case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE:
1955 val_str = "Fatal cause";
1956 break;
1957 default:
1958 val_str = NULL;
1959 }
1960 if (val_str) {
1961 err = devlink_fmsg_string_pair_put(fmsg, name: "desc", value: val_str);
1962 if (err)
1963 return err;
1964 }
1965
1966 err = devlink_fmsg_arr_pair_nest_end(fmsg);
1967 if (err)
1968 return err;
1969
1970 err = devlink_fmsg_arr_pair_nest_start(fmsg, name: "severity");
1971 if (err)
1972 return err;
1973
1974 val = mlxsw_reg_mfde_severity_get(mfde_pl);
1975 err = devlink_fmsg_u8_pair_put(fmsg, "id", val);
1976 if (err)
1977 return err;
1978 switch (val) {
1979 case MLXSW_REG_MFDE_SEVERITY_FATL:
1980 val_str = "Fatal";
1981 break;
1982 case MLXSW_REG_MFDE_SEVERITY_NRML:
1983 val_str = "Normal";
1984 break;
1985 case MLXSW_REG_MFDE_SEVERITY_INTR:
1986 val_str = "Debug";
1987 break;
1988 default:
1989 val_str = NULL;
1990 }
1991 if (val_str) {
1992 err = devlink_fmsg_string_pair_put(fmsg, name: "desc", value: val_str);
1993 if (err)
1994 return err;
1995 }
1996
1997 err = devlink_fmsg_arr_pair_nest_end(fmsg);
1998 if (err)
1999 return err;
2000
2001 val = mlxsw_reg_mfde_method_get(mfde_pl);
2002 switch (val) {
2003 case MLXSW_REG_MFDE_METHOD_QUERY:
2004 val_str = "query";
2005 break;
2006 case MLXSW_REG_MFDE_METHOD_WRITE:
2007 val_str = "write";
2008 break;
2009 default:
2010 val_str = NULL;
2011 }
2012 if (val_str) {
2013 err = devlink_fmsg_string_pair_put(fmsg, name: "method", value: val_str);
2014 if (err)
2015 return err;
2016 }
2017
2018 val = mlxsw_reg_mfde_long_process_get(mfde_pl);
2019 err = devlink_fmsg_bool_pair_put(fmsg, name: "long_process", value: val);
2020 if (err)
2021 return err;
2022
2023 val = mlxsw_reg_mfde_command_type_get(mfde_pl);
2024 switch (val) {
2025 case MLXSW_REG_MFDE_COMMAND_TYPE_MAD:
2026 val_str = "mad";
2027 break;
2028 case MLXSW_REG_MFDE_COMMAND_TYPE_EMAD:
2029 val_str = "emad";
2030 break;
2031 case MLXSW_REG_MFDE_COMMAND_TYPE_CMDIF:
2032 val_str = "cmdif";
2033 break;
2034 default:
2035 val_str = NULL;
2036 }
2037 if (val_str) {
2038 err = devlink_fmsg_string_pair_put(fmsg, name: "command_type", value: val_str);
2039 if (err)
2040 return err;
2041 }
2042
2043 val = mlxsw_reg_mfde_reg_attr_id_get(mfde_pl);
2044 err = devlink_fmsg_u32_pair_put(fmsg, name: "reg_attr_id", value: val);
2045 if (err)
2046 return err;
2047
2048 switch (event_id) {
2049 case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO:
2050 return mlxsw_core_health_fw_fatal_dump_crspace_to(mfde_pl,
2051 fmsg);
2052 case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP:
2053 return mlxsw_core_health_fw_fatal_dump_kvd_im_stop(mfde_pl,
2054 fmsg);
2055 case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT:
2056 return mlxsw_core_health_fw_fatal_dump_fw_assert(mfde_pl, fmsg);
2057 case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE:
2058 return mlxsw_core_health_fw_fatal_dump_fatal_cause(mfde_pl,
2059 fmsg);
2060 }
2061
2062 return 0;
2063}
2064
2065static int
2066mlxsw_core_health_fw_fatal_test(struct devlink_health_reporter *reporter,
2067 struct netlink_ext_ack *extack)
2068{
2069 struct mlxsw_core *mlxsw_core = devlink_health_reporter_priv(reporter);
2070 char mfgd_pl[MLXSW_REG_MFGD_LEN];
2071 int err;
2072
2073 /* Read the register first to make sure no other bits are changed. */
2074 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), payload: mfgd_pl);
2075 if (err)
2076 return err;
2077 mlxsw_reg_mfgd_trigger_test_set(buf: mfgd_pl, val: true);
2078 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), payload: mfgd_pl);
2079}
2080
/* Devlink health reporter operations for fatal FW events. No diagnose
 * callback is provided; dump only works for FW-triggered events (see
 * mlxsw_core_health_fw_fatal_dump()).
 */
static const struct devlink_health_reporter_ops
mlxsw_core_health_fw_fatal_ops = {
	.name = "fw_fatal",
	.dump = mlxsw_core_health_fw_fatal_dump,
	.test = mlxsw_core_health_fw_fatal_test,
};
2087
2088static int mlxsw_core_health_fw_fatal_config(struct mlxsw_core *mlxsw_core,
2089 bool enable)
2090{
2091 char mfgd_pl[MLXSW_REG_MFGD_LEN];
2092 int err;
2093
2094 /* Read the register first to make sure no other bits are changed. */
2095 err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), payload: mfgd_pl);
2096 if (err)
2097 return err;
2098 mlxsw_reg_mfgd_fatal_event_mode_set(buf: mfgd_pl, val: enable);
2099 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), payload: mfgd_pl);
2100}
2101
2102static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core)
2103{
2104 struct devlink *devlink = priv_to_devlink(priv: mlxsw_core);
2105 struct devlink_health_reporter *fw_fatal;
2106 int err;
2107
2108 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
2109 return 0;
2110
2111 fw_fatal = devl_health_reporter_create(devlink, &mlxsw_core_health_fw_fatal_ops,
2112 0, mlxsw_core);
2113 if (IS_ERR(ptr: fw_fatal)) {
2114 dev_err(mlxsw_core->bus_info->dev, "Failed to create fw fatal reporter");
2115 return PTR_ERR(ptr: fw_fatal);
2116 }
2117 mlxsw_core->health.fw_fatal = fw_fatal;
2118
2119 err = mlxsw_core_trap_register(mlxsw_core, listener: &mlxsw_core_health_listener, priv: mlxsw_core);
2120 if (err)
2121 goto err_trap_register;
2122
2123 err = mlxsw_core_health_fw_fatal_config(mlxsw_core, enable: true);
2124 if (err)
2125 goto err_fw_fatal_config;
2126
2127 return 0;
2128
2129err_fw_fatal_config:
2130 mlxsw_core_trap_unregister(mlxsw_core, listener: &mlxsw_core_health_listener, priv: mlxsw_core);
2131err_trap_register:
2132 devl_health_reporter_destroy(reporter: mlxsw_core->health.fw_fatal);
2133 return err;
2134}
2135
2136static void mlxsw_core_health_fini(struct mlxsw_core *mlxsw_core)
2137{
2138 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
2139 return;
2140
2141 mlxsw_core_health_fw_fatal_config(mlxsw_core, enable: false);
2142 mlxsw_core_trap_unregister(mlxsw_core, listener: &mlxsw_core_health_listener, priv: mlxsw_core);
2143 /* Make sure there is no more event work scheduled */
2144 mlxsw_core_flush_owq();
2145 devl_health_reporter_destroy(reporter: mlxsw_core->health.fw_fatal);
2146}
2147
2148static void mlxsw_core_irq_event_handler_init(struct mlxsw_core *mlxsw_core)
2149{
2150 INIT_LIST_HEAD(list: &mlxsw_core->irq_event_handler_list);
2151 mutex_init(&mlxsw_core->irq_event_handler_lock);
2152}
2153
2154static void mlxsw_core_irq_event_handler_fini(struct mlxsw_core *mlxsw_core)
2155{
2156 mutex_destroy(lock: &mlxsw_core->irq_event_handler_lock);
2157 WARN_ON(!list_empty(&mlxsw_core->irq_event_handler_list));
2158}
2159
2160static int
2161__mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
2162 const struct mlxsw_bus *mlxsw_bus,
2163 void *bus_priv, bool reload,
2164 struct devlink *devlink,
2165 struct netlink_ext_ack *extack)
2166{
2167 const char *device_kind = mlxsw_bus_info->device_kind;
2168 struct mlxsw_core *mlxsw_core;
2169 struct mlxsw_driver *mlxsw_driver;
2170 size_t alloc_size;
2171 u16 max_lag;
2172 int err;
2173
2174 mlxsw_driver = mlxsw_core_driver_get(kind: device_kind);
2175 if (!mlxsw_driver)
2176 return -EINVAL;
2177
2178 if (!reload) {
2179 alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
2180 devlink = devlink_alloc(ops: &mlxsw_devlink_ops, priv_size: alloc_size,
2181 dev: mlxsw_bus_info->dev);
2182 if (!devlink) {
2183 err = -ENOMEM;
2184 goto err_devlink_alloc;
2185 }
2186 devl_lock(devlink);
2187 devl_register(devlink);
2188 }
2189
2190 mlxsw_core = devlink_priv(devlink);
2191 INIT_LIST_HEAD(list: &mlxsw_core->rx_listener_list);
2192 INIT_LIST_HEAD(list: &mlxsw_core->event_listener_list);
2193 mlxsw_core->driver = mlxsw_driver;
2194 mlxsw_core->bus = mlxsw_bus;
2195 mlxsw_core->bus_priv = bus_priv;
2196 mlxsw_core->bus_info = mlxsw_bus_info;
2197 mlxsw_core_irq_event_handler_init(mlxsw_core);
2198
2199 err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
2200 &mlxsw_core->res);
2201 if (err)
2202 goto err_bus_init;
2203
2204 if (mlxsw_driver->resources_register && !reload) {
2205 err = mlxsw_driver->resources_register(mlxsw_core);
2206 if (err)
2207 goto err_register_resources;
2208 }
2209
2210 err = mlxsw_ports_init(mlxsw_core, reload);
2211 if (err)
2212 goto err_ports_init;
2213
2214 err = mlxsw_core_max_lag(mlxsw_core, p_max_lag: &max_lag);
2215 if (!err && MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
2216 alloc_size = sizeof(*mlxsw_core->lag.mapping) * max_lag *
2217 MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
2218 mlxsw_core->lag.mapping = kzalloc(size: alloc_size, GFP_KERNEL);
2219 if (!mlxsw_core->lag.mapping) {
2220 err = -ENOMEM;
2221 goto err_alloc_lag_mapping;
2222 }
2223 }
2224
2225 err = mlxsw_core_trap_groups_set(mlxsw_core);
2226 if (err)
2227 goto err_trap_groups_set;
2228
2229 err = mlxsw_emad_init(mlxsw_core);
2230 if (err)
2231 goto err_emad_init;
2232
2233 if (!reload) {
2234 err = mlxsw_core_params_register(mlxsw_core);
2235 if (err)
2236 goto err_register_params;
2237 }
2238
2239 err = mlxsw_core_fw_rev_validate(mlxsw_core, mlxsw_bus_info, req_rev: mlxsw_driver->fw_req_rev,
2240 filename: mlxsw_driver->fw_filename);
2241 if (err)
2242 goto err_fw_rev_validate;
2243
2244 err = mlxsw_linecards_init(mlxsw_core, bus_info: mlxsw_bus_info);
2245 if (err)
2246 goto err_linecards_init;
2247
2248 err = mlxsw_core_health_init(mlxsw_core);
2249 if (err)
2250 goto err_health_init;
2251
2252 err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, p_hwmon: &mlxsw_core->hwmon);
2253 if (err)
2254 goto err_hwmon_init;
2255
2256 err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info,
2257 p_thermal: &mlxsw_core->thermal);
2258 if (err)
2259 goto err_thermal_init;
2260
2261 err = mlxsw_env_init(core: mlxsw_core, bus_info: mlxsw_bus_info, p_env: &mlxsw_core->env);
2262 if (err)
2263 goto err_env_init;
2264
2265 if (mlxsw_driver->init) {
2266 err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack);
2267 if (err)
2268 goto err_driver_init;
2269 }
2270
2271 if (!reload)
2272 devl_unlock(devlink);
2273 return 0;
2274
2275err_driver_init:
2276 mlxsw_env_fini(env: mlxsw_core->env);
2277err_env_init:
2278 mlxsw_thermal_fini(thermal: mlxsw_core->thermal);
2279err_thermal_init:
2280 mlxsw_hwmon_fini(mlxsw_hwmon: mlxsw_core->hwmon);
2281err_hwmon_init:
2282 mlxsw_core_health_fini(mlxsw_core);
2283err_health_init:
2284 mlxsw_linecards_fini(mlxsw_core);
2285err_linecards_init:
2286err_fw_rev_validate:
2287 if (!reload)
2288 mlxsw_core_params_unregister(mlxsw_core);
2289err_register_params:
2290 mlxsw_emad_fini(mlxsw_core);
2291err_emad_init:
2292err_trap_groups_set:
2293 kfree(objp: mlxsw_core->lag.mapping);
2294err_alloc_lag_mapping:
2295 mlxsw_ports_fini(mlxsw_core, reload);
2296err_ports_init:
2297 if (!reload)
2298 devl_resources_unregister(devlink);
2299err_register_resources:
2300 mlxsw_bus->fini(bus_priv);
2301err_bus_init:
2302 mlxsw_core_irq_event_handler_fini(mlxsw_core);
2303 if (!reload) {
2304 devl_unregister(devlink);
2305 devl_unlock(devlink);
2306 devlink_free(devlink);
2307 }
2308err_devlink_alloc:
2309 return err;
2310}
2311
2312int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
2313 const struct mlxsw_bus *mlxsw_bus,
2314 void *bus_priv, bool reload,
2315 struct devlink *devlink,
2316 struct netlink_ext_ack *extack)
2317{
2318 bool called_again = false;
2319 int err;
2320
2321again:
2322 err = __mlxsw_core_bus_device_register(mlxsw_bus_info, mlxsw_bus,
2323 bus_priv, reload,
2324 devlink, extack);
2325 /* -EAGAIN is returned in case the FW was updated. FW needs
2326 * a reset, so lets try to call __mlxsw_core_bus_device_register()
2327 * again.
2328 */
2329 if (err == -EAGAIN && !called_again) {
2330 called_again = true;
2331 goto again;
2332 }
2333
2334 return err;
2335}
2336EXPORT_SYMBOL(mlxsw_core_bus_device_register);
2337
2338void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
2339 bool reload)
2340{
2341 struct devlink *devlink = priv_to_devlink(priv: mlxsw_core);
2342
2343 if (!reload)
2344 devl_lock(devlink);
2345
2346 if (devlink_is_reload_failed(devlink)) {
2347 if (!reload)
2348 /* Only the parts that were not de-initialized in the
2349 * failed reload attempt need to be de-initialized.
2350 */
2351 goto reload_fail_deinit;
2352 else
2353 return;
2354 }
2355
2356 if (mlxsw_core->driver->fini)
2357 mlxsw_core->driver->fini(mlxsw_core);
2358 mlxsw_env_fini(env: mlxsw_core->env);
2359 mlxsw_thermal_fini(thermal: mlxsw_core->thermal);
2360 mlxsw_hwmon_fini(mlxsw_hwmon: mlxsw_core->hwmon);
2361 mlxsw_core_health_fini(mlxsw_core);
2362 mlxsw_linecards_fini(mlxsw_core);
2363 if (!reload)
2364 mlxsw_core_params_unregister(mlxsw_core);
2365 mlxsw_emad_fini(mlxsw_core);
2366 kfree(objp: mlxsw_core->lag.mapping);
2367 mlxsw_ports_fini(mlxsw_core, reload);
2368 if (!reload)
2369 devl_resources_unregister(devlink);
2370 mlxsw_core->bus->fini(mlxsw_core->bus_priv);
2371 mlxsw_core_irq_event_handler_fini(mlxsw_core);
2372 if (!reload) {
2373 devl_unregister(devlink);
2374 devl_unlock(devlink);
2375 devlink_free(devlink);
2376 }
2377
2378 return;
2379
2380reload_fail_deinit:
2381 mlxsw_core_params_unregister(mlxsw_core);
2382 devl_resources_unregister(devlink);
2383 devl_unregister(devlink);
2384 devl_unlock(devlink);
2385 devlink_free(devlink);
2386}
2387EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
2388
2389bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
2390 const struct mlxsw_tx_info *tx_info)
2391{
2392 return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
2393 tx_info);
2394}
2395EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
2396
2397int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
2398 const struct mlxsw_tx_info *tx_info)
2399{
2400 return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
2401 tx_info);
2402}
2403EXPORT_SYMBOL(mlxsw_core_skb_transmit);
2404
2405void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core,
2406 struct sk_buff *skb, u16 local_port)
2407{
2408 if (mlxsw_core->driver->ptp_transmitted)
2409 mlxsw_core->driver->ptp_transmitted(mlxsw_core, skb,
2410 local_port);
2411}
2412EXPORT_SYMBOL(mlxsw_core_ptp_transmitted);
2413
2414static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
2415 const struct mlxsw_rx_listener *rxl_b)
2416{
2417 return (rxl_a->func == rxl_b->func &&
2418 rxl_a->local_port == rxl_b->local_port &&
2419 rxl_a->trap_id == rxl_b->trap_id &&
2420 rxl_a->mirror_reason == rxl_b->mirror_reason);
2421}
2422
2423static struct mlxsw_rx_listener_item *
2424__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
2425 const struct mlxsw_rx_listener *rxl)
2426{
2427 struct mlxsw_rx_listener_item *rxl_item;
2428
2429 list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
2430 if (__is_rx_listener_equal(rxl_a: &rxl_item->rxl, rxl_b: rxl))
2431 return rxl_item;
2432 }
2433 return NULL;
2434}
2435
2436int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
2437 const struct mlxsw_rx_listener *rxl,
2438 void *priv, bool enabled)
2439{
2440 struct mlxsw_rx_listener_item *rxl_item;
2441
2442 rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
2443 if (rxl_item)
2444 return -EEXIST;
2445 rxl_item = kmalloc(size: sizeof(*rxl_item), GFP_KERNEL);
2446 if (!rxl_item)
2447 return -ENOMEM;
2448 rxl_item->rxl = *rxl;
2449 rxl_item->priv = priv;
2450 rxl_item->enabled = enabled;
2451
2452 list_add_rcu(new: &rxl_item->list, head: &mlxsw_core->rx_listener_list);
2453 return 0;
2454}
2455EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
2456
2457void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
2458 const struct mlxsw_rx_listener *rxl)
2459{
2460 struct mlxsw_rx_listener_item *rxl_item;
2461
2462 rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
2463 if (!rxl_item)
2464 return;
2465 list_del_rcu(entry: &rxl_item->list);
2466 synchronize_rcu();
2467 kfree(objp: rxl_item);
2468}
2469EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
2470
2471static void
2472mlxsw_core_rx_listener_state_set(struct mlxsw_core *mlxsw_core,
2473 const struct mlxsw_rx_listener *rxl,
2474 bool enabled)
2475{
2476 struct mlxsw_rx_listener_item *rxl_item;
2477
2478 rxl_item = __find_rx_listener_item(mlxsw_core, rxl);
2479 if (WARN_ON(!rxl_item))
2480 return;
2481 rxl_item->enabled = enabled;
2482}
2483
2484static void mlxsw_core_event_listener_func(struct sk_buff *skb, u16 local_port,
2485 void *priv)
2486{
2487 struct mlxsw_event_listener_item *event_listener_item = priv;
2488 struct mlxsw_core *mlxsw_core;
2489 struct mlxsw_reg_info reg;
2490 char *payload;
2491 char *reg_tlv;
2492 char *op_tlv;
2493
2494 mlxsw_core = event_listener_item->mlxsw_core;
2495 trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
2496 skb->data, skb->len);
2497
2498 mlxsw_emad_tlv_parse(skb);
2499 op_tlv = mlxsw_emad_op_tlv(skb);
2500 reg_tlv = mlxsw_emad_reg_tlv(skb);
2501
2502 reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
2503 reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
2504 payload = mlxsw_emad_reg_payload(reg_tlv);
2505 event_listener_item->el.func(&reg, payload, event_listener_item->priv);
2506 dev_kfree_skb(skb);
2507}
2508
2509static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
2510 const struct mlxsw_event_listener *el_b)
2511{
2512 return (el_a->func == el_b->func &&
2513 el_a->trap_id == el_b->trap_id);
2514}
2515
2516static struct mlxsw_event_listener_item *
2517__find_event_listener_item(struct mlxsw_core *mlxsw_core,
2518 const struct mlxsw_event_listener *el)
2519{
2520 struct mlxsw_event_listener_item *el_item;
2521
2522 list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
2523 if (__is_event_listener_equal(el_a: &el_item->el, el_b: el))
2524 return el_item;
2525 }
2526 return NULL;
2527}
2528
2529int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
2530 const struct mlxsw_event_listener *el,
2531 void *priv)
2532{
2533 int err;
2534 struct mlxsw_event_listener_item *el_item;
2535 const struct mlxsw_rx_listener rxl = {
2536 .func = mlxsw_core_event_listener_func,
2537 .local_port = MLXSW_PORT_DONT_CARE,
2538 .trap_id = el->trap_id,
2539 };
2540
2541 el_item = __find_event_listener_item(mlxsw_core, el);
2542 if (el_item)
2543 return -EEXIST;
2544 el_item = kmalloc(size: sizeof(*el_item), GFP_KERNEL);
2545 if (!el_item)
2546 return -ENOMEM;
2547 el_item->mlxsw_core = mlxsw_core;
2548 el_item->el = *el;
2549 el_item->priv = priv;
2550
2551 err = mlxsw_core_rx_listener_register(mlxsw_core, rxl: &rxl, priv: el_item, enabled: true);
2552 if (err)
2553 goto err_rx_listener_register;
2554
2555 /* No reason to save item if we did not manage to register an RX
2556 * listener for it.
2557 */
2558 list_add_rcu(new: &el_item->list, head: &mlxsw_core->event_listener_list);
2559
2560 return 0;
2561
2562err_rx_listener_register:
2563 kfree(objp: el_item);
2564 return err;
2565}
2566EXPORT_SYMBOL(mlxsw_core_event_listener_register);
2567
2568void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
2569 const struct mlxsw_event_listener *el)
2570{
2571 struct mlxsw_event_listener_item *el_item;
2572 const struct mlxsw_rx_listener rxl = {
2573 .func = mlxsw_core_event_listener_func,
2574 .local_port = MLXSW_PORT_DONT_CARE,
2575 .trap_id = el->trap_id,
2576 };
2577
2578 el_item = __find_event_listener_item(mlxsw_core, el);
2579 if (!el_item)
2580 return;
2581 mlxsw_core_rx_listener_unregister(mlxsw_core, rxl: &rxl);
2582 list_del(entry: &el_item->list);
2583 kfree(objp: el_item);
2584}
2585EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
2586
2587static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
2588 const struct mlxsw_listener *listener,
2589 void *priv, bool enabled)
2590{
2591 if (listener->is_event) {
2592 WARN_ON(!enabled);
2593 return mlxsw_core_event_listener_register(mlxsw_core,
2594 el: &listener->event_listener,
2595 priv);
2596 } else {
2597 return mlxsw_core_rx_listener_register(mlxsw_core,
2598 rxl: &listener->rx_listener,
2599 priv, enabled);
2600 }
2601}
2602
2603static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
2604 const struct mlxsw_listener *listener,
2605 void *priv)
2606{
2607 if (listener->is_event)
2608 mlxsw_core_event_listener_unregister(mlxsw_core,
2609 el: &listener->event_listener);
2610 else
2611 mlxsw_core_rx_listener_unregister(mlxsw_core,
2612 rxl: &listener->rx_listener);
2613}
2614
2615int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
2616 const struct mlxsw_listener *listener, void *priv)
2617{
2618 enum mlxsw_reg_htgt_trap_group trap_group;
2619 enum mlxsw_reg_hpkt_action action;
2620 char hpkt_pl[MLXSW_REG_HPKT_LEN];
2621 int err;
2622
2623 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
2624 return 0;
2625
2626 err = mlxsw_core_listener_register(mlxsw_core, listener, priv,
2627 enabled: listener->enabled_on_register);
2628 if (err)
2629 return err;
2630
2631 action = listener->enabled_on_register ? listener->en_action :
2632 listener->dis_action;
2633 trap_group = listener->enabled_on_register ? listener->en_trap_group :
2634 listener->dis_trap_group;
2635 mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id,
2636 trap_group, listener->is_ctrl);
2637 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), payload: hpkt_pl);
2638 if (err)
2639 goto err_trap_set;
2640
2641 return 0;
2642
2643err_trap_set:
2644 mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
2645 return err;
2646}
2647EXPORT_SYMBOL(mlxsw_core_trap_register);
2648
2649void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
2650 const struct mlxsw_listener *listener,
2651 void *priv)
2652{
2653 char hpkt_pl[MLXSW_REG_HPKT_LEN];
2654
2655 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
2656 return;
2657
2658 if (!listener->is_event) {
2659 mlxsw_reg_hpkt_pack(hpkt_pl, listener->dis_action,
2660 listener->trap_id, listener->dis_trap_group,
2661 listener->is_ctrl);
2662 mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), payload: hpkt_pl);
2663 }
2664
2665 mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
2666}
2667EXPORT_SYMBOL(mlxsw_core_trap_unregister);
2668
2669int mlxsw_core_traps_register(struct mlxsw_core *mlxsw_core,
2670 const struct mlxsw_listener *listeners,
2671 size_t listeners_count, void *priv)
2672{
2673 int i, err;
2674
2675 for (i = 0; i < listeners_count; i++) {
2676 err = mlxsw_core_trap_register(mlxsw_core,
2677 listener: &listeners[i],
2678 priv);
2679 if (err)
2680 goto err_listener_register;
2681 }
2682 return 0;
2683
2684err_listener_register:
2685 for (i--; i >= 0; i--) {
2686 mlxsw_core_trap_unregister(mlxsw_core,
2687 listener: &listeners[i],
2688 priv);
2689 }
2690 return err;
2691}
2692EXPORT_SYMBOL(mlxsw_core_traps_register);
2693
2694void mlxsw_core_traps_unregister(struct mlxsw_core *mlxsw_core,
2695 const struct mlxsw_listener *listeners,
2696 size_t listeners_count, void *priv)
2697{
2698 int i;
2699
2700 for (i = 0; i < listeners_count; i++) {
2701 mlxsw_core_trap_unregister(mlxsw_core,
2702 listener: &listeners[i],
2703 priv);
2704 }
2705}
2706EXPORT_SYMBOL(mlxsw_core_traps_unregister);
2707
2708int mlxsw_core_trap_state_set(struct mlxsw_core *mlxsw_core,
2709 const struct mlxsw_listener *listener,
2710 bool enabled)
2711{
2712 enum mlxsw_reg_htgt_trap_group trap_group;
2713 enum mlxsw_reg_hpkt_action action;
2714 char hpkt_pl[MLXSW_REG_HPKT_LEN];
2715 int err;
2716
2717 /* Not supported for event listener */
2718 if (WARN_ON(listener->is_event))
2719 return -EINVAL;
2720
2721 action = enabled ? listener->en_action : listener->dis_action;
2722 trap_group = enabled ? listener->en_trap_group :
2723 listener->dis_trap_group;
2724 mlxsw_reg_hpkt_pack(hpkt_pl, action, listener->trap_id,
2725 trap_group, listener->is_ctrl);
2726 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), payload: hpkt_pl);
2727 if (err)
2728 return err;
2729
2730 mlxsw_core_rx_listener_state_set(mlxsw_core, rxl: &listener->rx_listener,
2731 enabled);
2732 return 0;
2733}
2734EXPORT_SYMBOL(mlxsw_core_trap_state_set);
2735
2736static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
2737{
2738 return atomic64_inc_return(&mlxsw_core->emad.tid);
2739}
2740
2741static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
2742 const struct mlxsw_reg_info *reg,
2743 char *payload,
2744 enum mlxsw_core_reg_access_type type,
2745 struct list_head *bulk_list,
2746 mlxsw_reg_trans_cb_t *cb,
2747 unsigned long cb_priv)
2748{
2749 u64 tid = mlxsw_core_tid_get(mlxsw_core);
2750 struct mlxsw_reg_trans *trans;
2751 int err;
2752
2753 trans = kzalloc(size: sizeof(*trans), GFP_KERNEL);
2754 if (!trans)
2755 return -ENOMEM;
2756
2757 err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
2758 bulk_list, cb, cb_priv, tid);
2759 if (err) {
2760 kfree_rcu(trans, rcu);
2761 return err;
2762 }
2763 return 0;
2764}
2765
2766int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
2767 const struct mlxsw_reg_info *reg, char *payload,
2768 struct list_head *bulk_list,
2769 mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
2770{
2771 return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
2772 type: MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
2773 bulk_list, cb, cb_priv);
2774}
2775EXPORT_SYMBOL(mlxsw_reg_trans_query);
2776
2777int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
2778 const struct mlxsw_reg_info *reg, char *payload,
2779 struct list_head *bulk_list,
2780 mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
2781{
2782 return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
2783 type: MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
2784 bulk_list, cb, cb_priv);
2785}
2786EXPORT_SYMBOL(mlxsw_reg_trans_write);
2787
/* Size of the human-readable EMAD error string reported via devlink
 * hwerr tracing in mlxsw_reg_trans_wait().
 */
#define MLXSW_REG_TRANS_ERR_STRING_SIZE 256
2789
2790static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
2791{
2792 char err_string[MLXSW_REG_TRANS_ERR_STRING_SIZE];
2793 struct mlxsw_core *mlxsw_core = trans->core;
2794 int err;
2795
2796 wait_for_completion(&trans->completion);
2797 cancel_delayed_work_sync(dwork: &trans->timeout_dw);
2798 err = trans->err;
2799
2800 if (trans->retries)
2801 dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
2802 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
2803 if (err) {
2804 dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
2805 trans->tid, trans->reg->id,
2806 mlxsw_reg_id_str(trans->reg->id),
2807 mlxsw_core_reg_access_type_str(trans->type),
2808 trans->emad_status,
2809 mlxsw_emad_op_tlv_status_str(trans->emad_status));
2810
2811 snprintf(err_string, MLXSW_REG_TRANS_ERR_STRING_SIZE,
2812 "(tid=%llx,reg_id=%x(%s)) %s (%s)\n", trans->tid,
2813 trans->reg->id, mlxsw_reg_id_str(trans->reg->id),
2814 mlxsw_emad_op_tlv_status_str(trans->emad_status),
2815 trans->emad_err_string ? trans->emad_err_string : "");
2816
2817 trace_devlink_hwerr(devlink: priv_to_devlink(priv: mlxsw_core),
2818 err: trans->emad_status, msg: err_string);
2819
2820 kfree(objp: trans->emad_err_string);
2821 }
2822
2823 list_del(entry: &trans->bulk_list);
2824 kfree_rcu(trans, rcu);
2825 return err;
2826}
2827
2828int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
2829{
2830 struct mlxsw_reg_trans *trans;
2831 struct mlxsw_reg_trans *tmp;
2832 int sum_err = 0;
2833 int err;
2834
2835 list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
2836 err = mlxsw_reg_trans_wait(trans);
2837 if (err && sum_err == 0)
2838 sum_err = err; /* first error to be returned */
2839 }
2840 return sum_err;
2841}
2842EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
2843
/* Entry on the per-core list of IRQ event handlers; cb is invoked for
 * each entry by mlxsw_core_irq_event_handlers_call().
 */
struct mlxsw_core_irq_event_handler_item {
	struct list_head list;
	void (*cb)(struct mlxsw_core *mlxsw_core);
};
2848
2849int mlxsw_core_irq_event_handler_register(struct mlxsw_core *mlxsw_core,
2850 mlxsw_irq_event_cb_t cb)
2851{
2852 struct mlxsw_core_irq_event_handler_item *item;
2853
2854 item = kzalloc(size: sizeof(*item), GFP_KERNEL);
2855 if (!item)
2856 return -ENOMEM;
2857 item->cb = cb;
2858 mutex_lock(lock: &mlxsw_core->irq_event_handler_lock);
2859 list_add_tail(new: &item->list, head: &mlxsw_core->irq_event_handler_list);
2860 mutex_unlock(lock: &mlxsw_core->irq_event_handler_lock);
2861 return 0;
2862}
2863EXPORT_SYMBOL(mlxsw_core_irq_event_handler_register);
2864
2865void mlxsw_core_irq_event_handler_unregister(struct mlxsw_core *mlxsw_core,
2866 mlxsw_irq_event_cb_t cb)
2867{
2868 struct mlxsw_core_irq_event_handler_item *item, *tmp;
2869
2870 mutex_lock(lock: &mlxsw_core->irq_event_handler_lock);
2871 list_for_each_entry_safe(item, tmp,
2872 &mlxsw_core->irq_event_handler_list, list) {
2873 if (item->cb == cb) {
2874 list_del(entry: &item->list);
2875 kfree(objp: item);
2876 }
2877 }
2878 mutex_unlock(lock: &mlxsw_core->irq_event_handler_lock);
2879}
2880EXPORT_SYMBOL(mlxsw_core_irq_event_handler_unregister);
2881
2882void mlxsw_core_irq_event_handlers_call(struct mlxsw_core *mlxsw_core)
2883{
2884 struct mlxsw_core_irq_event_handler_item *item;
2885
2886 mutex_lock(lock: &mlxsw_core->irq_event_handler_lock);
2887 list_for_each_entry(item, &mlxsw_core->irq_event_handler_list, list) {
2888 if (item->cb)
2889 item->cb(mlxsw_core);
2890 }
2891 mutex_unlock(lock: &mlxsw_core->irq_event_handler_lock);
2892}
2893EXPORT_SYMBOL(mlxsw_core_irq_event_handlers_call);
2894
2895static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
2896 const struct mlxsw_reg_info *reg,
2897 char *payload,
2898 enum mlxsw_core_reg_access_type type)
2899{
2900 enum mlxsw_emad_op_tlv_status status;
2901 int err, n_retry;
2902 bool reset_ok;
2903 char *in_mbox, *out_mbox, *tmp;
2904
2905 dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
2906 reg->id, mlxsw_reg_id_str(reg->id),
2907 mlxsw_core_reg_access_type_str(type));
2908
2909 in_mbox = mlxsw_cmd_mbox_alloc();
2910 if (!in_mbox)
2911 return -ENOMEM;
2912
2913 out_mbox = mlxsw_cmd_mbox_alloc();
2914 if (!out_mbox) {
2915 err = -ENOMEM;
2916 goto free_in_mbox;
2917 }
2918
2919 mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
2920 mlxsw_core_tid_get(mlxsw_core));
2921 tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
2922 mlxsw_emad_pack_reg_tlv(reg_tlv: tmp, reg, payload);
2923
2924 /* There is a special treatment needed for MRSR (reset) register.
2925 * The command interface will return error after the command
2926 * is executed, so tell the lower layer to expect it
2927 * and cope accordingly.
2928 */
2929 reset_ok = reg->id == MLXSW_REG_MRSR_ID;
2930
2931 n_retry = 0;
2932retry:
2933 err = mlxsw_cmd_access_reg(mlxsw_core, reset_ok, in_mbox, out_mbox);
2934 if (!err) {
2935 err = mlxsw_emad_process_status(op_tlv: out_mbox, p_status: &status);
2936 if (err) {
2937 if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
2938 goto retry;
2939 dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
2940 status, mlxsw_emad_op_tlv_status_str(status));
2941 }
2942 }
2943
2944 if (!err)
2945 memcpy(to: payload, from: mlxsw_emad_reg_payload_cmd(mbox: out_mbox),
2946 len: reg->len);
2947
2948 mlxsw_cmd_mbox_free(mbox: out_mbox);
2949free_in_mbox:
2950 mlxsw_cmd_mbox_free(mbox: in_mbox);
2951 if (err)
2952 dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
2953 reg->id, mlxsw_reg_id_str(reg->id),
2954 mlxsw_core_reg_access_type_str(type));
2955 return err;
2956}
2957
2958static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
2959 char *payload, size_t payload_len,
2960 unsigned long cb_priv)
2961{
2962 char *orig_payload = (char *) cb_priv;
2963
2964 memcpy(to: orig_payload, from: payload, len: payload_len);
2965}
2966
2967static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
2968 const struct mlxsw_reg_info *reg,
2969 char *payload,
2970 enum mlxsw_core_reg_access_type type)
2971{
2972 LIST_HEAD(bulk_list);
2973 int err;
2974
2975 /* During initialization EMAD interface is not available to us,
2976 * so we default to command interface. We switch to EMAD interface
2977 * after setting the appropriate traps.
2978 */
2979 if (!mlxsw_core->emad.use_emad)
2980 return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
2981 payload, type);
2982
2983 err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
2984 payload, type, bulk_list: &bulk_list,
2985 cb: mlxsw_core_reg_access_cb,
2986 cb_priv: (unsigned long) payload);
2987 if (err)
2988 return err;
2989 return mlxsw_reg_trans_bulk_wait(bulk_list: &bulk_list);
2990}
2991
2992int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
2993 const struct mlxsw_reg_info *reg, char *payload)
2994{
2995 return mlxsw_core_reg_access(mlxsw_core, reg, payload,
2996 type: MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
2997}
2998EXPORT_SYMBOL(mlxsw_reg_query);
2999
3000int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
3001 const struct mlxsw_reg_info *reg, char *payload)
3002{
3003 return mlxsw_core_reg_access(mlxsw_core, reg, payload,
3004 type: MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
3005}
3006EXPORT_SYMBOL(mlxsw_reg_write);
3007
3008void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
3009 struct mlxsw_rx_info *rx_info)
3010{
3011 struct mlxsw_rx_listener_item *rxl_item;
3012 const struct mlxsw_rx_listener *rxl;
3013 u16 local_port;
3014 bool found = false;
3015
3016 if (rx_info->is_lag) {
3017 dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
3018 __func__, rx_info->u.lag_id,
3019 rx_info->trap_id);
3020 /* Upper layer does not care if the skb came from LAG or not,
3021 * so just get the local_port for the lag port and push it up.
3022 */
3023 local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
3024 rx_info->u.lag_id,
3025 rx_info->lag_port_index);
3026 } else {
3027 local_port = rx_info->u.sys_port;
3028 }
3029
3030 dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
3031 __func__, local_port, rx_info->trap_id);
3032
3033 if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
3034 (local_port >= mlxsw_core->max_ports))
3035 goto drop;
3036
3037 rcu_read_lock();
3038 list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
3039 rxl = &rxl_item->rxl;
3040 if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
3041 rxl->local_port == local_port) &&
3042 rxl->trap_id == rx_info->trap_id &&
3043 rxl->mirror_reason == rx_info->mirror_reason) {
3044 if (rxl_item->enabled)
3045 found = true;
3046 break;
3047 }
3048 }
3049 if (!found) {
3050 rcu_read_unlock();
3051 goto drop;
3052 }
3053
3054 rxl->func(skb, local_port, rxl_item->priv);
3055 rcu_read_unlock();
3056 return;
3057
3058drop:
3059 dev_kfree_skb(skb);
3060}
3061EXPORT_SYMBOL(mlxsw_core_skb_receive);
3062
3063static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
3064 u16 lag_id, u8 port_index)
3065{
3066 return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id +
3067 port_index;
3068}
3069
3070void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
3071 u16 lag_id, u8 port_index, u16 local_port)
3072{
3073 int index = mlxsw_core_lag_mapping_index(mlxsw_core,
3074 lag_id, port_index);
3075
3076 mlxsw_core->lag.mapping[index] = local_port;
3077}
3078EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);
3079
3080u16 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
3081 u16 lag_id, u8 port_index)
3082{
3083 int index = mlxsw_core_lag_mapping_index(mlxsw_core,
3084 lag_id, port_index);
3085
3086 return mlxsw_core->lag.mapping[index];
3087}
3088EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);
3089
3090void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
3091 u16 lag_id, u16 local_port)
3092{
3093 int i;
3094
3095 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) {
3096 int index = mlxsw_core_lag_mapping_index(mlxsw_core,
3097 lag_id, i);
3098
3099 if (mlxsw_core->lag.mapping[index] == local_port)
3100 mlxsw_core->lag.mapping[index] = 0;
3101 }
3102}
3103EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
3104
3105bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
3106 enum mlxsw_res_id res_id)
3107{
3108 return mlxsw_res_valid(res: &mlxsw_core->res, res_id);
3109}
3110EXPORT_SYMBOL(mlxsw_core_res_valid);
3111
3112u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
3113 enum mlxsw_res_id res_id)
3114{
3115 return mlxsw_res_get(&mlxsw_core->res, res_id);
3116}
3117EXPORT_SYMBOL(mlxsw_core_res_get);
3118
3119static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
3120 enum devlink_port_flavour flavour,
3121 u8 slot_index, u32 port_number, bool split,
3122 u32 split_port_subnumber,
3123 bool splittable, u32 lanes,
3124 const unsigned char *switch_id,
3125 unsigned char switch_id_len)
3126{
3127 struct devlink *devlink = priv_to_devlink(priv: mlxsw_core);
3128 struct mlxsw_core_port *mlxsw_core_port =
3129 &mlxsw_core->ports[local_port];
3130 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
3131 struct devlink_port_attrs attrs = {};
3132 int err;
3133
3134 attrs.split = split;
3135 attrs.lanes = lanes;
3136 attrs.splittable = splittable;
3137 attrs.flavour = flavour;
3138 attrs.phys.port_number = port_number;
3139 attrs.phys.split_subport_number = split_port_subnumber;
3140 memcpy(to: attrs.switch_id.id, from: switch_id, len: switch_id_len);
3141 attrs.switch_id.id_len = switch_id_len;
3142 mlxsw_core_port->local_port = local_port;
3143 devlink_port_attrs_set(devlink_port, devlink_port_attrs: &attrs);
3144 if (slot_index) {
3145 struct mlxsw_linecard *linecard;
3146
3147 linecard = mlxsw_linecard_get(mlxsw_core->linecards,
3148 slot_index);
3149 mlxsw_core_port->linecard = linecard;
3150 devlink_port_linecard_set(devlink_port,
3151 linecard: linecard->devlink_linecard);
3152 }
3153 err = devl_port_register(devlink, devlink_port, port_index: local_port);
3154 if (err)
3155 memset(s: mlxsw_core_port, c: 0, n: sizeof(*mlxsw_core_port));
3156 return err;
3157}
3158
3159static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port)
3160{
3161 struct mlxsw_core_port *mlxsw_core_port =
3162 &mlxsw_core->ports[local_port];
3163 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
3164
3165 devl_port_unregister(devlink_port);
3166 memset(s: mlxsw_core_port, c: 0, n: sizeof(*mlxsw_core_port));
3167}
3168
3169int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
3170 u8 slot_index, u32 port_number, bool split,
3171 u32 split_port_subnumber,
3172 bool splittable, u32 lanes,
3173 const unsigned char *switch_id,
3174 unsigned char switch_id_len)
3175{
3176 int err;
3177
3178 err = __mlxsw_core_port_init(mlxsw_core, local_port,
3179 DEVLINK_PORT_FLAVOUR_PHYSICAL, slot_index,
3180 port_number, split, split_port_subnumber,
3181 splittable, lanes,
3182 switch_id, switch_id_len);
3183 if (err)
3184 return err;
3185
3186 atomic_inc(v: &mlxsw_core->active_ports_count);
3187 return 0;
3188}
3189EXPORT_SYMBOL(mlxsw_core_port_init);
3190
3191void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u16 local_port)
3192{
3193 atomic_dec(v: &mlxsw_core->active_ports_count);
3194
3195 __mlxsw_core_port_fini(mlxsw_core, local_port);
3196}
3197EXPORT_SYMBOL(mlxsw_core_port_fini);
3198
3199int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core,
3200 void *port_driver_priv,
3201 const unsigned char *switch_id,
3202 unsigned char switch_id_len)
3203{
3204 struct mlxsw_core_port *mlxsw_core_port =
3205 &mlxsw_core->ports[MLXSW_PORT_CPU_PORT];
3206 int err;
3207
3208 err = __mlxsw_core_port_init(mlxsw_core, MLXSW_PORT_CPU_PORT,
3209 DEVLINK_PORT_FLAVOUR_CPU,
3210 0, 0, false, 0, false, 0,
3211 switch_id, switch_id_len);
3212 if (err)
3213 return err;
3214
3215 mlxsw_core_port->port_driver_priv = port_driver_priv;
3216 return 0;
3217}
3218EXPORT_SYMBOL(mlxsw_core_cpu_port_init);
3219
/* Unregister the CPU port created by mlxsw_core_cpu_port_init(). */
void mlxsw_core_cpu_port_fini(struct mlxsw_core *mlxsw_core)
{
	__mlxsw_core_port_fini(mlxsw_core, MLXSW_PORT_CPU_PORT);
}
EXPORT_SYMBOL(mlxsw_core_cpu_port_fini);
3225
3226void mlxsw_core_port_netdev_link(struct mlxsw_core *mlxsw_core, u16 local_port,
3227 void *port_driver_priv, struct net_device *dev)
3228{
3229 struct mlxsw_core_port *mlxsw_core_port =
3230 &mlxsw_core->ports[local_port];
3231 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
3232
3233 mlxsw_core_port->port_driver_priv = port_driver_priv;
3234 SET_NETDEV_DEVLINK_PORT(dev, devlink_port);
3235}
3236EXPORT_SYMBOL(mlxsw_core_port_netdev_link);
3237
3238struct devlink_port *
3239mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
3240 u16 local_port)
3241{
3242 struct mlxsw_core_port *mlxsw_core_port =
3243 &mlxsw_core->ports[local_port];
3244 struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
3245
3246 return devlink_port;
3247}
3248EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get);
3249
3250struct mlxsw_linecard *
3251mlxsw_core_port_linecard_get(struct mlxsw_core *mlxsw_core,
3252 u16 local_port)
3253{
3254 struct mlxsw_core_port *mlxsw_core_port =
3255 &mlxsw_core->ports[local_port];
3256
3257 return mlxsw_core_port->linecard;
3258}
3259
/* Ask the driver to remove every port for which @selector returns true.
 * No-op (with a one-time warning) if the driver does not implement
 * ports_remove_selected.
 */
void mlxsw_core_ports_remove_selected(struct mlxsw_core *mlxsw_core,
				      bool (*selector)(void *priv, u16 local_port),
				      void *priv)
{
	if (WARN_ON_ONCE(!mlxsw_core->driver->ports_remove_selected))
		return;
	mlxsw_core->driver->ports_remove_selected(mlxsw_core, selector, priv);
}
3268
/* Return the environment (thermal/module) context of this core instance. */
struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->env;
}
3273
3274static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
3275 const char *buf, size_t size)
3276{
3277 __be32 *m = (__be32 *) buf;
3278 int i;
3279 int count = size / sizeof(__be32);
3280
3281 for (i = count - 1; i >= 0; i--)
3282 if (m[i])
3283 break;
3284 i++;
3285 count = i ? i : 1;
3286 for (i = 0; i < count; i += 4)
3287 dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
3288 i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
3289 be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
3290}
3291
3292int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
3293 u32 in_mod, bool out_mbox_direct, bool reset_ok,
3294 char *in_mbox, size_t in_mbox_size,
3295 char *out_mbox, size_t out_mbox_size)
3296{
3297 u8 status;
3298 int err;
3299
3300 BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
3301 if (!mlxsw_core->bus->cmd_exec)
3302 return -EOPNOTSUPP;
3303
3304 dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
3305 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
3306 if (in_mbox) {
3307 dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
3308 mlxsw_core_buf_dump_dbg(mlxsw_core, buf: in_mbox, size: in_mbox_size);
3309 }
3310
3311 err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
3312 opcode_mod, in_mod, out_mbox_direct,
3313 in_mbox, in_mbox_size,
3314 out_mbox, out_mbox_size, &status);
3315
3316 if (!err && out_mbox) {
3317 dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
3318 mlxsw_core_buf_dump_dbg(mlxsw_core, buf: out_mbox, size: out_mbox_size);
3319 }
3320
3321 if (reset_ok && err == -EIO &&
3322 status == MLXSW_CMD_STATUS_RUNNING_RESET) {
3323 err = 0;
3324 } else if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
3325 dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
3326 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
3327 in_mod, status, mlxsw_cmd_status_str(status));
3328 } else if (err == -ETIMEDOUT) {
3329 dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
3330 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
3331 in_mod);
3332 }
3333
3334 return err;
3335}
3336EXPORT_SYMBOL(mlxsw_cmd_exec);
3337
3338int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
3339{
3340 return queue_delayed_work(wq: mlxsw_wq, dwork, delay);
3341}
3342EXPORT_SYMBOL(mlxsw_core_schedule_dw);
3343
3344bool mlxsw_core_schedule_work(struct work_struct *work)
3345{
3346 return queue_work(wq: mlxsw_owq, work);
3347}
3348EXPORT_SYMBOL(mlxsw_core_schedule_work);
3349
/* Wait until all work queued so far on the ordered workqueue has run. */
void mlxsw_core_flush_owq(void)
{
	flush_workqueue(mlxsw_owq);
}
EXPORT_SYMBOL(mlxsw_core_flush_owq);
3355
3356int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
3357 const struct mlxsw_config_profile *profile,
3358 u64 *p_single_size, u64 *p_double_size,
3359 u64 *p_linear_size)
3360{
3361 struct mlxsw_driver *driver = mlxsw_core->driver;
3362
3363 if (!driver->kvd_sizes_get)
3364 return -EINVAL;
3365
3366 return driver->kvd_sizes_get(mlxsw_core, profile,
3367 p_single_size, p_double_size,
3368 p_linear_size);
3369}
3370EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
3371
3372int mlxsw_core_resources_query(struct mlxsw_core *mlxsw_core, char *mbox,
3373 struct mlxsw_res *res)
3374{
3375 int index, i;
3376 u64 data;
3377 u16 id;
3378 int err;
3379
3380 mlxsw_cmd_mbox_zero(mbox);
3381
3382 for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES;
3383 index++) {
3384 err = mlxsw_cmd_query_resources(mlxsw_core, out_mbox: mbox, index);
3385 if (err)
3386 return err;
3387
3388 for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) {
3389 id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i);
3390 data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i);
3391
3392 if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID)
3393 return 0;
3394
3395 mlxsw_res_parse(res, id, data);
3396 }
3397 }
3398
3399 /* If after MLXSW_RESOURCES_QUERY_MAX_QUERIES we still didn't get
3400 * MLXSW_RESOURCES_TABLE_END_ID, something went bad in the FW.
3401 */
3402 return -EIO;
3403}
3404EXPORT_SYMBOL(mlxsw_core_resources_query);
3405
/* Read the high 32 bits of the free-running clock through the bus. */
u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->bus->read_frc_h(mlxsw_core->bus_priv);
}
EXPORT_SYMBOL(mlxsw_core_read_frc_h);
3411
/* Read the low 32 bits of the free-running clock through the bus. */
u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->bus->read_frc_l(mlxsw_core->bus_priv);
}
EXPORT_SYMBOL(mlxsw_core_read_frc_l);
3417
/* Read the seconds part of the device UTC clock through the bus. */
u32 mlxsw_core_read_utc_sec(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->bus->read_utc_sec(mlxsw_core->bus_priv);
}
EXPORT_SYMBOL(mlxsw_core_read_utc_sec);
3423
/* Read the nanoseconds part of the device UTC clock through the bus. */
u32 mlxsw_core_read_utc_nsec(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->bus->read_utc_nsec(mlxsw_core->bus_priv);
}
EXPORT_SYMBOL(mlxsw_core_read_utc_nsec);
3429
/* Return whether the driver supports CQE version 2 on send queues. */
bool mlxsw_core_sdq_supports_cqe_v2(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->driver->sdq_supports_cqe_v2;
}
EXPORT_SYMBOL(mlxsw_core_sdq_supports_cqe_v2);
3435
3436static int __init mlxsw_core_module_init(void)
3437{
3438 int err;
3439
3440 err = mlxsw_linecard_driver_register();
3441 if (err)
3442 return err;
3443
3444 mlxsw_wq = alloc_workqueue(fmt: mlxsw_core_driver_name, flags: 0, max_active: 0);
3445 if (!mlxsw_wq) {
3446 err = -ENOMEM;
3447 goto err_alloc_workqueue;
3448 }
3449 mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
3450 mlxsw_core_driver_name);
3451 if (!mlxsw_owq) {
3452 err = -ENOMEM;
3453 goto err_alloc_ordered_workqueue;
3454 }
3455 return 0;
3456
3457err_alloc_ordered_workqueue:
3458 destroy_workqueue(wq: mlxsw_wq);
3459err_alloc_workqueue:
3460 mlxsw_linecard_driver_unregister();
3461 return err;
3462}
3463
3464static void __exit mlxsw_core_module_exit(void)
3465{
3466 destroy_workqueue(wq: mlxsw_owq);
3467 destroy_workqueue(wq: mlxsw_wq);
3468 mlxsw_linecard_driver_unregister();
3469}
3470
3471module_init(mlxsw_core_module_init);
3472module_exit(mlxsw_core_module_exit);
3473
3474MODULE_LICENSE("Dual BSD/GPL");
3475MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
3476MODULE_DESCRIPTION("Mellanox switch device core driver");
3477

/* source code of linux/drivers/net/ethernet/mellanox/mlxsw/core.c */