1 | /* |
2 | * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. |
3 | * |
4 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the |
8 | * OpenIB.org BSD license below: |
9 | * |
10 | * Redistribution and use in source and binary forms, with or |
11 | * without modification, are permitted provided that the following |
12 | * conditions are met: |
13 | * |
14 | * - Redistributions of source code must retain the above |
15 | * copyright notice, this list of conditions and the following |
16 | * disclaimer. |
17 | * |
18 | * - Redistributions in binary form must reproduce the above |
19 | * copyright notice, this list of conditions and the following |
20 | * disclaimer in the documentation and/or other materials |
21 | * provided with the distribution. |
22 | * |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
30 | * SOFTWARE. |
31 | */ |
32 | |
33 | #ifndef MLX5_DRIVER_H |
34 | #define MLX5_DRIVER_H |
35 | |
36 | #include <linux/kernel.h> |
37 | #include <linux/completion.h> |
38 | #include <linux/pci.h> |
39 | #include <linux/irq.h> |
40 | #include <linux/spinlock_types.h> |
41 | #include <linux/semaphore.h> |
42 | #include <linux/slab.h> |
43 | #include <linux/vmalloc.h> |
44 | #include <linux/xarray.h> |
45 | #include <linux/workqueue.h> |
46 | #include <linux/mempool.h> |
47 | #include <linux/interrupt.h> |
48 | #include <linux/idr.h> |
49 | #include <linux/notifier.h> |
50 | #include <linux/refcount.h> |
51 | #include <linux/auxiliary_bus.h> |
52 | #include <linux/mutex.h> |
53 | |
54 | #include <linux/mlx5/device.h> |
55 | #include <linux/mlx5/doorbell.h> |
56 | #include <linux/mlx5/eq.h> |
57 | #include <linux/timecounter.h> |
58 | #include <linux/ptp_clock_kernel.h> |
59 | #include <net/devlink.h> |
60 | |
61 | #define MLX5_ADEV_NAME "mlx5_core" |
62 | |
63 | #define MLX5_IRQ_EQ_CTRL (U8_MAX) |
64 | |
65 | enum { |
66 | MLX5_BOARD_ID_LEN = 64, |
67 | }; |
68 | |
69 | enum { |
70 | MLX5_CMD_WQ_MAX_NAME = 32, |
71 | }; |
72 | |
73 | enum { |
74 | CMD_OWNER_SW = 0x0, |
75 | CMD_OWNER_HW = 0x1, |
76 | CMD_STATUS_SUCCESS = 0, |
77 | }; |
78 | |
79 | enum mlx5_sqp_t { |
80 | MLX5_SQP_SMI = 0, |
81 | MLX5_SQP_GSI = 1, |
82 | MLX5_SQP_IEEE_1588 = 2, |
83 | MLX5_SQP_SNIFFER = 3, |
84 | MLX5_SQP_SYNC_UMR = 4, |
85 | }; |
86 | |
87 | enum { |
88 | MLX5_MAX_PORTS = 4, |
89 | }; |
90 | |
91 | enum { |
92 | MLX5_ATOMIC_MODE_OFFSET = 16, |
93 | MLX5_ATOMIC_MODE_IB_COMP = 1, |
94 | MLX5_ATOMIC_MODE_CX = 2, |
95 | MLX5_ATOMIC_MODE_8B = 3, |
96 | MLX5_ATOMIC_MODE_16B = 4, |
97 | MLX5_ATOMIC_MODE_32B = 5, |
98 | MLX5_ATOMIC_MODE_64B = 6, |
99 | MLX5_ATOMIC_MODE_128B = 7, |
100 | MLX5_ATOMIC_MODE_256B = 8, |
101 | }; |
102 | |
103 | enum { |
104 | MLX5_REG_SBPR = 0xb001, |
105 | MLX5_REG_SBCM = 0xb002, |
106 | MLX5_REG_QPTS = 0x4002, |
107 | MLX5_REG_QETCR = 0x4005, |
108 | MLX5_REG_QTCT = 0x400a, |
109 | MLX5_REG_QPDPM = 0x4013, |
110 | MLX5_REG_QCAM = 0x4019, |
111 | MLX5_REG_DCBX_PARAM = 0x4020, |
112 | MLX5_REG_DCBX_APP = 0x4021, |
113 | MLX5_REG_FPGA_CAP = 0x4022, |
114 | MLX5_REG_FPGA_CTRL = 0x4023, |
115 | MLX5_REG_FPGA_ACCESS_REG = 0x4024, |
116 | MLX5_REG_CORE_DUMP = 0x402e, |
117 | MLX5_REG_PCAP = 0x5001, |
118 | MLX5_REG_PMTU = 0x5003, |
119 | MLX5_REG_PTYS = 0x5004, |
120 | MLX5_REG_PAOS = 0x5006, |
121 | MLX5_REG_PFCC = 0x5007, |
122 | MLX5_REG_PPCNT = 0x5008, |
123 | MLX5_REG_PPTB = 0x500b, |
124 | MLX5_REG_PBMC = 0x500c, |
125 | MLX5_REG_PMAOS = 0x5012, |
126 | MLX5_REG_PUDE = 0x5009, |
127 | MLX5_REG_PMPE = 0x5010, |
128 | MLX5_REG_PELC = 0x500e, |
129 | MLX5_REG_PVLC = 0x500f, |
130 | MLX5_REG_PCMR = 0x5041, |
131 | MLX5_REG_PDDR = 0x5031, |
132 | MLX5_REG_PMLP = 0x5002, |
133 | MLX5_REG_PPLM = 0x5023, |
134 | MLX5_REG_PCAM = 0x507f, |
135 | MLX5_REG_NODE_DESC = 0x6001, |
136 | MLX5_REG_HOST_ENDIANNESS = 0x7004, |
137 | MLX5_REG_MTCAP = 0x9009, |
138 | MLX5_REG_MTMP = 0x900A, |
139 | MLX5_REG_MCIA = 0x9014, |
140 | MLX5_REG_MFRL = 0x9028, |
141 | MLX5_REG_MLCR = 0x902b, |
142 | MLX5_REG_MRTC = 0x902d, |
143 | MLX5_REG_MTRC_CAP = 0x9040, |
144 | MLX5_REG_MTRC_CONF = 0x9041, |
145 | MLX5_REG_MTRC_STDB = 0x9042, |
146 | MLX5_REG_MTRC_CTRL = 0x9043, |
147 | MLX5_REG_MPEIN = 0x9050, |
148 | MLX5_REG_MPCNT = 0x9051, |
149 | MLX5_REG_MTPPS = 0x9053, |
150 | MLX5_REG_MTPPSE = 0x9054, |
151 | MLX5_REG_MTUTC = 0x9055, |
152 | MLX5_REG_MPEGC = 0x9056, |
153 | MLX5_REG_MPIR = 0x9059, |
154 | MLX5_REG_MCQS = 0x9060, |
155 | MLX5_REG_MCQI = 0x9061, |
156 | MLX5_REG_MCC = 0x9062, |
157 | MLX5_REG_MCDA = 0x9063, |
158 | MLX5_REG_MCAM = 0x907f, |
159 | MLX5_REG_MSECQ = 0x9155, |
160 | MLX5_REG_MSEES = 0x9156, |
161 | MLX5_REG_MIRC = 0x9162, |
162 | MLX5_REG_SBCAM = 0xB01F, |
163 | MLX5_REG_RESOURCE_DUMP = 0xC000, |
164 | MLX5_REG_DTOR = 0xC00E, |
165 | }; |
166 | |
167 | enum mlx5_qpts_trust_state { |
168 | MLX5_QPTS_TRUST_PCP = 1, |
169 | MLX5_QPTS_TRUST_DSCP = 2, |
170 | }; |
171 | |
172 | enum mlx5_dcbx_oper_mode { |
173 | MLX5E_DCBX_PARAM_VER_OPER_HOST = 0x0, |
174 | MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3, |
175 | }; |
176 | |
177 | enum { |
178 | MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0, |
179 | MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1, |
180 | MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP = 1 << 2, |
181 | MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD = 1 << 3, |
182 | }; |
183 | |
184 | enum mlx5_page_fault_resume_flags { |
185 | MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0, |
186 | MLX5_PAGE_FAULT_RESUME_WRITE = 1 << 1, |
187 | MLX5_PAGE_FAULT_RESUME_RDMA = 1 << 2, |
188 | MLX5_PAGE_FAULT_RESUME_ERROR = 1 << 7, |
189 | }; |
190 | |
191 | enum dbg_rsc_type { |
192 | MLX5_DBG_RSC_QP, |
193 | MLX5_DBG_RSC_EQ, |
194 | MLX5_DBG_RSC_CQ, |
195 | }; |
196 | |
197 | enum port_state_policy { |
198 | MLX5_POLICY_DOWN = 0, |
199 | MLX5_POLICY_UP = 1, |
200 | MLX5_POLICY_FOLLOW = 2, |
201 | MLX5_POLICY_INVALID = 0xffffffff |
202 | }; |
203 | |
204 | enum mlx5_coredev_type { |
205 | MLX5_COREDEV_PF, |
206 | MLX5_COREDEV_VF, |
207 | MLX5_COREDEV_SF, |
208 | }; |
209 | |
210 | struct mlx5_field_desc { |
211 | int i; |
212 | }; |
213 | |
214 | struct mlx5_rsc_debug { |
215 | struct mlx5_core_dev *dev; |
216 | void *object; |
217 | enum dbg_rsc_type type; |
218 | struct dentry *root; |
219 | struct mlx5_field_desc fields[]; |
220 | }; |
221 | |
222 | enum mlx5_dev_event { |
223 | MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */ |
224 | MLX5_DEV_EVENT_PORT_AFFINITY = 129, |
225 | MLX5_DEV_EVENT_MULTIPORT_ESW = 130, |
226 | }; |
227 | |
228 | enum mlx5_port_status { |
229 | MLX5_PORT_UP = 1, |
230 | MLX5_PORT_DOWN = 2, |
231 | }; |
232 | |
233 | enum mlx5_cmdif_state { |
234 | MLX5_CMDIF_STATE_UNINITIALIZED, |
235 | MLX5_CMDIF_STATE_UP, |
236 | MLX5_CMDIF_STATE_DOWN, |
237 | }; |
238 | |
239 | struct mlx5_cmd_first { |
240 | __be32 data[4]; |
241 | }; |
242 | |
243 | struct mlx5_cmd_msg { |
244 | struct list_head list; |
245 | struct cmd_msg_cache *parent; |
246 | u32 len; |
247 | struct mlx5_cmd_first first; |
248 | struct mlx5_cmd_mailbox *next; |
249 | }; |
250 | |
251 | struct mlx5_cmd_debug { |
252 | struct dentry *dbg_root; |
253 | void *in_msg; |
254 | void *out_msg; |
255 | u8 status; |
256 | u16 inlen; |
257 | u16 outlen; |
258 | }; |
259 | |
260 | struct cmd_msg_cache { |
261 | /* protect block chain allocations |
262 | */ |
263 | spinlock_t lock; |
264 | struct list_head head; |
265 | unsigned int max_inbox_size; |
266 | unsigned int num_ent; |
267 | }; |
268 | |
269 | enum { |
270 | MLX5_NUM_COMMAND_CACHES = 5, |
271 | }; |
272 | |
273 | struct mlx5_cmd_stats { |
274 | u64 sum; |
275 | u64 n; |
276 | /* number of times command failed */ |
277 | u64 failed; |
278 | /* number of times command failed on bad status returned by FW */ |
279 | u64 failed_mbox_status; |
	/* errno returned by the last failed command */
281 | u32 last_failed_errno; |
282 | /* last bad status returned by FW */ |
283 | u8 last_failed_mbox_status; |
	/* FW syndrome returned by the last failed command */
285 | u32 last_failed_syndrome; |
286 | struct dentry *root; |
287 | /* protect command average calculations */ |
288 | spinlock_t lock; |
289 | }; |
290 | |
291 | struct mlx5_cmd { |
292 | struct mlx5_nb nb; |
293 | |
	/* members which need to be queried or reinitialized on each reload */
295 | struct { |
296 | u16 cmdif_rev; |
297 | u8 log_sz; |
298 | u8 log_stride; |
299 | int max_reg_cmds; |
300 | unsigned long bitmask; |
301 | struct semaphore sem; |
302 | struct semaphore pages_sem; |
303 | struct semaphore throttle_sem; |
304 | } vars; |
305 | enum mlx5_cmdif_state state; |
306 | void *cmd_alloc_buf; |
307 | dma_addr_t alloc_dma; |
308 | int alloc_size; |
309 | void *cmd_buf; |
310 | dma_addr_t dma; |
311 | |
312 | /* protect command queue allocations |
313 | */ |
314 | spinlock_t alloc_lock; |
315 | |
316 | /* protect token allocations |
317 | */ |
318 | spinlock_t token_lock; |
319 | u8 token; |
320 | char wq_name[MLX5_CMD_WQ_MAX_NAME]; |
321 | struct workqueue_struct *wq; |
322 | int mode; |
323 | u16 allowed_opcode; |
324 | struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS]; |
325 | struct dma_pool *pool; |
326 | struct mlx5_cmd_debug dbg; |
327 | struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES]; |
328 | int checksum_disabled; |
329 | struct xarray stats; |
330 | }; |
331 | |
332 | struct mlx5_cmd_mailbox { |
333 | void *buf; |
334 | dma_addr_t dma; |
335 | struct mlx5_cmd_mailbox *next; |
336 | }; |
337 | |
338 | struct mlx5_buf_list { |
339 | void *buf; |
340 | dma_addr_t map; |
341 | }; |
342 | |
343 | struct mlx5_frag_buf { |
344 | struct mlx5_buf_list *frags; |
345 | int npages; |
346 | int size; |
347 | u8 page_shift; |
348 | }; |
349 | |
350 | struct mlx5_frag_buf_ctrl { |
351 | struct mlx5_buf_list *frags; |
352 | u32 sz_m1; |
353 | u16 frag_sz_m1; |
354 | u16 strides_offset; |
355 | u8 log_sz; |
356 | u8 log_stride; |
357 | u8 log_frag_strides; |
358 | }; |
359 | |
360 | struct mlx5_core_psv { |
361 | u32 psv_idx; |
362 | struct psv_layout { |
363 | u32 pd; |
364 | u16 syndrome; |
365 | u16 reserved; |
366 | u16 bg; |
367 | u16 app_tag; |
368 | u32 ref_tag; |
369 | } psv; |
370 | }; |
371 | |
372 | struct mlx5_core_sig_ctx { |
373 | struct mlx5_core_psv psv_memory; |
374 | struct mlx5_core_psv psv_wire; |
375 | struct ib_sig_err err_item; |
376 | bool sig_status_checked; |
377 | bool sig_err_exists; |
378 | u32 sigerr_count; |
379 | }; |
380 | |
381 | #define MLX5_24BIT_MASK ((1 << 24) - 1) |
382 | |
383 | enum mlx5_res_type { |
384 | MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP, |
385 | MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ, |
386 | MLX5_RES_SQ = MLX5_EVENT_QUEUE_TYPE_SQ, |
387 | MLX5_RES_SRQ = 3, |
388 | MLX5_RES_XSRQ = 4, |
389 | MLX5_RES_XRQ = 5, |
390 | }; |
391 | |
392 | struct mlx5_core_rsc_common { |
393 | enum mlx5_res_type res; |
394 | refcount_t refcount; |
395 | struct completion free; |
396 | }; |
397 | |
398 | struct mlx5_uars_page { |
399 | void __iomem *map; |
400 | bool wc; |
401 | u32 index; |
402 | struct list_head list; |
403 | unsigned int bfregs; |
	unsigned long *reg_bitmap; /* for non-fast-path bf regs */
405 | unsigned long *fp_bitmap; |
406 | unsigned int reg_avail; |
407 | unsigned int fp_avail; |
408 | struct kref ref_count; |
409 | struct mlx5_core_dev *mdev; |
410 | }; |
411 | |
412 | struct mlx5_bfreg_head { |
413 | /* protect blue flame registers allocations */ |
414 | struct mutex lock; |
415 | struct list_head list; |
416 | }; |
417 | |
418 | struct mlx5_bfreg_data { |
419 | struct mlx5_bfreg_head reg_head; |
420 | struct mlx5_bfreg_head wc_head; |
421 | }; |
422 | |
423 | struct mlx5_sq_bfreg { |
424 | void __iomem *map; |
425 | struct mlx5_uars_page *up; |
426 | bool wc; |
427 | u32 index; |
428 | unsigned int offset; |
429 | }; |
430 | |
431 | struct mlx5_core_health { |
432 | struct health_buffer __iomem *health; |
433 | __be32 __iomem *health_counter; |
434 | struct timer_list timer; |
435 | u32 prev; |
436 | int miss_counter; |
437 | u8 synd; |
438 | u32 fatal_error; |
439 | u32 crdump_size; |
440 | struct workqueue_struct *wq; |
441 | unsigned long flags; |
442 | struct work_struct fatal_report_work; |
443 | struct work_struct report_work; |
444 | struct devlink_health_reporter *fw_reporter; |
445 | struct devlink_health_reporter *fw_fatal_reporter; |
446 | struct devlink_health_reporter *vnic_reporter; |
447 | struct delayed_work update_fw_log_ts_work; |
448 | }; |
449 | |
450 | enum { |
451 | MLX5_PF_NOTIFY_DISABLE_VF, |
452 | MLX5_PF_NOTIFY_ENABLE_VF, |
453 | }; |
454 | |
455 | struct mlx5_vf_context { |
456 | int enabled; |
457 | u64 port_guid; |
458 | u64 node_guid; |
	/* The valid bits below apply to the administrative GUIDs only;
	 * they are set after ndo_set_vf_guid().
461 | */ |
462 | u8 port_guid_valid:1; |
463 | u8 node_guid_valid:1; |
464 | enum port_state_policy policy; |
465 | struct blocking_notifier_head notifier; |
466 | }; |
467 | |
468 | struct mlx5_core_sriov { |
469 | struct mlx5_vf_context *vfs_ctx; |
470 | int num_vfs; |
471 | u16 max_vfs; |
472 | u16 max_ec_vfs; |
473 | }; |
474 | |
475 | struct mlx5_fc_pool { |
476 | struct mlx5_core_dev *dev; |
477 | struct mutex pool_lock; /* protects pool lists */ |
478 | struct list_head fully_used; |
479 | struct list_head partially_used; |
480 | struct list_head unused; |
481 | int available_fcs; |
482 | int used_fcs; |
483 | int threshold; |
484 | }; |
485 | |
486 | struct mlx5_fc_stats { |
487 | spinlock_t counters_idr_lock; /* protects counters_idr */ |
488 | struct idr counters_idr; |
489 | struct list_head counters; |
490 | struct llist_head addlist; |
491 | struct llist_head dellist; |
492 | |
493 | struct workqueue_struct *wq; |
494 | struct delayed_work work; |
495 | unsigned long next_query; |
496 | unsigned long sampling_interval; /* jiffies */ |
497 | u32 *bulk_query_out; |
498 | int bulk_query_len; |
499 | size_t num_counters; |
500 | bool bulk_query_alloc_failed; |
501 | unsigned long next_bulk_query_alloc; |
502 | struct mlx5_fc_pool fc_pool; |
503 | }; |
504 | |
505 | struct mlx5_events; |
506 | struct mlx5_mpfs; |
507 | struct mlx5_eswitch; |
508 | struct mlx5_lag; |
509 | struct mlx5_devcom_dev; |
510 | struct mlx5_fw_reset; |
511 | struct mlx5_eq_table; |
512 | struct mlx5_irq_table; |
513 | struct mlx5_vhca_state_notifier; |
514 | struct mlx5_sf_dev_table; |
515 | struct mlx5_sf_hw_table; |
516 | struct mlx5_sf_table; |
517 | struct mlx5_crypto_dek_priv; |
518 | |
519 | struct mlx5_rate_limit { |
520 | u32 rate; |
521 | u32 max_burst_sz; |
522 | u16 typical_pkt_sz; |
523 | }; |
524 | |
525 | struct mlx5_rl_entry { |
526 | u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)]; |
527 | u64 refcount; |
528 | u16 index; |
529 | u16 uid; |
530 | u8 dedicated : 1; |
531 | }; |
532 | |
533 | struct mlx5_rl_table { |
534 | /* protect rate limit table */ |
535 | struct mutex rl_lock; |
536 | u16 max_size; |
537 | u32 max_rate; |
538 | u32 min_rate; |
539 | struct mlx5_rl_entry *rl_entry; |
540 | u64 refcount; |
541 | }; |
542 | |
543 | struct mlx5_core_roce { |
544 | struct mlx5_flow_table *ft; |
545 | struct mlx5_flow_group *fg; |
546 | struct mlx5_flow_handle *allow_rule; |
547 | }; |
548 | |
549 | enum { |
550 | MLX5_PRIV_FLAGS_DISABLE_IB_ADEV = 1 << 0, |
551 | MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV = 1 << 1, |
	/* Set during device detach to block any further device
	 * creation/deletion on driver rescan. Unset during device attach.
554 | */ |
555 | MLX5_PRIV_FLAGS_DETACH = 1 << 2, |
556 | }; |
557 | |
558 | struct mlx5_adev { |
559 | struct auxiliary_device adev; |
560 | struct mlx5_core_dev *mdev; |
561 | int idx; |
562 | }; |
563 | |
564 | struct mlx5_debugfs_entries { |
565 | struct dentry *dbg_root; |
566 | struct dentry *qp_debugfs; |
567 | struct dentry *eq_debugfs; |
568 | struct dentry *cq_debugfs; |
569 | struct dentry *cmdif_debugfs; |
570 | struct dentry *pages_debugfs; |
571 | struct dentry *lag_debugfs; |
572 | }; |
573 | |
574 | enum mlx5_func_type { |
575 | MLX5_PF, |
576 | MLX5_VF, |
577 | MLX5_SF, |
578 | MLX5_HOST_PF, |
579 | MLX5_EC_VF, |
580 | MLX5_FUNC_TYPE_NUM, |
581 | }; |
582 | |
583 | struct mlx5_ft_pool; |
584 | struct mlx5_priv { |
	/* IRQ table is valid only for real PCI devices (PF or VF) */
586 | struct mlx5_irq_table *irq_table; |
587 | struct mlx5_eq_table *eq_table; |
588 | |
589 | /* pages stuff */ |
590 | struct mlx5_nb pg_nb; |
591 | struct workqueue_struct *pg_wq; |
592 | struct xarray page_root_xa; |
593 | atomic_t reg_pages; |
594 | struct list_head free_list; |
595 | u32 fw_pages; |
596 | u32 page_counters[MLX5_FUNC_TYPE_NUM]; |
597 | u32 fw_pages_alloc_failed; |
598 | u32 give_pages_dropped; |
599 | u32 reclaim_pages_discard; |
600 | |
601 | struct mlx5_core_health health; |
602 | struct list_head traps; |
603 | |
604 | struct mlx5_debugfs_entries dbg; |
605 | |
	/* start: alloc stuff */
607 | /* protect buffer allocation according to numa node */ |
608 | struct mutex alloc_mutex; |
609 | int numa_node; |
610 | |
611 | struct mutex pgdir_mutex; |
612 | struct list_head pgdir_list; |
	/* end: alloc stuff */
614 | |
615 | struct mlx5_adev **adev; |
616 | int adev_idx; |
617 | int sw_vhca_id; |
618 | struct mlx5_events *events; |
619 | struct mlx5_vhca_events *vhca_events; |
620 | |
621 | struct mlx5_flow_steering *steering; |
622 | struct mlx5_mpfs *mpfs; |
623 | struct mlx5_eswitch *eswitch; |
624 | struct mlx5_core_sriov sriov; |
625 | struct mlx5_lag *lag; |
626 | u32 flags; |
627 | struct mlx5_devcom_dev *devc; |
628 | struct mlx5_devcom_comp_dev *hca_devcom_comp; |
629 | struct mlx5_fw_reset *fw_reset; |
630 | struct mlx5_core_roce roce; |
631 | struct mlx5_fc_stats fc_stats; |
632 | struct mlx5_rl_table rl_table; |
633 | struct mlx5_ft_pool *ft_pool; |
634 | |
635 | struct mlx5_bfreg_data bfregs; |
636 | struct mlx5_uars_page *uar; |
637 | #ifdef CONFIG_MLX5_SF |
638 | struct mlx5_vhca_state_notifier *vhca_state_notifier; |
639 | struct mlx5_sf_dev_table *sf_dev_table; |
640 | struct mlx5_core_dev *parent_mdev; |
641 | #endif |
642 | #ifdef CONFIG_MLX5_SF_MANAGER |
643 | struct mlx5_sf_hw_table *sf_hw_table; |
644 | struct mlx5_sf_table *sf_table; |
645 | #endif |
646 | }; |
647 | |
648 | enum mlx5_device_state { |
649 | MLX5_DEVICE_STATE_UP = 1, |
650 | MLX5_DEVICE_STATE_INTERNAL_ERROR, |
651 | }; |
652 | |
653 | enum mlx5_interface_state { |
654 | MLX5_INTERFACE_STATE_UP = BIT(0), |
655 | MLX5_BREAK_FW_WAIT = BIT(1), |
656 | }; |
657 | |
658 | enum mlx5_pci_status { |
659 | MLX5_PCI_STATUS_DISABLED, |
660 | MLX5_PCI_STATUS_ENABLED, |
661 | }; |
662 | |
663 | enum mlx5_pagefault_type_flags { |
664 | MLX5_PFAULT_REQUESTOR = 1 << 0, |
665 | MLX5_PFAULT_WRITE = 1 << 1, |
666 | MLX5_PFAULT_RDMA = 1 << 2, |
667 | }; |
668 | |
669 | struct mlx5_td { |
	/* protects tirs_list changes during tirs refresh */
671 | struct mutex list_lock; |
672 | struct list_head tirs_list; |
673 | u32 tdn; |
674 | }; |
675 | |
676 | struct mlx5e_resources { |
677 | struct mlx5e_hw_objs { |
678 | u32 pdn; |
679 | struct mlx5_td td; |
680 | u32 mkey; |
681 | struct mlx5_sq_bfreg bfreg; |
682 | #define MLX5_MAX_NUM_TC 8 |
683 | u32 tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC]; |
684 | bool tisn_valid; |
685 | } hw_objs; |
686 | struct net_device *uplink_netdev; |
687 | struct mutex uplink_netdev_lock; |
688 | struct mlx5_crypto_dek_priv *dek_priv; |
689 | }; |
690 | |
691 | enum mlx5_sw_icm_type { |
692 | MLX5_SW_ICM_TYPE_STEERING, |
693 | , |
694 | , |
695 | MLX5_SW_ICM_TYPE_SW_ENCAP, |
696 | }; |
697 | |
698 | #define MLX5_MAX_RESERVED_GIDS 8 |
699 | |
700 | struct mlx5_rsvd_gids { |
701 | unsigned int start; |
702 | unsigned int count; |
703 | struct ida ida; |
704 | }; |
705 | |
706 | #define MAX_PIN_NUM 8 |
707 | struct mlx5_pps { |
708 | u8 pin_caps[MAX_PIN_NUM]; |
709 | struct work_struct out_work; |
710 | u64 start[MAX_PIN_NUM]; |
711 | u8 enabled; |
712 | u64 min_npps_period; |
713 | u64 min_out_pulse_duration_ns; |
714 | }; |
715 | |
716 | struct mlx5_timer { |
717 | struct cyclecounter cycles; |
718 | struct timecounter tc; |
719 | u32 nominal_c_mult; |
720 | unsigned long overflow_period; |
721 | struct delayed_work overflow_work; |
722 | }; |
723 | |
724 | struct mlx5_clock { |
725 | struct mlx5_nb pps_nb; |
726 | seqlock_t lock; |
727 | struct hwtstamp_config hwtstamp_config; |
728 | struct ptp_clock *ptp; |
729 | struct ptp_clock_info ptp_info; |
730 | struct mlx5_pps pps_info; |
731 | struct mlx5_timer timer; |
732 | }; |
733 | |
734 | struct mlx5_dm; |
735 | struct mlx5_fw_tracer; |
736 | struct mlx5_vxlan; |
737 | struct mlx5_geneve; |
738 | struct mlx5_hv_vhca; |
739 | |
740 | #define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity)) |
741 | #define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)) |
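
/* Illustrative example (not normative): a device reporting
 * log_sw_icm_alloc_granularity == 12 gives
 * MLX5_SW_ICM_BLOCK_SIZE(dev) == 4096 bytes, i.e. SW ICM is
 * allocated in 4KB blocks.
 */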
742 | |
743 | enum { |
744 | MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0, |
745 | MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1, |
746 | }; |
747 | |
748 | enum { |
749 | MKEY_CACHE_LAST_STD_ENTRY = 20, |
750 | MLX5_IMR_KSM_CACHE_ENTRY, |
751 | MAX_MKEY_CACHE_ENTRIES |
752 | }; |
753 | |
754 | struct mlx5_profile { |
755 | u64 mask; |
756 | u8 log_max_qp; |
757 | u8 num_cmd_caches; |
758 | struct { |
759 | int size; |
760 | int limit; |
761 | } mr_cache[MAX_MKEY_CACHE_ENTRIES]; |
762 | }; |
763 | |
764 | struct mlx5_hca_cap { |
765 | u32 cur[MLX5_UN_SZ_DW(hca_cap_union)]; |
766 | u32 max[MLX5_UN_SZ_DW(hca_cap_union)]; |
767 | }; |
768 | |
769 | struct mlx5_core_dev { |
770 | struct device *device; |
771 | enum mlx5_coredev_type coredev_type; |
772 | struct pci_dev *pdev; |
773 | /* sync pci state */ |
774 | struct mutex pci_status_mutex; |
775 | enum mlx5_pci_status pci_status; |
776 | u8 rev_id; |
777 | char board_id[MLX5_BOARD_ID_LEN]; |
778 | struct mlx5_cmd cmd; |
779 | struct { |
780 | struct mlx5_hca_cap *hca[MLX5_CAP_NUM]; |
781 | u32 pcam[MLX5_ST_SZ_DW(pcam_reg)]; |
782 | u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)]; |
783 | u32 fpga[MLX5_ST_SZ_DW(fpga_cap)]; |
784 | u32 qcam[MLX5_ST_SZ_DW(qcam_reg)]; |
785 | u8 embedded_cpu; |
786 | } caps; |
787 | struct mlx5_timeouts *timeouts; |
788 | u64 sys_image_guid; |
789 | phys_addr_t iseg_base; |
790 | struct mlx5_init_seg __iomem *iseg; |
791 | phys_addr_t bar_addr; |
792 | enum mlx5_device_state state; |
793 | /* sync interface state */ |
794 | struct mutex intf_state_mutex; |
795 | struct lock_class_key lock_key; |
796 | unsigned long intf_state; |
797 | struct mlx5_priv priv; |
798 | struct mlx5_profile profile; |
799 | u32 issi; |
800 | struct mlx5e_resources mlx5e_res; |
801 | struct mlx5_dm *dm; |
802 | struct mlx5_vxlan *vxlan; |
803 | struct mlx5_geneve *geneve; |
804 | struct { |
805 | struct mlx5_rsvd_gids reserved_gids; |
806 | u32 roce_en; |
807 | } roce; |
808 | #ifdef CONFIG_MLX5_FPGA |
809 | struct mlx5_fpga_device *fpga; |
810 | #endif |
811 | struct mlx5_clock clock; |
812 | struct mlx5_ib_clock_info *clock_info; |
813 | struct mlx5_fw_tracer *tracer; |
814 | struct mlx5_rsc_dump *rsc_dump; |
815 | u32 vsc_addr; |
816 | struct mlx5_hv_vhca *hv_vhca; |
817 | struct mlx5_hwmon *hwmon; |
818 | u64 num_block_tc; |
819 | u64 num_block_ipsec; |
820 | #ifdef CONFIG_MLX5_MACSEC |
821 | struct mlx5_macsec_fs *macsec_fs; |
822 | /* MACsec notifier chain to sync MACsec core and IB database */ |
823 | struct blocking_notifier_head macsec_nh; |
824 | #endif |
825 | u64 num_ipsec_offloads; |
826 | struct mlx5_sd *sd; |
827 | }; |
828 | |
829 | struct mlx5_db { |
830 | __be32 *db; |
831 | union { |
832 | struct mlx5_db_pgdir *pgdir; |
833 | struct mlx5_ib_user_db_page *user_page; |
834 | } u; |
835 | dma_addr_t dma; |
836 | int index; |
837 | }; |
838 | |
839 | enum { |
840 | MLX5_COMP_EQ_SIZE = 1024, |
841 | }; |
842 | |
843 | enum { |
844 | MLX5_PTYS_IB = 1 << 0, |
845 | MLX5_PTYS_EN = 1 << 2, |
846 | }; |
847 | |
848 | typedef void (*mlx5_cmd_cbk_t)(int status, void *context); |
849 | |
850 | enum { |
851 | MLX5_CMD_ENT_STATE_PENDING_COMP, |
852 | }; |
853 | |
854 | struct mlx5_cmd_work_ent { |
855 | unsigned long state; |
856 | struct mlx5_cmd_msg *in; |
857 | struct mlx5_cmd_msg *out; |
858 | void *uout; |
859 | int uout_size; |
860 | mlx5_cmd_cbk_t callback; |
861 | struct delayed_work cb_timeout_work; |
862 | void *context; |
863 | int idx; |
864 | struct completion handling; |
865 | struct completion done; |
866 | struct mlx5_cmd *cmd; |
867 | struct work_struct work; |
868 | struct mlx5_cmd_layout *lay; |
869 | int ret; |
870 | int page_queue; |
871 | u8 status; |
872 | u8 token; |
873 | u64 ts1; |
874 | u64 ts2; |
875 | u16 op; |
876 | bool polling; |
877 | /* Track the max comp handlers */ |
878 | refcount_t refcnt; |
879 | }; |
880 | |
881 | enum phy_port_state { |
882 | MLX5_AAA_111 |
883 | }; |
884 | |
885 | struct mlx5_hca_vport_context { |
886 | u32 field_select; |
887 | bool sm_virt_aware; |
888 | bool has_smi; |
889 | bool has_raw; |
890 | enum port_state_policy policy; |
891 | enum phy_port_state phys_state; |
892 | enum ib_port_state vport_state; |
893 | u8 port_physical_state; |
894 | u64 sys_image_guid; |
895 | u64 port_guid; |
896 | u64 node_guid; |
897 | u32 cap_mask1; |
898 | u32 cap_mask1_perm; |
899 | u16 cap_mask2; |
900 | u16 cap_mask2_perm; |
901 | u16 lid; |
	u8 init_type_reply; /* bitmask: see IB spec 14.2.5.6 InitTypeReply */
903 | u8 lmc; |
904 | u8 subnet_timeout; |
905 | u16 sm_lid; |
906 | u8 sm_sl; |
907 | u16 qkey_violation_counter; |
908 | u16 pkey_violation_counter; |
909 | bool grh_required; |
910 | }; |
911 | |
912 | #define STRUCT_FIELD(header, field) \ |
913 | .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \ |
914 | .struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field |
915 | |
916 | extern struct dentry *mlx5_debugfs_root; |
917 | |
918 | static inline u16 fw_rev_maj(struct mlx5_core_dev *dev) |
919 | { |
920 | return ioread32be(&dev->iseg->fw_rev) & 0xffff; |
921 | } |
922 | |
923 | static inline u16 fw_rev_min(struct mlx5_core_dev *dev) |
924 | { |
925 | return ioread32be(&dev->iseg->fw_rev) >> 16; |
926 | } |
927 | |
928 | static inline u16 fw_rev_sub(struct mlx5_core_dev *dev) |
929 | { |
930 | return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff; |
931 | } |
932 | |
933 | static inline u32 mlx5_base_mkey(const u32 key) |
934 | { |
935 | return key & 0xffffff00u; |
936 | } |
937 | |
938 | static inline u32 wq_get_byte_sz(u8 log_sz, u8 log_stride) |
939 | { |
940 | return ((u32)1 << log_sz) << log_stride; |
941 | } |
942 | |
943 | static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags, |
944 | u8 log_stride, u8 log_sz, |
945 | u16 strides_offset, |
946 | struct mlx5_frag_buf_ctrl *fbc) |
947 | { |
948 | fbc->frags = frags; |
949 | fbc->log_stride = log_stride; |
950 | fbc->log_sz = log_sz; |
951 | fbc->sz_m1 = (1 << fbc->log_sz) - 1; |
952 | fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride; |
953 | fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1; |
954 | fbc->strides_offset = strides_offset; |
955 | } |
956 | |
957 | static inline void mlx5_init_fbc(struct mlx5_buf_list *frags, |
958 | u8 log_stride, u8 log_sz, |
959 | struct mlx5_frag_buf_ctrl *fbc) |
960 | { |
	mlx5_init_fbc_offset(frags, log_stride, log_sz, 0, fbc);
962 | } |
963 | |
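/* A sketch of the stride lookup below, for illustration only: each
 * fragment is one page holding (1 << log_frag_strides) strides of
 * (1 << log_stride) bytes, so for a stride index ix:
 *
 *	frag   = ix >> fbc->log_frag_strides;			// fragment number
 *	offset = (fbc->frag_sz_m1 & ix) << fbc->log_stride;	// byte offset
 */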
964 | static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc, |
965 | u32 ix) |
966 | { |
967 | unsigned int frag; |
968 | |
969 | ix += fbc->strides_offset; |
970 | frag = ix >> fbc->log_frag_strides; |
971 | |
972 | return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride); |
973 | } |
974 | |
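/* ORing the offset-adjusted index with frag_sz_m1 rounds it up to the
 * last stride of its fragment; the min_t() clamp keeps the result
 * within the buffer (sz_m1).
 */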
975 | static inline u32 |
976 | mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix) |
977 | { |
978 | u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1; |
979 | |
980 | return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1); |
981 | } |
982 | |
983 | enum { |
984 | CMD_ALLOWED_OPCODE_ALL, |
985 | }; |
986 | |
987 | void mlx5_cmd_use_events(struct mlx5_core_dev *dev); |
988 | void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); |
989 | void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode); |
990 | |
991 | struct mlx5_async_ctx { |
992 | struct mlx5_core_dev *dev; |
993 | atomic_t num_inflight; |
994 | struct completion inflight_done; |
995 | }; |
996 | |
997 | struct mlx5_async_work; |
998 | |
999 | typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context); |
1000 | |
1001 | struct mlx5_async_work { |
1002 | struct mlx5_async_ctx *ctx; |
1003 | mlx5_async_cbk_t user_callback; |
1004 | u16 opcode; /* cmd opcode */ |
1005 | u16 op_mod; /* cmd op_mod */ |
1006 | void *out; /* pointer to the cmd output buffer */ |
1007 | }; |
1008 | |
1009 | void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev, |
1010 | struct mlx5_async_ctx *ctx); |
1011 | void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx); |
1012 | int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, |
1013 | void *out, int out_size, mlx5_async_cbk_t callback, |
1014 | struct mlx5_async_work *work); |
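
/* Minimal async usage sketch (my_done and my_cmd are illustrative
 * names, not part of this API):
 *
 *	static void my_done(int status, struct mlx5_async_work *work)
 *	{
 *		// e.g. container_of(work, struct my_cmd, work)
 *	}
 *
 *	mlx5_cmd_init_async_ctx(dev, &ctx);
 *	err = mlx5_cmd_exec_cb(&ctx, in, sizeof(in), out, sizeof(out),
 *			       my_done, &my_cmd.work);
 *	...
 *	mlx5_cmd_cleanup_async_ctx(&ctx); // waits for inflight callbacks
 */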
1015 | void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out); |
1016 | int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); |
1017 | int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out); |
1018 | int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, |
1019 | int out_size); |
1020 | |
1021 | #define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out) \ |
1022 | ({ \ |
1023 | mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in), out, \ |
1024 | MLX5_ST_SZ_BYTES(ifc_cmd##_out)); \ |
1025 | }) |
1026 | |
1027 | #define mlx5_cmd_exec_in(dev, ifc_cmd, in) \ |
1028 | ({ \ |
1029 | u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {}; \ |
1030 | mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out); \ |
1031 | }) |
1032 | |
1033 | int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, |
1034 | void *out, int out_size); |
1035 | bool mlx5_cmd_is_down(struct mlx5_core_dev *dev); |
1036 | |
1037 | void mlx5_core_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev); |
1038 | void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *mdev); |
1039 | |
1040 | void mlx5_core_mp_event_replay(struct mlx5_core_dev *dev, u32 event, void *data); |
1041 | |
1042 | void mlx5_health_cleanup(struct mlx5_core_dev *dev); |
1043 | int mlx5_health_init(struct mlx5_core_dev *dev); |
1044 | void mlx5_start_health_poll(struct mlx5_core_dev *dev); |
1045 | void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health); |
1046 | void mlx5_start_health_fw_log_up(struct mlx5_core_dev *dev); |
1047 | void mlx5_drain_health_wq(struct mlx5_core_dev *dev); |
1048 | void mlx5_trigger_health_work(struct mlx5_core_dev *dev); |
1049 | int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size, |
1050 | struct mlx5_frag_buf *buf, int node); |
1051 | void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf); |
1052 | int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in, |
1053 | int inlen); |
1054 | int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, u32 mkey); |
1055 | int mlx5_core_query_mkey(struct mlx5_core_dev *dev, u32 mkey, u32 *out, |
1056 | int outlen); |
1057 | int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); |
1058 | int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); |
1059 | int mlx5_pagealloc_init(struct mlx5_core_dev *dev); |
1060 | void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); |
1061 | void mlx5_pagealloc_start(struct mlx5_core_dev *dev); |
1062 | void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); |
1063 | void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev); |
1064 | void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev); |
1065 | int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); |
1066 | int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); |
1067 | void mlx5_register_debugfs(void); |
1068 | void mlx5_unregister_debugfs(void); |
1069 | |
1070 | void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm); |
1071 | void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas); |
1072 | int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn); |
1073 | int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); |
1074 | int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); |
1075 | |
1076 | struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev); |
1077 | void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev); |
1078 | void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev); |
1079 | int mlx5_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, |
1080 | void *data_out, int size_out, u16 reg_id, int arg, |
1081 | int write, bool verbose); |
1082 | int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, |
1083 | int size_in, void *data_out, int size_out, |
1084 | u16 reg_num, int arg, int write); |
1085 | |
1086 | int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, |
1087 | int node); |
1088 | |
1089 | static inline int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db) |
1090 | { |
	return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
1092 | } |
1093 | |
1094 | void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db); |
1095 | |
1096 | const char *mlx5_command_str(int command); |
1097 | void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); |
1098 | void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); |
1099 | int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, |
1100 | int npsvs, u32 *sig_index); |
1101 | int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num); |
1102 | __be32 mlx5_core_get_terminate_scatter_list_mkey(struct mlx5_core_dev *dev); |
1103 | void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common); |
1104 | |
1105 | int mlx5_init_rl_table(struct mlx5_core_dev *dev); |
1106 | void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev); |
1107 | int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index, |
1108 | struct mlx5_rate_limit *rl); |
1109 | void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl); |
1110 | bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate); |
1111 | int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid, |
1112 | bool dedicated_entry, u16 *index); |
1113 | void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index); |
1114 | bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0, |
1115 | struct mlx5_rate_limit *rl_1); |
1116 | int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg, |
1117 | bool map_wc, bool fast_path); |
1118 | void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg); |
1119 | |
1120 | unsigned int mlx5_comp_vectors_max(struct mlx5_core_dev *dev); |
1121 | int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector); |
1122 | unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev); |
1123 | int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index, |
1124 | u8 roce_version, u8 roce_l3_type, const u8 *gid, |
1125 | const u8 *mac, bool vlan, u16 vlan_id, u8 port_num); |
1126 | |
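/* An mkey combines a 24-bit index with an 8-bit variant (key) byte:
 * mkey = (index << 8) | variant. Round-trip sketch using the helpers
 * below:
 *
 *	u32 idx  = mlx5_mkey_to_idx(mkey);
 *	u32 base = mlx5_idx_to_mkey(idx);	// variant byte cleared
 *	u8  var  = mlx5_mkey_variant(mkey);
 */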
1127 | static inline u32 mlx5_mkey_to_idx(u32 mkey) |
1128 | { |
1129 | return mkey >> 8; |
1130 | } |
1131 | |
1132 | static inline u32 mlx5_idx_to_mkey(u32 mkey_idx) |
1133 | { |
1134 | return mkey_idx << 8; |
1135 | } |
1136 | |
1137 | static inline u8 mlx5_mkey_variant(u32 mkey) |
1138 | { |
1139 | return mkey & 0xff; |
1140 | } |
1141 | |
/* Async-atomic event notifier used by mlx5 core to forward FW
 * events received from the event queue to mlx5 consumers.
 * Optimised for event queue dispatching.
1145 | */ |
1146 | int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); |
1147 | int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb); |
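
/* Consumer registration sketch (my_event is an illustrative handler
 * name):
 *
 *	static int my_event(struct notifier_block *nb, unsigned long type,
 *			    void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_nb = { .notifier_call = my_event };
 *
 *	mlx5_notifier_register(dev, &my_nb);
 *	...
 *	mlx5_notifier_unregister(dev, &my_nb);
 */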
1148 | |
/* Async-atomic event notifier used for forwarding
 * events from the event queue to the mlx5 events dispatcher,
 * eswitch, clock and others.
1152 | */ |
1153 | int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb); |
1154 | int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb); |
1155 | |
1156 | /* Blocking event notifier used to forward SW events, used for slow path */ |
1157 | int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); |
1158 | int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb); |
1159 | int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event, |
1160 | void *data); |
1161 | |
1162 | int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); |
1163 | |
1164 | int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev); |
1165 | int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev); |
1166 | bool mlx5_lag_is_roce(struct mlx5_core_dev *dev); |
1167 | bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev); |
1168 | bool mlx5_lag_is_active(struct mlx5_core_dev *dev); |
1169 | bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev); |
1170 | bool mlx5_lag_is_master(struct mlx5_core_dev *dev); |
1171 | bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev); |
1172 | bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev); |
1173 | struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev); |
1174 | u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev, |
1175 | struct net_device *slave); |
1176 | int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, |
1177 | u64 *values, |
1178 | int num_counters, |
1179 | size_t *offsets); |
1180 | struct mlx5_core_dev *mlx5_lag_get_next_peer_mdev(struct mlx5_core_dev *dev, int *i); |
1181 | |
1182 | #define mlx5_lag_for_each_peer_mdev(dev, peer, i) \ |
1183 | for (i = 0, peer = mlx5_lag_get_next_peer_mdev(dev, &i); \ |
1184 | peer; \ |
1185 | peer = mlx5_lag_get_next_peer_mdev(dev, &i)) |
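
/* Iteration sketch (do_something() is a placeholder):
 *
 *	struct mlx5_core_dev *peer;
 *	int i;
 *
 *	mlx5_lag_for_each_peer_mdev(dev, peer, i)
 *		do_something(peer);
 */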
1186 | |
1187 | u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev); |
1188 | struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev); |
1189 | void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up); |
1190 | int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, |
1191 | u64 length, u32 log_alignment, u16 uid, |
1192 | phys_addr_t *addr, u32 *obj_id); |
1193 | int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, |
1194 | u64 length, u16 uid, phys_addr_t addr, u32 obj_id); |
1195 | |
1196 | struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev); |
1197 | void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev); |
1198 | |
1199 | int mlx5_sriov_blocking_notifier_register(struct mlx5_core_dev *mdev, |
1200 | int vf_id, |
1201 | struct notifier_block *nb); |
1202 | void mlx5_sriov_blocking_notifier_unregister(struct mlx5_core_dev *mdev, |
1203 | int vf_id, |
1204 | struct notifier_block *nb); |
1205 | int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev, |
1206 | struct ib_device *device, |
1207 | struct rdma_netdev_alloc_params *params); |
1208 | |
1209 | enum { |
1210 | MLX5_PCI_DEV_IS_VF = 1 << 0, |
1211 | }; |
1212 | |
1213 | static inline bool mlx5_core_is_pf(const struct mlx5_core_dev *dev) |
1214 | { |
1215 | return dev->coredev_type == MLX5_COREDEV_PF; |
1216 | } |
1217 | |
1218 | static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev) |
1219 | { |
1220 | return dev->coredev_type == MLX5_COREDEV_VF; |
1221 | } |
1222 | |
1223 | static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev) |
1224 | { |
1225 | return dev->caps.embedded_cpu; |
1226 | } |
1227 | |
1228 | static inline bool |
1229 | mlx5_core_is_ecpf_esw_manager(const struct mlx5_core_dev *dev) |
1230 | { |
1231 | return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager); |
1232 | } |
1233 | |
1234 | static inline bool mlx5_ecpf_vport_exists(const struct mlx5_core_dev *dev) |
1235 | { |
1236 | return mlx5_core_is_pf(dev) && MLX5_CAP_ESW(dev, ecpf_vport_exists); |
1237 | } |
1238 | |
1239 | static inline u16 mlx5_core_max_vfs(const struct mlx5_core_dev *dev) |
1240 | { |
1241 | return dev->priv.sriov.max_vfs; |
1242 | } |
1243 | |
1244 | static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev) |
1245 | { |
1246 | /* LACP owner conditions: |
1247 | * 1) Function is physical. |
1248 | * 2) LAG is supported by FW. |
1249 | * 3) LAG is managed by driver (currently the only option). |
1250 | */ |
1251 | return MLX5_CAP_GEN(dev, vport_group_manager) && |
1252 | (MLX5_CAP_GEN(dev, num_lag_ports) > 1) && |
1253 | MLX5_CAP_GEN(dev, lag_master); |
1254 | } |
1255 | |
1256 | static inline u16 mlx5_core_max_ec_vfs(const struct mlx5_core_dev *dev) |
1257 | { |
1258 | return dev->priv.sriov.max_ec_vfs; |
1259 | } |
1260 | |
1261 | static inline int mlx5_get_gid_table_len(u16 param) |
1262 | { |
1263 | if (param > 4) { |
		pr_warn("gid table length is zero\n");
1265 | return 0; |
1266 | } |
1267 | |
1268 | return 8 * (1 << param); |
1269 | } |
1270 | |
1271 | static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev) |
1272 | { |
1273 | return !!(dev->priv.rl_table.max_size); |
1274 | } |
1275 | |
1276 | static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev) |
1277 | { |
1278 | return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) && |
1279 | MLX5_CAP_GEN(dev, num_vhca_ports) <= 1; |
1280 | } |
1281 | |
1282 | static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev) |
1283 | { |
1284 | return MLX5_CAP_GEN(dev, num_vhca_ports) > 1; |
1285 | } |
1286 | |
1287 | static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev) |
1288 | { |
1289 | return mlx5_core_is_mp_slave(dev) || |
1290 | mlx5_core_is_mp_master(dev); |
1291 | } |
1292 | |
1293 | static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev) |
1294 | { |
1295 | if (!mlx5_core_mp_enabled(dev)) |
1296 | return 1; |
1297 | |
1298 | return MLX5_CAP_GEN(dev, native_port_num); |
1299 | } |
1300 | |
1301 | static inline int mlx5_get_dev_index(struct mlx5_core_dev *dev) |
1302 | { |
1303 | int idx = MLX5_CAP_GEN(dev, native_port_num); |
1304 | |
1305 | if (idx >= 1 && idx <= MLX5_MAX_PORTS) |
1306 | return idx - 1; |
1307 | else |
1308 | return PCI_FUNC(dev->pdev->devfn); |
1309 | } |
1310 | |
1311 | enum { |
1312 | MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, |
1313 | }; |
1314 | |
1315 | bool mlx5_is_roce_on(struct mlx5_core_dev *dev); |
1316 | |
1317 | static inline bool mlx5_get_roce_state(struct mlx5_core_dev *dev) |
1318 | { |
1319 | if (MLX5_CAP_GEN(dev, roce_rw_supported)) |
1320 | return MLX5_CAP_GEN(dev, roce); |
1321 | |
1322 | /* If RoCE cap is read-only in FW, get RoCE state from devlink |
1323 | * in order to support RoCE enable/disable feature |
1324 | */ |
1325 | return mlx5_is_roce_on(dev); |
1326 | } |
1327 | |
1328 | #ifdef CONFIG_MLX5_MACSEC |
1329 | static inline bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev) |
1330 | { |
1331 | if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) & |
1332 | MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD)) |
1333 | return false; |
1334 | |
1335 | if (!MLX5_CAP_GEN(mdev, log_max_dek)) |
1336 | return false; |
1337 | |
1338 | if (!MLX5_CAP_MACSEC(mdev, log_max_macsec_offload)) |
1339 | return false; |
1340 | |
1341 | if (!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, macsec_decrypt) || |
1342 | !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_remove_macsec)) |
1343 | return false; |
1344 | |
1345 | if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, macsec_encrypt) || |
1346 | !MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_macsec)) |
1347 | return false; |
1348 | |
1349 | if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_encrypt) && |
1350 | !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_encrypt)) |
1351 | return false; |
1352 | |
1353 | if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_decrypt) && |
1354 | !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_decrypt)) |
1355 | return false; |
1356 | |
1357 | return true; |
1358 | } |
1359 | |
1360 | #define NIC_RDMA_BOTH_DIRS_CAPS (MLX5_FT_NIC_RX_2_NIC_RX_RDMA | MLX5_FT_NIC_TX_RDMA_2_NIC_TX) |
1361 | |
1362 | static inline bool mlx5_is_macsec_roce_supported(struct mlx5_core_dev *mdev) |
1363 | { |
1364 | if (((MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & |
1365 | NIC_RDMA_BOTH_DIRS_CAPS) != NIC_RDMA_BOTH_DIRS_CAPS) || |
1366 | !MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, max_modify_header_actions) || |
1367 | !mlx5e_is_macsec_device(mdev) || !mdev->macsec_fs) |
1368 | return false; |
1369 | |
1370 | return true; |
1371 | } |
1372 | #endif |
1373 | |
1374 | enum { |
1375 | MLX5_OCTWORD = 16, |
1376 | }; |
1377 | |
1378 | struct msi_map mlx5_msix_alloc(struct mlx5_core_dev *dev, |
1379 | irqreturn_t (*handler)(int, void *), |
1380 | const struct irq_affinity_desc *affdesc, |
1381 | const char *name); |
1382 | void mlx5_msix_free(struct mlx5_core_dev *dev, struct msi_map map); |
1383 | |
1384 | #endif /* MLX5_DRIVER_H */ |
1385 | |