/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_CORE_H__
#define __MLX5_CORE_H__

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/if_link.h>
#include <linux/firmware.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/driver.h>
#include "lib/devcom.h"

extern uint mlx5_core_debug_mask;

#define mlx5_core_dbg(__dev, format, ...) \
	dev_dbg((__dev)->device, "%s:%d:(pid %d): " format, \
		__func__, __LINE__, current->pid, \
		##__VA_ARGS__)

#define mlx5_core_dbg_once(__dev, format, ...) \
	dev_dbg_once((__dev)->device, \
		     "%s:%d:(pid %d): " format, \
		     __func__, __LINE__, current->pid, \
		     ##__VA_ARGS__)

#define mlx5_core_dbg_mask(__dev, mask, format, ...) \
	do { \
		if ((mask) & mlx5_core_debug_mask) \
			mlx5_core_dbg(__dev, format, ##__VA_ARGS__); \
	} while (0)

#define mlx5_core_err(__dev, format, ...) \
	dev_err((__dev)->device, "%s:%d:(pid %d): " format, \
		__func__, __LINE__, current->pid, \
		##__VA_ARGS__)

#define mlx5_core_err_rl(__dev, format, ...) \
	dev_err_ratelimited((__dev)->device, \
			    "%s:%d:(pid %d): " format, \
			    __func__, __LINE__, current->pid, \
			    ##__VA_ARGS__)

#define mlx5_core_warn(__dev, format, ...) \
	dev_warn((__dev)->device, "%s:%d:(pid %d): " format, \
		 __func__, __LINE__, current->pid, \
		 ##__VA_ARGS__)

#define mlx5_core_warn_once(__dev, format, ...) \
	dev_warn_once((__dev)->device, "%s:%d:(pid %d): " format, \
		      __func__, __LINE__, current->pid, \
		      ##__VA_ARGS__)

#define mlx5_core_warn_rl(__dev, format, ...) \
	dev_warn_ratelimited((__dev)->device, \
			     "%s:%d:(pid %d): " format, \
			     __func__, __LINE__, current->pid, \
			     ##__VA_ARGS__)

#define mlx5_core_info(__dev, format, ...) \
	dev_info((__dev)->device, format, ##__VA_ARGS__)

#define mlx5_core_info_rl(__dev, format, ...) \
	dev_info_ratelimited((__dev)->device, \
			     "%s:%d:(pid %d): " format, \
			     __func__, __LINE__, current->pid, \
			     ##__VA_ARGS__)
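
/*
 * Illustrative usage only (not part of the original header): these helpers
 * take the mlx5_core_dev and a printf-style format; most of them prefix the
 * message with function, line and pid (mlx5_core_info() logs the format
 * as-is). Hypothetical call sites, with "err" and "delta" as placeholder
 * variables:
 *
 *	mlx5_core_err(dev, "failed to create EQ, err %d\n", err);
 *	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
 *			   "command took %llu ns\n", delta);
 */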

#define ACCESS_KEY_LEN 32
#define FT_ID_FT_TYPE_OFFSET 24

struct mlx5_cmd_allow_other_vhca_access_attr {
	u16 obj_type;
	u32 obj_id;
	u8 access_key[ACCESS_KEY_LEN];
};

struct mlx5_cmd_alias_obj_create_attr {
	u32 obj_id;
	u16 vhca_id;
	u16 obj_type;
	u8 access_key[ACCESS_KEY_LEN];
};

struct mlx5_port_eth_proto {
	u32 cap;
	u32 admin;
	u32 oper;
};

struct mlx5_module_eeprom_query_params {
	u16 size;
	u16 offset;
	u16 i2c_address;
	u32 page;
	u32 bank;
	u32 module_number;
};

struct mlx5_link_info {
	u32 speed;
	u32 lanes;
};
static inline void mlx5_printk(struct mlx5_core_dev *dev, int level, const char *format, ...)
{
	struct device *device = dev->device;
	struct va_format vaf;
	va_list args;

	if (WARN_ONCE(level < LOGLEVEL_EMERG || level > LOGLEVEL_DEBUG,
		      "Level %d is out of range, set to default level\n", level))
		level = LOGLEVEL_DEFAULT;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	dev_printk_emit(level, device, "%s %s: %pV", dev_driver_string(device), dev_name(device),
			&vaf);
	va_end(args);
}

#define mlx5_log(__dev, level, format, ...) \
	mlx5_printk(__dev, level, "%s:%d:(pid %d): " format, \
		    __func__, __LINE__, current->pid, \
		    ##__VA_ARGS__)
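
/*
 * Sketch only (assumption, not from the original header): mlx5_log() is the
 * variant to use when the severity is only known at runtime, e.g. with a
 * hypothetical "fatal" flag:
 *
 *	mlx5_log(dev, fatal ? LOGLEVEL_ERR : LOGLEVEL_WARNING,
 *		 "health sensor tripped\n");
 */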

static inline struct device *mlx5_core_dma_dev(struct mlx5_core_dev *dev)
{
	return &dev->pdev->dev;
}

enum {
	MLX5_CMD_DATA, /* print command payload only */
	MLX5_CMD_TIME, /* print command execution time */
};

enum {
	MLX5_DRIVER_STATUS_ABORTED = 0xfe,
	MLX5_DRIVER_SYND = 0xbadd00de,
};

enum mlx5_semaphore_space_address {
	MLX5_SEMAPHORE_SPACE_DOMAIN = 0xA,
	MLX5_SEMAPHORE_SW_RESET = 0x20,
};

#define MLX5_DEFAULT_PROF 2
#define MLX5_SF_PROF 3
#define MLX5_NUM_FW_CMD_THREADS 8
#define MLX5_DEV_MAX_WQS MLX5_NUM_FW_CMD_THREADS

static inline int mlx5_flexible_inlen(struct mlx5_core_dev *dev, size_t fixed,
				      size_t item_size, size_t num_items,
				      const char *func, int line)
{
	int inlen;

	if (fixed > INT_MAX || item_size > INT_MAX || num_items > INT_MAX) {
		mlx5_core_err(dev, "%s: %s:%d: input values too big: %zu + %zu * %zu\n",
			      __func__, func, line, fixed, item_size, num_items);
		return -ENOMEM;
	}

	if (check_mul_overflow((int)item_size, (int)num_items, &inlen)) {
		mlx5_core_err(dev, "%s: %s:%d: multiplication overflow: %zu + %zu * %zu\n",
			      __func__, func, line, fixed, item_size, num_items);
		return -ENOMEM;
	}

	if (check_add_overflow((int)fixed, inlen, &inlen)) {
		mlx5_core_err(dev, "%s: %s:%d: addition overflow: %zu + %zu * %zu\n",
			      __func__, func, line, fixed, item_size, num_items);
		return -ENOMEM;
	}

	return inlen;
}

#define MLX5_FLEXIBLE_INLEN(dev, fixed, item_size, num_items) \
	mlx5_flexible_inlen(dev, fixed, item_size, num_items, __func__, __LINE__)
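
/*
 * Illustrative sketch (hypothetical command and sizes, not from the original
 * header): MLX5_FLEXIBLE_INLEN() sizes a variable-length command buffer and
 * returns -ENOMEM on overflow, so callers check for a negative value before
 * allocating:
 *
 *	inlen = MLX5_FLEXIBLE_INLEN(dev, MLX5_ST_SZ_BYTES(create_rqt_in),
 *				    sizeof(u32), num_entries);
 *	if (inlen < 0)
 *		return inlen;
 *	in = kvzalloc(inlen, GFP_KERNEL);
 */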

int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
			    enum mlx5_cap_mode cap_mode);
int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num);
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
int mlx5_cmd_enable(struct mlx5_core_dev *dev);
void mlx5_cmd_disable(struct mlx5_core_dev *dev);
void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
			enum mlx5_cmdif_state cmdif_state);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, u32 *sw_owner_id);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev);
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force);
void mlx5_error_sw_reset(struct mlx5_core_dev *dev);
u32 mlx5_health_check_fatal_sensors(struct mlx5_core_dev *dev);
int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev);
void mlx5_disable_device(struct mlx5_core_dev *dev);
int mlx5_recover_device(struct mlx5_core_dev *dev);
int mlx5_sriov_init(struct mlx5_core_dev *dev);
void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
int mlx5_sriov_attach(struct mlx5_core_dev *dev);
void mlx5_sriov_detach(struct mlx5_core_dev *dev);
int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
void mlx5_sriov_disable(struct pci_dev *pdev, bool num_vf_change);
int mlx5_core_sriov_set_msix_vec_count(struct pci_dev *vf, int msix_vec_count);
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
bool mlx5_qos_element_type_supported(struct mlx5_core_dev *dev, int type, u8 hierarchy);
bool mlx5_qos_tsar_type_supported(struct mlx5_core_dev *dev, int type, u8 hierarchy);
int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
				       void *context, u32 *element_id);
int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
				       void *context, u32 element_id,
				       u32 modify_bitmask);
int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
					u32 element_id);
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages);

void mlx5_cmd_flush(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);

int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
			u8 access_reg_group);
int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcap, u8 feature_group,
			u8 access_reg_group);
int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
			u8 feature_group, u8 access_reg_group);
int mlx5_query_mpir_reg(struct mlx5_core_dev *dev, u32 *mpir);

void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, struct net_device *netdev);
void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev, struct net_device *netdev);
void mlx5_lag_add_mdev(struct mlx5_core_dev *dev);
void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev);
void mlx5_lag_disable_change(struct mlx5_core_dev *dev);
void mlx5_lag_enable_change(struct mlx5_core_dev *dev);

int mlx5_events_init(struct mlx5_core_dev *dev);
void mlx5_events_cleanup(struct mlx5_core_dev *dev);
void mlx5_events_start(struct mlx5_core_dev *dev);
void mlx5_events_stop(struct mlx5_core_dev *dev);

int mlx5_adev_idx_alloc(void);
void mlx5_adev_idx_free(int idx);
void mlx5_adev_cleanup(struct mlx5_core_dev *dev);
int mlx5_adev_init(struct mlx5_core_dev *dev);

int mlx5_attach_device(struct mlx5_core_dev *dev);
void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend);
int mlx5_register_device(struct mlx5_core_dev *dev);
void mlx5_unregister_device(struct mlx5_core_dev *dev);
void mlx5_dev_set_lightweight(struct mlx5_core_dev *dev);
bool mlx5_dev_is_lightweight(struct mlx5_core_dev *dev);

void mlx5_fw_reporters_create(struct mlx5_core_dev *dev);
int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size);
int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);

struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev);
void mlx5_dm_cleanup(struct mlx5_core_dev *dev);

void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
			       enum mlx5_port_status status);
int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
				 enum mlx5_port_status *status);
int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);

int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
int mlx5_query_port_pause(struct mlx5_core_dev *dev,
			  u32 *rx_pause, u32 *tx_pause);

int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
			u8 *pfc_en_rx);

int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
				  u16 stall_critical_watermark,
				  u16 stall_minor_watermark);
int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev,
				    u16 *stall_critical_watermark,
				    u16 *stall_minor_watermark);

int mlx5_max_tc(struct mlx5_core_dev *mdev);
int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
			    u8 prio, u8 *tc);
int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
			     u8 tc, u8 *tc_group);
int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
				u8 tc, u8 *bw_pct);
int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
				    u8 *max_bw_value,
				    u8 *max_bw_unit);
int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
				   u8 *max_bw_value,
				   u8 *max_bw_unit);
int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);

int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen);
int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen);
int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
			 bool *enabled);
int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
			     u16 offset, u16 size, u8 *data);
int
mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
				 struct mlx5_module_eeprom_query_params *params,
				 u8 *data);

int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state);
int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state);
int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio);
int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio);

int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
			      struct mlx5_port_eth_proto *eproto);
bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev);
const struct mlx5_link_info *mlx5_port_ptys2info(struct mlx5_core_dev *mdev,
						 u32 eth_proto_oper,
						 bool force_legacy);
u32 mlx5_port_info2linkmodes(struct mlx5_core_dev *mdev,
			     struct mlx5_link_info *info,
			     bool force_legacy);
int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);

#define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \
			    MLX5_CAP_GEN((mdev), pps_modify) && \
			    MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \
			    MLX5_CAP_MCAM_FEATURE((mdev), mtpps_enh_out_per_adj))
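
/*
 * Sketch only (hypothetical guard, not from the original header):
 * MLX5_PPS_CAP() bundles the PPS-related capability bits and is typically
 * used to gate PTP/PPS pin setup:
 *
 *	if (!MLX5_PPS_CAP(mdev))
 *		return;
 */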

int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw,
			struct netlink_ext_ack *extack);
int mlx5_fw_version_query(struct mlx5_core_dev *dev,
			  u32 *running_ver, u32 *stored_ver);

#ifdef CONFIG_MLX5_CORE_EN
int mlx5e_init(void);
void mlx5e_cleanup(void);
#else
static inline int mlx5e_init(void){ return 0; }
static inline void mlx5e_cleanup(void){}
#endif

static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
{
	return pci_num_vf(dev->pdev) ? true : false;
}

int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev);
static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev)
{
	int ret;

	mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
	ret = mlx5_rescan_drivers_locked(dev);
	mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
	return ret;
}

u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);

static inline bool mlx5_core_is_sf(const struct mlx5_core_dev *dev)
{
	return dev->coredev_type == MLX5_COREDEV_SF;
}

static inline struct auxiliary_device *
mlx5_sf_coredev_to_adev(struct mlx5_core_dev *mdev)
{
	return container_of(mdev->device, struct auxiliary_device, dev);
}

int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx);
void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
int mlx5_init_one(struct mlx5_core_dev *dev);
int mlx5_init_one_devl_locked(struct mlx5_core_dev *dev);
void mlx5_uninit_one(struct mlx5_core_dev *dev);
void mlx5_unload_one(struct mlx5_core_dev *dev, bool suspend);
void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev, bool suspend);
int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery);
int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery);
int mlx5_init_one_light(struct mlx5_core_dev *dev);
void mlx5_uninit_one_light(struct mlx5_core_dev *dev);
void mlx5_unload_one_light(struct mlx5_core_dev *dev);

int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap, u16 vport,
				  u16 opmod);
#define mlx5_vport_get_other_func_general_cap(dev, vport, out) \
	mlx5_vport_get_other_func_cap(dev, vport, out, MLX5_CAP_GENERAL)

int mlx5_vport_get_vhca_id(struct mlx5_core_dev *dev, u16 vport, u16 *vhca_id);

static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

	return MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
}

bool mlx5_eth_supported(struct mlx5_core_dev *dev);
bool mlx5_rdma_supported(struct mlx5_core_dev *dev);
bool mlx5_vnet_supported(struct mlx5_core_dev *dev);
bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev);
int mlx5_cmd_allow_other_vhca_access(struct mlx5_core_dev *dev,
				     struct mlx5_cmd_allow_other_vhca_access_attr *attr);
int mlx5_cmd_alias_obj_create(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_alias_obj_create_attr *alias_attr,
			      u32 *obj_id);
int mlx5_cmd_alias_obj_destroy(struct mlx5_core_dev *dev, u32 obj_id, u16 obj_type);

static inline u16 mlx5_core_ec_vf_vport_base(const struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN_2(dev, ec_vf_vport_base);
}

static inline u16 mlx5_core_ec_sriov_enabled(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf(dev) && mlx5_core_ec_vf_vport_base(dev);
}

static inline bool mlx5_core_is_ec_vf_vport(const struct mlx5_core_dev *dev, u16 vport_num)
{
	int base_vport = mlx5_core_ec_vf_vport_base(dev);
	int max_vport = base_vport + mlx5_core_max_ec_vfs(dev);

	if (!mlx5_core_ec_sriov_enabled(dev))
		return false;

	return (vport_num >= base_vport && vport_num < max_vport);
}

static inline int mlx5_vport_to_func_id(const struct mlx5_core_dev *dev, u16 vport, bool ec_vf_func)
{
	return ec_vf_func ? vport - mlx5_core_ec_vf_vport_base(dev) + 1
			  : vport;
}

static inline int mlx5_max_eq_cap_get(const struct mlx5_core_dev *dev)
{
	if (MLX5_CAP_GEN_2(dev, max_num_eqs_24b))
		return MLX5_CAP_GEN_2(dev, max_num_eqs_24b);

	if (MLX5_CAP_GEN(dev, max_num_eqs))
		return MLX5_CAP_GEN(dev, max_num_eqs);

	return 1 << MLX5_CAP_GEN(dev, log_max_eq);
}
#endif /* __MLX5_CORE_H__ */