/*
 * Copyright (c) 2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_ESWITCH_H__
#define __MLX5_ESWITCH_H__

#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/xarray.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
#include "lib/fs_chains.h"
#include "sf/sf.h"
#include "en/tc_ct.h"
#include "en/tc/sample.h"

enum mlx5_mapped_obj_type {
	MLX5_MAPPED_OBJ_CHAIN,
	MLX5_MAPPED_OBJ_SAMPLE,
	MLX5_MAPPED_OBJ_INT_PORT_METADATA,
	MLX5_MAPPED_OBJ_ACT_MISS,
};

struct mlx5_mapped_obj {
	enum mlx5_mapped_obj_type type;
	union {
		u32 chain;
		u64 act_miss_cookie;
		struct {
			u32 group_id;
			u32 rate;
			u32 trunc_size;
			u32 tunnel_id;
		} sample;
		u32 int_port_metadata;
	};
};

#ifdef CONFIG_MLX5_ESWITCH

#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15

#define MLX5_MAX_UC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))

#define MLX5_MAX_MC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))

#define mlx5_esw_has_fwd_fdb(dev) \
	MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)

#define esw_chains(esw) \
	((esw)->fdb_table.offloads.esw_chains_priv)

enum {
	MAPPING_TYPE_CHAIN,
	MAPPING_TYPE_TUNNEL,
	MAPPING_TYPE_TUNNEL_ENC_OPTS,
	MAPPING_TYPE_LABELS,
	MAPPING_TYPE_ZONE,
	MAPPING_TYPE_INT_PORT,
};

struct vport_ingress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allow_rule;
	struct {
		struct mlx5_flow_group *allow_spoofchk_only_grp;
		struct mlx5_flow_group *allow_untagged_spoofchk_grp;
		struct mlx5_flow_group *allow_untagged_only_grp;
		struct mlx5_flow_group *drop_grp;
		struct mlx5_flow_handle *drop_rule;
		struct mlx5_fc *drop_counter;
	} legacy;
	struct {
		/* Optional group to add an FTE to do internal priority
		 * tagging on ingress packets.
		 */
		struct mlx5_flow_group *metadata_prio_tag_grp;
		/* Group to add default match-all FTE entry to tag ingress
		 * packet with metadata.
		 */
		struct mlx5_flow_group *metadata_allmatch_grp;
		/* Optional group to add a drop all rule */
		struct mlx5_flow_group *drop_grp;
		struct mlx5_modify_hdr *modify_metadata;
		struct mlx5_flow_handle *modify_metadata_rule;
		struct mlx5_flow_handle *drop_rule;
	} offloads;
};

enum vport_egress_acl_type {
	VPORT_EGRESS_ACL_TYPE_DEFAULT,
	VPORT_EGRESS_ACL_TYPE_SHARED_FDB,
};

struct vport_egress {
	struct mlx5_flow_table *acl;
	enum vport_egress_acl_type type;
	struct mlx5_flow_handle *allowed_vlan;
	struct mlx5_flow_group *vlan_grp;
	union {
		struct {
			struct mlx5_flow_group *drop_grp;
			struct mlx5_flow_handle *drop_rule;
			struct mlx5_fc *drop_counter;
		} legacy;
		struct {
			struct mlx5_flow_group *fwd_grp;
			struct mlx5_flow_handle *fwd_rule;
			struct xarray bounce_rules;
			struct mlx5_flow_group *bounce_grp;
		} offloads;
	};
};

struct mlx5_vport_drop_stats {
	u64 rx_dropped;
	u64 tx_dropped;
};

struct mlx5_vport_info {
	u8 mac[ETH_ALEN];
	u16 vlan;
	u64 node_guid;
	int link_state;
	u8 qos;
	u8 spoofchk: 1;
	u8 trusted: 1;
	u8 roce_enabled: 1;
	u8 mig_enabled: 1;
	u8 ipsec_crypto_enabled: 1;
	u8 ipsec_packet_enabled: 1;
};

/* Vport context events */
enum mlx5_eswitch_vport_event {
	MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
	MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
	MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};

struct mlx5_vport;

struct mlx5_devlink_port {
	struct devlink_port dl_port;
	struct mlx5_vport *vport;
};

static inline void mlx5_devlink_port_init(struct mlx5_devlink_port *dl_port,
					  struct mlx5_vport *vport)
{
	dl_port->vport = vport;
}

static inline struct mlx5_devlink_port *mlx5_devlink_port_get(struct devlink_port *dl_port)
{
	return container_of(dl_port, struct mlx5_devlink_port, dl_port);
}

static inline struct mlx5_vport *mlx5_devlink_port_vport_get(struct devlink_port *dl_port)
{
	return mlx5_devlink_port_get(dl_port)->vport;
}
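
/* Usage sketch (illustrative, not part of the original file): devlink port
 * function callbacks such as the mlx5_devlink_port_fn_* handlers declared
 * below receive a generic devlink_port and can recover the owning vport
 * through the container_of-based helpers above:
 *
 *	struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
 */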

struct mlx5_vport {
	struct mlx5_core_dev *dev;
	struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct mlx5_flow_handle *promisc_rule;
	struct mlx5_flow_handle *allmulti_rule;
	struct work_struct vport_change_handler;

	struct vport_ingress ingress;
	struct vport_egress egress;
	u32 default_metadata;
	u32 metadata;

	struct mlx5_vport_info info;

	/* Protected with the E-Switch qos domain lock. */
	struct {
		/* Vport scheduling element node. */
		struct mlx5_esw_sched_node *sched_node;
	} qos;

	u16 vport;
	bool enabled;
	bool max_eqs_set;
	enum mlx5_eswitch_vport_event enabled_events;
	int index;
	struct mlx5_devlink_port *dl_port;
};

struct mlx5_esw_indir_table;

struct mlx5_eswitch_fdb {
	union {
		struct legacy_fdb {
			struct mlx5_flow_table *fdb;
			struct mlx5_flow_group *addr_grp;
			struct mlx5_flow_group *allmulti_grp;
			struct mlx5_flow_group *promisc_grp;
			struct mlx5_flow_table *vepa_fdb;
			struct mlx5_flow_handle *vepa_uplink_rule;
			struct mlx5_flow_handle *vepa_star_rule;
		} legacy;

		struct offloads_fdb {
			struct mlx5_flow_namespace *ns;
			struct mlx5_flow_table *tc_miss_table;
			struct mlx5_flow_table *slow_fdb;
			struct mlx5_flow_group *send_to_vport_grp;
			struct mlx5_flow_group *send_to_vport_meta_grp;
			struct mlx5_flow_group *peer_miss_grp;
			struct mlx5_flow_handle **peer_miss_rules[MLX5_MAX_PORTS];
			struct mlx5_flow_group *miss_grp;
			struct mlx5_flow_handle **send_to_vport_meta_rules;
			struct mlx5_flow_handle *miss_rule_uni;
			struct mlx5_flow_handle *miss_rule_multi;

			struct mlx5_fs_chains *esw_chains_priv;
			struct {
				DECLARE_HASHTABLE(table, 8);
				/* Protects vports.table */
				struct mutex lock;
			} vports;

			struct mlx5_esw_indir_table *indir;

		} offloads;
	};
	u32 flags;
};

struct mlx5_esw_offload {
	struct mlx5_flow_table *ft_offloads_restore;
	struct mlx5_flow_group *restore_group;
	struct mlx5_modify_hdr *restore_copy_hdr_id;
	struct mapping_ctx *reg_c0_obj_pool;

	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_group *vport_rx_group;
	struct mlx5_flow_group *vport_rx_drop_group;
	struct mlx5_flow_handle *vport_rx_drop_rule;
	struct mlx5_flow_table *ft_ipsec_tx_pol;
	struct xarray vport_reps;
	struct list_head peer_flows[MLX5_MAX_PORTS];
	struct mutex peer_mutex;
	struct mutex encap_tbl_lock; /* protects encap_tbl */
	DECLARE_HASHTABLE(encap_tbl, 8);
	struct mutex decap_tbl_lock; /* protects decap_tbl */
	DECLARE_HASHTABLE(decap_tbl, 8);
	struct mod_hdr_tbl mod_hdr;
	DECLARE_HASHTABLE(termtbl_tbl, 8);
	struct mutex termtbl_mutex; /* protects termtbl hash */
	struct xarray vhca_map;
	const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
	u8 inline_mode;
	atomic64_t num_flows;
	u64 num_block_encap;
	u64 num_block_mode;
	enum devlink_eswitch_encap_mode encap;
	struct ida vport_metadata_ida;
	unsigned int host_number; /* ECPF supports one external host */
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node node;
	struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
	u32 refcnt;
};

struct mlx5_host_work {
	struct work_struct work;
	struct mlx5_eswitch *esw;
};

struct mlx5_esw_functions {
	struct mlx5_nb nb;
	u16 num_vfs;
	u16 num_ec_vfs;
};

enum {
	MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
	MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
	MLX5_ESWITCH_VPORT_ACL_NS_CREATED = BIT(2),
};

struct mlx5_esw_bridge_offloads;

enum {
	MLX5_ESW_FDB_CREATED = BIT(0),
};

struct dentry;
struct mlx5_qos_domain;

struct mlx5_eswitch {
	struct mlx5_core_dev *dev;
	struct mlx5_nb nb;
	struct mlx5_eswitch_fdb fdb_table;
	/* legacy data structures */
	struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
	struct esw_mc_addr mc_promisc;
	/* end of legacy */
	struct dentry *debugfs_root;
	struct workqueue_struct *work_queue;
	struct xarray vports;
	u32 flags;
	int total_vports;
	int enabled_vports;
	/* Synchronize between vport change events
	 * and async SRIOV admin state changes
	 */
	struct mutex state_lock;

	/* Protects eswitch mode change that occurs via one or more
	 * user commands, i.e. sriov state change, devlink commands.
	 */
	struct rw_semaphore mode_lock;
	atomic64_t user_count;

	/* Protected with the E-Switch qos domain lock. */
	struct {
		/* Initially 0, meaning no QoS users and QoS is disabled. */
		refcount_t refcnt;
		u32 root_tsar_ix;
		struct mlx5_qos_domain *domain;
		/* Contains all vports with QoS enabled but no explicit node.
		 * Cannot be NULL if QoS is enabled, but may be a fake node
		 * referencing the root TSAR if the esw doesn't support nodes.
		 */
		struct mlx5_esw_sched_node *node0;
	} qos;

	struct mlx5_esw_bridge_offloads *br_offloads;
	struct mlx5_esw_offload offloads;
	int mode;
	u16 manager_vport;
	u16 first_host_vport;
	u8 num_peers;
	struct mlx5_esw_functions esw_funcs;
	struct {
		u32 large_group_num;
	} params;
	struct blocking_notifier_head n_head;
	struct xarray paired;
	struct mlx5_devcom_comp_dev *devcom;
	u16 enabled_ipsec_vf_count;
	bool eswitch_operation_in_progress;
};

void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup(struct mlx5_eswitch *esw);
int esw_offloads_init(struct mlx5_eswitch *esw);

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule);

bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);

int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps);

/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);

#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key);
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw);
bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 u16 vport, int link_state);
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				u16 vport, u16 vlan, u8 qos);
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    u16 vport, bool spoofchk);
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 u16 vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate);
int mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_sched_node *node,
				     struct netlink_ext_ack *extack);
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  u16 vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport,
				 struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);

int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in);

struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
struct mlx5_termtbl_handle;

bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
			      struct mlx5_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_destination *dest,
			      int num_dest);

void
mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
			 struct mlx5_termtbl_handle *tt);

void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr);

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest);

enum {
	SET_VLAN_STRIP = BIT(0),
	SET_VLAN_INSERT = BIT(1)
};

enum mlx5_flow_match_level {
	MLX5_MATCH_NONE = MLX5_INLINE_MODE_NONE,
	MLX5_MATCH_L2 = MLX5_INLINE_MODE_L2,
	MLX5_MATCH_L3 = MLX5_INLINE_MODE_IP,
	MLX5_MATCH_L4 = MLX5_INLINE_MODE_TCP_UDP,
};

/* current maximum for flow based vport multicasting */
#define MLX5_MAX_FLOW_FWD_VPORTS 32

enum {
	MLX5_ESW_DEST_ENCAP = BIT(0),
	MLX5_ESW_DEST_ENCAP_VALID = BIT(1),
	MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE = BIT(2),
};

struct mlx5_esw_flow_attr {
	struct mlx5_eswitch_rep *in_rep;
	struct mlx5_core_dev *in_mdev;
	struct mlx5_core_dev *counter_dev;
	struct mlx5e_tc_int_port *dest_int_port;
	struct mlx5e_tc_int_port *int_port;

	int split_count;
	int out_count;

	__be16 vlan_proto[MLX5_FS_VLAN_DEPTH];
	u16 vlan_vid[MLX5_FS_VLAN_DEPTH];
	u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
	u8 total_vlan;
	struct {
		u32 flags;
		bool vport_valid;
		u16 vport;
		struct mlx5_pkt_reformat *pkt_reformat;
		struct mlx5_core_dev *mdev;
		struct mlx5_termtbl_handle *termtbl;
		int src_port_rewrite_act_id;
	} dests[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5_rx_tun_attr *rx_tun_attr;
	struct ethhdr eth;
	struct mlx5_pkt_reformat *decap_pkt_reformat;
};

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap);
int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
				     u8 *hw_addr, int *hw_addr_len,
				     struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port,
				     const u8 *hw_addr, int hw_addr_len,
				     struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
					struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
					struct netlink_ext_ack *extack);
#ifdef CONFIG_XFRM_OFFLOAD
int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled,
					  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable,
					  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled,
					  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port, bool enable,
					  struct netlink_ext_ack *extack);
#endif /* CONFIG_XFRM_OFFLOAD */
int mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port,
					u32 *max_io_eqs,
					struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port,
					u32 max_io_eqs,
					struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_max_io_eqs_set_sf_default(struct devlink_port *port,
						   struct netlink_ext_ack *extack);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);

int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  u16 vport, u16 vlan, u8 qos, u8 set_flags);

static inline bool esw_vst_mode_is_steering(struct mlx5_eswitch *esw)
{
	return (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, pop_vlan) &&
		MLX5_CAP_ESW_INGRESS_ACL(esw->dev, push_vlan));
}

static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
						       u8 vlan_depth)
{
	bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
		   MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);

	if (vlan_depth == 1)
		return ret;

	return ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) &&
		MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}

bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1);

const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define esw_info(__dev, format, ...) \
	dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_warn(__dev, format, ...) \
	dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_debug(dev, format, ...) \
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)

static inline bool mlx5_esw_allowed(const struct mlx5_eswitch *esw)
{
	return esw && MLX5_ESWITCH_MANAGER(esw->dev);
}

static inline bool
mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return esw->manager_vport == vport_num;
}

static inline bool mlx5_esw_is_owner(struct mlx5_eswitch *esw, u16 vport_num,
				     u16 esw_owner_vhca_id)
{
	return esw_owner_vhca_id == MLX5_CAP_GEN(esw->dev, vhca_id) ||
		(vport_num == MLX5_VPORT_UPLINK && mlx5_lag_is_master(esw->dev));
}

static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}

static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev);
}

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
}

static inline u16
mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
{
	return dl_port_index & 0xffff;
}
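
/* Worked example (illustrative, values hypothetical): the port index packs
 * the vhca_id into the high 16 bits and the vport number into the low 16
 * bits, so encoding and decoding are inverses of each other:
 *
 *	vhca_id = 0x12, vport_num = 0x34
 *	index = (0x12 << 16) | 0x34   -> 0x00120034
 *	vport = 0x00120034 & 0xffff   -> 0x34
 */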

static inline bool mlx5_esw_is_fdb_created(struct mlx5_eswitch *esw)
{
	return esw->fdb_table.flags & MLX5_ESW_FDB_CREATED;
}

/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

/* Each mark identifies an eswitch vport type.
 * MLX5_ESW_VPT_HOST_FN is used to identify both PF and VF ports using
 * a single mark.
 * MLX5_ESW_VPT_VF identifies a SRIOV VF vport.
 * MLX5_ESW_VPT_SF identifies an SF vport.
 */
#define MLX5_ESW_VPT_HOST_FN XA_MARK_0
#define MLX5_ESW_VPT_VF XA_MARK_1
#define MLX5_ESW_VPT_SF XA_MARK_2

/* The vport iterators are valid only after vports are initialized in
 * mlx5_eswitch_init. Borrowed the idea from xa_for_each_marked() but with
 * support for a desired last element.
 */

#define mlx5_esw_for_each_vport(esw, index, vport) \
	xa_for_each(&((esw)->vports), index, vport)

#define mlx5_esw_for_each_entry_marked(xa, index, entry, last, filter) \
	for (index = 0, entry = xa_find(xa, &index, last, filter); \
	     entry; entry = xa_find_after(xa, &index, last, filter))

#define mlx5_esw_for_each_vport_marked(esw, index, vport, last, filter) \
	mlx5_esw_for_each_entry_marked(&((esw)->vports), index, vport, last, filter)

#define mlx5_esw_for_each_vf_vport(esw, index, vport, last) \
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_VF)

#define mlx5_esw_for_each_host_func_vport(esw, index, vport, last) \
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_HOST_FN)
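
/* Usage sketch (illustrative, not part of the original file): walk all VF
 * vports up to the current VF count using the marked iterator above.
 *
 *	unsigned long i;
 *	struct mlx5_vport *vport;
 *
 *	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
 *		if (vport->enabled)
 *			esw_debug(esw->dev, "vport(%d)\n", vport->vport);
 *	}
 */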

/* This macro should only be used if EC SRIOV is enabled.
 *
 * Because there were no more marks available on the xarray, this uses a
 * for_each_range approach. The range is only valid when EC SRIOV is enabled.
 */
#define mlx5_esw_for_each_ec_vf_vport(esw, index, vport, last) \
	xa_for_each_range(&((esw)->vports), \
			  index, \
			  vport, \
			  MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base), \
			  MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\
			  (last) - 1)
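
/* Worked example (illustrative, cap value hypothetical): with an
 * ec_vf_vport_base of 0x100 and last = 4, the range above walks vport
 * indices 0x100 through 0x103 inclusive.
 */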

#define mlx5_esw_for_each_rep(esw, i, rep) \
	xa_for_each(&((esw)->offloads.vport_reps), i, rep)

struct mlx5_eswitch *__must_check
mlx5_devlink_eswitch_get(struct devlink *devlink);

struct mlx5_eswitch *mlx5_devlink_eswitch_nocheck_get(struct devlink *devlink);

struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);

bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_eswitch_is_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);

int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);

int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
			  enum mlx5_eswitch_vport_event enabled_events);
void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport);
void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport);

struct esw_vport_tbl_namespace {
	int max_fte;
	int max_num_groups;
	u32 flags;
};

struct mlx5_vport_tbl_attr {
	u32 chain;
	u16 prio;
	u16 vport;
	struct esw_vport_tbl_namespace *vport_ns;
};

struct mlx5_flow_table *
mlx5_esw_vporttbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);
void
mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);

struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);

void mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					 u32 *flow_group_in,
					 int match_params);

void mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
				   u16 vport,
				   struct mlx5_flow_spec *spec);

int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
				  struct mlx5_devlink_port *dl_port,
				  u32 controller, u32 sfnum);
void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_eswitch_load_sf_vport(struct mlx5_eswitch *esw, u16 vport_num,
			       enum mlx5_eswitch_vport_event enabled_events,
			       struct mlx5_devlink_port *dl_port, u32 controller, u32 sfnum);
void mlx5_eswitch_unload_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);

int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport);
void mlx5_esw_offloads_pf_vf_devlink_port_cleanup(struct mlx5_eswitch *esw,
						  struct mlx5_vport *vport);

int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					   struct mlx5_devlink_port *dl_port,
					   u32 controller, u32 sfnum);
void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_vport *vport);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);

int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num);

/**
 * struct mlx5_esw_event_info - Indicates eswitch mode changed/changing.
 *
 * @new_mode: New mode of eswitch.
 */
struct mlx5_esw_event_info {
	u16 new_mode;
};

int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n);
void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n);
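
/* Usage sketch (illustrative, not part of the original file): a listener for
 * eswitch mode changes. The callback and notifier_block names here are
 * hypothetical; MLX5_ESWITCH_OFFLOADS comes from linux/mlx5/eswitch.h.
 *
 *	static int my_esw_mode_event(struct notifier_block *nb,
 *				     unsigned long event, void *data)
 *	{
 *		struct mlx5_esw_event_info *info = data;
 *
 *		if (info->new_mode == MLX5_ESWITCH_OFFLOADS)
 *			pr_debug("eswitch entering switchdev mode\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_esw_mode_event,
 *	};
 *
 *	err = mlx5_esw_event_notifier_register(esw, &my_nb);
 */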

bool mlx5_esw_hold(struct mlx5_core_dev *dev);
void mlx5_esw_release(struct mlx5_core_dev *dev);
void mlx5_esw_get(struct mlx5_core_dev *dev);
void mlx5_esw_put(struct mlx5_core_dev *dev);
int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
int mlx5_esw_lock(struct mlx5_eswitch *esw);
void mlx5_esw_unlock(struct mlx5_eswitch *esw);

void esw_vport_change_handle_locked(struct mlx5_vport *vport);

bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller);

int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					     struct mlx5_eswitch *slave_esw, int max_slaves);
void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					      struct mlx5_eswitch *slave_esw);
int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw);

bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);

int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev);

static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw)
{
	if (mlx5_esw_allowed(esw))
		return esw->esw_funcs.num_vfs;

	return 0;
}

static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw)
{
	if (mlx5_esw_allowed(esw))
		return esw->num_peers;
	return 0;
}

static inline struct mlx5_flow_table *
mlx5_eswitch_get_slow_fdb(struct mlx5_eswitch *esw)
{
	return esw->fdb_table.offloads.slow_fdb;
}

int mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
				    struct mlx5_esw_flow_attr *esw_attr, int attr_idx);
bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev);
bool mlx5_esw_ipsec_vf_offload_supported(struct mlx5_core_dev *dev);
int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev,
				  struct mlx5_vport *vport);
int mlx5_esw_ipsec_vf_crypto_offload_supported(struct mlx5_core_dev *dev,
					       u16 vport_num);
int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					 bool enable);
int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					 bool enable);
int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
					       u16 vport_num);
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key) {}
static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
static inline bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw) { return false; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return vport_num;
}

static inline int
mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw, int max_slaves)
{
	return 0;
}

static inline void
mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw) {}

static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw) { return 0; }

static inline int
mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
{
	return 0;
}

static inline bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
{
	return true;
}

static inline void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
{
}

static inline int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev) {}
static inline bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev)
{
	return false;
}

static inline void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev) {}
#endif /* CONFIG_MLX5_ESWITCH */

#endif /* __MLX5_ESWITCH_H__ */