/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 */

#ifndef _MLX5_ESWITCH_
#define _MLX5_ESWITCH_

#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <net/devlink.h>

#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)

enum {
	MLX5_ESWITCH_LEGACY,
	MLX5_ESWITCH_OFFLOADS
};

enum {
	REP_ETH,
	REP_IB,
	NUM_REP_TYPES,
};

enum {
	REP_UNREGISTERED,
	REP_REGISTERED,
	REP_LOADED,
};

enum mlx5_switchdev_event {
	MLX5_SWITCHDEV_EVENT_PAIR,
	MLX5_SWITCHDEV_EVENT_UNPAIR,
};

struct mlx5_eswitch_rep;
struct mlx5_eswitch_rep_ops {
	int (*load)(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep);
	void (*unload)(struct mlx5_eswitch_rep *rep);
	void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep);
	int (*event)(struct mlx5_eswitch *esw,
		     struct mlx5_eswitch_rep *rep,
		     enum mlx5_switchdev_event event,
		     void *data);
};

struct mlx5_eswitch_rep_data {
	void *priv;
	atomic_t state;
};

struct mlx5_eswitch_rep {
	struct mlx5_eswitch_rep_data rep_data[NUM_REP_TYPES];
	u16 vport;
	u16 vlan;
	/* Only the IB rep uses vport_index */
	u16 vport_index;
	u32 vlan_refcount;
	struct mlx5_eswitch *esw;
};

void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      const struct mlx5_eswitch_rep_ops *ops,
				      u8 rep_type);
void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type);
void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 u16 vport_num,
				 u8 rep_type);
struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						u16 vport_num);
void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type);
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
				    struct mlx5_eswitch *from_esw,
				    struct mlx5_eswitch_rep *rep, u32 sqn);
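
/*
 * Illustrative registration flow (a sketch only; the example_* identifiers
 * below are hypothetical and not part of this API): a rep type provides its
 * ops once per eswitch, and the eswitch core then loads/unloads the rep per
 * vport through those callbacks:
 *
 *	static const struct mlx5_eswitch_rep_ops example_rep_ops = {
 *		.load		= example_rep_load,
 *		.unload		= example_rep_unload,
 *		.get_proto_dev	= example_rep_get_proto_dev,
 *	};
 *
 *	mlx5_eswitch_register_vport_reps(esw, &example_rep_ops, REP_ETH);
 *	...
 *	mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
 */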

#ifdef CONFIG_MLX5_ESWITCH
enum devlink_eswitch_encap_mode
mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev);

bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw);
bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw);

/* Reg C0 usage:
 * Reg C0 = < ESW_PFNUM_BITS(4) | ESW_VPORT_BITS(12) | ESW_REG_C0_OBJ(16) >
 *
 * The highest 4 bits of reg c0 are the PF_NUM (range 0-15), the next 12 bits
 * are a unique non-zero vport id (range 1-4095), and the lowest 16 bits are
 * left for user data objects managed by a common mapping context.
 * PFNUM + VPORT comprise the SOURCE_PORT matching.
 */
#define ESW_VPORT_BITS 12
#define ESW_PFNUM_BITS 4
#define ESW_SOURCE_PORT_METADATA_BITS (ESW_PFNUM_BITS + ESW_VPORT_BITS)
#define ESW_SOURCE_PORT_METADATA_OFFSET (32 - ESW_SOURCE_PORT_METADATA_BITS)
#define ESW_REG_C0_USER_DATA_METADATA_BITS (32 - ESW_SOURCE_PORT_METADATA_BITS)
#define ESW_REG_C0_USER_DATA_METADATA_MASK GENMASK(ESW_REG_C0_USER_DATA_METADATA_BITS - 1, 0)

static inline u32 mlx5_eswitch_get_vport_metadata_mask(void)
{
	return GENMASK(31, 32 - ESW_SOURCE_PORT_METADATA_BITS);
}

u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
					      u16 vport_num);
u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
					    u16 vport_num);
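
/*
 * Illustrative use (a sketch only, assuming the flow steering definitions
 * from linux/mlx5/fs.h and a caller-allocated struct mlx5_flow_spec named
 * 'spec'): matching on the source port metadata carried in reg c0:
 *
 *	void *misc2;
 *
 *	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
 *	misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
 *			     misc_parameters_2);
 *	MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
 *		 mlx5_eswitch_get_vport_metadata_mask());
 *	misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value,
 *			     misc_parameters_2);
 *	MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
 *		 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
 */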

/* Reg C1 usage:
 * Reg C1 = < Reserved(1) | ESW_TUN_ID(12) | ESW_TUN_OPTS(11) | ESW_ZONE_ID(8) >
 *
 * The highest bit is reserved as a marker bit for other offloads, the next
 * 12 bits are the encapsulation tunnel id, the next 11 bits are the
 * encapsulation tunnel options, and the lowest 8 bits are used for the
 * zone id.
 *
 * The zone id is used to restore the CT flow when a packet misses on a chain.
 *
 * The tunnel id and options are used together to restore the tunnel info
 * metadata on miss and to support inner header rewrite by means of implicit
 * chain 0 flows.
 */
#define ESW_RESERVED_BITS 1
#define ESW_ZONE_ID_BITS 8
#define ESW_TUN_OPTS_BITS 11
#define ESW_TUN_ID_BITS 12
#define ESW_TUN_OPTS_OFFSET ESW_ZONE_ID_BITS
#define ESW_TUN_OFFSET ESW_TUN_OPTS_OFFSET
#define ESW_ZONE_ID_MASK GENMASK(ESW_ZONE_ID_BITS - 1, 0)
#define ESW_TUN_OPTS_MASK GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, ESW_TUN_OPTS_OFFSET)
#define ESW_TUN_MASK GENMASK(31 - ESW_RESERVED_BITS, ESW_TUN_OFFSET)
#define ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT 0 /* 0 is not a valid tunnel id */
#define ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT
/* 0x7FF is a reserved mapping */
#define ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT GENMASK(ESW_TUN_OPTS_BITS - 1, 0)
#define ESW_TUN_SLOW_TABLE_GOTO_VPORT ((ESW_TUN_ID_SLOW_TABLE_GOTO_VPORT << ESW_TUN_OPTS_BITS) | \
				       ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT)
#define ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK ESW_TUN_OPTS_MASK
/* 0x7FE is a reserved mapping for the bridge ingress push vlan mark */
#define ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN (ESW_TUN_OPTS_SLOW_TABLE_GOTO_VPORT - 1)
#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN ((ESW_TUN_ID_BRIDGE_INGRESS_PUSH_VLAN << \
					   ESW_TUN_OPTS_BITS) | \
					  ESW_TUN_OPTS_BRIDGE_INGRESS_PUSH_VLAN)
#define ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK \
	GENMASK(31 - ESW_TUN_ID_BITS - ESW_RESERVED_BITS, \
		ESW_TUN_OPTS_OFFSET + 1)

/* Reuse tun_opts for the mapped IPsec obj id when tun_id is 0 (invalid) */
#define ESW_IPSEC_RX_MAPPED_ID_MASK GENMASK(ESW_TUN_OPTS_BITS - 1, 0)
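
/*
 * Illustrative decode helpers (a sketch only; these are not part of the
 * driver API): extract the CT zone id and the mapped tunnel options id from
 * a reg c1 value according to the layout described above.
 */
static inline u8 mlx5_eswitch_example_reg_c1_zone_id(u32 reg_c1)
{
	/* The lowest 8 bits carry the zone id used to restore CT state on miss. */
	return reg_c1 & ESW_ZONE_ID_MASK;
}

static inline u32 mlx5_eswitch_example_reg_c1_tun_opts(u32 reg_c1)
{
	/* The 11 bits above the zone id carry the mapped tunnel options id. */
	return (reg_c1 & ESW_TUN_OPTS_MASK) >> ESW_TUN_OPTS_OFFSET;
}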

u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev);
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev);
struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw);

#else /* CONFIG_MLX5_ESWITCH */

static inline u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev)
{
	return MLX5_ESWITCH_LEGACY;
}

static inline enum devlink_eswitch_encap_mode
mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
{
	return DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}

static inline bool
mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
{
	return false;
}

static inline bool
mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
	return false;
}

static inline u32
mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, u16 vport_num)
{
	return 0;
}

static inline u32
mlx5_eswitch_get_vport_metadata_mask(void)
{
	return 0;
}

static inline u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
{
	return 0;
}

static inline struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw)
{
	return NULL;
}

#endif /* CONFIG_MLX5_ESWITCH */

static inline bool is_mdev_legacy_mode(struct mlx5_core_dev *dev)
{
	return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_LEGACY;
}

static inline bool is_mdev_switchdev_mode(struct mlx5_core_dev *dev)
{
	return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS;
}

/* The returned vport number is valid only when the dev is an eswitch manager. */
static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_ECPF : MLX5_VPORT_PF;
}
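
/*
 * Example (illustrative only; 'mdev' and 'manager_vport' are hypothetical
 * locals): callers typically guard the lookup with the eswitch manager
 * capability, since the returned value is meaningless otherwise:
 *
 *	if (MLX5_ESWITCH_MANAGER(mdev))
 *		manager_vport = mlx5_eswitch_manager_vport(mdev);
 */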

#endif /* _MLX5_ESWITCH_ */