1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2/*
3 * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
4 */
5
6#include <rdma/uverbs_ioctl.h>
7#include <rdma/mlx5_user_ioctl_cmds.h>
8#include <rdma/mlx5_user_ioctl_verbs.h>
9#include <linux/mlx5/driver.h>
10#include <linux/mlx5/eswitch.h>
11#include <linux/mlx5/vport.h>
12#include "mlx5_ib.h"
13#include "data_direct.h"
14
15#define UVERBS_MODULE_NAME mlx5_ib
16#include <rdma/uverbs_named_ioctl.h>
17
18static int UVERBS_HANDLER(MLX5_IB_METHOD_PD_QUERY)(
19 struct uverbs_attr_bundle *attrs)
20{
21 struct ib_pd *pd =
22 uverbs_attr_get_obj(attrs_bundle: attrs, idx: MLX5_IB_ATTR_QUERY_PD_HANDLE);
23 struct mlx5_ib_pd *mpd = to_mpd(ibpd: pd);
24
25 return uverbs_copy_to(attrs_bundle: attrs, idx: MLX5_IB_ATTR_QUERY_PD_RESP_PDN,
26 from: &mpd->pdn, size: sizeof(mpd->pdn));
27}
28
29static int fill_vport_icm_addr(struct mlx5_core_dev *mdev, u16 vport,
30 struct mlx5_ib_uapi_query_port *info)
31{
32 u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
33 u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
34 bool sw_owner_supp;
35 u64 icm_rx;
36 u64 icm_tx;
37 int err;
38
39 sw_owner_supp = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner) ||
40 MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner_v2);
41
42 if (vport == MLX5_VPORT_UPLINK) {
43 icm_rx = MLX5_CAP64_ESW_FLOWTABLE(mdev,
44 sw_steering_uplink_icm_address_rx);
45 icm_tx = MLX5_CAP64_ESW_FLOWTABLE(mdev,
46 sw_steering_uplink_icm_address_tx);
47 } else {
48 MLX5_SET(query_esw_vport_context_in, in, opcode,
49 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
50 MLX5_SET(query_esw_vport_context_in, in, vport_number, vport);
51 MLX5_SET(query_esw_vport_context_in, in, other_vport, true);
52
53 err = mlx5_cmd_exec_inout(mdev, query_esw_vport_context, in,
54 out);
55
56 if (err)
57 return err;
58
59 icm_rx = MLX5_GET64(
60 query_esw_vport_context_out, out,
61 esw_vport_context.sw_steering_vport_icm_address_rx);
62
63 icm_tx = MLX5_GET64(
64 query_esw_vport_context_out, out,
65 esw_vport_context.sw_steering_vport_icm_address_tx);
66 }
67
68 if (sw_owner_supp && icm_rx) {
69 info->vport_steering_icm_rx = icm_rx;
70 info->flags |=
71 MLX5_IB_UAPI_QUERY_PORT_VPORT_STEERING_ICM_RX;
72 }
73
74 if (sw_owner_supp && icm_tx) {
75 info->vport_steering_icm_tx = icm_tx;
76 info->flags |=
77 MLX5_IB_UAPI_QUERY_PORT_VPORT_STEERING_ICM_TX;
78 }
79
80 return 0;
81}
82
83static int fill_vport_vhca_id(struct mlx5_core_dev *mdev, u16 vport,
84 struct mlx5_ib_uapi_query_port *info)
85{
86 int err = mlx5_vport_get_vhca_id(dev: mdev, vport, vhca_id: &info->vport_vhca_id);
87
88 if (err)
89 return err;
90
91 info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID;
92
93 return 0;
94}
95
96static int fill_multiport_info(struct mlx5_ib_dev *dev, u32 port_num,
97 struct mlx5_ib_uapi_query_port *info)
98{
99 struct mlx5_core_dev *mdev;
100
101 mdev = mlx5_ib_get_native_port_mdev(dev, ib_port_num: port_num, NULL);
102 if (!mdev)
103 return -EINVAL;
104
105 info->vport_vhca_id = MLX5_CAP_GEN(mdev, vhca_id);
106 info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID;
107
108 mlx5_ib_put_native_port_mdev(dev, port_num);
109
110 return 0;
111}
112
113static int fill_switchdev_info(struct mlx5_ib_dev *dev, u32 port_num,
114 struct mlx5_ib_uapi_query_port *info)
115{
116 struct mlx5_eswitch_rep *rep;
117 struct mlx5_core_dev *mdev;
118 int err;
119
120 rep = dev->port[port_num - 1].rep;
121 if (!rep)
122 return -EOPNOTSUPP;
123
124 mdev = mlx5_eswitch_get_core_dev(esw: rep->esw);
125 if (!mdev)
126 return -EINVAL;
127
128 info->vport = rep->vport;
129 info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT;
130
131 if (rep->vport != MLX5_VPORT_UPLINK) {
132 err = fill_vport_vhca_id(mdev, vport: rep->vport, info);
133 if (err)
134 return err;
135 }
136
137 info->esw_owner_vhca_id = MLX5_CAP_GEN(mdev, vhca_id);
138 info->flags |= MLX5_IB_UAPI_QUERY_PORT_ESW_OWNER_VHCA_ID;
139
140 err = fill_vport_icm_addr(mdev, vport: rep->vport, info);
141 if (err)
142 return err;
143
144 if (mlx5_eswitch_vport_match_metadata_enabled(esw: rep->esw)) {
145 info->reg_c0.value = mlx5_eswitch_get_vport_metadata_for_match(
146 esw: rep->esw, vport_num: rep->vport);
147 info->reg_c0.mask = mlx5_eswitch_get_vport_metadata_mask();
148 info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT_REG_C0;
149 }
150
151 return 0;
152}
153
154static int UVERBS_HANDLER(MLX5_IB_METHOD_QUERY_PORT)(
155 struct uverbs_attr_bundle *attrs)
156{
157 struct mlx5_ib_uapi_query_port info = {};
158 struct mlx5_ib_ucontext *c;
159 struct mlx5_ib_dev *dev;
160 u32 port_num;
161 int ret;
162
163 if (uverbs_copy_from(&port_num, attrs,
164 MLX5_IB_ATTR_QUERY_PORT_PORT_NUM))
165 return -EFAULT;
166
167 c = to_mucontext(ibucontext: ib_uverbs_get_ucontext(attrs));
168 if (IS_ERR(ptr: c))
169 return PTR_ERR(ptr: c);
170 dev = to_mdev(ibdev: c->ibucontext.device);
171
172 if (!rdma_is_port_valid(device: &dev->ib_dev, port: port_num))
173 return -EINVAL;
174
175 if (mlx5_eswitch_mode(dev: dev->mdev) == MLX5_ESWITCH_OFFLOADS) {
176 ret = fill_switchdev_info(dev, port_num, info: &info);
177 if (ret)
178 return ret;
179 } else if (mlx5_core_mp_enabled(dev: dev->mdev)) {
180 ret = fill_multiport_info(dev, port_num, info: &info);
181 if (ret)
182 return ret;
183 }
184
185 return uverbs_copy_to_struct_or_zero(bundle: attrs, idx: MLX5_IB_ATTR_QUERY_PORT, from: &info,
186 size: sizeof(info));
187}
188
189static int UVERBS_HANDLER(MLX5_IB_METHOD_GET_DATA_DIRECT_SYSFS_PATH)(
190 struct uverbs_attr_bundle *attrs)
191{
192 struct mlx5_data_direct_dev *data_direct_dev;
193 struct mlx5_ib_ucontext *c;
194 struct mlx5_ib_dev *dev;
195 int out_len = uverbs_attr_get_len(attrs_bundle: attrs,
196 idx: MLX5_IB_ATTR_GET_DATA_DIRECT_SYSFS_PATH);
197 u32 dev_path_len;
198 char *dev_path;
199 int ret;
200
201 c = to_mucontext(ibucontext: ib_uverbs_get_ucontext(attrs));
202 if (IS_ERR(ptr: c))
203 return PTR_ERR(ptr: c);
204 dev = to_mdev(ibdev: c->ibucontext.device);
205 mutex_lock(&dev->data_direct_lock);
206 data_direct_dev = dev->data_direct_dev;
207 if (!data_direct_dev) {
208 ret = -ENODEV;
209 goto end;
210 }
211
212 dev_path = kobject_get_path(kobj: &data_direct_dev->device->kobj, GFP_KERNEL);
213 if (!dev_path) {
214 ret = -ENOMEM;
215 goto end;
216 }
217
218 dev_path_len = strlen(dev_path) + 1;
219 if (dev_path_len > out_len) {
220 ret = -ENOSPC;
221 goto end;
222 }
223
224 ret = uverbs_copy_to(attrs_bundle: attrs, idx: MLX5_IB_ATTR_GET_DATA_DIRECT_SYSFS_PATH, from: dev_path,
225 size: dev_path_len);
226 kfree(objp: dev_path);
227
228end:
229 mutex_unlock(lock: &dev->data_direct_lock);
230 return ret;
231}
232
/*
 * QUERY_PORT method spec: mandatory u32 port number in; mandatory
 * mlx5_ib_uapi_query_port out, extensible up to and including reg_c0.
 */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_QUERY_PORT,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_QUERY_PORT_PORT_NUM,
			   UVERBS_ATTR_TYPE(u32), UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_QUERY_PORT,
		UVERBS_ATTR_STRUCT(struct mlx5_ib_uapi_query_port,
				   reg_c0),
		UA_MANDATORY));
242
/*
 * GET_DATA_DIRECT_SYSFS_PATH method spec: mandatory variable-length out
 * buffer (min size 0) receiving the NUL-terminated sysfs path.
 */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_GET_DATA_DIRECT_SYSFS_PATH,
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_GET_DATA_DIRECT_SYSFS_PATH,
		UVERBS_ATTR_MIN_SIZE(0),
		UA_MANDATORY));
249
/* Attach the two device-scope methods to the uverbs DEVICE object */
ADD_UVERBS_METHODS(mlx5_ib_device,
		   UVERBS_OBJECT_DEVICE,
		   &UVERBS_METHOD(MLX5_IB_METHOD_QUERY_PORT),
		   &UVERBS_METHOD(MLX5_IB_METHOD_GET_DATA_DIRECT_SYSFS_PATH));
254
/*
 * PD_QUERY method spec: mandatory PD handle in (read access); mandatory
 * u32 out receiving the PD number (pdn).
 */
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_PD_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_QUERY_PD_HANDLE,
			UVERBS_OBJECT_PD,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_PD_RESP_PDN,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));
264
/* Attach the PD query method to the uverbs PD object */
ADD_UVERBS_METHODS(mlx5_ib_pd,
		   UVERBS_OBJECT_PD,
		   &UVERBS_METHOD(MLX5_IB_METHOD_PD_QUERY));
268
/*
 * Chain the mlx5-specific PD and DEVICE method trees into the driver's
 * uapi definition list; terminated by an empty entry.
 */
const struct uapi_definition mlx5_ib_std_types_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE(
		UVERBS_OBJECT_PD,
		&mlx5_ib_pd),
	UAPI_DEF_CHAIN_OBJ_TREE(
		UVERBS_OBJECT_DEVICE,
		&mlx5_ib_device),
	{},
};
278

/* Source: linux/drivers/infiniband/hw/mlx5/std_types.c */