/*
 * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
 * Copyright (c) 2020, Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "rdma_core.h"
#include "uverbs.h"
#include <rdma/uverbs_std_types.h>
#include "restrack.h"

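/*
 * Cleanup callback for MR uobjects: deregister the MR through the driver
 * when the uobject is destroyed or its context is torn down.
 */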
static int uverbs_free_mr(struct ib_uobject *uobject,
			  enum rdma_remove_reason why,
			  struct uverbs_attr_bundle *attrs)
{
	return ib_dereg_mr_user((struct ib_mr *)uobject->object,
				&attrs->driver_udata);
}

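/*
 * UVERBS_METHOD_ADVISE_MR: validate the advice, flags and SGE list
 * attributes and forward them to the driver's advise_mr() op.
 */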
static int UVERBS_HANDLER(UVERBS_METHOD_ADVISE_MR)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_pd *pd =
		uverbs_attr_get_obj(attrs, UVERBS_ATTR_ADVISE_MR_PD_HANDLE);
	enum ib_uverbs_advise_mr_advice advice;
	struct ib_device *ib_dev = pd->device;
	struct ib_sge *sg_list;
	int num_sge;
	u32 flags;
	int ret;

	/* FIXME: Extend the UAPI_DEF_OBJ_NEEDS_FN stuff.. */
	if (!ib_dev->ops.advise_mr)
		return -EOPNOTSUPP;

	ret = uverbs_get_const(&advice, attrs, UVERBS_ATTR_ADVISE_MR_ADVICE);
	if (ret)
		return ret;

	ret = uverbs_get_flags32(&flags, attrs, UVERBS_ATTR_ADVISE_MR_FLAGS,
				 IB_UVERBS_ADVISE_MR_FLAG_FLUSH);
	if (ret)
		return ret;

	num_sge = uverbs_attr_ptr_get_array_size(
		attrs, UVERBS_ATTR_ADVISE_MR_SGE_LIST, sizeof(struct ib_sge));
	if (num_sge <= 0)
		return num_sge;

	sg_list = uverbs_attr_get_alloced_ptr(attrs,
					      UVERBS_ATTR_ADVISE_MR_SGE_LIST);
	return ib_dev->ops.advise_mr(pd, advice, flags, sg_list, num_sge,
				     attrs);
}

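/*
 * UVERBS_METHOD_DM_MR_REG: register an MR backed by device memory (ib_dm).
 * The access flags must include IB_ZERO_BASED and the requested range must
 * fit within the DM allocation.
 */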
static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_dm_mr_attr attr = {};
	struct ib_uobject *uobj =
		uverbs_attr_get_uobject(attrs, UVERBS_ATTR_REG_DM_MR_HANDLE);
	struct ib_dm *dm =
		uverbs_attr_get_obj(attrs, UVERBS_ATTR_REG_DM_MR_DM_HANDLE);
	struct ib_pd *pd =
		uverbs_attr_get_obj(attrs, UVERBS_ATTR_REG_DM_MR_PD_HANDLE);
	struct ib_device *ib_dev = pd->device;

	struct ib_mr *mr;
	int ret;

	if (!ib_dev->ops.reg_dm_mr)
		return -EOPNOTSUPP;

	ret = uverbs_copy_from(&attr.offset, attrs, UVERBS_ATTR_REG_DM_MR_OFFSET);
	if (ret)
		return ret;

	ret = uverbs_copy_from(&attr.length, attrs,
			       UVERBS_ATTR_REG_DM_MR_LENGTH);
	if (ret)
		return ret;

	ret = uverbs_get_flags32(&attr.access_flags, attrs,
				 UVERBS_ATTR_REG_DM_MR_ACCESS_FLAGS,
				 IB_ACCESS_SUPPORTED);
	if (ret)
		return ret;

	if (!(attr.access_flags & IB_ZERO_BASED))
		return -EINVAL;

	ret = ib_check_mr_access(ib_dev, attr.access_flags);
	if (ret)
		return ret;

	if (attr.offset > dm->length || attr.length > dm->length ||
	    attr.length > dm->length - attr.offset)
		return -EINVAL;

	mr = pd->device->ops.reg_dm_mr(pd, dm, &attr, attrs);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	mr->device = pd->device;
	mr->pd = pd;
	mr->type = IB_MR_TYPE_DM;
	mr->dm = dm;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);
	atomic_inc(&dm->usecnt);

	rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
	rdma_restrack_set_name(&mr->res, NULL);
	rdma_restrack_add(&mr->res);
	uobj->object = mr;

	uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_REG_DM_MR_HANDLE);

	ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_DM_MR_RESP_LKEY, &mr->lkey,
			     sizeof(mr->lkey));
	if (ret)
		return ret;

	ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_DM_MR_RESP_RKEY,
			     &mr->rkey, sizeof(mr->rkey));
	return ret;
}

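/*
 * UVERBS_METHOD_QUERY_MR: report the lkey, rkey, length and (optionally)
 * iova of an existing MR back to userspace.
 */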
static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_MR)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_mr *mr =
		uverbs_attr_get_obj(attrs, UVERBS_ATTR_QUERY_MR_HANDLE);
	int ret;

	ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_MR_RESP_LKEY, &mr->lkey,
			     sizeof(mr->lkey));
	if (ret)
		return ret;

	ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_MR_RESP_RKEY,
			     &mr->rkey, sizeof(mr->rkey));
	if (ret)
		return ret;

	ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_MR_RESP_LENGTH,
			     &mr->length, sizeof(mr->length));
	if (ret)
		return ret;

	ret = uverbs_copy_to(attrs, UVERBS_ATTR_QUERY_MR_RESP_IOVA,
			     &mr->iova, sizeof(mr->iova));

	return IS_UVERBS_COPY_ERR(ret) ? ret : 0;
}

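/*
 * UVERBS_METHOD_REG_DMABUF_MR: register an MR backed by a dma-buf file
 * descriptor. The dma-buf offset and the requested iova must have the
 * same offset within a page.
 */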
static int UVERBS_HANDLER(UVERBS_METHOD_REG_DMABUF_MR)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj =
		uverbs_attr_get_uobject(attrs, UVERBS_ATTR_REG_DMABUF_MR_HANDLE);
	struct ib_pd *pd =
		uverbs_attr_get_obj(attrs, UVERBS_ATTR_REG_DMABUF_MR_PD_HANDLE);
	struct ib_device *ib_dev = pd->device;

	u64 offset, length, iova;
	u32 fd, access_flags;
	struct ib_mr *mr;
	int ret;

	if (!ib_dev->ops.reg_user_mr_dmabuf)
		return -EOPNOTSUPP;

	ret = uverbs_copy_from(&offset, attrs,
			       UVERBS_ATTR_REG_DMABUF_MR_OFFSET);
	if (ret)
		return ret;

	ret = uverbs_copy_from(&length, attrs,
			       UVERBS_ATTR_REG_DMABUF_MR_LENGTH);
	if (ret)
		return ret;

	ret = uverbs_copy_from(&iova, attrs,
			       UVERBS_ATTR_REG_DMABUF_MR_IOVA);
	if (ret)
		return ret;

	if ((offset & ~PAGE_MASK) != (iova & ~PAGE_MASK))
		return -EINVAL;

	ret = uverbs_copy_from(&fd, attrs,
			       UVERBS_ATTR_REG_DMABUF_MR_FD);
	if (ret)
		return ret;

	ret = uverbs_get_flags32(&access_flags, attrs,
				 UVERBS_ATTR_REG_DMABUF_MR_ACCESS_FLAGS,
				 IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE |
				 IB_ACCESS_REMOTE_ATOMIC |
				 IB_ACCESS_RELAXED_ORDERING);
	if (ret)
		return ret;

	ret = ib_check_mr_access(ib_dev, access_flags);
	if (ret)
		return ret;

	mr = pd->device->ops.reg_user_mr_dmabuf(pd, offset, length, iova, fd,
						access_flags,
						&attrs->driver_udata);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	mr->device = pd->device;
	mr->pd = pd;
	mr->type = IB_MR_TYPE_USER;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);

	rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR);
	rdma_restrack_set_name(&mr->res, NULL);
	rdma_restrack_add(&mr->res);
	uobj->object = mr;

	uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_REG_DMABUF_MR_HANDLE);

	ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_DMABUF_MR_RESP_LKEY,
			     &mr->lkey, sizeof(mr->lkey));
	if (ret)
		return ret;

	ret = uverbs_copy_to(attrs, UVERBS_ATTR_REG_DMABUF_MR_RESP_RKEY,
			     &mr->rkey, sizeof(mr->rkey));
	return ret;
}

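/*
 * Attribute specifications for the methods above; they define the ioctl()
 * attribute layout exchanged with userspace.
 */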
DECLARE_UVERBS_NAMED_METHOD(
	UVERBS_METHOD_ADVISE_MR,
	UVERBS_ATTR_IDR(UVERBS_ATTR_ADVISE_MR_PD_HANDLE,
			UVERBS_OBJECT_PD,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_CONST_IN(UVERBS_ATTR_ADVISE_MR_ADVICE,
			     enum ib_uverbs_advise_mr_advice,
			     UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_ADVISE_MR_FLAGS,
			     enum ib_uverbs_advise_mr_flag,
			     UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(UVERBS_ATTR_ADVISE_MR_SGE_LIST,
			   UVERBS_ATTR_MIN_SIZE(sizeof(struct ib_uverbs_sge)),
			   UA_MANDATORY,
			   UA_ALLOC_AND_COPY));

DECLARE_UVERBS_NAMED_METHOD(
	UVERBS_METHOD_QUERY_MR,
	UVERBS_ATTR_IDR(UVERBS_ATTR_QUERY_MR_HANDLE,
			UVERBS_OBJECT_MR,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_MR_RESP_RKEY,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_MR_RESP_LKEY,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_MR_RESP_LENGTH,
			    UVERBS_ATTR_TYPE(u64),
			    UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_MR_RESP_IOVA,
			    UVERBS_ATTR_TYPE(u64),
			    UA_OPTIONAL));

DECLARE_UVERBS_NAMED_METHOD(
	UVERBS_METHOD_DM_MR_REG,
	UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DM_MR_HANDLE,
			UVERBS_OBJECT_MR,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DM_MR_OFFSET,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DM_MR_LENGTH,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DM_MR_PD_HANDLE,
			UVERBS_OBJECT_PD,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_REG_DM_MR_ACCESS_FLAGS,
			     enum ib_access_flags),
	UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DM_MR_DM_HANDLE,
			UVERBS_OBJECT_DM,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_DM_MR_RESP_LKEY,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_DM_MR_RESP_RKEY,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	UVERBS_METHOD_REG_DMABUF_MR,
	UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DMABUF_MR_HANDLE,
			UVERBS_OBJECT_MR,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_IDR(UVERBS_ATTR_REG_DMABUF_MR_PD_HANDLE,
			UVERBS_OBJECT_PD,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DMABUF_MR_OFFSET,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DMABUF_MR_LENGTH,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DMABUF_MR_IOVA,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(UVERBS_ATTR_REG_DMABUF_MR_FD,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_REG_DMABUF_MR_ACCESS_FLAGS,
			     enum ib_access_flags),
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_DMABUF_MR_RESP_LKEY,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_REG_DMABUF_MR_RESP_RKEY,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	UVERBS_METHOD_MR_DESTROY,
	UVERBS_ATTR_IDR(UVERBS_ATTR_DESTROY_MR_HANDLE,
			UVERBS_OBJECT_MR,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

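/*
 * The MR uobject type: allocated from an IDR and cleaned up through
 * uverbs_free_mr(), exposing the methods declared above.
 */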
DECLARE_UVERBS_NAMED_OBJECT(
	UVERBS_OBJECT_MR,
	UVERBS_TYPE_ALLOC_IDR(uverbs_free_mr),
	&UVERBS_METHOD(UVERBS_METHOD_ADVISE_MR),
	&UVERBS_METHOD(UVERBS_METHOD_DM_MR_REG),
	&UVERBS_METHOD(UVERBS_METHOD_MR_DESTROY),
	&UVERBS_METHOD(UVERBS_METHOD_QUERY_MR),
	&UVERBS_METHOD(UVERBS_METHOD_REG_DMABUF_MR));

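/* The MR object tree is only exposed when the driver implements dereg_mr. */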
const struct uapi_definition uverbs_def_obj_mr[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_MR,
				      UAPI_DEF_OBJ_NEEDS_FN(dereg_mr)),
	{}
};