// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2021, Mellanox Technologies inc. All rights reserved.
 */

#include <rdma/uverbs_std_types.h>
#include "dm.h"

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

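/*
 * Allocate @length bytes of device memory (MEMIC), aligned to 2^@alignment.
 * The driver tracks MEMIC pages in a bitmap: reserve a free range first,
 * then ask firmware to allocate from it. If firmware returns -EAGAIN the
 * range raced with another allocation, so drop the reservation and retry
 * from the next page. On success *addr holds the address within the BAR.
 */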
static int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
				u64 length, u32 alignment)
{
	struct mlx5_core_dev *dev = dm->dev;
	u64 num_memic_hw_pages = MLX5_CAP_DEV_MEM(dev, memic_bar_size)
					>> PAGE_SHIFT;
	u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
	u32 max_alignment = MLX5_CAP_DEV_MEM(dev, log_max_memic_addr_alignment);
	u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
	u32 out[MLX5_ST_SZ_DW(alloc_memic_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_memic_in)] = {};
	u32 mlx5_alignment;
	u64 page_idx = 0;
	int ret = 0;

	if (!length || (length & MLX5_MEMIC_ALLOC_SIZE_MASK))
		return -EINVAL;

	/* mlx5 device sets alignment as 64*2^driver_value
	 * so normalizing is needed.
	 */
	mlx5_alignment = (alignment < MLX5_MEMIC_BASE_ALIGN) ? 0 :
			 alignment - MLX5_MEMIC_BASE_ALIGN;
	if (mlx5_alignment > max_alignment)
		return -EINVAL;

	MLX5_SET(alloc_memic_in, in, opcode, MLX5_CMD_OP_ALLOC_MEMIC);
	MLX5_SET(alloc_memic_in, in, range_size, num_pages * PAGE_SIZE);
	MLX5_SET(alloc_memic_in, in, memic_size, length);
	MLX5_SET(alloc_memic_in, in, log_memic_addr_alignment,
		 mlx5_alignment);

	while (page_idx < num_memic_hw_pages) {
		spin_lock(&dm->lock);
		page_idx = bitmap_find_next_zero_area(dm->memic_alloc_pages,
						      num_memic_hw_pages,
						      page_idx,
						      num_pages, 0);

		if (page_idx < num_memic_hw_pages)
			bitmap_set(dm->memic_alloc_pages,
				   page_idx, num_pages);

		spin_unlock(&dm->lock);

		if (page_idx >= num_memic_hw_pages)
			break;

		MLX5_SET64(alloc_memic_in, in, range_start_addr,
			   hw_start_addr + (page_idx * PAGE_SIZE));

		ret = mlx5_cmd_exec_inout(dev, alloc_memic, in, out);
		if (ret) {
			spin_lock(&dm->lock);
			bitmap_clear(dm->memic_alloc_pages,
				     page_idx, num_pages);
			spin_unlock(&dm->lock);

			if (ret == -EAGAIN) {
				page_idx++;
				continue;
			}

			return ret;
		}

		*addr = dev->bar_addr +
			MLX5_GET64(alloc_memic_out, out, memic_start_addr);

		return 0;
	}

	return -ENOMEM;
}

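/*
 * Release a MEMIC allocation: translate the BAR address back to a device
 * address, issue DEALLOC_MEMIC, and on success clear the corresponding
 * pages in the allocation bitmap. Nothing is cleared if firmware rejects
 * the command.
 */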
void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr,
			    u64 length)
{
	struct mlx5_core_dev *dev = dm->dev;
	u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
	u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
	u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {};
	u64 start_page_idx;
	int err;

	addr -= dev->bar_addr;
	start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT;

	MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC);
	MLX5_SET64(dealloc_memic_in, in, memic_start_addr, addr);
	MLX5_SET(dealloc_memic_in, in, memic_size, length);

	err = mlx5_cmd_exec_in(dev, dealloc_memic, in);
	if (err)
		return;

	spin_lock(&dm->lock);
	bitmap_clear(dm->memic_alloc_pages,
		     start_page_idx, num_pages);
	spin_unlock(&dm->lock);
}

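/*
 * MEMIC operations expose an additional device address for an existing
 * allocation, keyed by an operation type. Both alloc and dealloc of an
 * operation address go through MODIFY_MEMIC, distinguished only by op_mod.
 */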
void mlx5_cmd_dealloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
			       u8 operation)
{
	u32 in[MLX5_ST_SZ_DW(modify_memic_in)] = {};
	struct mlx5_core_dev *dev = dm->dev;

	MLX5_SET(modify_memic_in, in, opcode, MLX5_CMD_OP_MODIFY_MEMIC);
	MLX5_SET(modify_memic_in, in, op_mod, MLX5_MODIFY_MEMIC_OP_MOD_DEALLOC);
	MLX5_SET(modify_memic_in, in, memic_operation_type, operation);
	MLX5_SET64(modify_memic_in, in, memic_start_addr, addr - dev->bar_addr);

	mlx5_cmd_exec_in(dev, modify_memic, in);
}

static int mlx5_cmd_alloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
				   u8 operation, phys_addr_t *op_addr)
{
	u32 out[MLX5_ST_SZ_DW(modify_memic_out)] = {};
	u32 in[MLX5_ST_SZ_DW(modify_memic_in)] = {};
	struct mlx5_core_dev *dev = dm->dev;
	int err;

	MLX5_SET(modify_memic_in, in, opcode, MLX5_CMD_OP_MODIFY_MEMIC);
	MLX5_SET(modify_memic_in, in, op_mod, MLX5_MODIFY_MEMIC_OP_MOD_ALLOC);
	MLX5_SET(modify_memic_in, in, memic_operation_type, operation);
	MLX5_SET64(modify_memic_in, in, memic_start_addr, addr - dev->bar_addr);

	err = mlx5_cmd_exec_inout(dev, modify_memic, in, out);
	if (err)
		return err;

	*op_addr = dev->bar_addr +
		   MLX5_GET64(modify_memic_out, out, memic_operation_addr);
	return 0;
}

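/*
 * All DM mmap entries live in a dedicated pgoff window starting at
 * MLX5_IB_MMAP_DEVICE_MEM << 16 and spanning 2^16 pages, so the low 16
 * bits of start_pgoff uniquely identify an entry. That is the page index
 * reported back to userspace by the alloc/query/map_op handlers below.
 */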
static int add_dm_mmap_entry(struct ib_ucontext *context,
			     struct mlx5_user_mmap_entry *mentry, u8 mmap_flag,
			     size_t size, u64 address)
{
	mentry->mmap_flag = mmap_flag;
	mentry->address = address;

	return rdma_user_mmap_entry_insert_range(
		context, &mentry->rdma_entry, size,
		MLX5_IB_MMAP_DEVICE_MEM << 16,
		(MLX5_IB_MMAP_DEVICE_MEM << 16) + (1UL << 16) - 1);
}

static void mlx5_ib_dm_memic_free(struct kref *kref)
{
	struct mlx5_ib_dm_memic *dm =
		container_of(kref, struct mlx5_ib_dm_memic, ref);
	struct mlx5_ib_dev *dev = to_mdev(dm->base.ibdm.device);

	mlx5_cmd_dealloc_memic(&dev->dm, dm->base.dev_addr, dm->base.size);
	kfree(dm);
}

static int copy_op_to_user(struct mlx5_ib_dm_op_entry *op_entry,
			   struct uverbs_attr_bundle *attrs)
{
	u64 start_offset;
	u16 page_idx;
	int err;

	page_idx = op_entry->mentry.rdma_entry.start_pgoff & 0xFFFF;
	start_offset = op_entry->op_addr & ~PAGE_MASK;
	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_PAGE_INDEX,
			     &page_idx, sizeof(page_idx));
	if (err)
		return err;

	return uverbs_copy_to(attrs,
			      MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_START_OFFSET,
			      &start_offset, sizeof(start_offset));
}

static int map_existing_op(struct mlx5_ib_dm_memic *dm, u8 op,
			   struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_dm_op_entry *op_entry;

	op_entry = xa_load(&dm->ops, op);
	if (!op_entry)
		return -ENOENT;

	return copy_op_to_user(op_entry, attrs);
}

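/*
 * Map a MEMIC operation address into the caller's context. If the
 * operation is already mapped for this DM, reuse the existing entry;
 * otherwise allocate the operation address from firmware, create an mmap
 * entry for it and take a reference on the DM so it outlives the mapping.
 */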
static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_MAP_OP_ADDR)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_HANDLE);
	struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
	struct ib_dm *ibdm = uobj->object;
	struct mlx5_ib_dm_memic *dm = to_memic(ibdm);
	struct mlx5_ib_dm_op_entry *op_entry;
	int err;
	u8 op;

	err = uverbs_copy_from(&op, attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_OP);
	if (err)
		return err;

	if (op >= BITS_PER_TYPE(u32))
		return -EOPNOTSUPP;

	if (!(MLX5_CAP_DEV_MEM(dev->mdev, memic_operations) & BIT(op)))
		return -EOPNOTSUPP;

	mutex_lock(&dm->ops_xa_lock);
	err = map_existing_op(dm, op, attrs);
	if (!err || err != -ENOENT)
		goto err_unlock;

	op_entry = kzalloc(sizeof(*op_entry), GFP_KERNEL);
	if (!op_entry) {
		err = -ENOMEM;
		goto err_unlock;
	}

	err = mlx5_cmd_alloc_memic_op(&dev->dm, dm->base.dev_addr, op,
				      &op_entry->op_addr);
	if (err) {
		kfree(op_entry);
		goto err_unlock;
	}
	op_entry->op = op;
	op_entry->dm = dm;

	err = add_dm_mmap_entry(uobj->context, &op_entry->mentry,
				MLX5_IB_MMAP_TYPE_MEMIC_OP, dm->base.size,
				op_entry->op_addr & PAGE_MASK);
	if (err) {
		mlx5_cmd_dealloc_memic_op(&dev->dm, dm->base.dev_addr, op);
		kfree(op_entry);
		goto err_unlock;
	}
	/* From this point, entry will be freed by mmap_free */
	kref_get(&dm->ref);

	err = copy_op_to_user(op_entry, attrs);
	if (err)
		goto err_remove;

	err = xa_insert(&dm->ops, op, op_entry, GFP_KERNEL);
	if (err)
		goto err_remove;
	mutex_unlock(&dm->ops_xa_lock);

	return 0;

err_remove:
	rdma_user_mmap_entry_remove(&op_entry->mentry.rdma_entry);
err_unlock:
	mutex_unlock(&dm->ops_xa_lock);

	return err;
}

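/*
 * Allocate a MEMIC region for userspace: round the requested length up to
 * the MEMIC base size, allocate from firmware, create the mmap entry and
 * report the page index and intra-page start offset through the response
 * attributes.
 */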
static struct ib_dm *handle_alloc_dm_memic(struct ib_ucontext *ctx,
					   struct ib_dm_alloc_attr *attr,
					   struct uverbs_attr_bundle *attrs)
{
	struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
	struct mlx5_ib_dm_memic *dm;
	u64 start_offset;
	u16 page_idx;
	int err;
	u64 address;

	if (!dm_db || !MLX5_CAP_DEV_MEM(dm_db->dev, memic))
		return ERR_PTR(-EOPNOTSUPP);

	dm = kzalloc(sizeof(*dm), GFP_KERNEL);
	if (!dm)
		return ERR_PTR(-ENOMEM);

	dm->base.type = MLX5_IB_UAPI_DM_TYPE_MEMIC;
	dm->base.size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
	dm->base.ibdm.device = ctx->device;

	kref_init(&dm->ref);
	xa_init(&dm->ops);
	mutex_init(&dm->ops_xa_lock);
	dm->req_length = attr->length;

	err = mlx5_cmd_alloc_memic(dm_db, &dm->base.dev_addr,
				   dm->base.size, attr->alignment);
	if (err) {
		kfree(dm);
		return ERR_PTR(err);
	}

	address = dm->base.dev_addr & PAGE_MASK;
	err = add_dm_mmap_entry(ctx, &dm->mentry, MLX5_IB_MMAP_TYPE_MEMIC,
				dm->base.size, address);
	if (err) {
		mlx5_cmd_dealloc_memic(dm_db, dm->base.dev_addr, dm->base.size);
		kfree(dm);
		return ERR_PTR(err);
	}

	page_idx = dm->mentry.rdma_entry.start_pgoff & 0xFFFF;
	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
			     &page_idx, sizeof(page_idx));
	if (err)
		goto err_copy;

	start_offset = dm->base.dev_addr & ~PAGE_MASK;
	err = uverbs_copy_to(attrs,
			     MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
			     &start_offset, sizeof(start_offset));
	if (err)
		goto err_copy;

	return &dm->base.ibdm;

err_copy:
	rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
	return ERR_PTR(err);
}

static enum mlx5_sw_icm_type get_icm_type(int uapi_type)
{
	switch (uapi_type) {
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
		return MLX5_SW_ICM_TYPE_HEADER_MODIFY;
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
		return MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN;
	case MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM:
		return MLX5_SW_ICM_TYPE_SW_ENCAP;
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
	default:
		return MLX5_SW_ICM_TYPE_STEERING;
	}
}

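/*
 * SW ICM allocations are privileged and gated on the relevant sw_owner
 * flow-table capabilities. The size is rounded up to a multiple of the
 * device's ICM block size and then to a power of two.
 */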
static struct ib_dm *handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
					    struct ib_dm_alloc_attr *attr,
					    struct uverbs_attr_bundle *attrs,
					    int type)
{
	struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev;
	enum mlx5_sw_icm_type icm_type;
	struct mlx5_ib_dm_icm *dm;
	u64 act_size;
	int err;

	if (!capable(CAP_SYS_RAWIO) || !capable(CAP_NET_RAW))
		return ERR_PTR(-EPERM);

	switch (type) {
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM:
		if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner) ||
		      MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner) ||
		      MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) ||
		      MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2)))
			return ERR_PTR(-EOPNOTSUPP);
		break;
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
		if (!MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) ||
		    !MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2))
			return ERR_PTR(-EOPNOTSUPP);
		break;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}

	dm = kzalloc(sizeof(*dm), GFP_KERNEL);
	if (!dm)
		return ERR_PTR(-ENOMEM);

	dm->base.type = type;
	dm->base.ibdm.device = ctx->device;

	/* Allocation size must be a multiple of the basic block size
	 * and a power of 2.
	 */
	act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev));
	act_size = roundup_pow_of_two(act_size);

	dm->base.size = act_size;
	icm_type = get_icm_type(type);

	err = mlx5_dm_sw_icm_alloc(dev, icm_type, act_size, attr->alignment,
				   to_mucontext(ctx)->devx_uid,
				   &dm->base.dev_addr, &dm->obj_id);
	if (err)
		goto free;

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
			     &dm->base.dev_addr, sizeof(dm->base.dev_addr));
	if (err) {
		mlx5_dm_sw_icm_dealloc(dev, icm_type, dm->base.size,
				       to_mucontext(ctx)->devx_uid,
				       dm->base.dev_addr, dm->obj_id);
		goto free;
	}
	return &dm->base.ibdm;
free:
	kfree(dm);
	return ERR_PTR(err);
}

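/*
 * Dispatch ib_dm allocation by uapi type: MEMIC goes to the device-memory
 * path, all SW ICM variants share one handler. The type attribute is
 * optional and defaults to MEMIC when absent.
 */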
struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
			       struct ib_ucontext *context,
			       struct ib_dm_alloc_attr *attr,
			       struct uverbs_attr_bundle *attrs)
{
	enum mlx5_ib_uapi_dm_type type;
	int err;

	err = uverbs_get_const_default(&type, attrs,
				       MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
				       MLX5_IB_UAPI_DM_TYPE_MEMIC);
	if (err)
		return ERR_PTR(err);

	mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n",
		    type, attr->length, attr->alignment);

	switch (type) {
	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
		return handle_alloc_dm_memic(context, attr, attrs);
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM:
		return handle_alloc_dm_sw_icm(context, attr, attrs, type);
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}
}

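/*
 * Teardown helpers: removing an mmap entry drops its reference, and once
 * the last user unmaps, the core invokes mlx5_ib_dm_mmap_free(), which is
 * where the firmware resources are actually released.
 */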
static void dm_memic_remove_ops(struct mlx5_ib_dm_memic *dm)
{
	struct mlx5_ib_dm_op_entry *entry;
	unsigned long idx;

	mutex_lock(&dm->ops_xa_lock);
	xa_for_each(&dm->ops, idx, entry) {
		xa_erase(&dm->ops, idx);
		rdma_user_mmap_entry_remove(&entry->mentry.rdma_entry);
	}
	mutex_unlock(&dm->ops_xa_lock);
}

static void mlx5_dm_memic_dealloc(struct mlx5_ib_dm_memic *dm)
{
	dm_memic_remove_ops(dm);
	rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
}

static int mlx5_dm_icm_dealloc(struct mlx5_ib_ucontext *ctx,
			       struct mlx5_ib_dm_icm *dm)
{
	enum mlx5_sw_icm_type type = get_icm_type(dm->base.type);
	struct mlx5_core_dev *dev = to_mdev(dm->base.ibdm.device)->mdev;
	int err;

	err = mlx5_dm_sw_icm_dealloc(dev, type, dm->base.size, ctx->devx_uid,
				     dm->base.dev_addr, dm->obj_id);
	if (!err)
		kfree(dm);
	return err;
}

static int mlx5_ib_dealloc_dm(struct ib_dm *ibdm,
			      struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dm *dm = to_mdm(ibdm);

	switch (dm->type) {
	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
		mlx5_dm_memic_dealloc(to_memic(ibdm));
		return 0;
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM:
		return mlx5_dm_icm_dealloc(ctx, to_icm(ibdm));
	default:
		return -EOPNOTSUPP;
	}
}

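/*
 * Query a MEMIC DM: return the same page index and start offset that alloc
 * reported, plus the originally requested length (before rounding).
 */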
static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_dm *ibdm =
		uverbs_attr_get_obj(attrs, MLX5_IB_ATTR_QUERY_DM_REQ_HANDLE);
	struct mlx5_ib_dm *dm = to_mdm(ibdm);
	struct mlx5_ib_dm_memic *memic;
	u64 start_offset;
	u16 page_idx;
	int err;

	if (dm->type != MLX5_IB_UAPI_DM_TYPE_MEMIC)
		return -EOPNOTSUPP;

	memic = to_memic(ibdm);
	page_idx = memic->mentry.rdma_entry.start_pgoff & 0xFFFF;
	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_PAGE_INDEX,
			     &page_idx, sizeof(page_idx));
	if (err)
		return err;

	start_offset = memic->base.dev_addr & ~PAGE_MASK;
	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_START_OFFSET,
			     &start_offset, sizeof(start_offset));
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_LENGTH,
			      &memic->req_length,
			      sizeof(memic->req_length));
}

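/*
 * mmap_free callback for DM entries. For the base MEMIC mapping this drops
 * the DM reference; the final put frees the device memory. For an operation
 * mapping, the operation address is released and the DM reference taken at
 * map time is dropped.
 */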
void mlx5_ib_dm_mmap_free(struct mlx5_ib_dev *dev,
			  struct mlx5_user_mmap_entry *mentry)
{
	struct mlx5_ib_dm_op_entry *op_entry;
	struct mlx5_ib_dm_memic *mdm;

	switch (mentry->mmap_flag) {
	case MLX5_IB_MMAP_TYPE_MEMIC:
		mdm = container_of(mentry, struct mlx5_ib_dm_memic, mentry);
		kref_put(&mdm->ref, mlx5_ib_dm_memic_free);
		break;
	case MLX5_IB_MMAP_TYPE_MEMIC_OP:
		op_entry = container_of(mentry, struct mlx5_ib_dm_op_entry,
					mentry);
		mdm = op_entry->dm;
		mlx5_cmd_dealloc_memic_op(&dev->dm, mdm->base.dev_addr,
					  op_entry->op);
		kfree(op_entry);
		kref_put(&mdm->ref, mlx5_ib_dm_memic_free);
		break;
	default:
		WARN_ON(true);
	}
}

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DM_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_QUERY_DM_REQ_HANDLE, UVERBS_OBJECT_DM,
			UVERBS_ACCESS_READ, UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_START_OFFSET,
			    UVERBS_ATTR_TYPE(u64), UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_PAGE_INDEX,
			    UVERBS_ATTR_TYPE(u16), UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_LENGTH,
			    UVERBS_ATTR_TYPE(u64), UA_MANDATORY));

ADD_UVERBS_ATTRIBUTES_SIMPLE(
	mlx5_ib_dm, UVERBS_OBJECT_DM, UVERBS_METHOD_DM_ALLOC,
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
			    UVERBS_ATTR_TYPE(u64), UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
			    UVERBS_ATTR_TYPE(u16), UA_OPTIONAL),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
			     enum mlx5_ib_uapi_dm_type, UA_OPTIONAL));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DM_MAP_OP_ADDR,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_HANDLE,
			UVERBS_OBJECT_DM,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_OP,
			   UVERBS_ATTR_TYPE(u8),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_START_OFFSET,
			    UVERBS_ATTR_TYPE(u64),
			    UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_PAGE_INDEX,
			    UVERBS_ATTR_TYPE(u16),
			    UA_OPTIONAL));

DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DM,
			      &UVERBS_METHOD(MLX5_IB_METHOD_DM_MAP_OP_ADDR),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DM_QUERY));

const struct uapi_definition mlx5_ib_dm_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DM),
	{},
};

const struct ib_device_ops mlx5_ib_dev_dm_ops = {
	.alloc_dm = mlx5_ib_alloc_dm,
	.dealloc_dm = mlx5_ib_dealloc_dm,
	.reg_dm_mr = mlx5_ib_reg_dm_mr,
};