/*
 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/llist.h>

#include "rds_single_path.h"
#include "ib_mr.h"
#include "rds.h"

struct workqueue_struct *rds_ib_mr_wq;

static void rds_ib_odp_mr_worker(struct work_struct *work);

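/* Look up the rds_ib_device that currently owns @ipaddr. The search runs
 * under rcu_read_lock() and, on success, takes a reference on the device
 * that the caller drops with rds_ib_dev_put() when it is done.
 */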
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_ipaddr *i_ipaddr;

	rcu_read_lock();
	list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
		list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				refcount_inc(&rds_ibdev->refcount);
				rcu_read_unlock();
				return rds_ibdev;
			}
		}
	}
	rcu_read_unlock();

	return NULL;
}

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;

	i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
	if (!i_ipaddr)
		return -ENOMEM;

	i_ipaddr->ipaddr = ipaddr;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
	spin_unlock_irq(&rds_ibdev->spinlock);

	return 0;
}

static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;
	struct rds_ib_ipaddr *to_free = NULL;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
		if (i_ipaddr->ipaddr == ipaddr) {
			list_del_rcu(&i_ipaddr->list);
			to_free = i_ipaddr;
			break;
		}
	}
	spin_unlock_irq(&rds_ibdev->spinlock);

	if (to_free)
		kfree_rcu(to_free, rcu);
}

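/* Record that the (IPv4-mapped) address in @ipaddr is now served by
 * @rds_ibdev. If a different device still claims the address, its stale
 * entry is removed before the new one is added.
 */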
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev,
			 struct in6_addr *ipaddr)
{
	struct rds_ib_device *rds_ibdev_old;

	rds_ibdev_old = rds_ib_get_device(ipaddr->s6_addr32[3]);
	if (!rds_ibdev_old)
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);

	if (rds_ibdev_old != rds_ibdev) {
		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr->s6_addr32[3]);
		rds_ib_dev_put(rds_ibdev_old);
		return rds_ib_add_ipaddr(rds_ibdev, ipaddr->s6_addr32[3]);
	}
	rds_ib_dev_put(rds_ibdev_old);

	return 0;
}

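/* Move a connection from the global ib_nodev_conns list onto this device's
 * conn_list and take a device reference on its behalf. ib_nodev_conns_lock
 * is held across both list operations so the conn never disappears from
 * both lists at once.
 */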
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&ib_nodev_conns_lock);
	BUG_ON(list_empty(&ib_nodev_conns));
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);

	spin_lock(&rds_ibdev->spinlock);
	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
	spin_unlock(&rds_ibdev->spinlock);
	spin_unlock_irq(&ib_nodev_conns_lock);

	ic->rds_ibdev = rds_ibdev;
	refcount_inc(&rds_ibdev->refcount);
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&ib_nodev_conns_lock);

	spin_lock_irq(&rds_ibdev->spinlock);
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);
	spin_unlock_irq(&rds_ibdev->spinlock);

	list_add_tail(&ic->ib_node, &ib_nodev_conns);

	spin_unlock(&ib_nodev_conns_lock);

	ic->rds_ibdev = NULL;
	rds_ib_dev_put(rds_ibdev);
}

void rds_ib_destroy_nodev_conns(void)
{
	struct rds_ib_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(&ib_nodev_conns_lock);
	list_splice(&ib_nodev_conns, &tmp_list);
	spin_unlock_irq(&ib_nodev_conns_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
		rds_conn_destroy(ic->conn);
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

	iinfo->rdma_mr_max = pool_1m->max_items;
	iinfo->rdma_mr_size = pool_1m->max_pages;
}

#if IS_ENABLED(CONFIG_IPV6)
void rds6_ib_get_mr_info(struct rds_ib_device *rds_ibdev,
			 struct rds6_info_rdma_connection *iinfo6)
{
	struct rds_ib_mr_pool *pool_1m = rds_ibdev->mr_1m_pool;

	iinfo6->rdma_mr_max = pool_1m->max_items;
	iinfo6->rdma_mr_size = pool_1m->max_pages;
}
#endif

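/* Pop one MR off the pool's clean_list, if any, and account for the reuse
 * in the per-pool statistics. Returns NULL when no clean MR is available.
 */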
struct rds_ib_mr *rds_ib_reuse_mr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	struct llist_node *ret;
	unsigned long flags;

	spin_lock_irqsave(&pool->clean_lock, flags);
	ret = llist_del_first(&pool->clean_list);
	spin_unlock_irqrestore(&pool->clean_lock, flags);
	if (ret) {
		ibmr = llist_entry(ret, struct rds_ib_mr, llnode);
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			rds_ib_stats_inc(s_ib_rdma_mr_8k_reused);
		else
			rds_ib_stats_inc(s_ib_rdma_mr_1m_reused);
	}

	return ibmr;
}

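/* DMA-sync an MR's scatterlist for CPU or device access. ODP MRs are
 * registered without a local scatterlist (see rds_ib_get_mr()), so there is
 * nothing to sync for them.
 */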
void rds_ib_sync_mr(void *trans_private, int direction)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->odp)
		return;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
				       ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
					  ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}

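/* Drop an MR's DMA mapping and release the pinned pages behind its
 * scatterlist. Every page is dirtied on release since, as the FIXME below
 * notes, we cannot tell a read-only MR from one that may have been written.
 */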
void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->sg_dma_len) {
		ib_dma_unmap_sg(rds_ibdev->dev,
				ibmr->sg, ibmr->sg_len,
				DMA_BIDIRECTIONAL);
		ibmr->sg_dma_len = 0;
	}

	/* Release the s/g list */
	if (ibmr->sg_len) {
		unsigned int i;

		for (i = 0; i < ibmr->sg_len; ++i) {
			struct page *page = sg_page(&ibmr->sg[i]);

			/* FIXME we need a way to tell a r/w MR
			 * from a r/o MR */
			WARN_ON(!page->mapping && irqs_disabled());
			set_page_dirty(page);
			put_page(page);
		}
		kfree(ibmr->sg);

		ibmr->sg = NULL;
		ibmr->sg_len = 0;
	}
}

void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	unsigned int pinned = ibmr->sg_len;

	__rds_ib_teardown_mr(ibmr);
	if (pinned) {
		struct rds_ib_mr_pool *pool = ibmr->pool;

		atomic_sub(pinned, &pool->free_pinned);
	}
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}

/*
 * given an llist of mrs, put them all into the list_head for more processing
 */
static unsigned int llist_append_to_list(struct llist_head *llist,
					 struct list_head *list)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *node;
	struct llist_node *next;
	unsigned int count = 0;

	node = llist_del_all(llist);
	while (node) {
		next = node->next;
		ibmr = llist_entry(node, struct rds_ib_mr, llnode);
		list_add_tail(&ibmr->unmap_list, list);
		node = next;
		count++;
	}
	return count;
}

/*
 * this takes a list head of mrs and turns it into linked llist nodes
 * of clusters. Each cluster has linked llist nodes of
 * MR_CLUSTER_SIZE mrs that are ready for reuse.
 */
static void list_to_llist_nodes(struct list_head *list,
				struct llist_node **nodes_head,
				struct llist_node **nodes_tail)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *cur = NULL;
	struct llist_node **next = nodes_head;

	list_for_each_entry(ibmr, list, unmap_list) {
		cur = &ibmr->llnode;
		*next = cur;
		next = &cur->next;
	}
	*next = NULL;
	*nodes_tail = cur;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
			 int free_all, struct rds_ib_mr **ibmr_ret)
{
	struct rds_ib_mr *ibmr;
	struct llist_node *clean_nodes;
	struct llist_node *clean_tail;
	LIST_HEAD(unmap_list);
	unsigned long unpinned = 0;
	unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;

	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_flush);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_flush);

	if (ibmr_ret) {
		DEFINE_WAIT(wait);
		while (!mutex_trylock(&pool->flush_lock)) {
			ibmr = rds_ib_reuse_mr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}

			prepare_to_wait(&pool->flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (llist_empty(&pool->clean_list))
				schedule();

			ibmr = rds_ib_reuse_mr(pool);
			if (ibmr) {
				*ibmr_ret = ibmr;
				finish_wait(&pool->flush_wait, &wait);
				goto out_nolock;
			}
		}
		finish_wait(&pool->flush_wait, &wait);
	} else
		mutex_lock(&pool->flush_lock);

	if (ibmr_ret) {
		ibmr = rds_ib_reuse_mr(pool);
		if (ibmr) {
			*ibmr_ret = ibmr;
			goto out;
		}
	}

	/* Get the list of all MRs to be dropped. Ordering matters -
	 * we want to put drop_list ahead of free_list.
	 */
	dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
	dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
	if (free_all) {
		unsigned long flags;

		spin_lock_irqsave(&pool->clean_lock, flags);
		llist_append_to_list(&pool->clean_list, &unmap_list);
		spin_unlock_irqrestore(&pool->clean_lock, flags);
	}

	free_goal = rds_ib_flush_goal(pool, free_all);

	if (list_empty(&unmap_list))
		goto out;

	rds_ib_unreg_frmr(&unmap_list, &nfreed, &unpinned, free_goal);

	if (!list_empty(&unmap_list)) {
		unsigned long flags;

		list_to_llist_nodes(&unmap_list, &clean_nodes, &clean_tail);
		if (ibmr_ret) {
			*ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
			clean_nodes = clean_nodes->next;
		}
		/* more than one entry in llist nodes */
		if (clean_nodes) {
			spin_lock_irqsave(&pool->clean_lock, flags);
			llist_add_batch(clean_nodes, clean_tail,
					&pool->clean_list);
			spin_unlock_irqrestore(&pool->clean_lock, flags);
		}
	}

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(dirty_to_clean, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

out:
	mutex_unlock(&pool->flush_lock);
	if (waitqueue_active(&pool->flush_wait))
		wake_up(&pool->flush_wait);
out_nolock:
	return 0;
}

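/* Try to reuse a clean MR from the pool. If none is available, reserve a
 * slot for a new MR by bumping item_count; when the pool is over its limit,
 * flush it and retry a couple of times before counting the pool as depleted.
 */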
struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	int iter = 0;

	while (1) {
		ibmr = rds_ib_reuse_mr(pool);
		if (ibmr)
			return ibmr;

		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			if (pool->pool_type == RDS_IB_MR_8K_POOL)
				rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_depleted);
			else
				rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_depleted);
			break;
		}

		/* We do have some empty MRs. Flush them out. */
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			rds_ib_stats_inc(s_ib_rdma_mr_8k_pool_wait);
		else
			rds_ib_stats_inc(s_ib_rdma_mr_1m_pool_wait);

		rds_ib_flush_mr_pool(pool, 0, &ibmr);
		if (ibmr)
			return ibmr;
	}

	return NULL;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);

	rds_ib_flush_mr_pool(pool, 0, NULL);
}

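/* Hand an MR back when its owner releases it. ODP MRs are deregistered from
 * the workqueue; pool MRs go back onto the pool lists, and a flush is kicked
 * off if too many pages are left pinned or the caller asked to invalidate.
 */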
void rds_ib_free_mr(void *trans_private, int invalidate)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_mr_pool *pool = ibmr->pool;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

	if (ibmr->odp) {
		/* An MR created and marked as use_once. We use delayed work,
		 * because there is a chance that we are in interrupt context
		 * and can't call ib_dereg_mr() directly.
		 */
		INIT_DELAYED_WORK(&ibmr->work, rds_ib_odp_mr_worker);
		queue_delayed_work(rds_ib_mr_wq, &ibmr->work, 0);
		return;
	}

	/* Return it to the pool's free list */
	rds_ib_free_frmr_list(ibmr);

	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 5)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_ib_flush_mr_pool(pool, 0, NULL);
		} else {
			/* We get here if the user created an MR marked
			 * as use_once and asked for invalidation at the
			 * same time.
			 */
			queue_delayed_work(rds_ib_mr_wq,
					   &pool->flush_worker, 10);
		}
	}

	rds_ib_dev_put(rds_ibdev);
}

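/* Flush the 8K and 1M MR pools of every registered IB device. */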
void rds_ib_flush_mrs(void)
{
	struct rds_ib_device *rds_ibdev;

	down_read(&rds_ib_devices_lock);
	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		if (rds_ibdev->mr_8k_pool)
			rds_ib_flush_mr_pool(rds_ibdev->mr_8k_pool, 0, NULL);

		if (rds_ibdev->mr_1m_pool)
			rds_ib_flush_mr_pool(rds_ibdev->mr_1m_pool, 0, NULL);
	}
	up_read(&rds_ib_devices_lock);
}

u32 rds_ib_get_lkey(void *trans_private)
{
	struct rds_ib_mr *ibmr = trans_private;

	return ibmr->u.mr->lkey;
}

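/* Register an MR for the memory described by @sg/@nents (or, for ODP, the
 * virtual range @start/@length) on behalf of @rs. ODP requests are
 * registered directly with ib_reg_user_mr() and prefetched; everything else
 * is backed by the FRMR pools via rds_ib_reg_frmr(). Returns the MR or an
 * ERR_PTR() on failure.
 */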
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret,
		    struct rds_connection *conn,
		    u64 start, u64 length, int need_odp)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_connection *ic = NULL;
	int ret;

	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr.s6_addr32[3]);
	if (!rds_ibdev) {
		ret = -ENODEV;
		goto out;
	}

	if (need_odp == ODP_ZEROBASED || need_odp == ODP_VIRTUAL) {
		u64 virt_addr = need_odp == ODP_ZEROBASED ? 0 : start;
		int access_flags =
			(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_ATOMIC |
			 IB_ACCESS_ON_DEMAND);
		struct ib_sge sge = {};
		struct ib_mr *ib_mr;

		if (!rds_ibdev->odp_capable) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		ib_mr = ib_reg_user_mr(rds_ibdev->pd, start, length, virt_addr,
				       access_flags);

		if (IS_ERR(ib_mr)) {
			rdsdebug("rds_ib_get_user_mr returned %d\n",
				 IS_ERR(ib_mr));
			ret = PTR_ERR(ib_mr);
			goto out;
		}
		if (key_ret)
			*key_ret = ib_mr->rkey;

		ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
		if (!ibmr) {
			ib_dereg_mr(ib_mr);
			ret = -ENOMEM;
			goto out;
		}
		ibmr->u.mr = ib_mr;
		ibmr->odp = 1;

		sge.addr = virt_addr;
		sge.length = length;
		sge.lkey = ib_mr->lkey;

		ib_advise_mr(rds_ibdev->pd,
			     IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE,
			     IB_UVERBS_ADVISE_MR_FLAG_FLUSH, &sge, 1);
		return ibmr;
	}

	if (conn)
		ic = conn->c_transport_data;

	if (!rds_ibdev->mr_8k_pool || !rds_ibdev->mr_1m_pool) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret);
	if (IS_ERR(ibmr)) {
		ret = PTR_ERR(ibmr);
		pr_warn("RDS/IB: rds_ib_get_mr failed (errno=%d)\n", ret);
	} else {
		return ibmr;
	}

out:
	if (rds_ibdev)
		rds_ib_dev_put(rds_ibdev);

	return ERR_PTR(ret);
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
	cancel_delayed_work_sync(&pool->flush_worker);
	rds_ib_flush_mr_pool(pool, 1, NULL);
	WARN_ON(atomic_read(&pool->item_count));
	WARN_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

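/* Allocate and initialise one MR pool (8K or 1M) for @rds_ibdev. max_pages
 * gets one extra page to allow for unaligned buffers, and max_free_pinned
 * bounds how many unmapped-but-still-pinned pages may accumulate before
 * rds_ib_free_mr() forces a flush.
 */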
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev,
					     int pool_type)
{
	struct rds_ib_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->pool_type = pool_type;
	init_llist_head(&pool->free_list);
	init_llist_head(&pool->drop_list);
	init_llist_head(&pool->clean_list);
	spin_lock_init(&pool->clean_lock);
	mutex_init(&pool->flush_lock);
	init_waitqueue_head(&pool->flush_wait);
	INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

	if (pool_type == RDS_IB_MR_1M_POOL) {
		/* +1 allows for unaligned MRs */
		pool->max_pages = RDS_MR_1M_MSG_SIZE + 1;
		pool->max_items = rds_ibdev->max_1m_mrs;
	} else {
		/* pool_type == RDS_IB_MR_8K_POOL */
		pool->max_pages = RDS_MR_8K_MSG_SIZE + 1;
		pool->max_items = rds_ibdev->max_8k_mrs;
	}

	pool->max_free_pinned = pool->max_items * pool->max_pages / 4;
	pool->max_items_soft = rds_ibdev->max_mrs * 3 / 4;

	return pool;
}

int rds_ib_mr_init(void)
{
	rds_ib_mr_wq = alloc_workqueue("rds_mr_flushd",
				       WQ_MEM_RECLAIM | WQ_PERCPU, 0);
	if (!rds_ib_mr_wq)
		return -ENOMEM;
	return 0;
}

/* By the time this is called all the IB devices should have been torn down and
 * had their pools freed. As each pool is freed its work struct is waited on,
 * so the pool flushing work queue should be idle by the time we get here.
 */
void rds_ib_mr_exit(void)
{
	destroy_workqueue(rds_ib_mr_wq);
}

static void rds_ib_odp_mr_worker(struct work_struct *work)
{
	struct rds_ib_mr *ibmr;

	ibmr = container_of(work, struct rds_ib_mr, work.work);
	ib_dereg_mr(ibmr->u.mr);
	kfree(ibmr);
}