1 | /* |
2 | * Copyright (c) 2014 Mellanox Technologies. All rights reserved. |
3 | * |
4 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU |
6 | * General Public License (GPL) Version 2, available from the file |
7 | * COPYING in the main directory of this source tree, or the |
8 | * OpenIB.org BSD license below: |
9 | * |
10 | * Redistribution and use in source and binary forms, with or |
11 | * without modification, are permitted provided that the following |
12 | * conditions are met: |
13 | * |
14 | * - Redistributions of source code must retain the above |
15 | * copyright notice, this list of conditions and the following |
16 | * disclaimer. |
17 | * |
18 | * - Redistributions in binary form must reproduce the above |
19 | * copyright notice, this list of conditions and the following |
20 | * disclaimer in the documentation and/or other materials |
21 | * provided with the distribution. |
22 | * |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
30 | * SOFTWARE. |
31 | */ |
32 | |
33 | #include <linux/types.h> |
34 | #include <linux/sched.h> |
35 | #include <linux/sched/mm.h> |
36 | #include <linux/sched/task.h> |
37 | #include <linux/pid.h> |
38 | #include <linux/slab.h> |
39 | #include <linux/export.h> |
40 | #include <linux/vmalloc.h> |
41 | #include <linux/hugetlb.h> |
42 | #include <linux/interval_tree.h> |
43 | #include <linux/hmm.h> |
44 | #include <linux/pagemap.h> |
45 | |
46 | #include <rdma/ib_umem_odp.h> |
47 | |
48 | #include "uverbs.h" |
49 | |
50 | static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp, |
51 | const struct mmu_interval_notifier_ops *ops) |
52 | { |
53 | int ret; |
54 | |
55 | umem_odp->umem.is_odp = 1; |
56 | mutex_init(&umem_odp->umem_mutex); |
57 | |
58 | if (!umem_odp->is_implicit_odp) { |
59 | size_t page_size = 1UL << umem_odp->page_shift; |
60 | unsigned long start; |
61 | unsigned long end; |
62 | size_t ndmas, npfns; |
63 | |
64 | start = ALIGN_DOWN(umem_odp->umem.address, page_size); |
65 | if (check_add_overflow(umem_odp->umem.address, |
66 | (unsigned long)umem_odp->umem.length, |
67 | &end)) |
68 | return -EOVERFLOW; |
69 | end = ALIGN(end, page_size); |
70 | if (unlikely(end < page_size)) |
71 | return -EOVERFLOW; |
72 | |
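		/*
		 * Note: dma_list is indexed at the device page granularity
		 * (page_shift) while pfn_list is indexed at the CPU page
		 * granularity (PAGE_SHIFT), so the two arrays differ in
		 * length whenever page_shift > PAGE_SHIFT.
		 */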
73 | ndmas = (end - start) >> umem_odp->page_shift; |
74 | if (!ndmas) |
75 | return -EINVAL; |
76 | |
77 | npfns = (end - start) >> PAGE_SHIFT; |
		umem_odp->pfn_list = kvcalloc(
			npfns, sizeof(*umem_odp->pfn_list), GFP_KERNEL);
80 | if (!umem_odp->pfn_list) |
81 | return -ENOMEM; |
82 | |
		umem_odp->dma_list = kvcalloc(
			ndmas, sizeof(*umem_odp->dma_list), GFP_KERNEL);
85 | if (!umem_odp->dma_list) { |
86 | ret = -ENOMEM; |
87 | goto out_pfn_list; |
88 | } |
89 | |
		ret = mmu_interval_notifier_insert(&umem_odp->notifier,
						   umem_odp->umem.owning_mm,
						   start, end - start, ops);
93 | if (ret) |
94 | goto out_dma_list; |
95 | } |
96 | |
97 | return 0; |
98 | |
99 | out_dma_list: |
	kvfree(umem_odp->dma_list);
out_pfn_list:
	kvfree(umem_odp->pfn_list);
103 | return ret; |
104 | } |
105 | |
106 | /** |
107 | * ib_umem_odp_alloc_implicit - Allocate a parent implicit ODP umem |
108 | * |
109 | * Implicit ODP umems do not have a VA range and do not have any page lists. |
110 | * They exist only to hold the per_mm reference to help the driver create |
111 | * children umems. |
112 | * |
113 | * @device: IB device to create UMEM |
114 | * @access: ib_reg_mr access flags |
115 | */ |
116 | struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device, |
117 | int access) |
118 | { |
119 | struct ib_umem *umem; |
120 | struct ib_umem_odp *umem_odp; |
121 | int ret; |
122 | |
123 | if (access & IB_ACCESS_HUGETLB) |
		return ERR_PTR(-EINVAL);
125 | |
	umem_odp = kzalloc(sizeof(*umem_odp), GFP_KERNEL);
	if (!umem_odp)
		return ERR_PTR(-ENOMEM);
	umem = &umem_odp->umem;
	umem->ibdev = device;
	umem->writable = ib_access_writable(access);
132 | umem->owning_mm = current->mm; |
133 | umem_odp->is_implicit_odp = 1; |
134 | umem_odp->page_shift = PAGE_SHIFT; |
135 | |
	umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
137 | ret = ib_init_umem_odp(umem_odp, NULL); |
138 | if (ret) { |
		put_pid(umem_odp->tgid);
		kfree(umem_odp);
		return ERR_PTR(ret);
142 | } |
143 | return umem_odp; |
144 | } |
145 | EXPORT_SYMBOL(ib_umem_odp_alloc_implicit); |
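
/*
 * Usage sketch (hypothetical, not part of this file): a driver that supports
 * implicit ODP would allocate the parent umem when userspace registers a
 * whole-address-space MR. The mr field below is an illustrative assumption:
 *
 *	struct ib_umem_odp *odp;
 *
 *	odp = ib_umem_odp_alloc_implicit(ibdev, access_flags);
 *	if (IS_ERR(odp))
 *		return PTR_ERR(odp);
 *	mr->umem = &odp->umem;		// hypothetical driver MR field
 */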
146 | |
147 | /** |
148 | * ib_umem_odp_alloc_child - Allocate a child ODP umem under an implicit |
149 | * parent ODP umem |
150 | * |
 * @root: The parent umem enclosing the child. This must be allocated using
 * ib_umem_odp_alloc_implicit()
153 | * @addr: The starting userspace VA |
154 | * @size: The length of the userspace VA |
155 | * @ops: MMU interval ops, currently only @invalidate |
156 | */ |
157 | struct ib_umem_odp * |
158 | ib_umem_odp_alloc_child(struct ib_umem_odp *root, unsigned long addr, |
159 | size_t size, |
160 | const struct mmu_interval_notifier_ops *ops) |
161 | { |
162 | /* |
163 | * Caller must ensure that root cannot be freed during the call to |
164 | * ib_alloc_odp_umem. |
165 | */ |
166 | struct ib_umem_odp *odp_data; |
167 | struct ib_umem *umem; |
168 | int ret; |
169 | |
170 | if (WARN_ON(!root->is_implicit_odp)) |
		return ERR_PTR(-EINVAL);
172 | |
	odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
	if (!odp_data)
		return ERR_PTR(-ENOMEM);
176 | umem = &odp_data->umem; |
177 | umem->ibdev = root->umem.ibdev; |
178 | umem->length = size; |
179 | umem->address = addr; |
180 | umem->writable = root->umem.writable; |
181 | umem->owning_mm = root->umem.owning_mm; |
182 | odp_data->page_shift = PAGE_SHIFT; |
183 | odp_data->notifier.ops = ops; |
184 | |
185 | /* |
186 | * A mmget must be held when registering a notifier, the owming_mm only |
187 | * has a mm_grab at this point. |
188 | */ |
	if (!mmget_not_zero(umem->owning_mm)) {
190 | ret = -EFAULT; |
191 | goto out_free; |
192 | } |
193 | |
	odp_data->tgid = get_pid(root->tgid);
	ret = ib_init_umem_odp(odp_data, ops);
196 | if (ret) |
197 | goto out_tgid; |
198 | mmput(umem->owning_mm); |
199 | return odp_data; |
200 | |
201 | out_tgid: |
	put_pid(odp_data->tgid);
203 | mmput(umem->owning_mm); |
204 | out_free: |
	kfree(odp_data);
	return ERR_PTR(ret);
207 | } |
208 | EXPORT_SYMBOL(ib_umem_odp_alloc_child); |
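
/*
 * Usage sketch (hypothetical): on a page fault inside an implicit MR, a
 * driver typically creates a child umem covering the faulting range and then
 * fills it with ib_umem_odp_map_dma_and_lock(). The names below are
 * illustrative assumptions:
 *
 *	struct ib_umem_odp *child;
 *
 *	child = ib_umem_odp_alloc_child(root_odp, fault_addr & PAGE_MASK,
 *					child_size, &driver_notifier_ops);
 *	if (IS_ERR(child))
 *		return PTR_ERR(child);
 */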
209 | |
210 | /** |
211 | * ib_umem_odp_get - Create a umem_odp for a userspace va |
212 | * |
213 | * @device: IB device struct to get UMEM |
214 | * @addr: userspace virtual address to start at |
215 | * @size: length of region to pin |
216 | * @access: IB_ACCESS_xxx flags for memory being pinned |
217 | * @ops: MMU interval ops, currently only @invalidate |
218 | * |
 * The driver should use this function when the access flags indicate ODP
 * memory. It avoids pinning; instead, it stores the mm for future page fault
 * handling in conjunction with MMU notifiers.
222 | */ |
223 | struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device, |
224 | unsigned long addr, size_t size, int access, |
225 | const struct mmu_interval_notifier_ops *ops) |
226 | { |
227 | struct ib_umem_odp *umem_odp; |
228 | int ret; |
229 | |
230 | if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND))) |
		return ERR_PTR(-EINVAL);
232 | |
	umem_odp = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL);
	if (!umem_odp)
		return ERR_PTR(-ENOMEM);
236 | |
237 | umem_odp->umem.ibdev = device; |
238 | umem_odp->umem.length = size; |
239 | umem_odp->umem.address = addr; |
	umem_odp->umem.writable = ib_access_writable(access);
241 | umem_odp->umem.owning_mm = current->mm; |
242 | umem_odp->notifier.ops = ops; |
243 | |
244 | umem_odp->page_shift = PAGE_SHIFT; |
245 | #ifdef CONFIG_HUGETLB_PAGE |
246 | if (access & IB_ACCESS_HUGETLB) |
247 | umem_odp->page_shift = HPAGE_SHIFT; |
248 | #endif |
249 | |
	umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
251 | ret = ib_init_umem_odp(umem_odp, ops); |
252 | if (ret) |
253 | goto err_put_pid; |
254 | return umem_odp; |
255 | |
256 | err_put_pid: |
	put_pid(umem_odp->tgid);
	kfree(umem_odp);
	return ERR_PTR(ret);
260 | } |
261 | EXPORT_SYMBOL(ib_umem_odp_get); |
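
/*
 * Usage sketch (hypothetical): in a driver's reg_user_mr path, an explicit
 * ODP MR is created with ib_umem_odp_get() instead of a pinned umem, e.g.:
 *
 *	if (access_flags & IB_ACCESS_ON_DEMAND) {
 *		struct ib_umem_odp *odp;
 *
 *		odp = ib_umem_odp_get(ibdev, start, length, access_flags,
 *				      &driver_notifier_ops);
 *		if (IS_ERR(odp))
 *			return ERR_CAST(odp);
 *		mr->umem = &odp->umem;	// hypothetical driver MR field
 *	}
 *
 * driver_notifier_ops is an assumed mmu_interval_notifier_ops whose
 * ->invalidate callback unmaps the range (see the sketch after
 * ib_umem_odp_unmap_dma_pages() below).
 */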
262 | |
263 | void ib_umem_odp_release(struct ib_umem_odp *umem_odp) |
264 | { |
265 | /* |
266 | * Ensure that no more pages are mapped in the umem. |
267 | * |
268 | * It is the driver's responsibility to ensure, before calling us, |
269 | * that the hardware will not attempt to access the MR any more. |
270 | */ |
271 | if (!umem_odp->is_implicit_odp) { |
272 | mutex_lock(&umem_odp->umem_mutex); |
		ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
					    ib_umem_end(umem_odp));
		mutex_unlock(&umem_odp->umem_mutex);
		mmu_interval_notifier_remove(&umem_odp->notifier);
		kvfree(umem_odp->dma_list);
		kvfree(umem_odp->pfn_list);
279 | } |
	put_pid(umem_odp->tgid);
	kfree(umem_odp);
282 | } |
283 | EXPORT_SYMBOL(ib_umem_odp_release); |
284 | |
285 | /* |
286 | * Map for DMA and insert a single page into the on-demand paging page tables. |
287 | * |
 * @umem_odp: the umem to insert the page to.
289 | * @dma_index: index in the umem to add the dma to. |
290 | * @page: the page struct to map and add. |
291 | * @access_mask: access permissions needed for this page. |
292 | * |
293 | * The function returns -EFAULT if the DMA mapping operation fails. |
294 | * |
295 | */ |
296 | static int ib_umem_odp_map_dma_single_page( |
297 | struct ib_umem_odp *umem_odp, |
298 | unsigned int dma_index, |
299 | struct page *page, |
300 | u64 access_mask) |
301 | { |
302 | struct ib_device *dev = umem_odp->umem.ibdev; |
303 | dma_addr_t *dma_addr = &umem_odp->dma_list[dma_index]; |
304 | |
305 | if (*dma_addr) { |
306 | /* |
307 | * If the page is already dma mapped it means it went through |
		 * a non-invalidating transition, like read-only to writable.
309 | * Resync the flags. |
310 | */ |
311 | *dma_addr = (*dma_addr & ODP_DMA_ADDR_MASK) | access_mask; |
312 | return 0; |
313 | } |
314 | |
	*dma_addr = ib_dma_map_page(dev, page, 0, 1 << umem_odp->page_shift,
				    DMA_BIDIRECTIONAL);
	if (ib_dma_mapping_error(dev, *dma_addr)) {
318 | *dma_addr = 0; |
319 | return -EFAULT; |
320 | } |
321 | umem_odp->npages++; |
322 | *dma_addr |= access_mask; |
323 | return 0; |
324 | } |
325 | |
326 | /** |
327 | * ib_umem_odp_map_dma_and_lock - DMA map userspace memory in an ODP MR and lock it. |
328 | * |
329 | * Maps the range passed in the argument to DMA addresses. |
 * The DMA addresses of the mapped pages are updated in umem_odp->dma_list.
331 | * Upon success the ODP MR will be locked to let caller complete its device |
332 | * page table update. |
333 | * |
 * Returns the number of pages mapped on success, or a negative error code
 * on failure.
336 | * @umem_odp: the umem to map and pin |
337 | * @user_virt: the address from which we need to map. |
338 | * @bcnt: the minimal number of bytes to pin and map. The mapping might be |
339 | * bigger due to alignment, and may also be smaller in case of an error |
340 | * pinning or mapping a page. The actual pages mapped is returned in |
341 | * the return value. |
342 | * @access_mask: bit mask of the requested access permissions for the given |
343 | * range. |
344 | * @fault: is faulting required for the given range |
345 | */ |
346 | int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt, |
347 | u64 bcnt, u64 access_mask, bool fault) |
348 | __acquires(&umem_odp->umem_mutex) |
349 | { |
350 | struct task_struct *owning_process = NULL; |
351 | struct mm_struct *owning_mm = umem_odp->umem.owning_mm; |
352 | int pfn_index, dma_index, ret = 0, start_idx; |
353 | unsigned int page_shift, hmm_order, pfn_start_idx; |
354 | unsigned long num_pfns, current_seq; |
355 | struct hmm_range range = {}; |
356 | unsigned long timeout; |
357 | |
358 | if (access_mask == 0) |
359 | return -EINVAL; |
360 | |
361 | if (user_virt < ib_umem_start(umem_odp) || |
362 | user_virt + bcnt > ib_umem_end(umem_odp)) |
363 | return -EFAULT; |
364 | |
365 | page_shift = umem_odp->page_shift; |
366 | |
367 | /* |
368 | * owning_process is allowed to be NULL, this means somehow the mm is |
369 | * existing beyond the lifetime of the originating process.. Presumably |
370 | * mmget_not_zero will fail in this case. |
371 | */ |
	owning_process = get_pid_task(umem_odp->tgid, PIDTYPE_PID);
	if (!owning_process || !mmget_not_zero(owning_mm)) {
374 | ret = -EINVAL; |
375 | goto out_put_task; |
376 | } |
377 | |
378 | range.notifier = &umem_odp->notifier; |
379 | range.start = ALIGN_DOWN(user_virt, 1UL << page_shift); |
380 | range.end = ALIGN(user_virt + bcnt, 1UL << page_shift); |
381 | pfn_start_idx = (range.start - ib_umem_start(umem_odp)) >> PAGE_SHIFT; |
382 | num_pfns = (range.end - range.start) >> PAGE_SHIFT; |
383 | if (fault) { |
384 | range.default_flags = HMM_PFN_REQ_FAULT; |
385 | |
386 | if (access_mask & ODP_WRITE_ALLOWED_BIT) |
387 | range.default_flags |= HMM_PFN_REQ_WRITE; |
388 | } |
389 | |
390 | range.hmm_pfns = &(umem_odp->pfn_list[pfn_start_idx]); |
391 | timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); |
392 | |
393 | retry: |
	current_seq = range.notifier_seq =
		mmu_interval_read_begin(&umem_odp->notifier);

	mmap_read_lock(owning_mm);
	ret = hmm_range_fault(&range);
	mmap_read_unlock(owning_mm);
400 | if (unlikely(ret)) { |
401 | if (ret == -EBUSY && !time_after(jiffies, timeout)) |
402 | goto retry; |
403 | goto out_put_mm; |
404 | } |
405 | |
406 | start_idx = (range.start - ib_umem_start(umem_odp)) >> page_shift; |
407 | dma_index = start_idx; |
408 | |
409 | mutex_lock(&umem_odp->umem_mutex); |
	if (mmu_interval_read_retry(&umem_odp->notifier, current_seq)) {
		mutex_unlock(&umem_odp->umem_mutex);
412 | goto retry; |
413 | } |
414 | |
415 | for (pfn_index = 0; pfn_index < num_pfns; |
416 | pfn_index += 1 << (page_shift - PAGE_SHIFT), dma_index++) { |
417 | |
418 | if (fault) { |
419 | /* |
420 | * Since we asked for hmm_range_fault() to populate |
421 | * pages it shouldn't return an error entry on success. |
422 | */ |
423 | WARN_ON(range.hmm_pfns[pfn_index] & HMM_PFN_ERROR); |
424 | WARN_ON(!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID)); |
425 | } else { |
426 | if (!(range.hmm_pfns[pfn_index] & HMM_PFN_VALID)) { |
427 | WARN_ON(umem_odp->dma_list[dma_index]); |
428 | continue; |
429 | } |
430 | access_mask = ODP_READ_ALLOWED_BIT; |
431 | if (range.hmm_pfns[pfn_index] & HMM_PFN_WRITE) |
432 | access_mask |= ODP_WRITE_ALLOWED_BIT; |
433 | } |
434 | |
		hmm_order = hmm_pfn_to_map_order(range.hmm_pfns[pfn_index]);
		/* If a hugepage was detected and ODP wasn't set for it, the
		 * umem page_shift will be used; the opposite case is an error.
		 */
		if (hmm_order + PAGE_SHIFT < page_shift) {
			ret = -EINVAL;
			ibdev_dbg(umem_odp->umem.ibdev,
				  "%s: unexpected hmm_order %u, page_shift %u\n",
				  __func__, hmm_order, page_shift);
444 | break; |
445 | } |
446 | |
		ret = ib_umem_odp_map_dma_single_page(
			umem_odp, dma_index,
			hmm_pfn_to_page(range.hmm_pfns[pfn_index]),
			access_mask);
		if (ret < 0) {
			ibdev_dbg(umem_odp->umem.ibdev,
				  "ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
453 | break; |
454 | } |
455 | } |
	/* upon success the lock should stay held for the caller */
	if (!ret)
		ret = dma_index - start_idx;
	else
		mutex_unlock(&umem_odp->umem_mutex);
461 | |
462 | out_put_mm: |
463 | mmput_async(owning_mm); |
464 | out_put_task: |
465 | if (owning_process) |
		put_task_struct(owning_process);
467 | return ret; |
468 | } |
469 | EXPORT_SYMBOL(ib_umem_odp_map_dma_and_lock); |
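
/*
 * Usage sketch (hypothetical): a driver page-fault handler maps the faulting
 * range, programs the device translation tables from umem_odp->dma_list while
 * umem_mutex is still held, and only then releases the lock.
 * hw_update_translation_table() is an assumed driver helper:
 *
 *	npages = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt,
 *					      access_mask, true);
 *	if (npages < 0)
 *		return npages;
 *	ret = hw_update_translation_table(mr, odp);	// hypothetical
 *	mutex_unlock(&odp->umem_mutex);
 *	return ret ? ret : npages;
 */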
470 | |
471 | void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt, |
472 | u64 bound) |
473 | { |
474 | dma_addr_t dma_addr; |
475 | dma_addr_t dma; |
476 | int idx; |
477 | u64 addr; |
478 | struct ib_device *dev = umem_odp->umem.ibdev; |
479 | |
480 | lockdep_assert_held(&umem_odp->umem_mutex); |
481 | |
482 | virt = max_t(u64, virt, ib_umem_start(umem_odp)); |
483 | bound = min_t(u64, bound, ib_umem_end(umem_odp)); |
484 | for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) { |
485 | idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift; |
486 | dma = umem_odp->dma_list[idx]; |
487 | |
		/* Pages that were never DMA mapped have a zero entry; skip them */
		if (dma) {
			unsigned long pfn_idx = (addr - ib_umem_start(umem_odp)) >> PAGE_SHIFT;
			struct page *page = hmm_pfn_to_page(umem_odp->pfn_list[pfn_idx]);

			dma_addr = dma & ODP_DMA_ADDR_MASK;
			ib_dma_unmap_page(dev, dma_addr,
					  BIT(umem_odp->page_shift),
					  DMA_BIDIRECTIONAL);
497 | if (dma & ODP_WRITE_ALLOWED_BIT) { |
498 | struct page *head_page = compound_head(page); |
499 | /* |
500 | * set_page_dirty prefers being called with |
501 | * the page lock. However, MMU notifiers are |
502 | * called sometimes with and sometimes without |
503 | * the lock. We rely on the umem_mutex instead |
504 | * to prevent other mmu notifiers from |
505 | * continuing and allowing the page mapping to |
506 | * be removed. |
507 | */ |
508 | set_page_dirty(head_page); |
509 | } |
510 | umem_odp->dma_list[idx] = 0; |
511 | umem_odp->npages--; |
512 | } |
513 | } |
514 | } |
515 | EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages); |
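
/*
 * Usage sketch (hypothetical): drivers normally call
 * ib_umem_odp_unmap_dma_pages() from their mmu_interval_notifier invalidate
 * callback, holding umem_mutex across the sequence update, the device
 * invalidation and the unmap. hw_invalidate_range() is an assumed helper:
 *
 *	static bool driver_invalidate(struct mmu_interval_notifier *mni,
 *				      const struct mmu_notifier_range *range,
 *				      unsigned long cur_seq)
 *	{
 *		struct ib_umem_odp *odp =
 *			container_of(mni, struct ib_umem_odp, notifier);
 *		u64 start, end;
 *
 *		if (!mmu_notifier_range_blockable(range))
 *			return false;
 *
 *		mutex_lock(&odp->umem_mutex);
 *		mmu_interval_set_seq(mni, cur_seq);
 *		start = max_t(u64, ib_umem_start(odp), range->start);
 *		end = min_t(u64, ib_umem_end(odp), range->end);
 *		hw_invalidate_range(odp, start, end);	// hypothetical
 *		ib_umem_odp_unmap_dma_pages(odp, start, end);
 *		mutex_unlock(&odp->umem_mutex);
 *		return true;
 *	}
 */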
516 | |