/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2007 Cisco Systems. All rights reserved.
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 */

#ifndef IB_UMEM_H
#define IB_UMEM_H

#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct ib_ucontext;
struct ib_umem_odp;
struct dma_buf_attach_ops;

struct ib_umem {
	struct ib_device *ibdev;
	struct mm_struct *owning_mm;
	u64 iova;
	size_t length;
	unsigned long address;
	u32 writable : 1;
	u32 is_odp : 1;
	u32 is_dmabuf : 1;
	struct sg_append_table sgt_append;
};

struct ib_umem_dmabuf {
	struct ib_umem umem;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *first_sg;
	struct scatterlist *last_sg;
	unsigned long first_sg_offset;
	unsigned long last_sg_trim;
	void *private;
	u8 pinned : 1;
};

static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_dmabuf, umem);
}
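
/*
 * Illustrative sketch (not part of the original header): the downcast is
 * only valid for umems created by the dma-buf getters below, so callers
 * typically gate it on the is_dmabuf flag:
 *
 *	if (umem->is_dmabuf)
 *		umem_dmabuf = to_ib_umem_dmabuf(umem);
 */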

/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
	return umem->address & ~PAGE_MASK;
}
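
/*
 * Worked example (illustrative): with 4K pages, a umem whose start
 * address is 0x12345678 has ib_umem_offset() == 0x678, the byte offset
 * of the start within its first page.
 */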

static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
					       unsigned long pgsz)
{
	return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
	       (pgsz - 1);
}

static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
					    unsigned long pgsz)
{
	return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
			 ALIGN_DOWN(umem->iova, pgsz))) /
	       pgsz;
}
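
/*
 * Worked example (illustrative): with iova = 0x13000, length = 0x2000 and
 * pgsz = 0x4000, the block-aligned span runs from
 * ALIGN_DOWN(0x13000, 0x4000) = 0x10000 to ALIGN(0x15000, 0x4000) = 0x18000,
 * so the umem occupies 0x8000 / 0x4000 = 2 DMA blocks.
 */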

static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
	return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}

static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
						struct ib_umem *umem,
						unsigned long pgsz)
{
	__rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
				umem->sgt_append.sgt.nents, pgsz);
	biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1);
	biter->__sg_numblocks = ib_umem_num_dma_blocks(umem, pgsz);
}

static inline bool __rdma_umem_block_iter_next(struct ib_block_iter *biter)
{
	return __rdma_block_iter_next(biter) && biter->__sg_numblocks--;
}

/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @biter: block iterator holding the current position
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
	     __rdma_umem_block_iter_next(biter);)
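
/*
 * Illustrative sketch (not part of the original header): filling a page
 * array with the DMA address of every 4K block. "pas" and "i" are
 * hypothetical driver-side names; rdma_block_iter_dma_address() is the
 * accessor provided by <rdma/ib_verbs.h> and SZ_4K comes from
 * <linux/sizes.h>.
 *
 *	struct ib_block_iter biter;
 *	unsigned int i = 0;
 *
 *	rdma_umem_for_each_dma_block(umem, &biter, SZ_4K)
 *		pas[i++] = rdma_block_iter_dma_address(&biter);
 */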

#ifdef CONFIG_INFINIBAND_USER_MEM

struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt);
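
/*
 * Illustrative sketch (not part of the original header): a typical driver
 * flow pins the user range and then picks the best HW page size for it; a
 * return of 0 means no supported size fits and registration should fail.
 * "dev->pgsz_bitmap" is a hypothetical per-device capability field and the
 * locals are illustrative.
 *
 *	umem = ib_umem_get(ibdev, start, length, access_flags);
 *	if (IS_ERR(umem))
 *		return PTR_ERR(umem);
 *	pgsz = ib_umem_find_best_pgsz(umem, dev->pgsz_bitmap, iova);
 *	if (!pgsz) {
 *		ib_umem_release(umem);
 *		return -EINVAL;
 *	}
 */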

/**
 * ib_umem_find_best_pgoff - Find best HW page size
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @pgoff_bitmask: Mask of bits that can be represented with an offset
 *
 * This is very similar to ib_umem_find_best_pgsz() except instead of accepting
 * an IOVA it accepts a bitmask specifying what address bits can be represented
 * with a page offset.
 *
 * For instance if the HW has multiple page sizes, requires 64 byte alignment,
 * and can support aligned offsets up to 4032 then pgoff_bitmask would be
 * "111111000000".
 *
 * If the pgoff_bitmask requires either alignment in the low bit or an
 * unavailable page size for the high bits, this function returns 0.
 */
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	struct scatterlist *sg = umem->sgt_append.sgt.sgl;
	dma_addr_t dma_addr;

	dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
	return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
				      dma_addr & pgoff_bitmask);
}
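
/*
 * Illustrative sketch (not part of the original header): the 64 byte
 * aligned, up-to-4032 offset case from the comment above expressed with
 * GENMASK(); GENMASK(11, 6) == 0b111111000000 == 4032. "dev->pgsz_bitmap"
 * is a hypothetical per-device capability field.
 *
 *	pgsz = ib_umem_find_best_pgoff(umem, dev->pgsz_bitmap,
 *				       GENMASK(11, 6));
 */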

struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops);
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
						 unsigned long offset,
						 size_t size, int fd,
						 int access);
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
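
/*
 * Illustrative sketch (not part of the original header): drivers that
 * cannot handle invalidation import the dma-buf pinned, so the mapping
 * stays valid for the umem's lifetime. Release goes through
 * ib_umem_release(), which dispatches to ib_umem_dmabuf_release() for
 * dma-buf umems. "ibdev", "fd" etc. are illustrative locals.
 *
 *	umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, offset, size, fd,
 *						access_flags);
 *	if (IS_ERR(umem_dmabuf))
 *		return PTR_ERR(umem_dmabuf);
 *	...
 *	ib_umem_release(&umem_dmabuf->umem);
 */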

#else /* CONFIG_INFINIBAND_USER_MEM */

#include <linux/err.h>

static inline struct ib_umem *ib_umem_get(struct ib_device *device,
					  unsigned long addr, size_t size,
					  int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
				    size_t length)
{
	return -EOPNOTSUPP;
}
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
						   unsigned long pgsz_bitmap,
						   unsigned long virt)
{
	return 0;
}
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	return 0;
}
static inline
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset,
					  size_t size, int fd,
					  int access,
					  const struct dma_buf_attach_ops *ops)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
			  size_t size, int fd, int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	return -EOPNOTSUPP;
}
static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }

#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */