| 1 | /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ |
| 2 | |
| 3 | /* Authors: Bernard Metzler <bmt@zurich.ibm.com> */ |
| 4 | /* Copyright (c) 2008-2019, IBM Corporation */ |
| 5 | |
| 6 | #ifndef _SIW_MEM_H |
| 7 | #define _SIW_MEM_H |
| 8 | |
| 9 | struct siw_umem *siw_umem_get(struct ib_device *base_dave, u64 start, |
| 10 | u64 len, int rights); |
| 11 | void siw_umem_release(struct siw_umem *umem); |
| 12 | struct siw_pbl *siw_pbl_alloc(u32 num_buf); |
| 13 | dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx); |
| 14 | struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index); |
| 15 | int siw_invalidate_stag(struct ib_pd *pd, u32 stag); |
| 16 | int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr, |
| 17 | enum ib_access_flags perms, int len); |
| 18 | int siw_check_sge(struct ib_pd *pd, struct siw_sge *sge, |
| 19 | struct siw_mem *mem[], enum ib_access_flags perms, |
| 20 | u32 off, int len); |
| 21 | void siw_wqe_put_mem(struct siw_wqe *wqe, enum siw_opcode op); |
| 22 | int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj, |
| 23 | u64 start, u64 len, int rights); |
| 24 | void siw_mr_drop_mem(struct siw_mr *mr); |
| 25 | void siw_free_mem(struct kref *ref); |
| 26 | |
| 27 | static inline void siw_mem_put(struct siw_mem *mem) |
| 28 | { |
| 29 | kref_put(kref: &mem->ref, release: siw_free_mem); |
| 30 | } |
| 31 | |
| 32 | static inline void siw_unref_mem_sgl(struct siw_mem **mem, unsigned int num_sge) |
| 33 | { |
| 34 | while (num_sge) { |
| 35 | if (*mem == NULL) |
| 36 | break; |
| 37 | |
| 38 | siw_mem_put(mem: *mem); |
| 39 | *mem = NULL; |
| 40 | mem++; |
| 41 | num_sge--; |
| 42 | } |
| 43 | } |
| 44 | |
#define CHUNK_SHIFT 9 /* sets number of pages per chunk */
#define PAGES_PER_CHUNK (_AC(1, UL) << CHUNK_SHIFT) /* 2^9 = 512 pages per chunk */
#define CHUNK_MASK (~(PAGES_PER_CHUNK - 1)) /* ~mask selects the in-chunk page index */
#define PAGE_CHUNK_SIZE (PAGES_PER_CHUNK * sizeof(struct page *)) /* bytes of one chunk's plist */
| 49 | |
| 50 | /* |
| 51 | * siw_get_upage() |
| 52 | * |
| 53 | * Get page pointer for address on given umem. |
| 54 | * |
| 55 | * @umem: two dimensional list of page pointers |
| 56 | * @addr: user virtual address |
| 57 | */ |
| 58 | static inline struct page *siw_get_upage(struct siw_umem *umem, u64 addr) |
| 59 | { |
| 60 | unsigned int page_idx = (addr - umem->fp_addr) >> PAGE_SHIFT, |
| 61 | chunk_idx = page_idx >> CHUNK_SHIFT, |
| 62 | page_in_chunk = page_idx & ~CHUNK_MASK; |
| 63 | |
| 64 | if (likely(page_idx < umem->num_pages)) |
| 65 | return umem->page_chunk[chunk_idx].plist[page_in_chunk]; |
| 66 | |
| 67 | return NULL; |
| 68 | } |
| 69 | #endif |
| 70 | |