/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2015 - 2021 Intel Corporation */
#ifndef IRDMA_VERBS_H
#define IRDMA_VERBS_H

#define IRDMA_MAX_SAVED_PHY_PGADDR 4
#define IRDMA_FLUSH_DELAY_MS 20

#define IRDMA_PKEY_TBL_SZ 1
#define IRDMA_DEFAULT_PKEY 0xFFFF
#define IRDMA_SHADOW_PGCNT 1

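/*
 * Per-process user context: tracks the doorbell mmap entry and the
 * CQ/QP/SRQ registration memory handed in from user space, with each
 * registration list protected by its own lock.
 */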
struct irdma_ucontext {
	struct ib_ucontext ibucontext;
	struct irdma_device *iwdev;
	struct rdma_user_mmap_entry *db_mmap_entry;
	struct list_head cq_reg_mem_list;
	spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
	struct list_head qp_reg_mem_list;
	spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
	struct list_head srq_reg_mem_list;
	spinlock_t srq_reg_mem_list_lock; /* protect SRQ memory list */
	int abi_ver;
	u8 legacy_mode : 1;
	u8 use_raw_attrs : 1;
};

struct irdma_pd {
	struct ib_pd ibpd;
	struct irdma_sc_pd sc_pd;
};

union irdma_sockaddr {
	struct sockaddr_in saddr_in;
	struct sockaddr_in6 saddr_in6;
};

struct irdma_av {
	u8 macaddr[16];
	struct rdma_ah_attr attrs;
	union irdma_sockaddr sgid_addr;
	union irdma_sockaddr dgid_addr;
	u8 net_type;
};

struct irdma_ah {
	struct ib_ah ibah;
	struct irdma_sc_ah sc_ah;
	struct irdma_pd *pd;
	struct irdma_av av;
	u8 sgid_index;
	union ib_gid dgid;
	struct hlist_node list;
	refcount_t refcnt;
	struct irdma_ah *parent_ah; /* AH from cached list */
};

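/*
 * Reference to a queue buffer in host memory: either an index into
 * the PBLE (physical buffer list entry) pool or, for a physically
 * contiguous buffer, its DMA address.
 */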
struct irdma_hmc_pble {
	union {
		u32 idx;
		dma_addr_t addr;
	};
};

struct irdma_cq_mr {
	struct irdma_hmc_pble cq_pbl;
	dma_addr_t shadow;
	bool split;
};

struct irdma_srq_mr {
	struct irdma_hmc_pble srq_pbl;
	dma_addr_t shadow;
};

struct irdma_qp_mr {
	struct irdma_hmc_pble sq_pbl;
	struct irdma_hmc_pble rq_pbl;
	dma_addr_t shadow;
	dma_addr_t rq_pa;
	struct page *sq_page;
};

struct irdma_cq_buf {
	struct irdma_dma_mem kmem_buf;
	struct irdma_cq_uk cq_uk;
	struct irdma_hw *hw;
	struct list_head list;
	struct work_struct work;
};

struct irdma_pbl {
	struct list_head list;
	union {
		struct irdma_qp_mr qp_mr;
		struct irdma_cq_mr cq_mr;
		struct irdma_srq_mr srq_mr;
	};

	bool pbl_allocated:1;
	bool on_list:1;
	u64 user_base;
	struct irdma_pble_alloc pble_alloc;
	struct irdma_mr *iwmr;
};

struct irdma_mr {
	union {
		struct ib_mr ibmr;
		struct ib_mw ibmw;
	};
	struct ib_umem *region;
	int access;
	bool is_hwreg:1;
	bool dma_mr:1;
	u16 type;
	u32 page_cnt;
	u64 page_size;
	u32 npages;
	u32 stag;
	u64 len;
	u64 pgaddrmem[IRDMA_MAX_SAVED_PHY_PGADDR];
	struct irdma_pbl iwpbl;
};

struct irdma_srq {
	struct ib_srq ibsrq;
	struct irdma_sc_srq sc_srq __aligned(64);
	struct irdma_dma_mem kmem;
	u64 *srq_wrid_mem;
	refcount_t refcnt;
	spinlock_t lock; /* for poll srq */
	struct irdma_pbl *iwpbl;
	struct irdma_sge *sg_list;
	u16 srq_head;
	u32 srq_num;
	u32 max_wr;
	bool user_mode:1;
};

struct irdma_cq {
	struct ib_cq ibcq;
	struct irdma_sc_cq sc_cq;
	u32 cq_num;
	bool user_mode;
	atomic_t armed;
	enum irdma_cmpl_notify last_notify;
	struct irdma_dma_mem kmem;
	struct irdma_dma_mem kmem_shadow;
	struct completion free_cq;
	refcount_t refcnt;
	spinlock_t lock; /* for poll cq */
	struct list_head resize_list;
	struct irdma_cq_poll_info cur_cqe;
	struct list_head cmpl_generated;
};

struct irdma_cmpl_gen {
	struct list_head list;
	struct irdma_cq_poll_info cpi;
};

struct disconn_work {
	struct work_struct work;
	struct irdma_qp *iwqp;
};

struct iw_cm_id;

struct irdma_qp_kmode {
	struct irdma_dma_mem dma_mem;
	struct irdma_sq_uk_wr_trk_info *sq_wrid_mem;
	u64 *rq_wrid_mem;
};

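/*
 * Combined QP state for both transports: the offload-info unions are
 * selected by transport type (iwarp_info/tcp_info for iWARP,
 * roce_info/udp_info for RoCEv2).
 */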
struct irdma_qp {
	struct ib_qp ibqp;
	struct irdma_sc_qp sc_qp;
	struct irdma_device *iwdev;
	struct irdma_cq *iwscq;
	struct irdma_cq *iwrcq;
	struct irdma_pd *iwpd;
	struct rdma_user_mmap_entry *push_wqe_mmap_entry;
	struct rdma_user_mmap_entry *push_db_mmap_entry;
	struct irdma_qp_host_ctx_info ctx_info;
	union {
		struct irdma_iwarp_offload_info iwarp_info;
		struct irdma_roce_offload_info roce_info;
	};

	union {
		struct irdma_tcp_offload_info tcp_info;
		struct irdma_udp_offload_info udp_info;
	};

	struct irdma_ah roce_ah;
	struct list_head teardown_entry;
	refcount_t refcnt;
	struct iw_cm_id *cm_id;
	struct irdma_cm_node *cm_node;
	struct delayed_work dwork_flush;
	struct ib_mr *lsmm_mr;
	atomic_t hw_mod_qp_pend;
	enum ib_qp_state ibqp_state;
	u32 qp_mem_size;
	u32 last_aeq;
	int max_send_wr;
	int max_recv_wr;
	atomic_t close_timer_started;
	spinlock_t lock; /* serialize posting WRs to SQ/RQ */
	struct irdma_qp_context *iwqp_context;
	void *pbl_vbase;
	dma_addr_t pbl_pbase;
	struct page *page;
	u8 active_conn : 1;
	u8 user_mode : 1;
	u8 hte_added : 1;
	u8 flush_issued : 1;
	u8 sig_all : 1;
	u8 pau_mode : 1;
	u8 suspend_pending : 1;
	u8 rsvd : 1;
	u8 iwarp_state;
	u16 term_sq_flush_code;
	u16 term_rq_flush_code;
	u8 hw_iwarp_state;
	u8 hw_tcp_state;
	struct irdma_qp_kmode kqp;
	struct irdma_dma_mem host_ctx;
	struct timer_list terminate_timer;
	struct irdma_pbl *iwpbl;
	struct irdma_dma_mem q2_ctx_mem;
	struct irdma_dma_mem ietf_mem;
	struct completion free_qp;
	wait_queue_head_t waitq;
	wait_queue_head_t mod_qp_waitq;
	u8 rts_ae_rcvd;
};

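/*
 * Mapping attribute for user-mapped BAR pages: IRDMA_MMAP_IO_NC maps
 * the page non-cached, IRDMA_MMAP_IO_WC maps it write-combined.
 */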
enum irdma_mmap_flag {
	IRDMA_MMAP_IO_NC,
	IRDMA_MMAP_IO_WC,
};

struct irdma_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u64 bar_offset;
	u8 mmap_flag;
};

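/*
 * irdma_fw_major_ver - extract the firmware major version from the
 * cached FW_INFO feature word
 */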
static inline u16 irdma_fw_major_ver(struct irdma_sc_dev *dev)
{
	return (u16)FIELD_GET(IRDMA_FW_VER_MAJOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}

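/*
 * irdma_fw_minor_ver - extract the firmware minor version from the
 * cached FW_INFO feature word
 */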
static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
{
	return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}

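/*
 * set_ib_wc_op_sq - translate an SQ completion opcode into the
 * matching ib_wc opcode; an unrecognized opcode is reported as
 * IB_WC_GENERAL_ERR in entry->status.
 */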
static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
				   struct ib_wc *entry)
{
	switch (cq_poll_info->op_type) {
	case IRDMA_OP_TYPE_RDMA_WRITE:
	case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
		entry->opcode = IB_WC_RDMA_WRITE;
		break;
	case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
	case IRDMA_OP_TYPE_RDMA_READ:
		entry->opcode = IB_WC_RDMA_READ;
		break;
	case IRDMA_OP_TYPE_SEND_SOL:
	case IRDMA_OP_TYPE_SEND_SOL_INV:
	case IRDMA_OP_TYPE_SEND_INV:
	case IRDMA_OP_TYPE_SEND:
		entry->opcode = IB_WC_SEND;
		break;
	case IRDMA_OP_TYPE_FAST_REG_NSMR:
		entry->opcode = IB_WC_REG_MR;
		break;
	case IRDMA_OP_TYPE_ATOMIC_COMPARE_AND_SWAP:
		entry->opcode = IB_WC_COMP_SWAP;
		break;
	case IRDMA_OP_TYPE_ATOMIC_FETCH_AND_ADD:
		entry->opcode = IB_WC_FETCH_ADD;
		break;
	case IRDMA_OP_TYPE_INV_STAG:
		entry->opcode = IB_WC_LOCAL_INV;
		break;
	default:
		entry->status = IB_WC_GENERAL_ERR;
	}
}

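/*
 * set_ib_wc_op_rq_gen_3 - translate an RQ completion opcode on GEN3
 * hardware; an RDMA Write seen on the RQ can only be a Write with
 * Immediate, everything else completes as a plain receive.
 */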
static inline void set_ib_wc_op_rq_gen_3(struct irdma_cq_poll_info *info,
					 struct ib_wc *entry)
{
	switch (info->op_type) {
	case IRDMA_OP_TYPE_RDMA_WRITE:
	case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
		entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		break;
	default:
		entry->opcode = IB_WC_RECV;
	}
}

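/*
 * set_ib_wc_op_rq - translate an RQ completion opcode;
 * @send_imm_support tells whether the transport can generate Send
 * with Immediate (false on iWARP).
 */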
static inline void set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
				   struct ib_wc *entry, bool send_imm_support)
{
	/* iWARP does not support Send with Immediate, so a completion
	 * carrying immediate data must come from an RDMA Write with
	 * Immediate.
	 */
	if (!send_imm_support) {
		entry->opcode = cq_poll_info->imm_valid ?
				IB_WC_RECV_RDMA_WITH_IMM :
				IB_WC_RECV;
		return;
	}

	switch (cq_poll_info->op_type) {
	case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
	case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
		entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		break;
	default:
		entry->opcode = IB_WC_RECV;
	}
}

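/*
 * A minimal sketch of how a CQ poll path might use the helpers above.
 * The q_type check and the IRDMA_CQE_QTYPE_* / qp_caps names are
 * assumptions borrowed from the driver's poll code, not definitions
 * from this header:
 *
 *	if (info->q_type == IRDMA_CQE_QTYPE_SQ)
 *		set_ib_wc_op_sq(info, entry);
 *	else
 *		set_ib_wc_op_rq(info, entry,
 *				qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM);
 */
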
void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4);
int irdma_ib_register_device(struct irdma_device *iwdev);
void irdma_ib_unregister_device(struct irdma_device *iwdev);
void irdma_ib_dealloc_device(struct ib_device *ibdev);
void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
void irdma_generate_flush_completions(struct irdma_qp *iwqp);
void irdma_remove_cmpls_list(struct irdma_cq *iwcq);
int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info);
#endif /* IRDMA_VERBS_H */