// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2004-2007 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>
#include <linux/etherdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sysfs.h>
#include "cm_msgs.h"
#include "core_priv.h"
#include "cm_trace.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

#define CM_DESTROY_ID_WAIT_TIMEOUT 10000 /* msecs */
#define CM_DIRECT_RETRY_CTX ((void *) 1UL)
#define CM_MRA_SETTING 24 /* 4.096us * 2^24 = ~68.7 seconds */

static const char * const ibcm_rej_reason_strs[] = {
	[IB_CM_REJ_NO_QP]			= "no QP",
	[IB_CM_REJ_NO_EEC]			= "no EEC",
	[IB_CM_REJ_NO_RESOURCES]		= "no resources",
	[IB_CM_REJ_TIMEOUT]			= "timeout",
	[IB_CM_REJ_UNSUPPORTED]			= "unsupported",
	[IB_CM_REJ_INVALID_COMM_ID]		= "invalid comm ID",
	[IB_CM_REJ_INVALID_COMM_INSTANCE]	= "invalid comm instance",
	[IB_CM_REJ_INVALID_SERVICE_ID]		= "invalid service ID",
	[IB_CM_REJ_INVALID_TRANSPORT_TYPE]	= "invalid transport type",
	[IB_CM_REJ_STALE_CONN]			= "stale conn",
	[IB_CM_REJ_RDC_NOT_EXIST]		= "RDC not exist",
	[IB_CM_REJ_INVALID_GID]			= "invalid GID",
	[IB_CM_REJ_INVALID_LID]			= "invalid LID",
	[IB_CM_REJ_INVALID_SL]			= "invalid SL",
	[IB_CM_REJ_INVALID_TRAFFIC_CLASS]	= "invalid traffic class",
	[IB_CM_REJ_INVALID_HOP_LIMIT]		= "invalid hop limit",
	[IB_CM_REJ_INVALID_PACKET_RATE]		= "invalid packet rate",
	[IB_CM_REJ_INVALID_ALT_GID]		= "invalid alt GID",
	[IB_CM_REJ_INVALID_ALT_LID]		= "invalid alt LID",
	[IB_CM_REJ_INVALID_ALT_SL]		= "invalid alt SL",
	[IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS]	= "invalid alt traffic class",
	[IB_CM_REJ_INVALID_ALT_HOP_LIMIT]	= "invalid alt hop limit",
	[IB_CM_REJ_INVALID_ALT_PACKET_RATE]	= "invalid alt packet rate",
	[IB_CM_REJ_PORT_CM_REDIRECT]		= "port CM redirect",
	[IB_CM_REJ_PORT_REDIRECT]		= "port redirect",
	[IB_CM_REJ_INVALID_MTU]			= "invalid MTU",
	[IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES]	= "insufficient resp resources",
	[IB_CM_REJ_CONSUMER_DEFINED]		= "consumer defined",
	[IB_CM_REJ_INVALID_RNR_RETRY]		= "invalid RNR retry",
	[IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID]	= "duplicate local comm ID",
	[IB_CM_REJ_INVALID_CLASS_VERSION]	= "invalid class version",
	[IB_CM_REJ_INVALID_FLOW_LABEL]		= "invalid flow label",
	[IB_CM_REJ_INVALID_ALT_FLOW_LABEL]	= "invalid alt flow label",
	[IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED] =
		"vendor option is not supported",
};

const char *__attribute_const__ ibcm_reject_msg(int reason)
{
	size_t index = reason;

	if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
	    ibcm_rej_reason_strs[index])
		return ibcm_rej_reason_strs[index];
	else
		return "unrecognized reason";
}
EXPORT_SYMBOL(ibcm_reject_msg);

struct cm_id_private;
struct cm_work;
static int cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);
static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work);
static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
				   struct ib_cm_sidr_rep_param *param);
static void cm_issue_dreq(struct cm_id_private *cm_id_priv);
static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
			       void *private_data, u8 private_data_len);
static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
			      enum ib_cm_rej_reason reason, void *ari,
			      u8 ari_length, const void *private_data,
			      u8 private_data_len);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};

static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct xarray local_id_table;
	u32 local_id_next;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;
} cm;

/* Counter indexes ordered by attribute ID */
enum {
	CM_REQ_COUNTER,
	CM_MRA_COUNTER,
	CM_REJ_COUNTER,
	CM_REP_COUNTER,
	CM_RTU_COUNTER,
	CM_DREQ_COUNTER,
	CM_DREP_COUNTER,
	CM_SIDR_REQ_COUNTER,
	CM_SIDR_REP_COUNTER,
	CM_LAP_COUNTER,
	CM_APR_COUNTER,
	CM_ATTR_COUNT,
	CM_ATTR_ID_OFFSET = 0x0010,
};

enum {
	CM_XMIT,
	CM_XMIT_RETRIES,
	CM_RECV,
	CM_RECV_DUPLICATES,
	CM_COUNTER_GROUPS
};

struct cm_counter_attribute {
	struct ib_port_attribute attr;
	unsigned short group;
	unsigned short index;
};

struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	u32 port_num;
	atomic_long_t counters[CM_COUNTER_GROUPS][CM_ATTR_COUNT];
};

struct cm_device {
	struct kref kref;
	struct list_head list;
	rwlock_t mad_agent_lock;
	struct ib_device *ib_device;
	u8 ack_delay;
	int going_down;
	struct cm_port *port[];
};

struct cm_av {
	struct cm_port *port;
	struct rdma_ah_attr ah_attr;
	u16 dlid_datapath;
	u16 pkey_index;
	u8 timeout;
};

struct cm_work {
	struct delayed_work work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct sa_path_rec path[];
};

struct cm_timewait_info {
	struct cm_work work;
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	u32 sidr_slid;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	refcount_t refcount;
	/* Number of clients sharing this ib_cm_id. Only valid for listeners.
	 * Protected by the cm.lock spinlock.
	 */
	int listen_sharecount;
	struct rcu_head rcu;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	__be16 pkey;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 responder_resources;
	u8 initiator_depth;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 target_ack_delay;

	struct list_head work_list;
	atomic_t work_count;

	struct rdma_ucm_ece ece;
};

static void cm_dev_release(struct kref *kref)
{
	struct cm_device *cm_dev = container_of(kref, struct cm_device, kref);
	u32 i;

	rdma_for_each_port(cm_dev->ib_device, i)
		kfree(cm_dev->port[i - 1]);

	kfree(cm_dev);
}

static void cm_device_put(struct cm_device *cm_dev)
{
	kref_put(&cm_dev->kref, cm_dev_release);
}

static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (refcount_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}

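/*
 * Allocate a MAD send buffer for this cm_id. The mad_agent_lock is held
 * for read so that a concurrent cm_remove_one() cannot free the MAD agent
 * underneath us, and GFP_ATOMIC is used because callers hold
 * cm_id_priv->lock.
 */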
static struct ib_mad_send_buf *cm_alloc_msg(struct cm_id_private *cm_id_priv)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	lockdep_assert_held(&cm_id_priv->lock);

	if (!cm_id_priv->av.port)
		return ERR_PTR(-EINVAL);

	read_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
	mad_agent = cm_id_priv->av.port->mad_agent;
	if (!mad_agent) {
		m = ERR_PTR(-EINVAL);
		goto out;
	}

	ah = rdma_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr, 0);
	if (IS_ERR(ah)) {
		m = ERR_CAST(ah);
		goto out;
	}

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC,
			       IB_MGMT_BASE_VERSION);
	if (IS_ERR(m)) {
		rdma_destroy_ah(ah, 0);
		goto out;
	}

	m->ah = ah;

out:
	read_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
	return m;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	if (msg->ah)
		rdma_destroy_ah(msg->ah, 0);
	ib_free_send_mad(msg);
}

static struct ib_mad_send_buf *
cm_alloc_priv_msg(struct cm_id_private *cm_id_priv, enum ib_cm_state state)
{
	struct ib_mad_send_buf *msg;

	lockdep_assert_held(&cm_id_priv->lock);

	msg = cm_alloc_msg(cm_id_priv);
	if (IS_ERR(msg))
		return msg;

	cm_id_priv->msg = msg;
	refcount_inc(&cm_id_priv->refcount);
	msg->context[0] = cm_id_priv;
	msg->context[1] = (void *) (unsigned long) state;

	msg->retries = cm_id_priv->max_cm_retries;
	msg->timeout_ms = cm_id_priv->timeout_ms;

	return msg;
}

static void cm_free_priv_msg(struct ib_mad_send_buf *msg)
{
	struct cm_id_private *cm_id_priv = msg->context[0];

	lockdep_assert_held(&cm_id_priv->lock);

	if (!WARN_ON(cm_id_priv->msg != msg))
		cm_id_priv->msg = NULL;

	if (msg->ah)
		rdma_destroy_ah(msg->ah, 0);
	cm_deref_id(cm_id_priv);
	ib_free_send_mad(msg);
}

static struct ib_mad_send_buf *
cm_alloc_response_msg_no_ah(struct cm_port *port,
			    struct ib_mad_recv_wc *mad_recv_wc,
			    bool direct_retry)
{
	struct ib_mad_send_buf *m;

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC, IB_MGMT_BASE_VERSION);
	if (!IS_ERR(m))
		m->context[0] = direct_retry ? CM_DIRECT_RETRY_CTX : NULL;

	return m;
}

static int cm_create_response_msg_ah(struct cm_port *port,
				     struct ib_mad_recv_wc *mad_recv_wc,
				     struct ib_mad_send_buf *msg)
{
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	msg->ah = ah;
	return 0;
}

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 bool direct_retry,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	int ret;

	m = cm_alloc_response_msg_no_ah(port, mad_recv_wc, direct_retry);
	if (IS_ERR(m))
		return PTR_ERR(m);

	ret = cm_create_response_msg_ah(port, mad_recv_wc, m);
	if (ret) {
		ib_free_send_mad(m);
		return ret;
	}

	*msg = m;
	return 0;
}

static void *cm_copy_private_data(const void *private_data, u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static void cm_set_av_port(struct cm_av *av, struct cm_port *port)
{
	struct cm_port *old_port = av->port;

	if (old_port == port)
		return;

	av->port = port;
	if (old_port)
		cm_device_put(old_port->cm_dev);
	if (port)
		kref_get(&port->cm_dev->kref);
}

static void cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc,
			       struct rdma_ah_attr *ah_attr, struct cm_av *av)
{
	cm_set_av_port(av, port);
	av->pkey_index = wc->pkey_index;
	rdma_move_ah_attr(&av->ah_attr, ah_attr);
}

static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				   struct ib_grh *grh, struct cm_av *av)
{
	cm_set_av_port(av, port);
	av->pkey_index = wc->pkey_index;
	return ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
				       port->port_num, wc,
				       grh, &av->ah_attr);
}

static struct cm_port *
get_cm_port_from_path(struct sa_path_rec *path, const struct ib_gid_attr *attr)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;

	if (attr) {
		read_lock_irqsave(&cm.device_lock, flags);
		list_for_each_entry(cm_dev, &cm.device_list, list) {
			if (cm_dev->ib_device == attr->device) {
				port = cm_dev->port[attr->port_num - 1];
				break;
			}
		}
		read_unlock_irqrestore(&cm.device_lock, flags);
	} else {
		/* The SGID attribute can be NULL in the following
		 * conditions:
		 * (a) Alternative path
		 * (b) IB link layer without GRH
		 * (c) LAP send messages
		 */
		read_lock_irqsave(&cm.device_lock, flags);
		list_for_each_entry(cm_dev, &cm.device_list, list) {
			attr = rdma_find_gid(cm_dev->ib_device,
					     &path->sgid,
					     sa_conv_pathrec_to_gid_type(path),
					     NULL);
			if (!IS_ERR(attr)) {
				port = cm_dev->port[attr->port_num - 1];
				break;
			}
		}
		read_unlock_irqrestore(&cm.device_lock, flags);
		if (port)
			rdma_put_gid_attr(attr);
	}
	return port;
}

static int cm_init_av_by_path(struct sa_path_rec *path,
			      const struct ib_gid_attr *sgid_attr,
			      struct cm_av *av)
{
	struct rdma_ah_attr new_ah_attr;
	struct cm_device *cm_dev;
	struct cm_port *port;
	int ret;

	port = get_cm_port_from_path(path, sgid_attr);
	if (!port)
		return -EINVAL;
	cm_dev = port->cm_dev;

	ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	cm_set_av_port(av, port);

	/*
	 * av->ah_attr might have been initialized based on a wc or during
	 * request processing time, in which case it may hold a reference to
	 * sgid_attr. So initialize a new ah_attr on the stack.
	 * If initialization fails, the old ah_attr is still used for sending
	 * any responses. If initialization succeeds, the new ah_attr
	 * overwrites the old one, so that the right ah_attr
	 * can be used to return an error response.
	 */
	ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
					&new_ah_attr, sgid_attr);
	if (ret)
		return ret;

	av->timeout = path->packet_life_time + 1;
	rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
	return 0;
}

/* Move an av created by cm_init_av_by_path(), so av.dgid is not moved */
static void cm_move_av_from_path(struct cm_av *dest, struct cm_av *src)
{
	cm_set_av_port(dest, src->port);
	cm_set_av_port(src, NULL);
	dest->pkey_index = src->pkey_index;
	rdma_move_ah_attr(&dest->ah_attr, &src->ah_attr);
	dest->timeout = src->timeout;
}

static void cm_destroy_av(struct cm_av *av)
{
	rdma_destroy_ah_attr(&av->ah_attr);
	cm_set_av_port(av, NULL);
}

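/*
 * User-visible local IDs are the xarray index XORed with the random
 * cm.random_id_operand (see the assignment in cm_alloc_id_priv()), so
 * on-the-wire communication IDs are not trivially guessable;
 * cm_local_id() undoes the XOR to recover the xarray index.
 */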
static u32 cm_local_id(__be32 local_id)
{
	return (__force u32) (local_id ^ cm.random_id_operand);
}

static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	rcu_read_lock();
	cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
	if (!cm_id_priv || cm_id_priv->id.remote_id != remote_id ||
	    !refcount_inc_not_zero(&cm_id_priv->refcount))
		cm_id_priv = NULL;
	rcu_read_unlock();

	return cm_id_priv;
}

/*
 * Trivial helpers to strip endian annotation and compare; the
 * endianness doesn't actually matter since we just need a stable
 * order for the RB tree.
 */
static int be32_lt(__be32 a, __be32 b)
{
	return (__force u32) a < (__force u32) b;
}

static int be32_gt(__be32 a, __be32 b)
{
	return (__force u32) a > (__force u32) b;
}

static int be64_lt(__be64 a, __be64 b)
{
	return (__force u64) a < (__force u64) b;
}

static int be64_gt(__be64 a, __be64 b)
{
	return (__force u64) a > (__force u64) b;
}

/*
 * Inserts a new cm_id_priv into the listen_service_table. Returns cm_id_priv
 * if the new ID was inserted, NULL if it could not be inserted due to a
 * collision, or the existing cm_id_priv ready for shared usage.
 */
static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
					      ib_cm_handler shared_handler)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_left;
		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_right;
		else {
			/*
			 * Sharing an ib_cm_id with different handlers is not
			 * supported
			 */
			if (cur_cm_id_priv->id.cm_handler != shared_handler ||
			    cur_cm_id_priv->id.context ||
			    WARN_ON(!cur_cm_id_priv->id.cm_handler)) {
				spin_unlock_irqrestore(&cm.lock, flags);
				return NULL;
			}
			refcount_inc(&cur_cm_id_priv->refcount);
			cur_cm_id_priv->listen_sharecount++;
			spin_unlock_irqrestore(&cm.lock, flags);
			return cur_cm_id_priv;
		}
	}
	cm_id_priv->listen_sharecount++;
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	spin_unlock_irqrestore(&cm.lock, flags);
	return cm_id_priv;
}

static struct cm_id_private *cm_find_listen(struct ib_device *device,
					    __be64 service_id)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (be64_lt(service_id, cm_id_priv->id.service_id))
			node = node->rb_left;
		else if (be64_gt(service_id, cm_id_priv->id.service_id))
			node = node->rb_right;
		else {
			refcount_inc(&cm_id_priv->refcount);
			return cm_id_priv;
		}
	}
	return NULL;
}

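/*
 * Timewait entries are indexed twice: by (remote comm ID, remote CA GUID)
 * in remote_id_table and by (remote QPN, remote CA GUID) in
 * remote_qp_table. The insert helpers below return an existing entry on
 * collision instead of inserting, which callers can use to spot stale or
 * duplicated connections.
 */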
static struct cm_timewait_info *
cm_insert_remote_id(struct cm_timewait_info *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_id_private *cm_find_remote_id(__be64 remote_ca_guid,
					       __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *res = NULL;

	spin_lock_irq(&cm.lock);
	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (be32_lt(remote_id, timewait_info->work.remote_id))
			node = node->rb_left;
		else if (be32_gt(remote_id, timewait_info->work.remote_id))
			node = node->rb_right;
		else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_left;
		else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_right;
		else {
			res = cm_acquire_id(timewait_info->work.local_id,
					    timewait_info->work.remote_id);
			break;
		}
	}
	spin_unlock_irq(&cm.lock);
	return res;
}

static struct cm_timewait_info *
cm_insert_remote_qpn(struct cm_timewait_info *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}

static struct cm_id_private *
cm_insert_remote_sidr(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_right;
		else {
			if (cur_cm_id_priv->sidr_slid < cm_id_priv->sidr_slid)
				link = &(*link)->rb_left;
			else if (cur_cm_id_priv->sidr_slid > cm_id_priv->sidr_slid)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

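/*
 * Allocate a cm_id_private with a single reference held by the caller and
 * work_count primed to -1 for cm_queue_work_unlock(). The new ID is not
 * visible to the MAD handlers until cm_finalize_id() publishes it in the
 * xarray.
 */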
static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
					      ib_cm_handler cm_handler,
					      void *context)
{
	struct cm_id_private *cm_id_priv;
	u32 id;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;

	RB_CLEAR_NODE(&cm_id_priv->service_node);
	RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	refcount_set(&cm_id_priv->refcount, 1);

	ret = xa_alloc_cyclic(&cm.local_id_table, &id, NULL, xa_limit_32b,
			      &cm.local_id_next, GFP_KERNEL);
	if (ret < 0)
		goto error;
	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;

	return cm_id_priv;

error:
	kfree(cm_id_priv);
	return ERR_PTR(ret);
}

/*
 * Make the ID visible to the MAD handlers and other threads that use the
 * xarray.
 */
static void cm_finalize_id(struct cm_id_private *cm_id_priv)
{
	xa_store(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
		 cm_id_priv, GFP_ATOMIC);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = cm_alloc_id_priv(device, cm_handler, context);
	if (IS_ERR(cm_id_priv))
		return ERR_CAST(cm_id_priv);

	cm_finalize_id(cm_id_priv);
	return &cm_id_priv->id;
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work *cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
				 struct cm_work *work)
	__releases(&cm_id_priv->lock)
{
	bool immediate;

	/*
	 * To deliver the event to the user callback we have to drop the
	 * spinlock; however, we need to ensure that the user callback is
	 * single threaded and receives events in temporal order. If there
	 * are already events being processed then thread new events onto a
	 * list, and the thread currently processing will pick them up.
	 */
	immediate = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!immediate) {
		list_add_tail(&work->list, &cm_id_priv->work_list);
		/*
		 * This routine always consumes the incoming reference. Once
		 * queued to the work_list then a reference is held by the
		 * thread currently running cm_process_work() and this
		 * reference is not needed.
		 */
		cm_deref_id(cm_id_priv);
	}
	spin_unlock_irq(&cm_id_priv->lock);

	if (immediate)
		cm_process_work(cm_id_priv, work);
}

static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}
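
/*
 * For example, iba_time = 20 encodes 4.096us * 2^20 ~= 4.3 seconds;
 * cm_convert_to_ms() above approximates that as 1 << (20 - 8) = 4096 ms.
 */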

/*
 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
 * Because of how ack_timeout is stored, adding one doubles the timeout.
 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 * increment it (round up) only if the other is within 50%.
 */
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
{
	int ack_timeout = packet_life_time + 1;

	if (ack_timeout >= ca_ack_delay)
		ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
	else
		ack_timeout = ca_ack_delay +
			      (ack_timeout >= (ca_ack_delay - 1));

	return min(31, ack_timeout);
}
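
/*
 * Worked example for cm_ack_timeout() above: with ca_ack_delay = 15 and
 * packet_life_time = 14, ack_timeout starts at 15; ca_ack_delay (15) is
 * >= ack_timeout - 1 (14), i.e. within 50%, so the result is rounded up
 * to 16, encoding 4.096us * 2^16 ~= 268 ms.
 */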

static void cm_remove_remote(struct cm_id_private *cm_id_priv)
{
	struct cm_timewait_info *timewait_info = cm_id_priv->timewait_info;

	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
}

static struct cm_timewait_info *cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}

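/*
 * Move a connection into timewait: drop the remote ID/QPN rbtree entries,
 * queue the timewait_info on cm.timewait_list, and schedule the delayed
 * work that will eventually deliver IB_CM_TIMEWAIT_EXIT, unless the
 * device has already started its remove_one.
 */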
static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;
	unsigned long flags;
	struct cm_device *cm_dev;

	lockdep_assert_held(&cm_id_priv->lock);

	cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
	if (!cm_dev)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	cm_remove_remote(cm_id_priv);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);

	/* Check if the device started its remove_one */
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_dev->going_down)
		queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
				   msecs_to_jiffies(wait_time));
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The timewait_info is converted into a work and gets freed during
	 * cm_free_work() in cm_timewait_handler().
	 */
	BUILD_BUG_ON(offsetof(struct cm_timewait_info, work) != 0);
	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;

	lockdep_assert_held(&cm_id_priv->lock);

	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_remove_remote(cm_id_priv);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}

static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,
						enum ib_cm_state old_state)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
	       cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
}

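/*
 * The retest loop below unwinds the connection state machine, sending
 * whatever REJ/DREQ/DREP/SIDR_REP the current state requires, until the
 * ID reaches IB_CM_IDLE. The ID is then dropped from the xarray and we
 * wait, warning periodically, until every reference is released.
 */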
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	enum ib_cm_state old_state;
	struct cm_work *work;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irq(&cm_id_priv->lock);
	old_state = cm_id->state;
retest:
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		spin_lock(&cm.lock);
		if (--cm_id_priv->listen_sharecount > 0) {
			/* The id is still shared. */
			WARN_ON(refcount_read(&cm_id_priv->refcount) == 1);
			spin_unlock(&cm.lock);
			spin_unlock_irq(&cm_id_priv->lock);
			cm_deref_id(cm_id_priv);
			return;
		}
		cm_id->state = IB_CM_IDLE;
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		RB_CLEAR_NODE(&cm_id_priv->service_node);
		spin_unlock(&cm.lock);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->msg);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		cm_send_sidr_rep_locked(cm_id_priv,
					&(struct ib_cm_sidr_rep_param){
						.status = IB_SIDR_REJECT });
		/* cm_send_sidr_rep_locked will not move to IDLE if it fails */
		cm_id->state = IB_CM_IDLE;
		break;
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		ib_cancel_mad(cm_id_priv->msg);
		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_TIMEOUT,
				   &cm_id_priv->id.device->node_guid,
				   sizeof(cm_id_priv->id.device->node_guid),
				   NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
		} else {
			cm_send_rej_locked(cm_id_priv,
					   IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
					   NULL, 0);
		}
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->msg);
		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
				   0, NULL, 0);
		goto retest;
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
				   0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
			cm_id->state = IB_CM_IDLE;
			break;
		}
		cm_issue_dreq(cm_id_priv);
		cm_enter_timewait(cm_id_priv);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		goto retest;
	case IB_CM_DREQ_RCVD:
		cm_send_drep_locked(cm_id_priv, NULL, 0);
		WARN_ON(cm_id->state != IB_CM_TIMEWAIT);
		goto retest;
	case IB_CM_TIMEWAIT:
		/*
		 * The cm_acquire_id in cm_timewait_handler will stop working
		 * once we do xa_erase below, so just move to idle here for
		 * consistency.
		 */
		cm_id->state = IB_CM_IDLE;
		break;
	case IB_CM_IDLE:
		break;
	}
	WARN_ON(cm_id->state != IB_CM_IDLE);

	spin_lock(&cm.lock);
	/* Required for cleanup paths related to cm_req_handler() */
	if (cm_id_priv->timewait_info) {
		cm_remove_remote(cm_id_priv);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}

	WARN_ON(cm_id_priv->listen_sharecount);
	WARN_ON(!RB_EMPTY_NODE(&cm_id_priv->service_node));
	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	spin_unlock(&cm.lock);
	spin_unlock_irq(&cm_id_priv->lock);

	xa_erase(&cm.local_id_table, cm_local_id(cm_id->local_id));
	cm_deref_id(cm_id_priv);
	do {
		ret = wait_for_completion_timeout(&cm_id_priv->comp,
						  msecs_to_jiffies(
							CM_DESTROY_ID_WAIT_TIMEOUT));
		if (!ret) /* timeout happened */
			cm_destroy_id_wait_timeout(cm_id, old_state);
	} while (!ret);

	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);

	cm_destroy_av(&cm_id_priv->av);
	cm_destroy_av(&cm_id_priv->alt_av);
	kfree(cm_id_priv->private_data);
	kfree_rcu(cm_id_priv, rcu);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);

static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id)
{
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	if (service_id == IB_CM_ASSIGN_SERVICE_ID)
		cm_id_priv->id.service_id = cpu_to_be64(cm.listen_service_id++);
	else
		cm_id_priv->id.service_id = service_id;

	return 0;
}

/**
 * ib_cm_listen - Initiates listening on the specified service ID for
 *   connection and service ID resolution requests.
 * @cm_id: Connection identifier associated with the listen request.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests. The service ID should be specified
 *   in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 */
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id)
{
	struct cm_id_private *cm_id_priv =
		container_of(cm_id, struct cm_id_private, id);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_IDLE) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_init_listen(cm_id_priv, service_id);
	if (ret)
		goto out;

	if (!cm_insert_listen(cm_id_priv, NULL)) {
		ret = -EBUSY;
		goto out;
	}

	cm_id_priv->id.state = IB_CM_LISTEN;
	ret = 0;

out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);

/**
 * ib_cm_insert_listen - Create a new listening ib_cm_id and listen on
 *   the given service ID.
 *
 * If there's an existing ID listening on that same device and service ID,
 * return it.
 *
 * @device: Device associated with the cm_id. All related communication will
 *   be associated with the specified device.
 * @cm_handler: Callback invoked to notify the user of CM events.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests. The service ID should be specified
 *   in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 *
 * Callers should call ib_destroy_cm_id when done with the listener ID.
 */
struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
				     ib_cm_handler cm_handler,
				     __be64 service_id)
{
	struct cm_id_private *listen_id_priv;
	struct cm_id_private *cm_id_priv;
	int err = 0;

	/* Create an ID in advance, since the creation may sleep */
	cm_id_priv = cm_alloc_id_priv(device, cm_handler, NULL);
	if (IS_ERR(cm_id_priv))
		return ERR_CAST(cm_id_priv);

	err = cm_init_listen(cm_id_priv, service_id);
	if (err) {
		ib_destroy_cm_id(&cm_id_priv->id);
		return ERR_PTR(err);
	}

	spin_lock_irq(&cm_id_priv->lock);
	listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler);
	if (listen_id_priv != cm_id_priv) {
		spin_unlock_irq(&cm_id_priv->lock);
		ib_destroy_cm_id(&cm_id_priv->id);
		if (!listen_id_priv)
			return ERR_PTR(-EINVAL);
		return &listen_id_priv->id;
	}
	cm_id_priv->id.state = IB_CM_LISTEN;
	spin_unlock_irq(&cm_id_priv->lock);

	/*
	 * A listen ID does not need to be in the xarray since it does not
	 * receive mads, is not placed in the remote_id or remote_qpn rbtree,
	 * and does not enter timewait.
	 */

	return &cm_id_priv->id;
}
EXPORT_SYMBOL(ib_cm_insert_listen);

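/*
 * Build the 64-bit MAD transaction ID: the owning MAD agent's hi_tid goes
 * in the upper 32 bits and the cm_id's local_id in the lower 32 bits, so
 * responses can be demultiplexed back to the right agent and connection.
 */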
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv)
{
	u64 hi_tid = 0, low_tid;

	lockdep_assert_held(&cm_id_priv->lock);

	low_tid = (u64)cm_id_priv->id.local_id;
	if (!cm_id_priv->av.port)
		return cpu_to_be64(low_tid);

	read_lock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
	if (cm_id_priv->av.port->mad_agent)
		hi_tid = ((u64)cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	read_unlock(&cm_id_priv->av.port->cm_dev->mad_agent_lock);
	return cpu_to_be64(hi_tid | low_tid);
}

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method = IB_MGMT_METHOD_SEND;
	hdr->attr_id = attr_id;
	hdr->tid = tid;
}

static void cm_format_mad_ece_hdr(struct ib_mad_hdr *hdr, __be16 attr_id,
				  __be64 tid, u32 attr_mod)
{
	cm_format_mad_hdr(hdr, attr_id, tid);
	hdr->attr_mod = cpu_to_be32(attr_mod);
}

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	struct sa_path_rec *pri_path = param->primary_path;
	struct sa_path_rec *alt_path = param->alternate_path;
	bool pri_ext = false;
	__be16 lid;

	if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA)
		pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
					      pri_path->opa.slid);

	cm_format_mad_ece_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			      cm_form_tid(cm_id_priv), param->ece.attr_mod);

	IBA_SET(CM_REQ_LOCAL_COMM_ID, req_msg,
		be32_to_cpu(cm_id_priv->id.local_id));
	IBA_SET(CM_REQ_SERVICE_ID, req_msg, be64_to_cpu(param->service_id));
	IBA_SET(CM_REQ_LOCAL_CA_GUID, req_msg,
		be64_to_cpu(cm_id_priv->id.device->node_guid));
	IBA_SET(CM_REQ_LOCAL_QPN, req_msg, param->qp_num);
	IBA_SET(CM_REQ_INITIATOR_DEPTH, req_msg, param->initiator_depth);
	IBA_SET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg,
		param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	IBA_SET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg, param->flow_control);
	IBA_SET(CM_REQ_STARTING_PSN, req_msg, param->starting_psn);
	IBA_SET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg,
		param->local_cm_response_timeout);
	IBA_SET(CM_REQ_PARTITION_KEY, req_msg,
		be16_to_cpu(param->primary_path->pkey));
	IBA_SET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg,
		param->primary_path->mtu);
	IBA_SET(CM_REQ_MAX_CM_RETRIES, req_msg, param->max_cm_retries);

	if (param->qp_type != IB_QPT_XRC_INI) {
		IBA_SET(CM_REQ_RESPONDER_RESOURCES, req_msg,
			param->responder_resources);
		IBA_SET(CM_REQ_RETRY_COUNT, req_msg, param->retry_count);
		IBA_SET(CM_REQ_RNR_RETRY_COUNT, req_msg,
			param->rnr_retry_count);
		IBA_SET(CM_REQ_SRQ, req_msg, param->srq);
	}

	*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg) =
		pri_path->sgid;
	*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg) =
		pri_path->dgid;
	if (pri_ext) {
		IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg)
			->global.interface_id =
			OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
		IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg)
			->global.interface_id =
			OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
	}
	if (pri_path->hop_limit <= 1) {
		IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
			be16_to_cpu(pri_ext ? 0 :
					      htons(ntohl(sa_path_get_slid(
						      pri_path)))));
		IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
			be16_to_cpu(pri_ext ? 0 :
					      htons(ntohl(sa_path_get_dlid(
						      pri_path)))));
	} else {
		if (param->primary_path_inbound) {
			lid = param->primary_path_inbound->ib.dlid;
			IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
				be16_to_cpu(lid));
		} else
			IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
				be16_to_cpu(IB_LID_PERMISSIVE));

		/* Work-around until there's a way to obtain remote LID info */
		IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
			be16_to_cpu(IB_LID_PERMISSIVE));
	}
	IBA_SET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg,
		be32_to_cpu(pri_path->flow_label));
	IBA_SET(CM_REQ_PRIMARY_PACKET_RATE, req_msg, pri_path->rate);
	IBA_SET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg, pri_path->traffic_class);
	IBA_SET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg, pri_path->hop_limit);
	IBA_SET(CM_REQ_PRIMARY_SL, req_msg, pri_path->sl);
	IBA_SET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg,
		(pri_path->hop_limit <= 1));
	IBA_SET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       pri_path->packet_life_time));

	if (alt_path) {
		bool alt_ext = false;

		if (alt_path->rec_type == SA_PATH_REC_TYPE_OPA)
			alt_ext = opa_is_extended_lid(alt_path->opa.dlid,
						      alt_path->opa.slid);

		*IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg) =
			alt_path->sgid;
		*IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg) =
			alt_path->dgid;
		if (alt_ext) {
			IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
					req_msg)
				->global.interface_id =
				OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
			IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID,
					req_msg)
				->global.interface_id =
				OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
		}
		if (alt_path->hop_limit <= 1) {
			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
				be16_to_cpu(
					alt_ext ? 0 :
						  htons(ntohl(sa_path_get_slid(
							  alt_path)))));
			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
				be16_to_cpu(
					alt_ext ? 0 :
						  htons(ntohl(sa_path_get_dlid(
							  alt_path)))));
		} else {
			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
				be16_to_cpu(IB_LID_PERMISSIVE));
			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
				be16_to_cpu(IB_LID_PERMISSIVE));
		}
		IBA_SET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg,
			be32_to_cpu(alt_path->flow_label));
		IBA_SET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg, alt_path->rate);
		IBA_SET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg,
			alt_path->traffic_class);
		IBA_SET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg,
			alt_path->hop_limit);
		IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, alt_path->sl);
		IBA_SET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg,
			(alt_path->hop_limit <= 1));
		IBA_SET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg,
			cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
				       alt_path->packet_life_time));
	}
	IBA_SET(CM_REQ_VENDOR_ID, req_msg, param->ece.vendor_id);

	if (param->private_data && param->private_data_len)
		IBA_SET_MEM(CM_REQ_PRIVATE_DATA, req_msg, param->private_data,
			    param->private_data_len);
}
| 1475 | |
| 1476 | static int cm_validate_req_param(struct ib_cm_req_param *param) |
| 1477 | { |
| 1478 | if (!param->primary_path) |
| 1479 | return -EINVAL; |
| 1480 | |
| 1481 | if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC && |
| 1482 | param->qp_type != IB_QPT_XRC_INI) |
| 1483 | return -EINVAL; |
| 1484 | |
| 1485 | if (param->private_data && |
| 1486 | param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE) |
| 1487 | return -EINVAL; |
| 1488 | |
| 1489 | if (param->alternate_path && |
| 1490 | (param->alternate_path->pkey != param->primary_path->pkey || |
| 1491 | param->alternate_path->mtu != param->primary_path->mtu)) |
| 1492 | return -EINVAL; |
| 1493 | |
| 1494 | return 0; |
| 1495 | } |
| 1496 | |
| 1497 | int ib_send_cm_req(struct ib_cm_id *cm_id, |
| 1498 | struct ib_cm_req_param *param) |
| 1499 | { |
| 1500 | struct cm_av av = {}, alt_av = {}; |
| 1501 | struct cm_id_private *cm_id_priv; |
| 1502 | struct ib_mad_send_buf *msg; |
| 1503 | struct cm_req_msg *req_msg; |
| 1504 | unsigned long flags; |
| 1505 | int ret; |
| 1506 | |
| 1507 | ret = cm_validate_req_param(param); |
| 1508 | if (ret) |
| 1509 | return ret; |
| 1510 | |
| 1511 | /* Verify that we're not in timewait. */ |
| 1512 | cm_id_priv = container_of(cm_id, struct cm_id_private, id); |
| 1513 | spin_lock_irqsave(&cm_id_priv->lock, flags); |
| 1514 | if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) { |
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info =
		cm_create_timewait_info(cm_id_priv->id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
		return ret;
	}

	ret = cm_init_av_by_path(param->primary_path,
				 param->ppath_sgid_attr, &av);
	if (ret)
		return ret;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path, NULL,
					 &alt_av);
		if (ret) {
			cm_destroy_av(&av);
			return ret;
		}
	}
	cm_id->service_id = param->service_id;
	cm_id_priv->timeout_ms =
		cm_convert_to_ms(param->primary_path->packet_life_time) * 2 +
		cm_convert_to_ms(param->remote_cm_response_timeout);
| 1545 | cm_id_priv->max_cm_retries = param->max_cm_retries; |
| 1546 | cm_id_priv->initiator_depth = param->initiator_depth; |
| 1547 | cm_id_priv->responder_resources = param->responder_resources; |
| 1548 | cm_id_priv->retry_count = param->retry_count; |
| 1549 | cm_id_priv->path_mtu = param->primary_path->mtu; |
| 1550 | cm_id_priv->pkey = param->primary_path->pkey; |
| 1551 | cm_id_priv->qp_type = param->qp_type; |
| 1552 | |
| 1553 | spin_lock_irqsave(&cm_id_priv->lock, flags); |
| 1554 | |
	cm_move_av_from_path(&cm_id_priv->av, &av);
	if (param->primary_path_outbound)
		cm_id_priv->av.dlid_datapath =
			be16_to_cpu(param->primary_path_outbound->ib.dlid);

	if (param->alternate_path)
		cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av);

	msg = cm_alloc_priv_msg(cm_id_priv, IB_CM_REQ_SENT);
	if (IS_ERR(msg)) {
		ret = PTR_ERR(msg);
| 1566 | goto out_unlock; |
| 1567 | } |
| 1568 | |
| 1569 | req_msg = (struct cm_req_msg *)msg->mad; |
| 1570 | cm_format_req(req_msg, cm_id_priv, param); |
| 1571 | cm_id_priv->tid = req_msg->hdr.tid; |
| 1572 | |
| 1573 | cm_id_priv->local_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg)); |
| 1574 | cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg)); |
| 1575 | |
	trace_icm_send_req(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto out_free;
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;
out_free:
	cm_free_priv_msg(msg);
out_unlock:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
| 1588 | return ret; |
| 1589 | } |
| 1590 | EXPORT_SYMBOL(ib_send_cm_req); |
| 1591 | |
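/*
 * Issue a REJ directly from a received MAD, for cases where no usable
 * cm_id exists. The local/remote comm IDs are swapped relative to the
 * received message since the REJ travels in the reverse direction.
 */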
| 1592 | static int cm_issue_rej(struct cm_port *port, |
| 1593 | struct ib_mad_recv_wc *mad_recv_wc, |
| 1594 | enum ib_cm_rej_reason reason, |
| 1595 | enum cm_msg_response msg_rejected, |
| 1596 | void *ari, u8 ari_length) |
| 1597 | { |
| 1598 | struct ib_mad_send_buf *msg = NULL; |
| 1599 | struct cm_rej_msg *rej_msg, *rcv_msg; |
| 1600 | int ret; |
| 1601 | |
	ret = cm_alloc_response_msg(port, mad_recv_wc, false, &msg);
| 1603 | if (ret) |
| 1604 | return ret; |
| 1605 | |
| 1606 | /* We just need common CM header information. Cast to any message. */ |
| 1607 | rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad; |
| 1608 | rej_msg = (struct cm_rej_msg *) msg->mad; |
| 1609 | |
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
| 1611 | IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg, |
| 1612 | IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg)); |
| 1613 | IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, |
| 1614 | IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg)); |
| 1615 | IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, msg_rejected); |
| 1616 | IBA_SET(CM_REJ_REASON, rej_msg, reason); |
| 1617 | |
| 1618 | if (ari && ari_length) { |
| 1619 | IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length); |
| 1620 | IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length); |
| 1621 | } |
| 1622 | |
| 1623 | trace_icm_issue_rej( |
| 1624 | IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg), |
| 1625 | IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg)); |
	ret = ib_post_send_mad(msg, NULL);
| 1627 | if (ret) |
| 1628 | cm_free_msg(msg); |
| 1629 | |
| 1630 | return ret; |
| 1631 | } |
| 1632 | |
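/*
 * A REQ carries an alternate path if either the alternate local LID or,
 * for OPA, the alternate local port GID is populated.
 */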
| 1633 | static bool cm_req_has_alt_path(struct cm_req_msg *req_msg) |
| 1634 | { |
| 1635 | return ((cpu_to_be16( |
| 1636 | IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg))) || |
| 1637 | (ib_is_opa_gid(IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, |
| 1638 | req_msg)))); |
| 1639 | } |
| 1640 | |
| 1641 | static void cm_path_set_rec_type(struct ib_device *ib_device, u32 port_num, |
| 1642 | struct sa_path_rec *path, union ib_gid *gid) |
| 1643 | { |
	if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num))
| 1645 | path->rec_type = SA_PATH_REC_TYPE_OPA; |
| 1646 | else |
| 1647 | path->rec_type = SA_PATH_REC_TYPE_IB; |
| 1648 | } |
| 1649 | |
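/*
 * Recover the LIDs for path records built from a received REQ. Note the
 * deliberate swap in perspective: the REQ's "local" fields describe the
 * sender, so they become the passive side's remote (DLID) values. For
 * OPA, extended LIDs are carried in the GID fields instead.
 */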
| 1650 | static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg, |
| 1651 | struct sa_path_rec *primary_path, |
| 1652 | struct sa_path_rec *alt_path, |
| 1653 | struct ib_wc *wc) |
| 1654 | { |
| 1655 | u32 lid; |
| 1656 | |
	if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
		sa_path_set_dlid(primary_path, wc->slid);
		sa_path_set_slid(primary_path,
				 IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
					 req_msg));
	} else {
		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg));
		sa_path_set_dlid(primary_path, lid);

		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg));
		sa_path_set_slid(primary_path, lid);
	}

	if (!cm_req_has_alt_path(req_msg))
		return;

	if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
		sa_path_set_dlid(alt_path,
				 IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
					 req_msg));
		sa_path_set_slid(alt_path,
				 IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
					 req_msg));
	} else {
		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg));
		sa_path_set_dlid(alt_path, lid);

		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg));
		sa_path_set_slid(alt_path, lid);
	}
| 1691 | } |
| 1692 | |
| 1693 | static void cm_format_paths_from_req(struct cm_req_msg *req_msg, |
| 1694 | struct sa_path_rec *primary_path, |
| 1695 | struct sa_path_rec *alt_path, |
| 1696 | struct ib_wc *wc) |
| 1697 | { |
| 1698 | primary_path->dgid = |
| 1699 | *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg); |
| 1700 | primary_path->sgid = |
| 1701 | *IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg); |
| 1702 | primary_path->flow_label = |
| 1703 | cpu_to_be32(IBA_GET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg)); |
| 1704 | primary_path->hop_limit = IBA_GET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg); |
| 1705 | primary_path->traffic_class = |
| 1706 | IBA_GET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg); |
| 1707 | primary_path->reversible = 1; |
| 1708 | primary_path->pkey = |
| 1709 | cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg)); |
| 1710 | primary_path->sl = IBA_GET(CM_REQ_PRIMARY_SL, req_msg); |
| 1711 | primary_path->mtu_selector = IB_SA_EQ; |
| 1712 | primary_path->mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg); |
| 1713 | primary_path->rate_selector = IB_SA_EQ; |
| 1714 | primary_path->rate = IBA_GET(CM_REQ_PRIMARY_PACKET_RATE, req_msg); |
| 1715 | primary_path->packet_life_time_selector = IB_SA_EQ; |
| 1716 | primary_path->packet_life_time = |
| 1717 | IBA_GET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg); |
| 1718 | primary_path->packet_life_time -= (primary_path->packet_life_time > 0); |
| 1719 | primary_path->service_id = |
| 1720 | cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)); |
	if (sa_path_is_roce(primary_path))
| 1722 | primary_path->roce.route_resolved = false; |
| 1723 | |
| 1724 | if (cm_req_has_alt_path(req_msg)) { |
| 1725 | alt_path->dgid = *IBA_GET_MEM_PTR( |
| 1726 | CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg); |
| 1727 | alt_path->sgid = *IBA_GET_MEM_PTR( |
| 1728 | CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg); |
| 1729 | alt_path->flow_label = cpu_to_be32( |
| 1730 | IBA_GET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg)); |
| 1731 | alt_path->hop_limit = |
| 1732 | IBA_GET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg); |
| 1733 | alt_path->traffic_class = |
| 1734 | IBA_GET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg); |
| 1735 | alt_path->reversible = 1; |
| 1736 | alt_path->pkey = |
| 1737 | cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg)); |
| 1738 | alt_path->sl = IBA_GET(CM_REQ_ALTERNATE_SL, req_msg); |
| 1739 | alt_path->mtu_selector = IB_SA_EQ; |
| 1740 | alt_path->mtu = |
| 1741 | IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg); |
| 1742 | alt_path->rate_selector = IB_SA_EQ; |
| 1743 | alt_path->rate = IBA_GET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg); |
| 1744 | alt_path->packet_life_time_selector = IB_SA_EQ; |
| 1745 | alt_path->packet_life_time = |
| 1746 | IBA_GET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg); |
| 1747 | alt_path->packet_life_time -= (alt_path->packet_life_time > 0); |
| 1748 | alt_path->service_id = |
| 1749 | cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)); |
| 1750 | |
		if (sa_path_is_roce(alt_path))
| 1752 | alt_path->roce.route_resolved = false; |
| 1753 | } |
| 1754 | cm_format_path_lid_from_req(req_msg, primary_path, alt_path, wc); |
| 1755 | } |
| 1756 | |
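/*
 * Look up the pkey that arrived in the BTH of the REQ, using the pkey
 * index from the work completion. Returns 0 if the cached lookup fails.
 */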
| 1757 | static u16 cm_get_bth_pkey(struct cm_work *work) |
| 1758 | { |
| 1759 | struct ib_device *ib_dev = work->port->cm_dev->ib_device; |
| 1760 | u32 port_num = work->port->port_num; |
| 1761 | u16 pkey_index = work->mad_recv_wc->wc->pkey_index; |
| 1762 | u16 pkey; |
| 1763 | int ret; |
| 1764 | |
	ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
	if (ret) {
		dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %u, pkey index %u). %d\n",
				     port_num, pkey_index, ret);
| 1769 | return 0; |
| 1770 | } |
| 1771 | |
| 1772 | return pkey; |
| 1773 | } |
| 1774 | |
| 1775 | /** |
| 1776 | * cm_opa_to_ib_sgid - Convert OPA SGID to IB SGID |
| 1777 | * ULPs (such as IPoIB) do not understand OPA GIDs and will |
| 1778 | * reject them as the local_gid will not match the sgid. Therefore, |
| 1779 | * change the pathrec's SGID to an IB SGID. |
| 1780 | * |
| 1781 | * @work: Work completion |
| 1782 | * @path: Path record |
| 1783 | */ |
| 1784 | static void cm_opa_to_ib_sgid(struct cm_work *work, |
| 1785 | struct sa_path_rec *path) |
| 1786 | { |
| 1787 | struct ib_device *dev = work->port->cm_dev->ib_device; |
| 1788 | u32 port_num = work->port->port_num; |
| 1789 | |
	if (rdma_cap_opa_ah(dev, port_num) &&
	    (ib_is_opa_gid(&path->sgid))) {
		union ib_gid sgid;

		if (rdma_query_gid(dev, port_num, 0, &sgid)) {
			dev_warn(&dev->dev,
				 "Error updating sgid in CM request\n");
| 1797 | return; |
| 1798 | } |
| 1799 | |
| 1800 | path->sgid = sgid; |
| 1801 | } |
| 1802 | } |
| 1803 | |
| 1804 | static void cm_format_req_event(struct cm_work *work, |
| 1805 | struct cm_id_private *cm_id_priv, |
| 1806 | struct ib_cm_id *listen_id) |
| 1807 | { |
| 1808 | struct cm_req_msg *req_msg; |
| 1809 | struct ib_cm_req_event_param *param; |
| 1810 | |
| 1811 | req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; |
| 1812 | param = &work->cm_event.param.req_rcvd; |
| 1813 | param->listen_id = listen_id; |
| 1814 | param->bth_pkey = cm_get_bth_pkey(work); |
| 1815 | param->port = cm_id_priv->av.port->port_num; |
| 1816 | param->primary_path = &work->path[0]; |
	cm_opa_to_ib_sgid(work, param->primary_path);
	if (cm_req_has_alt_path(req_msg)) {
		param->alternate_path = &work->path[1];
		cm_opa_to_ib_sgid(work, param->alternate_path);
| 1821 | } else { |
| 1822 | param->alternate_path = NULL; |
| 1823 | } |
| 1824 | param->remote_ca_guid = |
| 1825 | cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg)); |
| 1826 | param->remote_qkey = IBA_GET(CM_REQ_LOCAL_Q_KEY, req_msg); |
| 1827 | param->remote_qpn = IBA_GET(CM_REQ_LOCAL_QPN, req_msg); |
| 1828 | param->qp_type = cm_req_get_qp_type(req_msg); |
| 1829 | param->starting_psn = IBA_GET(CM_REQ_STARTING_PSN, req_msg); |
| 1830 | param->responder_resources = IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg); |
| 1831 | param->initiator_depth = IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg); |
| 1832 | param->local_cm_response_timeout = |
| 1833 | IBA_GET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg); |
| 1834 | param->flow_control = IBA_GET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg); |
| 1835 | param->remote_cm_response_timeout = |
| 1836 | IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg); |
| 1837 | param->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg); |
| 1838 | param->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg); |
| 1839 | param->srq = IBA_GET(CM_REQ_SRQ, req_msg); |
| 1840 | param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr; |
| 1841 | param->ece.vendor_id = IBA_GET(CM_REQ_VENDOR_ID, req_msg); |
| 1842 | param->ece.attr_mod = be32_to_cpu(req_msg->hdr.attr_mod); |
| 1843 | |
| 1844 | work->cm_event.private_data = |
| 1845 | IBA_GET_MEM_PTR(CM_REQ_PRIVATE_DATA, req_msg); |
| 1846 | } |
| 1847 | |
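/*
 * Deliver the current event to the consumer's handler, then drain any
 * events that were queued on the cm_id while it was busy. A non-zero
 * handler return destroys the cm_id.
 */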
| 1848 | static void cm_process_work(struct cm_id_private *cm_id_priv, |
| 1849 | struct cm_work *work) |
| 1850 | { |
| 1851 | int ret; |
| 1852 | |
| 1853 | /* We will typically only have the current event to report. */ |
| 1854 | ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event); |
| 1855 | cm_free_work(work); |
| 1856 | |
	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irq(&cm_id_priv->lock);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
| 1861 | if (!work) |
| 1862 | return; |
| 1863 | |
| 1864 | ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, |
| 1865 | &work->cm_event); |
| 1866 | cm_free_work(work); |
| 1867 | } |
| 1868 | cm_deref_id(cm_id_priv); |
| 1869 | if (ret) |
		cm_destroy_id(&cm_id_priv->id, ret);
| 1871 | } |
| 1872 | |
| 1873 | static void cm_format_mra(struct cm_mra_msg *mra_msg, |
| 1874 | struct cm_id_private *cm_id_priv, |
| 1875 | enum cm_msg_response msg_mraed, |
| 1876 | const void *private_data, u8 private_data_len) |
| 1877 | { |
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
| 1879 | IBA_SET(CM_MRA_MESSAGE_MRAED, mra_msg, msg_mraed); |
| 1880 | IBA_SET(CM_MRA_LOCAL_COMM_ID, mra_msg, |
| 1881 | be32_to_cpu(cm_id_priv->id.local_id)); |
| 1882 | IBA_SET(CM_MRA_REMOTE_COMM_ID, mra_msg, |
| 1883 | be32_to_cpu(cm_id_priv->id.remote_id)); |
| 1884 | IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, CM_MRA_SETTING); |
| 1885 | |
| 1886 | if (private_data && private_data_len) |
| 1887 | IBA_SET_MEM(CM_MRA_PRIVATE_DATA, mra_msg, private_data, |
| 1888 | private_data_len); |
| 1889 | } |
| 1890 | |
| 1891 | static void cm_format_rej(struct cm_rej_msg *rej_msg, |
| 1892 | struct cm_id_private *cm_id_priv, |
| 1893 | enum ib_cm_rej_reason reason, void *ari, |
| 1894 | u8 ari_length, const void *private_data, |
| 1895 | u8 private_data_len, enum ib_cm_state state) |
| 1896 | { |
| 1897 | lockdep_assert_held(&cm_id_priv->lock); |
| 1898 | |
	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
| 1900 | IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg, |
| 1901 | be32_to_cpu(cm_id_priv->id.remote_id)); |
| 1902 | |
| 1903 | switch (state) { |
| 1904 | case IB_CM_REQ_RCVD: |
| 1905 | IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0)); |
| 1906 | IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ); |
| 1907 | break; |
| 1908 | case IB_CM_MRA_REQ_SENT: |
| 1909 | IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, |
| 1910 | be32_to_cpu(cm_id_priv->id.local_id)); |
| 1911 | IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ); |
| 1912 | break; |
| 1913 | case IB_CM_REP_RCVD: |
| 1914 | case IB_CM_MRA_REP_SENT: |
| 1915 | IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, |
| 1916 | be32_to_cpu(cm_id_priv->id.local_id)); |
| 1917 | IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REP); |
| 1918 | break; |
| 1919 | default: |
| 1920 | IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, |
| 1921 | be32_to_cpu(cm_id_priv->id.local_id)); |
| 1922 | IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, |
| 1923 | CM_MSG_RESPONSE_OTHER); |
| 1924 | break; |
| 1925 | } |
| 1926 | |
| 1927 | IBA_SET(CM_REJ_REASON, rej_msg, reason); |
| 1928 | if (ari && ari_length) { |
| 1929 | IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length); |
| 1930 | IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length); |
| 1931 | } |
| 1932 | |
| 1933 | if (private_data && private_data_len) |
| 1934 | IBA_SET_MEM(CM_REJ_PRIVATE_DATA, rej_msg, private_data, |
| 1935 | private_data_len); |
| 1936 | } |
| 1937 | |
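/*
 * Handle a duplicate REQ: count it, and if we already responded (MRA
 * sent) or the connection went stale (timewait), resend the appropriate
 * MRA or REJ so the peer can make progress.
 */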
| 1938 | static void cm_dup_req_handler(struct cm_work *work, |
| 1939 | struct cm_id_private *cm_id_priv) |
| 1940 | { |
| 1941 | struct ib_mad_send_buf *msg = NULL; |
| 1942 | int ret; |
| 1943 | |
	atomic_long_inc(
		&work->port->counters[CM_RECV_DUPLICATES][CM_REQ_COUNTER]);

	/* Quick state check to discard duplicate REQs. */
	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		return;
	}
	spin_unlock_irq(&cm_id_priv->lock);

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, true, &msg);
	if (ret)
		return;

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0,
			      IB_CM_TIMEWAIT);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irq(&cm_id_priv->lock);

	trace_icm_send_dup_req(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
| 1985 | } |
| 1986 | |
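/*
 * Match a received REQ against existing state: dispose of duplicate
 * REQs, reject stale connections (a remote QPN already in timewait),
 * and finally look up the listener for the requested service ID. On
 * success the returned listener holds a reference that the caller must
 * drop with cm_deref_id().
 */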
| 1987 | static struct cm_id_private *cm_match_req(struct cm_work *work, |
| 1988 | struct cm_id_private *cm_id_priv) |
| 1989 | { |
| 1990 | struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv; |
| 1991 | struct cm_timewait_info *timewait_info; |
| 1992 | struct cm_req_msg *req_msg; |
| 1993 | |
| 1994 | req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; |
| 1995 | |
| 1996 | /* Check for possible duplicate REQ. */ |
	spin_lock_irq(&cm.lock);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (timewait_info) {
		cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
					       timewait_info->work.remote_id);
		spin_unlock_irq(&cm.lock);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		}
		return NULL;
	}

	/* Check for stale connections. */
	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
	if (timewait_info) {
		cm_remove_remote(cm_id_priv);
		cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
					       timewait_info->work.remote_id);

		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		if (cur_cm_id_priv) {
			ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
			cm_deref_id(cur_cm_id_priv);
		}
		return NULL;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(
		cm_id_priv->id.device,
		cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)));
	if (!listen_cm_id_priv) {
		cm_remove_remote(cm_id_priv);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		return NULL;
	}
	spin_unlock_irq(&cm.lock);
| 2041 | return listen_cm_id_priv; |
| 2042 | } |
| 2043 | |
| 2044 | /* |
| 2045 | * Work-around for inter-subnet connections. If the LIDs are permissive, |
| 2046 | * we need to override the LID/SL data in the REQ with the LID information |
| 2047 | * in the work completion. |
| 2048 | */ |
| 2049 | static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc) |
| 2050 | { |
| 2051 | if (!IBA_GET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg)) { |
| 2052 | if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID, |
| 2053 | req_msg)) == IB_LID_PERMISSIVE) { |
| 2054 | IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg, |
| 2055 | be16_to_cpu(ib_lid_be16(wc->slid))); |
| 2056 | IBA_SET(CM_REQ_PRIMARY_SL, req_msg, wc->sl); |
| 2057 | } |
| 2058 | |
| 2059 | if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID, |
| 2060 | req_msg)) == IB_LID_PERMISSIVE) |
| 2061 | IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg, |
| 2062 | wc->dlid_path_bits); |
| 2063 | } |
| 2064 | |
| 2065 | if (!IBA_GET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg)) { |
| 2066 | if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, |
| 2067 | req_msg)) == IB_LID_PERMISSIVE) { |
| 2068 | IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg, |
| 2069 | be16_to_cpu(ib_lid_be16(wc->slid))); |
| 2070 | IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, wc->sl); |
| 2071 | } |
| 2072 | |
| 2073 | if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, |
| 2074 | req_msg)) == IB_LID_PERMISSIVE) |
| 2075 | IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg, |
| 2076 | wc->dlid_path_bits); |
| 2077 | } |
| 2078 | } |
| 2079 | |
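/*
 * Handle a received REQ: create a new cm_id in the passive role, copy
 * the connection parameters out of the MAD, build the primary/alternate
 * path records, match against a listener, and queue a REQ event for the
 * consumer. Failure paths reject the connection and/or destroy the new
 * cm_id.
 */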
| 2080 | static int cm_req_handler(struct cm_work *work) |
| 2081 | { |
| 2082 | struct cm_id_private *cm_id_priv, *listen_cm_id_priv; |
| 2083 | struct cm_req_msg *req_msg; |
| 2084 | const struct ib_global_route *grh; |
| 2085 | const struct ib_gid_attr *gid_attr; |
| 2086 | int ret; |
| 2087 | |
| 2088 | req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; |
| 2089 | |
| 2090 | cm_id_priv = |
		cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id_priv))
		return PTR_ERR(cm_id_priv);
| 2094 | |
| 2095 | cm_id_priv->id.remote_id = |
| 2096 | cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg)); |
| 2097 | cm_id_priv->id.service_id = |
| 2098 | cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)); |
| 2099 | cm_id_priv->tid = req_msg->hdr.tid; |
| 2100 | cm_id_priv->timeout_ms = cm_convert_to_ms( |
| 2101 | IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg)); |
| 2102 | cm_id_priv->max_cm_retries = IBA_GET(CM_REQ_MAX_CM_RETRIES, req_msg); |
| 2103 | cm_id_priv->remote_qpn = |
| 2104 | cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg)); |
| 2105 | cm_id_priv->initiator_depth = |
| 2106 | IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg); |
| 2107 | cm_id_priv->responder_resources = |
| 2108 | IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg); |
| 2109 | cm_id_priv->path_mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg); |
| 2110 | cm_id_priv->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg)); |
| 2111 | cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg)); |
| 2112 | cm_id_priv->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg); |
| 2113 | cm_id_priv->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg); |
| 2114 | cm_id_priv->qp_type = cm_req_get_qp_type(req_msg); |
| 2115 | |
	ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				      work->mad_recv_wc->recv_buf.grh,
				      &cm_id_priv->av);
	if (ret)
		goto destroy;
	cm_id_priv->timewait_info =
		cm_create_timewait_info(cm_id_priv->id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
		goto destroy;
	}
| 2128 | cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id; |
| 2129 | cm_id_priv->timewait_info->remote_ca_guid = |
| 2130 | cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg)); |
| 2131 | cm_id_priv->timewait_info->remote_qpn = cm_id_priv->remote_qpn; |
| 2132 | |
| 2133 | /* |
| 2134 | * Note that the ID pointer is not in the xarray at this point, |
| 2135 | * so this set is only visible to the local thread. |
| 2136 | */ |
| 2137 | cm_id_priv->id.state = IB_CM_REQ_RCVD; |
| 2138 | |
| 2139 | listen_cm_id_priv = cm_match_req(work, cm_id_priv); |
| 2140 | if (!listen_cm_id_priv) { |
		trace_icm_no_listener_err(&cm_id_priv->id);
| 2142 | cm_id_priv->id.state = IB_CM_IDLE; |
| 2143 | ret = -EINVAL; |
| 2144 | goto destroy; |
| 2145 | } |
| 2146 | |
| 2147 | memset(&work->path[0], 0, sizeof(work->path[0])); |
| 2148 | if (cm_req_has_alt_path(req_msg)) |
| 2149 | memset(&work->path[1], 0, sizeof(work->path[1])); |
	grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
| 2151 | gid_attr = grh->sgid_attr; |
| 2152 | |
| 2153 | if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE) { |
| 2154 | work->path[0].rec_type = |
			sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
	} else {
		cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
		cm_path_set_rec_type(
			work->port->cm_dev->ib_device, work->port->port_num,
			&work->path[0],
			IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID,
					req_msg));
| 2163 | } |
| 2164 | if (cm_req_has_alt_path(req_msg)) |
| 2165 | work->path[1].rec_type = work->path[0].rec_type; |
	cm_format_paths_from_req(req_msg, &work->path[0],
				 &work->path[1], work->mad_recv_wc->wc);
	if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
		sa_path_set_dmac(&work->path[0],
				 cm_id_priv->av.ah_attr.roce.dmac);
| 2171 | work->path[0].hop_limit = grh->hop_limit; |
| 2172 | |
| 2173 | /* This destroy call is needed to pair with cm_init_av_for_response */ |
	cm_destroy_av(&cm_id_priv->av);
	ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av);
| 2176 | if (ret) { |
| 2177 | int err; |
| 2178 | |
		err = rdma_query_gid(work->port->cm_dev->ib_device,
				     work->port->port_num, 0,
				     &work->path[0].sgid);
		if (err)
			ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
				       NULL, 0, NULL, 0);
		else
			ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
				       &work->path[0].sgid,
				       sizeof(work->path[0].sgid),
				       NULL, 0);
| 2190 | goto rejected; |
| 2191 | } |
| 2192 | if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_IB) |
| 2193 | cm_id_priv->av.dlid_datapath = |
| 2194 | IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg); |
| 2195 | |
| 2196 | if (cm_req_has_alt_path(req_msg)) { |
		ret = cm_init_av_by_path(&work->path[1], NULL,
					 &cm_id_priv->alt_av);
		if (ret) {
			ib_send_cm_rej(&cm_id_priv->id,
				       IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof(work->path[0].sgid), NULL, 0);
| 2204 | goto rejected; |
| 2205 | } |
| 2206 | } |
| 2207 | |
| 2208 | cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler; |
| 2209 | cm_id_priv->id.context = listen_cm_id_priv->id.context; |
	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
| 2211 | |
| 2212 | /* Now MAD handlers can see the new ID */ |
	spin_lock_irq(&cm_id_priv->lock);
| 2214 | cm_finalize_id(cm_id_priv); |
| 2215 | |
| 2216 | /* Refcount belongs to the event, pairs with cm_process_work() */ |
	refcount_inc(&cm_id_priv->refcount);
| 2218 | cm_queue_work_unlock(cm_id_priv, work); |
| 2219 | /* |
| 2220 | * Since this ID was just created and was not made visible to other MAD |
| 2221 | * handlers until the cm_finalize_id() above we know that the |
| 2222 | * cm_process_work() will deliver the event and the listen_cm_id |
| 2223 | * embedded in the event can be derefed here. |
| 2224 | */ |
	cm_deref_id(listen_cm_id_priv);
| 2226 | return 0; |
| 2227 | |
| 2228 | rejected: |
	cm_deref_id(listen_cm_id_priv);
| 2230 | destroy: |
| 2231 | ib_destroy_cm_id(&cm_id_priv->id); |
| 2232 | return ret; |
| 2233 | } |
| 2234 | |
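/*
 * Fill out a REP. XRC target QPs report their number in the EE context
 * field with SRQ forced on; all other QP types use the local QPN field
 * along with the initiator depth and flow control parameters.
 */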
| 2235 | static void cm_format_rep(struct cm_rep_msg *rep_msg, |
| 2236 | struct cm_id_private *cm_id_priv, |
| 2237 | struct ib_cm_rep_param *param) |
| 2238 | { |
	cm_format_mad_ece_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid,
			      param->ece.attr_mod);
| 2241 | IBA_SET(CM_REP_LOCAL_COMM_ID, rep_msg, |
| 2242 | be32_to_cpu(cm_id_priv->id.local_id)); |
| 2243 | IBA_SET(CM_REP_REMOTE_COMM_ID, rep_msg, |
| 2244 | be32_to_cpu(cm_id_priv->id.remote_id)); |
| 2245 | IBA_SET(CM_REP_STARTING_PSN, rep_msg, param->starting_psn); |
| 2246 | IBA_SET(CM_REP_RESPONDER_RESOURCES, rep_msg, |
| 2247 | param->responder_resources); |
| 2248 | IBA_SET(CM_REP_TARGET_ACK_DELAY, rep_msg, |
| 2249 | cm_id_priv->av.port->cm_dev->ack_delay); |
| 2250 | IBA_SET(CM_REP_FAILOVER_ACCEPTED, rep_msg, param->failover_accepted); |
| 2251 | IBA_SET(CM_REP_RNR_RETRY_COUNT, rep_msg, param->rnr_retry_count); |
| 2252 | IBA_SET(CM_REP_LOCAL_CA_GUID, rep_msg, |
| 2253 | be64_to_cpu(cm_id_priv->id.device->node_guid)); |
| 2254 | |
| 2255 | if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) { |
| 2256 | IBA_SET(CM_REP_INITIATOR_DEPTH, rep_msg, |
| 2257 | param->initiator_depth); |
| 2258 | IBA_SET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg, |
| 2259 | param->flow_control); |
| 2260 | IBA_SET(CM_REP_SRQ, rep_msg, param->srq); |
| 2261 | IBA_SET(CM_REP_LOCAL_QPN, rep_msg, param->qp_num); |
| 2262 | } else { |
| 2263 | IBA_SET(CM_REP_SRQ, rep_msg, 1); |
| 2264 | IBA_SET(CM_REP_LOCAL_EE_CONTEXT_NUMBER, rep_msg, param->qp_num); |
| 2265 | } |
| 2266 | |
| 2267 | IBA_SET(CM_REP_VENDOR_ID_L, rep_msg, param->ece.vendor_id); |
| 2268 | IBA_SET(CM_REP_VENDOR_ID_M, rep_msg, param->ece.vendor_id >> 8); |
| 2269 | IBA_SET(CM_REP_VENDOR_ID_H, rep_msg, param->ece.vendor_id >> 16); |
| 2270 | |
| 2271 | if (param->private_data && param->private_data_len) |
| 2272 | IBA_SET_MEM(CM_REP_PRIVATE_DATA, rep_msg, param->private_data, |
| 2273 | param->private_data_len); |
| 2274 | } |
| 2275 | |
| 2276 | int ib_send_cm_rep(struct ib_cm_id *cm_id, |
| 2277 | struct ib_cm_rep_param *param) |
| 2278 | { |
| 2279 | struct cm_id_private *cm_id_priv; |
| 2280 | struct ib_mad_send_buf *msg; |
| 2281 | struct cm_rep_msg *rep_msg; |
| 2282 | unsigned long flags; |
| 2283 | int ret; |
| 2284 | |
| 2285 | if (param->private_data && |
| 2286 | param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE) |
| 2287 | return -EINVAL; |
| 2288 | |
| 2289 | cm_id_priv = container_of(cm_id, struct cm_id_private, id); |
| 2290 | spin_lock_irqsave(&cm_id_priv->lock, flags); |
| 2291 | if (cm_id->state != IB_CM_REQ_RCVD && |
| 2292 | cm_id->state != IB_CM_MRA_REQ_SENT) { |
		trace_icm_send_rep_err(cm_id_priv->id.local_id, cm_id->state);
| 2294 | ret = -EINVAL; |
| 2295 | goto out; |
| 2296 | } |
| 2297 | |
	msg = cm_alloc_priv_msg(cm_id_priv, IB_CM_REP_SENT);
	if (IS_ERR(msg)) {
		ret = PTR_ERR(msg);
| 2301 | goto out; |
| 2302 | } |
| 2303 | |
| 2304 | rep_msg = (struct cm_rep_msg *) msg->mad; |
| 2305 | cm_format_rep(rep_msg, cm_id_priv, param); |
| 2306 | |
| 2307 | trace_icm_send_rep(cm_id); |
	ret = ib_post_send_mad(msg, NULL);
| 2309 | if (ret) |
| 2310 | goto out_free; |
| 2311 | |
| 2312 | cm_id->state = IB_CM_REP_SENT; |
| 2313 | cm_id_priv->initiator_depth = param->initiator_depth; |
| 2314 | cm_id_priv->responder_resources = param->responder_resources; |
| 2315 | cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg)); |
	WARN_ONCE(param->qp_num & 0xFF000000,
		  "IBTA declares QPN to be 24 bits, but it is 0x%X\n",
		  param->qp_num);
| 2319 | cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF); |
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
| 2321 | return 0; |
| 2322 | |
| 2323 | out_free: |
| 2324 | cm_free_priv_msg(msg); |
| 2325 | out: |
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
| 2327 | return ret; |
| 2328 | } |
| 2329 | EXPORT_SYMBOL(ib_send_cm_rep); |
| 2330 | |
| 2331 | static void cm_format_rtu(struct cm_rtu_msg *rtu_msg, |
| 2332 | struct cm_id_private *cm_id_priv, |
| 2333 | const void *private_data, |
| 2334 | u8 private_data_len) |
| 2335 | { |
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
| 2337 | IBA_SET(CM_RTU_LOCAL_COMM_ID, rtu_msg, |
| 2338 | be32_to_cpu(cm_id_priv->id.local_id)); |
| 2339 | IBA_SET(CM_RTU_REMOTE_COMM_ID, rtu_msg, |
| 2340 | be32_to_cpu(cm_id_priv->id.remote_id)); |
| 2341 | |
| 2342 | if (private_data && private_data_len) |
| 2343 | IBA_SET_MEM(CM_RTU_PRIVATE_DATA, rtu_msg, private_data, |
| 2344 | private_data_len); |
| 2345 | } |
| 2346 | |
| 2347 | int ib_send_cm_rtu(struct ib_cm_id *cm_id, |
| 2348 | const void *private_data, |
| 2349 | u8 private_data_len) |
| 2350 | { |
| 2351 | struct cm_id_private *cm_id_priv; |
| 2352 | struct ib_mad_send_buf *msg; |
| 2353 | unsigned long flags; |
| 2354 | void *data; |
| 2355 | int ret; |
| 2356 | |
| 2357 | if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE) |
| 2358 | return -EINVAL; |
| 2359 | |
| 2360 | data = cm_copy_private_data(private_data, private_data_len); |
	if (IS_ERR(data))
		return PTR_ERR(data);
| 2363 | |
| 2364 | cm_id_priv = container_of(cm_id, struct cm_id_private, id); |
| 2365 | spin_lock_irqsave(&cm_id_priv->lock, flags); |
| 2366 | if (cm_id->state != IB_CM_REP_RCVD && |
| 2367 | cm_id->state != IB_CM_MRA_REP_SENT) { |
| 2368 | trace_icm_send_cm_rtu_err(cm_id); |
| 2369 | ret = -EINVAL; |
| 2370 | goto error; |
| 2371 | } |
| 2372 | |
| 2373 | msg = cm_alloc_msg(cm_id_priv); |
	if (IS_ERR(msg)) {
		ret = PTR_ERR(msg);
| 2376 | goto error; |
| 2377 | } |
| 2378 | |
	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
| 2380 | private_data, private_data_len); |
| 2381 | |
| 2382 | trace_icm_send_rtu(cm_id); |
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
| 2388 | return ret; |
| 2389 | } |
| 2390 | |
| 2391 | cm_id->state = IB_CM_ESTABLISHED; |
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
| 2394 | return 0; |
| 2395 | |
error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
| 2398 | return ret; |
| 2399 | } |
| 2400 | EXPORT_SYMBOL(ib_send_cm_rtu); |
| 2401 | |
| 2402 | static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type) |
| 2403 | { |
| 2404 | struct cm_rep_msg *rep_msg; |
| 2405 | struct ib_cm_rep_event_param *param; |
| 2406 | |
| 2407 | rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad; |
| 2408 | param = &work->cm_event.param.rep_rcvd; |
| 2409 | param->remote_ca_guid = |
| 2410 | cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg)); |
| 2411 | param->remote_qkey = IBA_GET(CM_REP_LOCAL_Q_KEY, rep_msg); |
| 2412 | param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type)); |
| 2413 | param->starting_psn = IBA_GET(CM_REP_STARTING_PSN, rep_msg); |
| 2414 | param->responder_resources = IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg); |
| 2415 | param->initiator_depth = IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg); |
| 2416 | param->target_ack_delay = IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg); |
| 2417 | param->failover_accepted = IBA_GET(CM_REP_FAILOVER_ACCEPTED, rep_msg); |
| 2418 | param->flow_control = IBA_GET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg); |
| 2419 | param->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg); |
| 2420 | param->srq = IBA_GET(CM_REP_SRQ, rep_msg); |
| 2421 | param->ece.vendor_id = IBA_GET(CM_REP_VENDOR_ID_H, rep_msg) << 16; |
| 2422 | param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_M, rep_msg) << 8; |
| 2423 | param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_L, rep_msg); |
| 2424 | param->ece.attr_mod = be32_to_cpu(rep_msg->hdr.attr_mod); |
| 2425 | |
| 2426 | work->cm_event.private_data = |
| 2427 | IBA_GET_MEM_PTR(CM_REP_PRIVATE_DATA, rep_msg); |
| 2428 | } |
| 2429 | |
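/*
 * Handle a duplicate REP by resending whatever response we last gave:
 * an RTU if the connection is established, or an MRA if one is pending.
 */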
| 2430 | static void cm_dup_rep_handler(struct cm_work *work) |
| 2431 | { |
| 2432 | struct cm_id_private *cm_id_priv; |
| 2433 | struct cm_rep_msg *rep_msg; |
| 2434 | struct ib_mad_send_buf *msg = NULL; |
| 2435 | int ret; |
| 2436 | |
| 2437 | rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad; |
| 2438 | cm_id_priv = cm_acquire_id( |
| 2439 | cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)), |
| 2440 | cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg))); |
| 2441 | if (!cm_id_priv) |
| 2442 | return; |
| 2443 | |
	atomic_long_inc(
		&work->port->counters[CM_RECV_DUPLICATES][CM_REP_COUNTER]);
	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, true, &msg);
	if (ret)
		goto deref;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irq(&cm_id_priv->lock);

	trace_icm_send_dup_rep(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	goto deref;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
| 2473 | } |
| 2474 | |
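/*
 * Handle a received REP on the active side: validate the cm_id state,
 * insert the remote ID/QPN into the timewait tables to catch duplicate
 * REPs and stale connections, absorb the peer's parameters, adjust the
 * ack timeouts by the target's ack delay, and queue the event.
 */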
| 2475 | static int cm_rep_handler(struct cm_work *work) |
| 2476 | { |
| 2477 | struct cm_id_private *cm_id_priv; |
| 2478 | struct cm_rep_msg *rep_msg; |
| 2479 | int ret; |
| 2480 | struct cm_id_private *cur_cm_id_priv; |
| 2481 | struct cm_timewait_info *timewait_info; |
| 2482 | |
| 2483 | rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad; |
| 2484 | cm_id_priv = cm_acquire_id( |
		cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)), 0);
| 2486 | if (!cm_id_priv) { |
| 2487 | cm_dup_rep_handler(work); |
| 2488 | trace_icm_remote_no_priv_err( |
| 2489 | IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)); |
| 2490 | return -EINVAL; |
| 2491 | } |
| 2492 | |
	cm_format_rep_event(work, cm_id_priv->qp_type);

	spin_lock_irq(&cm_id_priv->lock);
| 2496 | switch (cm_id_priv->id.state) { |
| 2497 | case IB_CM_REQ_SENT: |
| 2498 | case IB_CM_MRA_REQ_RCVD: |
| 2499 | break; |
| 2500 | default: |
| 2501 | ret = -EINVAL; |
| 2502 | trace_icm_rep_unknown_err( |
| 2503 | IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg), |
| 2504 | IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg), |
			cm_id_priv->id.state);
		spin_unlock_irq(&cm_id_priv->lock);
| 2507 | goto error; |
| 2508 | } |
| 2509 | |
| 2510 | cm_id_priv->timewait_info->work.remote_id = |
| 2511 | cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg)); |
| 2512 | cm_id_priv->timewait_info->remote_ca_guid = |
| 2513 | cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg)); |
	cm_id_priv->timewait_info->remote_qpn =
		cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
| 2515 | |
	spin_lock(&cm.lock);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock(&cm.lock);
		spin_unlock_irq(&cm_id_priv->lock);
| 2521 | ret = -EINVAL; |
| 2522 | trace_icm_insert_failed_err( |
| 2523 | IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)); |
| 2524 | goto error; |
| 2525 | } |
| 2526 | /* Check for a stale connection. */ |
	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
	if (timewait_info) {
		cm_remove_remote(cm_id_priv);
		cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
					       timewait_info->work.remote_id);

		spin_unlock(&cm.lock);
		spin_unlock_irq(&cm_id_priv->lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
| 2538 | ret = -EINVAL; |
| 2539 | trace_icm_staleconn_err( |
| 2540 | IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg), |
| 2541 | IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)); |
| 2542 | |
| 2543 | if (cur_cm_id_priv) { |
			ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
			cm_deref_id(cur_cm_id_priv);
| 2546 | } |
| 2547 | |
| 2548 | goto error; |
| 2549 | } |
	spin_unlock(&cm.lock);
| 2551 | |
| 2552 | cm_id_priv->id.state = IB_CM_REP_RCVD; |
| 2553 | cm_id_priv->id.remote_id = |
| 2554 | cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg)); |
	cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
| 2556 | cm_id_priv->initiator_depth = |
| 2557 | IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg); |
| 2558 | cm_id_priv->responder_resources = |
| 2559 | IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg); |
| 2560 | cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg)); |
| 2561 | cm_id_priv->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg); |
| 2562 | cm_id_priv->target_ack_delay = |
| 2563 | IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg); |
| 2564 | cm_id_priv->av.timeout = |
		cm_ack_timeout(cm_id_priv->target_ack_delay,
			       cm_id_priv->av.timeout - 1);
	cm_id_priv->alt_av.timeout =
		cm_ack_timeout(cm_id_priv->target_ack_delay,
			       cm_id_priv->alt_av.timeout - 1);
| 2570 | |
	ib_cancel_mad(cm_id_priv->msg);
| 2572 | cm_queue_work_unlock(cm_id_priv, work); |
| 2573 | return 0; |
| 2574 | |
| 2575 | error: |
| 2576 | cm_deref_id(cm_id_priv); |
| 2577 | return ret; |
| 2578 | } |
| 2579 | |
| 2580 | static int cm_establish_handler(struct cm_work *work) |
| 2581 | { |
| 2582 | struct cm_id_private *cm_id_priv; |
| 2583 | |
| 2584 | /* See comment in cm_establish about lookup. */ |
	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
| 2586 | if (!cm_id_priv) |
| 2587 | return -EINVAL; |
| 2588 | |
	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		spin_unlock_irq(&cm_id_priv->lock);
| 2592 | goto out; |
| 2593 | } |
| 2594 | |
	ib_cancel_mad(cm_id_priv->msg);
| 2596 | cm_queue_work_unlock(cm_id_priv, work); |
| 2597 | return 0; |
| 2598 | out: |
| 2599 | cm_deref_id(cm_id_priv); |
| 2600 | return -EINVAL; |
| 2601 | } |
| 2602 | |
| 2603 | static int cm_rtu_handler(struct cm_work *work) |
| 2604 | { |
| 2605 | struct cm_id_private *cm_id_priv; |
| 2606 | struct cm_rtu_msg *rtu_msg; |
| 2607 | |
| 2608 | rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad; |
| 2609 | cm_id_priv = cm_acquire_id( |
| 2610 | cpu_to_be32(IBA_GET(CM_RTU_REMOTE_COMM_ID, rtu_msg)), |
| 2611 | cpu_to_be32(IBA_GET(CM_RTU_LOCAL_COMM_ID, rtu_msg))); |
| 2612 | if (!cm_id_priv) |
| 2613 | return -EINVAL; |
| 2614 | |
| 2615 | work->cm_event.private_data = |
| 2616 | IBA_GET_MEM_PTR(CM_RTU_PRIVATE_DATA, rtu_msg); |
| 2617 | |
	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
| 2623 | [CM_RTU_COUNTER]); |
| 2624 | goto out; |
| 2625 | } |
| 2626 | cm_id_priv->id.state = IB_CM_ESTABLISHED; |
| 2627 | |
	ib_cancel_mad(cm_id_priv->msg);
| 2629 | cm_queue_work_unlock(cm_id_priv, work); |
| 2630 | return 0; |
| 2631 | out: |
| 2632 | cm_deref_id(cm_id_priv); |
| 2633 | return -EINVAL; |
| 2634 | } |
| 2635 | |
| 2636 | static void cm_format_dreq(struct cm_dreq_msg *dreq_msg, |
| 2637 | struct cm_id_private *cm_id_priv, |
| 2638 | const void *private_data, |
| 2639 | u8 private_data_len) |
| 2640 | { |
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv));
| 2643 | IBA_SET(CM_DREQ_LOCAL_COMM_ID, dreq_msg, |
| 2644 | be32_to_cpu(cm_id_priv->id.local_id)); |
| 2645 | IBA_SET(CM_DREQ_REMOTE_COMM_ID, dreq_msg, |
| 2646 | be32_to_cpu(cm_id_priv->id.remote_id)); |
| 2647 | IBA_SET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg, |
| 2648 | be32_to_cpu(cm_id_priv->remote_qpn)); |
| 2649 | |
| 2650 | if (private_data && private_data_len) |
| 2651 | IBA_SET_MEM(CM_DREQ_PRIVATE_DATA, dreq_msg, private_data, |
| 2652 | private_data_len); |
| 2653 | } |
| 2654 | |
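/*
 * Best-effort DREQ carrying no private data, issued with the cm_id lock
 * held. Allocation or send failures are silently dropped.
 */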
| 2655 | static void cm_issue_dreq(struct cm_id_private *cm_id_priv) |
| 2656 | { |
| 2657 | struct ib_mad_send_buf *msg; |
| 2658 | int ret; |
| 2659 | |
| 2660 | lockdep_assert_held(&cm_id_priv->lock); |
| 2661 | |
| 2662 | msg = cm_alloc_msg(cm_id_priv); |
	if (IS_ERR(msg))
		return;

	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv, NULL, 0);

	trace_icm_send_dreq(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
| 2670 | if (ret) |
| 2671 | cm_free_msg(msg); |
| 2672 | } |
| 2673 | |
| 2674 | int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data, |
| 2675 | u8 private_data_len) |
| 2676 | { |
| 2677 | struct cm_id_private *cm_id_priv = |
| 2678 | container_of(cm_id, struct cm_id_private, id); |
| 2679 | struct ib_mad_send_buf *msg; |
| 2680 | unsigned long flags; |
| 2681 | int ret; |
| 2682 | |
| 2683 | if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE) |
| 2684 | return -EINVAL; |
| 2685 | |
| 2686 | spin_lock_irqsave(&cm_id_priv->lock, flags); |
| 2687 | if (cm_id_priv->id.state != IB_CM_ESTABLISHED) { |
		trace_icm_dreq_skipped(&cm_id_priv->id);
| 2689 | ret = -EINVAL; |
| 2690 | goto unlock; |
| 2691 | } |
| 2692 | |
| 2693 | if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT || |
| 2694 | cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) |
		ib_cancel_mad(cm_id_priv->msg);
| 2696 | |
	msg = cm_alloc_priv_msg(cm_id_priv, IB_CM_DREQ_SENT);
	if (IS_ERR(msg)) {
		cm_enter_timewait(cm_id_priv);
		ret = PTR_ERR(msg);
| 2701 | goto unlock; |
| 2702 | } |
| 2703 | |
	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
| 2705 | private_data, private_data_len); |
| 2706 | |
	trace_icm_send_dreq(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
| 2709 | if (ret) { |
| 2710 | cm_enter_timewait(cm_id_priv); |
| 2711 | cm_free_priv_msg(msg); |
| 2712 | goto unlock; |
| 2713 | } |
| 2714 | |
| 2715 | cm_id_priv->id.state = IB_CM_DREQ_SENT; |
| 2716 | unlock: |
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
| 2718 | return ret; |
| 2719 | } |
| 2720 | EXPORT_SYMBOL(ib_send_cm_dreq); |
| 2721 | |
| 2722 | static void cm_format_drep(struct cm_drep_msg *drep_msg, |
| 2723 | struct cm_id_private *cm_id_priv, |
| 2724 | const void *private_data, |
| 2725 | u8 private_data_len) |
| 2726 | { |
	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
| 2728 | IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg, |
| 2729 | be32_to_cpu(cm_id_priv->id.local_id)); |
| 2730 | IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg, |
| 2731 | be32_to_cpu(cm_id_priv->id.remote_id)); |
| 2732 | |
| 2733 | if (private_data && private_data_len) |
| 2734 | IBA_SET_MEM(CM_DREP_PRIVATE_DATA, drep_msg, private_data, |
| 2735 | private_data_len); |
| 2736 | } |
| 2737 | |
| 2738 | static int cm_send_drep_locked(struct cm_id_private *cm_id_priv, |
| 2739 | void *private_data, u8 private_data_len) |
| 2740 | { |
| 2741 | struct ib_mad_send_buf *msg; |
| 2742 | int ret; |
| 2743 | |
| 2744 | lockdep_assert_held(&cm_id_priv->lock); |
| 2745 | |
| 2746 | if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE) |
| 2747 | return -EINVAL; |
| 2748 | |
| 2749 | if (cm_id_priv->id.state != IB_CM_DREQ_RCVD) { |
		trace_icm_send_drep_err(&cm_id_priv->id);
		kfree(private_data);
| 2752 | return -EINVAL; |
| 2753 | } |
| 2754 | |
| 2755 | cm_set_private_data(cm_id_priv, private_data, private_data_len); |
| 2756 | cm_enter_timewait(cm_id_priv); |
| 2757 | |
| 2758 | msg = cm_alloc_msg(cm_id_priv); |
	if (IS_ERR(msg))
		return PTR_ERR(msg);
| 2761 | |
	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
| 2763 | private_data, private_data_len); |
| 2764 | |
	trace_icm_send_drep(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
| 2767 | if (ret) { |
| 2768 | cm_free_msg(msg); |
| 2769 | return ret; |
| 2770 | } |
| 2771 | return 0; |
| 2772 | } |
| 2773 | |
| 2774 | int ib_send_cm_drep(struct ib_cm_id *cm_id, const void *private_data, |
| 2775 | u8 private_data_len) |
| 2776 | { |
| 2777 | struct cm_id_private *cm_id_priv = |
| 2778 | container_of(cm_id, struct cm_id_private, id); |
| 2779 | unsigned long flags; |
| 2780 | void *data; |
| 2781 | int ret; |
| 2782 | |
| 2783 | data = cm_copy_private_data(private_data, private_data_len); |
	if (IS_ERR(data))
		return PTR_ERR(data);
| 2786 | |
| 2787 | spin_lock_irqsave(&cm_id_priv->lock, flags); |
	ret = cm_send_drep_locked(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
| 2790 | return ret; |
| 2791 | } |
| 2792 | EXPORT_SYMBOL(ib_send_cm_drep); |
| 2793 | |
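/*
 * Reply to a DREQ for which no matching cm_id exists, echoing back the
 * comm IDs from the received message.
 */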
| 2794 | static int cm_issue_drep(struct cm_port *port, |
| 2795 | struct ib_mad_recv_wc *mad_recv_wc) |
| 2796 | { |
| 2797 | struct ib_mad_send_buf *msg = NULL; |
| 2798 | struct cm_dreq_msg *dreq_msg; |
| 2799 | struct cm_drep_msg *drep_msg; |
| 2800 | int ret; |
| 2801 | |
	ret = cm_alloc_response_msg(port, mad_recv_wc, true, &msg);
| 2803 | if (ret) |
| 2804 | return ret; |
| 2805 | |
| 2806 | dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad; |
| 2807 | drep_msg = (struct cm_drep_msg *) msg->mad; |
| 2808 | |
	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
| 2810 | IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg, |
| 2811 | IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg)); |
| 2812 | IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg, |
| 2813 | IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)); |
| 2814 | |
| 2815 | trace_icm_issue_drep( |
| 2816 | IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg), |
| 2817 | IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)); |
	ret = ib_post_send_mad(msg, NULL);
| 2819 | if (ret) |
| 2820 | cm_free_msg(msg); |
| 2821 | |
| 2822 | return ret; |
| 2823 | } |
| 2824 | |
| 2825 | static int cm_dreq_handler(struct cm_work *work) |
| 2826 | { |
| 2827 | struct cm_id_private *cm_id_priv; |
| 2828 | struct cm_dreq_msg *dreq_msg; |
| 2829 | struct ib_mad_send_buf *msg = NULL; |
| 2830 | |
| 2831 | dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad; |
| 2832 | cm_id_priv = cm_acquire_id( |
| 2833 | cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)), |
| 2834 | cpu_to_be32(IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg))); |
| 2835 | if (!cm_id_priv) { |
		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
				[CM_DREQ_COUNTER]);
		cm_issue_drep(work->port, work->mad_recv_wc);
| 2839 | trace_icm_no_priv_err( |
| 2840 | IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg), |
| 2841 | IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)); |
| 2842 | return -EINVAL; |
| 2843 | } |
| 2844 | |
| 2845 | work->cm_event.private_data = |
| 2846 | IBA_GET_MEM_PTR(CM_DREQ_PRIVATE_DATA, dreq_msg); |
| 2847 | |
	spin_lock_irq(&cm_id_priv->lock);
| 2849 | if (cm_id_priv->local_qpn != |
| 2850 | cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg))) |
| 2851 | goto unlock; |
| 2852 | |
| 2853 | switch (cm_id_priv->id.state) { |
| 2854 | case IB_CM_REP_SENT: |
| 2855 | case IB_CM_DREQ_SENT: |
| 2856 | case IB_CM_MRA_REP_RCVD: |
		ib_cancel_mad(cm_id_priv->msg);
| 2858 | break; |
| 2859 | case IB_CM_ESTABLISHED: |
| 2860 | if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT || |
| 2861 | cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) |
| 2862 | ib_cancel_mad(send_buf: cm_id_priv->msg); |
| 2863 | break; |
| 2864 | case IB_CM_TIMEWAIT: |
| 2865 | atomic_long_inc(v: &work->port->counters[CM_RECV_DUPLICATES] |
| 2866 | [CM_DREQ_COUNTER]); |
| 2867 | msg = cm_alloc_response_msg_no_ah(port: work->port, mad_recv_wc: work->mad_recv_wc, |
| 2868 | direct_retry: true); |
| 2869 | if (IS_ERR(ptr: msg)) |
| 2870 | goto unlock; |
| 2871 | |
| 2872 | cm_format_drep(drep_msg: (struct cm_drep_msg *) msg->mad, cm_id_priv, |
| 2873 | private_data: cm_id_priv->private_data, |
| 2874 | private_data_len: cm_id_priv->private_data_len); |
| 2875 | spin_unlock_irq(lock: &cm_id_priv->lock); |
| 2876 | |
| 2877 | if (cm_create_response_msg_ah(port: work->port, mad_recv_wc: work->mad_recv_wc, msg) || |
| 2878 | ib_post_send_mad(send_buf: msg, NULL)) |
| 2879 | cm_free_msg(msg); |
| 2880 | goto deref; |
| 2881 | case IB_CM_DREQ_RCVD: |
| 2882 | atomic_long_inc(v: &work->port->counters[CM_RECV_DUPLICATES] |
| 2883 | [CM_DREQ_COUNTER]); |
| 2884 | goto unlock; |
| 2885 | default: |
| 2886 | trace_icm_dreq_unknown_err(cm_id: &cm_id_priv->id); |
| 2887 | goto unlock; |
| 2888 | } |
| 2889 | cm_id_priv->id.state = IB_CM_DREQ_RCVD; |
| 2890 | cm_id_priv->tid = dreq_msg->hdr.tid; |
| 2891 | cm_queue_work_unlock(cm_id_priv, work); |
| 2892 | return 0; |
| 2893 | |
| 2894 | unlock: spin_unlock_irq(lock: &cm_id_priv->lock); |
| 2895 | deref: cm_deref_id(cm_id_priv); |
| 2896 | return -EINVAL; |
| 2897 | } |
| 2898 | |
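/*
 * Handle a received DREP: only meaningful while a disconnect is in
 * progress (DREQ sent or received), in which case the id enters TIMEWAIT.
 */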
static int cm_drep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_drep_msg *drep_msg;

	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_DREP_REMOTE_COMM_ID, drep_msg)),
		cpu_to_be32(IBA_GET(CM_DREP_LOCAL_COMM_ID, drep_msg)));
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_DREP_PRIVATE_DATA, drep_msg);

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_enter_timewait(cm_id_priv);

	ib_cancel_mad(cm_id_priv->msg);
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

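/*
 * Format and post a REJ for the current connection state: rejecting from
 * the REQ/REP handshake states resets the id to idle, while rejecting
 * after the local REP has been sent moves it to TIMEWAIT instead.
 * Called with cm_id_priv->lock held.
 */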
static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
			      enum ib_cm_rej_reason reason, void *ari,
			      u8 ari_length, const void *private_data,
			      u8 private_data_len)
{
	enum ib_cm_state state = cm_id_priv->id.state;
	struct ib_mad_send_buf *msg;
	int ret;

	lockdep_assert_held(&cm_id_priv->lock);

	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	trace_icm_send_rej(&cm_id_priv->id, reason);

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		cm_reset_to_idle(cm_id_priv);
		msg = cm_alloc_msg(cm_id_priv);
		if (IS_ERR(msg))
			return PTR_ERR(msg);
		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
			      ari, ari_length, private_data, private_data_len,
			      state);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_enter_timewait(cm_id_priv);
		msg = cm_alloc_msg(cm_id_priv);
		if (IS_ERR(msg))
			return PTR_ERR(msg);
		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
			      ari, ari_length, private_data, private_data_len,
			      state);
		break;
	default:
		trace_icm_send_unknown_rej_err(&cm_id_priv->id);
		return -EINVAL;
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_free_msg(msg);
		return ret;
	}

	return 0;
}

int ib_send_cm_rej(struct ib_cm_id *cm_id, enum ib_cm_rej_reason reason,
		   void *ari, u8 ari_length, const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv =
		container_of(cm_id, struct cm_id_private, id);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = cm_send_rej_locked(cm_id_priv, reason, ari, ari_length,
				 private_data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);

static void cm_format_rej_event(struct cm_work *work)
{
	struct cm_rej_msg *rej_msg;
	struct ib_cm_rej_event_param *param;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rej_rcvd;
	param->ari = IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg);
	param->ari_length = IBA_GET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg);
	param->reason = IBA_GET(CM_REJ_REASON, rej_msg);
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_REJ_PRIVATE_DATA, rej_msg);
}

static struct cm_id_private *cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_id_private *cm_id_priv;
	__be32 remote_id;

	remote_id = cpu_to_be32(IBA_GET(CM_REJ_LOCAL_COMM_ID, rej_msg));

	if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_TIMEOUT) {
		cm_id_priv = cm_find_remote_id(
			*((__be64 *)IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg)),
			remote_id);
	} else if (IBA_GET(CM_REJ_MESSAGE_REJECTED, rej_msg) ==
		   CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(
			cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
			0);
	else
		cm_id_priv = cm_acquire_id(
			cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
			remote_id);

	return cm_id_priv;
}

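/*
 * Handle a received REJ: cancel any outstanding transmit and, depending on
 * the state and reject reason, either reset the id to idle or park it in
 * TIMEWAIT (e.g. for a stale connection).
 */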
static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->msg);
		fallthrough;
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->msg);
		fallthrough;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		cm_enter_timewait(cm_id_priv);
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
		    cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
			if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
				ib_cancel_mad(cm_id_priv->msg);
			cm_enter_timewait(cm_id_priv);
			break;
		}
		fallthrough;
	default:
		trace_icm_rej_unknown_err(&cm_id_priv->id);
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}

	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

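/*
 * Move the id into the MRA-sent state matching the message currently being
 * processed (REQ, REP, or LAP) and drop any stashed private data,
 * presumably so that subsequent peer retries can be answered with an MRA.
 */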
int ib_prepare_cm_mra(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	enum ib_cm_state cm_state;
	enum ib_cm_lap_state lap_state;
	unsigned long flags;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		cm_state = IB_CM_MRA_REQ_SENT;
		lap_state = cm_id->lap_state;
		break;
	case IB_CM_REP_RCVD:
		cm_state = IB_CM_MRA_REP_SENT;
		lap_state = cm_id->lap_state;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id->lap_state == IB_CM_LAP_RCVD) {
			cm_state = cm_id->state;
			lap_state = IB_CM_MRA_LAP_SENT;
			break;
		}
		fallthrough;
	default:
		trace_icm_prepare_mra_unknown_err(&cm_id_priv->id);
		ret = -EINVAL;
		goto error_unlock;
	}

	cm_id->state = cm_state;
	cm_id->lap_state = lap_state;
	cm_set_private_data(cm_id_priv, NULL, 0);

error_unlock:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_prepare_cm_mra);

static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(
			cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
			0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(
			cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
			cpu_to_be32(IBA_GET(CM_MRA_LOCAL_COMM_ID, mra_msg)));
	default:
		return NULL;
	}
}

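/*
 * Handle a received MRA: stretch the timeout of the outstanding MAD by the
 * service timeout the peer advertised plus the address vector's timeout,
 * and record the corresponding MRA-received state.
 */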
static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	int timeout;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_MRA_PRIVATE_DATA, mra_msg);
	work->cm_event.param.mra_rcvd.service_timeout =
		IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg);
	timeout = cm_convert_to_ms(IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.timeout);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
			    CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
			    CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
			    CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->msg, timeout)) {
			if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
				atomic_long_inc(
					&work->port->counters[CM_RECV_DUPLICATES]
							     [CM_MRA_COUNTER]);
			goto out;
		}
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_MRA_REP_RCVD:
		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
						     [CM_MRA_COUNTER]);
		fallthrough;
	default:
		trace_icm_mra_unknown_err(&cm_id_priv->id);
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	spin_unlock_irq(&cm_id_priv->lock);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
					struct sa_path_rec *path)
{
	u32 lid;

	if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
		sa_path_set_dlid(path, IBA_GET(CM_LAP_ALTERNATE_LOCAL_PORT_LID,
					       lap_msg));
		sa_path_set_slid(path, IBA_GET(CM_LAP_ALTERNATE_REMOTE_PORT_LID,
					       lap_msg));
	} else {
		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg));
		sa_path_set_dlid(path, lid);

		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg));
		sa_path_set_slid(path, lid);
	}
}

static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
				    struct sa_path_rec *path,
				    struct cm_lap_msg *lap_msg)
{
	path->dgid = *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg);
	path->sgid =
		*IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg);
	path->flow_label =
		cpu_to_be32(IBA_GET(CM_LAP_ALTERNATE_FLOW_LABEL, lap_msg));
	path->hop_limit = IBA_GET(CM_LAP_ALTERNATE_HOP_LIMIT, lap_msg);
	path->traffic_class = IBA_GET(CM_LAP_ALTERNATE_TRAFFIC_CLASS, lap_msg);
	path->reversible = 1;
	path->pkey = cm_id_priv->pkey;
	path->sl = IBA_GET(CM_LAP_ALTERNATE_SL, lap_msg);
	path->mtu_selector = IB_SA_EQ;
	path->mtu = cm_id_priv->path_mtu;
	path->rate_selector = IB_SA_EQ;
	path->rate = IBA_GET(CM_LAP_ALTERNATE_PACKET_RATE, lap_msg);
	path->packet_life_time_selector = IB_SA_EQ;
	path->packet_life_time =
		IBA_GET(CM_LAP_ALTERNATE_LOCAL_ACK_TIMEOUT, lap_msg);
	path->packet_life_time -= (path->packet_life_time > 0);
	cm_format_path_lid_from_lap(lap_msg, path);
}

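/*
 * Handle a received LAP: rebuild the proposed alternate path from the
 * message, refresh the address handles, and queue an IB_CM_LAP_RECEIVED
 * event; duplicates are answered with another MRA while one is pending.
 */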
static int cm_lap_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_lap_msg *lap_msg;
	struct ib_cm_lap_event_param *param;
	struct ib_mad_send_buf *msg = NULL;
	struct rdma_ah_attr ah_attr;
	struct cm_av alt_av = {};
	int ret;

	/* Currently Alternate path messages are not supported for
	 * RoCE link layer.
	 */
	if (rdma_protocol_roce(work->port->cm_dev->ib_device,
			       work->port->port_num))
		return -EINVAL;

	/* todo: verify LAP request and send reject APR if invalid. */
	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_LAP_REMOTE_COMM_ID, lap_msg)),
		cpu_to_be32(IBA_GET(CM_LAP_LOCAL_COMM_ID, lap_msg)));
	if (!cm_id_priv)
		return -EINVAL;

	param = &work->cm_event.param.lap_rcvd;
	memset(&work->path[0], 0, sizeof(work->path[0]));
	cm_path_set_rec_type(work->port->cm_dev->ib_device,
			     work->port->port_num, &work->path[0],
			     IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID,
					     lap_msg));
	param->alternate_path = &work->path[0];
	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_LAP_PRIVATE_DATA, lap_msg);

	ret = ib_init_ah_attr_from_wc(work->port->cm_dev->ib_device,
				      work->port->port_num,
				      work->mad_recv_wc->wc,
				      work->mad_recv_wc->recv_buf.grh,
				      &ah_attr);
	if (ret)
		goto deref;

	ret = cm_init_av_by_path(param->alternate_path, NULL, &alt_av);
	if (ret) {
		rdma_destroy_ah_attr(&ah_attr);
		goto deref;
	}

	spin_lock_irq(&cm_id_priv->lock);
	cm_init_av_for_lap(work->port, work->mad_recv_wc->wc,
			   &ah_attr, &cm_id_priv->av);
	cm_move_av_from_path(&cm_id_priv->alt_av, &alt_av);

	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
		goto unlock;

	switch (cm_id_priv->id.lap_state) {
	case IB_CM_LAP_UNINIT:
	case IB_CM_LAP_IDLE:
		break;
	case IB_CM_MRA_LAP_SENT:
		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
						     [CM_LAP_COUNTER]);
		msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc,
						  true);
		if (IS_ERR(msg))
			goto unlock;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		spin_unlock_irq(&cm_id_priv->lock);

		if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
		    ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	case IB_CM_LAP_RCVD:
		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
						     [CM_LAP_COUNTER]);
		goto unlock;
	default:
		goto unlock;
	}

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static int cm_apr_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;

	/* Currently Alternate path messages are not supported for
	 * RoCE link layer.
	 */
	if (rdma_protocol_roce(work->port->cm_dev->ib_device,
			       work->port->port_num))
		return -EINVAL;

	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_APR_REMOTE_COMM_ID, apr_msg)),
		cpu_to_be32(IBA_GET(CM_APR_LOCAL_COMM_ID, apr_msg)));
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	work->cm_event.param.apr_rcvd.ap_status =
		IBA_GET(CM_APR_AR_STATUS, apr_msg);
	work->cm_event.param.apr_rcvd.apr_info =
		IBA_GET_MEM_PTR(CM_APR_ADDITIONAL_INFORMATION, apr_msg);
	work->cm_event.param.apr_rcvd.info_len =
		IBA_GET(CM_APR_ADDITIONAL_INFORMATION_LENGTH, apr_msg);
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_APR_PRIVATE_DATA, apr_msg);

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	ib_cancel_mad(cm_id_priv->msg);
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

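/*
 * Handle TIMEWAIT expiry: remove the timewait entry from the global list
 * and, if the id is still in TIMEWAIT for the same remote QPN, return it
 * to idle and deliver IB_CM_TIMEWAIT_EXIT.
 */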
static int cm_timewait_handler(struct cm_work *work)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;

	timewait_info = container_of(work, struct cm_timewait_info, work);
	spin_lock_irq(&cm.lock);
	list_del(&timewait_info->list);
	spin_unlock_irq(&cm.lock);

	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
				   timewait_info->work.remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_req_param *param)
{
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv));
	IBA_SET(CM_SIDR_REQ_REQUESTID, sidr_req_msg,
		be32_to_cpu(cm_id_priv->id.local_id));
	IBA_SET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg,
		be16_to_cpu(param->path->pkey));
	IBA_SET(CM_SIDR_REQ_SERVICEID, sidr_req_msg,
		be64_to_cpu(param->service_id));

	if (param->private_data && param->private_data_len)
		IBA_SET_MEM(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg,
			    param->private_data, param->private_data_len);
}

int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_av av = {};
	unsigned long flags;
	int ret;

	if (!param->path || (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, param->sgid_attr, &av);
	if (ret)
		return ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	cm_move_av_from_path(&cm_id_priv->av, &av);
	cm_id->service_id = param->service_id;
	cm_id_priv->timeout_ms = param->timeout_ms;
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	if (cm_id->state != IB_CM_IDLE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	msg = cm_alloc_priv_msg(cm_id_priv, IB_CM_SIDR_REQ_SENT);
	if (IS_ERR(msg)) {
		ret = PTR_ERR(msg);
		goto out_unlock;
	}

	cm_format_sidr_req((struct cm_sidr_req_msg *)msg->mad, cm_id_priv,
			   param);

	trace_icm_send_sidr_req(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto out_free;
	cm_id->state = IB_CM_SIDR_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;
out_free:
	cm_free_priv_msg(msg);
out_unlock:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);

static void cm_format_sidr_req_event(struct cm_work *work,
				     const struct cm_id_private *rx_cm_id,
				     struct ib_cm_id *listen_id)
{
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_cm_sidr_req_event_param *param;

	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_req_rcvd;
	param->pkey = IBA_GET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg);
	param->listen_id = listen_id;
	param->service_id =
		cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
	param->bth_pkey = cm_get_bth_pkey(work);
	param->port = work->port->port_num;
	param->sgid_attr = rx_cm_id->av.ah_attr.grh.sgid_attr;
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg);
}

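/*
 * Handle a received SIDR REQ: allocate a fresh cm_id, look up the listener
 * registered for the requested service ID, and invoke its callback;
 * unmatched requests are answered with IB_SIDR_UNSUPPORTED.
 */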
static int cm_sidr_req_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;
	int ret;

	cm_id_priv =
		cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id_priv))
		return PTR_ERR(cm_id_priv);

	/* Record SGID/SLID and request ID for lookup. */
	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;

	cm_id_priv->id.remote_id =
		cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg));
	cm_id_priv->id.service_id =
		cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
	cm_id_priv->tid = sidr_req_msg->hdr.tid;

	wc = work->mad_recv_wc->wc;
	cm_id_priv->sidr_slid = wc->slid;
	ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				      work->mad_recv_wc->recv_buf.grh,
				      &cm_id_priv->av);
	if (ret)
		goto out;

	spin_lock_irq(&cm.lock);
	listen_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (listen_cm_id_priv) {
		spin_unlock_irq(&cm.lock);
		atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
						     [CM_SIDR_REQ_COUNTER]);
		goto out; /* Duplicate message. */
	}
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   cm_id_priv->id.service_id);
	if (!listen_cm_id_priv) {
		spin_unlock_irq(&cm.lock);
		ib_send_cm_sidr_rep(&cm_id_priv->id,
				    &(struct ib_cm_sidr_rep_param){
					    .status = IB_SIDR_UNSUPPORTED });
		goto out; /* No match. */
	}
	spin_unlock_irq(&cm.lock);

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;

	/*
	 * A SIDR ID does not need to be in the xarray since it does not receive
	 * mads, is not placed in the remote_id or remote_qpn rbtree, and does
	 * not enter timewait.
	 */

	cm_format_sidr_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);
	/*
	 * A pointer to the listen_cm_id is held in the event, so this deref
	 * must be after the event is delivered above.
	 */
	cm_deref_id(listen_cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}

static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_ece_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			      cm_id_priv->tid, param->ece.attr_mod);
	IBA_SET(CM_SIDR_REP_REQUESTID, sidr_rep_msg,
		be32_to_cpu(cm_id_priv->id.remote_id));
	IBA_SET(CM_SIDR_REP_STATUS, sidr_rep_msg, param->status);
	IBA_SET(CM_SIDR_REP_QPN, sidr_rep_msg, param->qp_num);
	IBA_SET(CM_SIDR_REP_SERVICEID, sidr_rep_msg,
		be64_to_cpu(cm_id_priv->id.service_id));
	IBA_SET(CM_SIDR_REP_Q_KEY, sidr_rep_msg, param->qkey);
	IBA_SET(CM_SIDR_REP_VENDOR_ID_L, sidr_rep_msg,
		param->ece.vendor_id & 0xFF);
	IBA_SET(CM_SIDR_REP_VENDOR_ID_H, sidr_rep_msg,
		(param->ece.vendor_id >> 8) & 0xFF);

	if (param->info && param->info_length)
		IBA_SET_MEM(CM_SIDR_REP_ADDITIONAL_INFORMATION, sidr_rep_msg,
			    param->info, param->info_length);

	if (param->private_data && param->private_data_len)
		IBA_SET_MEM(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg,
			    param->private_data, param->private_data_len);
}

static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
				   struct ib_cm_sidr_rep_param *param)
{
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	lockdep_assert_held(&cm_id_priv->lock);

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_RCVD)
		return -EINVAL;

	msg = cm_alloc_msg(cm_id_priv);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	trace_icm_send_sidr_rep(&cm_id_priv->id);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_free_msg(msg);
		return ret;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	spin_lock_irqsave(&cm.lock, flags);
	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
		RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
	}
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;
}

int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv =
		container_of(cm_id, struct cm_id_private, id);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = cm_send_sidr_rep_locked(cm_id_priv, param);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);

static void cm_format_sidr_rep_event(struct cm_work *work,
				     const struct cm_id_private *cm_id_priv)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct ib_cm_sidr_rep_event_param *param;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_rep_rcvd;
	param->status = IBA_GET(CM_SIDR_REP_STATUS, sidr_rep_msg);
	param->qkey = IBA_GET(CM_SIDR_REP_Q_KEY, sidr_rep_msg);
	param->qpn = IBA_GET(CM_SIDR_REP_QPN, sidr_rep_msg);
	param->info = IBA_GET_MEM_PTR(CM_SIDR_REP_ADDITIONAL_INFORMATION,
				      sidr_rep_msg);
	param->info_len = IBA_GET(CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH,
				  sidr_rep_msg);
	param->sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg);
}

static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_SIDR_REP_REQUESTID, sidr_rep_msg)), 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->msg);
	spin_unlock_irq(&cm_id_priv->lock);

	cm_format_sidr_rep_event(work, cm_id_priv);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}

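/*
 * Completion-error path for CM MADs: stale sends are simply freed, while
 * a failure on the id's current message resets the connection state and
 * reports the matching *_ERROR event to the consumer.
 */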
static void cm_process_send_error(struct cm_id_private *cm_id_priv,
				  struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	enum ib_cm_state state = (unsigned long) msg->context[1];
	struct ib_cm_event cm_event = {};
	int ret;

	/* Discard old sends. */
	spin_lock_irq(&cm_id_priv->lock);
	if (msg != cm_id_priv->msg) {
		spin_unlock_irq(&cm_id_priv->lock);
		cm_free_msg(msg);
		cm_deref_id(cm_id_priv);
		return;
	}
	cm_free_priv_msg(msg);

	if (state != cm_id_priv->id.state || wc_status == IB_WC_SUCCESS ||
	    wc_status == IB_WC_WR_FLUSH_ERR)
		goto out_unlock;

	trace_icm_mad_send_err(state, wc_status);
	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto out_unlock;
	}
	spin_unlock_irq(&cm_id_priv->lock);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
out_unlock:
	spin_unlock_irq(&cm_id_priv->lock);
}

static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
	struct cm_id_private *cm_id_priv;
	struct cm_port *port;
	u16 attr_index;

	port = mad_agent->context;
	attr_index = be16_to_cpu(((struct ib_mad_hdr *)
				  msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;

	if (msg->context[0] == CM_DIRECT_RETRY_CTX) {
		msg->retries = 1;
		cm_id_priv = NULL;
	} else {
		cm_id_priv = msg->context[0];
	}

	atomic_long_add(1 + msg->retries, &port->counters[CM_XMIT][attr_index]);
	if (msg->retries)
		atomic_long_add(msg->retries,
				&port->counters[CM_XMIT_RETRIES][attr_index]);

	if (cm_id_priv)
		cm_process_send_error(cm_id_priv, msg, mad_send_wc->status);
	else
		cm_free_msg(msg);
}

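/*
 * Workqueue entry point: dispatch a queued CM event to its handler.  A
 * handler consumes the work on success, so it is freed here only on error.
 */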
static void cm_work_handler(struct work_struct *_work)
{
	struct cm_work *work = container_of(_work, struct cm_work, work.work);
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		trace_icm_handler_err(work->cm_event.event);
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}

static int cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;
	struct cm_device *cm_dev;

	cm_dev = ib_get_client_data(cm_id->device, &cm_client);
	if (!cm_dev)
		return -ENODEV;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		trace_icm_establish_err(cm_id);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item. To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;

	/* Check if the device started its remove_one */
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_dev->going_down) {
		queue_delayed_work(cm.wq, &work->work, 0);
	} else {
		kfree(work);
		ret = -ENODEV;
	}
	spin_unlock_irqrestore(&cm.lock, flags);

out:
	return ret;
}

static int cm_migrate(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_ESTABLISHED &&
	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
		cm_id->lap_state = IB_CM_LAP_IDLE;
		cm_id_priv->av = cm_id_priv->alt_av;
	} else
		ret = -EINVAL;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}

int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
{
	int ret;

	switch (event) {
	case IB_EVENT_COMM_EST:
		ret = cm_establish(cm_id);
		break;
	case IB_EVENT_PATH_MIG:
		ret = cm_migrate(cm_id);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_notify);

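/*
 * MAD-layer receive callback: map the attribute ID to a CM event, bump the
 * receive counters, and queue a work item so the MAD is processed in
 * workqueue context.
 */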
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_buf *send_buf,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_port *port = mad_agent->context;
	struct cm_work *work;
	enum ib_cm_event_type event;
	bool alt_path = false;
	u16 attr_id;
	int paths = 0;
	int going_down = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		alt_path = cm_req_has_alt_path((struct cm_req_msg *)
						mad_recv_wc->recv_buf.mad);
		paths = 1 + (alt_path != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
	atomic_long_inc(&port->counters[CM_RECV][attr_id - CM_ATTR_ID_OFFSET]);

	work = kmalloc(struct_size(work, path, paths), GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = port;

	/* Check if the device started its remove_one */
	spin_lock_irq(&cm.lock);
	if (!port->cm_dev->going_down)
		queue_delayed_work(cm.wq, &work->work, 0);
	else
		going_down = 1;
	spin_unlock_irq(&cm.lock);

	if (going_down) {
		kfree(work);
		ib_free_recv_mad(mad_recv_wc);
	}
}

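/*
 * The three helpers below fill in ib_qp_attr for the INIT, RTR, and RTS
 * transitions of the connection's QP, using the values negotiated during
 * the CM exchange for the id's current state.
 */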
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources) {
			struct ib_device *ib_dev = cm_id_priv->id.device;
			u64 support_flush = ib_dev->attrs.device_cap_flags &
			  (IB_DEVICE_FLUSH_GLOBAL | IB_DEVICE_FLUSH_PERSISTENT);
			u32 flushable = support_flush ?
					(IB_ACCESS_FLUSH_GLOBAL |
					 IB_ACCESS_FLUSH_PERSISTENT) : 0;

			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
						    IB_ACCESS_REMOTE_ATOMIC |
						    flushable;
		}
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		if (cm_id_priv->av.port)
			qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		trace_icm_qp_init_err(&cm_id_priv->id);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		if ((qp_attr->ah_attr.type == RDMA_AH_ATTR_TYPE_IB) &&
		    cm_id_priv->av.dlid_datapath &&
		    (cm_id_priv->av.dlid_datapath != 0xffff))
			qp_attr->ah_attr.ib.dlid = cm_id_priv->av.dlid_datapath;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC ||
		    cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr) &&
		    cm_id_priv->alt_av.port) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		trace_icm_qp_rtr_err(&cm_id_priv->id);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	/* Allow transition to RTS before sending REP */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:

	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
			switch (cm_id_priv->qp_type) {
			case IB_QPT_RC:
			case IB_QPT_XRC_INI:
				*qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
						 IB_QP_MAX_QP_RD_ATOMIC;
				qp_attr->retry_cnt = cm_id_priv->retry_count;
				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
				qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
				fallthrough;
			case IB_QPT_XRC_TGT:
				*qp_attr_mask |= IB_QP_TIMEOUT;
				qp_attr->timeout = cm_id_priv->av.timeout;
				break;
			default:
				break;
			}
			if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
				qp_attr->path_mig_state = IB_MIG_REARM;
			}
		} else {
			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
			if (cm_id_priv->alt_av.port)
				qp_attr->alt_port_num =
					cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		trace_icm_qp_rts_err(&cm_id_priv->id);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);

static ssize_t cm_show_counter(struct ib_device *ibdev, u32 port_num,
			       struct ib_port_attribute *attr, char *buf)
{
	struct cm_counter_attribute *cm_attr =
		container_of(attr, struct cm_counter_attribute, attr);
	struct cm_device *cm_dev = ib_get_client_data(ibdev, &cm_client);

	if (WARN_ON(!cm_dev))
		return -EINVAL;

	return sysfs_emit(
		buf, "%ld\n",
		atomic_long_read(
			&cm_dev->port[port_num - 1]
				 ->counters[cm_attr->group][cm_attr->index]));
}

#define CM_COUNTER_ATTR(_name, _group, _index)				\
	{								\
		.attr = __ATTR(_name, 0444, cm_show_counter, NULL),	\
		.group = _group, .index = _index			\
	}

#define CM_COUNTER_GROUP(_group, _name)					\
	static struct cm_counter_attribute cm_counter_attr_##_group[] = { \
		CM_COUNTER_ATTR(req, _group, CM_REQ_COUNTER),		\
		CM_COUNTER_ATTR(mra, _group, CM_MRA_COUNTER),		\
		CM_COUNTER_ATTR(rej, _group, CM_REJ_COUNTER),		\
		CM_COUNTER_ATTR(rep, _group, CM_REP_COUNTER),		\
		CM_COUNTER_ATTR(rtu, _group, CM_RTU_COUNTER),		\
		CM_COUNTER_ATTR(dreq, _group, CM_DREQ_COUNTER),		\
		CM_COUNTER_ATTR(drep, _group, CM_DREP_COUNTER),		\
		CM_COUNTER_ATTR(sidr_req, _group, CM_SIDR_REQ_COUNTER),	\
		CM_COUNTER_ATTR(sidr_rep, _group, CM_SIDR_REP_COUNTER),	\
		CM_COUNTER_ATTR(lap, _group, CM_LAP_COUNTER),		\
		CM_COUNTER_ATTR(apr, _group, CM_APR_COUNTER),		\
	};								\
	static struct attribute *cm_counter_attrs_##_group[] = {	\
		&cm_counter_attr_##_group[0].attr.attr,			\
		&cm_counter_attr_##_group[1].attr.attr,			\
		&cm_counter_attr_##_group[2].attr.attr,			\
		&cm_counter_attr_##_group[3].attr.attr,			\
		&cm_counter_attr_##_group[4].attr.attr,			\
		&cm_counter_attr_##_group[5].attr.attr,			\
		&cm_counter_attr_##_group[6].attr.attr,			\
		&cm_counter_attr_##_group[7].attr.attr,			\
		&cm_counter_attr_##_group[8].attr.attr,			\
		&cm_counter_attr_##_group[9].attr.attr,			\
		&cm_counter_attr_##_group[10].attr.attr,		\
		NULL,							\
	};								\
	static const struct attribute_group cm_counter_group_##_group = { \
		.name = _name,						\
		.attrs = cm_counter_attrs_##_group,			\
	};

CM_COUNTER_GROUP(CM_XMIT, "cm_tx_msgs")
CM_COUNTER_GROUP(CM_XMIT_RETRIES, "cm_tx_retries")
CM_COUNTER_GROUP(CM_RECV, "cm_rx_msgs")
CM_COUNTER_GROUP(CM_RECV_DUPLICATES, "cm_rx_duplicates")

static const struct attribute_group *cm_counter_groups[] = {
	&cm_counter_group_CM_XMIT,
	&cm_counter_group_CM_XMIT_RETRIES,
	&cm_counter_group_CM_RECV,
	&cm_counter_group_CM_RECV_DUPLICATES,
	NULL,
};

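/*
 * Client hook invoked for each new ib_device: register a CM MAD agent and
 * the counter sysfs groups on every port that supports IB CM and advertise
 * IB_PORT_CM_SUP, unwinding already-initialized ports on failure.
 */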
static int cm_add_one(struct ib_device *ib_device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION,
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	int count = 0;
	u32 i;

	cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
			 GFP_KERNEL);
	if (!cm_dev)
		return -ENOMEM;

	kref_init(&cm_dev->kref);
	rwlock_init(&cm_dev->mad_agent_lock);
	cm_dev->ib_device = ib_device;
	cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
	cm_dev->going_down = 0;

	ib_set_client_data(ib_device, &cm_client, cm_dev);

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	rdma_for_each_port (ib_device, i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = kzalloc(sizeof *port, GFP_KERNEL);
		if (!port) {
			ret = -ENOMEM;
			goto error1;
		}

		cm_dev->port[i-1] = port;
		port->cm_dev = cm_dev;
		port->port_num = i;

		ret = ib_port_register_client_groups(ib_device, i,
						     cm_counter_groups);
		if (ret)
			goto error1;

		port->mad_agent = ib_register_mad_agent(ib_device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port,
							0);
		if (IS_ERR(port->mad_agent)) {
			ret = PTR_ERR(port->mad_agent);
			goto error2;
		}

		ret = ib_modify_port(ib_device, i, 0, &port_modify);
		if (ret)
			goto error3;

		count++;
	}

	if (!count) {
		ret = -EOPNOTSUPP;
		goto free;
	}

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return 0;

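	/*
	 * Unwind in reverse order: the labels below undo the port that
	 * failed part-way through setup, then the loop undoes every
	 * earlier CM-capable port; the final cm_device_put() drops the
	 * allocation reference.
	 */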
error3:
	ib_unregister_mad_agent(port->mad_agent);
error2:
	ib_port_unregister_client_groups(ib_device, i, cm_counter_groups);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
		ib_port_unregister_client_groups(ib_device, i,
						 cm_counter_groups);
	}
free:
	cm_device_put(cm_dev);
	return ret;
}

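/*
 * ib_client "remove" callback: take the device off the global list, mark
 * it as going down so no new work is generated, then tear down each
 * port's MAD agent and sysfs groups in turn.
 */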
static void cm_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct cm_device *cm_dev = client_data;
	struct cm_port *port;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	u32 i;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	spin_lock_irq(&cm.lock);
	cm_dev->going_down = 1;
	spin_unlock_irq(&cm.lock);

	rdma_for_each_port (ib_device, i) {
		struct ib_mad_agent *mad_agent;

		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		mad_agent = port->mad_agent;
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		/*
		 * Flush the workqueue after setting going_down; this
		 * guarantees that no new work will be queued by the recv
		 * handler, after which it is safe to unregister the MAD
		 * agent.
		 */
		flush_workqueue(cm.wq);
		/*
		 * The above ensures no call paths from the work are still
		 * running; the remaining paths all take mad_agent_lock.
		 */
		write_lock(&cm_dev->mad_agent_lock);
		port->mad_agent = NULL;
		write_unlock(&cm_dev->mad_agent_lock);
		ib_unregister_mad_agent(mad_agent);
		ib_port_unregister_client_groups(ib_device, i,
						 cm_counter_groups);
	}

	cm_device_put(cm_dev);
}

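/*
 * Module init: set up the global CM state (lookup trees, ID allocator,
 * timewait list, workqueue) and register with the IB core so that
 * cm_add_one() runs for every existing and future device.
 */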
static int __init ib_cm_init(void)
{
	int ret;

	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
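	/*
	 * Local communication IDs come from the XArray allocator; the
	 * random operand is mixed into the IDs that go on the wire so
	 * they are not trivially guessable.
	 */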
	xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	INIT_LIST_HEAD(&cm.timewait_list);

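	/* All deferred CM event processing runs on this workqueue. */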
	cm.wq = alloc_workqueue("ib_cm", 0, 1);
	if (!cm.wq) {
		ret = -ENOMEM;
		goto error2;
	}

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error3;

	return 0;
error3:
	destroy_workqueue(cm.wq);
error2:
	return ret;
}

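/*
 * Module exit: cancel pending timewait work before unregistering the
 * client so the workqueue can drain, then free any timewait entries
 * whose delayed work never ran.
 */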
static void __exit ib_cm_cleanup(void)
{
	struct cm_timewait_info *timewait_info, *tmp;

	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	ib_unregister_client(&cm_client);
	destroy_workqueue(cm.wq);

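	/*
	 * Entries whose delayed work was cancelled above were never freed
	 * by their handler; reap them now.
	 */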
	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
	}

	WARN_ON(!xa_empty(&cm.local_id_table));
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);