// SPDX-License-Identifier: GPL-2.0-or-later
/* Client connection-specific management code.
 *
 * Copyright (C) 2016, 2020 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * Client connections need to be cached for a little while after they've made a
 * call so as to handle retransmitted DATA packets in case the server didn't
 * receive the final ACK or terminating ABORT we sent it.
 *
 * There is one flag of relevance to the cache:
 *
 *  (1) DONT_REUSE - The connection should be discarded as soon as possible and
 *      should not be reused. This is set when an exclusive connection is used
 *      or a call ID counter overflows.
 *
 * The caching state may only be changed if the cache lock is held.
 *
 * There are two idle client connection expiry durations. If the total number
 * of connections is below the reap threshold, we use the normal duration; if
 * it's above, we use the fast duration.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>

#include "ar-internal.h"

__read_mostly unsigned int rxrpc_reap_client_connections = 900;
__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
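
/* For example, with the defaults above an idle client conn normally lingers
 * for two minutes; once more than 900 client conns exist, the two-second fast
 * expiry is used instead (see rxrpc_discard_expired_client_conns() below).
 */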

static void rxrpc_activate_bundle(struct rxrpc_bundle *bundle)
{
	atomic_inc(&bundle->active);
}

/*
 * Release a connection ID for a client connection.
 */
static void rxrpc_put_client_connection_id(struct rxrpc_local *local,
					   struct rxrpc_connection *conn)
{
	idr_remove(&local->conn_ids, conn->proto.cid >> RXRPC_CIDSHIFT);
}

/*
 * Destroy the client connection ID tree.
 */
static void rxrpc_destroy_client_conn_ids(struct rxrpc_local *local)
{
	struct rxrpc_connection *conn;
	int id;

	if (!idr_is_empty(&local->conn_ids)) {
		idr_for_each_entry(&local->conn_ids, conn, id) {
			pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
			       conn, refcount_read(&conn->ref));
		}
		BUG();
	}

	idr_destroy(&local->conn_ids);
}

/*
 * Allocate a connection bundle.
 */
static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_call *call,
					       gfp_t gfp)
{
	static atomic_t rxrpc_bundle_id;
	struct rxrpc_bundle *bundle;

	bundle = kzalloc(sizeof(*bundle), gfp);
	if (bundle) {
		bundle->local = call->local;
		bundle->peer = rxrpc_get_peer(call->peer, rxrpc_peer_get_bundle);
		bundle->key = key_get(call->key);
		bundle->security = call->security;
		bundle->exclusive = test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags);
		bundle->upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags);
		bundle->service_id = call->dest_srx.srx_service;
		bundle->security_level = call->security_level;
		bundle->debug_id = atomic_inc_return(&rxrpc_bundle_id);
		refcount_set(&bundle->ref, 1);
		atomic_set(&bundle->active, 1);
		INIT_LIST_HEAD(&bundle->waiting_calls);
		trace_rxrpc_bundle(bundle->debug_id, 1, rxrpc_bundle_new);

		write_lock(&bundle->local->rxnet->conn_lock);
		list_add_tail(&bundle->proc_link, &bundle->local->rxnet->bundle_proc_list);
		write_unlock(&bundle->local->rxnet->conn_lock);
	}
	return bundle;
}

struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle,
				      enum rxrpc_bundle_trace why)
{
	int r;

	__refcount_inc(&bundle->ref, &r);
	trace_rxrpc_bundle(bundle->debug_id, r + 1, why);
	return bundle;
}

static void rxrpc_free_bundle(struct rxrpc_bundle *bundle)
{
	trace_rxrpc_bundle(bundle->debug_id, refcount_read(&bundle->ref),
			   rxrpc_bundle_free);
	write_lock(&bundle->local->rxnet->conn_lock);
	list_del(&bundle->proc_link);
	write_unlock(&bundle->local->rxnet->conn_lock);
	rxrpc_put_peer(bundle->peer, rxrpc_peer_put_bundle);
	key_put(bundle->key);
	kfree(bundle);
}

void rxrpc_put_bundle(struct rxrpc_bundle *bundle, enum rxrpc_bundle_trace why)
{
	unsigned int id;
	bool dead;
	int r;

	if (bundle) {
		id = bundle->debug_id;
		dead = __refcount_dec_and_test(&bundle->ref, &r);
		trace_rxrpc_bundle(id, r - 1, why);
		if (dead)
			rxrpc_free_bundle(bundle);
	}
}

/*
 * Get rid of outstanding client connection preallocations when a local
 * endpoint is destroyed.
 */
void rxrpc_purge_client_connections(struct rxrpc_local *local)
{
	rxrpc_destroy_client_conn_ids(local);
}

/*
 * Allocate a client connection.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle)
{
	struct rxrpc_connection *conn;
	struct rxrpc_local *local = bundle->local;
	struct rxrpc_net *rxnet = local->rxnet;
	int id;

	_enter("");

	conn = rxrpc_alloc_connection(rxnet, GFP_ATOMIC | __GFP_NOWARN);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	id = idr_alloc_cyclic(&local->conn_ids, conn, 1, 0x40000000,
			      GFP_ATOMIC | __GFP_NOWARN);
	if (id < 0) {
		kfree(conn);
		return ERR_PTR(id);
	}

	refcount_set(&conn->ref, 1);
	conn->proto.cid = id << RXRPC_CIDSHIFT;
	conn->proto.epoch = local->rxnet->epoch;
	conn->out_clientflag = RXRPC_CLIENT_INITIATED;
	conn->bundle = rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_conn);
	conn->local = rxrpc_get_local(bundle->local, rxrpc_local_get_client_conn);
	conn->peer = rxrpc_get_peer(bundle->peer, rxrpc_peer_get_client_conn);
	conn->key = key_get(bundle->key);
	conn->security = bundle->security;
	conn->exclusive = bundle->exclusive;
	conn->upgrade = bundle->upgrade;
	conn->orig_service_id = bundle->service_id;
	conn->security_level = bundle->security_level;
	conn->state = RXRPC_CONN_CLIENT_UNSECURED;
	conn->service_id = conn->orig_service_id;

	if (conn->security == &rxrpc_no_security)
		conn->state = RXRPC_CONN_CLIENT;

	atomic_inc(&rxnet->nr_conns);
	write_lock(&rxnet->conn_lock);
	list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
	write_unlock(&rxnet->conn_lock);

	rxrpc_see_connection(conn, rxrpc_conn_new_client);

	atomic_inc(&rxnet->nr_client_conns);
	trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
	return conn;
}

/*
 * Determine if a connection may be reused.
 */
static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet;
	int id_cursor, id, distance, limit;

	if (!conn)
		goto dont_reuse;

	rxnet = conn->rxnet;
	if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
		goto dont_reuse;

	if ((conn->state != RXRPC_CONN_CLIENT_UNSECURED &&
	     conn->state != RXRPC_CONN_CLIENT) ||
	    conn->proto.epoch != rxnet->epoch)
		goto mark_dont_reuse;

	/* The IDR tree gets very expensive on memory if the connection IDs are
	 * widely scattered throughout the number space, so we shall want to
	 * kill off connections that, say, have an ID more than about four
	 * times the maximum number of client conns away from the current
	 * allocation point to try and keep the IDs concentrated.
	 */
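	/* For example, with 200 conns in total, limit = max(200 * 4, 1024) =
	 * 1024, so a conn whose ID lies more than 1024 away from the cursor
	 * is marked DONT_REUSE rather than recycled.
	 */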
	id_cursor = idr_get_cursor(&conn->local->conn_ids);
	id = conn->proto.cid >> RXRPC_CIDSHIFT;
	distance = id - id_cursor;
	if (distance < 0)
		distance = -distance;
	limit = umax(atomic_read(&rxnet->nr_conns) * 4, 1024);
	if (distance > limit)
		goto mark_dont_reuse;

	return true;

mark_dont_reuse:
	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
dont_reuse:
	return false;
}

/*
 * Look up the conn bundle that matches the connection parameters, adding it if
 * it doesn't yet exist.
 */
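/* Bundles are keyed on the {peer, key, security_level, upgrade} tuple compared
 * below; exclusive calls skip the tree and always get a private bundle.
 */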
int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp)
{
	struct rxrpc_bundle *bundle, *candidate;
	struct rxrpc_local *local = call->local;
	struct rb_node *p, **pp, *parent;
	long diff;
	bool upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags);

	_enter("{%px,%x,%u,%u}",
	       call->peer, key_serial(call->key), call->security_level,
	       upgrade);

	if (test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags)) {
		call->bundle = rxrpc_alloc_bundle(call, gfp);
		return call->bundle ? 0 : -ENOMEM;
	}

	/* First, see if the bundle is already there. */
	_debug("search 1");
	spin_lock(&local->client_bundles_lock);
	p = local->client_bundles.rb_node;
	while (p) {
		bundle = rb_entry(p, struct rxrpc_bundle, local_node);

#define cmp(X, Y) ((long)(X) - (long)(Y))
		diff = (cmp(bundle->peer, call->peer) ?:
			cmp(bundle->key, call->key) ?:
			cmp(bundle->security_level, call->security_level) ?:
			cmp(bundle->upgrade, upgrade));
#undef cmp
		if (diff < 0)
			p = p->rb_left;
		else if (diff > 0)
			p = p->rb_right;
		else
			goto found_bundle;
	}
	spin_unlock(&local->client_bundles_lock);
	_debug("not found");

	/* It wasn't. We need to add one. */
	candidate = rxrpc_alloc_bundle(call, gfp);
	if (!candidate)
		return -ENOMEM;

	_debug("search 2");
	spin_lock(&local->client_bundles_lock);
	pp = &local->client_bundles.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		bundle = rb_entry(parent, struct rxrpc_bundle, local_node);

#define cmp(X, Y) ((long)(X) - (long)(Y))
		diff = (cmp(bundle->peer, call->peer) ?:
			cmp(bundle->key, call->key) ?:
			cmp(bundle->security_level, call->security_level) ?:
			cmp(bundle->upgrade, upgrade));
#undef cmp
		if (diff < 0)
			pp = &(*pp)->rb_left;
		else if (diff > 0)
			pp = &(*pp)->rb_right;
		else
			goto found_bundle_free;
	}

	_debug("new bundle");
	rb_link_node(&candidate->local_node, parent, pp);
	rb_insert_color(&candidate->local_node, &local->client_bundles);
	call->bundle = rxrpc_get_bundle(candidate, rxrpc_bundle_get_client_call);
	spin_unlock(&local->client_bundles_lock);
	_leave(" = B=%u [new]", call->bundle->debug_id);
	return 0;

found_bundle_free:
	rxrpc_free_bundle(candidate);
found_bundle:
	call->bundle = rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_call);
	rxrpc_activate_bundle(bundle);
	spin_unlock(&local->client_bundles_lock);
	_leave(" = B=%u [found]", call->bundle->debug_id);
	return 0;
}

/*
 * Allocate a new connection and add it into a bundle.
 */
static bool rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle,
				     unsigned int slot)
{
	struct rxrpc_connection *conn, *old;
	unsigned int shift = slot * RXRPC_MAXCALLS;
	unsigned int i;

	old = bundle->conns[slot];
	if (old) {
		bundle->conns[slot] = NULL;
		bundle->conn_ids[slot] = 0;
		trace_rxrpc_client(old, -1, rxrpc_client_replace);
		rxrpc_put_connection(old, rxrpc_conn_put_noreuse);
	}

	conn = rxrpc_alloc_client_connection(bundle);
	if (IS_ERR(conn)) {
		bundle->alloc_error = PTR_ERR(conn);
		return false;
	}

	rxrpc_activate_bundle(bundle);
	conn->bundle_shift = shift;
	bundle->conns[slot] = conn;
	bundle->conn_ids[slot] = conn->debug_id;
	for (i = 0; i < RXRPC_MAXCALLS; i++)
		set_bit(shift + i, &bundle->avail_chans);
	return true;
}

/*
 * Add a connection to a bundle if there are no usable connections or we have
 * connections waiting for extra capacity.
 */
static bool rxrpc_bundle_has_space(struct rxrpc_bundle *bundle)
{
	int slot = -1, i, usable;

	_enter("");

	bundle->alloc_error = 0;

	/* See if there are any usable connections. */
	usable = 0;
	for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) {
		if (rxrpc_may_reuse_conn(bundle->conns[i]))
			usable++;
		else if (slot == -1)
			slot = i;
	}

	if (!usable && bundle->upgrade)
		bundle->try_upgrade = true;

	if (!usable)
		goto alloc_conn;

	if (!bundle->avail_chans &&
	    !bundle->try_upgrade &&
	    usable < ARRAY_SIZE(bundle->conns))
		goto alloc_conn;

	_leave("");
	return usable;

alloc_conn:
	return slot >= 0 ? rxrpc_add_conn_to_bundle(bundle, slot) : false;
}

/*
 * Assign a channel to the call at the front of the queue and wake the call up.
 * We don't increment the callNumber counter until this number has been exposed
 * to the world.
 */
static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
				       unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];
	struct rxrpc_bundle *bundle = conn->bundle;
	struct rxrpc_call *call = list_entry(bundle->waiting_calls.next,
					     struct rxrpc_call, wait_link);
	u32 call_id = chan->call_counter + 1;
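	/* (The counter itself is only advanced in rxrpc_expose_client_call()
	 *  once the call ID has been exposed.)
	 */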

	_enter("C=%x,%u", conn->debug_id, channel);

	list_del_init(&call->wait_link);

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);

	/* Cancel the final ACK on the previous call if it hasn't been sent yet
	 * as the DATA packet will implicitly ACK it.
	 */
	clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
	clear_bit(conn->bundle_shift + channel, &bundle->avail_chans);

	rxrpc_see_call(call, rxrpc_call_see_activate_client);
	call->conn = rxrpc_get_connection(conn, rxrpc_conn_get_activate_call);
	call->cid = conn->proto.cid | channel;
	call->call_id = call_id;
	call->dest_srx.srx_service = conn->service_id;
	call->cong_ssthresh = call->peer->cong_ssthresh;
	if (call->cong_cwnd >= call->cong_ssthresh)
		call->cong_ca_state = RXRPC_CA_CONGEST_AVOIDANCE;
	else
		call->cong_ca_state = RXRPC_CA_SLOW_START;

	chan->call_id = call_id;
	chan->call_debug_id = call->debug_id;
	chan->call = call;

	rxrpc_see_call(call, rxrpc_call_see_connected);
	trace_rxrpc_connect_call(call);
	call->tx_last_sent = ktime_get_real();
	rxrpc_start_call_timer(call);
	rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_SEND_REQUEST);
	wake_up(&call->waitq);
}

/*
 * Remove a connection from the idle list if it's on it.
 */
static void rxrpc_unidle_conn(struct rxrpc_connection *conn)
{
	if (!list_empty(&conn->cache_link)) {
		list_del_init(&conn->cache_link);
		rxrpc_put_connection(conn, rxrpc_conn_put_unidle);
	}
}

/*
 * Assign channels and callNumbers to waiting calls.
 */
static void rxrpc_activate_channels(struct rxrpc_bundle *bundle)
{
	struct rxrpc_connection *conn;
	unsigned long avail, mask;
	unsigned int channel, slot;

	trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans);

	if (bundle->try_upgrade)
		mask = 1;
	else
		mask = ULONG_MAX;

	while (!list_empty(&bundle->waiting_calls)) {
		avail = bundle->avail_chans & mask;
		if (!avail)
			break;
		channel = __ffs(avail);
		clear_bit(channel, &bundle->avail_chans);

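		/* Bundle channels pack RXRPC_MAXCALLS per conn slot, so -
		 * assuming the usual RXRPC_MAXCALLS of 4 - bundle channel 9,
		 * say, maps to conn slot 2, channel 1.
		 */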
		slot = channel / RXRPC_MAXCALLS;
		conn = bundle->conns[slot];
		if (!conn)
			break;

		if (bundle->try_upgrade)
			set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
		rxrpc_unidle_conn(conn);

		channel &= (RXRPC_MAXCALLS - 1);
		conn->act_chans |= 1 << channel;
		rxrpc_activate_one_channel(conn, channel);
	}
}

/*
 * Connect waiting channels (called from the I/O thread).
 */
void rxrpc_connect_client_calls(struct rxrpc_local *local)
{
	struct rxrpc_call *call;
	LIST_HEAD(new_client_calls);

	spin_lock_irq(&local->client_call_lock);
	list_splice_tail_init(&local->new_client_calls, &new_client_calls);
	spin_unlock_irq(&local->client_call_lock);

	while ((call = list_first_entry_or_null(&new_client_calls,
						struct rxrpc_call, wait_link))) {
		struct rxrpc_bundle *bundle = call->bundle;

		list_move_tail(&call->wait_link, &bundle->waiting_calls);
		rxrpc_see_call(call, rxrpc_call_see_waiting_call);

		if (rxrpc_bundle_has_space(bundle))
			rxrpc_activate_channels(bundle);
	}
}

/*
 * Note that a call, and thus a connection, is about to be exposed to the
 * world.
 */
void rxrpc_expose_client_call(struct rxrpc_call *call)
{
	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[channel];

	if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		/* Mark the call ID as being used. If the callNumber counter
		 * exceeds ~2 billion, we kill the connection after its
		 * outstanding calls have finished so that the counter doesn't
		 * wrap.
		 */
		chan->call_counter++;
		if (chan->call_counter >= INT_MAX)
			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
		trace_rxrpc_client(conn, channel, rxrpc_client_exposed);

		spin_lock_irq(&call->peer->lock);
		hlist_add_head(&call->error_link, &call->peer->error_targets);
		spin_unlock_irq(&call->peer->lock);
	}
}

/*
 * Set the reap timer.
 */
static void rxrpc_set_client_reap_timer(struct rxrpc_local *local)
{
	if (!local->kill_all_client_conns) {
		unsigned long now = jiffies;
		unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;

		if (local->rxnet->live)
			timer_reduce(&local->client_conn_reap_timer, reap_at);
	}
}

/*
 * Disconnect a client call.
 */
void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call *call)
{
	struct rxrpc_connection *conn;
	struct rxrpc_channel *chan = NULL;
	struct rxrpc_local *local = bundle->local;
	unsigned int channel;
	bool may_reuse;
	u32 cid;

	_enter("c=%x", call->debug_id);

	/* Calls that have never actually been assigned a channel can simply be
	 * discarded.
	 */
	conn = call->conn;
	if (!conn) {
		_debug("call is waiting");
		ASSERTCMP(call->call_id, ==, 0);
		ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
		/* May still be on ->new_client_calls. */
		spin_lock_irq(&local->client_call_lock);
		list_del_init(&call->wait_link);
		spin_unlock_irq(&local->client_call_lock);
		return;
	}

	cid = call->cid;
	channel = cid & RXRPC_CHANNELMASK;
	chan = &conn->channels[channel];
	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);

	if (WARN_ON(chan->call != call))
		return;

	may_reuse = rxrpc_may_reuse_conn(conn);

	/* If a client call was exposed to the world, we save the result for
	 * retransmission.
	 *
	 * We use a barrier here so that the call number and abort code can be
	 * read without needing to take a lock.
	 *
	 * TODO: Make the incoming packet handler check this and handle
	 * terminal retransmission without requiring access to the call.
	 */
	if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		_debug("exposed %u,%u", call->call_id, call->abort_code);
		__rxrpc_disconnect_call(conn, call);

		if (test_and_clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) {
			trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
			bundle->try_upgrade = false;
			if (may_reuse)
				rxrpc_activate_channels(bundle);
		}
	}

	/* See if we can pass the channel directly to another call. */
	if (may_reuse && !list_empty(&bundle->waiting_calls)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		return;
	}

	/* Schedule the final ACK to be transmitted in a short while so that it
	 * can be skipped if we find a follow-on call. The first DATA packet
	 * of the follow-on call will implicitly ACK this call.
	 */
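	/* ("A short while" is two jiffies here - anywhere from a couple of
	 *  milliseconds to ~20ms depending on CONFIG_HZ.)
	 */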
	if (call->completion == RXRPC_CALL_SUCCEEDED &&
	    test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		unsigned long final_ack_at = jiffies + 2;

		chan->final_ack_at = final_ack_at;
		smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
		set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
		rxrpc_reduce_conn_timer(conn, final_ack_at);
	}

	/* Deactivate the channel. */
	chan->call = NULL;
	set_bit(conn->bundle_shift + channel, &conn->bundle->avail_chans);
	conn->act_chans &= ~(1 << channel);

	/* If no channels remain active, then put the connection on the idle
	 * list for a short while. Give it a ref to stop it going away if it
	 * becomes unbundled.
	 */
	if (!conn->act_chans) {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
		conn->idle_timestamp = jiffies;

		rxrpc_get_connection(conn, rxrpc_conn_get_idle);
		list_move_tail(&conn->cache_link, &local->idle_client_conns);

		rxrpc_set_client_reap_timer(local);
	}
}

/*
 * Remove a connection from a bundle.
 */
static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_bundle *bundle = conn->bundle;
	unsigned int bindex;
	int i;

	_enter("C=%x", conn->debug_id);

	if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
		rxrpc_process_delayed_final_acks(conn, true);

	bindex = conn->bundle_shift / RXRPC_MAXCALLS;
	if (bundle->conns[bindex] == conn) {
		_debug("clear slot %u", bindex);
		bundle->conns[bindex] = NULL;
		bundle->conn_ids[bindex] = 0;
		for (i = 0; i < RXRPC_MAXCALLS; i++)
			clear_bit(conn->bundle_shift + i, &bundle->avail_chans);
		rxrpc_put_client_connection_id(bundle->local, conn);
		rxrpc_deactivate_bundle(bundle);
		rxrpc_put_connection(conn, rxrpc_conn_put_unbundle);
	}
}

/*
 * Drop the active count on a bundle.
 */
void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle)
{
	struct rxrpc_local *local;
	bool need_put = false;

	if (!bundle)
		return;

	local = bundle->local;
	if (atomic_dec_and_lock(&bundle->active, &local->client_bundles_lock)) {
		if (!bundle->exclusive) {
			_debug("erase bundle");
			rb_erase(&bundle->local_node, &local->client_bundles);
			need_put = true;
		}

		spin_unlock(&local->client_bundles_lock);
		if (need_put)
			rxrpc_put_bundle(bundle, rxrpc_bundle_put_discard);
	}
}

/*
 * Clean up a dead client connection.
 */
void rxrpc_kill_client_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_local *local = conn->local;
	struct rxrpc_net *rxnet = local->rxnet;

	_enter("C=%x", conn->debug_id);

	trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);
	atomic_dec(&rxnet->nr_client_conns);

	rxrpc_put_client_connection_id(local, conn);
}

/*
 * Discard expired client connections from the idle list. Each conn in the
 * idle list has been exposed and holds an extra ref because of that.
 *
 * This may be called from conn setup or from a work item so cannot be
 * considered non-reentrant.
 */
void rxrpc_discard_expired_client_conns(struct rxrpc_local *local)
{
	struct rxrpc_connection *conn;
	unsigned long expiry, conn_expires_at, now;
	unsigned int nr_conns;

	_enter("");

	/* We keep an estimate of what the number of conns ought to be after
	 * we've discarded some so that we don't overdo the discarding.
	 */
	nr_conns = atomic_read(&local->rxnet->nr_client_conns);

next:
	conn = list_first_entry_or_null(&local->idle_client_conns,
					struct rxrpc_connection, cache_link);
	if (!conn)
		return;

	if (!local->kill_all_client_conns) {
		/* If the number of connections is over the reap limit, we
		 * expedite discard by reducing the expiry timeout. We must,
		 * however, have at least a short grace period to be able to do
		 * final-ACK or ABORT retransmission.
		 */
		expiry = rxrpc_conn_idle_client_expiry;
		if (nr_conns > rxrpc_reap_client_connections)
			expiry = rxrpc_conn_idle_client_fast_expiry;
		if (conn->local->service_closed)
			expiry = rxrpc_closed_conn_expiry * HZ;

		conn_expires_at = conn->idle_timestamp + expiry;

		now = jiffies;
		if (time_after(conn_expires_at, now))
			goto not_yet_expired;
	}

	atomic_dec(&conn->active);
	trace_rxrpc_client(conn, -1, rxrpc_client_discard);
	list_del_init(&conn->cache_link);

	rxrpc_unbundle_conn(conn);
	/* Drop the ->cache_link ref */
	rxrpc_put_connection(conn, rxrpc_conn_put_discard_idle);

	nr_conns--;
	goto next;

not_yet_expired:
	/* The connection at the front of the queue hasn't yet expired, so
	 * schedule the work item for that point if we discarded something.
	 *
	 * We don't worry if the work item is already scheduled - it can look
	 * after rescheduling itself at a later time. We could cancel it, but
	 * then things get messier.
	 */
	_debug("not yet");
	if (!local->kill_all_client_conns)
		timer_reduce(&local->client_conn_reap_timer, conn_expires_at);

	_leave("");
}

/*
 * Clean up the client connections on a local endpoint.
 */
void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
{
	struct rxrpc_connection *conn;

	_enter("");

	local->kill_all_client_conns = true;

	timer_delete_sync(&local->client_conn_reap_timer);

	while ((conn = list_first_entry_or_null(&local->idle_client_conns,
						struct rxrpc_connection, cache_link))) {
		list_del_init(&conn->cache_link);
		atomic_dec(&conn->active);
		trace_rxrpc_client(conn, -1, rxrpc_client_discard);
		rxrpc_unbundle_conn(conn);
		rxrpc_put_connection(conn, rxrpc_conn_put_local_dead);
	}

	_leave(" [culled]");
}
| 834 | |