1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* SCTP kernel implementation |
3 | * Copyright (c) 1999-2000 Cisco, Inc. |
4 | * Copyright (c) 1999-2001 Motorola, Inc. |
5 | * Copyright (c) 2001-2003 International Business Machines Corp. |
6 | * Copyright (c) 2001 Intel Corp. |
7 | * Copyright (c) 2001 La Monte H.P. Yarroll |
8 | * |
9 | * This file is part of the SCTP kernel implementation |
10 | * |
11 | * This module provides the abstraction for an SCTP transport representing |
12 | * a remote transport address. For local transport addresses, we just use |
13 | * union sctp_addr. |
14 | * |
15 | * Please send any bug reports or fixes you make to the |
16 | * email address(es): |
17 | * lksctp developers <linux-sctp@vger.kernel.org> |
18 | * |
19 | * Written or modified by: |
20 | * La Monte H.P. Yarroll <piggy@acm.org> |
21 | * Karl Knutson <karl@athena.chicago.il.us> |
22 | * Jon Grimm <jgrimm@us.ibm.com> |
23 | * Xingang Guo <xingang.guo@intel.com> |
24 | * Hui Huang <hui.huang@nokia.com> |
25 | * Sridhar Samudrala <sri@us.ibm.com> |
26 | * Ardelle Fan <ardelle.fan@intel.com> |
27 | */ |
28 | |
29 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
30 | |
31 | #include <linux/slab.h> |
32 | #include <linux/types.h> |
33 | #include <linux/random.h> |
34 | #include <net/sctp/sctp.h> |
35 | #include <net/sctp/sm.h> |
36 | |
37 | /* 1st Level Abstractions. */ |
38 | |
39 | /* Initialize a new transport from provided memory. */ |
40 | static struct sctp_transport *sctp_transport_init(struct net *net, |
41 | struct sctp_transport *peer, |
42 | const union sctp_addr *addr, |
43 | gfp_t gfp) |
44 | { |
45 | /* Copy in the address. */ |
46 | peer->af_specific = sctp_get_af_specific(addr->sa.sa_family); |
47 | memcpy(&peer->ipaddr, addr, peer->af_specific->sockaddr_len); |
48 | memset(&peer->saddr, 0, sizeof(union sctp_addr)); |
49 | |
50 | peer->sack_generation = 0; |
51 | |
52 | /* From 6.3.1 RTO Calculation: |
53 | * |
54 | * C1) Until an RTT measurement has been made for a packet sent to the |
55 | * given destination transport address, set RTO to the protocol |
56 | * parameter 'RTO.Initial'. |
57 | */ |
	peer->rto = msecs_to_jiffies(net->sctp.rto_initial);
59 | |
60 | peer->last_time_heard = 0; |
61 | peer->last_time_ecne_reduced = jiffies; |
62 | |
63 | peer->param_flags = SPP_HB_DISABLE | |
64 | SPP_PMTUD_ENABLE | |
65 | SPP_SACKDELAY_ENABLE; |
66 | |
67 | /* Initialize the default path max_retrans. */ |
68 | peer->pathmaxrxt = net->sctp.max_retrans_path; |
69 | peer->pf_retrans = net->sctp.pf_retrans; |
70 | |
	INIT_LIST_HEAD(&peer->transmitted);
	INIT_LIST_HEAD(&peer->send_ready);
	INIT_LIST_HEAD(&peer->transports);
74 | |
75 | timer_setup(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event, 0); |
76 | timer_setup(&peer->hb_timer, sctp_generate_heartbeat_event, 0); |
77 | timer_setup(&peer->reconf_timer, sctp_generate_reconf_event, 0); |
78 | timer_setup(&peer->probe_timer, sctp_generate_probe_event, 0); |
79 | timer_setup(&peer->proto_unreach_timer, |
80 | sctp_generate_proto_unreach_event, 0); |
81 | |
82 | /* Initialize the 64-bit random nonce sent with heartbeat. */ |
	get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));
84 | |
	refcount_set(&peer->refcnt, 1);
86 | |
87 | return peer; |
88 | } |
89 | |
90 | /* Allocate and initialize a new transport. */ |
91 | struct sctp_transport *sctp_transport_new(struct net *net, |
92 | const union sctp_addr *addr, |
93 | gfp_t gfp) |
94 | { |
95 | struct sctp_transport *transport; |
96 | |
	transport = kzalloc(sizeof(*transport), gfp);
98 | if (!transport) |
99 | goto fail; |
100 | |
	if (!sctp_transport_init(net, transport, addr, gfp))
102 | goto fail_init; |
103 | |
104 | SCTP_DBG_OBJCNT_INC(transport); |
105 | |
106 | return transport; |
107 | |
108 | fail_init: |
	kfree(transport);
110 | |
111 | fail: |
112 | return NULL; |
113 | } |
114 | |
/* This transport is no longer needed. Free up if possible, or
 * delay until the last reference is dropped.
 */
118 | void sctp_transport_free(struct sctp_transport *transport) |
119 | { |
120 | /* Try to delete the heartbeat timer. */ |
	if (del_timer(&transport->hb_timer))
122 | sctp_transport_put(transport); |
123 | |
	/* Delete the T3_rtx timer if it's active.
	 * There is no point in not doing this now and letting the
	 * structure hang around in memory since we know
	 * the transport is going away.
	 */
	if (del_timer(&transport->T3_rtx_timer))
		sctp_transport_put(transport);

	if (del_timer(&transport->reconf_timer))
		sctp_transport_put(transport);

	if (del_timer(&transport->probe_timer))
		sctp_transport_put(transport);

	/* Delete the ICMP proto unreachable timer if it's active. */
	if (del_timer(&transport->proto_unreach_timer))
		sctp_transport_put(transport);
141 | |
142 | sctp_transport_put(transport); |
143 | } |
144 | |
145 | static void sctp_transport_destroy_rcu(struct rcu_head *head) |
146 | { |
147 | struct sctp_transport *transport; |
148 | |
149 | transport = container_of(head, struct sctp_transport, rcu); |
150 | |
	dst_release(transport->dst);
	kfree(transport);
153 | SCTP_DBG_OBJCNT_DEC(transport); |
154 | } |
155 | |
156 | /* Destroy the transport data structure. |
157 | * Assumes there are no more users of this structure. |
158 | */ |
159 | static void sctp_transport_destroy(struct sctp_transport *transport) |
160 | { |
161 | if (unlikely(refcount_read(&transport->refcnt))) { |
		WARN(1, "Attempt to destroy undead transport %p!\n", transport);
163 | return; |
164 | } |
165 | |
166 | sctp_packet_free(&transport->packet); |
167 | |
168 | if (transport->asoc) |
169 | sctp_association_put(transport->asoc); |
170 | |
	call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
172 | } |
173 | |
174 | /* Start T3_rtx timer if it is not already running and update the heartbeat |
175 | * timer. This routine is called every time a DATA chunk is sent. |
176 | */ |
177 | void sctp_transport_reset_t3_rtx(struct sctp_transport *transport) |
178 | { |
179 | /* RFC 2960 6.3.2 Retransmission Timer Rules |
180 | * |
	 * R1) Every time a DATA chunk is sent to any address (including a
182 | * retransmission), if the T3-rtx timer of that address is not running |
183 | * start it running so that it will expire after the RTO of that |
184 | * address. |
185 | */ |
186 | |
	if (!timer_pending(&transport->T3_rtx_timer))
		if (!mod_timer(&transport->T3_rtx_timer,
			       jiffies + transport->rto))
190 | sctp_transport_hold(transport); |
191 | } |
192 | |
193 | void sctp_transport_reset_hb_timer(struct sctp_transport *transport) |
194 | { |
195 | unsigned long expires; |
196 | |
197 | /* When a data chunk is sent, reset the heartbeat interval. */ |
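	/* Illustrative timing, for a confirmed active path:
	 * sctp_transport_timeout() yields hbinterval + RTO/2, and the
	 * random offset below spreads expiry over hbinterval + RTO
	 * +/- RTO/2 (e.g. hbinterval 30s, RTO 1s gives 30.5s..31.5s).
	 */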
198 | expires = jiffies + sctp_transport_timeout(transport); |
	if (!mod_timer(&transport->hb_timer,
		       expires + get_random_u32_below(transport->rto)))
201 | sctp_transport_hold(transport); |
202 | } |
203 | |
204 | void sctp_transport_reset_reconf_timer(struct sctp_transport *transport) |
205 | { |
	if (!timer_pending(&transport->reconf_timer))
		if (!mod_timer(&transport->reconf_timer,
			       jiffies + transport->rto))
209 | sctp_transport_hold(transport); |
210 | } |
211 | |
212 | void sctp_transport_reset_probe_timer(struct sctp_transport *transport) |
213 | { |
	if (!mod_timer(&transport->probe_timer,
		       jiffies + transport->probe_interval))
216 | sctp_transport_hold(transport); |
217 | } |
218 | |
219 | void sctp_transport_reset_raise_timer(struct sctp_transport *transport) |
220 | { |
	if (!mod_timer(&transport->probe_timer,
		       jiffies + transport->probe_interval * 30))
223 | sctp_transport_hold(transport); |
224 | } |
225 | |
226 | /* This transport has been assigned to an association. |
227 | * Initialize fields from the association or from the sock itself. |
228 | * Register the reference count in the association. |
229 | */ |
230 | void sctp_transport_set_owner(struct sctp_transport *transport, |
231 | struct sctp_association *asoc) |
232 | { |
233 | transport->asoc = asoc; |
234 | sctp_association_hold(asoc); |
235 | } |
236 | |
237 | /* Initialize the pmtu of a transport. */ |
238 | void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk) |
239 | { |
240 | /* If we don't have a fresh route, look one up */ |
241 | if (!transport->dst || transport->dst->obsolete) { |
		sctp_transport_dst_release(transport);
243 | transport->af_specific->get_dst(transport, &transport->saddr, |
244 | &transport->fl, sk); |
245 | } |
246 | |
247 | if (transport->param_flags & SPP_PMTUD_DISABLE) { |
248 | struct sctp_association *asoc = transport->asoc; |
249 | |
250 | if (!transport->pathmtu && asoc && asoc->pathmtu) |
251 | transport->pathmtu = asoc->pathmtu; |
252 | if (transport->pathmtu) |
253 | return; |
254 | } |
255 | |
256 | if (transport->dst) |
		transport->pathmtu = sctp_dst_mtu(transport->dst);
258 | else |
259 | transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; |
260 | |
	sctp_transport_pl_update(transport);
262 | } |
263 | |
264 | void sctp_transport_pl_send(struct sctp_transport *t) |
265 | { |
266 | if (t->pl.probe_count < SCTP_MAX_PROBES) |
267 | goto out; |
268 | |
269 | t->pl.probe_count = 0; |
270 | if (t->pl.state == SCTP_PL_BASE) { |
271 | if (t->pl.probe_size == SCTP_BASE_PLPMTU) { /* BASE_PLPMTU Confirmation Failed */ |
272 | t->pl.state = SCTP_PL_ERROR; /* Base -> Error */ |
273 | |
274 | t->pl.pmtu = SCTP_BASE_PLPMTU; |
275 | t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); |
			sctp_assoc_sync_pmtu(t->asoc);
277 | } |
278 | } else if (t->pl.state == SCTP_PL_SEARCH) { |
279 | if (t->pl.pmtu == t->pl.probe_size) { /* Black Hole Detected */ |
280 | t->pl.state = SCTP_PL_BASE; /* Search -> Base */ |
281 | t->pl.probe_size = SCTP_BASE_PLPMTU; |
282 | t->pl.probe_high = 0; |
283 | |
284 | t->pl.pmtu = SCTP_BASE_PLPMTU; |
285 | t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); |
			sctp_assoc_sync_pmtu(t->asoc);
287 | } else { /* Normal probe failure. */ |
288 | t->pl.probe_high = t->pl.probe_size; |
289 | t->pl.probe_size = t->pl.pmtu; |
290 | } |
291 | } else if (t->pl.state == SCTP_PL_COMPLETE) { |
292 | if (t->pl.pmtu == t->pl.probe_size) { /* Black Hole Detected */ |
293 | t->pl.state = SCTP_PL_BASE; /* Search Complete -> Base */ |
294 | t->pl.probe_size = SCTP_BASE_PLPMTU; |
295 | |
296 | t->pl.pmtu = SCTP_BASE_PLPMTU; |
297 | t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); |
			sctp_assoc_sync_pmtu(t->asoc);
299 | } |
300 | } |
301 | |
302 | out: |
	pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
		 __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);
305 | t->pl.probe_count++; |
306 | } |
307 | |
308 | bool sctp_transport_pl_recv(struct sctp_transport *t) |
309 | { |
	pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
		 __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);
312 | |
313 | t->pl.pmtu = t->pl.probe_size; |
314 | t->pl.probe_count = 0; |
315 | if (t->pl.state == SCTP_PL_BASE) { |
316 | t->pl.state = SCTP_PL_SEARCH; /* Base -> Search */ |
317 | t->pl.probe_size += SCTP_PL_BIG_STEP; |
318 | } else if (t->pl.state == SCTP_PL_ERROR) { |
319 | t->pl.state = SCTP_PL_SEARCH; /* Error -> Search */ |
320 | |
321 | t->pl.pmtu = t->pl.probe_size; |
322 | t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); |
		sctp_assoc_sync_pmtu(t->asoc);
324 | t->pl.probe_size += SCTP_PL_BIG_STEP; |
325 | } else if (t->pl.state == SCTP_PL_SEARCH) { |
326 | if (!t->pl.probe_high) { |
327 | if (t->pl.probe_size < SCTP_MAX_PLPMTU) { |
328 | t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_BIG_STEP, |
329 | SCTP_MAX_PLPMTU); |
330 | return false; |
331 | } |
332 | t->pl.probe_high = SCTP_MAX_PLPMTU; |
333 | } |
334 | t->pl.probe_size += SCTP_PL_MIN_STEP; |
335 | if (t->pl.probe_size >= t->pl.probe_high) { |
336 | t->pl.probe_high = 0; |
337 | t->pl.state = SCTP_PL_COMPLETE; /* Search -> Search Complete */ |
338 | |
339 | t->pl.probe_size = t->pl.pmtu; |
340 | t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); |
			sctp_assoc_sync_pmtu(t->asoc);
			sctp_transport_reset_raise_timer(t);
343 | } |
344 | } else if (t->pl.state == SCTP_PL_COMPLETE) { |
345 | /* Raise probe_size again after 30 * interval in Search Complete */ |
346 | t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */ |
347 | t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_MIN_STEP, SCTP_MAX_PLPMTU); |
348 | } |
349 | |
350 | return t->pl.state == SCTP_PL_COMPLETE; |
351 | } |
352 | |
353 | static bool sctp_transport_pl_toobig(struct sctp_transport *t, u32 pmtu) |
354 | { |
	pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, ptb: %d\n",
		 __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, pmtu);
357 | |
358 | if (pmtu < SCTP_MIN_PLPMTU || pmtu >= t->pl.probe_size) |
359 | return false; |
360 | |
361 | if (t->pl.state == SCTP_PL_BASE) { |
362 | if (pmtu >= SCTP_MIN_PLPMTU && pmtu < SCTP_BASE_PLPMTU) { |
363 | t->pl.state = SCTP_PL_ERROR; /* Base -> Error */ |
364 | |
365 | t->pl.pmtu = SCTP_BASE_PLPMTU; |
366 | t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); |
367 | return true; |
368 | } |
369 | } else if (t->pl.state == SCTP_PL_SEARCH) { |
370 | if (pmtu >= SCTP_BASE_PLPMTU && pmtu < t->pl.pmtu) { |
371 | t->pl.state = SCTP_PL_BASE; /* Search -> Base */ |
372 | t->pl.probe_size = SCTP_BASE_PLPMTU; |
373 | t->pl.probe_count = 0; |
374 | |
375 | t->pl.probe_high = 0; |
376 | t->pl.pmtu = SCTP_BASE_PLPMTU; |
377 | t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); |
378 | return true; |
379 | } else if (pmtu > t->pl.pmtu && pmtu < t->pl.probe_size) { |
380 | t->pl.probe_size = pmtu; |
381 | t->pl.probe_count = 0; |
382 | } |
383 | } else if (t->pl.state == SCTP_PL_COMPLETE) { |
384 | if (pmtu >= SCTP_BASE_PLPMTU && pmtu < t->pl.pmtu) { |
385 | t->pl.state = SCTP_PL_BASE; /* Complete -> Base */ |
386 | t->pl.probe_size = SCTP_BASE_PLPMTU; |
387 | t->pl.probe_count = 0; |
388 | |
389 | t->pl.probe_high = 0; |
390 | t->pl.pmtu = SCTP_BASE_PLPMTU; |
391 | t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t); |
			sctp_transport_reset_probe_timer(t);
393 | return true; |
394 | } |
395 | } |
396 | |
397 | return false; |
398 | } |
399 | |
400 | bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) |
401 | { |
402 | struct sock *sk = t->asoc->base.sk; |
403 | struct dst_entry *dst; |
404 | bool change = true; |
405 | |
406 | if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { |
		pr_warn_ratelimited("%s: Reported pmtu %d too low, using default minimum of %d\n",
				    __func__, pmtu, SCTP_DEFAULT_MINSEGMENT);
409 | /* Use default minimum segment instead */ |
410 | pmtu = SCTP_DEFAULT_MINSEGMENT; |
411 | } |
412 | pmtu = SCTP_TRUNC4(pmtu); |
413 | |
414 | if (sctp_transport_pl_enabled(t)) |
		return sctp_transport_pl_toobig(t, pmtu - sctp_transport_pl_hlen(t));
416 | |
417 | dst = sctp_transport_dst_check(t); |
418 | if (dst) { |
		struct sctp_pf *pf = sctp_get_pf_specific(dst->ops->family);
420 | union sctp_addr addr; |
421 | |
422 | pf->af->from_sk(&addr, sk); |
423 | pf->to_sk_daddr(&t->ipaddr, sk); |
424 | dst->ops->update_pmtu(dst, sk, NULL, pmtu, true); |
425 | pf->to_sk_daddr(&addr, sk); |
426 | |
427 | dst = sctp_transport_dst_check(t); |
428 | } |
429 | |
430 | if (!dst) { |
431 | t->af_specific->get_dst(t, &t->saddr, &t->fl, sk); |
432 | dst = t->dst; |
433 | } |
434 | |
435 | if (dst) { |
436 | /* Re-fetch, as under layers may have a higher minimum size */ |
437 | pmtu = sctp_dst_mtu(dst); |
438 | change = t->pathmtu != pmtu; |
439 | } |
440 | t->pathmtu = pmtu; |
441 | |
442 | return change; |
443 | } |
444 | |
445 | /* Caches the dst entry and source address for a transport's destination |
446 | * address. |
447 | */ |
448 | void sctp_transport_route(struct sctp_transport *transport, |
449 | union sctp_addr *saddr, struct sctp_sock *opt) |
450 | { |
451 | struct sctp_association *asoc = transport->asoc; |
452 | struct sctp_af *af = transport->af_specific; |
453 | |
	sctp_transport_dst_release(transport);
	af->get_dst(transport, saddr, &transport->fl, sctp_opt2sk(opt));
456 | |
457 | if (saddr) |
458 | memcpy(&transport->saddr, saddr, sizeof(union sctp_addr)); |
459 | else |
460 | af->get_saddr(opt, transport, &transport->fl); |
461 | |
	sctp_transport_pmtu(transport, sctp_opt2sk(opt));
463 | |
464 | /* Initialize sk->sk_rcv_saddr, if the transport is the |
465 | * association's active path for getsockname(). |
466 | */ |
467 | if (transport->dst && asoc && |
468 | (!asoc->peer.primary_path || transport == asoc->peer.active_path)) |
469 | opt->pf->to_sk_saddr(&transport->saddr, asoc->base.sk); |
470 | } |
471 | |
472 | /* Hold a reference to a transport. */ |
473 | int sctp_transport_hold(struct sctp_transport *transport) |
474 | { |
	return refcount_inc_not_zero(&transport->refcnt);
476 | } |
477 | |
478 | /* Release a reference to a transport and clean up |
479 | * if there are no more references. |
480 | */ |
481 | void sctp_transport_put(struct sctp_transport *transport) |
482 | { |
	if (refcount_dec_and_test(&transport->refcnt))
484 | sctp_transport_destroy(transport); |
485 | } |
486 | |
487 | /* Update transport's RTO based on the newly calculated RTT. */ |
488 | void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt) |
489 | { |
490 | if (unlikely(!tp->rto_pending)) |
491 | /* We should not be doing any RTO updates unless rto_pending is set. */ |
		pr_debug("%s: rto_pending not set on transport %p!\n", __func__, tp);
493 | |
494 | if (tp->rttvar || tp->srtt) { |
495 | struct net *net = tp->asoc->base.net; |
496 | /* 6.3.1 C3) When a new RTT measurement R' is made, set |
497 | * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'| |
498 | * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R' |
499 | */ |
500 | |
501 | /* Note: The above algorithm has been rewritten to |
502 | * express rto_beta and rto_alpha as inverse powers |
503 | * of two. |
504 | * For example, assuming the default value of RTO.Alpha of |
505 | * 1/8, rto_alpha would be expressed as 3. |
506 | */ |
507 | tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta) |
508 | + (((__u32)abs((__s64)tp->srtt - (__s64)rtt)) >> net->sctp.rto_beta); |
509 | tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha) |
510 | + (rtt >> net->sctp.rto_alpha); |
511 | } else { |
512 | /* 6.3.1 C2) When the first RTT measurement R is made, set |
513 | * SRTT <- R, RTTVAR <- R/2. |
514 | */ |
515 | tp->srtt = rtt; |
516 | tp->rttvar = rtt >> 1; |
517 | } |
518 | |
519 | /* 6.3.1 G1) Whenever RTTVAR is computed, if RTTVAR = 0, then |
520 | * adjust RTTVAR <- G, where G is the CLOCK GRANULARITY. |
521 | */ |
522 | if (tp->rttvar == 0) |
523 | tp->rttvar = SCTP_CLOCK_GRANULARITY; |
524 | |
525 | /* 6.3.1 C3) After the computation, update RTO <- SRTT + 4 * RTTVAR. */ |
526 | tp->rto = tp->srtt + (tp->rttvar << 2); |
527 | |
528 | /* 6.3.1 C6) Whenever RTO is computed, if it is less than RTO.Min |
529 | * seconds then it is rounded up to RTO.Min seconds. |
530 | */ |
531 | if (tp->rto < tp->asoc->rto_min) |
532 | tp->rto = tp->asoc->rto_min; |
533 | |
534 | /* 6.3.1 C7) A maximum value may be placed on RTO provided it is |
535 | * at least RTO.max seconds. |
536 | */ |
537 | if (tp->rto > tp->asoc->rto_max) |
538 | tp->rto = tp->asoc->rto_max; |
539 | |
	sctp_max_rto(tp->asoc, tp);
541 | tp->rtt = rtt; |
542 | |
543 | /* Reset rto_pending so that a new RTT measurement is started when a |
544 | * new data chunk is sent. |
545 | */ |
546 | tp->rto_pending = 0; |
547 | |
	pr_debug("%s: transport:%p, rtt:%d, srtt:%d rttvar:%d, rto:%ld\n",
		 __func__, tp, rtt, tp->srtt, tp->rttvar, tp->rto);
550 | } |
551 | |
552 | /* This routine updates the transport's cwnd and partial_bytes_acked |
553 | * parameters based on the bytes acked in the received SACK. |
554 | */ |
555 | void sctp_transport_raise_cwnd(struct sctp_transport *transport, |
556 | __u32 sack_ctsn, __u32 bytes_acked) |
557 | { |
558 | struct sctp_association *asoc = transport->asoc; |
559 | __u32 cwnd, ssthresh, flight_size, pba, pmtu; |
560 | |
561 | cwnd = transport->cwnd; |
562 | flight_size = transport->flight_size; |
563 | |
564 | /* See if we need to exit Fast Recovery first */ |
565 | if (asoc->fast_recovery && |
566 | TSN_lte(asoc->fast_recovery_exit, sack_ctsn)) |
567 | asoc->fast_recovery = 0; |
568 | |
569 | ssthresh = transport->ssthresh; |
570 | pba = transport->partial_bytes_acked; |
571 | pmtu = transport->asoc->pathmtu; |
572 | |
573 | if (cwnd <= ssthresh) { |
574 | /* RFC 4960 7.2.1 |
575 | * o When cwnd is less than or equal to ssthresh, an SCTP |
576 | * endpoint MUST use the slow-start algorithm to increase |
577 | * cwnd only if the current congestion window is being fully |
578 | * utilized, an incoming SACK advances the Cumulative TSN |
579 | * Ack Point, and the data sender is not in Fast Recovery. |
580 | * Only when these three conditions are met can the cwnd be |
581 | * increased; otherwise, the cwnd MUST not be increased. |
582 | * If these conditions are met, then cwnd MUST be increased |
583 | * by, at most, the lesser of 1) the total size of the |
584 | * previously outstanding DATA chunk(s) acknowledged, and |
585 | * 2) the destination's path MTU. This upper bound protects |
586 | * against the ACK-Splitting attack outlined in [SAVAGE99]. |
587 | */ |
588 | if (asoc->fast_recovery) |
589 | return; |
590 | |
591 | /* The appropriate cwnd increase algorithm is performed |
592 | * if, and only if the congestion window is being fully |
593 | * utilized. Note that RFC4960 Errata 3.22 removed the |
594 | * other condition on ctsn moving. |
595 | */ |
596 | if (flight_size < cwnd) |
597 | return; |
598 | |
599 | if (bytes_acked > pmtu) |
600 | cwnd += pmtu; |
601 | else |
602 | cwnd += bytes_acked; |
603 | |
604 | pr_debug("%s: slow start: transport:%p, bytes_acked:%d, " |
605 | "cwnd:%d, ssthresh:%d, flight_size:%d, pba:%d\n" , |
606 | __func__, transport, bytes_acked, cwnd, ssthresh, |
607 | flight_size, pba); |
608 | } else { |
609 | /* RFC 2960 7.2.2 Whenever cwnd is greater than ssthresh, |
610 | * upon each SACK arrival, increase partial_bytes_acked |
611 | * by the total number of bytes of all new chunks |
612 | * acknowledged in that SACK including chunks |
613 | * acknowledged by the new Cumulative TSN Ack and by Gap |
614 | * Ack Blocks. (updated by RFC4960 Errata 3.22) |
615 | * |
616 | * When partial_bytes_acked is greater than cwnd and |
617 | * before the arrival of the SACK the sender had less |
618 | * bytes of data outstanding than cwnd (i.e., before |
619 | * arrival of the SACK, flightsize was less than cwnd), |
620 | * reset partial_bytes_acked to cwnd. (RFC 4960 Errata |
621 | * 3.26) |
622 | * |
623 | * When partial_bytes_acked is equal to or greater than |
624 | * cwnd and before the arrival of the SACK the sender |
625 | * had cwnd or more bytes of data outstanding (i.e., |
626 | * before arrival of the SACK, flightsize was greater |
627 | * than or equal to cwnd), partial_bytes_acked is reset |
628 | * to (partial_bytes_acked - cwnd). Next, cwnd is |
629 | * increased by MTU. (RFC 4960 Errata 3.12) |
630 | */ |
631 | pba += bytes_acked; |
632 | if (pba > cwnd && flight_size < cwnd) |
633 | pba = cwnd; |
634 | if (pba >= cwnd && flight_size >= cwnd) { |
635 | pba = pba - cwnd; |
636 | cwnd += pmtu; |
637 | } |
638 | |
639 | pr_debug("%s: congestion avoidance: transport:%p, " |
640 | "bytes_acked:%d, cwnd:%d, ssthresh:%d, " |
641 | "flight_size:%d, pba:%d\n" , __func__, |
642 | transport, bytes_acked, cwnd, ssthresh, |
643 | flight_size, pba); |
644 | } |
645 | |
646 | transport->cwnd = cwnd; |
647 | transport->partial_bytes_acked = pba; |
648 | } |
649 | |
650 | /* This routine is used to lower the transport's cwnd when congestion is |
651 | * detected. |
652 | */ |
653 | void sctp_transport_lower_cwnd(struct sctp_transport *transport, |
654 | enum sctp_lower_cwnd reason) |
655 | { |
656 | struct sctp_association *asoc = transport->asoc; |
657 | |
658 | switch (reason) { |
659 | case SCTP_LOWER_CWND_T3_RTX: |
660 | /* RFC 2960 Section 7.2.3, sctpimpguide |
661 | * When the T3-rtx timer expires on an address, SCTP should |
662 | * perform slow start by: |
663 | * ssthresh = max(cwnd/2, 4*MTU) |
664 | * cwnd = 1*MTU |
665 | * partial_bytes_acked = 0 |
666 | */ |
667 | transport->ssthresh = max(transport->cwnd/2, |
668 | 4*asoc->pathmtu); |
669 | transport->cwnd = asoc->pathmtu; |
670 | |
671 | /* T3-rtx also clears fast recovery */ |
672 | asoc->fast_recovery = 0; |
673 | break; |
674 | |
675 | case SCTP_LOWER_CWND_FAST_RTX: |
676 | /* RFC 2960 7.2.4 Adjust the ssthresh and cwnd of the |
677 | * destination address(es) to which the missing DATA chunks |
678 | * were last sent, according to the formula described in |
679 | * Section 7.2.3. |
680 | * |
681 | * RFC 2960 7.2.3, sctpimpguide Upon detection of packet |
682 | * losses from SACK (see Section 7.2.4), An endpoint |
683 | * should do the following: |
684 | * ssthresh = max(cwnd/2, 4*MTU) |
685 | * cwnd = ssthresh |
686 | * partial_bytes_acked = 0 |
687 | */ |
688 | if (asoc->fast_recovery) |
689 | return; |
690 | |
691 | /* Mark Fast recovery */ |
692 | asoc->fast_recovery = 1; |
693 | asoc->fast_recovery_exit = asoc->next_tsn - 1; |
694 | |
695 | transport->ssthresh = max(transport->cwnd/2, |
696 | 4*asoc->pathmtu); |
697 | transport->cwnd = transport->ssthresh; |
698 | break; |
699 | |
700 | case SCTP_LOWER_CWND_ECNE: |
701 | /* RFC 2481 Section 6.1.2. |
702 | * If the sender receives an ECN-Echo ACK packet |
703 | * then the sender knows that congestion was encountered in the |
704 | * network on the path from the sender to the receiver. The |
705 | * indication of congestion should be treated just as a |
706 | * congestion loss in non-ECN Capable TCP. That is, the TCP |
707 | * source halves the congestion window "cwnd" and reduces the |
708 | * slow start threshold "ssthresh". |
709 | * A critical condition is that TCP does not react to |
710 | * congestion indications more than once every window of |
711 | * data (or more loosely more than once every round-trip time). |
712 | */ |
713 | if (time_after(jiffies, transport->last_time_ecne_reduced + |
714 | transport->rtt)) { |
715 | transport->ssthresh = max(transport->cwnd/2, |
716 | 4*asoc->pathmtu); |
717 | transport->cwnd = transport->ssthresh; |
718 | transport->last_time_ecne_reduced = jiffies; |
719 | } |
720 | break; |
721 | |
722 | case SCTP_LOWER_CWND_INACTIVE: |
723 | /* RFC 2960 Section 7.2.1, sctpimpguide |
724 | * When the endpoint does not transmit data on a given |
725 | * transport address, the cwnd of the transport address |
726 | * should be adjusted to max(cwnd/2, 4*MTU) per RTO. |
727 | * NOTE: Although the draft recommends that this check needs |
		 * to be done every RTO interval, we do it every heartbeat
729 | * interval. |
730 | */ |
731 | transport->cwnd = max(transport->cwnd/2, |
732 | 4*asoc->pathmtu); |
733 | /* RFC 4960 Errata 3.27.2: also adjust sshthresh */ |
734 | transport->ssthresh = transport->cwnd; |
735 | break; |
736 | } |
737 | |
738 | transport->partial_bytes_acked = 0; |
739 | |
	pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d\n",
741 | __func__, transport, reason, transport->cwnd, |
742 | transport->ssthresh); |
743 | } |
744 | |
745 | /* Apply Max.Burst limit to the congestion window: |
746 | * sctpimpguide-05 2.14.2 |
747 | * D) When the time comes for the sender to |
748 | * transmit new DATA chunks, the protocol parameter Max.Burst MUST |
749 | * first be applied to limit how many new DATA chunks may be sent. |
750 | * The limit is applied by adjusting cwnd as follows: |
751 | * if ((flightsize+ Max.Burst * MTU) < cwnd) |
752 | * cwnd = flightsize + Max.Burst * MTU |
753 | */ |
754 | |
755 | void sctp_transport_burst_limited(struct sctp_transport *t) |
756 | { |
757 | struct sctp_association *asoc = t->asoc; |
758 | u32 old_cwnd = t->cwnd; |
759 | u32 max_burst_bytes; |
760 | |
761 | if (t->burst_limited || asoc->max_burst == 0) |
762 | return; |
763 | |
764 | max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu); |
765 | if (max_burst_bytes < old_cwnd) { |
766 | t->cwnd = max_burst_bytes; |
767 | t->burst_limited = old_cwnd; |
768 | } |
769 | } |
770 | |
/* Restore the old cwnd congestion window, after the burst has had its
 * desired effect.
 */
774 | void sctp_transport_burst_reset(struct sctp_transport *t) |
775 | { |
776 | if (t->burst_limited) { |
777 | t->cwnd = t->burst_limited; |
778 | t->burst_limited = 0; |
779 | } |
780 | } |
781 | |
782 | /* What is the next timeout value for this transport? */ |
783 | unsigned long sctp_transport_timeout(struct sctp_transport *trans) |
784 | { |
785 | /* RTO + timer slack +/- 50% of RTO */ |
786 | unsigned long timeout = trans->rto >> 1; |
787 | |
788 | if (trans->state != SCTP_UNCONFIRMED && |
789 | trans->state != SCTP_PF) |
790 | timeout += trans->hbinterval; |
791 | |
792 | return max_t(unsigned long, timeout, HZ / 5); |
793 | } |
794 | |
795 | /* Reset transport variables to their initial values */ |
796 | void sctp_transport_reset(struct sctp_transport *t) |
797 | { |
798 | struct sctp_association *asoc = t->asoc; |
799 | |
800 | /* RFC 2960 (bis), Section 5.2.4 |
801 | * All the congestion control parameters (e.g., cwnd, ssthresh) |
802 | * related to this peer MUST be reset to their initial values |
803 | * (see Section 6.2.1) |
804 | */ |
805 | t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380)); |
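	/* (For pathmtu = 1500 the line above yields
	 * min(6000, max(3000, 4380)) = 4380 bytes, the RFC 4960 initial
	 * window.)
	 */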
806 | t->burst_limited = 0; |
807 | t->ssthresh = asoc->peer.i.a_rwnd; |
808 | t->rto = asoc->rto_initial; |
	sctp_max_rto(asoc, t);
810 | t->rtt = 0; |
811 | t->srtt = 0; |
812 | t->rttvar = 0; |
813 | |
814 | /* Reset these additional variables so that we have a clean slate. */ |
815 | t->partial_bytes_acked = 0; |
816 | t->flight_size = 0; |
817 | t->error_count = 0; |
818 | t->rto_pending = 0; |
819 | t->hb_sent = 0; |
820 | |
821 | /* Initialize the state information for SFR-CACC */ |
822 | t->cacc.changeover_active = 0; |
823 | t->cacc.cycling_changeover = 0; |
824 | t->cacc.next_tsn_at_change = 0; |
825 | t->cacc.cacc_saw_newack = 0; |
826 | } |
827 | |
828 | /* Schedule retransmission on the given transport */ |
829 | void sctp_transport_immediate_rtx(struct sctp_transport *t) |
830 | { |
831 | /* Stop pending T3_rtx_timer */ |
	if (del_timer(&t->T3_rtx_timer))
		sctp_transport_put(t);
834 | |
	sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX);
	if (!timer_pending(&t->T3_rtx_timer)) {
		if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto))
			sctp_transport_hold(t);
839 | } |
840 | } |
841 | |
842 | /* Drop dst */ |
843 | void sctp_transport_dst_release(struct sctp_transport *t) |
844 | { |
	dst_release(t->dst);
846 | t->dst = NULL; |
847 | t->dst_pending_confirm = 0; |
848 | } |
849 | |
850 | /* Schedule neighbour confirm */ |
851 | void sctp_transport_dst_confirm(struct sctp_transport *t) |
852 | { |
853 | t->dst_pending_confirm = 1; |
854 | } |
855 | |