// SPDX-License-Identifier: GPL-2.0-or-later
/* Processing of received RxRPC packets
 *
 * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "ar-internal.h"

static void rxrpc_proto_abort(struct rxrpc_call *call, rxrpc_seq_t seq,
			      enum rxrpc_abort_reason why)
{
	rxrpc_abort_call(call, seq, RX_PROTOCOL_ERROR, -EBADMSG, why);
}

/*
 * Do TCP-style congestion management [RFC 5681].
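 *
 * The call is in one of four modes: slow start, in which cwnd is bumped for
 * each ACK that reports newly acked packets; congestion avoidance, in which
 * cwnd is bumped once per RTT in which the window was filled; packet loss,
 * entered upon seeing a NACK; and fast retransmit, entered once three
 * duplicate ACKs have been seen in packet-loss mode.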
 */
static void rxrpc_congestion_management(struct rxrpc_call *call,
					struct sk_buff *skb,
					struct rxrpc_ack_summary *summary,
					rxrpc_serial_t acked_serial)
{
	enum rxrpc_congest_change change = rxrpc_cong_no_change;
	unsigned int cumulative_acks = call->cong_cumul_acks;
	unsigned int cwnd = call->cong_cwnd;
	bool resend = false;

	summary->flight_size =
		(call->tx_top - call->acks_hard_ack) - summary->nr_acks;

	if (test_and_clear_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags)) {
		summary->retrans_timeo = true;
		call->cong_ssthresh = max_t(unsigned int,
					    summary->flight_size / 2, 2);
		cwnd = 1;
		if (cwnd >= call->cong_ssthresh &&
		    call->cong_mode == RXRPC_CALL_SLOW_START) {
			call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
			call->cong_tstamp = skb->tstamp;
			cumulative_acks = 0;
		}
	}

	cumulative_acks += summary->nr_new_acks;
	if (cumulative_acks > 255)
		cumulative_acks = 255;

	summary->cwnd = call->cong_cwnd;
	summary->ssthresh = call->cong_ssthresh;
	summary->cumulative_acks = cumulative_acks;
	summary->dup_acks = call->cong_dup_acks;

	switch (call->cong_mode) {
	case RXRPC_CALL_SLOW_START:
		if (summary->saw_nacks)
			goto packet_loss_detected;
		if (summary->cumulative_acks > 0)
			cwnd += 1;
		if (cwnd >= call->cong_ssthresh) {
			call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
			call->cong_tstamp = skb->tstamp;
		}
		goto out;

	case RXRPC_CALL_CONGEST_AVOIDANCE:
		if (summary->saw_nacks)
			goto packet_loss_detected;

		/* We analyse the number of packets that get ACK'd per RTT
		 * period and increase the window if we managed to fill it.
		 */
		if (call->peer->rtt_count == 0)
			goto out;
		if (ktime_before(skb->tstamp,
				 ktime_add_us(call->cong_tstamp,
					      call->peer->srtt_us >> 3)))
			goto out_no_clear_ca;
		change = rxrpc_cong_rtt_window_end;
		call->cong_tstamp = skb->tstamp;
		if (cumulative_acks >= cwnd)
			cwnd++;
		goto out;

	case RXRPC_CALL_PACKET_LOSS:
		if (!summary->saw_nacks)
			goto resume_normality;

		if (summary->new_low_nack) {
			change = rxrpc_cong_new_low_nack;
			call->cong_dup_acks = 1;
			if (call->cong_extra > 1)
				call->cong_extra = 1;
			goto send_extra_data;
		}

		call->cong_dup_acks++;
		if (call->cong_dup_acks < 3)
			goto send_extra_data;

		change = rxrpc_cong_begin_retransmission;
		call->cong_mode = RXRPC_CALL_FAST_RETRANSMIT;
		call->cong_ssthresh = max_t(unsigned int,
					    summary->flight_size / 2, 2);
		cwnd = call->cong_ssthresh + 3;
		call->cong_extra = 0;
		call->cong_dup_acks = 0;
		resend = true;
		goto out;

	case RXRPC_CALL_FAST_RETRANSMIT:
		if (!summary->new_low_nack) {
			if (summary->nr_new_acks == 0)
				cwnd += 1;
			call->cong_dup_acks++;
			if (call->cong_dup_acks == 2) {
				change = rxrpc_cong_retransmit_again;
				call->cong_dup_acks = 0;
				resend = true;
			}
		} else {
			change = rxrpc_cong_progress;
			cwnd = call->cong_ssthresh;
			if (!summary->saw_nacks)
				goto resume_normality;
		}
		goto out;

	default:
		BUG();
		goto out;
	}

resume_normality:
	change = rxrpc_cong_cleared_nacks;
	call->cong_dup_acks = 0;
	call->cong_extra = 0;
	call->cong_tstamp = skb->tstamp;
	if (cwnd < call->cong_ssthresh)
		call->cong_mode = RXRPC_CALL_SLOW_START;
	else
		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
out:
	cumulative_acks = 0;
out_no_clear_ca:
	if (cwnd >= RXRPC_TX_MAX_WINDOW)
		cwnd = RXRPC_TX_MAX_WINDOW;
	call->cong_cwnd = cwnd;
	call->cong_cumul_acks = cumulative_acks;
	summary->mode = call->cong_mode;
	trace_rxrpc_congest(call, summary, acked_serial, change);
	if (resend)
		rxrpc_resend(call, skb);
	return;

packet_loss_detected:
	change = rxrpc_cong_saw_nack;
	call->cong_mode = RXRPC_CALL_PACKET_LOSS;
	call->cong_dup_acks = 0;
	goto send_extra_data;

send_extra_data:
	/* Send some previously unsent DATA if we have some to advance the ACK
	 * state.
	 */
	if (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ||
	    summary->nr_acks != call->tx_top - call->acks_hard_ack) {
		call->cong_extra++;
		wake_up(&call->waitq);
	}
	goto out_no_clear_ca;
}

/*
 * Degrade the congestion window if we haven't transmitted a packet for >1RTT.
 */
void rxrpc_congestion_degrade(struct rxrpc_call *call)
{
	ktime_t rtt, now;

	if (call->cong_mode != RXRPC_CALL_SLOW_START &&
	    call->cong_mode != RXRPC_CALL_CONGEST_AVOIDANCE)
		return;
	if (__rxrpc_call_state(call) == RXRPC_CALL_CLIENT_AWAIT_REPLY)
		return;

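	/* srtt_us holds the smoothed RTT scaled by 8 (as in TCP), so convert
	 * eighths of a microsecond to nanoseconds.
	 */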
	rtt = ns_to_ktime(call->peer->srtt_us * (1000 / 8));
	now = ktime_get_real();
	if (!ktime_before(ktime_add(call->tx_last_sent, rtt), now))
		return;

	trace_rxrpc_reset_cwnd(call, now);
	rxrpc_inc_stat(call->rxnet, stat_tx_data_cwnd_reset);
	call->tx_last_sent = now;
	call->cong_mode = RXRPC_CALL_SLOW_START;
	call->cong_ssthresh = max_t(unsigned int, call->cong_ssthresh,
				    call->cong_cwnd * 3 / 4);
	call->cong_cwnd = max_t(unsigned int, call->cong_cwnd / 2, RXRPC_MIN_CWND);
}

/*
 * Apply a hard ACK by advancing the Tx window.
 */
static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
				   struct rxrpc_ack_summary *summary)
{
	struct rxrpc_txbuf *txb;
	bool rot_last = false;

	list_for_each_entry_rcu(txb, &call->tx_buffer, call_link, false) {
		if (before_eq(txb->seq, call->acks_hard_ack))
			continue;
		if (txb->flags & RXRPC_LAST_PACKET) {
			set_bit(RXRPC_CALL_TX_LAST, &call->flags);
			rot_last = true;
		}
		if (txb->seq == to)
			break;
	}

	if (rot_last)
		set_bit(RXRPC_CALL_TX_ALL_ACKED, &call->flags);

	_enter("%x,%x,%x,%d", to, call->acks_hard_ack, call->tx_top, rot_last);

	if (call->acks_lowest_nak == call->acks_hard_ack) {
		call->acks_lowest_nak = to;
	} else if (after(to, call->acks_lowest_nak)) {
		summary->new_low_nack = true;
		call->acks_lowest_nak = to;
	}

	smp_store_release(&call->acks_hard_ack, to);

	trace_rxrpc_txqueue(call, (rot_last ?
				   rxrpc_txqueue_rotate_last :
				   rxrpc_txqueue_rotate));
	wake_up(&call->waitq);
	return rot_last;
}

/*
 * End the transmission phase of a call.
 *
 * This occurs when we get an ACKALL packet, the first DATA packet of a reply,
 * or a final ACK packet.
 */
static void rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
			       enum rxrpc_abort_reason abort_why)
{
	ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));

	call->resend_at = KTIME_MAX;
	trace_rxrpc_timer_can(call, rxrpc_timer_trace_resend);

	if (unlikely(call->cong_last_nack)) {
		rxrpc_free_skb(call->cong_last_nack, rxrpc_skb_put_last_nack);
		call->cong_last_nack = NULL;
	}

	switch (__rxrpc_call_state(call)) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
		if (reply_begun) {
			rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_RECV_REPLY);
			trace_rxrpc_txqueue(call, rxrpc_txqueue_end);
			break;
		}

		rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_AWAIT_REPLY);
		trace_rxrpc_txqueue(call, rxrpc_txqueue_await_reply);
		break;

	case RXRPC_CALL_SERVER_AWAIT_ACK:
		rxrpc_call_completed(call);
		trace_rxrpc_txqueue(call, rxrpc_txqueue_end);
		break;

	default:
		kdebug("end_tx %s", rxrpc_call_states[__rxrpc_call_state(call)]);
		rxrpc_proto_abort(call, call->tx_top, abort_why);
		break;
	}
}

/*
 * Begin the reply reception phase of a call.
 */
static bool rxrpc_receiving_reply(struct rxrpc_call *call)
{
	struct rxrpc_ack_summary summary = { 0 };
	rxrpc_seq_t top = READ_ONCE(call->tx_top);

	if (call->ackr_reason) {
		call->delay_ack_at = KTIME_MAX;
		trace_rxrpc_timer_can(call, rxrpc_timer_trace_delayed_ack);
	}

	if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
		if (!rxrpc_rotate_tx_window(call, top, &summary)) {
			rxrpc_proto_abort(call, top, rxrpc_eproto_early_reply);
			return false;
		}
	}

	rxrpc_end_tx_phase(call, true, rxrpc_eproto_unexpected_reply);
	return true;
}

/*
 * End the packet reception phase.
 */
static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
{
	rxrpc_seq_t whigh = READ_ONCE(call->rx_highest_seq);

	_enter("%d,%s", call->debug_id, rxrpc_call_states[__rxrpc_call_state(call)]);

	trace_rxrpc_receive(call, rxrpc_receive_end, 0, whigh);

	switch (__rxrpc_call_state(call)) {
	case RXRPC_CALL_CLIENT_RECV_REPLY:
		rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_terminal_ack);
		rxrpc_call_completed(call);
		break;

	case RXRPC_CALL_SERVER_RECV_REQUEST:
		rxrpc_set_call_state(call, RXRPC_CALL_SERVER_ACK_REQUEST);
		call->expect_req_by = KTIME_MAX;
		rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_processing_op);
		break;

	default:
		break;
	}
}

static void rxrpc_input_update_ack_window(struct rxrpc_call *call,
					  rxrpc_seq_t window, rxrpc_seq_t wtop)
{
	call->ackr_window = window;
	call->ackr_wtop = wtop;
}

/*
 * Push a DATA packet onto the Rx queue.
 */
static void rxrpc_input_queue_data(struct rxrpc_call *call, struct sk_buff *skb,
				   rxrpc_seq_t window, rxrpc_seq_t wtop,
				   enum rxrpc_receive_trace why)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	bool last = sp->hdr.flags & RXRPC_LAST_PACKET;

	__skb_queue_tail(&call->recvmsg_queue, skb);
	rxrpc_input_update_ack_window(call, window, wtop);
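	/* "why + 1" relies on the _last variant of each receive trace ID
	 * immediately following the plain one in the trace enum.
	 */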
	trace_rxrpc_receive(call, last ? why + 1 : why, sp->hdr.serial, sp->hdr.seq);
	if (last)
		rxrpc_end_rx_phase(call, sp->hdr.serial);
}

/*
 * Process a DATA packet.
 */
static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb,
				 bool *_notify)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct sk_buff *oos;
	rxrpc_serial_t serial = sp->hdr.serial;
	unsigned int sack = call->ackr_sack_base;
	rxrpc_seq_t window = call->ackr_window;
	rxrpc_seq_t wtop = call->ackr_wtop;
	rxrpc_seq_t wlimit = window + call->rx_winsize - 1;
	rxrpc_seq_t seq = sp->hdr.seq;
	bool last = sp->hdr.flags & RXRPC_LAST_PACKET;
	int ack_reason = -1;

	rxrpc_inc_stat(call->rxnet, stat_rx_data);
	if (sp->hdr.flags & RXRPC_REQUEST_ACK)
		rxrpc_inc_stat(call->rxnet, stat_rx_data_reqack);
	if (sp->hdr.flags & RXRPC_JUMBO_PACKET)
		rxrpc_inc_stat(call->rxnet, stat_rx_data_jumbo);

	if (last) {
		if (test_and_set_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
		    seq + 1 != wtop)
			return rxrpc_proto_abort(call, seq, rxrpc_eproto_different_last);
	} else {
		if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
		    after_eq(seq, wtop)) {
			pr_warn("Packet beyond last: c=%x q=%x window=%x-%x wlimit=%x\n",
				call->debug_id, seq, window, wtop, wlimit);
			return rxrpc_proto_abort(call, seq, rxrpc_eproto_data_after_last);
		}
	}

	if (after(seq, call->rx_highest_seq))
		call->rx_highest_seq = seq;

	trace_rxrpc_rx_data(call->debug_id, seq, serial, sp->hdr.flags);

	if (before(seq, window)) {
		ack_reason = RXRPC_ACK_DUPLICATE;
		goto send_ack;
	}
	if (after(seq, wlimit)) {
		ack_reason = RXRPC_ACK_EXCEEDS_WINDOW;
		goto send_ack;
	}

	/* Queue the packet. */
	if (seq == window) {
		if (sp->hdr.flags & RXRPC_REQUEST_ACK)
			ack_reason = RXRPC_ACK_REQUESTED;
		/* Send an immediate ACK if we fill in a hole */
		else if (!skb_queue_empty(&call->rx_oos_queue))
			ack_reason = RXRPC_ACK_DELAY;
		else
			call->ackr_nr_unacked++;

		window++;
		if (after(window, wtop)) {
			trace_rxrpc_sack(call, seq, sack, rxrpc_sack_none);
			wtop = window;
		} else {
			trace_rxrpc_sack(call, seq, sack, rxrpc_sack_advance);
			sack = (sack + 1) % RXRPC_SACK_SIZE;
		}

		rxrpc_get_skb(skb, rxrpc_skb_get_to_recvmsg);

		spin_lock(&call->recvmsg_queue.lock);
		rxrpc_input_queue_data(call, skb, window, wtop, rxrpc_receive_queue);
		*_notify = true;

		while ((oos = skb_peek(&call->rx_oos_queue))) {
			struct rxrpc_skb_priv *osp = rxrpc_skb(oos);

			if (after(osp->hdr.seq, window))
				break;

			__skb_unlink(oos, &call->rx_oos_queue);
			last = osp->hdr.flags & RXRPC_LAST_PACKET;
			seq = osp->hdr.seq;
			call->ackr_sack_table[sack] = 0;
			trace_rxrpc_sack(call, seq, sack, rxrpc_sack_fill);
			sack = (sack + 1) % RXRPC_SACK_SIZE;

			window++;
			rxrpc_input_queue_data(call, oos, window, wtop,
					       rxrpc_receive_queue_oos);
		}

		spin_unlock(&call->recvmsg_queue.lock);

		call->ackr_sack_base = sack;
	} else {
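		/* The packet arrived out of order: mark its slot in the SACK
		 * table (a ring of RXRPC_SACK_SIZE entries based at
		 * ackr_sack_base) and insert it into rx_oos_queue, which is
		 * kept sorted by sequence number.
		 */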
		unsigned int slot;

		ack_reason = RXRPC_ACK_OUT_OF_SEQUENCE;

		slot = seq - window;
		sack = (sack + slot) % RXRPC_SACK_SIZE;

		if (call->ackr_sack_table[sack % RXRPC_SACK_SIZE]) {
			ack_reason = RXRPC_ACK_DUPLICATE;
			goto send_ack;
		}

		call->ackr_sack_table[sack % RXRPC_SACK_SIZE] |= 1;
		trace_rxrpc_sack(call, seq, sack, rxrpc_sack_oos);

		if (after(seq + 1, wtop)) {
			wtop = seq + 1;
			rxrpc_input_update_ack_window(call, window, wtop);
		}

		skb_queue_walk(&call->rx_oos_queue, oos) {
			struct rxrpc_skb_priv *osp = rxrpc_skb(oos);

			if (after(osp->hdr.seq, seq)) {
				rxrpc_get_skb(skb, rxrpc_skb_get_to_recvmsg_oos);
				__skb_queue_before(&call->rx_oos_queue, oos, skb);
				goto oos_queued;
			}
		}

		rxrpc_get_skb(skb, rxrpc_skb_get_to_recvmsg_oos);
		__skb_queue_tail(&call->rx_oos_queue, skb);
	oos_queued:
		trace_rxrpc_receive(call, last ? rxrpc_receive_oos_last : rxrpc_receive_oos,
				    sp->hdr.serial, sp->hdr.seq);
	}

send_ack:
	if (ack_reason >= 0)
		rxrpc_send_ACK(call, ack_reason, serial,
			       rxrpc_propose_ack_input_data);
	else
		rxrpc_propose_delay_ACK(call, serial,
					rxrpc_propose_ack_input_data);
}

/*
 * Split a jumbo packet and file the bits separately.
 */
static bool rxrpc_input_split_jumbo(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_jumbo_header jhdr;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb), *jsp;
	struct sk_buff *jskb;
	unsigned int offset = sizeof(struct rxrpc_wire_header);
	unsigned int len = skb->len - offset;
	bool notify = false;

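	/* Each non-final subpacket is RXRPC_JUMBO_DATALEN bytes of data
	 * followed by a secondary jumbo header; the sequence and serial
	 * numbers increment by one per subpacket.
	 */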
	while (sp->hdr.flags & RXRPC_JUMBO_PACKET) {
		if (len < RXRPC_JUMBO_SUBPKTLEN)
			goto protocol_error;
		if (sp->hdr.flags & RXRPC_LAST_PACKET)
			goto protocol_error;
		if (skb_copy_bits(skb, offset + RXRPC_JUMBO_DATALEN,
				  &jhdr, sizeof(jhdr)) < 0)
			goto protocol_error;

		jskb = skb_clone(skb, GFP_NOFS);
		if (!jskb) {
			kdebug("couldn't clone");
			return false;
		}
		rxrpc_new_skb(jskb, rxrpc_skb_new_jumbo_subpacket);
		jsp = rxrpc_skb(jskb);
		jsp->offset = offset;
		jsp->len = RXRPC_JUMBO_DATALEN;
		rxrpc_input_data_one(call, jskb, &notify);
		rxrpc_free_skb(jskb, rxrpc_skb_put_jumbo_subpacket);

		sp->hdr.flags = jhdr.flags;
		sp->hdr._rsvd = ntohs(jhdr._rsvd);
		sp->hdr.seq++;
		sp->hdr.serial++;
		offset += RXRPC_JUMBO_SUBPKTLEN;
		len -= RXRPC_JUMBO_SUBPKTLEN;
	}

	sp->offset = offset;
	sp->len = len;
	rxrpc_input_data_one(call, skb, &notify);
	if (notify) {
		trace_rxrpc_notify_socket(call->debug_id, sp->hdr.serial);
		rxrpc_notify_socket(call);
	}
	return true;

protocol_error:
	return false;
}

/*
 * Process a DATA packet, adding the packet to the Rx ring. The caller's
 * packet ref must be passed on or discarded.
 */
static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	rxrpc_serial_t serial = sp->hdr.serial;
	rxrpc_seq_t seq0 = sp->hdr.seq;

	_enter("{%x,%x,%x},{%u,%x}",
	       call->ackr_window, call->ackr_wtop, call->rx_highest_seq,
	       skb->len, seq0);

	if (__rxrpc_call_is_complete(call))
		return;

	switch (__rxrpc_call_state(call)) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
		/* Received data implicitly ACKs all of the request
		 * packets we sent when we're acting as a client.
		 */
		if (!rxrpc_receiving_reply(call))
			goto out_notify;
		break;

	case RXRPC_CALL_SERVER_RECV_REQUEST: {
		unsigned long timo = READ_ONCE(call->next_req_timo);

		if (timo) {
			ktime_t delay = ms_to_ktime(timo);

			call->expect_req_by = ktime_add(ktime_get_real(), delay);
			trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_idle);
		}
		break;
	}

	default:
		break;
	}

	if (!rxrpc_input_split_jumbo(call, skb)) {
		rxrpc_proto_abort(call, sp->hdr.seq, rxrpc_badmsg_bad_jumbo);
		goto out_notify;
	}
	return;

out_notify:
	trace_rxrpc_notify_socket(call->debug_id, serial);
	rxrpc_notify_socket(call);
	_leave(" [queued]");
}

/*
 * See if there's a cached RTT probe to complete.
 */
static void rxrpc_complete_rtt_probe(struct rxrpc_call *call,
				     ktime_t resp_time,
				     rxrpc_serial_t acked_serial,
				     rxrpc_serial_t ack_serial,
				     enum rxrpc_rtt_rx_trace type)
{
	rxrpc_serial_t orig_serial;
	unsigned long avail;
	ktime_t sent_at;
	bool matched = false;
	int i;

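	/* rtt_avail is a bitmap: the low bits indicate probe slots that are
	 * free for reuse and the bits from RXRPC_CALL_RTT_PEND_SHIFT up
	 * indicate probes still in flight.
	 */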
	avail = READ_ONCE(call->rtt_avail);
	smp_rmb(); /* Read avail bits before accessing data. */

	for (i = 0; i < ARRAY_SIZE(call->rtt_serial); i++) {
		if (!test_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &avail))
			continue;

		sent_at = call->rtt_sent_at[i];
		orig_serial = call->rtt_serial[i];

		if (orig_serial == acked_serial) {
			clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
			smp_mb(); /* Read data before setting avail bit */
			set_bit(i, &call->rtt_avail);
			rxrpc_peer_add_rtt(call, type, i, acked_serial, ack_serial,
					   sent_at, resp_time);
			matched = true;
		}

		/* If a later serial is being acked, then mark this slot as
		 * being available.
		 */
		if (after(acked_serial, orig_serial)) {
			trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_obsolete, i,
					   orig_serial, acked_serial, 0, 0);
			clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
			smp_wmb();
			set_bit(i, &call->rtt_avail);
		}
	}

	if (!matched)
		trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_lost, 9, 0, acked_serial, 0, 0);
}

/*
 * Process the extra information that may be appended to an ACK packet
 */
static void rxrpc_input_ack_trailer(struct rxrpc_call *call, struct sk_buff *skb,
				    struct rxrpc_acktrailer *trailer)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_peer *peer;
	unsigned int mtu;
	bool wake = false;
	u32 rwind = ntohl(trailer->rwind);

	if (rwind > RXRPC_TX_MAX_WINDOW)
		rwind = RXRPC_TX_MAX_WINDOW;
	if (call->tx_winsize != rwind) {
		if (rwind > call->tx_winsize)
			wake = true;
		trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, rwind, wake);
		call->tx_winsize = rwind;
	}

	if (call->cong_ssthresh > rwind)
		call->cong_ssthresh = rwind;

	mtu = min(ntohl(trailer->maxMTU), ntohl(trailer->ifMTU));

	peer = call->peer;
	if (mtu < peer->maxdata) {
		spin_lock(&peer->lock);
		peer->maxdata = mtu;
		peer->mtu = mtu + peer->hdrsize;
		spin_unlock(&peer->lock);
	}

	if (wake)
		wake_up(&call->waitq);
}

/*
 * Determine how many nacks from the previous ACK have now been satisfied.
 */
static rxrpc_seq_t rxrpc_input_check_prev_ack(struct rxrpc_call *call,
					      struct rxrpc_ack_summary *summary,
					      rxrpc_seq_t seq)
{
	struct sk_buff *skb = call->cong_last_nack;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	unsigned int i, new_acks = 0, retained_nacks = 0;
	rxrpc_seq_t old_seq = sp->ack.first_ack;
	u8 *acks = skb->data + sizeof(struct rxrpc_wire_header) + sizeof(struct rxrpc_ackpacket);

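	/* Three cases: the new ACK's window starts beyond everything the old
	 * ACK covered (all old nacks now count as acked); it starts at the
	 * same point (all old nacks are retained); or it partially overlaps,
	 * in which case each old nack counts as acked or retained according
	 * to whether it falls below the new first sequence number.
	 */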
	if (after_eq(seq, old_seq + sp->ack.nr_acks)) {
		summary->nr_new_acks += sp->ack.nr_nacks;
		summary->nr_new_acks += seq - (old_seq + sp->ack.nr_acks);
		summary->nr_retained_nacks = 0;
	} else if (seq == old_seq) {
		summary->nr_retained_nacks = sp->ack.nr_nacks;
	} else {
		for (i = 0; i < sp->ack.nr_acks; i++) {
			if (acks[i] == RXRPC_ACK_TYPE_NACK) {
				if (before(old_seq + i, seq))
					new_acks++;
				else
					retained_nacks++;
			}
		}

		summary->nr_new_acks += new_acks;
		summary->nr_retained_nacks = retained_nacks;
	}

	return old_seq + sp->ack.nr_acks;
}

/*
 * Process individual soft ACKs.
 *
 * Each ACK in the array corresponds to one packet and can be either an ACK or
 * a NAK. If we find an explicitly NAK'd packet we resend immediately;
 * packets that lie beyond the end of the ACK list are scheduled for resend by
 * the timer on the basis that the peer might just not have processed them at
 * the time the ACK was sent.
 */
static void rxrpc_input_soft_acks(struct rxrpc_call *call,
				  struct rxrpc_ack_summary *summary,
				  struct sk_buff *skb,
				  rxrpc_seq_t seq,
				  rxrpc_seq_t since)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	unsigned int i, old_nacks = 0;
	rxrpc_seq_t lowest_nak = seq + sp->ack.nr_acks;
	u8 *acks = skb->data + sizeof(struct rxrpc_wire_header) + sizeof(struct rxrpc_ackpacket);

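	/* acks[] holds one entry per packet, starting at "seq" (the first
	 * soft-ACKed sequence); entries at or after "since" weren't covered
	 * by the previous ACK and so count as new.
	 */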
	for (i = 0; i < sp->ack.nr_acks; i++) {
		if (acks[i] == RXRPC_ACK_TYPE_ACK) {
			summary->nr_acks++;
			if (after_eq(seq, since))
				summary->nr_new_acks++;
		} else {
			summary->saw_nacks = true;
			if (before(seq, since)) {
				/* Overlap with previous ACK */
				old_nacks++;
			} else {
				summary->nr_new_nacks++;
				sp->ack.nr_nacks++;
			}

			if (before(seq, lowest_nak))
				lowest_nak = seq;
		}
		seq++;
	}

	if (lowest_nak != call->acks_lowest_nak) {
		call->acks_lowest_nak = lowest_nak;
		summary->new_low_nack = true;
	}

	/* We *can* have more nacks than we did - the peer is permitted to drop
	 * packets it has soft-acked and re-request them. Further, it is
	 * possible for the nack distribution to change whilst the number of
	 * nacks stays the same or goes down.
	 */
	if (old_nacks < summary->nr_retained_nacks)
		summary->nr_new_acks += summary->nr_retained_nacks - old_nacks;
	summary->nr_retained_nacks = old_nacks;
}

/*
 * Return true if the ACK is valid - ie. it doesn't appear to have regressed
 * with respect to the ack state conveyed by preceding ACKs.
 */
static bool rxrpc_is_ack_valid(struct rxrpc_call *call,
			       rxrpc_seq_t first_pkt, rxrpc_seq_t prev_pkt)
{
	rxrpc_seq_t base = READ_ONCE(call->acks_first_seq);

	if (after(first_pkt, base))
		return true; /* The window advanced */

	if (before(first_pkt, base))
		return false; /* firstPacket regressed */

	if (after_eq(prev_pkt, call->acks_prev_seq))
		return true; /* previousPacket hasn't regressed. */

	/* Some rx implementations put a serial number in previousPacket. */
	if (after_eq(prev_pkt, base + call->tx_winsize))
		return false;
	return true;
}

/*
 * Process an ACK packet.
 *
 * ack.firstPacket is the sequence number of the first soft-ACK'd/NAK'd packet
 * in the ACK array. Anything before that is hard-ACK'd and may be discarded.
 *
 * A hard-ACK means that a packet has been processed and may be discarded; a
 * soft-ACK means that the packet may be discarded and retransmission
 * requested. A phase is complete when all packets are hard-ACK'd.
 */
static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_ack_summary summary = { 0 };
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_acktrailer trailer;
	rxrpc_serial_t ack_serial, acked_serial;
	rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt, since;
	int nr_acks, offset, ioffset;

	_enter("");

	offset = sizeof(struct rxrpc_wire_header) + sizeof(struct rxrpc_ackpacket);

	ack_serial = sp->hdr.serial;
	acked_serial = sp->ack.acked_serial;
	first_soft_ack = sp->ack.first_ack;
	prev_pkt = sp->ack.prev_ack;
	nr_acks = sp->ack.nr_acks;
	hard_ack = first_soft_ack - 1;
	summary.ack_reason = (sp->ack.reason < RXRPC_ACK__INVALID ?
			      sp->ack.reason : RXRPC_ACK__INVALID);

	trace_rxrpc_rx_ack(call, ack_serial, acked_serial,
			   first_soft_ack, prev_pkt,
			   summary.ack_reason, nr_acks);
	rxrpc_inc_stat(call->rxnet, stat_rx_acks[summary.ack_reason]);

	if (acked_serial != 0) {
		switch (summary.ack_reason) {
		case RXRPC_ACK_PING_RESPONSE:
			rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
						 rxrpc_rtt_rx_ping_response);
			break;
		case RXRPC_ACK_REQUESTED:
			rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
						 rxrpc_rtt_rx_requested_ack);
			break;
		default:
			rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
						 rxrpc_rtt_rx_other_ack);
			break;
		}
	}

	/* If we get an EXCEEDS_WINDOW ACK from the server, it probably
	 * indicates that the client address changed due to NAT. The server
	 * lost the call because it switched to a different peer.
	 */
	if (unlikely(summary.ack_reason == RXRPC_ACK_EXCEEDS_WINDOW) &&
	    first_soft_ack == 1 &&
	    prev_pkt == 0 &&
	    rxrpc_is_client_call(call)) {
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  0, -ENETRESET);
		goto send_response;
	}

	/* If we get an OUT_OF_SEQUENCE ACK from the server, that can also
	 * indicate a change of address. However, we can retransmit the call
	 * if we still have it buffered to the beginning.
	 */
	if (unlikely(summary.ack_reason == RXRPC_ACK_OUT_OF_SEQUENCE) &&
	    first_soft_ack == 1 &&
	    prev_pkt == 0 &&
	    call->acks_hard_ack == 0 &&
	    rxrpc_is_client_call(call)) {
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  0, -ENETRESET);
		goto send_response;
	}

	/* Discard any out-of-order or duplicate ACKs (outside lock). */
	if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
		trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
					   first_soft_ack, call->acks_first_seq,
					   prev_pkt, call->acks_prev_seq);
		goto send_response;
	}

	trailer.maxMTU = 0;
	ioffset = offset + nr_acks + 3;
	if (skb->len >= ioffset + sizeof(trailer) &&
	    skb_copy_bits(skb, ioffset, &trailer, sizeof(trailer)) < 0)
		return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack_trailer);

	if (nr_acks > 0)
		skb_condense(skb);

	if (call->cong_last_nack) {
		since = rxrpc_input_check_prev_ack(call, &summary, first_soft_ack);
		rxrpc_free_skb(call->cong_last_nack, rxrpc_skb_put_last_nack);
		call->cong_last_nack = NULL;
	} else {
		summary.nr_new_acks = first_soft_ack - call->acks_first_seq;
		call->acks_lowest_nak = first_soft_ack + nr_acks;
		since = first_soft_ack;
	}

	call->acks_latest_ts = skb->tstamp;
	call->acks_first_seq = first_soft_ack;
	call->acks_prev_seq = prev_pkt;

	switch (summary.ack_reason) {
	case RXRPC_ACK_PING:
		break;
	default:
		if (acked_serial && after(acked_serial, call->acks_highest_serial))
			call->acks_highest_serial = acked_serial;
		break;
	}

	/* Parse rwind and mtu sizes if provided. */
	if (trailer.maxMTU)
		rxrpc_input_ack_trailer(call, skb, &trailer);

	if (first_soft_ack == 0)
		return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_zero);

	/* Ignore ACKs unless we are or have just been transmitting. */
	switch (__rxrpc_call_state(call)) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
	case RXRPC_CALL_SERVER_SEND_REPLY:
	case RXRPC_CALL_SERVER_AWAIT_ACK:
		break;
	default:
		goto send_response;
	}

	if (before(hard_ack, call->acks_hard_ack) ||
	    after(hard_ack, call->tx_top))
		return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_outside_window);
	if (nr_acks > call->tx_top - hard_ack)
		return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_sack_overflow);

	if (after(hard_ack, call->acks_hard_ack)) {
		if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
			rxrpc_end_tx_phase(call, false, rxrpc_eproto_unexpected_ack);
			goto send_response;
		}
	}

	if (nr_acks > 0) {
		if (offset > (int)skb->len - nr_acks)
			return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_short_sack);
		rxrpc_input_soft_acks(call, &summary, skb, first_soft_ack, since);
		rxrpc_get_skb(skb, rxrpc_skb_get_last_nack);
		call->cong_last_nack = skb;
	}

	if (test_bit(RXRPC_CALL_TX_LAST, &call->flags) &&
	    summary.nr_acks == call->tx_top - hard_ack &&
	    rxrpc_is_client_call(call))
		rxrpc_propose_ping(call, ack_serial,
				   rxrpc_propose_ack_ping_for_lost_reply);

	rxrpc_congestion_management(call, skb, &summary, acked_serial);

send_response:
	if (summary.ack_reason == RXRPC_ACK_PING)
		rxrpc_send_ACK(call, RXRPC_ACK_PING_RESPONSE, ack_serial,
			       rxrpc_propose_ack_respond_to_ping);
	else if (sp->hdr.flags & RXRPC_REQUEST_ACK)
		rxrpc_send_ACK(call, RXRPC_ACK_REQUESTED, ack_serial,
			       rxrpc_propose_ack_respond_to_ack);
}

/*
 * Process an ACKALL packet.
 */
static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_ack_summary summary = { 0 };

	if (rxrpc_rotate_tx_window(call, call->tx_top, &summary))
		rxrpc_end_tx_phase(call, false, rxrpc_eproto_unexpected_ackall);
}

/*
 * Process an ABORT packet directed at a call.
 */
static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	trace_rxrpc_rx_abort(call, sp->hdr.serial, skb->priority);

	rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
				  skb->priority, -ECONNABORTED);
}

/*
 * Process an incoming call packet.
 */
void rxrpc_input_call_packet(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	unsigned long timo;

	_enter("%p,%p", call, skb);

	if (sp->hdr.serviceId != call->dest_srx.srx_service)
		call->dest_srx.srx_service = sp->hdr.serviceId;
	if ((int)sp->hdr.serial - (int)call->rx_serial > 0)
		call->rx_serial = sp->hdr.serial;
	if (!test_bit(RXRPC_CALL_RX_HEARD, &call->flags))
		set_bit(RXRPC_CALL_RX_HEARD, &call->flags);

	timo = READ_ONCE(call->next_rx_timo);
	if (timo) {
		ktime_t delay = ms_to_ktime(timo);

		call->expect_rx_by = ktime_add(ktime_get_real(), delay);
		trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_expect_rx);
	}

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_DATA:
		return rxrpc_input_data(call, skb);

	case RXRPC_PACKET_TYPE_ACK:
		return rxrpc_input_ack(call, skb);

	case RXRPC_PACKET_TYPE_BUSY:
		/* Just ignore BUSY packets from the server; the retry and
		 * lifespan timers will take care of business. BUSY packets
		 * from the client don't make sense.
		 */
		return;

	case RXRPC_PACKET_TYPE_ABORT:
		return rxrpc_input_abort(call, skb);

	case RXRPC_PACKET_TYPE_ACKALL:
		return rxrpc_input_ackall(call, skb);

	default:
		break;
	}
}

/*
 * Handle a new service call on a channel implicitly completing the preceding
 * call on that channel. This does not apply to client conns.
 *
 * TODO: If callNumber > call_id + 1, renegotiate security.
 */
void rxrpc_implicit_end_call(struct rxrpc_call *call, struct sk_buff *skb)
{
	switch (__rxrpc_call_state(call)) {
	case RXRPC_CALL_SERVER_AWAIT_ACK:
		rxrpc_call_completed(call);
		fallthrough;
	case RXRPC_CALL_COMPLETE:
		break;
	default:
		rxrpc_abort_call(call, 0, RX_CALL_DEAD, -ESHUTDOWN,
				 rxrpc_eproto_improper_term);
		trace_rxrpc_improper_term(call);
		break;
	}

	rxrpc_input_call_event(call, skb);
}