// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed, user_timeout;
	s32 remaining;

	user_timeout = READ_ONCE(icsk->icsk_user_timeout);
	if (!user_timeout)
		return icsk->icsk_rto;

	elapsed = tcp_time_stamp_ts(tp) - tp->retrans_stamp;
	if (tp->tcp_usec_ts)
		elapsed /= USEC_PER_MSEC;

	remaining = user_timeout - elapsed;
	if (remaining <= 0)
		return 1; /* user timeout has passed; fire ASAP */

	return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
}
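
/* Illustration (userspace, not part of the kernel build): the user timeout
 * clamped against above is armed via the TCP_USER_TIMEOUT socket option,
 * in milliseconds. A minimal sketch, error handling omitted:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	unsigned int tmo_ms = 30000;	// abort after ~30s without progress
 *	setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &tmo_ms, sizeof(tmo_ms));
 *
 * Once set, retransmission timers are shortened so the connection is torn
 * down close to the requested deadline instead of at the next full RTO.
 */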

u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 remaining, user_timeout;
	s32 elapsed;

	user_timeout = READ_ONCE(icsk->icsk_user_timeout);
	if (!user_timeout || !icsk->icsk_probes_tstamp)
		return when;

	elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp;
	if (unlikely(elapsed < 0))
		elapsed = 0;
	remaining = msecs_to_jiffies(user_timeout) - elapsed;
	remaining = max_t(u32, remaining, TCP_TIMEOUT_MIN);

	return min_t(u32, remaining, when);
}
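
/* Worked example (assuming HZ=1000 for readability): with
 * TCP_USER_TIMEOUT = 10000 ms and a probe timestamp taken 9500 jiffies ago,
 * remaining = 10000 - 9500 = 500 jiffies, so a caller-supplied 'when' of
 * 2000 jiffies is clamped down to 500; the zero-window probe then fires
 * early enough to honour the user timeout.
 */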

/**
 * tcp_write_err() - close socket and save error info
 * @sk:  The socket the error has appeared on.
 *
 * Returns: Nothing (void)
 */
static void tcp_write_err(struct sock *sk)
{
	WRITE_ONCE(sk->sk_err, READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT);
	sk_error_report(sk);

	tcp_write_queue_purge(sk);
	tcp_done(sk);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/**
 * tcp_out_of_resources() - Close socket if out of resources
 * @sk:       pointer to current socket
 * @do_reset: send a last packet with reset flag
 *
 * Do not allow orphaned sockets to eat all our resources.
 * This is a direct violation of the TCP specs, but it is required
 * to prevent DoS attacks. It is called when a retransmission timeout
 * or zero probe timeout occurs on an orphaned socket.
 *
 * Also close if our net namespace is exiting; in that case there is no
 * hope of ever communicating again since all netns interfaces are already
 * down (or about to be down), and we need to release our dst references,
 * which have been moved to the netns loopback interface, so the namespace
 * can finish exiting.  This condition is only possible if we are a kernel
 * socket, as those do not hold references to the namespace.
 *
 * The criteria are still not confirmed experimentally and may change.
 * We kill the socket if:
 * 1. The number of orphaned sockets exceeds an administratively configured
 *    limit.
 * 2. We are under strong memory pressure.
 * 3. Our net namespace is exiting.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If peer does not open window for a long time, or did not transmit
	 * anything for a long time, penalize it. */
	if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (READ_ONCE(sk->sk_err_soft))
		shift++;

	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases, when connection requires reset.
		 *      1. Last segment was sent recently. */
		if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /*  2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = true;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}

	if (!check_net(sock_net(sk))) {
		/* Not possible to send reset; just close */
		tcp_done(sk);
		return 1;
	}

	return 0;
}

/**
 * tcp_orphan_retries() - Returns the maximum number of retries on an orphaned socket
 * @sk:    Pointer to the current socket.
 * @alive: bool, socket alive state
 */
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
	int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (READ_ONCE(sk->sk_err_soft) && !alive)
		retries = 0;

	/* However, if socket sent something recently, select some safe
	 * number of retries. 8 corresponds to >100 seconds with minimal
	 * RTO of 200msec. */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}
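
/* For illustration: the zero default above means "use the safe fallback of 8
 * while the socket still looks alive". An administrator can pin an explicit
 * orphan retry budget instead (userspace, not part of this file):
 *
 *	# sysctl -w net.ipv4.tcp_orphan_retries=2
 *
 * after which orphaned connections give up after two retransmissions.
 */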

static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	const struct net *net = sock_net(sk);
	int mss;

	/* Black hole detection */
	if (!READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing))
		return;

	if (!icsk->icsk_mtup.enabled) {
		icsk->icsk_mtup.enabled = 1;
		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
	} else {
		mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
		mss = min(READ_ONCE(net->ipv4.sysctl_tcp_base_mss), mss);
		mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_mtu_probe_floor));
		mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_min_snd_mss));
		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
	}
	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}
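
/* Worked example of the halving above (values are illustrative): if the
 * current search_low maps to an MSS of 1400 bytes, another timeout halves
 * it to 700, which is then capped at tcp_base_mss (default 1024) and kept
 * above tcp_mtu_probe_floor and tcp_min_snd_mss, so the next probe uses a
 * distinctly smaller segment to route around a suspected PMTU black hole.
 */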

static unsigned int tcp_model_timeout(struct sock *sk,
				      unsigned int boundary,
				      unsigned int rto_base)
{
	unsigned int linear_backoff_thresh, timeout;

	linear_backoff_thresh = ilog2(TCP_RTO_MAX / rto_base);
	if (boundary <= linear_backoff_thresh)
		timeout = ((2 << boundary) - 1) * rto_base;
	else
		timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
			(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	return jiffies_to_msecs(timeout);
}
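
/* Worked example: with rto_base = TCP_RTO_MIN (200ms) and TCP_RTO_MAX = 120s,
 * linear_backoff_thresh = ilog2(600) = 9.
 * For boundary = 3 (<= 9): timeout = ((2 << 3) - 1) * 200ms = 3s, i.e. the
 * sum 200 + 400 + 800 + 1600 ms of four doubling RTOs.
 * For the default tcp_retries2 boundary of 15 (> 9):
 * timeout = ((2 << 9) - 1) * 200ms + (15 - 9) * 120s = 204.6s + 720s
 * = 924.6s, the well-known ~15.5 minute write timeout.
 */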

/**
 * retransmits_timed_out() - returns true if this connection has timed out
 * @sk:       The current socket
 * @boundary: max number of retransmissions
 * @timeout:  A custom timeout value.
 *            If set to 0, the default timeout is calculated using
 *            TCP_RTO_MIN and the number of unsuccessful retransmits.
 *
 * The default "timeout" value this function can calculate and use
 * is equivalent to the timeout of a TCP connection
 * after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN.
 */
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int start_ts, delta;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	start_ts = tp->retrans_stamp;
	if (likely(timeout == 0)) {
		unsigned int rto_base = TCP_RTO_MIN;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			rto_base = tcp_timeout_init(sk);
		timeout = tcp_model_timeout(sk, boundary, rto_base);
	}

	if (tp->tcp_usec_ts) {
		/* delta may be off by up to a jiffy due to timer granularity. */
		delta = tp->tcp_mstamp - start_ts + jiffies_to_usecs(1);
		return (s32)(delta - timeout * USEC_PER_MSEC) >= 0;
	}
	return (s32)(tcp_time_stamp_ts(tp) - start_ts - timeout) >= 0;
}

/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	bool expired = false, do_reset;
	int retry_until, max_retransmits;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits)
			__dst_negative_advice(sk);
		/* Paired with WRITE_ONCE() in tcp_sock_set_syncnt() */
		retry_until = READ_ONCE(icsk->icsk_syn_retries) ? :
			READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);

		max_retransmits = retry_until;
		if (sk->sk_state == TCP_SYN_SENT)
			max_retransmits += READ_ONCE(net->ipv4.sysctl_tcp_syn_linear_timeouts);

		expired = icsk->icsk_retransmits >= max_retransmits;
	} else {
		if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1), 0)) {
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			__dst_negative_advice(sk);
		}

		retry_until = READ_ONCE(net->ipv4.sysctl_tcp_retries2);
		if (sock_flag(sk, SOCK_DEAD)) {
			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				!retransmits_timed_out(sk, retry_until, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}
	if (!expired)
		expired = retransmits_timed_out(sk, retry_until,
						READ_ONCE(icsk->icsk_user_timeout));
	tcp_fastopen_active_detect_blackhole(sk, expired);

	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
				  icsk->icsk_retransmits,
				  icsk->icsk_rto, (int)expired);

	if (expired) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}

	if (sk_rethink_txhash(sk)) {
		tp->timeout_rehash++;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTREHASH);
	}

	return 0;
}
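
/* For illustration: with the stock defaults net.ipv4.tcp_retries1 = 3 and
 * net.ipv4.tcp_retries2 = 15, the path above starts MTU probing and
 * re-checks the route after roughly 3 RTOs' worth of time, and gives up on
 * an established connection after ~924.6s of back-to-back retransmissions
 * (see the worked tcp_model_timeout example earlier). Both knobs are
 * runtime-tunable, e.g.:
 *
 *	# sysctl -w net.ipv4.tcp_retries2=8
 */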

/* Called with BH disabled */
void tcp_delack_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	/* Handling the sack compression case */
	if (tp->compressed_ack) {
		tcp_mstamp_refresh(tp);
		tcp_sack_compress_send_ack(sk);
		return;
	}

	if (!(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		return;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		return;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (inet_csk_ack_scheduled(sk)) {
		if (!inet_csk_in_pingpong_mode(sk)) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min_t(u32, icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			inet_csk_exit_pingpong_mode(sk);
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		tcp_mstamp_refresh(tp);
		tcp_send_ack(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}
}

/**
 * tcp_delack_timer() - The TCP delayed ACK timeout handler
 * @t:  Pointer to the timer. (gets cast to struct sock *)
 *
 * This function gets (indirectly) called when the kernel timer for a TCP packet
 * of this socket expires. Calls tcp_delack_timer_handler() to do the actual work.
 *
 * Returns: Nothing (void)
 */
static void tcp_delack_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_delack_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *skb = tcp_send_head(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;

	if (tp->packets_out || !skb) {
		icsk->icsk_probes_out = 0;
		icsk->icsk_probes_tstamp = 0;
		return;
	}

	/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
	 * long as the receiver continues to respond to probes. We support this
	 * by default and reset icsk_probes_out with incoming ACKs. But if the
	 * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
	 * kill the socket when the retry count and the time exceed the
	 * corresponding system limit. We also implement a similar policy when
	 * we use RTO to probe the window in tcp_retransmit_timer().
	 */
	if (!icsk->icsk_probes_tstamp) {
		icsk->icsk_probes_tstamp = tcp_jiffies32;
	} else {
		u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);

		if (user_timeout &&
		    (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >=
		    msecs_to_jiffies(user_timeout))
			goto abort;
	}
	max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2);
	if (sock_flag(sk, SOCK_DEAD)) {
		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;

		max_probes = tcp_orphan_retries(sk, alive);
		if (!alive && icsk->icsk_backoff >= max_probes)
			goto abort;
		if (tcp_out_of_resources(sk, true))
			return;
	}

	if (icsk->icsk_probes_out >= max_probes) {
abort:		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}
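
/* For illustration: a receiver advertising a zero window drives this path.
 * With the default tcp_retries2 = 15, an established, non-orphaned socket
 * sends up to 15 window probes (each backed off like an RTO) before
 * tcp_write_err() aborts with ETIMEDOUT; a TCP_USER_TIMEOUT of, say,
 * 20000 ms instead bounds the whole probing phase to roughly 20s.
 */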

static void tcp_update_rto_stats(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	if (!icsk->icsk_retransmits) {
		tp->total_rto_recoveries++;
		tp->rto_stamp = tcp_time_stamp_ms(tp);
	}
	icsk->icsk_retransmits++;
	tp->total_rto++;
}

/*
 * Timer for Fast Open socket to retransmit SYNACK. Note that the
 * sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_retries;

	req->rsk_ops->syn_ack_timeout(req);

	/* Add one more retry for fastopen.
	 * Paired with WRITE_ONCE() in tcp_sock_set_syncnt()
	 */
	max_retries = READ_ONCE(icsk->icsk_syn_retries) ? :
		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_synack_retries) + 1;

	if (req->num_timeout >= max_retries) {
		tcp_write_err(sk);
		return;
	}
	/* Lower cwnd after certain SYNACK timeout like tcp_init_transfer() */
	if (icsk->icsk_retransmits == 1)
		tcp_enter_loss(sk);
	/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
	 * returned from rtx_syn_ack() to make it more persistent like
	 * regular retransmit because if the child socket has been accepted
	 * it's not good to give up too easily.
	 */
	inet_rtx_syn_ack(sk, req);
	req->num_timeout++;
	tcp_update_rto_stats(sk);
	if (!tp->retrans_stamp)
		tp->retrans_stamp = tcp_time_stamp_ts(tp);
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  req->timeout << req->num_timeout, TCP_RTO_MAX);
}

static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
				     const struct sk_buff *skb,
				     u32 rtx_delta)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const int timeout = TCP_RTO_MAX * 2;
	u32 rcv_delta;

	rcv_delta = inet_csk(sk)->icsk_timeout - tp->rcv_tstamp;
	if (rcv_delta <= timeout)
		return false;

	return msecs_to_jiffies(rtx_delta) > timeout;
}

/**
 * tcp_retransmit_timer() - The TCP retransmit timeout handler
 * @sk:  Pointer to the current socket.
 *
 * This function gets called when the kernel timer for a TCP packet
 * of this socket expires.
 *
 * It handles retransmission, timer adjustment and other necessary measures.
 *
 * Returns: Nothing (void)
 */
void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock *req;
	struct sk_buff *skb;

	req = rcu_dereference_protected(tp->fastopen_rsk,
					lockdep_sock_is_held(sk));
	if (req) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);
		tcp_fastopen_synack_timer(sk, req);
		/* Before we receive ACK to our SYN-ACK don't retransmit
		 * anything else (e.g., data or FIN segments).
		 */
		return;
	}

	if (!tp->packets_out)
		return;

	skb = tcp_rtx_queue_head(sk);
	if (WARN_ON_ONCE(!skb))
		return;

	tp->tlp_high_seq = 0;

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* Receiver dastardly shrinks window. Our retransmits
		 * become zero probes, but we should not time out this
		 * connection. If the socket is an orphan, time it out;
		 * we cannot allow such beasts to hang infinitely.
		 */
		struct inet_sock *inet = inet_sk(sk);
		u32 rtx_delta;

		rtx_delta = tcp_time_stamp_ts(tp) - (tp->retrans_stamp ?:
				tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb));
		if (tp->tcp_usec_ts)
			rtx_delta /= USEC_PER_MSEC;

		if (sk->sk_family == AF_INET) {
			net_dbg_ratelimited("Probing zero-window on %pI4:%u/%u, seq=%u:%u, recv %ums ago, lasting %ums\n",
					    &inet->inet_daddr, ntohs(inet->inet_dport),
					    inet->inet_num, tp->snd_una, tp->snd_nxt,
					    jiffies_to_msecs(jiffies - tp->rcv_tstamp),
					    rtx_delta);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			net_dbg_ratelimited("Probing zero-window on %pI6:%u/%u, seq=%u:%u, recv %ums ago, lasting %ums\n",
					    &sk->sk_v6_daddr, ntohs(inet->inet_dport),
					    inet->inet_num, tp->snd_una, tp->snd_nxt,
					    jiffies_to_msecs(jiffies - tp->rcv_tstamp),
					    rtx_delta);
		}
#endif
		if (tcp_rtx_probe0_timed_out(sk, skb, rtx_delta)) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk);
		tcp_retransmit_skb(sk, skb, 1);
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx = 0;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		}
		if (mib_idx)
			__NET_INC_STATS(sock_net(sk), mib_idx);
	}

	tcp_enter_loss(sk);

	tcp_update_rto_stats(sk);
	if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
		/* Retransmission failed because of local congestion.
		 * Let senders fight for local resources conservatively.
		 */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  TCP_RESOURCE_PROBE_INTERVAL,
					  TCP_RTO_MAX);
		goto out;
	}

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
	icsk->icsk_backoff++;

out_reset_timer:
	/* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
	 * used to reset the timer, set it to 0. Recalculate 'icsk_rto' as it
	 * might be increased if the stream oscillates between thin and thick;
	 * the old value might already be too high compared to the value
	 * set by 'tcp_set_rto' in tcp_input.c, which resets the rto without
	 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
	 * exponential backoff behaviour to avoid continuing to hammer
	 * linear-timeout retransmissions into a black hole.
	 */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || READ_ONCE(net->ipv4.sysctl_tcp_thin_linear_timeouts)) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = clamp(__tcp_set_rto(tp),
				       tcp_rto_min(sk),
				       TCP_RTO_MAX);
	} else if (sk->sk_state != TCP_SYN_SENT ||
		   icsk->icsk_backoff >
		   READ_ONCE(net->ipv4.sysctl_tcp_syn_linear_timeouts)) {
		/* Use normal (exponential) backoff unless linear timeouts are
		 * activated.
		 */
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX);
	if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1) + 1, 0))
		__sk_dst_reset(sk);

out:;
}
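
/* For illustration: the thin-stream linear timeouts above are opt-in, either
 * system-wide or per socket (userspace sketch, not part of this file):
 *
 *	# sysctl -w net.ipv4.tcp_thin_linear_timeouts=1
 *
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_TCP, TCP_THIN_LINEAR_TIMEOUTS, &on, sizeof(on));
 *
 * A thin stream (fewer than 4 packets in flight) then retries at a flat RTO
 * for the first TCP_THIN_LINEAR_RETRIES (6) retransmissions before the usual
 * exponential backoff kicks in.
 */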

/* Called with bottom-half processing disabled.
 * Called by tcp_write_timer().
 */
void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !icsk->icsk_pending)
		return;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		return;
	}

	tcp_mstamp_refresh(tcp_sk(sk));
	event = icsk->icsk_pending;

	switch (event) {
	case ICSK_TIME_REO_TIMEOUT:
		tcp_rack_reo_timeout(sk);
		break;
	case ICSK_TIME_LOSS_PROBE:
		tcp_send_loss_probe(sk);
		break;
	case ICSK_TIME_RETRANS:
		icsk->icsk_pending = 0;
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		icsk->icsk_pending = 0;
		tcp_probe_timer(sk);
		break;
	}
}

static void tcp_write_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_retransmit_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

void tcp_syn_ack_timeout(const struct request_sock *req)
{
	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

	__NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}
EXPORT_SYMBOL_GPL(tcp_set_keepalive);
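
/* For illustration: this path is reached via the SO_KEEPALIVE socket option.
 * A typical userspace setup (sketch, error handling omitted):
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
 *	int idle = 60, intvl = 10, cnt = 5;	// all in seconds / counts
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
 *
 * i.e. probe after 60s of idleness, every 10s, and abort after 5 unanswered
 * probes; the timer below implements exactly that schedule.
 */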

static void tcp_keepalive_timer(struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer(sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		pr_err("Hmm... keepalive on a LISTEN ???\n");
		goto out;
	}

	tcp_mstamp_refresh(tp);
	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (READ_ONCE(tp->linger2) >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || !tcp_write_queue_empty(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);

		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to timeout instead.
		 */
		if ((user_timeout != 0 &&
		    elapsed >= msecs_to_jiffies(user_timeout) &&
		    icsk->icsk_probes_out > 0) ||
		    (user_timeout == 0 &&
		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

resched:
	inet_csk_reset_keepalive_timer(sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
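
/* Worked example of the schedule above (defaults: tcp_keepalive_time = 7200s,
 * tcp_keepalive_intvl = 75s, tcp_keepalive_probes = 9): after 2 hours of
 * silence the first probe goes out; unanswered probes then repeat every 75s,
 * and once the 9th one fails (~7200 + 9*75 = 7875s in total) the connection
 * is reset and reported as ETIMEDOUT via tcp_write_err().
 */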

static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
{
	struct tcp_sock *tp = container_of(timer, struct tcp_sock, compressed_ack_timer);
	struct sock *sk = (struct sock *)tp;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		if (tp->compressed_ack) {
			/* Since we have to send one ack finally,
			 * subtract one from tp->compressed_ack to keep
			 * LINUX_MIB_TCPACKCOMPRESSED accurate.
			 */
			tp->compressed_ack--;
			tcp_send_ack(sk);
		}
	} else {
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
				      &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return HRTIMER_NORESTART;
}

void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
	hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_PINNED_SOFT);
	tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;

	hrtimer_init(&tcp_sk(sk)->compressed_ack_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_PINNED_SOFT);
	tcp_sk(sk)->compressed_ack_timer.function = tcp_compressed_ack_kick;
}
867 | |