// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <net/tcp.h>
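
/* Install a randomly generated primary key for this netns on first use,
 * unless a Fast Open context has already been published.
 */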
void tcp_fastopen_init_key_once(struct net *net)
{
	u8 key[TCP_FASTOPEN_KEY_LENGTH];
	struct tcp_fastopen_context *ctxt;

	rcu_read_lock();
	ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
	if (ctxt) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	/* tcp_fastopen_reset_cipher publishes the new context
	 * atomically, so we allow this race to happen here.
	 *
	 * All call sites of tcp_fastopen_cookie_gen also check
	 * for a valid cookie, so this is an acceptable risk.
	 */
	get_random_bytes(key, sizeof(key));
	tcp_fastopen_reset_cipher(net, NULL, key, NULL);
}
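
/* RCU callback: free a retired Fast Open context. kfree_sensitive()
 * wipes the key material before releasing the memory.
 */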
static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
	struct tcp_fastopen_context *ctx =
		container_of(head, struct tcp_fastopen_context, rcu);

	kfree_sensitive(ctx);
}
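
/* Release the per-socket Fast Open context (if any); the actual free is
 * deferred until after an RCU grace period.
 */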
void tcp_fastopen_destroy_cipher(struct sock *sk)
{
	struct tcp_fastopen_context *ctx;

	ctx = rcu_dereference_protected(
		inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);
	if (ctx)
		call_rcu(&ctx->rcu, tcp_fastopen_ctx_free);
}
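
/* Detach and free the per-netns Fast Open context, e.g. on netns teardown. */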
void tcp_fastopen_ctx_destroy(struct net *net)
{
	struct tcp_fastopen_context *ctxt;

	ctxt = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, NULL);

	if (ctxt)
		call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
}
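
/* Install a new primary (and optional backup) key, either on a listener
 * socket (sk != NULL) or for the whole netns. Any previous context is
 * retired through call_rcu() so concurrent readers remain safe.
 */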
int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
			      void *primary_key, void *backup_key)
{
	struct tcp_fastopen_context *ctx, *octx;
	struct fastopen_queue *q;
	int err = 0;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	ctx->key[0].key[0] = get_unaligned_le64(primary_key);
	ctx->key[0].key[1] = get_unaligned_le64(primary_key + 8);
	if (backup_key) {
		ctx->key[1].key[0] = get_unaligned_le64(backup_key);
		ctx->key[1].key[1] = get_unaligned_le64(backup_key + 8);
		ctx->num = 2;
	} else {
		ctx->num = 1;
	}

	if (sk) {
		q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
		octx = xchg((__force struct tcp_fastopen_context **)&q->ctx, ctx);
	} else {
		octx = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, ctx);
	}

	if (octx)
		call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
out:
	return err;
}
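
/* Copy the currently installed keys into @key (two u64 words per key) and
 * return how many keys are configured (0, 1 or 2).
 */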
int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
			    u64 *key)
{
	struct tcp_fastopen_context *ctx;
	int n_keys = 0, i;

	rcu_read_lock();
	if (icsk)
		ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
	else
		ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
	if (ctx) {
		n_keys = tcp_fastopen_context_len(ctx);
		for (i = 0; i < n_keys; i++) {
			put_unaligned_le64(ctx->key[i].key[0], key + (i * 2));
			put_unaligned_le64(ctx->key[i].key[1], key + (i * 2) + 1);
		}
	}
	rcu_read_unlock();

	return n_keys;
}
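
/* Compute the cookie for one key: SipHash over the source and destination
 * addresses of the SYN (IPv4 or IPv6). Returns false for other families.
 */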
static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req,
					     struct sk_buff *syn,
					     const siphash_key_t *key,
					     struct tcp_fastopen_cookie *foc)
{
	BUILD_BUG_ON(TCP_FASTOPEN_COOKIE_SIZE != sizeof(u64));

	if (req->rsk_ops->family == AF_INET) {
		const struct iphdr *iph = ip_hdr(syn);

		foc->val[0] = cpu_to_le64(siphash(&iph->saddr,
					  sizeof(iph->saddr) +
					  sizeof(iph->daddr),
					  key));
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		return true;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if (req->rsk_ops->family == AF_INET6) {
		const struct ipv6hdr *ip6h = ipv6_hdr(syn);

		foc->val[0] = cpu_to_le64(siphash(&ip6h->saddr,
					  sizeof(ip6h->saddr) +
					  sizeof(ip6h->daddr),
					  key));
		foc->len = TCP_FASTOPEN_COOKIE_SIZE;
		return true;
	}
#endif
	return false;
}

/* Generate the fastopen cookie by applying SipHash to both the source and
 * destination addresses.
 */
static void tcp_fastopen_cookie_gen(struct sock *sk,
				    struct request_sock *req,
				    struct sk_buff *syn,
				    struct tcp_fastopen_cookie *foc)
{
	struct tcp_fastopen_context *ctx;

	rcu_read_lock();
	ctx = tcp_fastopen_get_ctx(sk);
	if (ctx)
		__tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[0], foc);
	rcu_read_unlock();
}

/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
 * queue this additional data / FIN.
 */
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
		return;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	skb_dst_drop(skb);
	/* segs_in has been initialized to 1 in tcp_create_openreq_child().
	 * Hence, reset segs_in to 0 before calling tcp_segs_in()
	 * to avoid double counting. Also, tcp_segs_in() expects
	 * skb->len to include the tcp_hdrlen. Hence, it should
	 * be called before __skb_pull().
	 */
	tp->segs_in = 0;
	tcp_segs_in(tp, skb);
	__skb_pull(skb, tcp_hdrlen(skb));
	sk_forced_mem_schedule(sk, skb->truesize);
	skb_set_owner_r(skb, sk);

	TCP_SKB_CB(skb)->seq++;
	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

	tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	tp->syn_data_acked = 1;

	/* u64_stats_update_begin(&tp->syncp) not needed here,
	 * as we certainly are not changing upper 32bit value (0)
	 */
	tp->bytes_received = skb->len;

	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
		tcp_fin(sk);
}

/* returns 0 - no key match, 1 for primary, 2 for backup */
static int tcp_fastopen_cookie_gen_check(struct sock *sk,
					 struct request_sock *req,
					 struct sk_buff *syn,
					 struct tcp_fastopen_cookie *orig,
					 struct tcp_fastopen_cookie *valid_foc)
{
	struct tcp_fastopen_cookie search_foc = { .len = -1 };
	struct tcp_fastopen_cookie *foc = valid_foc;
	struct tcp_fastopen_context *ctx;
	int i, ret = 0;

	rcu_read_lock();
	ctx = tcp_fastopen_get_ctx(sk);
	if (!ctx)
		goto out;
	for (i = 0; i < tcp_fastopen_context_len(ctx); i++) {
		__tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[i], foc);
		if (tcp_fastopen_cookie_match(foc, orig)) {
			ret = i + 1;
			goto out;
		}
		foc = &search_foc;
	}
out:
	rcu_read_unlock();
	return ret;
}
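
/* Build a full child socket directly from the SYN so that any data carried
 * in the SYN can be queued right away, and arm the retransmit timer so the
 * SYNACK can be retransmitted. The caller queues the child on the listener's
 * accept queue and sends the SYNACK.
 */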
static struct sock *tcp_fastopen_create_child(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req)
{
	struct tcp_sock *tp;
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	struct sock *child;
	bool own_req;

	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 NULL, &own_req);
	if (!child)
		return NULL;

	spin_lock(&queue->fastopenq.lock);
	queue->fastopenq.qlen++;
	spin_unlock(&queue->fastopenq.lock);

	/* Initialize the child socket. Have to fix some values to take
	 * into account the child is a Fast Open socket and is created
	 * only out of the bits carried in the SYN packet.
	 */
	tp = tcp_sk(child);

	rcu_assign_pointer(tp->fastopen_rsk, req);
	tcp_rsk(req)->tfo_listener = true;

	/* RFC1323: The window in SYN & SYN/ACK segments is never
	 * scaled. So correct it appropriately.
	 */
	tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
	tp->max_window = tp->snd_wnd;

	/* Activate the retrans timer so that SYNACK can be retransmitted.
	 * The request socket is not added to the ehash
	 * because it's been added to the accept queue directly.
	 */
	req->timeout = tcp_timeout_init(child);
	inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
				  req->timeout, TCP_RTO_MAX);

	refcount_set(&req->rsk_refcnt, 2);

	/* Now finish processing the fastopen child socket. */
	tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, skb);

	tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

	tcp_fastopen_add_skb(child, skb);

	tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
	tp->rcv_wup = tp->rcv_nxt;
	/* tcp_conn_request() is sending the SYNACK,
	 * and queues the child into listener accept queue.
	 */
	return child;
}
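
/* Check that the listener accepts Fast Open and that the pending-TFO queue
 * has room; if it is full, try to evict a request that has already seen a RST.
 */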
static bool tcp_fastopen_queue_check(struct sock *sk)
{
	struct fastopen_queue *fastopenq;
	int max_qlen;

	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validate the cookie, in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
	fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
	max_qlen = READ_ONCE(fastopenq->max_qlen);
	if (max_qlen == 0)
		return false;

	if (fastopenq->qlen >= max_qlen) {
		struct request_sock *req1;

		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
		if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
			__NET_INC_STATS(sock_net(sk),
					LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
			spin_unlock(&fastopenq->lock);
			return false;
		}
		fastopenq->rskq_rst_head = req1->dl_next;
		fastopenq->qlen--;
		spin_unlock(&fastopenq->lock);
		reqsk_put(req1);
	}
	return true;
}
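
/* True if no cookie is required: the sysctl, the socket's fastopen_no_cookie
 * flag, or the route metric allows cookie-less Fast Open.
 */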
static bool tcp_fastopen_no_cookie(const struct sock *sk,
				   const struct dst_entry *dst,
				   int flag)
{
	return (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) & flag) ||
	       tcp_sk(sk)->fastopen_no_cookie ||
	       (dst && dst_metric(dst, RTAX_FASTOPEN_NO_COOKIE));
}

/* Decides whether to perform Fast Open on the SYN and, if so, returns the
 * newly created child socket. The cookie (foc) may be updated and returned
 * to the client in the SYN-ACK later, e.g. for a Fast Open cookie request
 * (foc->len == 0).
 */
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      const struct dst_entry *dst)
{
	bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
	int tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
	struct sock *child;
	int ret = 0;

	if (foc->len == 0) /* Client requests a cookie */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

	if (!((tcp_fastopen & TFO_SERVER_ENABLE) &&
	      (syn_data || foc->len >= 0) &&
	      tcp_fastopen_queue_check(sk))) {
		foc->len = -1;
		return NULL;
	}

	if (tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
		goto fastopen;

	if (foc->len == 0) {
		/* Client requests a cookie. */
		tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc);
	} else if (foc->len > 0) {
		ret = tcp_fastopen_cookie_gen_check(sk, req, skb, foc,
						    &valid_foc);
		if (!ret) {
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
		} else {
			/* Cookie is valid. Create a (full) child socket to
			 * accept the data in SYN before returning a SYN-ACK to
			 * ack the data. If we fail to create the socket, fall
			 * back and ack the ISN only but include the same
			 * cookie.
			 *
			 * Note: Data-less SYN with valid cookie is allowed to
			 * send data in SYN_RECV state.
			 */
fastopen:
			child = tcp_fastopen_create_child(sk, skb, req);
			if (child) {
				if (ret == 2) {
					valid_foc.exp = foc->exp;
					*foc = valid_foc;
					NET_INC_STATS(sock_net(sk),
						      LINUX_MIB_TCPFASTOPENPASSIVEALTKEY);
				} else {
					foc->len = -1;
				}
				NET_INC_STATS(sock_net(sk),
					      LINUX_MIB_TCPFASTOPENPASSIVE);
				return child;
			}
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
		}
	}
	valid_foc.exp = foc->exp;
	*foc = valid_foc;
	return NULL;
}
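
/* Client side: fetch any cached cookie (and MSS) for this destination and
 * decide whether Fast Open should be attempted. Returns true if a SYN with
 * data may be sent, either using the cached cookie or, when no cookie is
 * required, with cookie->len set to -1.
 */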
bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
			       struct tcp_fastopen_cookie *cookie)
{
	const struct dst_entry *dst;

	tcp_fastopen_cache_get(sk, mss, cookie);

	/* Firewall blackhole issue check */
	if (tcp_fastopen_active_should_disable(sk)) {
		cookie->len = -1;
		return false;
	}

	dst = __sk_dst_get(sk);

	if (tcp_fastopen_no_cookie(sk, dst, TFO_CLIENT_NO_COOKIE)) {
		cookie->len = -1;
		return true;
	}
	if (cookie->len > 0)
		return true;
	tcp_sk(sk)->fastopen_client_fail = TFO_COOKIE_UNAVAILABLE;
	return false;
}

/* This function checks if we want to defer sending SYN until the first
 * write(). We defer under the following conditions:
 * 1. fastopen_connect sockopt is set
 * 2. we have a valid cookie
 * Return value: return true if we want to defer until application writes data
 *               return false if we want to send out SYN immediately
 */
bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
{
	struct tcp_fastopen_cookie cookie = { .len = 0 };
	struct tcp_sock *tp = tcp_sk(sk);
	u16 mss;

	if (tp->fastopen_connect && !tp->fastopen_req) {
		if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
			inet_set_bit(DEFER_CONNECT, sk);
			return true;
		}

		/* Alloc fastopen_req in order for FO option to be included
		 * in SYN
		 */
		tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
					   sk->sk_allocation);
		if (tp->fastopen_req)
			tp->fastopen_req->cookie = cookie;
		else
			*err = -ENOBUFS;
	}
	return false;
}
EXPORT_SYMBOL(tcp_fastopen_defer_connect);

/*
 * The following code block is to deal with middle box issues with TFO:
 * Middlebox firewall issues can potentially cause the server's data to be
 * blackholed after a successful 3WHS using TFO.
 * The proposed solution is to disable active TFO globally under the
 * following circumstances:
 * 1. client side TFO socket receives out of order FIN
 * 2. client side TFO socket receives out of order RST
 * 3. client side TFO socket has timed out three times consecutively during
 *    or after handshake
 * We disable active side TFO globally for 1hr at first. Then if it
 * happens again, we disable it for 2h, then 4h, 8h, ...
 * And we reset the timeout back to 1hr when we see a successful active
 * TFO connection with data exchanges.
 */

/* Disable active TFO and record the current jiffies and
 * tfo_active_disable_times
 */
void tcp_fastopen_active_disable(struct sock *sk)
{
	struct net *net = sock_net(sk);

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout))
		return;

	/* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */
	WRITE_ONCE(net->ipv4.tfo_active_disable_stamp, jiffies);

	/* Paired with smp_rmb() in tcp_fastopen_active_should_disable().
	 * We want net->ipv4.tfo_active_disable_stamp to be updated first.
	 */
	smp_mb__before_atomic();
	atomic_inc(&net->ipv4.tfo_active_disable_times);

	NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
}

/* Calculate the timeout for TFO active disable.
 * Return true if we are still in the active TFO disable period
 * Return false if timeout already expired and we should use active TFO
 */
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
	unsigned int tfo_bh_timeout =
		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout);
	unsigned long timeout;
	int tfo_da_times;
	int multiplier;

	if (!tfo_bh_timeout)
		return false;

	tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
	if (!tfo_da_times)
		return false;

	/* Paired with smp_mb__before_atomic() in tcp_fastopen_active_disable() */
	smp_rmb();

	/* Limit timeout to max: 2^6 * initial timeout */
	multiplier = 1 << min(tfo_da_times - 1, 6);

	/* Paired with the WRITE_ONCE() in tcp_fastopen_active_disable(). */
	timeout = READ_ONCE(sock_net(sk)->ipv4.tfo_active_disable_stamp) +
		  multiplier * tfo_bh_timeout * HZ;
	if (time_before(jiffies, timeout))
		return true;

	/* Mark check bit so we can check for successful active TFO
	 * condition and reset tfo_active_disable_times
	 */
	tcp_sk(sk)->syn_fastopen_ch = 1;
	return false;
}

/* Disable active TFO if FIN is the only packet in the ofo queue
 * and no data is received.
 * Also check if we can reset tfo_active_disable_times if data is
 * received successfully on a marked active TFO socket opened on
 * a non-loopback interface.
 */
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst;
	struct sk_buff *skb;

	if (!tp->syn_fastopen)
		return;

	if (!tp->data_segs_in) {
		skb = skb_rb_first(&tp->out_of_order_queue);
		if (skb && !skb_rb_next(skb)) {
			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
				tcp_fastopen_active_disable(sk);
				return;
			}
		}
	} else if (tp->syn_fastopen_ch &&
		   atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
		dst = sk_dst_get(sk);
		if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
			atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
		dst_release(dst);
	}
}
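
/* Called from the retransmit timer path: if a Fast Open connection keeps
 * timing out during or after the handshake, assume a middlebox is
 * blackholing it and pause active TFO for the whole netns.
 */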
void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired)
{
	u32 timeouts = inet_csk(sk)->icsk_retransmits;
	struct tcp_sock *tp = tcp_sk(sk);

	/* Broken middle-boxes may black-hole Fast Open connection during or
	 * even after the handshake. Be extremely conservative and pause
	 * Fast Open globally after hitting the third consecutive timeout or
	 * exceeding the configured timeout limit.
	 */
	if ((tp->syn_fastopen || tp->syn_data || tp->syn_data_acked) &&
	    (timeouts == 2 || (timeouts < 2 && expired))) {
		tcp_fastopen_active_disable(sk);
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
	}
}