1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * net/dccp/timer.c |
4 | * |
5 | * An implementation of the DCCP protocol |
6 | * Arnaldo Carvalho de Melo <acme@conectiva.com.br> |
7 | */ |
8 | |
9 | #include <linux/dccp.h> |
10 | #include <linux/skbuff.h> |
11 | #include <linux/export.h> |
12 | |
13 | #include "dccp.h" |
14 | |
/* sysctl variables governing numbers of retransmission attempts */
int sysctl_dccp_request_retries __read_mostly = TCP_SYN_RETRIES;	/* max DCCP-Request retransmits (client handshake) */
int sysctl_dccp_retries1 __read_mostly = TCP_RETR1;	/* retransmits before the route is re-checked */
int sysctl_dccp_retries2 __read_mostly = TCP_RETR2;	/* retransmits before the connection is aborted */
19 | |
20 | static void dccp_write_err(struct sock *sk) |
21 | { |
22 | sk->sk_err = READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT; |
23 | sk_error_report(sk); |
24 | |
25 | dccp_send_reset(sk, code: DCCP_RESET_CODE_ABORTED); |
26 | dccp_done(sk); |
27 | __DCCP_INC_STATS(DCCP_MIB_ABORTONTIMEOUT); |
28 | } |
29 | |
30 | /* A write timeout has occurred. Process the after effects. */ |
31 | static int dccp_write_timeout(struct sock *sk) |
32 | { |
33 | const struct inet_connection_sock *icsk = inet_csk(sk); |
34 | int retry_until; |
35 | |
36 | if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) { |
37 | if (icsk->icsk_retransmits != 0) |
38 | dst_negative_advice(sk); |
39 | retry_until = icsk->icsk_syn_retries ? |
40 | : sysctl_dccp_request_retries; |
41 | } else { |
42 | if (icsk->icsk_retransmits >= sysctl_dccp_retries1) { |
43 | /* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu |
44 | black hole detection. :-( |
45 | |
46 | It is place to make it. It is not made. I do not want |
47 | to make it. It is disguisting. It does not work in any |
48 | case. Let me to cite the same draft, which requires for |
49 | us to implement this: |
50 | |
51 | "The one security concern raised by this memo is that ICMP black holes |
52 | are often caused by over-zealous security administrators who block |
53 | all ICMP messages. It is vitally important that those who design and |
54 | deploy security systems understand the impact of strict filtering on |
55 | upper-layer protocols. The safest web site in the world is worthless |
56 | if most TCP implementations cannot transfer data from it. It would |
57 | be far nicer to have all of the black holes fixed rather than fixing |
58 | all of the TCP implementations." |
59 | |
60 | Golden words :-). |
61 | */ |
62 | |
63 | dst_negative_advice(sk); |
64 | } |
65 | |
66 | retry_until = sysctl_dccp_retries2; |
67 | /* |
68 | * FIXME: see tcp_write_timout and tcp_out_of_resources |
69 | */ |
70 | } |
71 | |
72 | if (icsk->icsk_retransmits >= retry_until) { |
73 | /* Has it gone just too far? */ |
74 | dccp_write_err(sk); |
75 | return 1; |
76 | } |
77 | return 0; |
78 | } |
79 | |
80 | /* |
81 | * The DCCP retransmit timer. |
82 | */ |
83 | static void dccp_retransmit_timer(struct sock *sk) |
84 | { |
85 | struct inet_connection_sock *icsk = inet_csk(sk); |
86 | |
87 | /* |
88 | * More than 4MSL (8 minutes) has passed, a RESET(aborted) was |
89 | * sent, no need to retransmit, this sock is dead. |
90 | */ |
91 | if (dccp_write_timeout(sk)) |
92 | return; |
93 | |
94 | /* |
95 | * We want to know the number of packets retransmitted, not the |
96 | * total number of retransmissions of clones of original packets. |
97 | */ |
98 | if (icsk->icsk_retransmits == 0) |
99 | __DCCP_INC_STATS(DCCP_MIB_TIMEOUTS); |
100 | |
101 | if (dccp_retransmit_skb(sk) != 0) { |
102 | /* |
103 | * Retransmission failed because of local congestion, |
104 | * do not backoff. |
105 | */ |
106 | if (--icsk->icsk_retransmits == 0) |
107 | icsk->icsk_retransmits = 1; |
108 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, |
109 | min(icsk->icsk_rto, |
110 | TCP_RESOURCE_PROBE_INTERVAL), |
111 | DCCP_RTO_MAX); |
112 | return; |
113 | } |
114 | |
115 | icsk->icsk_backoff++; |
116 | |
117 | icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX); |
118 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, when: icsk->icsk_rto, |
119 | DCCP_RTO_MAX); |
120 | if (icsk->icsk_retransmits > sysctl_dccp_retries1) |
121 | __sk_dst_reset(sk); |
122 | } |
123 | |
124 | static void dccp_write_timer(struct timer_list *t) |
125 | { |
126 | struct inet_connection_sock *icsk = |
127 | from_timer(icsk, t, icsk_retransmit_timer); |
128 | struct sock *sk = &icsk->icsk_inet.sk; |
129 | int event = 0; |
130 | |
131 | bh_lock_sock(sk); |
132 | if (sock_owned_by_user(sk)) { |
133 | /* Try again later */ |
134 | sk_reset_timer(sk, timer: &icsk->icsk_retransmit_timer, |
135 | expires: jiffies + (HZ / 20)); |
136 | goto out; |
137 | } |
138 | |
139 | if (sk->sk_state == DCCP_CLOSED || !icsk->icsk_pending) |
140 | goto out; |
141 | |
142 | if (time_after(icsk->icsk_timeout, jiffies)) { |
143 | sk_reset_timer(sk, timer: &icsk->icsk_retransmit_timer, |
144 | expires: icsk->icsk_timeout); |
145 | goto out; |
146 | } |
147 | |
148 | event = icsk->icsk_pending; |
149 | icsk->icsk_pending = 0; |
150 | |
151 | switch (event) { |
152 | case ICSK_TIME_RETRANS: |
153 | dccp_retransmit_timer(sk); |
154 | break; |
155 | } |
156 | out: |
157 | bh_unlock_sock(sk); |
158 | sock_put(sk); |
159 | } |
160 | |
161 | static void dccp_keepalive_timer(struct timer_list *t) |
162 | { |
163 | struct sock *sk = from_timer(sk, t, sk_timer); |
164 | |
165 | pr_err("dccp should not use a keepalive timer !\n" ); |
166 | sock_put(sk); |
167 | } |
168 | |
169 | /* This is the same as tcp_delack_timer, sans prequeue & mem_reclaim stuff */ |
170 | static void dccp_delack_timer(struct timer_list *t) |
171 | { |
172 | struct inet_connection_sock *icsk = |
173 | from_timer(icsk, t, icsk_delack_timer); |
174 | struct sock *sk = &icsk->icsk_inet.sk; |
175 | |
176 | bh_lock_sock(sk); |
177 | if (sock_owned_by_user(sk)) { |
178 | /* Try again later. */ |
179 | __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED); |
180 | sk_reset_timer(sk, timer: &icsk->icsk_delack_timer, |
181 | expires: jiffies + TCP_DELACK_MIN); |
182 | goto out; |
183 | } |
184 | |
185 | if (sk->sk_state == DCCP_CLOSED || |
186 | !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) |
187 | goto out; |
188 | if (time_after(icsk->icsk_ack.timeout, jiffies)) { |
189 | sk_reset_timer(sk, timer: &icsk->icsk_delack_timer, |
190 | expires: icsk->icsk_ack.timeout); |
191 | goto out; |
192 | } |
193 | |
194 | icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER; |
195 | |
196 | if (inet_csk_ack_scheduled(sk)) { |
197 | if (!inet_csk_in_pingpong_mode(sk)) { |
198 | /* Delayed ACK missed: inflate ATO. */ |
199 | icsk->icsk_ack.ato = min_t(u32, icsk->icsk_ack.ato << 1, |
200 | icsk->icsk_rto); |
201 | } else { |
202 | /* Delayed ACK missed: leave pingpong mode and |
203 | * deflate ATO. |
204 | */ |
205 | inet_csk_exit_pingpong_mode(sk); |
206 | icsk->icsk_ack.ato = TCP_ATO_MIN; |
207 | } |
208 | dccp_send_ack(sk); |
209 | __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS); |
210 | } |
211 | out: |
212 | bh_unlock_sock(sk); |
213 | sock_put(sk); |
214 | } |
215 | |
216 | /** |
217 | * dccp_write_xmitlet - Workhorse for CCID packet dequeueing interface |
218 | * @t: pointer to the tasklet associated with this handler |
219 | * |
220 | * See the comments above %ccid_dequeueing_decision for supported modes. |
221 | */ |
222 | static void dccp_write_xmitlet(struct tasklet_struct *t) |
223 | { |
224 | struct dccp_sock *dp = from_tasklet(dp, t, dccps_xmitlet); |
225 | struct sock *sk = &dp->dccps_inet_connection.icsk_inet.sk; |
226 | |
227 | bh_lock_sock(sk); |
228 | if (sock_owned_by_user(sk)) |
229 | sk_reset_timer(sk, timer: &dccp_sk(sk)->dccps_xmit_timer, expires: jiffies + 1); |
230 | else |
231 | dccp_write_xmit(sk); |
232 | bh_unlock_sock(sk); |
233 | sock_put(sk); |
234 | } |
235 | |
236 | static void dccp_write_xmit_timer(struct timer_list *t) |
237 | { |
238 | struct dccp_sock *dp = from_timer(dp, t, dccps_xmit_timer); |
239 | |
240 | dccp_write_xmitlet(t: &dp->dccps_xmitlet); |
241 | } |
242 | |
243 | void dccp_init_xmit_timers(struct sock *sk) |
244 | { |
245 | struct dccp_sock *dp = dccp_sk(sk); |
246 | |
247 | tasklet_setup(t: &dp->dccps_xmitlet, callback: dccp_write_xmitlet); |
248 | timer_setup(&dp->dccps_xmit_timer, dccp_write_xmit_timer, 0); |
249 | inet_csk_init_xmit_timers(sk, retransmit_handler: &dccp_write_timer, delack_handler: &dccp_delack_timer, |
250 | keepalive_handler: &dccp_keepalive_timer); |
251 | } |
252 | |
253 | static ktime_t dccp_timestamp_seed; |
254 | /** |
255 | * dccp_timestamp - 10s of microseconds time source |
256 | * Returns the number of 10s of microseconds since loading DCCP. This is native |
257 | * DCCP time difference format (RFC 4340, sec. 13). |
258 | * Please note: This will wrap around about circa every 11.9 hours. |
259 | */ |
260 | u32 dccp_timestamp(void) |
261 | { |
262 | u64 delta = (u64)ktime_us_delta(later: ktime_get_real(), earlier: dccp_timestamp_seed); |
263 | |
264 | do_div(delta, 10); |
265 | return delta; |
266 | } |
267 | EXPORT_SYMBOL_GPL(dccp_timestamp); |
268 | |
/* Record the wall-clock origin used by dccp_timestamp(). Called once at boot. */
void __init dccp_timestamping_init(void)
{
	dccp_timestamp_seed = ktime_get_real();
}
273 | |