// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions work with the state functions in sctp_sm_statefuns.c
 * to implement the state operations. These functions implement the
 * steps which require modifying existing data structures.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 *    Jon Grimm <jgrimm@austin.ibm.com>
 *    Hui Huang <hui.huang@nokia.com>
 *    Dajiang Zhang <dajiang.zhang@nokia.com>
 *    Daisy Chang <daisyc@us.ibm.com>
 *    Sridhar Samudrala <sri@us.ibm.com>
 *    Ardelle Fan <ardelle.fan@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/ip.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>

static int sctp_cmd_interpreter(enum sctp_event_type event_type,
				union sctp_subtype subtype,
				enum sctp_state state,
				struct sctp_endpoint *ep,
				struct sctp_association *asoc,
				void *event_arg,
				enum sctp_disposition status,
				struct sctp_cmd_seq *commands,
				gfp_t gfp);
static int sctp_side_effects(enum sctp_event_type event_type,
			     union sctp_subtype subtype,
			     enum sctp_state state,
			     struct sctp_endpoint *ep,
			     struct sctp_association **asoc,
			     void *event_arg,
			     enum sctp_disposition status,
			     struct sctp_cmd_seq *commands,
			     gfp_t gfp);

/********************************************************************
 * Helper functions
 ********************************************************************/

/* A helper function for delayed processing of INET ECN CE bit. */
static void sctp_do_ecn_ce_work(struct sctp_association *asoc,
				__u32 lowest_tsn)
{
	/* Save the TSN away for comparison when we receive CWR */

	asoc->last_ecne_tsn = lowest_tsn;
	asoc->need_ecne = 1;
}

/* Helper function for delayed processing of SCTP ECNE chunk. */
/* RFC 2960 Appendix A
 *
 * RFC 2481 details a specific bit for a sender to send in
 * the header of its next outbound TCP segment to indicate to
 * its peer that it has reduced its congestion window. This
 * is termed the CWR bit. For SCTP the same indication is made
 * by including the CWR chunk. This chunk contains one data
 * element, i.e. the TSN number that was sent in the ECNE chunk.
 * This element represents the lowest TSN number in the datagram
 * that was originally marked with the CE bit.
 */
static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc,
						__u32 lowest_tsn,
						struct sctp_chunk *chunk)
{
	struct sctp_chunk *repl;

	/* Our previously transmitted packet ran into some congestion
	 * so we should take action by reducing cwnd and ssthresh
	 * and then ACK our peer that we've done so by
	 * sending a CWR.
	 */

	/* First, try to determine if we want to actually lower
	 * our cwnd variables. Only lower them if the ECNE looks more
	 * recent than the last response.
	 */
	if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) {
		struct sctp_transport *transport;

		/* Find which transport's congestion variables
		 * need to be adjusted.
		 */
		transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn);

		/* Update the congestion variables. */
		if (transport)
			sctp_transport_lower_cwnd(transport,
						  SCTP_LOWER_CWND_ECNE);
		asoc->last_cwr_tsn = lowest_tsn;
	}

	/* Always try to quiet the other end. In case of lost CWR,
	 * resend last_cwr_tsn.
	 */
	repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk);

	/* If we run out of memory, it will look like a lost CWR. We'll
	 * get back in sync eventually.
	 */
	return repl;
}

/* Helper function to do delayed processing of ECN CWR chunk. */
static void sctp_do_ecn_cwr_work(struct sctp_association *asoc,
				 __u32 lowest_tsn)
{
	/* Turn off ECNE getting auto-prepended to every outgoing
	 * packet
	 */
	asoc->need_ecne = 0;
}

/* Generate SACK if necessary. We call this at the end of a packet. */
static int sctp_gen_sack(struct sctp_association *asoc, int force,
			 struct sctp_cmd_seq *commands)
{
	struct sctp_transport *trans = asoc->peer.last_data_from;
	__u32 ctsn, max_tsn_seen;
	struct sctp_chunk *sack;
	int error = 0;

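	/* A SACK is sent out immediately when the caller forces it or when
	 * delayed SACKs are disabled for this association or transport;
	 * otherwise the delayed-SACK logic below decides.
	 */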
	if (force ||
	    (!trans && (asoc->param_flags & SPP_SACKDELAY_DISABLE)) ||
	    (trans && (trans->param_flags & SPP_SACKDELAY_DISABLE)))
		asoc->peer.sack_needed = 1;

	ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);

	/* From 12.2 Parameters necessary per association (i.e. the TCB):
	 *
	 * Ack State : This flag indicates if the next received packet
	 *           : is to be responded to with a SACK. ...
	 *           : When DATA chunks are out of order, SACK's
	 *           : are not delayed (see Section 6).
	 *
	 * [This is actually not mentioned in Section 6, but we
	 * implement it here anyway. --piggy]
	 */
	if (max_tsn_seen != ctsn)
		asoc->peer.sack_needed = 1;

	/* From 6.2 Acknowledgement on Reception of DATA Chunks:
	 *
	 * Section 4.2 of [RFC2581] SHOULD be followed. Specifically,
	 * an acknowledgement SHOULD be generated for at least every
	 * second packet (not every second DATA chunk) received, and
	 * SHOULD be generated within 200 ms of the arrival of any
	 * unacknowledged DATA chunk. ...
	 */
	if (!asoc->peer.sack_needed) {
		asoc->peer.sack_cnt++;

		/* Set the SACK delay timeout based on the
		 * SACK delay for the last transport
		 * data was received from, or the default
		 * for the association.
		 */
		if (trans) {
			/* We will need a SACK for the next packet. */
			if (asoc->peer.sack_cnt >= trans->sackfreq - 1)
				asoc->peer.sack_needed = 1;

			asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
				trans->sackdelay;
		} else {
			/* We will need a SACK for the next packet. */
			if (asoc->peer.sack_cnt >= asoc->sackfreq - 1)
				asoc->peer.sack_needed = 1;

			asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
				asoc->sackdelay;
		}

		/* Restart the SACK timer. */
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
	} else {
		__u32 old_a_rwnd = asoc->a_rwnd;

		asoc->a_rwnd = asoc->rwnd;
		sack = sctp_make_sack(asoc);
		if (!sack) {
			asoc->a_rwnd = old_a_rwnd;
			goto nomem;
		}

		asoc->peer.sack_needed = 0;
		asoc->peer.sack_cnt = 0;

		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack));

		/* Stop the SACK timer. */
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
	}

	return error;
nomem:
	error = -ENOMEM;
	return error;
}

/* When the T3-RTX timer expires, it calls this function to create the
 * relevant state machine event.
 */
void sctp_generate_t3_rtx_event(struct timer_list *t)
{
	struct sctp_transport *transport =
		from_timer(transport, t, T3_rtx_timer);
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error;

	/* Check whether a task is in the sock. */

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later. */
		if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Run through the state machine. */
	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
			   asoc->state,
			   asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* This is an interface for producing timeout events. It works
 * for timeouts which use the association as their parameter.
 */
static void sctp_generate_timeout_event(struct sctp_association *asoc,
					enum sctp_event_timeout timeout_type)
{
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy: timer %d\n", __func__,
			 timeout_type);

		/* Try again later. */
		if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20)))
			sctp_association_hold(asoc);
		goto out_unlock;
	}

	/* Is this association really dead and just waiting around for
	 * the timer to let go of the reference?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	/* Run through the state machine. */
	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(timeout_type),
			   asoc->state, asoc->ep, asoc,
			   (void *)timeout_type, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_association_put(asoc);
}

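/* Thin per-timeout wrappers: each recovers the association from the
 * expired timer and injects the matching timeout event.
 */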
static void sctp_generate_t1_cookie_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T1_COOKIE]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE);
}

static void sctp_generate_t1_init_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T1_INIT]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT);
}

static void sctp_generate_t2_shutdown_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN);
}

static void sctp_generate_t4_rto_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T4_RTO]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO);
}

static void sctp_generate_t5_shutdown_guard_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t,
			   timers[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]);

	sctp_generate_timeout_event(asoc,
				    SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD);

} /* sctp_generate_t5_shutdown_guard_event() */

static void sctp_generate_autoclose_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE);
}

/* Generate a heartbeat event. If the sock is busy, reschedule. Make
 * sure that the transport is still valid.
 */
void sctp_generate_heartbeat_event(struct timer_list *t)
{
	struct sctp_transport *transport = from_timer(transport, t, hb_timer);
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	u32 elapsed, timeout;
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later. */
		if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Check if we should still send the heartbeat or reschedule */
	elapsed = jiffies - transport->last_time_sent;
	timeout = sctp_transport_timeout(transport);
	if (elapsed < timeout) {
		elapsed = timeout - elapsed;
		if (!mod_timer(&transport->hb_timer, jiffies + elapsed))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* Handle the timeout of the ICMP protocol unreachable timer. Trigger
 * the correct state machine transition that will close the association.
 */
void sctp_generate_proto_unreach_event(struct timer_list *t)
{
	struct sctp_transport *transport =
		from_timer(transport, t, proto_unreach_timer);
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later. */
		if (!mod_timer(&transport->proto_unreach_timer,
			       jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Is this structure just waiting around for us to actually
	 * get destroyed?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	sctp_do_sm(net, SCTP_EVENT_T_OTHER,
		   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
		   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* Handle the timeout of the RE-CONFIG timer. */
void sctp_generate_reconf_event(struct timer_list *t)
{
	struct sctp_transport *transport =
		from_timer(transport, t, reconf_timer);
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later. */
		if (!mod_timer(&transport->reconf_timer, jiffies + (HZ / 20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* This happens when the response arrives after the timer is triggered. */
	if (!asoc->strreset_chunk)
		goto out_unlock;

	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_RECONF),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* Handle the timeout of the probe timer. */
void sctp_generate_probe_event(struct timer_list *t)
{
	struct sctp_transport *transport = from_timer(transport, t, probe_timer);
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later. */
		if (!mod_timer(&transport->probe_timer, jiffies + (HZ / 20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_PROBE),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* Inject a SACK Timeout event into the state machine. */
static void sctp_generate_sack_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_SACK]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
}

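/* Dispatch table mapping association timeout types to timer callbacks.
 * The NULL entries (T3-RTX, HEARTBEAT, RECONF) correspond to
 * per-transport timers whose handlers are armed on the transport itself.
 */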
sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
	[SCTP_EVENT_TIMEOUT_NONE] =		NULL,
	[SCTP_EVENT_TIMEOUT_T1_COOKIE] =	sctp_generate_t1_cookie_event,
	[SCTP_EVENT_TIMEOUT_T1_INIT] =		sctp_generate_t1_init_event,
	[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] =	sctp_generate_t2_shutdown_event,
	[SCTP_EVENT_TIMEOUT_T3_RTX] =		NULL,
	[SCTP_EVENT_TIMEOUT_T4_RTO] =		sctp_generate_t4_rto_event,
	[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD] =
					sctp_generate_t5_shutdown_guard_event,
	[SCTP_EVENT_TIMEOUT_HEARTBEAT] =	NULL,
	[SCTP_EVENT_TIMEOUT_RECONF] =		NULL,
	[SCTP_EVENT_TIMEOUT_SACK] =		sctp_generate_sack_event,
	[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =	sctp_generate_autoclose_event,
};


/* RFC 2960 8.2 Path Failure Detection
 *
 * When its peer endpoint is multi-homed, an endpoint should keep an
 * error counter for each of the destination transport addresses of the
 * peer endpoint.
 *
 * Each time the T3-rtx timer expires on any address, or when a
 * HEARTBEAT sent to an idle address is not acknowledged within a RTO,
 * the error counter of that destination address will be incremented.
 * When the value in the error counter exceeds the protocol parameter
 * 'Path.Max.Retrans' of that destination address, the endpoint should
 * mark the destination transport address as inactive, and a
 * notification SHOULD be sent to the upper layer.
 *
 */
static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
					 struct sctp_association *asoc,
					 struct sctp_transport *transport,
					 int is_hb)
{
	/* The check for association's overall error counter exceeding the
	 * threshold is done in the state function.
	 */
	/* We are here due to a timer expiration. If the timer was
	 * not a HEARTBEAT, then normal error tracking is done.
	 * If the timer was a heartbeat, we only increment error counts
	 * when we already have an outstanding HEARTBEAT that has not
	 * been acknowledged.
	 * Additionally, some transport states inhibit error increments.
	 */
	if (!is_hb) {
		asoc->overall_error_count++;
		if (transport->state != SCTP_INACTIVE)
			transport->error_count++;
	} else if (transport->hb_sent) {
		if (transport->state != SCTP_UNCONFIRMED)
			asoc->overall_error_count++;
		if (transport->state != SCTP_INACTIVE)
			transport->error_count++;
	}

	/* If the transport error count is greater than the pf_retrans
	 * threshold, and less than pathmaxrxt, and if the current state
	 * is SCTP_ACTIVE, then mark this transport as Partially Failed,
	 * see SCTP Quick Failover Draft, section 5.1
	 */
	if (asoc->base.net->sctp.pf_enable &&
	    transport->state == SCTP_ACTIVE &&
	    transport->error_count < transport->pathmaxrxt &&
	    transport->error_count > transport->pf_retrans) {

		sctp_assoc_control_transport(asoc, transport,
					     SCTP_TRANSPORT_PF,
					     0);

		/* Update the hb timer to resend a heartbeat every rto */
		sctp_transport_reset_hb_timer(transport);
	}

	if (transport->state != SCTP_INACTIVE &&
	    (transport->error_count > transport->pathmaxrxt)) {
		pr_debug("%s: association:%p transport addr:%pISpc failed\n",
			 __func__, asoc, &transport->ipaddr.sa);

		sctp_assoc_control_transport(asoc, transport,
					     SCTP_TRANSPORT_DOWN,
					     SCTP_FAILED_THRESHOLD);
	}

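	/* If the primary path keeps failing beyond ps_retrans while another
	 * path is already active, make the active path the new primary;
	 * this follows the Primary Path Switchover behaviour of SCTP-PF
	 * (RFC 7829).
	 */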
	if (transport->error_count > transport->ps_retrans &&
	    asoc->peer.primary_path == transport &&
	    asoc->peer.active_path != transport)
		sctp_assoc_set_primary(asoc, asoc->peer.active_path);

	/* E2) For the destination address for which the timer
	 * expires, set RTO <- RTO * 2 ("back off the timer"). The
	 * maximum value discussed in rule C7 above (RTO.max) may be
	 * used to provide an upper bound to this doubling operation.
	 *
	 * Special Case: the first HB doesn't trigger exponential backoff.
	 * The first unacknowledged HB triggers it. We do this with a flag
	 * that indicates that we have an outstanding HB.
	 */
	if (!is_hb || transport->hb_sent) {
		transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
		sctp_max_rto(asoc, transport);
	}
}

/* Worker routine to handle INIT command failure. */
static void sctp_cmd_init_failed(struct sctp_cmd_seq *commands,
				 struct sctp_association *asoc,
				 unsigned int error)
{
	struct sctp_ulpevent *event;

	event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC,
						(__u16)error, 0, 0, NULL,
						GFP_ATOMIC);

	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	/* SEND_FAILED sent later when cleaning up the association. */
	asoc->outqueue.error = error;
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}

/* Worker routine to handle SCTP_CMD_ASSOC_FAILED. */
static void sctp_cmd_assoc_failed(struct sctp_cmd_seq *commands,
				  struct sctp_association *asoc,
				  enum sctp_event_type event_type,
				  union sctp_subtype subtype,
				  struct sctp_chunk *chunk,
				  unsigned int error)
{
	struct sctp_ulpevent *event;
	struct sctp_chunk *abort;

	/* Cancel any partial delivery in progress. */
	asoc->stream.si->abort_pd(&asoc->ulpq, GFP_ATOMIC);

	if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT)
		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
						(__u16)error, 0, 0, chunk,
						GFP_ATOMIC);
	else
		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
						(__u16)error, 0, 0, NULL,
						GFP_ATOMIC);
	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	if (asoc->overall_error_count >= asoc->max_retrans) {
		abort = sctp_make_violation_max_retrans(asoc, chunk);
		if (abort)
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(abort));
	}

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	/* SEND_FAILED sent later when cleaning up the association. */
	asoc->outqueue.error = error;
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}

/* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT
 * inside the cookie). In reality, this is only used for INIT-ACK processing
 * since all other cases use "temporary" associations and can do all
 * their work in statefuns directly.
 */
static int sctp_cmd_process_init(struct sctp_cmd_seq *commands,
				 struct sctp_association *asoc,
				 struct sctp_chunk *chunk,
				 struct sctp_init_chunk *peer_init,
				 gfp_t gfp)
{
	int error;

	/* We only process the init as a side effect in a single
	 * case. This is when we process the INIT-ACK. If we
	 * fail during INIT processing (due to malloc problems),
	 * just return the error and stop processing the stack.
	 */
	if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp))
		error = -ENOMEM;
	else
		error = 0;

	return error;
}

/* Helper function to break out starting up of heartbeat timers. */
static void sctp_cmd_hb_timers_start(struct sctp_cmd_seq *cmds,
				     struct sctp_association *asoc)
{
	struct sctp_transport *t;

	/* Start a heartbeat timer for each transport on the association.
	 * hold a reference on the transport to make sure none of
	 * the needed data structures go away.
	 */
	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
		sctp_transport_reset_hb_timer(t);
}

static void sctp_cmd_hb_timers_stop(struct sctp_cmd_seq *cmds,
				    struct sctp_association *asoc)
{
	struct sctp_transport *t;

	/* Stop all heartbeat timers. */

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			    transports) {
		if (del_timer(&t->hb_timer))
			sctp_transport_put(t);
	}
}

/* Helper function to stop any pending T3-RTX timers */
static void sctp_cmd_t3_rtx_timers_stop(struct sctp_cmd_seq *cmds,
					struct sctp_association *asoc)
{
	struct sctp_transport *t;

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			    transports) {
		if (del_timer(&t->T3_rtx_timer))
			sctp_transport_put(t);
	}
}


/* Helper function to handle the reception of an HEARTBEAT ACK. */
static void sctp_cmd_transport_on(struct sctp_cmd_seq *cmds,
				  struct sctp_association *asoc,
				  struct sctp_transport *t,
				  struct sctp_chunk *chunk)
{
	struct sctp_sender_hb_info *hbinfo;
	int was_unconfirmed = 0;

	/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
	 * HEARTBEAT should clear the error counter of the destination
	 * transport address to which the HEARTBEAT was sent.
	 */
	t->error_count = 0;

	/*
	 * Although RFC4960 specifies that the overall error count must
	 * be cleared when a HEARTBEAT ACK is received, we make an
	 * exception while in SHUTDOWN PENDING. If the peer keeps its
	 * window shut forever, we may never be able to transmit our
	 * outstanding data and rely on the retransmission limit being reached
	 * to shutdown the association.
	 */
	if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING)
		t->asoc->overall_error_count = 0;

	/* Clear the hb_sent flag to signal that we had a good
	 * acknowledgement.
	 */
	t->hb_sent = 0;

	/* Mark the destination transport address as active if it is not so
	 * marked.
	 */
	if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) {
		was_unconfirmed = 1;
		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
					     SCTP_HEARTBEAT_SUCCESS);
	}

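	/* A HEARTBEAT ACK on a path in the Partially Failed state likewise
	 * restores it to active.
	 */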
	if (t->state == SCTP_PF)
		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
					     SCTP_HEARTBEAT_SUCCESS);

	/* HB-ACK was received for the proper HB. Consider this
	 * forward progress.
	 */
	if (t->dst)
		sctp_transport_dst_confirm(t);

	/* The receiver of the HEARTBEAT ACK should also perform an
	 * RTT measurement for that destination transport address
	 * using the time value carried in the HEARTBEAT ACK chunk.
	 * If the transport's rto_pending variable has been cleared,
	 * it was most likely due to a retransmit. However, we want
	 * to re-enable it to properly update the rto.
	 */
	if (t->rto_pending == 0)
		t->rto_pending = 1;

	hbinfo = (struct sctp_sender_hb_info *)chunk->skb->data;
	sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));

	/* Update the heartbeat timer. */
	sctp_transport_reset_hb_timer(t);

	if (was_unconfirmed && asoc->peer.transport_count == 1)
		sctp_transport_immediate_rtx(t);
}


/* Helper function to process the SACK command. */
static int sctp_cmd_process_sack(struct sctp_cmd_seq *cmds,
				 struct sctp_association *asoc,
				 struct sctp_chunk *chunk)
{
	int err = 0;

	if (sctp_outq_sack(&asoc->outqueue, chunk)) {
		/* There are no more TSNs awaiting SACK. */
		err = sctp_do_sm(asoc->base.net, SCTP_EVENT_T_OTHER,
				 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
				 asoc->state, asoc->ep, asoc, NULL,
				 GFP_ATOMIC);
	}

	return err;
}

/* Helper function to set the timeout value for T2-SHUTDOWN timer and to set
 * the transport for a shutdown chunk.
 */
static void sctp_cmd_setup_t2(struct sctp_cmd_seq *cmds,
			      struct sctp_association *asoc,
			      struct sctp_chunk *chunk)
{
	struct sctp_transport *t;

	if (chunk->transport)
		t = chunk->transport;
	else {
		t = sctp_assoc_choose_alter_transport(asoc,
					      asoc->shutdown_last_sent_to);
		chunk->transport = t;
	}
	asoc->shutdown_last_sent_to = t;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
}

/* Helper function to change the state of an association. */
static void sctp_cmd_new_state(struct sctp_cmd_seq *cmds,
			       struct sctp_association *asoc,
			       enum sctp_state state)
{
	struct sock *sk = asoc->base.sk;

	asoc->state = state;

	pr_debug("%s: asoc:%p[%s]\n", __func__, asoc, sctp_state_tbl[state]);

	if (sctp_style(sk, TCP)) {
		/* Change the sk->sk_state of a TCP-style socket that has
		 * successfully completed a connect() call.
		 */
		if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
			inet_sk_set_state(sk, SCTP_SS_ESTABLISHED);

		/* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
		if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
		    sctp_sstate(sk, ESTABLISHED)) {
			inet_sk_set_state(sk, SCTP_SS_CLOSING);
			sk->sk_shutdown |= RCV_SHUTDOWN;
		}
	}

	if (sctp_state(asoc, COOKIE_WAIT)) {
		/* Reset init timeouts since they may have been
		 * increased due to timer expirations.
		 */
		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] =
						asoc->rto_initial;
		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] =
						asoc->rto_initial;
	}

	if (sctp_state(asoc, ESTABLISHED)) {
		kfree(asoc->peer.cookie);
		asoc->peer.cookie = NULL;
	}

	if (sctp_state(asoc, ESTABLISHED) ||
	    sctp_state(asoc, CLOSED) ||
	    sctp_state(asoc, SHUTDOWN_RECEIVED)) {
		/* Wake up any processes waiting in the asoc's wait queue in
		 * sctp_wait_for_connect() or sctp_wait_for_sndbuf().
		 */
		if (waitqueue_active(&asoc->wait))
			wake_up_interruptible(&asoc->wait);

		/* Wake up any processes waiting in the sk's sleep queue of
		 * a TCP-style or UDP-style peeled-off socket in
		 * sctp_wait_for_accept() or sctp_wait_for_packet().
		 * For a UDP-style socket, the waiters are woken up by the
		 * notifications.
		 */
		if (!sctp_style(sk, UDP))
			sk->sk_state_change(sk);
	}

	if (sctp_state(asoc, SHUTDOWN_PENDING) &&
	    !sctp_outq_is_empty(&asoc->outqueue))
		sctp_outq_uncork(&asoc->outqueue, GFP_ATOMIC);
}

/* Helper function to delete an association. */
static void sctp_cmd_delete_tcb(struct sctp_cmd_seq *cmds,
				struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;

	/* If it is a non-temporary association belonging to a TCP-style
	 * listening socket that is not closed, do not free it so that accept()
	 * can pick it up later.
	 */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) &&
	    (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
		return;

	sctp_association_free(asoc);
}

/*
 * ADDIP Section 4.1 ASCONF Chunk Procedures
 * A4) Start a T-4 RTO timer, using the RTO value of the selected
 * destination address (we use active path instead of primary path just
 * because primary path may be inactive).
 */
static void sctp_cmd_setup_t4(struct sctp_cmd_seq *cmds,
			      struct sctp_association *asoc,
			      struct sctp_chunk *chunk)
{
	struct sctp_transport *t;

	t = sctp_assoc_choose_alter_transport(asoc, chunk->transport);
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto;
	chunk->transport = t;
}

/* Process an incoming Operation Error Chunk. */
static void sctp_cmd_process_operr(struct sctp_cmd_seq *cmds,
				   struct sctp_association *asoc,
				   struct sctp_chunk *chunk)
{
	struct sctp_errhdr *err_hdr;
	struct sctp_ulpevent *ev;

	while (chunk->chunk_end > chunk->skb->data) {
		err_hdr = (struct sctp_errhdr *)(chunk->skb->data);

		ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
						     GFP_ATOMIC);
		if (!ev)
			return;

		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);

		switch (err_hdr->cause) {
		case SCTP_ERROR_UNKNOWN_CHUNK:
		{
			struct sctp_chunkhdr *unk_chunk_hdr;

			unk_chunk_hdr = (struct sctp_chunkhdr *)(err_hdr + 1);
			switch (unk_chunk_hdr->type) {
			/* ADDIP 4.1 A9) If the peer responds to an ASCONF with
			 * an ERROR chunk reporting that it did not recognize
			 * the ASCONF chunk type, the sender of the ASCONF MUST
			 * NOT send any further ASCONF chunks and MUST stop its
			 * T-4 timer.
			 */
			case SCTP_CID_ASCONF:
				if (asoc->peer.asconf_capable == 0)
					break;

				asoc->peer.asconf_capable = 0;
				sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
				break;
			default:
				break;
			}
			break;
		}
		default:
			break;
		}
	}
}

/* Helper function to remove the association non-primary peer
 * transports.
 */
static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
{
	struct sctp_transport *t;
	struct list_head *temp;
	struct list_head *pos;

	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		t = list_entry(pos, struct sctp_transport, transports);
		if (!sctp_cmp_addr_exact(&t->ipaddr,
					 &asoc->peer.primary_addr)) {
			sctp_assoc_rm_peer(asoc, t);
		}
	}
}

/* Helper function to set sk_err on a 1-1 style socket. */
static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
{
	struct sock *sk = asoc->base.sk;

	if (!sctp_style(sk, UDP))
		sk->sk_err = error;
}

/* Helper function to generate an association change event */
static void sctp_cmd_assoc_change(struct sctp_cmd_seq *commands,
				  struct sctp_association *asoc,
				  u8 state)
{
	struct sctp_ulpevent *ev;

	ev = sctp_ulpevent_make_assoc_change(asoc, 0, state, 0,
					     asoc->c.sinit_num_ostreams,
					     asoc->c.sinit_max_instreams,
					     NULL, GFP_ATOMIC);
	if (ev)
		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
}

static void sctp_cmd_peer_no_auth(struct sctp_cmd_seq *commands,
				  struct sctp_association *asoc)
{
	struct sctp_ulpevent *ev;

	ev = sctp_ulpevent_make_authkey(asoc, 0, SCTP_AUTH_NO_AUTH, GFP_ATOMIC);
	if (ev)
		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
}

/* Helper function to generate an adaptation indication event */
static void sctp_cmd_adaptation_ind(struct sctp_cmd_seq *commands,
				    struct sctp_association *asoc)
{
	struct sctp_ulpevent *ev;

	ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);

	if (ev)
		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
}


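/* Back off a T1 (INIT/COOKIE) timer: double the timeout, capped at
 * max_init_timeo, but only once every transport has been tried at the
 * current timeout (tracked via init_cycle).
 */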
static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
				     enum sctp_event_timeout timer,
				     char *name)
{
	struct sctp_transport *t;

	t = asoc->init_last_sent_to;
	asoc->init_err_counter++;

	if (t->init_sent_count > (asoc->init_cycle + 1)) {
		asoc->timeouts[timer] *= 2;
		if (asoc->timeouts[timer] > asoc->max_init_timeo) {
			asoc->timeouts[timer] = asoc->max_init_timeo;
		}
		asoc->init_cycle++;

		pr_debug("%s: T1[%s] timeout adjustment init_err_counter:%d"
			 " cycle:%d timeout:%ld\n", __func__, name,
			 asoc->init_err_counter, asoc->init_cycle,
			 asoc->timeouts[timer]);
	}

}

/* Send the whole message, chunk by chunk, to the outqueue.
 * This way the whole message is queued up and bundling is
 * encouraged for small fragments.
 */
static void sctp_cmd_send_msg(struct sctp_association *asoc,
			      struct sctp_datamsg *msg, gfp_t gfp)
{
	struct sctp_chunk *chunk;

	list_for_each_entry(chunk, &msg->chunks, frag_list)
		sctp_outq_tail(&asoc->outqueue, chunk, gfp);

	asoc->outqueue.sched->enqueue(&asoc->outqueue, msg);
}


/* These three macros allow us to pull the debugging code out of the
 * main flow of sctp_do_sm() to keep attention focused on the real
 * functionality there.
 */
#define debug_pre_sfn() \
	pr_debug("%s[pre-fn]: ep:%p, %s, %s, asoc:%p[%s], %s\n", __func__, \
		 ep, sctp_evttype_tbl[event_type], (*debug_fn)(subtype), \
		 asoc, sctp_state_tbl[state], state_fn->name)

#define debug_post_sfn() \
	pr_debug("%s[post-fn]: asoc:%p, status:%s\n", __func__, asoc, \
		 sctp_status_tbl[status])

#define debug_post_sfx() \
	pr_debug("%s[post-sfx]: error:%d, asoc:%p[%s]\n", __func__, error, \
		 asoc, sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
		 sctp_assoc2id(asoc))) ? asoc->state : SCTP_STATE_CLOSED])

/*
 * This is the master state machine processing function.
 *
 * If you want to understand all of lksctp, this is a
 * good place to start.
 */
int sctp_do_sm(struct net *net, enum sctp_event_type event_type,
	       union sctp_subtype subtype, enum sctp_state state,
	       struct sctp_endpoint *ep, struct sctp_association *asoc,
	       void *event_arg, gfp_t gfp)
{
	typedef const char *(printfn_t)(union sctp_subtype);
	static printfn_t *table[] = {
		NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname,
	};
	printfn_t *debug_fn __attribute__ ((unused)) = table[event_type];
	const struct sctp_sm_table_entry *state_fn;
	struct sctp_cmd_seq commands;
	enum sctp_disposition status;
	int error = 0;

	/* Look up the state function, run it, and then process the
	 * side effects. These three steps are the heart of lksctp.
	 */
	state_fn = sctp_sm_lookup_event(net, event_type, state, subtype);

	sctp_init_cmd_seq(&commands);

	debug_pre_sfn();
	status = state_fn->fn(net, ep, asoc, subtype, event_arg, &commands);
	debug_post_sfn();

	error = sctp_side_effects(event_type, subtype, state,
				  ep, &asoc, event_arg, status,
				  &commands, gfp);
	debug_post_sfx();

	return error;
}

/*****************************************************************
 * This is the master state function side effect processing function.
 *****************************************************************/
static int sctp_side_effects(enum sctp_event_type event_type,
			     union sctp_subtype subtype,
			     enum sctp_state state,
			     struct sctp_endpoint *ep,
			     struct sctp_association **asoc,
			     void *event_arg,
			     enum sctp_disposition status,
			     struct sctp_cmd_seq *commands,
			     gfp_t gfp)
{
	int error;

	/* FIXME - Most of the dispositions left today would be categorized
	 * as "exceptional" dispositions. For those dispositions, it
	 * may not be proper to run through any of the commands at all.
	 * For example, the command interpreter might be run only with
	 * disposition SCTP_DISPOSITION_CONSUME.
	 */
	if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state,
					       ep, *asoc,
					       event_arg, status,
					       commands, gfp)))
		goto bail;

	switch (status) {
	case SCTP_DISPOSITION_DISCARD:
		pr_debug("%s: ignored sctp protocol event - state:%d, "
			 "event_type:%d, event_id:%d\n", __func__, state,
			 event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOMEM:
		/* We ran out of memory, so we need to discard this
		 * packet.
		 */
		/* BUG--we should now recover some memory, probably by
		 * reneging...
		 */
		error = -ENOMEM;
		break;

	case SCTP_DISPOSITION_DELETE_TCB:
	case SCTP_DISPOSITION_ABORT:
		/* This should now be a command. */
		*asoc = NULL;
		break;

	case SCTP_DISPOSITION_CONSUME:
		/*
		 * We should no longer have much work to do here as the
		 * real work has been done as explicit commands above.
		 */
		break;

	case SCTP_DISPOSITION_VIOLATION:
		net_err_ratelimited("protocol violation state %d chunkid %d\n",
				    state, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOT_IMPL:
		pr_warn("unimplemented feature in state %d, event_type %d, event_id %d\n",
			state, event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_BUG:
		pr_err("bug in state %d, event_type %d, event_id %d\n",
		       state, event_type, subtype.chunk);
		BUG();
		break;

	default:
		pr_err("impossible disposition %d in state %d, event_type %d, event_id %d\n",
		       status, state, event_type, subtype.chunk);
		error = status;
		if (error >= 0)
			error = -EINVAL;
		WARN_ON_ONCE(1);
		break;
	}

bail:
	return error;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* This is the side-effect interpreter. */
static int sctp_cmd_interpreter(enum sctp_event_type event_type,
				union sctp_subtype subtype,
				enum sctp_state state,
				struct sctp_endpoint *ep,
				struct sctp_association *asoc,
				void *event_arg,
				enum sctp_disposition status,
				struct sctp_cmd_seq *commands,
				gfp_t gfp)
{
	struct sctp_sock *sp = sctp_sk(ep->base.sk);
	struct sctp_chunk *chunk = NULL, *new_obj;
	struct sctp_packet *packet;
	struct sctp_sackhdr sackh;
	struct timer_list *timer;
	struct sctp_transport *t;
	unsigned long timeout;
	struct sctp_cmd *cmd;
	int local_cork = 0;
	int error = 0;
	int force;

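	/* For anything other than a timeout event, the event argument is
	 * the chunk being processed.
	 */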
	if (SCTP_EVENT_T_TIMEOUT != event_type)
		chunk = event_arg;

	/* Note: This whole file is a huge candidate for rework.
	 * For example, each command could either have its own handler, so
	 * the loop would look like:
	 *     while (cmds)
	 *         cmd->handle(x, y, z)
	 * --jgrimm
	 */
	while (NULL != (cmd = sctp_next_cmd(commands))) {
		switch (cmd->verb) {
		case SCTP_CMD_NOP:
			/* Do nothing. */
			break;

		case SCTP_CMD_NEW_ASOC:
			/* Register a new association. */
			if (local_cork) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}

			/* Register with the endpoint. */
			asoc = cmd->obj.asoc;
			BUG_ON(asoc->peer.primary_path == NULL);
			sctp_endpoint_add_asoc(ep, asoc);
			break;

		case SCTP_CMD_PURGE_OUTQUEUE:
			sctp_outq_teardown(&asoc->outqueue);
			break;

		case SCTP_CMD_DELETE_TCB:
			if (local_cork) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}
			/* Delete the current association. */
			sctp_cmd_delete_tcb(commands, asoc);
			asoc = NULL;
			break;

		case SCTP_CMD_NEW_STATE:
			/* Enter a new state. */
			sctp_cmd_new_state(commands, asoc, cmd->obj.state);
			break;

		case SCTP_CMD_REPORT_TSN:
			/* Record the arrival of a TSN. */
			error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
						 cmd->obj.u32, NULL);
			break;

		case SCTP_CMD_REPORT_FWDTSN:
			asoc->stream.si->report_ftsn(&asoc->ulpq, cmd->obj.u32);
			break;

		case SCTP_CMD_PROCESS_FWDTSN:
			asoc->stream.si->handle_ftsn(&asoc->ulpq,
						     cmd->obj.chunk);
			break;

		case SCTP_CMD_GEN_SACK:
			/* Generate a Selective ACK.
			 * The argument tells us whether to just count
			 * the packet and MAYBE generate a SACK, or
			 * force a SACK out.
			 */
			force = cmd->obj.i32;
			error = sctp_gen_sack(asoc, force, commands);
			break;

		case SCTP_CMD_PROCESS_SACK:
			/* Process an inbound SACK. */
			error = sctp_cmd_process_sack(commands, asoc,
						      cmd->obj.chunk);
			break;

		case SCTP_CMD_GEN_INIT_ACK:
			/* Generate an INIT ACK chunk. */
			new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
						     0);
			if (!new_obj) {
				error = -ENOMEM;
				break;
			}

			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_PEER_INIT:
			/* Process a unified INIT from the peer.
			 * Note: Only used during INIT-ACK processing. If
			 * there is an error just return to the outer
			 * layer which will bail.
			 */
			error = sctp_cmd_process_init(commands, asoc, chunk,
						      cmd->obj.init, gfp);
			break;

		case SCTP_CMD_GEN_COOKIE_ECHO:
			/* Generate a COOKIE ECHO chunk. */
			new_obj = sctp_make_cookie_echo(asoc, chunk);
			if (!new_obj) {
				if (cmd->obj.chunk)
					sctp_chunk_free(cmd->obj.chunk);
				error = -ENOMEM;
				break;
			}
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));

			/* If there is an ERROR chunk to be sent along with
			 * the COOKIE_ECHO, send it, too.
			 */
			if (cmd->obj.chunk)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(cmd->obj.chunk));

			if (new_obj->transport) {
				new_obj->transport->init_sent_count++;
				asoc->init_last_sent_to = new_obj->transport;
			}

			/* FIXME - Eventually come up with a cleaner way to
			 * enabling COOKIE-ECHO + DATA bundling during
			 * multihoming stale cookie scenarios, the following
			 * command plays with asoc->peer.retran_path to
			 * avoid the problem of sending the COOKIE-ECHO and
			 * DATA in different paths, which could result
			 * in the association being ABORTed if the DATA chunk
			 * is processed first by the server. Checking the
			 * init error counter simply causes this command
			 * to be executed only during failed attempts of
			 * association establishment.
			 */
			if ((asoc->peer.retran_path !=
			     asoc->peer.primary_path) &&
			    (asoc->init_err_counter > 0)) {
				sctp_add_cmd_sf(commands,
						SCTP_CMD_FORCE_PRIM_RETRAN,
						SCTP_NULL());
			}

			break;

		case SCTP_CMD_GEN_SHUTDOWN:
			/* Generate SHUTDOWN when in SHUTDOWN_SENT state.
			 * Reset error counts.
			 */
			asoc->overall_error_count = 0;

			/* Generate a SHUTDOWN chunk. */
			new_obj = sctp_make_shutdown(asoc, chunk);
			if (!new_obj) {
				error = -ENOMEM;
				break;
			}
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_CHUNK_ULP:
			/* Send a chunk to the sockets layer. */
			pr_debug("%s: sm_sideff: chunk_up:%p, ulpq:%p\n",
				 __func__, cmd->obj.chunk, &asoc->ulpq);

			asoc->stream.si->ulpevent_data(&asoc->ulpq,
						       cmd->obj.chunk,
						       GFP_ATOMIC);
			break;

		case SCTP_CMD_EVENT_ULP:
			/* Send a notification to the sockets layer. */
			pr_debug("%s: sm_sideff: event_up:%p, ulpq:%p\n",
				 __func__, cmd->obj.ulpevent, &asoc->ulpq);

			asoc->stream.si->enqueue_event(&asoc->ulpq,
						       cmd->obj.ulpevent);
			break;

		case SCTP_CMD_REPLY:
			/* If a caller has not already corked, do cork. */
			if (!asoc->outqueue.cork) {
				sctp_outq_cork(&asoc->outqueue);
				local_cork = 1;
			}
			/* Send a chunk to our peer. */
			sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk, gfp);
			break;

		case SCTP_CMD_SEND_PKT:
			/* Send a full packet to our peer. */
			packet = cmd->obj.packet;
			sctp_packet_transmit(packet, gfp);
			sctp_ootb_pkt_free(packet);
			break;

		case SCTP_CMD_T1_RETRAN:
			/* Mark a transport for retransmission. */
			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
					SCTP_RTXR_T1_RTX);
			break;

		case SCTP_CMD_RETRAN:
			/* Mark a transport for retransmission. */
			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
					SCTP_RTXR_T3_RTX);
			break;

		case SCTP_CMD_ECN_CE:
			/* Do delayed CE processing. */
			sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_ECN_ECNE:
			/* Do delayed ECNE processing. */
			new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32,
							chunk);
			if (new_obj)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_ECN_CWR:
			/* Do delayed CWR processing. */
			sctp_do_ecn_cwr_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_SETUP_T2:
			sctp_cmd_setup_t2(commands, asoc, cmd->obj.chunk);
			break;

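		/* Start the timer only if it is not already pending;
		 * a pending timer is left untouched, otherwise we fall
		 * through to the normal start path below.
		 */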
		case SCTP_CMD_TIMER_START_ONCE:
			timer = &asoc->timers[cmd->obj.to];

			if (timer_pending(timer))
				break;
			fallthrough;

		case SCTP_CMD_TIMER_START:
			timer = &asoc->timers[cmd->obj.to];
			timeout = asoc->timeouts[cmd->obj.to];
			BUG_ON(!timeout);

			/*
			 * SCTP has a hard time with timer starts. Because we process
			 * timer starts as side effects, it can be hard to tell if we
			 * have already started a timer or not, which leads to BUG
			 * halts when we call add_timer. So here, instead of just
			 * starting a timer, if the timer is already pending we simply
			 * mod it to the shorter of the two expiration times.
1545 | */ |
1546 | if (!timer_pending(timer)) |
1547 | sctp_association_hold(asoc); |
1548 | timer_reduce(timer, expires: jiffies + timeout); |
1549 | break; |
1550 | |
1551 | case SCTP_CMD_TIMER_RESTART: |
1552 | timer = &asoc->timers[cmd->obj.to]; |
1553 | timeout = asoc->timeouts[cmd->obj.to]; |
1554 | if (!mod_timer(timer, expires: jiffies + timeout)) |
1555 | sctp_association_hold(asoc); |
1556 | break; |
1557 | |
1558 | case SCTP_CMD_TIMER_STOP: |
1559 | timer = &asoc->timers[cmd->obj.to]; |
1560 | if (del_timer(timer)) |
1561 | sctp_association_put(asoc); |
1562 | break; |
1563 | |
		case SCTP_CMD_INIT_CHOOSE_TRANSPORT:
			chunk = cmd->obj.chunk;
			t = sctp_assoc_choose_alter_transport(asoc,
						asoc->init_last_sent_to);
			asoc->init_last_sent_to = t;
			chunk->transport = t;
			t->init_sent_count++;
			/* Set the new transport as primary */
			sctp_assoc_set_primary(asoc, t);
			break;

		case SCTP_CMD_INIT_RESTART:
			/* Do the needed accounting and updates
			 * associated with restarting an initialization
			 * timer. Only multiply the timeout by two if
			 * all transports have been tried at the current
			 * timeout.
			 */
			sctp_cmd_t1_timer_update(asoc,
						 SCTP_EVENT_TIMEOUT_T1_INIT,
						 "INIT");

			sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
			break;

		case SCTP_CMD_COOKIEECHO_RESTART:
			/* Do the needed accounting and updates
			 * associated with restarting the T1 COOKIE
			 * timer. Only multiply the timeout by two if
			 * all transports have been tried at the current
			 * timeout.
			 */
			sctp_cmd_t1_timer_update(asoc,
						 SCTP_EVENT_TIMEOUT_T1_COOKIE,
						 "COOKIE");

			/* If we've sent any data bundled with
			 * COOKIE-ECHO we need to resend.
			 */
			list_for_each_entry(t, &asoc->peer.transport_addr_list,
					    transports) {
				sctp_retransmit_mark(&asoc->outqueue, t,
						     SCTP_RTXR_T1_RTX);
			}

			sctp_add_cmd_sf(commands,
					SCTP_CMD_TIMER_RESTART,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
			break;

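		/* Handle failure of the association INIT exchange. */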
		case SCTP_CMD_INIT_FAILED:
			sctp_cmd_init_failed(commands, asoc, cmd->obj.u16);
			break;

		case SCTP_CMD_ASSOC_FAILED:
			sctp_cmd_assoc_failed(commands, asoc, event_type,
					      subtype, chunk, cmd->obj.u16);
			break;

		case SCTP_CMD_INIT_COUNTER_INC:
			asoc->init_err_counter++;
			break;

		case SCTP_CMD_INIT_COUNTER_RESET:
			asoc->init_err_counter = 0;
			asoc->init_cycle = 0;
			list_for_each_entry(t, &asoc->peer.transport_addr_list,
					    transports) {
				t->init_sent_count = 0;
			}
			break;

		case SCTP_CMD_REPORT_DUP:
			sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
					     cmd->obj.u32);
			break;

		case SCTP_CMD_REPORT_BAD_TAG:
			pr_debug("%s: vtag mismatch!\n", __func__);
			break;

		case SCTP_CMD_STRIKE:
			/* Mark one strike against a transport. */
			sctp_do_8_2_transport_strike(commands, asoc,
						     cmd->obj.transport, 0);
			break;

		case SCTP_CMD_TRANSPORT_IDLE:
			t = cmd->obj.transport;
			sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);
			break;

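		/* A heartbeat was sent on this transport; count it as a
		 * strike and note that a heartbeat is outstanding.
		 */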
		case SCTP_CMD_TRANSPORT_HB_SENT:
			t = cmd->obj.transport;
			sctp_do_8_2_transport_strike(commands, asoc,
						     t, 1);
			t->hb_sent = 1;
			break;

		case SCTP_CMD_TRANSPORT_ON:
			t = cmd->obj.transport;
			sctp_cmd_transport_on(commands, asoc, t, chunk);
			break;

		case SCTP_CMD_HB_TIMERS_START:
			sctp_cmd_hb_timers_start(commands, asoc);
			break;

		case SCTP_CMD_HB_TIMER_UPDATE:
			t = cmd->obj.transport;
			sctp_transport_reset_hb_timer(t);
			break;

		case SCTP_CMD_HB_TIMERS_STOP:
			sctp_cmd_hb_timers_stop(commands, asoc);
			break;

		case SCTP_CMD_PROBE_TIMER_UPDATE:
			t = cmd->obj.transport;
			sctp_transport_reset_probe_timer(t);
			break;

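		/* Save the error code; it becomes the interpreter's
		 * return value.
		 */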
		case SCTP_CMD_REPORT_ERROR:
			error = cmd->obj.error;
			break;

		case SCTP_CMD_PROCESS_CTSN:
			/* Dummy up a SACK for processing. */
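			/* Reconstruct the peer's advertised window for
			 * a_rwnd: our cached rwnd already has the in-flight
			 * bytes subtracted, so add them back.
			 */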
			sackh.cum_tsn_ack = cmd->obj.be32;
			sackh.a_rwnd = htonl(asoc->peer.rwnd +
					     asoc->outqueue.outstanding_bytes);
			sackh.num_gap_ack_blocks = 0;
			sackh.num_dup_tsns = 0;
			chunk->subh.sack_hdr = &sackh;
			sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
					SCTP_CHUNK(chunk));
			break;

		case SCTP_CMD_DISCARD_PACKET:
			/* We need to discard the whole packet.
			 * Uncork the queue since there might be
			 * responses pending.
			 */
			chunk->pdiscard = 1;
			if (asoc) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}
			break;

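		/* Request an RTT measurement on this transport; the next
		 * chunk sent on it will be timed.
		 */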
		case SCTP_CMD_RTO_PENDING:
			t = cmd->obj.transport;
			t->rto_pending = 1;
			break;

		case SCTP_CMD_PART_DELIVER:
			asoc->stream.si->start_pd(&asoc->ulpq, GFP_ATOMIC);
			break;

		case SCTP_CMD_RENEGE:
			asoc->stream.si->renege_events(&asoc->ulpq,
						       cmd->obj.chunk,
						       GFP_ATOMIC);
			break;

		case SCTP_CMD_SETUP_T4:
			sctp_cmd_setup_t4(commands, asoc, cmd->obj.chunk);
			break;

		case SCTP_CMD_PROCESS_OPERR:
			sctp_cmd_process_operr(commands, asoc, chunk);
			break;
		case SCTP_CMD_CLEAR_INIT_TAG:
			asoc->peer.i.init_tag = 0;
			break;
		case SCTP_CMD_DEL_NON_PRIMARY:
			sctp_cmd_del_non_primary(asoc);
			break;
		case SCTP_CMD_T3_RTX_TIMERS_STOP:
			sctp_cmd_t3_rtx_timers_stop(commands, asoc);
			break;
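		/* Force retransmissions out on the primary path: point
		 * retran_path at the primary, flush the queue, then
		 * restore the saved retransmission path.
		 */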
		case SCTP_CMD_FORCE_PRIM_RETRAN:
			t = asoc->peer.retran_path;
			asoc->peer.retran_path = asoc->peer.primary_path;
			sctp_outq_uncork(&asoc->outqueue, gfp);
			local_cork = 0;
			asoc->peer.retran_path = t;
			break;
		case SCTP_CMD_SET_SK_ERR:
			sctp_cmd_set_sk_err(asoc, cmd->obj.error);
			break;
		case SCTP_CMD_ASSOC_CHANGE:
			sctp_cmd_assoc_change(commands, asoc,
					      cmd->obj.u8);
			break;
		case SCTP_CMD_ADAPTATION_IND:
			sctp_cmd_adaptation_ind(commands, asoc);
			break;
		case SCTP_CMD_PEER_NO_AUTH:
			sctp_cmd_peer_no_auth(commands, asoc);
			break;

		case SCTP_CMD_ASSOC_SHKEY:
			error = sctp_auth_asoc_init_active_key(asoc,
							       GFP_ATOMIC);
			break;
		case SCTP_CMD_UPDATE_INITTAG:
			asoc->peer.i.init_tag = cmd->obj.u32;
			break;
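		/* Cork the queue so the chunks of this message can be
		 * bundled when the queue is finally flushed.
		 */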
		case SCTP_CMD_SEND_MSG:
			if (!asoc->outqueue.cork) {
				sctp_outq_cork(&asoc->outqueue);
				local_cork = 1;
			}
			sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
			break;
		case SCTP_CMD_PURGE_ASCONF_QUEUE:
			sctp_asconf_queue_teardown(asoc);
			break;

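		/* Switch command processing over to a new association,
		 * flushing anything we corked on the old one first.
		 */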
		case SCTP_CMD_SET_ASOC:
			if (asoc && local_cork) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}
			asoc = cmd->obj.asoc;
			break;

		default:
			pr_warn("Impossible command: %u\n",
				cmd->verb);
			break;
		}

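		/* On error, drain the remaining commands and free any
		 * queued replies so their chunks are not leaked, then
		 * stop processing.
		 */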
		if (error) {
			cmd = sctp_next_cmd(commands);
			while (cmd) {
				if (cmd->verb == SCTP_CMD_REPLY)
					sctp_chunk_free(cmd->obj.chunk);
				cmd = sctp_next_cmd(commands);
			}
			break;
		}
	}

	/* If this is in response to a received chunk, wait until
	 * we are done with the packet to open the queue so that we don't
	 * send multiple packets in response to a single request.
	 */
	if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) {
		if (chunk->end_of_packet || chunk->singleton)
			sctp_outq_uncork(&asoc->outqueue, gfp);
	} else if (local_cork)
		sctp_outq_uncork(&asoc->outqueue, gfp);

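	/* Reset the data-ready flag so a later packet can signal the
	 * socket again.
	 */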
	if (sp->data_ready_signalled)
		sp->data_ready_signalled = 0;

	return error;
}
