// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	X.25 Packet Layer release 002
 *
 *	This is ALPHA test software. This code may break your machine,
 *	randomly fail to work with new releases, misbehave and/or generally
 *	screw up. It might even work.
 *
 *	This code REQUIRES 2.1.15 or higher
 *
 *	History
 *	X.25 001	Jonathan Naylor	  Started coding.
 *	X.25 002	Jonathan Naylor	  Centralised disconnection code.
 *					  New timer architecture.
 *	2000-03-20	Daniela Squassoni Disabling/enabling of facilities
 *					  negotiation.
 *	2000-11-10	Henner Eisen	  Check and reset for out-of-sequence
 *					  i-frames.
 */

#define pr_fmt(fmt) "X25: " fmt

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/x25.h>

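/*
 * Queue a received Data packet for the socket, reassembling any M-bit
 * (more data) sequence first: fragments are held on x25->fragment_queue
 * until a packet without the M bit arrives, then copied into one new skb.
 * Returns 0 when the data has been queued (the socket now owns the skb)
 * and 1 when no memory was available for reassembly (the skb is freed).
 */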
static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
{
	struct sk_buff *skbo, *skbn = skb;
	struct x25_sock *x25 = x25_sk(sk);

	if (more) {
		x25->fraglen += skb->len;
		skb_queue_tail(&x25->fragment_queue, skb);
		skb_set_owner_r(skb, sk);
		return 0;
	}

	if (x25->fraglen > 0) {	/* End of fragment */
		int len = x25->fraglen + skb->len;

		if ((skbn = alloc_skb(len, GFP_ATOMIC)) == NULL){
			kfree_skb(skb);
			return 1;
		}

		skb_queue_tail(&x25->fragment_queue, skb);

		skb_reset_transport_header(skbn);

		skbo = skb_dequeue(&x25->fragment_queue);
		skb_copy_from_linear_data(skbo, skb_put(skbn, skbo->len),
					  skbo->len);
		kfree_skb(skbo);

		while ((skbo =
			skb_dequeue(&x25->fragment_queue)) != NULL) {
			skb_pull(skbo, (x25->neighbour->extended) ?
					X25_EXT_MIN_LEN : X25_STD_MIN_LEN);
			skb_copy_from_linear_data(skbo,
						  skb_put(skbn, skbo->len),
						  skbo->len);
			kfree_skb(skbo);
		}

		x25->fraglen = 0;
	}

	skb_set_owner_r(skbn, sk);
	skb_queue_tail(&sk->sk_receive_queue, skbn);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

	return 0;
}

/*
 * State machine for state 1, Awaiting Call Accepted State.
 * The handling of the timer(s) is in file x25_timer.c.
 * Handling of state 0 and connection release is in af_x25.c.
 */
static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
	struct x25_address source_addr, dest_addr;
	int len;
	struct x25_sock *x25 = x25_sk(sk);

	switch (frametype) {
	case X25_CALL_ACCEPTED: {

		x25_stop_timer(sk);
		x25->condition = 0x00;
		x25->vs = 0;
		x25->va = 0;
		x25->vr = 0;
		x25->vl = 0;
		x25->state = X25_STATE_3;
		sk->sk_state = TCP_ESTABLISHED;
		/*
		 * Parse the data in the frame.
		 */
		if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
			goto out_clear;
		skb_pull(skb, X25_STD_MIN_LEN);

		len = x25_parse_address_block(skb, &source_addr,
					      &dest_addr);
		if (len > 0)
			skb_pull(skb, len);
		else if (len < 0)
			goto out_clear;

		len = x25_parse_facilities(skb, &x25->facilities,
					   &x25->dte_facilities,
					   &x25->vc_facil_mask);
		if (len > 0)
			skb_pull(skb, len);
		else if (len < 0)
			goto out_clear;
		/*
		 * Copy any Call User Data.
		 */
		if (skb->len > 0) {
			if (skb->len > X25_MAX_CUD_LEN)
				goto out_clear;

			skb_copy_bits(skb, 0, x25->calluserdata.cuddata,
				      skb->len);
			x25->calluserdata.cudlength = skb->len;
		}
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_state_change(sk);
		break;
	}
	case X25_CALL_REQUEST:
		/* call collision */
		x25->causediag.cause = 0x01;
		x25->causediag.diagnostic = 0x48;

		x25_write_internal(sk, X25_CLEAR_REQUEST);
		x25_disconnect(sk, EISCONN, 0x01, 0x48);
		break;

	case X25_CLEAR_REQUEST:
		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
			goto out_clear;

		x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
		x25_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
		break;

	default:
		break;
	}

	return 0;

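/*
 * The received packet was malformed or unacceptable: clear the call
 * ourselves and wait in state 2 (under timer T23) for the Clear
 * Confirmation.
 */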
out_clear:
	x25_write_internal(sk, X25_CLEAR_REQUEST);
	x25->state = X25_STATE_2;
	x25_start_t23timer(sk);
	return 0;
}

/*
 * State machine for state 2, Awaiting Clear Confirmation State.
 * The handling of the timer(s) is in file x25_timer.c
 * Handling of state 0 and connection release is in af_x25.c.
 */
static int x25_state2_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
	switch (frametype) {

	case X25_CLEAR_REQUEST:
		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
			goto out_clear;

		x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
		x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
		break;

	case X25_CLEAR_CONFIRMATION:
		x25_disconnect(sk, 0, 0, 0);
		break;

	default:
		break;
	}

	return 0;

out_clear:
	x25_write_internal(sk, X25_CLEAR_REQUEST);
	x25_start_t23timer(sk);
	return 0;
}

/*
 * State machine for state 3, Connected State.
 * The handling of the timer(s) is in file x25_timer.c
 * Handling of state 0 and connection release is in af_x25.c.
 */
static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
{
	int queued = 0;
	int modulus;
	struct x25_sock *x25 = x25_sk(sk);

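	/*
	 * Window rotation is modulo 8 in standard operation and modulo 128
	 * when the neighbour uses extended sequence numbering.
	 */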
	modulus = (x25->neighbour->extended) ? X25_EMODULUS : X25_SMODULUS;

	switch (frametype) {

	case X25_RESET_REQUEST:
		x25_write_internal(sk, X25_RESET_CONFIRMATION);
		x25_stop_timer(sk);
		x25->condition = 0x00;
		x25->vs = 0;
		x25->vr = 0;
		x25->va = 0;
		x25->vl = 0;
		x25_requeue_frames(sk);
		break;

	case X25_CLEAR_REQUEST:
		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
			goto out_clear;

		x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
		x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
		break;

	case X25_RR:
	case X25_RNR:
		if (!x25_validate_nr(sk, nr)) {
			x25_clear_queues(sk);
			x25_write_internal(sk, X25_RESET_REQUEST);
			x25_start_t22timer(sk);
			x25->condition = 0x00;
			x25->vs = 0;
			x25->vr = 0;
			x25->va = 0;
			x25->vl = 0;
			x25->state = X25_STATE_4;
		} else {
			x25_frames_acked(sk, nr);
			if (frametype == X25_RNR) {
				x25->condition |= X25_COND_PEER_RX_BUSY;
			} else {
				x25->condition &= ~X25_COND_PEER_RX_BUSY;
			}
		}
		break;

	case X25_DATA:	/* XXX */
		x25->condition &= ~X25_COND_PEER_RX_BUSY;
		if ((ns != x25->vr) || !x25_validate_nr(sk, nr)) {
			x25_clear_queues(sk);
			x25_write_internal(sk, X25_RESET_REQUEST);
			x25_start_t22timer(sk);
			x25->condition = 0x00;
			x25->vs = 0;
			x25->vr = 0;
			x25->va = 0;
			x25->vl = 0;
			x25->state = X25_STATE_4;
			break;
		}
		x25_frames_acked(sk, nr);
		if (ns == x25->vr) {
			if (x25_queue_rx_frame(sk, skb, m) == 0) {
				x25->vr = (x25->vr + 1) % modulus;
				queued = 1;
			} else {
				/* Should never happen */
				x25_clear_queues(sk);
				x25_write_internal(sk, X25_RESET_REQUEST);
				x25_start_t22timer(sk);
				x25->condition = 0x00;
				x25->vs = 0;
				x25->vr = 0;
				x25->va = 0;
				x25->vl = 0;
				x25->state = X25_STATE_4;
				break;
			}
			if (atomic_read(&sk->sk_rmem_alloc) >
			    (sk->sk_rcvbuf >> 1))
				x25->condition |= X25_COND_OWN_RX_BUSY;
		}
		/*
		 * If the window is full Ack it immediately, else
		 * start the holdback timer.
		 */
		if (((x25->vl + x25->facilities.winsize_in) % modulus) == x25->vr) {
			x25->condition &= ~X25_COND_ACK_PENDING;
			x25_stop_timer(sk);
			x25_enquiry_response(sk);
		} else {
			x25->condition |= X25_COND_ACK_PENDING;
			x25_start_t2timer(sk);
		}
		break;

	case X25_INTERRUPT_CONFIRMATION:
		clear_bit(X25_INTERRUPT_FLAG, &x25->flags);
		break;

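	/*
	 * Interrupt (expedited) data: deliver it inline when SOCK_URGINLINE
	 * is set, otherwise hold it on the interrupt queue for a later
	 * out-of-band read, then signal urgent data and confirm the
	 * interrupt to the peer.
	 */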
	case X25_INTERRUPT:
		if (sock_flag(sk, SOCK_URGINLINE))
			queued = !sock_queue_rcv_skb(sk, skb);
		else {
			skb_set_owner_r(skb, sk);
			skb_queue_tail(&x25->interrupt_in_queue, skb);
			queued = 1;
		}
		sk_send_sigurg(sk);
		x25_write_internal(sk, X25_INTERRUPT_CONFIRMATION);
		break;

	default:
		pr_warn("unknown %02X in state 3\n", frametype);
		break;
	}

	return queued;

out_clear:
	x25_write_internal(sk, X25_CLEAR_REQUEST);
	x25->state = X25_STATE_2;
	x25_start_t23timer(sk);
	return 0;
}

/*
 * State machine for state 4, Awaiting Reset Confirmation State.
 * The handling of the timer(s) is in file x25_timer.c
 * Handling of state 0 and connection release is in af_x25.c.
 */
static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
	struct x25_sock *x25 = x25_sk(sk);

	switch (frametype) {

	case X25_RESET_REQUEST:
		x25_write_internal(sk, X25_RESET_CONFIRMATION);
		fallthrough;
	case X25_RESET_CONFIRMATION: {
		x25_stop_timer(sk);
		x25->condition = 0x00;
		x25->va = 0;
		x25->vr = 0;
		x25->vs = 0;
		x25->vl = 0;
		x25->state = X25_STATE_3;
		x25_requeue_frames(sk);
		break;
	}
	case X25_CLEAR_REQUEST:
		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
			goto out_clear;

		x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
		x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
		break;

	default:
		break;
	}

	return 0;

out_clear:
	x25_write_internal(sk, X25_CLEAR_REQUEST);
	x25->state = X25_STATE_2;
	x25_start_t23timer(sk);
	return 0;
}

/*
 * State machine for state 5, Call Accepted / Call Connected pending (X25_ACCPT_APPRV_FLAG).
 * The handling of the timer(s) is in file x25_timer.c
 * Handling of state 0 and connection release is in af_x25.c.
 */
static int x25_state5_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
	struct x25_sock *x25 = x25_sk(sk);

	switch (frametype) {
	case X25_CLEAR_REQUEST:
		if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2)) {
			x25_write_internal(sk, X25_CLEAR_REQUEST);
			x25->state = X25_STATE_2;
			x25_start_t23timer(sk);
			return 0;
		}

		x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
		x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
		break;

	default:
		break;
	}

	return 0;
}

/* Higher level upcall for a LAPB frame */
int x25_process_rx_frame(struct sock *sk, struct sk_buff *skb)
{
	struct x25_sock *x25 = x25_sk(sk);
	int queued = 0, frametype, ns, nr, q, d, m;

	if (x25->state == X25_STATE_0)
		return 0;

	frametype = x25_decode(sk, skb, &ns, &nr, &q, &d, &m);

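	/*
	 * Dispatch on the current call state; the handler's return value
	 * tells us whether it took ownership of (queued) the skb.
	 */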
	switch (x25->state) {
	case X25_STATE_1:
		queued = x25_state1_machine(sk, skb, frametype);
		break;
	case X25_STATE_2:
		queued = x25_state2_machine(sk, skb, frametype);
		break;
	case X25_STATE_3:
		queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m);
		break;
	case X25_STATE_4:
		queued = x25_state4_machine(sk, skb, frametype);
		break;
	case X25_STATE_5:
		queued = x25_state5_machine(sk, skb, frametype);
		break;
	}

	x25_kick(sk);

	return queued;
}

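/*
 * Backlog receive handler: process a frame that was queued while the
 * socket was owned by user context.  Frames the state machines did not
 * take ownership of are freed here.
 */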
int x25_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int queued = x25_process_rx_frame(sk, skb);

	if (!queued)
		kfree_skb(skb);

	return 0;
}

source code of linux/net/x25/x25_in.c