1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * shdlc Link Layer Control |
4 | * |
5 | * Copyright (C) 2012 Intel Corporation. All rights reserved. |
6 | */ |
7 | |
8 | #define pr_fmt(fmt) "shdlc: %s: " fmt, __func__ |
9 | |
10 | #include <linux/types.h> |
11 | #include <linux/sched.h> |
12 | #include <linux/wait.h> |
13 | #include <linux/slab.h> |
14 | #include <linux/skbuff.h> |
15 | |
16 | #include "llc.h" |
17 | |
/* Link state machine for the shdlc connection (see ETSI SHDLC spec 10.6) */
enum shdlc_state {
	SHDLC_DISCONNECTED = 0,		/* no link; queues are purged */
	SHDLC_CONNECTING = 1,		/* connect requested; RSET to be sent */
	SHDLC_NEGOTIATING = 2,		/* RSET sent, waiting for UA/RSET reply */
	SHDLC_HALF_CONNECTED = 3,	/* UA exchanged; promoted to CONNECTED
					 * on first received I/S frame */
	SHDLC_CONNECTED = 4		/* link fully established */
};
25 | |
/* Per-link shdlc context; all state is serialized by state_mutex */
struct llc_shdlc {
	struct nfc_hci_dev *hdev;	/* HCI device handed to callbacks */
	xmit_to_drv_t xmit_to_drv;	/* sends a complete frame to the driver */
	rcv_to_hci_t rcv_to_hci;	/* delivers received HCI payload upward */

	struct mutex state_mutex;	/* guards state machine vs connect/disconnect */
	enum shdlc_state state;
	int hard_fault;			/* latched fatal error (negative errno) */

	wait_queue_head_t *connect_wq;	/* on-stack wq of llc_shdlc_connect() */
	int connect_tries;		/* RSET attempts; capped at 5 in sm_work */
	int connect_result;		/* 1 while pending, then 0 or -errno */
	struct timer_list connect_timer;/* aka T3 in spec 10.6.1 */

	u8 w; /* window size */
	bool srej_support;

	struct timer_list t1_timer;	/* send ack timeout */
	bool t1_active;

	struct timer_list t2_timer;	/* guard/retransmit timeout */
	bool t2_active;

	int ns;				/* next seq num for send */
	int nr;				/* next expected seq num for receive */
	int dnr;			/* oldest sent unacked seq num */

	struct sk_buff_head rcv_q;	/* frames from driver awaiting processing */

	struct sk_buff_head send_q;	/* HCI payloads awaiting transmission */
	bool rnr;			/* other side is not ready to receive */

	struct sk_buff_head ack_pending_q; /* sent I-frames awaiting remote ack */

	struct work_struct sm_work;	/* runs llc_shdlc_sm_work() */

	int tx_headroom;		/* skb headroom required by the driver */
	int tx_tailroom;		/* skb tailroom required by the driver */

	llc_failure_t llc_failure;	/* reports hard_fault to the upper layer */
};
67 | |
/*
 * Headroom reserved in front of the HCI payload for this LLC.
 * NOTE(review): only 1 control byte is pushed here; the second byte is
 * presumably for lower-layer framing — confirm against the driver.
 */
#define SHDLC_LLC_HEAD_ROOM 2

#define SHDLC_MAX_WINDOW 4		/* max outstanding unacked I-frames */
#define SHDLC_SREJ_SUPPORT false	/* selective reject not implemented */

/* Top 3 bits of the control byte select the frame family */
#define SHDLC_CONTROL_HEAD_MASK 0xe0
#define SHDLC_CONTROL_HEAD_I 0x80
#define SHDLC_CONTROL_HEAD_I2 0xa0
#define SHDLC_CONTROL_HEAD_S 0xc0
#define SHDLC_CONTROL_HEAD_U 0xe0

/* I-frame: N(S) in bits 5..3, N(R) in bits 2..0 */
#define SHDLC_CONTROL_NS_MASK 0x38
#define SHDLC_CONTROL_NR_MASK 0x07
/* S-frame: type in bits 4..3 */
#define SHDLC_CONTROL_TYPE_MASK 0x18

/* U-frame: modifier in the low 5 bits */
#define SHDLC_CONTROL_M_MASK 0x1f

/* S-frame types (supervisory acknowledgment/flow control) */
enum sframe_type {
	S_FRAME_RR = 0x00,	/* receive ready: ack up to N(R)-1 */
	S_FRAME_REJ = 0x01,	/* reject: request retransmit from N(R) */
	S_FRAME_RNR = 0x02,	/* receive not ready: ack + pause sending */
	S_FRAME_SREJ = 0x03	/* selective reject (unused here) */
};

/* U-frame modifiers (unnumbered, link management) */
enum uframe_modifier {
	U_FRAME_UA = 0x06,	/* unnumbered acknowledge */
	U_FRAME_RSET = 0x19	/* reset/connect request, optional params */
};

#define SHDLC_CONNECT_VALUE_MS 5
#define SHDLC_T1_VALUE_MS(w) ((5 * w) / 4)	/* ack timeout scales with window */
#define SHDLC_T2_VALUE_MS 300

/* Hex-dump an skb at debug level; multi-statement macro per do/while(0) idiom */
#define SHDLC_DUMP_SKB(info, skb) \
do { \
	pr_debug("%s:\n", info); \
	print_hex_dump(KERN_DEBUG, "shdlc: ", DUMP_PREFIX_OFFSET, \
		       16, 1, skb->data, skb->len, 0); \
} while (0)
107 | |
108 | /* checks x < y <= z modulo 8 */ |
/*
 * Check x < y <= z modulo 8, i.e. whether sequence number y lies inside
 * the half-open window (x, z] on the 3-bit sequence-number circle.
 * All arguments are expected in [0, 7].
 */
static bool llc_shdlc_x_lt_y_lteq_z(int x, int y, int z)
{
	if (x < z)
		return x < y && y <= z;

	/* window wraps around 8: accept y above x or at/below z */
	return y > x || y <= z;
}
116 | |
117 | /* checks x <= y < z modulo 8 */ |
/*
 * Check x <= y < z modulo 8, i.e. whether sequence number y lies inside
 * the half-open window [x, z) on the 3-bit sequence-number circle.
 * All arguments are expected in [0, 7].
 */
static bool llc_shdlc_x_lteq_y_lt_z(int x, int y, int z)
{
	if (x <= z)
		return x <= y && y < z;

	/* window wraps around 8: z + 8 > x */
	return y >= x || y < z;
}
125 | |
126 | static struct sk_buff *llc_shdlc_alloc_skb(const struct llc_shdlc *shdlc, |
127 | int payload_len) |
128 | { |
129 | struct sk_buff *skb; |
130 | |
131 | skb = alloc_skb(size: shdlc->tx_headroom + SHDLC_LLC_HEAD_ROOM + |
132 | shdlc->tx_tailroom + payload_len, GFP_KERNEL); |
133 | if (skb) |
134 | skb_reserve(skb, len: shdlc->tx_headroom + SHDLC_LLC_HEAD_ROOM); |
135 | |
136 | return skb; |
137 | } |
138 | |
139 | /* immediately sends an S frame. */ |
140 | static int llc_shdlc_send_s_frame(const struct llc_shdlc *shdlc, |
141 | enum sframe_type sframe_type, int nr) |
142 | { |
143 | int r; |
144 | struct sk_buff *skb; |
145 | |
146 | pr_debug("sframe_type=%d nr=%d\n" , sframe_type, nr); |
147 | |
148 | skb = llc_shdlc_alloc_skb(shdlc, payload_len: 0); |
149 | if (skb == NULL) |
150 | return -ENOMEM; |
151 | |
152 | *(u8 *)skb_push(skb, len: 1) = SHDLC_CONTROL_HEAD_S | (sframe_type << 3) | nr; |
153 | |
154 | r = shdlc->xmit_to_drv(shdlc->hdev, skb); |
155 | |
156 | kfree_skb(skb); |
157 | |
158 | return r; |
159 | } |
160 | |
161 | /* immediately sends an U frame. skb may contain optional payload */ |
162 | static int llc_shdlc_send_u_frame(const struct llc_shdlc *shdlc, |
163 | struct sk_buff *skb, |
164 | enum uframe_modifier uframe_modifier) |
165 | { |
166 | int r; |
167 | |
168 | pr_debug("uframe_modifier=%d\n" , uframe_modifier); |
169 | |
170 | *(u8 *)skb_push(skb, len: 1) = SHDLC_CONTROL_HEAD_U | uframe_modifier; |
171 | |
172 | r = shdlc->xmit_to_drv(shdlc->hdev, skb); |
173 | |
174 | kfree_skb(skb); |
175 | |
176 | return r; |
177 | } |
178 | |
179 | /* |
180 | * Free ack_pending frames until y_nr - 1, and reset t2 according to |
181 | * the remaining oldest ack_pending frame sent time |
182 | */ |
/*
 * Free ack_pending frames until y_nr - 1, and reset t2 according to
 * the remaining oldest ack_pending frame sent time.
 * Caller guarantees shdlc->dnr < y_nr (modulo 8) on entry.
 */
static void llc_shdlc_reset_t2(struct llc_shdlc *shdlc, int y_nr)
{
	struct sk_buff *skb;
	int dnr = shdlc->dnr;	/* MUST initially be < y_nr */

	pr_debug("release ack pending up to frame %d excluded\n" , y_nr);

	/* drop every acked frame [dnr, y_nr) from the retransmit queue */
	while (dnr != y_nr) {
		pr_debug("release ack pending frame %d\n" , dnr);

		skb = skb_dequeue(list: &shdlc->ack_pending_q);
		kfree_skb(skb);

		dnr = (dnr + 1) % 8;
	}

	if (skb_queue_empty(list: &shdlc->ack_pending_q)) {
		/* nothing left unacked: T2 no longer needed */
		if (shdlc->t2_active) {
			del_timer_sync(timer: &shdlc->t2_timer);
			shdlc->t2_active = false;

			pr_debug("All sent frames acked. Stopped T2(retransmit)\n" );
		}
	} else {
		/* rearm T2 from the send timestamp stashed in the oldest
		 * remaining frame's cb (written in handle_send_queue) */
		skb = skb_peek(list_: &shdlc->ack_pending_q);

		mod_timer(timer: &shdlc->t2_timer, expires: *(unsigned long *)skb->cb +
			  msecs_to_jiffies(SHDLC_T2_VALUE_MS));
		shdlc->t2_active = true;

		pr_debug("Start T2(retransmit) for remaining unacked sent frames\n" );
	}
}
216 | |
217 | /* |
218 | * Receive validated frames from lower layer. skb contains HCI payload only. |
219 | * Handle according to algorithm at spec:10.8.2 |
220 | */ |
/*
 * Receive validated frames from lower layer. skb contains HCI payload only.
 * Handle according to algorithm at spec:10.8.2.
 * Consumes skb (either forwarded to HCI or freed here).
 */
static void llc_shdlc_rcv_i_frame(struct llc_shdlc *shdlc,
				  struct sk_buff *skb, int ns, int nr)
{
	int x_ns = ns;
	int y_nr = nr;

	pr_debug("recvd I-frame %d, remote waiting frame %d\n" , ns, nr);

	if (shdlc->state != SHDLC_CONNECTED)
		goto exit;

	/* out-of-sequence frame: ask for retransmission from our nr */
	if (x_ns != shdlc->nr) {
		llc_shdlc_send_s_frame(shdlc, sframe_type: S_FRAME_REJ, nr: shdlc->nr);
		goto exit;
	}

	/* arm T1 so an RR ack goes out even if we have nothing to send */
	if (!shdlc->t1_active) {
		shdlc->t1_active = true;
		mod_timer(timer: &shdlc->t1_timer, expires: jiffies +
			  msecs_to_jiffies(SHDLC_T1_VALUE_MS(shdlc->w)));
		pr_debug("(re)Start T1(send ack)\n" );
	}

	/* hand non-empty payload upward; ownership moves to rcv_to_hci */
	if (skb->len) {
		shdlc->rcv_to_hci(shdlc->hdev, skb);
		skb = NULL;
	}

	shdlc->nr = (shdlc->nr + 1) % 8;

	/* piggybacked ack: y_nr must fall inside (dnr, ns] to be valid */
	if (llc_shdlc_x_lt_y_lteq_z(x: shdlc->dnr, y: y_nr, z: shdlc->ns)) {
		llc_shdlc_reset_t2(shdlc, y_nr);

		shdlc->dnr = y_nr;
	}

exit:
	kfree_skb(skb);
}
260 | |
261 | static void llc_shdlc_rcv_ack(struct llc_shdlc *shdlc, int y_nr) |
262 | { |
263 | pr_debug("remote acked up to frame %d excluded\n" , y_nr); |
264 | |
265 | if (llc_shdlc_x_lt_y_lteq_z(x: shdlc->dnr, y: y_nr, z: shdlc->ns)) { |
266 | llc_shdlc_reset_t2(shdlc, y_nr); |
267 | shdlc->dnr = y_nr; |
268 | } |
269 | } |
270 | |
271 | static void llc_shdlc_requeue_ack_pending(struct llc_shdlc *shdlc) |
272 | { |
273 | struct sk_buff *skb; |
274 | |
275 | pr_debug("ns reset to %d\n" , shdlc->dnr); |
276 | |
277 | while ((skb = skb_dequeue_tail(list: &shdlc->ack_pending_q))) { |
278 | skb_pull(skb, len: 1); /* remove control field */ |
279 | skb_queue_head(list: &shdlc->send_q, newsk: skb); |
280 | } |
281 | shdlc->ns = shdlc->dnr; |
282 | } |
283 | |
/*
 * Handle a REJ: the remote requests retransmission starting at frame y_nr.
 * Frames before y_nr are implicitly acked and freed; frames from y_nr on
 * are requeued for retransmission (see spec 10.8.4).
 */
static void llc_shdlc_rcv_rej(struct llc_shdlc *shdlc, int y_nr)
{
	struct sk_buff *skb;

	pr_debug("remote asks retransmission from frame %d\n" , y_nr);

	/* y_nr must lie within the unacked window [dnr, ns) to be valid */
	if (llc_shdlc_x_lteq_y_lt_z(x: shdlc->dnr, y: y_nr, z: shdlc->ns)) {
		if (shdlc->t2_active) {
			del_timer_sync(timer: &shdlc->t2_timer);
			shdlc->t2_active = false;
			pr_debug("Stopped T2(retransmit)\n" );
		}

		/* free the implicitly-acked frames [dnr, y_nr) */
		if (shdlc->dnr != y_nr) {
			while ((shdlc->dnr = ((shdlc->dnr + 1) % 8)) != y_nr) {
				skb = skb_dequeue(list: &shdlc->ack_pending_q);
				kfree_skb(skb);
			}
		}

		llc_shdlc_requeue_ack_pending(shdlc);
	}
}
307 | |
308 | /* See spec RR:10.8.3 REJ:10.8.4 */ |
/*
 * Dispatch a received S (supervisory) frame.
 * See spec RR:10.8.3 REJ:10.8.4. SREJ is unsupported and ignored.
 */
static void llc_shdlc_rcv_s_frame(struct llc_shdlc *shdlc,
				  enum sframe_type s_frame_type, int nr)
{
	struct sk_buff *skb;

	if (shdlc->state != SHDLC_CONNECTED)
		return;

	switch (s_frame_type) {
	case S_FRAME_RR:
		llc_shdlc_rcv_ack(shdlc, y_nr: nr);
		if (shdlc->rnr == true) {	/* see SHDLC 10.7.7 */
			shdlc->rnr = false;
			/* queue an empty I-frame to probe the reopened
			 * window if we have nothing else to send */
			if (shdlc->send_q.qlen == 0) {
				skb = llc_shdlc_alloc_skb(shdlc, payload_len: 0);
				if (skb)
					skb_queue_tail(list: &shdlc->send_q, newsk: skb);
			}
		}
		break;
	case S_FRAME_REJ:
		llc_shdlc_rcv_rej(shdlc, y_nr: nr);
		break;
	case S_FRAME_RNR:
		/* remote acks but asks us to pause transmission */
		llc_shdlc_rcv_ack(shdlc, y_nr: nr);
		shdlc->rnr = true;
		break;
	default:
		break;
	}
}
340 | |
/*
 * Finish a connect attempt with result r (0 = success, negative errno on
 * failure), stop the connect timer and wake the waiter in
 * llc_shdlc_connect().
 */
static void llc_shdlc_connect_complete(struct llc_shdlc *shdlc, int r)
{
	pr_debug("result=%d\n" , r);

	del_timer_sync(timer: &shdlc->connect_timer);

	if (r == 0) {
		/* fresh link: reset all sequence numbers */
		shdlc->ns = 0;
		shdlc->nr = 0;
		shdlc->dnr = 0;

		shdlc->state = SHDLC_HALF_CONNECTED;
	} else {
		shdlc->state = SHDLC_DISCONNECTED;
	}

	shdlc->connect_result = r;

	/* connect_result != 1 is the wait condition in llc_shdlc_connect() */
	wake_up(shdlc->connect_wq);
}
361 | |
362 | static int llc_shdlc_connect_initiate(const struct llc_shdlc *shdlc) |
363 | { |
364 | struct sk_buff *skb; |
365 | |
366 | skb = llc_shdlc_alloc_skb(shdlc, payload_len: 2); |
367 | if (skb == NULL) |
368 | return -ENOMEM; |
369 | |
370 | skb_put_u8(skb, SHDLC_MAX_WINDOW); |
371 | skb_put_u8(skb, SHDLC_SREJ_SUPPORT ? 1 : 0); |
372 | |
373 | return llc_shdlc_send_u_frame(shdlc, skb, uframe_modifier: U_FRAME_RSET); |
374 | } |
375 | |
376 | static int llc_shdlc_connect_send_ua(const struct llc_shdlc *shdlc) |
377 | { |
378 | struct sk_buff *skb; |
379 | |
380 | skb = llc_shdlc_alloc_skb(shdlc, payload_len: 0); |
381 | if (skb == NULL) |
382 | return -ENOMEM; |
383 | |
384 | return llc_shdlc_send_u_frame(shdlc, skb, uframe_modifier: U_FRAME_UA); |
385 | } |
386 | |
/*
 * Handle a received U (unnumbered) frame: RSET during connection setup
 * negotiates window size / SREJ support; UA completes the handshake.
 * Consumes skb.
 */
static void llc_shdlc_rcv_u_frame(struct llc_shdlc *shdlc,
				  struct sk_buff *skb,
				  enum uframe_modifier u_frame_modifier)
{
	u8 w = SHDLC_MAX_WINDOW;
	bool srej_support = SHDLC_SREJ_SUPPORT;
	int r;

	pr_debug("u_frame_modifier=%d\n" , u_frame_modifier);

	switch (u_frame_modifier) {
	case U_FRAME_RSET:
		switch (shdlc->state) {
		case SHDLC_NEGOTIATING:
		case SHDLC_CONNECTING:
			/*
			 * We sent RSET, but chip wants to negotiate or we
			 * got RSET before we managed to send out ours.
			 */
			if (skb->len > 0)
				w = skb->data[0];

			if (skb->len > 1)
				srej_support = skb->data[1] & 0x01 ? true :
					       false;

			/* accept only parameters we can honor; otherwise
			 * stay silent and let the connect timer retry */
			if ((w <= SHDLC_MAX_WINDOW) &&
			    (SHDLC_SREJ_SUPPORT || (srej_support == false))) {
				shdlc->w = w;
				shdlc->srej_support = srej_support;
				r = llc_shdlc_connect_send_ua(shdlc);
				llc_shdlc_connect_complete(shdlc, r);
			}
			break;
		case SHDLC_HALF_CONNECTED:
			/*
			 * Chip resent RSET due to its timeout - Ignore it
			 * as we already sent UA.
			 */
			break;
		case SHDLC_CONNECTED:
			/*
			 * Chip wants to reset link. This is unexpected and
			 * unsupported.
			 */
			shdlc->hard_fault = -ECONNRESET;
			break;
		default:
			break;
		}
		break;
	case U_FRAME_UA:
		/* peer accepted our RSET: the handshake is complete */
		if ((shdlc->state == SHDLC_CONNECTING &&
		     shdlc->connect_tries > 0) ||
		    (shdlc->state == SHDLC_NEGOTIATING)) {
			llc_shdlc_connect_complete(shdlc, r: 0);
			shdlc->state = SHDLC_CONNECTED;
		}
		break;
	default:
		break;
	}

	kfree_skb(skb);
}
452 | |
/*
 * Drain rcv_q: decode each frame's control byte and dispatch to the
 * I/S/U frame handlers. Runs from the state machine worker.
 */
static void llc_shdlc_handle_rcv_queue(struct llc_shdlc *shdlc)
{
	struct sk_buff *skb;
	u8 control;
	int nr;
	int ns;
	enum sframe_type s_frame_type;
	enum uframe_modifier u_frame_modifier;

	if (shdlc->rcv_q.qlen)
		pr_debug("rcvQlen=%d\n" , shdlc->rcv_q.qlen);

	while ((skb = skb_dequeue(list: &shdlc->rcv_q)) != NULL) {
		/* first byte is the shdlc control field; strip it */
		control = skb->data[0];
		skb_pull(skb, len: 1);
		switch (control & SHDLC_CONTROL_HEAD_MASK) {
		case SHDLC_CONTROL_HEAD_I:
		case SHDLC_CONTROL_HEAD_I2:
			/* any I-frame proves the peer is up */
			if (shdlc->state == SHDLC_HALF_CONNECTED)
				shdlc->state = SHDLC_CONNECTED;

			ns = (control & SHDLC_CONTROL_NS_MASK) >> 3;
			nr = control & SHDLC_CONTROL_NR_MASK;
			llc_shdlc_rcv_i_frame(shdlc, skb, ns, nr);
			break;
		case SHDLC_CONTROL_HEAD_S:
			if (shdlc->state == SHDLC_HALF_CONNECTED)
				shdlc->state = SHDLC_CONNECTED;

			s_frame_type = (control & SHDLC_CONTROL_TYPE_MASK) >> 3;
			nr = control & SHDLC_CONTROL_NR_MASK;
			llc_shdlc_rcv_s_frame(shdlc, s_frame_type, nr);
			kfree_skb(skb);
			break;
		case SHDLC_CONTROL_HEAD_U:
			u_frame_modifier = control & SHDLC_CONTROL_M_MASK;
			llc_shdlc_rcv_u_frame(shdlc, skb, u_frame_modifier);
			break;
		default:
			pr_err("UNKNOWN Control=%d\n" , control);
			kfree_skb(skb);
			break;
		}
	}
}
498 | |
/*
 * Number of I-frames sent but not yet acknowledged, computed as the
 * modulo-8 distance from dnr (oldest unacked) to ns (next to send).
 */
static int llc_shdlc_w_used(int ns, int dnr)
{
	return (ns >= dnr) ? ns - dnr : ns + 8 - dnr;
}
510 | |
511 | /* Send frames according to algorithm at spec:10.8.1 */ |
/*
 * Send frames according to algorithm at spec:10.8.1: drain send_q while
 * the window has room and the peer is ready, moving each sent frame to
 * ack_pending_q with its send timestamp stashed in skb->cb for T2.
 */
static void llc_shdlc_handle_send_queue(struct llc_shdlc *shdlc)
{
	struct sk_buff *skb;
	int r;
	unsigned long time_sent;

	if (shdlc->send_q.qlen)
		pr_debug("sendQlen=%d ns=%d dnr=%d rnr=%s w_room=%d unackQlen=%d\n" ,
			 shdlc->send_q.qlen, shdlc->ns, shdlc->dnr,
			 shdlc->rnr == false ? "false" : "true" ,
			 shdlc->w - llc_shdlc_w_used(shdlc->ns, shdlc->dnr),
			 shdlc->ack_pending_q.qlen);

	while (shdlc->send_q.qlen && shdlc->ack_pending_q.qlen < shdlc->w &&
	       (shdlc->rnr == false)) {

		/* the outgoing I-frame piggybacks our ack; T1 is moot */
		if (shdlc->t1_active) {
			del_timer_sync(timer: &shdlc->t1_timer);
			shdlc->t1_active = false;
			pr_debug("Stopped T1(send ack)\n" );
		}

		skb = skb_dequeue(list: &shdlc->send_q);

		/* control byte: I-frame head, N(S) in bits 5..3, N(R) low */
		*(u8 *)skb_push(skb, len: 1) = SHDLC_CONTROL_HEAD_I | (shdlc->ns << 3) |
					    shdlc->nr;

		pr_debug("Sending I-Frame %d, waiting to rcv %d\n" , shdlc->ns,
			 shdlc->nr);
		SHDLC_DUMP_SKB("shdlc frame written" , skb);

		r = shdlc->xmit_to_drv(shdlc->hdev, skb);
		if (r < 0) {
			shdlc->hard_fault = r;
			break;
		}

		shdlc->ns = (shdlc->ns + 1) % 8;

		/* record send time in cb; llc_shdlc_reset_t2() reads it */
		time_sent = jiffies;
		*(unsigned long *)skb->cb = time_sent;

		skb_queue_tail(list: &shdlc->ack_pending_q, newsk: skb);

		if (shdlc->t2_active == false) {
			shdlc->t2_active = true;
			mod_timer(timer: &shdlc->t2_timer, expires: time_sent +
				  msecs_to_jiffies(SHDLC_T2_VALUE_MS));
			pr_debug("Started T2 (retransmit)\n" );
		}
	}
}
564 | |
/* Connect (T3) timer expiry: kick the state machine to retry or fail */
static void llc_shdlc_connect_timeout(struct timer_list *t)
{
	struct llc_shdlc *shdlc = from_timer(shdlc, t, connect_timer);

	schedule_work(work: &shdlc->sm_work);
}
571 | |
/* T1 (send-ack) timer expiry: worker will emit an RR S-frame */
static void llc_shdlc_t1_timeout(struct timer_list *t)
{
	struct llc_shdlc *shdlc = from_timer(shdlc, t, t1_timer);

	pr_debug("SoftIRQ: need to send ack\n" );

	schedule_work(work: &shdlc->sm_work);
}
580 | |
/* T2 (retransmit) timer expiry: worker will requeue unacked frames */
static void llc_shdlc_t2_timeout(struct timer_list *t)
{
	struct llc_shdlc *shdlc = from_timer(shdlc, t, t2_timer);

	pr_debug("SoftIRQ: need to retransmit\n" );

	schedule_work(work: &shdlc->sm_work);
}
589 | |
/*
 * State machine worker: runs under state_mutex and is the only place
 * frames are actually processed/sent and timer expiries are acted upon.
 * Timer callbacks and frame receipt merely schedule this work.
 */
static void llc_shdlc_sm_work(struct work_struct *work)
{
	struct llc_shdlc *shdlc = container_of(work, struct llc_shdlc, sm_work);
	int r;

	mutex_lock(&shdlc->state_mutex);

	switch (shdlc->state) {
	case SHDLC_DISCONNECTED:
		/* drop everything still queued on a dead link */
		skb_queue_purge(list: &shdlc->rcv_q);
		skb_queue_purge(list: &shdlc->send_q);
		skb_queue_purge(list: &shdlc->ack_pending_q);
		break;
	case SHDLC_CONNECTING:
		if (shdlc->hard_fault) {
			llc_shdlc_connect_complete(shdlc, r: shdlc->hard_fault);
			break;
		}

		/* send RSET, up to 5 attempts before giving up */
		if (shdlc->connect_tries++ < 5)
			r = llc_shdlc_connect_initiate(shdlc);
		else
			r = -ETIME;
		if (r < 0) {
			llc_shdlc_connect_complete(shdlc, r);
		} else {
			mod_timer(timer: &shdlc->connect_timer, expires: jiffies +
				  msecs_to_jiffies(SHDLC_CONNECT_VALUE_MS));

			shdlc->state = SHDLC_NEGOTIATING;
		}
		break;
	case SHDLC_NEGOTIATING:
		/* connect timer expired with no answer: retry RSET */
		if (timer_pending(timer: &shdlc->connect_timer) == 0) {
			shdlc->state = SHDLC_CONNECTING;
			schedule_work(work: &shdlc->sm_work);
		}

		llc_shdlc_handle_rcv_queue(shdlc);

		if (shdlc->hard_fault) {
			llc_shdlc_connect_complete(shdlc, r: shdlc->hard_fault);
			break;
		}
		break;
	case SHDLC_HALF_CONNECTED:
	case SHDLC_CONNECTED:
		llc_shdlc_handle_rcv_queue(shdlc);
		llc_shdlc_handle_send_queue(shdlc);

		/* T1 expired: ack received frames with an RR S-frame */
		if (shdlc->t1_active && timer_pending(timer: &shdlc->t1_timer) == 0) {
			pr_debug("Handle T1(send ack) elapsed (T1 now inactive)\n" );

			shdlc->t1_active = false;
			r = llc_shdlc_send_s_frame(shdlc, sframe_type: S_FRAME_RR,
						   nr: shdlc->nr);
			if (r < 0)
				shdlc->hard_fault = r;
		}

		/* T2 expired: retransmit every unacked frame */
		if (shdlc->t2_active && timer_pending(timer: &shdlc->t2_timer) == 0) {
			pr_debug("Handle T2(retransmit) elapsed (T2 inactive)\n" );

			shdlc->t2_active = false;

			llc_shdlc_requeue_ack_pending(shdlc);
			llc_shdlc_handle_send_queue(shdlc);
		}

		if (shdlc->hard_fault)
			shdlc->llc_failure(shdlc->hdev, shdlc->hard_fault);
		break;
	default:
		break;
	}
	mutex_unlock(lock: &shdlc->state_mutex);
}
667 | |
668 | /* |
669 | * Called from syscall context to establish shdlc link. Sleeps until |
670 | * link is ready or failure. |
671 | */ |
/*
 * Called from syscall context to establish shdlc link. Sleeps until
 * link is ready or failure. The wait queue lives on this stack frame;
 * connect_complete() must wake it before this function returns.
 * Returns 0 on success or a negative errno.
 */
static int llc_shdlc_connect(struct llc_shdlc *shdlc)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(connect_wq);

	mutex_lock(&shdlc->state_mutex);

	shdlc->state = SHDLC_CONNECTING;
	shdlc->connect_wq = &connect_wq;
	shdlc->connect_tries = 0;
	shdlc->connect_result = 1;	/* 1 = still pending */

	mutex_unlock(lock: &shdlc->state_mutex);

	schedule_work(work: &shdlc->sm_work);

	wait_event(connect_wq, shdlc->connect_result != 1);

	return shdlc->connect_result;
}
691 | |
692 | static void llc_shdlc_disconnect(struct llc_shdlc *shdlc) |
693 | { |
694 | mutex_lock(&shdlc->state_mutex); |
695 | |
696 | shdlc->state = SHDLC_DISCONNECTED; |
697 | |
698 | mutex_unlock(lock: &shdlc->state_mutex); |
699 | |
700 | schedule_work(work: &shdlc->sm_work); |
701 | } |
702 | |
703 | /* |
704 | * Receive an incoming shdlc frame. Frame has already been crc-validated. |
705 | * skb contains only LLC header and payload. |
706 | * If skb == NULL, it is a notification that the link below is dead. |
707 | */ |
708 | static void llc_shdlc_recv_frame(struct llc_shdlc *shdlc, struct sk_buff *skb) |
709 | { |
710 | if (skb == NULL) { |
711 | pr_err("NULL Frame -> link is dead\n" ); |
712 | shdlc->hard_fault = -EREMOTEIO; |
713 | } else { |
714 | SHDLC_DUMP_SKB("incoming frame" , skb); |
715 | skb_queue_tail(list: &shdlc->rcv_q, newsk: skb); |
716 | } |
717 | |
718 | schedule_work(work: &shdlc->sm_work); |
719 | } |
720 | |
/*
 * Allocate and initialize an shdlc LLC context (nfc_llc_ops.init).
 * Reports via *rx_headroom/*rx_tailroom the extra space this LLC needs
 * on received frames. Returns the opaque context or NULL on OOM; the
 * caller releases it through llc_shdlc_deinit().
 */
static void *llc_shdlc_init(struct nfc_hci_dev *hdev, xmit_to_drv_t xmit_to_drv,
			    rcv_to_hci_t rcv_to_hci, int tx_headroom,
			    int tx_tailroom, int *rx_headroom, int *rx_tailroom,
			    llc_failure_t llc_failure)
{
	struct llc_shdlc *shdlc;

	*rx_headroom = SHDLC_LLC_HEAD_ROOM;
	*rx_tailroom = 0;

	shdlc = kzalloc(size: sizeof(struct llc_shdlc), GFP_KERNEL);
	if (shdlc == NULL)
		return NULL;

	mutex_init(&shdlc->state_mutex);
	shdlc->state = SHDLC_DISCONNECTED;

	timer_setup(&shdlc->connect_timer, llc_shdlc_connect_timeout, 0);
	timer_setup(&shdlc->t1_timer, llc_shdlc_t1_timeout, 0);
	timer_setup(&shdlc->t2_timer, llc_shdlc_t2_timeout, 0);

	/* defaults; may be renegotiated by the peer's RSET parameters */
	shdlc->w = SHDLC_MAX_WINDOW;
	shdlc->srej_support = SHDLC_SREJ_SUPPORT;

	skb_queue_head_init(list: &shdlc->rcv_q);
	skb_queue_head_init(list: &shdlc->send_q);
	skb_queue_head_init(list: &shdlc->ack_pending_q);

	INIT_WORK(&shdlc->sm_work, llc_shdlc_sm_work);

	shdlc->hdev = hdev;
	shdlc->xmit_to_drv = xmit_to_drv;
	shdlc->rcv_to_hci = rcv_to_hci;
	shdlc->tx_headroom = tx_headroom;
	shdlc->tx_tailroom = tx_tailroom;
	shdlc->llc_failure = llc_failure;

	return shdlc;
}
760 | |
761 | static void llc_shdlc_deinit(struct nfc_llc *llc) |
762 | { |
763 | struct llc_shdlc *shdlc = nfc_llc_get_data(llc); |
764 | |
765 | skb_queue_purge(list: &shdlc->rcv_q); |
766 | skb_queue_purge(list: &shdlc->send_q); |
767 | skb_queue_purge(list: &shdlc->ack_pending_q); |
768 | |
769 | kfree(objp: shdlc); |
770 | } |
771 | |
/*
 * Bring the link up (nfc_llc_ops.start); blocks until the shdlc
 * connection succeeds or fails. Returns 0 or a negative errno.
 */
static int llc_shdlc_start(struct nfc_llc *llc)
{
	return llc_shdlc_connect(nfc_llc_get_data(llc));
}
778 | |
/* Bring the link down (nfc_llc_ops.stop); always succeeds */
static int llc_shdlc_stop(struct nfc_llc *llc)
{
	llc_shdlc_disconnect(nfc_llc_get_data(llc));

	return 0;
}
787 | |
/* Driver-facing receive hook (nfc_llc_ops.rcv_from_drv) */
static void llc_shdlc_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb)
{
	llc_shdlc_recv_frame(nfc_llc_get_data(llc), skb);
}
794 | |
795 | static int llc_shdlc_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb) |
796 | { |
797 | struct llc_shdlc *shdlc = nfc_llc_get_data(llc); |
798 | |
799 | skb_queue_tail(list: &shdlc->send_q, newsk: skb); |
800 | |
801 | schedule_work(work: &shdlc->sm_work); |
802 | |
803 | return 0; |
804 | } |
805 | |
/* Ops vtable registered with the NFC HCI LLC core */
static const struct nfc_llc_ops llc_shdlc_ops = {
	.init = llc_shdlc_init,
	.deinit = llc_shdlc_deinit,
	.start = llc_shdlc_start,
	.stop = llc_shdlc_stop,
	.rcv_from_drv = llc_shdlc_rcv_from_drv,
	.xmit_from_hci = llc_shdlc_xmit_from_hci,
};
814 | |
815 | int nfc_llc_shdlc_register(void) |
816 | { |
817 | return nfc_llc_register(LLC_SHDLC_NAME, ops: &llc_shdlc_ops); |
818 | } |
819 | |