// SPDX-License-Identifier: GPL-2.0+
/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 */

/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/string_helpers.h>
#include <linux/usb/composite.h>

#include "u_ether.h"


/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code; such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS. That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used. Each end of the link uses one address. The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link. (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */
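
/*
 * A function driver typically uses this component as sketched below.
 * This is an orientation-only sketch: error handling and the struct
 * gether endpoint/field setup are elided, and the variable names are
 * hypothetical rather than part of this API.
 *
 *	struct eth_dev *ethdev;
 *	struct net_device *net;
 *
 *	ethdev = gether_setup_name(gadget, dev_addr, host_addr,
 *				   host_ethaddr, qmult, "usb");
 *	...
 *	net = gether_connect(&port);	(from set_alt(), with port.ioport
 *					 pointing at ethdev; check the
 *					 result with IS_ERR())
 *	...
 *	gether_disconnect(&port);	(on disconnect or unbind)
 *	gether_cleanup(ethdev);
 */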

#define UETH__VERSION	"29-May-2008"

/* Experiments show that both Linux and Windows hosts allow up to 16k
 * frame sizes. Set the max MTU size to 15k+52 to prevent allocating 32k
 * blocks and still have efficient handling. */
#define GETHER_MAX_MTU_SIZE 15412
#define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN)
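
/* Spelled out: GETHER_MAX_MTU_SIZE is 15 * 1024 + 52 = 15412 bytes, so
 * GETHER_MAX_ETH_FRAME_LEN is 15412 + ETH_HLEN (14) = 15426 bytes, which
 * keeps even a maximal frame comfortably below a 16k (16384 byte) block.
 */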

struct eth_dev {
	/* lock is held while accessing port_usb
	 */
	spinlock_t lock;
	struct gether *port_usb;

	struct net_device *net;
	struct usb_gadget *gadget;

	spinlock_t req_lock;	/* guard {rx,tx}_reqs */
	struct list_head tx_reqs, rx_reqs;
	atomic_t tx_qlen;

	struct sk_buff_head rx_frames;

	unsigned qmult;

	unsigned header_len;
	struct sk_buff *(*wrap)(struct gether *, struct sk_buff *skb);
	int (*unwrap)(struct gether *,
				struct sk_buff *skb,
				struct sk_buff_head *list);

	struct work_struct work;

	unsigned long todo;
#define WORK_RX_MEMORY 0

	bool zlp;
	bool no_skb_reserve;
	bool ifname_set;
	u8 host_mac[ETH_ALEN];
	u8 dev_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA	20	/* bytes guarding against rx overflows */

#define DEFAULT_QLEN	2	/* double buffering by default */

/* use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
{
	if (gadget->speed == USB_SPEED_HIGH || gadget->speed >= USB_SPEED_SUPER)
		return qmult * DEFAULT_QLEN;
	else
		return DEFAULT_QLEN;
}
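
/* For example, at high or super speed with qmult = 5 (the usual
 * QMULT_DEFAULT from u_ether.h), qlen() yields 5 * 2 = 10 requests per
 * direction; at full speed it stays at the double-buffered minimum of 2.
 */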

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev *dev = netdev_priv(net);

	strscpy(p->driver, "g_ether", sizeof(p->driver));
	strscpy(p->version, UETH__VERSION, sizeof(p->version));
	strscpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
	strscpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
}

/* REVISIT can also support:
 * - WOL (by tracking suspends and issuing remote wakeup)
 * - msglevel (implies updated messaging)
 * - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
};

static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;
	if (!schedule_work(&dev->work))
		ERROR(dev, "kevent %d may have been dropped\n", flag);
	else
		DBG(dev, "kevent %d scheduled\n", flag);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct usb_gadget *g = dev->gadget;
	struct sk_buff *skb;
	int retval = -ENOMEM;
	size_t size = 0;
	struct usb_ep *out;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;

	if (!out) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return -ENOTCONN;
	}

	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated. Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet. That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;

	if (g->quirk_ep_out_aligned_size) {
		size += out->maxpacket - 1;
		size -= size % out->maxpacket;
	}

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);
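
	/* Worked example with illustrative numbers: for a 1500-byte MTU,
	 * no function header, and a 512-byte bulk maxpacket, size is
	 * 14 + 1500 + RX_EXTRA (20) = 1534; the quirk path above would
	 * round that up to 1536, the next multiple of maxpacket.
	 */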
	spin_unlock_irqrestore(&dev->lock, flags);

	skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise. Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	if (likely(!dev->no_skb_reserve))
		skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return retval;
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff *skb = req->context, *skb2;
	struct eth_dev *dev = ep->driver_data;
	int status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > GETHER_MAX_ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:	/* unlink */
	case -ESHUTDOWN:	/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:	/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		fallthrough;

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}

static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
	unsigned i;
	struct usb_request *req;

	if (!n)
		return -ENOMEM;

	/* queue/recycle up to N requests */
	i = n;
	list_for_each_entry(req, list, list) {
		if (i-- == 0)
			goto extra;
	}
	while (i--) {
		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!req)
			return list_empty(list) ? -ENOMEM : 0;
		list_add(&req->list, list);
	}
	return 0;

extra:
	/* free extras */
	for (;;) {
		struct list_head *next;

		next = req->list.next;
		list_del(&req->list);
		usb_ep_free_request(ep, req);

		if (next == list)
			break;

		req = container_of(next, struct usb_request, list);
	}
	return 0;
}

static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
	int status;

	spin_lock(&dev->req_lock);
	status = prealloc(&dev->tx_reqs, link->in_ep, n);
	if (status < 0)
		goto fail;
	status = prealloc(&dev->rx_reqs, link->out_ep, n);
	if (status < 0)
		goto fail;
	goto done;
fail:
	DBG(dev, "can't alloc requests\n");
done:
	spin_unlock(&dev->req_lock);
	return status;
}

static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request *req;
	unsigned long flags;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}

static void eth_work(struct work_struct *work)
{
	struct eth_dev *dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running(dev->net))
			rx_fill(dev, GFP_KERNEL);
	}

	if (dev->todo)
		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}

static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff *skb = req->context;
	struct eth_dev *dev = ep->driver_data;

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		fallthrough;
	case -ECONNRESET:	/* unlink */
	case -ESHUTDOWN:	/* disconnect etc */
		dev_kfree_skb_any(skb);
		break;
	case 0:
		dev->net->stats.tx_bytes += skb->len;
		dev_consume_skb_any(skb);
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);

	atomic_dec(&dev->tx_qlen);
	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

static int ether_wakeup_host(struct gether *port)
{
	int ret;
	struct usb_function *func = &port->func;
	struct usb_gadget *gadget = func->config->cdev->gadget;

	if (func->func_suspended)
		ret = usb_func_wakeup(func);
	else
		ret = usb_gadget_wakeup(gadget);

	return ret;
}

static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev *dev = netdev_priv(net);
	int length = 0;
	int retval;
	struct usb_request *req = NULL;
	unsigned long flags;
	struct usb_ep *in;
	u16 cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}

	if (dev->port_usb && dev->port_usb->is_suspend) {
		DBG(dev, "Port suspended. Triggering wakeup\n");
		netif_stop_queue(net);
		spin_unlock_irqrestore(&dev->lock, flags);
		ether_wakeup_host(dev->port_usb);
		return NETDEV_TX_BUSY;
	}

	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		if (skb)
			dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (skb && !is_promisc(cdc_filter)) {
		u8 *dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16 type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers.
	 * or there's not enough space for extra headers we need
	 */
	if (dev->wrap) {
		unsigned long flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb) {
			/* Multi frame CDC protocols may store the frame for
			 * later which is not a dropped frame.
			 */
			if (dev->port_usb &&
					dev->port_usb->supports_multi_frame)
				goto multiframe;
			goto drop;
		}
	}

	length = skb->len;
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb &&
			dev->port_usb->is_fixed &&
			length == dev->port_usb->fixed_in_len &&
			(length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * and some hardware doesn't like to write zlps.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
		length++;
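
	/* For example, with a 512-byte bulk maxpacket and dev->zlp clear,
	 * a 1024-byte frame is queued as 1025 bytes: the trailing byte
	 * ends the transfer with a short packet, standing in for a ZLP
	 * the hardware cannot send.
	 */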

	req->length = length;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		netif_trans_update(net);
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
multiframe:
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set(&dev->tx_qlen, 0);
	netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
	struct eth_dev *dev = netdev_priv(net);
	struct gether *link;

	DBG(dev, "%s\n", __func__);
	if (netif_carrier_ok(dev->net))
		eth_start(dev, GFP_KERNEL);

	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)
		link->open(link);
	spin_unlock_irq(&dev->lock);

	return 0;
}

static int eth_stop(struct net_device *net)
{
	struct eth_dev *dev = netdev_priv(net);
	unsigned long flags;

	VDBG(dev, "%s\n", __func__);
	netif_stop_queue(net);

	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
		dev->net->stats.rx_errors, dev->net->stats.tx_errors
		);

	/* ensure there are no more active requests */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		struct gether *link = dev->port_usb;
		const struct usb_endpoint_descriptor *in;
		const struct usb_endpoint_descriptor *out;

		if (link->close)
			link->close(link);

		/* NOTE: we have no abort-queue primitive we could use
		 * to cancel all pending I/O. Instead, we disable then
		 * reenable the endpoints ... this idiom may leave toggle
		 * wrong, but that's a self-correcting error.
		 *
		 * REVISIT: we *COULD* just let the transfers complete at
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		in = link->in_ep->desc;
		out = link->out_ep->desc;
		usb_ep_disable(link->in_ep);
		usb_ep_disable(link->out_ep);
		if (netif_carrier_ok(net)) {
			DBG(dev, "host still using in/out endpoints\n");
			link->in_ep->desc = in;
			link->out_ep->desc = out;
			usb_ep_enable(link->in_ep);
			usb_ep_enable(link->out_ep);
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*-------------------------------------------------------------------------*/

static int get_ether_addr(const char *str, u8 *dev_addr)
{
	if (str) {
		unsigned i;

		for (i = 0; i < 6; i++) {
			unsigned char num;

			if ((*str == '.') || (*str == ':'))
				str++;
			num = hex_to_bin(*str++) << 4;
			num |= hex_to_bin(*str++);
			dev_addr[i] = num;
		}
		if (is_valid_ether_addr(dev_addr))
			return 0;
	}
	eth_random_addr(dev_addr);
	return 1;
}
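
/* For example, both "02:23:45:67:89:ab" and "0223.4567.89ab" parse to
 * { 0x02, 0x23, 0x45, 0x67, 0x89, 0xab } and return 0; a NULL pointer or
 * an invalid address (multicast bit set, all zeros) instead stores a
 * random address and returns 1.
 */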

static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
{
	if (len < 18)
		return -EINVAL;

	snprintf(str, len, "%pM", dev_addr);
	return 18;
}
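
/* The 18-byte minimum above is the 17 visible characters of the "%pM"
 * form ("xx:xx:xx:xx:xx:xx") plus the terminating NUL that snprintf()
 * writes.
 */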

static const struct net_device_ops eth_netdev_ops = {
	.ndo_open = eth_open,
	.ndo_stop = eth_stop,
	.ndo_start_xmit = eth_start_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

static const struct device_type gadget_type = {
	.name = "gadget",
};

/*
 * gether_setup_name - initialize one ethernet-over-usb link
 * @g: gadget to associate with these links
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * @netname: name for network device (for example, "usb")
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework. The link layer addresses are
 * set up using module parameters.
 *
 * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
 */
struct eth_dev *gether_setup_name(struct usb_gadget *g,
		const char *dev_addr, const char *host_addr,
		u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname)
{
	struct eth_dev *dev;
	struct net_device *net;
	int status;
	u8 addr[ETH_ALEN];

	net = alloc_etherdev(sizeof *dev);
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	dev->qmult = qmult;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	if (get_ether_addr(dev_addr, addr)) {
		net->addr_assign_type = NET_ADDR_RANDOM;
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");
	} else {
		net->addr_assign_type = NET_ADDR_SET;
	}
	eth_hw_addr_set(net, addr);
	if (get_ether_addr(host_addr, dev->host_mac))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "host");

	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;

	/* MTU range: 14 - 15412 */
	net->min_mtu = ETH_HLEN;
	net->max_mtu = GETHER_MAX_MTU_SIZE;

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		free_netdev(net);
		dev = ERR_PTR(status);
	} else {
		INFO(dev, "MAC %pM\n", net->dev_addr);
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		/*
		 * two kinds of host-initiated state changes:
		 * - iff DATA transfer is active, carrier is "on"
		 * - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}

	return dev;
}
EXPORT_SYMBOL_GPL(gether_setup_name);

struct net_device *gether_setup_name_default(const char *netname)
{
	struct net_device *net;
	struct eth_dev *dev;

	net = alloc_etherdev(sizeof(*dev));
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	dev->qmult = QMULT_DEFAULT;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	eth_random_addr(dev->dev_mac);

	/* by default we always have a random MAC address */
	net->addr_assign_type = NET_ADDR_RANDOM;

	eth_random_addr(dev->host_mac);

	net->netdev_ops = &eth_netdev_ops;

	net->ethtool_ops = &ops;
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	/* MTU range: 14 - 15412 */
	net->min_mtu = ETH_HLEN;
	net->max_mtu = GETHER_MAX_MTU_SIZE;

	return net;
}
EXPORT_SYMBOL_GPL(gether_setup_name_default);

int gether_register_netdev(struct net_device *net)
{
	struct eth_dev *dev;
	struct usb_gadget *g;
	int status;

	if (!net->dev.parent)
		return -EINVAL;
	dev = netdev_priv(net);
	g = dev->gadget;

	eth_hw_addr_set(net, dev->dev_mac);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		return status;
	} else {
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);
		INFO(dev, "MAC %pM\n", dev->dev_mac);

		/* two kinds of host-initiated state changes:
		 * - iff DATA transfer is active, carrier is "on"
		 * - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}

	return status;
}
EXPORT_SYMBOL_GPL(gether_register_netdev);

void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
}
EXPORT_SYMBOL_GPL(gether_set_gadget);

int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
{
	struct eth_dev *dev;
	u8 new_addr[ETH_ALEN];

	dev = netdev_priv(net);
	if (get_ether_addr(dev_addr, new_addr))
		return -EINVAL;
	memcpy(dev->dev_mac, new_addr, ETH_ALEN);
	net->addr_assign_type = NET_ADDR_SET;
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_dev_addr);

int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len)
{
	struct eth_dev *dev;
	int ret;

	dev = netdev_priv(net);
	ret = get_ether_addr_str(dev->dev_mac, dev_addr, len);
	if (ret + 1 < len) {
		dev_addr[ret++] = '\n';
		dev_addr[ret] = '\0';
	}

	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_dev_addr);

int gether_set_host_addr(struct net_device *net, const char *host_addr)
{
	struct eth_dev *dev;
	u8 new_addr[ETH_ALEN];

	dev = netdev_priv(net);
	if (get_ether_addr(host_addr, new_addr))
		return -EINVAL;
	memcpy(dev->host_mac, new_addr, ETH_ALEN);
	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_host_addr);

int gether_get_host_addr(struct net_device *net, char *host_addr, int len)
{
	struct eth_dev *dev;
	int ret;

	dev = netdev_priv(net);
	ret = get_ether_addr_str(dev->host_mac, host_addr, len);
	if (ret + 1 < len) {
		host_addr[ret++] = '\n';
		host_addr[ret] = '\0';
	}

	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_host_addr);

int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
{
	struct eth_dev *dev;

	if (len < 13)
		return -EINVAL;

	dev = netdev_priv(net);
	snprintf(host_addr, len, "%pm", dev->host_mac);

	string_upper(host_addr, host_addr);

	return strlen(host_addr);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);
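
/* Here the 13-byte minimum covers the "%pm" form, which prints the six
 * octets as 12 hex digits with no separators, plus the terminating NUL.
 */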

void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN])
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	memcpy(host_mac, dev->host_mac, ETH_ALEN);
}
EXPORT_SYMBOL_GPL(gether_get_host_addr_u8);

void gether_set_qmult(struct net_device *net, unsigned qmult)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	dev->qmult = qmult;
}
EXPORT_SYMBOL_GPL(gether_set_qmult);

unsigned gether_get_qmult(struct net_device *net)
{
	struct eth_dev *dev;

	dev = netdev_priv(net);
	return dev->qmult;
}
EXPORT_SYMBOL_GPL(gether_get_qmult);

int gether_get_ifname(struct net_device *net, char *name, int len)
{
	struct eth_dev *dev = netdev_priv(net);
	int ret;

	rtnl_lock();
	ret = scnprintf(name, len, "%s\n",
			dev->ifname_set ? net->name : netdev_name(net));
	rtnl_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(gether_get_ifname);

int gether_set_ifname(struct net_device *net, const char *name, int len)
{
	struct eth_dev *dev = netdev_priv(net);
	char tmp[IFNAMSIZ];
	const char *p;

	if (name[len - 1] == '\n')
		len--;

	if (len >= sizeof(tmp))
		return -E2BIG;

	strscpy(tmp, name, len + 1);
	if (!dev_valid_name(tmp))
		return -EINVAL;

	/* Require exactly one %d, so binding will not fail with EEXIST. */
	p = strchr(name, '%');
	if (!p || p[1] != 'd' || strchr(p + 2, '%'))
		return -EINVAL;

	strncpy(net->name, tmp, sizeof(net->name));
	dev->ifname_set = true;

	return 0;
}
EXPORT_SYMBOL_GPL(gether_set_ifname);

void gether_suspend(struct gether *link)
{
	struct eth_dev *dev = link->ioport;
	unsigned long flags;

	if (!dev)
		return;

	if (atomic_read(&dev->tx_qlen)) {
		/*
		 * There is a transfer in progress. So we trigger a remote
		 * wakeup to inform the host.
		 */
		ether_wakeup_host(dev->port_usb);
		return;
	}
	spin_lock_irqsave(&dev->lock, flags);
	link->is_suspend = true;
	spin_unlock_irqrestore(&dev->lock, flags);
}
EXPORT_SYMBOL_GPL(gether_suspend);

void gether_resume(struct gether *link)
{
	struct eth_dev *dev = link->ioport;
	unsigned long flags;

	if (!dev)
		return;

	if (netif_queue_stopped(dev->net))
		netif_start_queue(dev->net);

	spin_lock_irqsave(&dev->lock, flags);
	link->is_suspend = false;
	spin_unlock_irqrestore(&dev->lock, flags);
}
EXPORT_SYMBOL_GPL(gether_resume);

/*
 * gether_cleanup - remove Ethernet-over-USB device
 * Context: may sleep
 *
 * This is called to free all resources allocated by @gether_setup().
 */
void gether_cleanup(struct eth_dev *dev)
{
	if (!dev)
		return;

	unregister_netdev(dev->net);
	flush_work(&dev->work);
	free_netdev(dev->net);
}
EXPORT_SYMBOL_GPL(gether_cleanup);

/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect"). It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify net_device pointer returned using IS_ERR(). If it doesn't
 * indicate some error code (negative errno), ep->driver_data values
 * have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
	struct eth_dev *dev = link->ioport;
	int result = 0;

	if (!dev)
		return ERR_PTR(-EINVAL);

	link->in_ep->driver_data = dev;
	result = usb_ep_enable(link->in_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->in_ep->name, result);
		goto fail0;
	}

	link->out_ep->driver_data = dev;
	result = usb_ep_enable(link->out_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->out_ep->name, result);
		goto fail1;
	}

	if (result == 0)
		result = alloc_requests(dev, link, qlen(dev->gadget,
					dev->qmult));

	if (result == 0) {
		dev->zlp = link->is_zlp_ok;
		dev->no_skb_reserve = gadget_avoids_skb_reserve(dev->gadget);
		DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult));

		dev->header_len = link->header_len;
		dev->unwrap = link->unwrap;
		dev->wrap = link->wrap;

		spin_lock(&dev->lock);
		dev->port_usb = link;
		if (netif_running(dev->net)) {
			if (link->open)
				link->open(link);
		} else {
			if (link->close)
				link->close(link);
		}
		spin_unlock(&dev->lock);

		netif_carrier_on(dev->net);
		if (netif_running(dev->net))
			eth_start(dev, GFP_ATOMIC);

		netif_device_attach(dev->net);

	/* on error, disable any endpoints */
	} else {
		(void) usb_ep_disable(link->out_ep);
fail1:
		(void) usb_ep_disable(link->in_ep);
	}
fail0:
	/* caller is responsible for cleanup on error */
	if (result < 0)
		return ERR_PTR(result);
	return dev->net;
}
EXPORT_SYMBOL_GPL(gether_connect);

/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
	struct eth_dev *dev = link->ioport;
	struct usb_request *req;

	WARN_ON(!dev);
	if (!dev)
		return;

	DBG(dev, "%s\n", __func__);

	netif_device_detach(dev->net);
	netif_carrier_off(dev->net);

	/* disable endpoints, forcing (synchronous) completion
	 * of all pending i/o. then free the request objects
	 * and forget about the endpoints.
	 */
	usb_ep_disable(link->in_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->tx_reqs)) {
		req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->in_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->in_ep->desc = NULL;

	usb_ep_disable(link->out_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->rx_reqs)) {
		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->out_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->out_ep->desc = NULL;

	/* finish forgetting about this USB link episode */
	dev->header_len = 0;
	dev->unwrap = NULL;
	dev->wrap = NULL;

	spin_lock(&dev->lock);
	dev->port_usb = NULL;
	link->is_suspend = false;
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(gether_disconnect);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Brownell");