// SPDX-License-Identifier: GPL-2.0
/*
 * MUSB OTG driver peripheral support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include "musb_core.h"
#include "musb_trace.h"


/* ----------------------------------------------------------------------- */

#define is_buffer_mapped(req) (is_dma_capable() && \
		(req->map_state != UN_MAPPED))
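/*
 * Reader's aid, summarizing the map_state values managed by
 * map_dma_buffer()/unmap_dma_buffer() below (not databook text):
 *
 *	UN_MAPPED   - PIO transfer, or no usable DMA mapping
 *	MUSB_MAPPED - this driver mapped the buffer and must unmap it
 *	PRE_MAPPED  - the caller supplied request.dma; we only sync it
 */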

/* Maps the buffer to dma  */

static inline void map_dma_buffer(struct musb_request *request,
			struct musb *musb, struct musb_ep *musb_ep)
{
	int compatible = true;
	struct dma_controller *dma = musb->dma_controller;

	request->map_state = UN_MAPPED;

	if (!is_dma_capable() || !musb_ep->dma)
		return;

	/* Check if DMA engine can handle this request.
	 * DMA code must reject the USB request explicitly.
	 * Default behaviour is to map the request.
	 */
	if (dma->is_compatible)
		compatible = dma->is_compatible(musb_ep->dma,
				musb_ep->packet_sz, request->request.buf,
				request->request.length);
	if (!compatible)
		return;

	if (request->request.dma == DMA_ADDR_INVALID) {
		dma_addr_t dma_addr;
		int ret;

		dma_addr = dma_map_single(
				musb->controller,
				request->request.buf,
				request->request.length,
				request->tx
					? DMA_TO_DEVICE
					: DMA_FROM_DEVICE);
		ret = dma_mapping_error(musb->controller, dma_addr);
		if (ret)
			return;

		request->request.dma = dma_addr;
		request->map_state = MUSB_MAPPED;
	} else {
		dma_sync_single_for_device(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
		request->map_state = PRE_MAPPED;
	}
}

/* Unmap the buffer from dma and maps it back to cpu */
static inline void unmap_dma_buffer(struct musb_request *request,
				struct musb *musb)
{
	struct musb_ep *musb_ep = request->ep;

	if (!is_buffer_mapped(request) || !musb_ep->dma)
		return;

	if (request->request.dma == DMA_ADDR_INVALID) {
		dev_vdbg(musb->controller,
				"not unmapping a never mapped buffer\n");
		return;
	}
	if (request->map_state == MUSB_MAPPED) {
		dma_unmap_single(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
		request->request.dma = DMA_ADDR_INVALID;
	} else {				/* PRE_MAPPED */
		dma_sync_single_for_cpu(musb->controller,
			request->request.dma,
			request->request.length,
			request->tx
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE);
	}
	request->map_state = UN_MAPPED;
}
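/*
 * Sketch of the PRE_MAPPED path the helpers above support: a caller
 * may map the buffer itself before queueing, e.g. with the generic
 * gadget helpers (hedged example; error handling elided):
 *
 *	ret = usb_gadget_map_request(&musb->g, req, is_in);
 *	...queue, complete...
 *	usb_gadget_unmap_request(&musb->g, req, is_in);
 */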

/*
 * Immediately complete a request.
 *
 * @param ep the endpoint the request was queued to
 * @param request the request to complete
 * @param status the status to complete the request with
 * Context: controller locked, IRQs blocked.
 */
void musb_g_giveback(
	struct musb_ep		*ep,
	struct usb_request	*request,
	int			status)
__releases(ep->musb->lock)
__acquires(ep->musb->lock)
{
	struct musb_request	*req;
	struct musb		*musb;
	int			busy = ep->busy;

	req = to_musb_request(request);

	list_del(&req->list);
	if (req->request.status == -EINPROGRESS)
		req->request.status = status;
	musb = req->musb;

	ep->busy = 1;
	spin_unlock(&musb->lock);

	if (!dma_mapping_error(&musb->g.dev, request->dma))
		unmap_dma_buffer(req, musb);

	trace_musb_req_gb(req);
	usb_gadget_giveback_request(&req->ep->end_point, &req->request);
	spin_lock(&musb->lock);
	ep->busy = busy;
}
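/*
 * Illustrative sketch (not code from this driver): the function
 * driver's completion callback runs from usb_gadget_giveback_request()
 * above, with the controller lock dropped. The names my_complete,
 * my_dev and the helpers are hypothetical:
 *
 *	static void my_complete(struct usb_ep *ep, struct usb_request *req)
 *	{
 *		struct my_dev *dev = req->context;
 *
 *		if (req->status == 0)
 *			consume(dev, req->buf, req->actual);
 *		else if (req->status != -ESHUTDOWN)
 *			recover(dev, ep, req);	// requeueing here is legal
 *	}
 */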

/* ----------------------------------------------------------------------- */

/*
 * Abort requests queued to an endpoint using the status. Synchronous.
 * caller locked controller and blocked irqs, and selected this ep.
 */
static void nuke(struct musb_ep *ep, const int status)
{
	struct musb		*musb = ep->musb;
	struct musb_request	*req = NULL;
	void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;

	ep->busy = 1;

	if (is_dma_capable() && ep->dma) {
		struct dma_controller	*c = ep->musb->dma_controller;
		int value;

		if (ep->is_in) {
			/*
			 * The programming guide says that we must not clear
			 * the DMAMODE bit before DMAENAB, so we only
			 * clear it in the second write...
			 */
			musb_writew(epio, MUSB_TXCSR,
				    MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_TXCSR,
					0 | MUSB_TXCSR_FLUSHFIFO);
		} else {
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
			musb_writew(epio, MUSB_RXCSR,
					0 | MUSB_RXCSR_FLUSHFIFO);
		}

		value = c->channel_abort(ep->dma);
		musb_dbg(musb, "%s: abort DMA --> %d", ep->name, value);
		c->channel_release(ep->dma);
		ep->dma = NULL;
	}

	while (!list_empty(&ep->req_list)) {
		req = list_first_entry(&ep->req_list, struct musb_request, list);
		musb_g_giveback(ep, &req->request, status);
	}
}

/* ----------------------------------------------------------------------- */

/* Data transfers - pure PIO, pure DMA, or mixed mode */

/*
 * This assumes the separate CPPI engine is responding to DMA requests
 * from the usb core ... sequenced a bit differently from mentor dma.
 */

static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
{
	if (can_bulk_split(musb, ep->type))
		return ep->hw_ep->max_packet_sz_tx;
	else
		return ep->packet_sz;
}
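/*
 * Worked example (assumed numbers): with a 4096-byte hardware TX FIFO
 * and a 512-byte high-speed bulk maxpacket, can_bulk_split() lets one
 * FIFO load carry up to eight packets, so this returns 4096; without
 * bulk splitting it returns the 512-byte packet size.
 */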

/*
 * An endpoint is transmitting data. This can be called either from
 * the IRQ routine or from ep.queue() to kickstart a request on an
 * endpoint.
 *
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void txstate(struct musb *musb, struct musb_request *req)
{
	u8			epnum = req->epnum;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct usb_request	*request;
	u16			fifo_count = 0, csr;
	int			use_dma = 0;

	musb_ep = req->ep;

	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		musb_dbg(musb, "ep:%s disabled - ignore request",
						musb_ep->end_point.name);
		return;
	}

	/* we shouldn't get here while DMA is active ... but we do ... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		musb_dbg(musb, "dma pending...");
		return;
	}

	/* read TXCSR before */
	csr = musb_readw(epio, MUSB_TXCSR);

	request = &req->request;
	fifo_count = min(max_ep_writesize(musb, musb_ep),
			(int)(request->length - request->actual));

	if (csr & MUSB_TXCSR_TXPKTRDY) {
		musb_dbg(musb, "%s old packet still ready, txcsr %03x",
				musb_ep->end_point.name, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_SENDSTALL) {
		musb_dbg(musb, "%s stalling, txcsr %03x",
				musb_ep->end_point.name, csr);
		return;
	}

	musb_dbg(musb, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x",
			epnum, musb_ep->packet_sz, fifo_count,
			csr);

#ifndef	CONFIG_MUSB_PIO_ONLY
	if (is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		size_t request_size;

		/* setup DMA, then program endpoint CSR */
		request_size = min_t(size_t, request->length - request->actual,
					musb_ep->dma->max_len);

		use_dma = (request->dma != DMA_ADDR_INVALID && request_size);

		/* MUSB_TXCSR_P_ISO is still set correctly */

		if (musb_dma_inventra(musb) || musb_dma_ux500(musb)) {
			if (request_size < musb_ep->packet_sz)
				musb_ep->dma->desired_mode = 0;
			else
				musb_ep->dma->desired_mode = 1;

			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					musb_ep->dma->desired_mode,
					request->dma + request->actual, request_size);
			if (use_dma) {
				if (musb_ep->dma->desired_mode == 0) {
					/*
					 * We must not clear the DMAMODE bit
					 * before the DMAENAB bit -- and the
					 * latter doesn't always get cleared
					 * before we get here...
					 */
					csr &= ~(MUSB_TXCSR_AUTOSET
						| MUSB_TXCSR_DMAENAB);
					musb_writew(epio, MUSB_TXCSR, csr
						| MUSB_TXCSR_P_WZC_BITS);
					csr &= ~MUSB_TXCSR_DMAMODE;
					csr |= (MUSB_TXCSR_DMAENAB |
							MUSB_TXCSR_MODE);
					/* against programming guide */
				} else {
					csr |= (MUSB_TXCSR_DMAENAB
							| MUSB_TXCSR_DMAMODE
							| MUSB_TXCSR_MODE);
					/*
					 * Enable Autoset according to table
					 * below
					 * bulk_split hb_mult	Autoset_Enable
					 *	0	0	Yes(Normal)
					 *	0	>0	No(High BW ISO)
					 *	1	0	Yes(HS bulk)
					 *	1	>0	Yes(FS bulk)
					 */
					if (!musb_ep->hb_mult ||
					    can_bulk_split(musb,
							   musb_ep->type))
						csr |= MUSB_TXCSR_AUTOSET;
				}
				csr &= ~MUSB_TXCSR_P_UNDERRUN;

				musb_writew(epio, MUSB_TXCSR, csr);
			}
		}

		if (is_cppi_enabled(musb)) {
			/* program endpoint CSR first, then setup DMA */
			csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
			csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
				MUSB_TXCSR_MODE;
			musb_writew(epio, MUSB_TXCSR, (MUSB_TXCSR_P_WZC_BITS &
						~MUSB_TXCSR_P_UNDERRUN) | csr);

			/* ensure writebuffer is empty */
			csr = musb_readw(epio, MUSB_TXCSR);

			/*
			 * NOTE host side sets DMAENAB later than this; both are
			 * OK since the transfer dma glue (between CPPI and
			 * Mentor fifos) just tells CPPI it could start. Data
			 * only moves to the USB TX fifo when both fifos are
			 * ready.
			 */
			/*
			 * "mode" is irrelevant here; handle terminating ZLPs
			 * like PIO does, since the hardware RNDIS mode seems
			 * unreliable except for the
			 * last-packet-is-already-short case.
			 */
			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					0,
					request->dma + request->actual,
					request_size);
			if (!use_dma) {
				c->channel_release(musb_ep->dma);
				musb_ep->dma = NULL;
				csr &= ~MUSB_TXCSR_DMAENAB;
				musb_writew(epio, MUSB_TXCSR, csr);
				/* invariant: request->buf is non-null */
			}
		} else if (tusb_dma_omap(musb))
			use_dma = use_dma && c->channel_program(
					musb_ep->dma, musb_ep->packet_sz,
					request->zero,
					request->dma + request->actual,
					request_size);
	}
#endif

	if (!use_dma) {
		/*
		 * Unmap the dma buffer back to cpu if dma channel
		 * programming fails
		 */
		unmap_dma_buffer(req, musb);

		musb_write_fifo(musb_ep->hw_ep, fifo_count,
				(u8 *) (request->buf + request->actual));
		request->actual += fifo_count;
		csr |= MUSB_TXCSR_TXPKTRDY;
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(epio, MUSB_TXCSR, csr);
	}

	/* host may already have the data when this message shows... */
	musb_dbg(musb, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d",
			musb_ep->end_point.name, use_dma ? "dma" : "pio",
			request->actual, request->length,
			musb_readw(epio, MUSB_TXCSR),
			fifo_count,
			musb_readw(epio, MUSB_TXMAXP));
}

/*
 * FIFO state update (e.g. data ready).
 * Called from IRQ, with controller locked.
 */
void musb_g_tx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct musb_request	*req;
	struct usb_request	*request;
	u8 __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep = &musb->endpoints[epnum].ep_in;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);
	req = next_request(musb_ep);
	request = &req->request;

	csr = musb_readw(epio, MUSB_TXCSR);
	musb_dbg(musb, "<== %s, txcsr %04x", musb_ep->end_point.name, csr);

	dma = is_dma_capable() ? musb_ep->dma : NULL;

	/*
	 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
	 * probably rates reporting as a host error.
	 */
	if (csr & MUSB_TXCSR_P_SENTSTALL) {
		csr |=	MUSB_TXCSR_P_WZC_BITS;
		csr &= ~MUSB_TXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_TXCSR, csr);
		return;
	}

	if (csr & MUSB_TXCSR_P_UNDERRUN) {
		/* We NAKed, no big deal... little reason to care. */
		csr |=	MUSB_TXCSR_P_WZC_BITS;
		csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
				epnum, request);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/*
		 * SHOULD NOT HAPPEN... has with CPPI though, after
		 * changing SENDSTALL (and other cases); harmless?
		 */
		musb_dbg(musb, "%s dma still busy?", musb_ep->end_point.name);
		return;
	}

	if (req) {

		trace_musb_req_tx(req);

		if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
			csr |= MUSB_TXCSR_P_WZC_BITS;
			csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
				 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
			musb_writew(epio, MUSB_TXCSR, csr);
			/* Ensure writebuffer is empty. */
			csr = musb_readw(epio, MUSB_TXCSR);
			request->actual += musb_ep->dma->actual_len;
			musb_dbg(musb, "TXCSR%d %04x, DMA off, len %zu, req %p",
				epnum, csr, musb_ep->dma->actual_len, request);
		}

		/*
		 * First, maybe a terminating short packet. Some DMA
		 * engines might handle this by themselves.
		 */
		if ((request->zero && request->length)
			&& (request->length % musb_ep->packet_sz == 0)
			&& (request->actual == request->length)) {

			/*
			 * On DMA completion, FIFO may not be
			 * available yet...
			 */
			if (csr & MUSB_TXCSR_TXPKTRDY)
				return;

			musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
					| MUSB_TXCSR_TXPKTRDY);
			request->zero = 0;
		}

		if (request->actual == request->length) {
			musb_g_giveback(musb_ep, request, 0);
			/*
			 * In the giveback function the MUSB lock is
			 * released and acquired after sometime. During
			 * this time period the INDEX register could get
			 * changed by the gadget_queue function especially
			 * on SMP systems. Reselect the INDEX to be sure
			 * we are reading/modifying the right registers
			 */
			musb_ep_select(mbase, epnum);
			req = musb_ep->desc ? next_request(musb_ep) : NULL;
			if (!req) {
				musb_dbg(musb, "%s idle now",
					musb_ep->end_point.name);
				return;
			}
		}

		txstate(musb, req);
	}
}

/* ------------------------------------------------------------ */

/*
 * Context: controller locked, IRQs blocked, endpoint selected
 */
static void rxstate(struct musb *musb, struct musb_request *req)
{
	const u8		epnum = req->epnum;
	struct usb_request	*request = &req->request;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	unsigned		len = 0;
	u16			fifo_count;
	u16			csr = musb_readw(epio, MUSB_RXCSR);
	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];
	u8			use_mode_1;

	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	fifo_count = musb_ep->packet_sz;

	/* Check if EP is disabled */
	if (!musb_ep->desc) {
		musb_dbg(musb, "ep:%s disabled - ignore request",
						musb_ep->end_point.name);
		return;
	}

	/* We shouldn't get here while DMA is active, but we do... */
	if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
		musb_dbg(musb, "DMA pending...");
		return;
	}

	if (csr & MUSB_RXCSR_P_SENDSTALL) {
		musb_dbg(musb, "%s stalling, RXCSR %04x",
		    musb_ep->end_point.name, csr);
		return;
	}

	if (is_cppi_enabled(musb) && is_buffer_mapped(req)) {
		struct dma_controller	*c = musb->dma_controller;
		struct dma_channel	*channel = musb_ep->dma;

		/* NOTE: CPPI won't actually stop advancing the DMA
		 * queue after short packet transfers, so this is almost
		 * always going to run as IRQ-per-packet DMA so that
		 * faults will be handled correctly.
		 */
		if (c->channel_program(channel,
				musb_ep->packet_sz,
				!request->short_not_ok,
				request->dma + request->actual,
				request->length - request->actual)) {

			/* make sure that if an rxpkt arrived after the irq,
			 * the cppi engine will be ready to take it as soon
			 * as DMA is enabled
			 */
			csr &= ~(MUSB_RXCSR_AUTOCLEAR
					| MUSB_RXCSR_DMAMODE);
			csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
			musb_writew(epio, MUSB_RXCSR, csr);
			return;
		}
	}

	if (csr & MUSB_RXCSR_RXPKTRDY) {
		fifo_count = musb_readw(epio, MUSB_RXCOUNT);

		/*
		 * Enable Mode 1 on RX transfers only when short_not_ok flag
		 * is set. Currently short_not_ok flag is set only from
		 * file_storage and f_mass_storage drivers
		 */

		if (request->short_not_ok && fifo_count == musb_ep->packet_sz)
			use_mode_1 = 1;
		else
			use_mode_1 = 0;

		if (request->actual < request->length) {
			if (!is_buffer_mapped(req))
				goto buffer_aint_mapped;

			if (musb_dma_inventra(musb)) {
				struct dma_controller	*c;
				struct dma_channel	*channel;
				int			use_dma = 0;
				unsigned int transfer_size;

				c = musb->dma_controller;
				channel = musb_ep->dma;

	/* We use DMA Req mode 0 in rx_csr, and DMA controller operates in
	 * mode 0 only. So we do not get endpoint interrupts due to DMA
	 * completion. We only get interrupts from DMA controller.
	 *
	 * We could operate in DMA mode 1 if we knew the size of the transfer
	 * in advance. For mass storage class, request->length = what the host
	 * sends, so that'd work.  But for pretty much everything else,
	 * request->length is routinely more than what the host sends. For
	 * most of these gadgets, the end of the transfer is signified either
	 * by a short packet, or by filling the last byte of the buffer.
	 * (Sending extra data in that last packet should trigger an overflow
	 * fault.)  But in mode 1, we don't get DMA completion interrupt for
	 * short packets.
	 *
	 * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
	 * to get endpoint interrupt on every DMA req, but that didn't seem
	 * to work reliably.
	 *
	 * REVISIT an updated g_file_storage can set req->short_not_ok, which
	 * then becomes usable as a runtime "use mode 1" hint...
	 */

				/* Experimental: Mode1 works with mass storage use cases */
				if (use_mode_1) {
					csr |= MUSB_RXCSR_AUTOCLEAR;
					musb_writew(epio, MUSB_RXCSR, csr);
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);

					/*
					 * this special sequence (enabling and then
					 * disabling MUSB_RXCSR_DMAMODE) is required
					 * to get DMAReq to activate
					 */
					musb_writew(epio, MUSB_RXCSR,
						csr | MUSB_RXCSR_DMAMODE);
					musb_writew(epio, MUSB_RXCSR, csr);

					transfer_size = min_t(unsigned int,
							request->length -
							request->actual,
							channel->max_len);
					musb_ep->dma->desired_mode = 1;
				} else {
					if (!musb_ep->hb_mult &&
						musb_ep->hw_ep->rx_double_buffered)
						csr |= MUSB_RXCSR_AUTOCLEAR;
					csr |= MUSB_RXCSR_DMAENAB;
					musb_writew(epio, MUSB_RXCSR, csr);

					transfer_size = min(request->length - request->actual,
							(unsigned)fifo_count);
					musb_ep->dma->desired_mode = 0;
				}

				use_dma = c->channel_program(
						channel,
						musb_ep->packet_sz,
						channel->desired_mode,
						request->dma
						+ request->actual,
						transfer_size);

				if (use_dma)
					return;
			}

			if ((musb_dma_ux500(musb)) &&
				(request->actual < request->length)) {

				struct dma_controller *c;
				struct dma_channel *channel;
				unsigned int transfer_size = 0;

				c = musb->dma_controller;
				channel = musb_ep->dma;

				/* In case first packet is short */
				if (fifo_count < musb_ep->packet_sz)
					transfer_size = fifo_count;
				else if (request->short_not_ok)
					transfer_size = min_t(unsigned int,
							request->length -
							request->actual,
							channel->max_len);
				else
					transfer_size = min_t(unsigned int,
							request->length -
							request->actual,
							(unsigned)fifo_count);

				csr &= ~MUSB_RXCSR_DMAMODE;
				csr |= (MUSB_RXCSR_DMAENAB |
					MUSB_RXCSR_AUTOCLEAR);

				musb_writew(epio, MUSB_RXCSR, csr);

				if (transfer_size <= musb_ep->packet_sz) {
					musb_ep->dma->desired_mode = 0;
				} else {
					musb_ep->dma->desired_mode = 1;
					/* Mode must be set after DMAENAB */
					csr |= MUSB_RXCSR_DMAMODE;
					musb_writew(epio, MUSB_RXCSR, csr);
				}

				if (c->channel_program(channel,
							musb_ep->packet_sz,
							channel->desired_mode,
							request->dma
							+ request->actual,
							transfer_size))

					return;
			}

			len = request->length - request->actual;
			musb_dbg(musb, "%s OUT/RX pio fifo %d/%d, maxpacket %d",
					musb_ep->end_point.name,
					fifo_count, len,
					musb_ep->packet_sz);

			fifo_count = min_t(unsigned, len, fifo_count);

			if (tusb_dma_omap(musb)) {
				struct dma_controller *c = musb->dma_controller;
				struct dma_channel *channel = musb_ep->dma;
				u32 dma_addr = request->dma + request->actual;
				int ret;

				ret = c->channel_program(channel,
						musb_ep->packet_sz,
						channel->desired_mode,
						dma_addr,
						fifo_count);
				if (ret)
					return;
			}

			/*
			 * Unmap the dma buffer back to cpu if dma channel
			 * programming fails. This buffer is mapped if the
			 * channel allocation is successful
			 */
			unmap_dma_buffer(req, musb);

			/*
			 * Clear DMAENAB and AUTOCLEAR for the
			 * PIO mode transfer
			 */
			csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
			musb_writew(epio, MUSB_RXCSR, csr);

buffer_aint_mapped:
			fifo_count = min_t(unsigned int,
					request->length - request->actual,
					(unsigned int)fifo_count);
			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
					(request->buf + request->actual));
			request->actual += fifo_count;

			/* REVISIT if we left anything in the fifo, flush
			 * it and report -EOVERFLOW
			 */

			/* ack the read! */
			csr |= MUSB_RXCSR_P_WZC_BITS;
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	/* reach the end or short packet detected */
	if (request->actual == request->length ||
	    fifo_count < musb_ep->packet_sz)
		musb_g_giveback(musb_ep, request, 0);
}

/*
 * Data ready for a request; called from IRQ
 */
void musb_g_rx(struct musb *musb, u8 epnum)
{
	u16			csr;
	struct musb_request	*req;
	struct usb_request	*request;
	void __iomem		*mbase = musb->mregs;
	struct musb_ep		*musb_ep;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	struct dma_channel	*dma;
	struct musb_hw_ep	*hw_ep = &musb->endpoints[epnum];

	if (hw_ep->is_shared_fifo)
		musb_ep = &hw_ep->ep_in;
	else
		musb_ep = &hw_ep->ep_out;

	musb_ep_select(mbase, epnum);

	req = next_request(musb_ep);
	if (!req)
		return;

	trace_musb_req_rx(req);
	request = &req->request;

	csr = musb_readw(epio, MUSB_RXCSR);
	dma = is_dma_capable() ? musb_ep->dma : NULL;

	musb_dbg(musb, "<== %s, rxcsr %04x%s %p", musb_ep->end_point.name,
			csr, dma ? " (dma)" : "", request);

	if (csr & MUSB_RXCSR_P_SENTSTALL) {
		csr |= MUSB_RXCSR_P_WZC_BITS;
		csr &= ~MUSB_RXCSR_P_SENTSTALL;
		musb_writew(epio, MUSB_RXCSR, csr);
		return;
	}

	if (csr & MUSB_RXCSR_P_OVERRUN) {
		/* csr |= MUSB_RXCSR_P_WZC_BITS; */
		csr &= ~MUSB_RXCSR_P_OVERRUN;
		musb_writew(epio, MUSB_RXCSR, csr);

		musb_dbg(musb, "%s iso overrun on %p", musb_ep->name, request);
		if (request->status == -EINPROGRESS)
			request->status = -EOVERFLOW;
	}
	if (csr & MUSB_RXCSR_INCOMPRX) {
		/* REVISIT not necessarily an error */
		musb_dbg(musb, "%s, incomprx", musb_ep->end_point.name);
	}

	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		/* "should not happen"; likely RXPKTRDY pending for DMA */
		musb_dbg(musb, "%s busy, csr %04x",
			musb_ep->end_point.name, csr);
		return;
	}

	if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
		csr &= ~(MUSB_RXCSR_AUTOCLEAR
				| MUSB_RXCSR_DMAENAB
				| MUSB_RXCSR_DMAMODE);
		musb_writew(epio, MUSB_RXCSR,
			MUSB_RXCSR_P_WZC_BITS | csr);

		request->actual += musb_ep->dma->actual_len;

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
		/* Autoclear doesn't clear RxPktRdy for short packets */
		if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
				|| (dma->actual_len
					& (musb_ep->packet_sz - 1))) {
			/* ack the read! */
			csr &= ~MUSB_RXCSR_RXPKTRDY;
			musb_writew(epio, MUSB_RXCSR, csr);
		}

		/* incomplete, and not short? wait for next IN packet */
		if ((request->actual < request->length)
				&& (musb_ep->dma->actual_len
					== musb_ep->packet_sz)) {
			/* In double buffer case, continue to unload fifo if
			 * there is Rx packet in FIFO.
			 */
			csr = musb_readw(epio, MUSB_RXCSR);
			if ((csr & MUSB_RXCSR_RXPKTRDY) &&
				hw_ep->rx_double_buffered)
				goto exit;
			return;
		}
#endif
		musb_g_giveback(musb_ep, request, 0);
		/*
		 * In the giveback function the MUSB lock is
		 * released and acquired after sometime. During
		 * this time period the INDEX register could get
		 * changed by the gadget_queue function especially
		 * on SMP systems. Reselect the INDEX to be sure
		 * we are reading/modifying the right registers
		 */
		musb_ep_select(mbase, epnum);

		req = next_request(musb_ep);
		if (!req)
			return;
	}
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
	defined(CONFIG_USB_UX500_DMA)
exit:
#endif
	/* Analyze request */
	rxstate(musb, req);
}

/* ------------------------------------------------------------ */

static int musb_gadget_enable(struct usb_ep *ep,
			const struct usb_endpoint_descriptor *desc)
{
	unsigned long		flags;
	struct musb_ep		*musb_ep;
	struct musb_hw_ep	*hw_ep;
	void __iomem		*regs;
	struct musb		*musb;
	void __iomem		*mbase;
	u8			epnum;
	u16			csr;
	unsigned		tmp;
	int			status = -EINVAL;

	if (!ep || !desc)
		return -EINVAL;

	musb_ep = to_musb_ep(ep);
	hw_ep = musb_ep->hw_ep;
	regs = hw_ep->regs;
	musb = musb_ep->musb;
	mbase = musb->mregs;
	epnum = musb_ep->current_epnum;

	spin_lock_irqsave(&musb->lock, flags);

	if (musb_ep->desc) {
		status = -EBUSY;
		goto fail;
	}
	musb_ep->type = usb_endpoint_type(desc);

	/* check direction and (later) maxpacket size against endpoint */
	if (usb_endpoint_num(desc) != epnum)
		goto fail;

	/* REVISIT this rules out high bandwidth periodic transfers */
	tmp = usb_endpoint_maxp_mult(desc) - 1;
	if (tmp) {
		int ok;

		if (usb_endpoint_dir_in(desc))
			ok = musb->hb_iso_tx;
		else
			ok = musb->hb_iso_rx;

		if (!ok) {
			musb_dbg(musb, "no support for high bandwidth ISO");
			goto fail;
		}
		musb_ep->hb_mult = tmp;
	} else {
		musb_ep->hb_mult = 0;
	}

	musb_ep->packet_sz = usb_endpoint_maxp(desc);
	tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);

	/* enable the interrupts for the endpoint, set the endpoint
	 * packet size (or fail), set the mode, clear the fifo
	 */
	musb_ep_select(mbase, epnum);
	if (usb_endpoint_dir_in(desc)) {

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 1;
		if (!musb_ep->is_in)
			goto fail;

		if (tmp > hw_ep->max_packet_sz_tx) {
			musb_dbg(musb, "packet size beyond hardware FIFO size");
			goto fail;
		}

		musb->intrtxe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);

		/* REVISIT if can_bulk_split(), use by updating "tmp";
		 * likewise high bandwidth periodic tx
		 */
		/* Set TXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		if (can_bulk_split(musb, musb_ep->type))
			musb_ep->hb_mult = (hw_ep->max_packet_sz_tx /
						musb_ep->packet_sz) - 1;
		musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
				| (musb_ep->hb_mult << 11));

		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
		if (musb_readw(regs, MUSB_TXCSR)
				& MUSB_TXCSR_FIFONOTEMPTY)
			csr |= MUSB_TXCSR_FLUSHFIFO;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_TXCSR_P_ISO;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_TXCSR, csr);
		/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
		musb_writew(regs, MUSB_TXCSR, csr);

	} else {

		if (hw_ep->is_shared_fifo)
			musb_ep->is_in = 0;
		if (musb_ep->is_in)
			goto fail;

		if (tmp > hw_ep->max_packet_sz_rx) {
			musb_dbg(musb, "packet size beyond hardware FIFO size");
			goto fail;
		}

		musb->intrrxe |= (1 << epnum);
		musb_writew(mbase, MUSB_INTRRXE, musb->intrrxe);

		/* REVISIT if can_bulk_combine() use by updating "tmp"
		 * likewise high bandwidth periodic rx
		 */
		/* Set RXMAXP with the FIFO size of the endpoint
		 * to disable double buffering mode.
		 */
		musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
				| (musb_ep->hb_mult << 11));

		/* force shared fifo to OUT-only mode */
		if (hw_ep->is_shared_fifo) {
			csr = musb_readw(regs, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
			musb_writew(regs, MUSB_TXCSR, csr);
		}

		csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
		if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
			csr |= MUSB_RXCSR_P_ISO;
		else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
			csr |= MUSB_RXCSR_DISNYET;

		/* set twice in case of double buffering */
		musb_writew(regs, MUSB_RXCSR, csr);
		musb_writew(regs, MUSB_RXCSR, csr);
	}

	/* NOTE: all the I/O code _should_ work fine without DMA, in case
	 * for some reason you run out of channels here.
	 */
	if (is_dma_capable() && musb->dma_controller) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep->dma = c->channel_alloc(c, hw_ep,
				(desc->bEndpointAddress & USB_DIR_IN));
	} else
		musb_ep->dma = NULL;

	musb_ep->desc = desc;
	musb_ep->busy = 0;
	musb_ep->wedged = 0;
	status = 0;

	pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
			musb_driver_name, musb_ep->end_point.name,
			musb_ep_xfertype_string(musb_ep->type),
			musb_ep->is_in ? "IN" : "OUT",
			musb_ep->dma ? "dma, " : "",
			musb_ep->packet_sz);

	schedule_delayed_work(&musb->irq_work, 0);

fail:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
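/*
 * Hedged usage sketch (function-driver side, not this file): the
 * composite core picks a descriptor for the current speed and then
 * enables the endpoint, which lands in musb_gadget_enable() above:
 *
 *	ret = config_ep_by_speed(gadget, f, ep);	// sets ep->desc
 *	if (!ret)
 *		ret = usb_ep_enable(ep);		// -> ->enable()
 */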

/*
 * Disable an endpoint, flushing all requests queued.
 */
static int musb_gadget_disable(struct usb_ep *ep)
{
	unsigned long	flags;
	struct musb	*musb;
	u8		epnum;
	struct musb_ep	*musb_ep;
	void __iomem	*epio;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;
	epnum = musb_ep->current_epnum;
	epio = musb->endpoints[epnum].regs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(musb->mregs, epnum);

	/* zero the endpoint sizes */
	if (musb_ep->is_in) {
		musb->intrtxe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
		musb_writew(epio, MUSB_TXMAXP, 0);
	} else {
		musb->intrrxe &= ~(1 << epnum);
		musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
		musb_writew(epio, MUSB_RXMAXP, 0);
	}

	/* abort all pending DMA and requests */
	nuke(musb_ep, -ESHUTDOWN);

	musb_ep->desc = NULL;
	musb_ep->end_point.desc = NULL;

	schedule_delayed_work(&musb->irq_work, 0);

	spin_unlock_irqrestore(&musb->lock, flags);

	musb_dbg(musb, "%s", musb_ep->end_point.name);

	return 0;
}

/*
 * Allocate a request for an endpoint.
 * Reused by ep0 code.
 */
struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	struct musb_request	*request;

	request = kzalloc(sizeof *request, gfp_flags);
	if (!request)
		return NULL;

	request->request.dma = DMA_ADDR_INVALID;
	request->epnum = musb_ep->current_epnum;
	request->ep = musb_ep;

	trace_musb_req_alloc(request);
	return &request->request;
}

/*
 * Free a request
 * Reused by ep0 code.
 */
void musb_free_request(struct usb_ep *ep, struct usb_request *req)
{
	struct musb_request *request = to_musb_request(req);

	trace_musb_req_free(request);
	kfree(request);
}
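/*
 * Typical round trip through these hooks, as seen from a gadget driver
 * (a sketch; my_complete is hypothetical and error handling elided):
 *
 *	struct usb_request *r = usb_ep_alloc_request(ep, GFP_KERNEL);
 *
 *	r->buf = buf;
 *	r->length = len;
 *	r->complete = my_complete;
 *	usb_ep_queue(ep, r, GFP_ATOMIC);	// -> musb_gadget_queue()
 *	...
 *	usb_ep_free_request(ep, r);		// -> musb_free_request()
 */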

static LIST_HEAD(buffers);

struct free_record {
	struct list_head	list;
	struct device		*dev;
	unsigned		bytes;
	dma_addr_t		dma;
};

/*
 * Context: controller locked, IRQs blocked.
 */
void musb_ep_restart(struct musb *musb, struct musb_request *req)
{
	trace_musb_req_start(req);
	musb_ep_select(musb->mregs, req->epnum);
	if (req->tx)
		txstate(musb, req);
	else
		rxstate(musb, req);
}

static int musb_ep_restart_resume_work(struct musb *musb, void *data)
{
	struct musb_request *req = data;

	musb_ep_restart(musb, req);

	return 0;
}

static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
			gfp_t gfp_flags)
{
	struct musb_ep		*musb_ep;
	struct musb_request	*request;
	struct musb		*musb;
	int			status;
	unsigned long		lockflags;

	if (!ep || !req)
		return -EINVAL;
	if (!req->buf)
		return -ENODATA;

	musb_ep = to_musb_ep(ep);
	musb = musb_ep->musb;

	request = to_musb_request(req);
	request->musb = musb;

	if (request->ep != musb_ep)
		return -EINVAL;

	status = pm_runtime_get(musb->controller);
	if ((status != -EINPROGRESS) && status < 0) {
		dev_err(musb->controller,
			"pm runtime get failed in %s\n",
			__func__);
		pm_runtime_put_noidle(musb->controller);

		return status;
	}
	status = 0;

	trace_musb_req_enq(request);

	/* request is mine now... */
	request->request.actual = 0;
	request->request.status = -EINPROGRESS;
	request->epnum = musb_ep->current_epnum;
	request->tx = musb_ep->is_in;

	map_dma_buffer(request, musb, musb_ep);

	spin_lock_irqsave(&musb->lock, lockflags);

	/* don't queue if the ep is down */
	if (!musb_ep->desc) {
		musb_dbg(musb, "req %p queued to %s while ep %s",
				req, ep->name, "disabled");
		status = -ESHUTDOWN;
		unmap_dma_buffer(request, musb);
		goto unlock;
	}

	/* add request to the list */
	list_add_tail(&request->list, &musb_ep->req_list);

	/* if this is the head of the queue, start i/o ... */
	if (!musb_ep->busy && &request->list == musb_ep->req_list.next) {
		status = musb_queue_resume_work(musb,
						musb_ep_restart_resume_work,
						request);
		if (status < 0) {
			dev_err(musb->controller, "%s resume work: %i\n",
				__func__, status);
			list_del(&request->list);
		}
	}

unlock:
	spin_unlock_irqrestore(&musb->lock, lockflags);
	pm_runtime_mark_last_busy(musb->controller);
	pm_runtime_put_autosuspend(musb->controller);

	return status;
}

static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	struct musb_request	*req = to_musb_request(request);
	struct musb_request	*r;
	unsigned long		flags;
	int			status = 0;
	struct musb		*musb = musb_ep->musb;

	if (!ep || !request || req->ep != musb_ep)
		return -EINVAL;

	trace_musb_req_deq(req);

	spin_lock_irqsave(&musb->lock, flags);

	list_for_each_entry(r, &musb_ep->req_list, list) {
		if (r == req)
			break;
	}
	if (r != req) {
		dev_err(musb->controller, "request %p not queued to %s\n",
				request, ep->name);
		status = -EINVAL;
		goto done;
	}

	/* if the hardware doesn't have the request, easy ... */
	if (musb_ep->req_list.next != &req->list || musb_ep->busy)
		musb_g_giveback(musb_ep, request, -ECONNRESET);

	/* ... else abort the dma transfer ... */
	else if (is_dma_capable() && musb_ep->dma) {
		struct dma_controller	*c = musb->dma_controller;

		musb_ep_select(musb->mregs, musb_ep->current_epnum);
		if (c->channel_abort)
			status = c->channel_abort(musb_ep->dma);
		else
			status = -EBUSY;
		if (status == 0)
			musb_g_giveback(musb_ep, request, -ECONNRESET);
	} else {
		/* NOTE: by sticking to easily tested hardware/driver states,
		 * we leave counting of in-flight packets imprecise.
		 */
		musb_g_giveback(musb_ep, request, -ECONNRESET);
	}

done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}
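/*
 * Sketch: a function driver cancels an in-flight transfer through the
 * standard gadget API, which resolves to the hook above; the request
 * then completes with status -ECONNRESET:
 *
 *	usb_ep_dequeue(ep, req);
 */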

/*
 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
 * data but will queue requests.
 *
 * exported to ep0 code
 */
static int musb_gadget_set_halt(struct usb_ep *ep, int value)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	u8			epnum = musb_ep->current_epnum;
	struct musb		*musb = musb_ep->musb;
	void __iomem		*epio = musb->endpoints[epnum].regs;
	void __iomem		*mbase;
	unsigned long		flags;
	u16			csr;
	struct musb_request	*request;
	int			status = 0;

	if (!ep)
		return -EINVAL;
	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);

	if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) {
		status = -EINVAL;
		goto done;
	}

	musb_ep_select(mbase, epnum);

	request = next_request(musb_ep);
	if (value) {
		if (request) {
			musb_dbg(musb, "request in progress, cannot halt %s",
					ep->name);
			status = -EAGAIN;
			goto done;
		}
		/* Cannot portably stall with non-empty FIFO */
		if (musb_ep->is_in) {
			csr = musb_readw(epio, MUSB_TXCSR);
			if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
				musb_dbg(musb, "FIFO busy, cannot halt %s",
						ep->name);
				status = -EAGAIN;
				goto done;
			}
		}
	} else
		musb_ep->wedged = 0;

	/* set/clear the stall and toggle bits */
	musb_dbg(musb, "%s: %s stall", ep->name, value ? "set" : "clear");
	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		csr |= MUSB_TXCSR_P_WZC_BITS
			| MUSB_TXCSR_CLRDATATOG;
		if (value)
			csr |= MUSB_TXCSR_P_SENDSTALL;
		else
			csr &= ~(MUSB_TXCSR_P_SENDSTALL
				| MUSB_TXCSR_P_SENTSTALL);
		csr &= ~MUSB_TXCSR_TXPKTRDY;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_P_WZC_BITS
			| MUSB_RXCSR_FLUSHFIFO
			| MUSB_RXCSR_CLRDATATOG;
		if (value)
			csr |= MUSB_RXCSR_P_SENDSTALL;
		else
			csr &= ~(MUSB_RXCSR_P_SENDSTALL
				| MUSB_RXCSR_P_SENTSTALL);
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* maybe start the first request in the queue */
	if (!musb_ep->busy && !value && request) {
		musb_dbg(musb, "restarting the request");
		musb_ep_restart(musb, request);
	}

done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

/*
 * Sets the halt feature with the clear requests ignored
 */
static int musb_gadget_set_wedge(struct usb_ep *ep)
{
	struct musb_ep	*musb_ep = to_musb_ep(ep);

	if (!ep)
		return -EINVAL;

	musb_ep->wedged = 1;

	return usb_ep_set_halt(ep);
}
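/*
 * Illustrative note: a wedged endpoint stays halted even when the host
 * sends CLEAR_FEATURE(ENDPOINT_HALT); only the function driver can
 * undo it, e.g. (sketch using the standard gadget API):
 *
 *	usb_ep_set_wedge(ep);		// -> musb_gadget_set_wedge()
 *	...
 *	usb_ep_clear_halt(ep);		// set_halt(0) clears ->wedged
 */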

static int musb_gadget_fifo_status(struct usb_ep *ep)
{
	struct musb_ep		*musb_ep = to_musb_ep(ep);
	void __iomem		*epio = musb_ep->hw_ep->regs;
	int			retval = -EINVAL;

	if (musb_ep->desc && !musb_ep->is_in) {
		struct musb		*musb = musb_ep->musb;
		int			epnum = musb_ep->current_epnum;
		void __iomem		*mbase = musb->mregs;
		unsigned long		flags;

		spin_lock_irqsave(&musb->lock, flags);

		musb_ep_select(mbase, epnum);
		/* FIXME return zero unless RXPKTRDY is set */
		retval = musb_readw(epio, MUSB_RXCOUNT);

		spin_unlock_irqrestore(&musb->lock, flags);
	}
	return retval;
}

static void musb_gadget_fifo_flush(struct usb_ep *ep)
{
	struct musb_ep	*musb_ep = to_musb_ep(ep);
	struct musb	*musb = musb_ep->musb;
	u8		epnum = musb_ep->current_epnum;
	void __iomem	*epio = musb->endpoints[epnum].regs;
	void __iomem	*mbase;
	unsigned long	flags;
	u16		csr;

	mbase = musb->mregs;

	spin_lock_irqsave(&musb->lock, flags);
	musb_ep_select(mbase, (u8) epnum);

	/* disable interrupts */
	musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe & ~(1 << epnum));

	if (musb_ep->is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
			csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
			/*
			 * Setting both TXPKTRDY and FLUSHFIFO makes the
			 * controller interrupt the current FIFO loading,
			 * but not flush the already loaded ones.
			 */
			csr &= ~MUSB_TXCSR_TXPKTRDY;
			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
		musb_writew(epio, MUSB_RXCSR, csr);
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	/* re-enable interrupt */
	musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe);
	spin_unlock_irqrestore(&musb->lock, flags);
}

static const struct usb_ep_ops musb_ep_ops = {
	.enable		= musb_gadget_enable,
	.disable	= musb_gadget_disable,
	.alloc_request	= musb_alloc_request,
	.free_request	= musb_free_request,
	.queue		= musb_gadget_queue,
	.dequeue	= musb_gadget_dequeue,
	.set_halt	= musb_gadget_set_halt,
	.set_wedge	= musb_gadget_set_wedge,
	.fifo_status	= musb_gadget_fifo_status,
	.fifo_flush	= musb_gadget_fifo_flush
};

/* ----------------------------------------------------------------------- */

static int musb_gadget_get_frame(struct usb_gadget *gadget)
{
	struct musb	*musb = gadget_to_musb(gadget);

	return (int)musb_readw(musb->mregs, MUSB_FRAME);
}

static int musb_gadget_wakeup(struct usb_gadget *gadget)
{
	struct musb	*musb = gadget_to_musb(gadget);
	void __iomem	*mregs = musb->mregs;
	unsigned long	flags;
	int		status = -EINVAL;
	u8		power, devctl;
	int		retries;

	spin_lock_irqsave(&musb->lock, flags);

	switch (musb_get_state(musb)) {
	case OTG_STATE_B_PERIPHERAL:
		/* NOTE: OTG state machine doesn't include B_SUSPENDED;
		 * that's part of the standard usb 1.1 state machine, and
		 * doesn't affect OTG transitions.
		 */
		if (musb->may_wakeup && musb->is_suspended)
			break;
		goto done;
	case OTG_STATE_B_IDLE:
		/* Start SRP ... OTG not required. */
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		musb_dbg(musb, "Sending SRP: devctl: %02x", devctl);
		devctl |= MUSB_DEVCTL_SESSION;
		musb_writeb(mregs, MUSB_DEVCTL, devctl);
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		retries = 100;
		while (!(devctl & MUSB_DEVCTL_SESSION)) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}
		retries = 10000;
		while (devctl & MUSB_DEVCTL_SESSION) {
			devctl = musb_readb(mregs, MUSB_DEVCTL);
			if (retries-- < 1)
				break;
		}

		if (musb->xceiv) {
			spin_unlock_irqrestore(&musb->lock, flags);
			otg_start_srp(musb->xceiv->otg);
			spin_lock_irqsave(&musb->lock, flags);
		}

		/* Block idling for at least 1s */
		musb_platform_try_idle(musb,
			jiffies + msecs_to_jiffies(1 * HZ));

		status = 0;
		goto done;
	default:
		musb_dbg(musb, "Unhandled wake: %s",
			 musb_otg_state_string(musb));
		goto done;
	}

	status = 0;

	power = musb_readb(mregs, MUSB_POWER);
	power |= MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
	musb_dbg(musb, "issue wakeup");

	/* FIXME do this next chunk in a timer callback, no udelay */
	mdelay(2);

	power = musb_readb(mregs, MUSB_POWER);
	power &= ~MUSB_POWER_RESUME;
	musb_writeb(mregs, MUSB_POWER, power);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return status;
}

static int
musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
{
	gadget->is_selfpowered = !!is_selfpowered;
	return 0;
}

static void musb_pullup(struct musb *musb, int is_on)
{
	u8 power;

	power = musb_readb(musb->mregs, MUSB_POWER);
	if (is_on)
		power |= MUSB_POWER_SOFTCONN;
	else
		power &= ~MUSB_POWER_SOFTCONN;

	/* FIXME if on, HdrcStart; if off, HdrcStop */

	musb_dbg(musb, "gadget D+ pullup %s",
		is_on ? "on" : "off");
	musb_writeb(musb->mregs, MUSB_POWER, power);
}

#if 0
static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
{
	musb_dbg(musb, "<= %s =>\n", __func__);

	/*
	 * FIXME iff driver's softconnect flag is set (as it is during probe,
	 * though that can clear it), just musb_pullup().
	 */

	return -EINVAL;
}
#endif

static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
	struct musb	*musb = gadget_to_musb(gadget);

	return usb_phy_set_power(musb->xceiv, mA);
}

static void musb_gadget_work(struct work_struct *work)
{
	struct musb *musb;
	unsigned long flags;

	musb = container_of(work, struct musb, gadget_work.work);
	pm_runtime_get_sync(musb->controller);
	spin_lock_irqsave(&musb->lock, flags);
	musb_pullup(musb, musb->softconnect);
	spin_unlock_irqrestore(&musb->lock, flags);
	pm_runtime_mark_last_busy(musb->controller);
	pm_runtime_put_autosuspend(musb->controller);
}

static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
{
	struct musb	*musb = gadget_to_musb(gadget);
	unsigned long	flags;

	is_on = !!is_on;

	/* NOTE: this assumes we are sensing vbus; we'd rather
	 * not pullup unless the B-session is active.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (is_on != musb->softconnect) {
		musb->softconnect = is_on;
		schedule_delayed_work(&musb->gadget_work, 0);
	}
	spin_unlock_irqrestore(&musb->lock, flags);

	return 0;
}
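/*
 * Sketch: gadget drivers and the UDC core reach the hook above through
 * the standard connect/disconnect helpers; the pullup itself is then
 * toggled asynchronously from musb_gadget_work():
 *
 *	usb_gadget_connect(&musb->g);		// ->pullup(g, 1)
 *	usb_gadget_disconnect(&musb->g);	// ->pullup(g, 0)
 */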
1669 | |
1670 | static int musb_gadget_start(struct usb_gadget *g, |
1671 | struct usb_gadget_driver *driver); |
1672 | static int musb_gadget_stop(struct usb_gadget *g); |
1673 | |
1674 | static const struct usb_gadget_ops musb_gadget_operations = { |
1675 | .get_frame = musb_gadget_get_frame, |
1676 | .wakeup = musb_gadget_wakeup, |
1677 | .set_selfpowered = musb_gadget_set_self_powered, |
1678 | /* .vbus_session = musb_gadget_vbus_session, */ |
1679 | .vbus_draw = musb_gadget_vbus_draw, |
1680 | .pullup = musb_gadget_pullup, |
1681 | .udc_start = musb_gadget_start, |
1682 | .udc_stop = musb_gadget_stop, |
1683 | }; |
1684 | |
1685 | /* ----------------------------------------------------------------------- */ |
1686 | |
1687 | /* Registration */ |
1688 | |
1689 | /* Only this registration code "knows" the rule (from USB standards) |
1690 | * about there being only one external upstream port. It assumes |
1691 | * all peripheral ports are external... |
1692 | */ |
1693 | |
1694 | static void |
1695 | init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in) |
1696 | { |
1697 | struct musb_hw_ep *hw_ep = musb->endpoints + epnum; |
1698 | |
1699 | memset(ep, 0, sizeof *ep); |
1700 | |
1701 | ep->current_epnum = epnum; |
1702 | ep->musb = musb; |
1703 | ep->hw_ep = hw_ep; |
1704 | ep->is_in = is_in; |
1705 | |
1706 | INIT_LIST_HEAD(list: &ep->req_list); |
1707 | |
1708 | sprintf(buf: ep->name, fmt: "ep%d%s" , epnum, |
1709 | (!epnum || hw_ep->is_shared_fifo) ? "" : ( |
1710 | is_in ? "in" : "out" )); |
1711 | ep->end_point.name = ep->name; |
1712 | INIT_LIST_HEAD(list: &ep->end_point.ep_list); |
1713 | if (!epnum) { |
1714 | usb_ep_set_maxpacket_limit(ep: &ep->end_point, maxpacket_limit: 64); |
1715 | ep->end_point.caps.type_control = true; |
1716 | ep->end_point.ops = &musb_g_ep0_ops; |
1717 | musb->g.ep0 = &ep->end_point; |
1718 | } else { |
1719 | if (is_in) |
1720 | usb_ep_set_maxpacket_limit(ep: &ep->end_point, maxpacket_limit: hw_ep->max_packet_sz_tx); |
1721 | else |
1722 | usb_ep_set_maxpacket_limit(ep: &ep->end_point, maxpacket_limit: hw_ep->max_packet_sz_rx); |
1723 | ep->end_point.caps.type_iso = true; |
1724 | ep->end_point.caps.type_bulk = true; |
1725 | ep->end_point.caps.type_int = true; |
1726 | ep->end_point.ops = &musb_ep_ops; |
		list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
1728 | } |
1729 | |
1730 | if (!epnum || hw_ep->is_shared_fifo) { |
1731 | ep->end_point.caps.dir_in = true; |
1732 | ep->end_point.caps.dir_out = true; |
1733 | } else if (is_in) |
1734 | ep->end_point.caps.dir_in = true; |
1735 | else |
1736 | ep->end_point.caps.dir_out = true; |
1737 | } |
1738 | |
1739 | /* |
1740 | * Initialize the endpoints exposed to peripheral drivers, with backlinks |
1741 | * to the rest of the driver state. |
1742 | */ |
1743 | static inline void musb_g_init_endpoints(struct musb *musb) |
1744 | { |
1745 | u8 epnum; |
1746 | struct musb_hw_ep *hw_ep; |
1747 | unsigned count = 0; |
1748 | |
1749 | /* initialize endpoint list just once */ |
	INIT_LIST_HEAD(&(musb->g.ep_list));
1751 | |
1752 | for (epnum = 0, hw_ep = musb->endpoints; |
1753 | epnum < musb->nr_endpoints; |
1754 | epnum++, hw_ep++) { |
1755 | if (hw_ep->is_shared_fifo /* || !epnum */) { |
			init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
1757 | count++; |
1758 | } else { |
1759 | if (hw_ep->max_packet_sz_tx) { |
				init_peripheral_ep(musb, &hw_ep->ep_in,
					epnum, 1);
				count++;
			}
			if (hw_ep->max_packet_sz_rx) {
				init_peripheral_ep(musb, &hw_ep->ep_out,
					epnum, 0);
1767 | count++; |
1768 | } |
1769 | } |
1770 | } |
1771 | } |
1772 | |
1773 | /* called once during driver setup to initialize and link into |
1774 | * the driver model; memory is zeroed. |
1775 | */ |
1776 | int musb_gadget_setup(struct musb *musb) |
1777 | { |
1778 | int status; |
1779 | |
1780 | /* REVISIT minor race: if (erroneously) setting up two |
1781 | * musb peripherals at the same time, only the bus lock |
1782 | * is probably held. |
1783 | */ |
1784 | |
1785 | musb->g.ops = &musb_gadget_operations; |
1786 | musb->g.max_speed = USB_SPEED_HIGH; |
1787 | musb->g.speed = USB_SPEED_UNKNOWN; |
1788 | |
1789 | MUSB_DEV_MODE(musb); |
	musb_set_state(musb, OTG_STATE_B_IDLE);
1791 | |
1792 | /* this "gadget" abstracts/virtualizes the controller */ |
1793 | musb->g.name = musb_driver_name; |
1794 | /* don't support otg protocols */ |
1795 | musb->g.is_otg = 0; |
1796 | INIT_DELAYED_WORK(&musb->gadget_work, musb_gadget_work); |
1797 | musb_g_init_endpoints(musb); |
1798 | |
1799 | musb->is_active = 0; |
	musb_platform_try_idle(musb, 0);

	status = usb_add_gadget_udc(musb->controller, &musb->g);
1803 | if (status) |
1804 | goto err; |
1805 | |
1806 | return 0; |
1807 | err: |
1808 | musb->g.dev.parent = NULL; |
	device_unregister(&musb->g.dev);
1810 | return status; |
1811 | } |
1812 | |
1813 | void musb_gadget_cleanup(struct musb *musb) |
1814 | { |
1815 | if (musb->port_mode == MUSB_HOST) |
1816 | return; |
1817 | |
	cancel_delayed_work_sync(&musb->gadget_work);
	usb_del_gadget_udc(&musb->g);
1820 | } |
1821 | |
1822 | /* |
1823 | * Register the gadget driver. Used by gadget drivers when |
1824 | * registering themselves with the controller. |
1825 | * |
1826 | * -EINVAL something went wrong (not driver) |
1827 | * -EBUSY another gadget is already using the controller |
1828 | * -ENOMEM no memory to perform the operation |
1829 | * |
1830 | * @param driver the gadget driver |
1831 | * @return <0 if error, 0 if everything is fine |
1832 | */ |
1833 | static int musb_gadget_start(struct usb_gadget *g, |
1834 | struct usb_gadget_driver *driver) |
1835 | { |
1836 | struct musb *musb = gadget_to_musb(g); |
1837 | unsigned long flags; |
1838 | int retval = 0; |
1839 | |
1840 | if (driver->max_speed < USB_SPEED_HIGH) { |
1841 | retval = -EINVAL; |
1842 | goto err; |
1843 | } |
1844 | |
	pm_runtime_get_sync(musb->controller);
1846 | |
1847 | musb->softconnect = 0; |
1848 | musb->gadget_driver = driver; |
1849 | |
1850 | spin_lock_irqsave(&musb->lock, flags); |
1851 | musb->is_active = 1; |
1852 | |
1853 | if (musb->xceiv) |
		otg_set_peripheral(musb->xceiv->otg, &musb->g);
1855 | else |
1856 | phy_set_mode(musb->phy, PHY_MODE_USB_DEVICE); |
1857 | |
	musb_set_state(musb, OTG_STATE_B_IDLE);
	spin_unlock_irqrestore(&musb->lock, flags);
1860 | |
1861 | musb_start(musb); |
1862 | |
1863 | /* REVISIT: funcall to other code, which also |
1864 | * handles power budgeting ... this way also |
1865 | * ensures HdrcStart is indirectly called. |
1866 | */ |
1867 | if (musb->xceiv && musb->xceiv->last_event == USB_EVENT_ID) |
		musb_platform_set_vbus(musb, 1);
1869 | |
	pm_runtime_mark_last_busy(musb->controller);
	pm_runtime_put_autosuspend(musb->controller);
1872 | |
1873 | return 0; |
1874 | |
1875 | err: |
1876 | return retval; |
1877 | } |
1878 | |
1879 | /* |
1880 | * Unregister the gadget driver. Used by gadget drivers when |
1881 | * unregistering themselves from the controller. |
1882 | * |
1883 | * @param driver the gadget driver to unregister |
1884 | */ |
1885 | static int musb_gadget_stop(struct usb_gadget *g) |
1886 | { |
1887 | struct musb *musb = gadget_to_musb(g); |
1888 | unsigned long flags; |
1889 | |
	pm_runtime_get_sync(musb->controller);
1891 | |
1892 | /* |
1893 | * REVISIT always use otg_set_peripheral() here too; |
1894 | * this needs to shut down the OTG engine. |
1895 | */ |
1896 | |
1897 | spin_lock_irqsave(&musb->lock, flags); |
1898 | |
1899 | musb_hnp_stop(musb); |
1900 | |
	(void) musb_gadget_vbus_draw(&musb->g, 0);

	musb_set_state(musb, OTG_STATE_UNDEFINED);
1904 | musb_stop(musb); |
1905 | |
1906 | if (musb->xceiv) |
		otg_set_peripheral(musb->xceiv->otg, NULL);
1908 | else |
1909 | phy_set_mode(musb->phy, PHY_MODE_INVALID); |
1910 | |
1911 | musb->is_active = 0; |
1912 | musb->gadget_driver = NULL; |
	musb_platform_try_idle(musb, 0);
	spin_unlock_irqrestore(&musb->lock, flags);
1915 | |
1916 | /* |
1917 | * FIXME we need to be able to register another |
1918 | * gadget driver here and have everything work; |
1919 | * that currently misbehaves. |
1920 | */ |
1921 | |
1922 | /* Force check of devctl register for PM runtime */ |
	pm_runtime_mark_last_busy(musb->controller);
	pm_runtime_put_autosuspend(musb->controller);
1925 | |
1926 | return 0; |
1927 | } |
1928 | |
1929 | /* ----------------------------------------------------------------------- */ |
1930 | |
1931 | /* lifecycle operations called through plat_uds.c */ |
1932 | |
1933 | void musb_g_resume(struct musb *musb) |
1934 | { |
1935 | musb->is_suspended = 0; |
1936 | switch (musb_get_state(musb)) { |
1937 | case OTG_STATE_B_IDLE: |
1938 | break; |
1939 | case OTG_STATE_B_WAIT_ACON: |
1940 | case OTG_STATE_B_PERIPHERAL: |
1941 | musb->is_active = 1; |
1942 | if (musb->gadget_driver && musb->gadget_driver->resume) { |
			spin_unlock(&musb->lock);
			musb->gadget_driver->resume(&musb->g);
			spin_lock(&musb->lock);
1946 | } |
1947 | break; |
1948 | default: |
1949 | WARNING("unhandled RESUME transition (%s)\n" , |
1950 | musb_otg_state_string(musb)); |
1951 | } |
1952 | } |
1953 | |
1954 | /* called when SOF packets stop for 3+ msec */ |
1955 | void musb_g_suspend(struct musb *musb) |
1956 | { |
1957 | u8 devctl; |
1958 | |
1959 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); |
1960 | musb_dbg(musb, fmt: "musb_g_suspend: devctl %02x" , devctl); |
1961 | |
1962 | switch (musb_get_state(musb)) { |
1963 | case OTG_STATE_B_IDLE: |
1964 | if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) |
			musb_set_state(musb, OTG_STATE_B_PERIPHERAL);
1966 | break; |
1967 | case OTG_STATE_B_PERIPHERAL: |
1968 | musb->is_suspended = 1; |
1969 | if (musb->gadget_driver && musb->gadget_driver->suspend) { |
			spin_unlock(&musb->lock);
			musb->gadget_driver->suspend(&musb->g);
			spin_lock(&musb->lock);
1973 | } |
1974 | break; |
1975 | default: |
1976 | /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ; |
1977 | * A_PERIPHERAL may need care too |
1978 | */ |
1979 | WARNING("unhandled SUSPEND transition (%s)" , |
1980 | musb_otg_state_string(musb)); |
1981 | } |
1982 | } |
1983 | |
1984 | /* Called during SRP */ |
1985 | void musb_g_wakeup(struct musb *musb) |
1986 | { |
	musb_gadget_wakeup(&musb->g);
1988 | } |
1989 | |
1990 | /* called when VBUS drops below session threshold, and in other cases */ |
1991 | void musb_g_disconnect(struct musb *musb) |
1992 | { |
1993 | void __iomem *mregs = musb->mregs; |
1994 | u8 devctl = musb_readb(mregs, MUSB_DEVCTL); |
1995 | |
1996 | musb_dbg(musb, fmt: "musb_g_disconnect: devctl %02x" , devctl); |
1997 | |
1998 | /* clear HR */ |
1999 | musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION); |
2000 | |
2001 | /* don't draw vbus until new b-default session */ |
	(void) musb_gadget_vbus_draw(&musb->g, 0);
2003 | |
2004 | musb->g.speed = USB_SPEED_UNKNOWN; |
2005 | if (musb->gadget_driver && musb->gadget_driver->disconnect) { |
		spin_unlock(&musb->lock);
		musb->gadget_driver->disconnect(&musb->g);
		spin_lock(&musb->lock);
2009 | } |
2010 | |
2011 | switch (musb_get_state(musb)) { |
2012 | default: |
2013 | musb_dbg(musb, fmt: "Unhandled disconnect %s, setting a_idle" , |
2014 | musb_otg_state_string(musb)); |
2015 | musb_set_state(musb, otg_state: OTG_STATE_A_IDLE); |
2016 | MUSB_HST_MODE(musb); |
2017 | break; |
2018 | case OTG_STATE_A_PERIPHERAL: |
		musb_set_state(musb, OTG_STATE_A_WAIT_BCON);
2020 | MUSB_HST_MODE(musb); |
2021 | break; |
2022 | case OTG_STATE_B_WAIT_ACON: |
2023 | case OTG_STATE_B_HOST: |
2024 | case OTG_STATE_B_PERIPHERAL: |
2025 | case OTG_STATE_B_IDLE: |
		musb_set_state(musb, OTG_STATE_B_IDLE);
2027 | break; |
2028 | case OTG_STATE_B_SRP_INIT: |
2029 | break; |
2030 | } |
2031 | |
2032 | musb->is_active = 0; |
2033 | } |
2034 | |
2035 | void musb_g_reset(struct musb *musb) |
2036 | __releases(musb->lock) |
2037 | __acquires(musb->lock) |
2038 | { |
2039 | void __iomem *mbase = musb->mregs; |
2040 | u8 devctl = musb_readb(mbase, MUSB_DEVCTL); |
2041 | u8 power; |
2042 | |
2043 | musb_dbg(musb, fmt: "<== %s driver '%s'" , |
2044 | (devctl & MUSB_DEVCTL_BDEVICE) |
2045 | ? "B-Device" : "A-Device" , |
2046 | musb->gadget_driver |
2047 | ? musb->gadget_driver->driver.name |
2048 | : NULL |
2049 | ); |
2050 | |
2051 | /* report reset, if we didn't already (flushing EP state) */ |
2052 | if (musb->gadget_driver && musb->g.speed != USB_SPEED_UNKNOWN) { |
		spin_unlock(&musb->lock);
		usb_gadget_udc_reset(&musb->g, musb->gadget_driver);
		spin_lock(&musb->lock);
2056 | } |
2057 | |
2058 | /* clear HR */ |
2059 | else if (devctl & MUSB_DEVCTL_HR) |
2060 | musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION); |
2061 | |
2062 | |
2063 | /* what speed did we negotiate? */ |
2064 | power = musb_readb(mbase, MUSB_POWER); |
2065 | musb->g.speed = (power & MUSB_POWER_HSMODE) |
2066 | ? USB_SPEED_HIGH : USB_SPEED_FULL; |
2067 | |
2068 | /* start in USB_STATE_DEFAULT */ |
2069 | musb->is_active = 1; |
2070 | musb->is_suspended = 0; |
2071 | MUSB_DEV_MODE(musb); |
2072 | musb->address = 0; |
2073 | musb->ep0_state = MUSB_EP0_STAGE_SETUP; |
2074 | |
2075 | musb->may_wakeup = 0; |
2076 | musb->g.b_hnp_enable = 0; |
2077 | musb->g.a_alt_hnp_support = 0; |
2078 | musb->g.a_hnp_support = 0; |
2079 | musb->g.quirk_zlp_not_supp = 1; |
2080 | |
2081 | /* Normal reset, as B-Device; |
2082 | * or else after HNP, as A-Device |
2083 | */ |
2084 | if (!musb->g.is_otg) { |
2085 | /* USB device controllers that are not OTG compatible |
2086 | * may not have DEVCTL register in silicon. |
2087 | * In that case, do not rely on devctl for setting |
2088 | * peripheral mode. |
2089 | */ |
		musb_set_state(musb, OTG_STATE_B_PERIPHERAL);
2091 | musb->g.is_a_peripheral = 0; |
2092 | } else if (devctl & MUSB_DEVCTL_BDEVICE) { |
		musb_set_state(musb, OTG_STATE_B_PERIPHERAL);
2094 | musb->g.is_a_peripheral = 0; |
2095 | } else { |
		musb_set_state(musb, OTG_STATE_A_PERIPHERAL);
2097 | musb->g.is_a_peripheral = 1; |
2098 | } |
2099 | |
2100 | /* start with default limits on VBUS power draw */ |
	(void) musb_gadget_vbus_draw(&musb->g, 8);
2102 | } |
2103 | |