1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* |
3 | * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget |
4 | * |
5 | * epn.c - Generic endpoints management |
6 | * |
7 | * Copyright 2017 IBM Corporation |
8 | */ |
9 | |
10 | #include <linux/kernel.h> |
11 | #include <linux/module.h> |
12 | #include <linux/platform_device.h> |
13 | #include <linux/delay.h> |
14 | #include <linux/ioport.h> |
15 | #include <linux/slab.h> |
16 | #include <linux/errno.h> |
17 | #include <linux/list.h> |
18 | #include <linux/interrupt.h> |
19 | #include <linux/proc_fs.h> |
20 | #include <linux/prefetch.h> |
21 | #include <linux/clk.h> |
22 | #include <linux/usb/gadget.h> |
23 | #include <linux/of.h> |
24 | #include <linux/regmap.h> |
25 | #include <linux/dma-mapping.h> |
26 | |
27 | #include "vhub.h" |
28 | |
#define EXTRA_CHECKS 0
30 | |
31 | #ifdef EXTRA_CHECKS |
32 | #define CHECK(ep, expr, fmt...) \ |
33 | do { \ |
34 | if (!(expr)) EPDBG(ep, "CHECK:" fmt); \ |
35 | } while(0) |
36 | #else |
37 | #define CHECK(ep, expr, fmt...) do { } while(0) |
38 | #endif |
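/*
 * Illustrative use (modeled on a check found later in this file): with
 * EXTRA_CHECKS defined, CHECK(ep, d_last == ep->epn.d_last,
 * "RPTR mismatch %d vs %d\n", d_last, ep->epn.d_last) logs through EPDBG
 * when the condition is false; otherwise the macro compiles away to nothing.
 */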
39 | |
40 | static void ast_vhub_epn_kick(struct ast_vhub_ep *ep, struct ast_vhub_req *req) |
41 | { |
42 | unsigned int act = req->req.actual; |
43 | unsigned int len = req->req.length; |
44 | unsigned int chunk; |
45 | |
46 | /* There should be no DMA ongoing */ |
47 | WARN_ON(req->active); |
48 | |
49 | /* Calculate next chunk size */ |
50 | chunk = len - act; |
51 | if (chunk > ep->ep.maxpacket) |
52 | chunk = ep->ep.maxpacket; |
53 | else if ((chunk < ep->ep.maxpacket) || !req->req.zero) |
54 | req->last_desc = 1; |
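	/*
	 * For example (illustrative figures, not from the original code):
	 * a 1000-byte request on a 512-byte maxpacket endpoint is kicked
	 * as a 512-byte chunk first, then a 488-byte chunk, for which
	 * last_desc is set since it is shorter than maxpacket.
	 */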
55 | |
56 | EPVDBG(ep, "kick req %p act=%d/%d chunk=%d last=%d\n" , |
57 | req, act, len, chunk, req->last_desc); |
58 | |
59 | /* If DMA unavailable, using staging EP buffer */ |
60 | if (!req->req.dma) { |
61 | |
62 | /* For IN transfers, copy data over first */ |
63 | if (ep->epn.is_in) { |
64 | memcpy(ep->buf, req->req.buf + act, chunk); |
65 | vhub_dma_workaround(addr: ep->buf); |
66 | } |
67 | writel(val: ep->buf_dma, addr: ep->epn.regs + AST_VHUB_EP_DESC_BASE); |
68 | } else { |
69 | if (ep->epn.is_in) |
70 | vhub_dma_workaround(addr: req->req.buf); |
71 | writel(val: req->req.dma + act, addr: ep->epn.regs + AST_VHUB_EP_DESC_BASE); |
72 | } |
73 | |
74 | /* Start DMA */ |
75 | req->active = true; |
76 | writel(VHUB_EP_DMA_SET_TX_SIZE(chunk), |
77 | addr: ep->epn.regs + AST_VHUB_EP_DESC_STATUS); |
78 | writel(VHUB_EP_DMA_SET_TX_SIZE(chunk) | VHUB_EP_DMA_SINGLE_KICK, |
79 | addr: ep->epn.regs + AST_VHUB_EP_DESC_STATUS); |
80 | } |
81 | |
82 | static void ast_vhub_epn_handle_ack(struct ast_vhub_ep *ep) |
83 | { |
84 | struct ast_vhub_req *req; |
85 | unsigned int len; |
86 | int status = 0; |
87 | u32 stat; |
88 | |
89 | /* Read EP status */ |
	stat = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
91 | |
92 | /* Grab current request if any */ |
93 | req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue); |
94 | |
95 | EPVDBG(ep, "ACK status=%08x is_in=%d, req=%p (active=%d)\n" , |
96 | stat, ep->epn.is_in, req, req ? req->active : 0); |
97 | |
98 | /* In absence of a request, bail out, must have been dequeued */ |
99 | if (!req) |
100 | return; |
101 | |
102 | /* |
103 | * Request not active, move on to processing queue, active request |
104 | * was probably dequeued |
105 | */ |
106 | if (!req->active) |
107 | goto next_chunk; |
108 | |
109 | /* Check if HW has moved on */ |
110 | if (VHUB_EP_DMA_RPTR(stat) != 0) { |
111 | EPDBG(ep, "DMA read pointer not 0 !\n" ); |
112 | return; |
113 | } |
114 | |
115 | /* No current DMA ongoing */ |
116 | req->active = false; |
117 | |
118 | /* Grab length out of HW */ |
119 | len = VHUB_EP_DMA_TX_SIZE(stat); |
120 | |
121 | /* If not using DMA, copy data out if needed */ |
122 | if (!req->req.dma && !ep->epn.is_in && len) { |
123 | if (req->req.actual + len > req->req.length) { |
124 | req->last_desc = 1; |
125 | status = -EOVERFLOW; |
126 | goto done; |
127 | } else { |
128 | memcpy(req->req.buf + req->req.actual, ep->buf, len); |
129 | } |
130 | } |
131 | /* Adjust size */ |
132 | req->req.actual += len; |
133 | |
134 | /* Check for short packet */ |
135 | if (len < ep->ep.maxpacket) |
136 | req->last_desc = 1; |
137 | |
138 | done: |
139 | /* That's it ? complete the request and pick a new one */ |
140 | if (req->last_desc >= 0) { |
141 | ast_vhub_done(ep, req, status); |
142 | req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, |
143 | queue); |
144 | |
145 | /* |
146 | * Due to lock dropping inside "done" the next request could |
147 | * already be active, so check for that and bail if needed. |
148 | */ |
149 | if (!req || req->active) |
150 | return; |
151 | } |
152 | |
153 | next_chunk: |
154 | ast_vhub_epn_kick(ep, req); |
155 | } |
156 | |
157 | static inline unsigned int ast_vhub_count_free_descs(struct ast_vhub_ep *ep) |
158 | { |
159 | /* |
160 | * d_next == d_last means descriptor list empty to HW, |
161 | * thus we can only have AST_VHUB_DESCS_COUNT-1 descriptors |
162 | * in the list |
163 | */ |
164 | return (ep->epn.d_last + AST_VHUB_DESCS_COUNT - ep->epn.d_next - 1) & |
165 | (AST_VHUB_DESCS_COUNT - 1); |
166 | } |
167 | |
168 | static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep, |
169 | struct ast_vhub_req *req) |
170 | { |
171 | struct ast_vhub_desc *desc = NULL; |
172 | unsigned int act = req->act_count; |
173 | unsigned int len = req->req.length; |
174 | unsigned int chunk; |
175 | |
176 | /* Mark request active if not already */ |
177 | req->active = true; |
178 | |
179 | /* If the request was already completely written, do nothing */ |
180 | if (req->last_desc >= 0) |
181 | return; |
182 | |
183 | EPVDBG(ep, "kick act=%d/%d chunk_max=%d free_descs=%d\n" , |
184 | act, len, ep->epn.chunk_max, ast_vhub_count_free_descs(ep)); |
185 | |
186 | /* While we can create descriptors */ |
187 | while (ast_vhub_count_free_descs(ep) && req->last_desc < 0) { |
188 | unsigned int d_num; |
189 | |
190 | /* Grab next free descriptor */ |
191 | d_num = ep->epn.d_next; |
192 | desc = &ep->epn.descs[d_num]; |
193 | ep->epn.d_next = (d_num + 1) & (AST_VHUB_DESCS_COUNT - 1); |
194 | |
195 | /* Calculate next chunk size */ |
196 | chunk = len - act; |
197 | if (chunk <= ep->epn.chunk_max) { |
198 | /* |
199 | * Is this the last packet ? Because of having up to 8 |
200 | * packets in a descriptor we can't just compare "chunk" |
201 | * with ep.maxpacket. We have to see if it's a multiple |
202 | * of it to know if we have to send a zero packet. |
203 | * Sadly that involves a modulo which is a bit expensive |
204 | * but probably still better than not doing it. |
205 | */ |
206 | if (!chunk || !req->req.zero || (chunk % ep->ep.maxpacket) != 0) |
207 | req->last_desc = d_num; |
208 | } else { |
209 | chunk = ep->epn.chunk_max; |
210 | } |
211 | |
212 | EPVDBG(ep, " chunk: act=%d/%d chunk=%d last=%d desc=%d free=%d\n" , |
213 | act, len, chunk, req->last_desc, d_num, |
214 | ast_vhub_count_free_descs(ep)); |
215 | |
216 | /* Populate descriptor */ |
217 | desc->w0 = cpu_to_le32(req->req.dma + act); |
218 | |
219 | /* Interrupt if end of request or no more descriptors */ |
220 | |
221 | /* |
222 | * TODO: Be smarter about it, if we don't have enough |
223 | * descriptors request an interrupt before queue empty |
224 | * or so in order to be able to populate more before |
225 | * the HW runs out. This isn't a problem at the moment |
226 | * as we use 256 descriptors and only put at most one |
227 | * request in the ring. |
228 | */ |
229 | desc->w1 = cpu_to_le32(VHUB_DSC1_IN_SET_LEN(chunk)); |
230 | if (req->last_desc >= 0 || !ast_vhub_count_free_descs(ep)) |
231 | desc->w1 |= cpu_to_le32(VHUB_DSC1_IN_INTERRUPT); |
232 | |
233 | /* Account packet */ |
234 | req->act_count = act = act + chunk; |
235 | } |
236 | |
237 | if (likely(desc)) |
		vhub_dma_workaround(desc);

	/* Tell HW about new descriptors */
	writel(VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next),
	       ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

	EPVDBG(ep, "HW kicked, d_next=%d dstat=%08x\n",
	       ep->epn.d_next, readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS));
246 | } |
247 | |
248 | static void ast_vhub_epn_handle_ack_desc(struct ast_vhub_ep *ep) |
249 | { |
250 | struct ast_vhub_req *req; |
251 | unsigned int len, d_last; |
252 | u32 stat, stat1; |
253 | |
254 | /* Read EP status, workaround HW race */ |
255 | do { |
		stat = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
		stat1 = readl(ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
258 | } while(stat != stat1); |
259 | |
260 | /* Extract RPTR */ |
261 | d_last = VHUB_EP_DMA_RPTR(stat); |
262 | |
263 | /* Grab current request if any */ |
264 | req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue); |
265 | |
266 | EPVDBG(ep, "ACK status=%08x is_in=%d ep->d_last=%d..%d\n" , |
267 | stat, ep->epn.is_in, ep->epn.d_last, d_last); |
268 | |
269 | /* Check all completed descriptors */ |
270 | while (ep->epn.d_last != d_last) { |
271 | struct ast_vhub_desc *desc; |
272 | unsigned int d_num; |
273 | bool is_last_desc; |
274 | |
275 | /* Grab next completed descriptor */ |
276 | d_num = ep->epn.d_last; |
277 | desc = &ep->epn.descs[d_num]; |
278 | ep->epn.d_last = (d_num + 1) & (AST_VHUB_DESCS_COUNT - 1); |
279 | |
280 | /* Grab len out of descriptor */ |
281 | len = VHUB_DSC1_IN_LEN(le32_to_cpu(desc->w1)); |
282 | |
283 | EPVDBG(ep, " desc %d len=%d req=%p (act=%d)\n" , |
284 | d_num, len, req, req ? req->active : 0); |
285 | |
286 | /* If no active request pending, move on */ |
287 | if (!req || !req->active) |
288 | continue; |
289 | |
290 | /* Adjust size */ |
291 | req->req.actual += len; |
292 | |
293 | /* Is that the last chunk ? */ |
294 | is_last_desc = req->last_desc == d_num; |
295 | CHECK(ep, is_last_desc == (len < ep->ep.maxpacket || |
296 | (req->req.actual >= req->req.length && |
297 | !req->req.zero)), |
298 | "Last packet discrepancy: last_desc=%d len=%d r.act=%d " |
299 | "r.len=%d r.zero=%d mp=%d\n" , |
300 | is_last_desc, len, req->req.actual, req->req.length, |
301 | req->req.zero, ep->ep.maxpacket); |
302 | |
303 | if (is_last_desc) { |
304 | /* |
305 | * Because we can only have one request at a time |
306 | * in our descriptor list in this implementation, |
307 | * d_last and ep->d_last should now be equal |
308 | */ |
309 | CHECK(ep, d_last == ep->epn.d_last, |
310 | "DMA read ptr mismatch %d vs %d\n" , |
311 | d_last, ep->epn.d_last); |
312 | |
313 | /* Note: done will drop and re-acquire the lock */ |
			ast_vhub_done(ep, req, 0);
315 | req = list_first_entry_or_null(&ep->queue, |
316 | struct ast_vhub_req, |
317 | queue); |
318 | break; |
319 | } |
320 | } |
321 | |
322 | /* More work ? */ |
323 | if (req) |
324 | ast_vhub_epn_kick_desc(ep, req); |
325 | } |
326 | |
327 | void ast_vhub_epn_ack_irq(struct ast_vhub_ep *ep) |
328 | { |
329 | if (ep->epn.desc_mode) |
330 | ast_vhub_epn_handle_ack_desc(ep); |
331 | else |
332 | ast_vhub_epn_handle_ack(ep); |
333 | } |
334 | |
335 | static int ast_vhub_epn_queue(struct usb_ep* u_ep, struct usb_request *u_req, |
336 | gfp_t gfp_flags) |
337 | { |
338 | struct ast_vhub_req *req = to_ast_req(u_req); |
339 | struct ast_vhub_ep *ep = to_ast_ep(u_ep); |
340 | struct ast_vhub *vhub = ep->vhub; |
341 | unsigned long flags; |
342 | bool empty; |
343 | int rc; |
344 | |
345 | /* Paranoid checks */ |
346 | if (!u_req || !u_req->complete || !u_req->buf) { |
347 | dev_warn(&vhub->pdev->dev, "Bogus EPn request ! u_req=%p\n" , u_req); |
348 | if (u_req) { |
349 | dev_warn(&vhub->pdev->dev, "complete=%p internal=%d\n" , |
350 | u_req->complete, req->internal); |
351 | } |
352 | return -EINVAL; |
353 | } |
354 | |
355 | /* Endpoint enabled ? */ |
356 | if (!ep->epn.enabled || !u_ep->desc || !ep->dev || !ep->d_idx || |
357 | !ep->dev->enabled) { |
358 | EPDBG(ep, "Enqueuing request on wrong or disabled EP\n" ); |
359 | return -ESHUTDOWN; |
360 | } |
361 | |
362 | /* Map request for DMA if possible. For now, the rule for DMA is |
363 | * that: |
364 | * |
365 | * * For single stage mode (no descriptors): |
366 | * |
	 * - The buffer is aligned to an 8-byte boundary (HW requirement)
	 * - For an OUT endpoint, the request size is a multiple of the EP
	 *   packet size (otherwise the controller will DMA past the end
	 *   of the buffer if the host sends an overly long packet).
371 | * |
372 | * * For descriptor mode (tx only for now), always. |
373 | * |
374 | * We could relax the latter by making the decision to use the bounce |
375 | * buffer based on the size of a given *segment* of the request rather |
376 | * than the whole request. |
377 | */ |
378 | if (ep->epn.desc_mode || |
379 | ((((unsigned long)u_req->buf & 7) == 0) && |
380 | (ep->epn.is_in || !(u_req->length & (u_ep->maxpacket - 1))))) { |
		rc = usb_gadget_map_request_by_dev(&vhub->pdev->dev, u_req,
						   ep->epn.is_in);
		if (rc) {
			dev_warn(&vhub->pdev->dev,
				 "Request mapping failure %d\n", rc);
386 | return rc; |
387 | } |
388 | } else |
389 | u_req->dma = 0; |
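	/*
	 * Illustrative examples of the rules above: an IN request in
	 * descriptor mode is always mapped for DMA; an OUT request of
	 * 100 bytes on a 512-byte maxpacket endpoint (not a multiple of
	 * the packet size) falls back to the staging buffer, as does any
	 * buffer that is not 8-byte aligned.
	 */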
390 | |
391 | EPVDBG(ep, "enqueue req @%p\n" , req); |
392 | EPVDBG(ep, " l=%d dma=0x%x zero=%d noshort=%d noirq=%d is_in=%d\n" , |
393 | u_req->length, (u32)u_req->dma, u_req->zero, |
394 | u_req->short_not_ok, u_req->no_interrupt, |
395 | ep->epn.is_in); |
396 | |
397 | /* Initialize request progress fields */ |
398 | u_req->status = -EINPROGRESS; |
399 | u_req->actual = 0; |
400 | req->act_count = 0; |
401 | req->active = false; |
402 | req->last_desc = -1; |
403 | spin_lock_irqsave(&vhub->lock, flags); |
	empty = list_empty(&ep->queue);

	/* Add request to list and kick processing if empty */
	list_add_tail(&req->queue, &ep->queue);
	if (empty) {
		if (ep->epn.desc_mode)
			ast_vhub_epn_kick_desc(ep, req);
		else
			ast_vhub_epn_kick(ep, req);
	}
	spin_unlock_irqrestore(&vhub->lock, flags);
415 | |
416 | return 0; |
417 | } |
418 | |
419 | static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep, |
420 | bool restart_ep) |
421 | { |
422 | u32 state, reg, loops; |
423 | |
424 | /* Stop DMA activity */ |
425 | if (ep->epn.desc_mode) |
		writel(VHUB_EP_DMA_CTRL_RESET, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	else
		writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
429 | |
430 | /* Wait for it to complete */ |
431 | for (loops = 0; loops < 1000; loops++) { |
		state = readl(ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
433 | state = VHUB_EP_DMA_PROC_STATUS(state); |
434 | if (state == EP_DMA_PROC_RX_IDLE || |
435 | state == EP_DMA_PROC_TX_IDLE) |
436 | break; |
437 | udelay(1); |
438 | } |
439 | if (loops >= 1000) |
440 | dev_warn(&ep->vhub->pdev->dev, "Timeout waiting for DMA\n" ); |
441 | |
442 | /* If we don't have to restart the endpoint, that's it */ |
443 | if (!restart_ep) |
444 | return; |
445 | |
446 | /* Restart the endpoint */ |
447 | if (ep->epn.desc_mode) { |
448 | /* |
449 | * Take out descriptors by resetting the DMA read |
450 | * pointer to be equal to the CPU write pointer. |
451 | * |
452 | * Note: If we ever support creating descriptors for |
453 | * requests that aren't the head of the queue, we |
454 | * may have to do something more complex here, |
455 | * especially if the request being taken out is |
456 | * not the current head descriptors. |
457 | */ |
458 | reg = VHUB_EP_DMA_SET_RPTR(ep->epn.d_next) | |
459 | VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next); |
		writel(reg, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

		/* Then turn it back on */
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
	} else {
		/* Single mode: just turn it back on */
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
469 | } |
470 | } |
471 | |
472 | static int ast_vhub_epn_dequeue(struct usb_ep* u_ep, struct usb_request *u_req) |
473 | { |
474 | struct ast_vhub_ep *ep = to_ast_ep(u_ep); |
475 | struct ast_vhub *vhub = ep->vhub; |
476 | struct ast_vhub_req *req = NULL, *iter; |
477 | unsigned long flags; |
478 | int rc = -EINVAL; |
479 | |
480 | spin_lock_irqsave(&vhub->lock, flags); |
481 | |
482 | /* Make sure it's actually queued on this endpoint */ |
483 | list_for_each_entry(iter, &ep->queue, queue) { |
484 | if (&iter->req != u_req) |
485 | continue; |
486 | req = iter; |
487 | break; |
488 | } |
489 | |
490 | if (req) { |
491 | EPVDBG(ep, "dequeue req @%p active=%d\n" , |
492 | req, req->active); |
493 | if (req->active) |
494 | ast_vhub_stop_active_req(ep, restart_ep: true); |
495 | ast_vhub_done(ep, req, status: -ECONNRESET); |
496 | rc = 0; |
497 | } |
498 | |
	spin_unlock_irqrestore(&vhub->lock, flags);
500 | return rc; |
501 | } |
502 | |
503 | void ast_vhub_update_epn_stall(struct ast_vhub_ep *ep) |
504 | { |
505 | u32 reg; |
506 | |
507 | if (WARN_ON(ep->d_idx == 0)) |
508 | return; |
	reg = readl(ep->epn.regs + AST_VHUB_EP_CONFIG);
510 | if (ep->epn.stalled || ep->epn.wedged) |
511 | reg |= VHUB_EP_CFG_STALL_CTRL; |
512 | else |
513 | reg &= ~VHUB_EP_CFG_STALL_CTRL; |
	writel(reg, ep->epn.regs + AST_VHUB_EP_CONFIG);
515 | |
516 | if (!ep->epn.stalled && !ep->epn.wedged) |
517 | writel(VHUB_EP_TOGGLE_SET_EPNUM(ep->epn.g_idx), |
		       ep->vhub->regs + AST_VHUB_EP_TOGGLE);
519 | } |
520 | |
521 | static int ast_vhub_set_halt_and_wedge(struct usb_ep* u_ep, bool halt, |
522 | bool wedge) |
523 | { |
524 | struct ast_vhub_ep *ep = to_ast_ep(u_ep); |
525 | struct ast_vhub *vhub = ep->vhub; |
526 | unsigned long flags; |
527 | |
528 | EPDBG(ep, "Set halt (%d) & wedge (%d)\n" , halt, wedge); |
529 | |
530 | if (!u_ep || !u_ep->desc) |
531 | return -EINVAL; |
532 | if (ep->d_idx == 0) |
533 | return 0; |
534 | if (ep->epn.is_iso) |
535 | return -EOPNOTSUPP; |
536 | |
537 | spin_lock_irqsave(&vhub->lock, flags); |
538 | |
539 | /* Fail with still-busy IN endpoints */ |
540 | if (halt && ep->epn.is_in && !list_empty(head: &ep->queue)) { |
541 | spin_unlock_irqrestore(lock: &vhub->lock, flags); |
542 | return -EAGAIN; |
543 | } |
544 | ep->epn.stalled = halt; |
545 | ep->epn.wedged = wedge; |
546 | ast_vhub_update_epn_stall(ep); |
547 | |
	spin_unlock_irqrestore(&vhub->lock, flags);
549 | |
550 | return 0; |
551 | } |
552 | |
553 | static int ast_vhub_epn_set_halt(struct usb_ep *u_ep, int value) |
554 | { |
	return ast_vhub_set_halt_and_wedge(u_ep, value != 0, false);
556 | } |
557 | |
558 | static int ast_vhub_epn_set_wedge(struct usb_ep *u_ep) |
559 | { |
	return ast_vhub_set_halt_and_wedge(u_ep, true, true);
561 | } |
562 | |
563 | static int ast_vhub_epn_disable(struct usb_ep* u_ep) |
564 | { |
565 | struct ast_vhub_ep *ep = to_ast_ep(u_ep); |
566 | struct ast_vhub *vhub = ep->vhub; |
567 | unsigned long flags; |
568 | u32 imask, ep_ier; |
569 | |
570 | EPDBG(ep, "Disabling !\n" ); |
571 | |
572 | spin_lock_irqsave(&vhub->lock, flags); |
573 | |
574 | ep->epn.enabled = false; |
575 | |
576 | /* Stop active DMA if any */ |
	ast_vhub_stop_active_req(ep, false);

	/* Disable endpoint */
	writel(0, ep->epn.regs + AST_VHUB_EP_CONFIG);

	/* Disable ACK interrupt */
	imask = VHUB_EP_IRQ(ep->epn.g_idx);
	ep_ier = readl(vhub->regs + AST_VHUB_EP_ACK_IER);
	ep_ier &= ~imask;
	writel(ep_ier, vhub->regs + AST_VHUB_EP_ACK_IER);
	writel(imask, vhub->regs + AST_VHUB_EP_ACK_ISR);

	/* Nuke all pending requests */
	ast_vhub_nuke(ep, -ESHUTDOWN);

	/* No more descriptor associated with request */
	ep->ep.desc = NULL;

	spin_unlock_irqrestore(&vhub->lock, flags);
596 | |
597 | return 0; |
598 | } |
599 | |
600 | static int ast_vhub_epn_enable(struct usb_ep* u_ep, |
601 | const struct usb_endpoint_descriptor *desc) |
602 | { |
603 | struct ast_vhub_ep *ep = to_ast_ep(u_ep); |
604 | struct ast_vhub_dev *dev; |
605 | struct ast_vhub *vhub; |
606 | u16 maxpacket, type; |
607 | unsigned long flags; |
608 | u32 ep_conf, ep_ier, imask; |
609 | |
610 | /* Check arguments */ |
611 | if (!u_ep || !desc) |
612 | return -EINVAL; |
613 | |
	maxpacket = usb_endpoint_maxp(desc);
615 | if (!ep->d_idx || !ep->dev || |
616 | desc->bDescriptorType != USB_DT_ENDPOINT || |
617 | maxpacket == 0 || maxpacket > ep->ep.maxpacket) { |
618 | EPDBG(ep, "Invalid EP enable,d_idx=%d,dev=%p,type=%d,mp=%d/%d\n" , |
619 | ep->d_idx, ep->dev, desc->bDescriptorType, |
620 | maxpacket, ep->ep.maxpacket); |
621 | return -EINVAL; |
622 | } |
	if (ep->d_idx != usb_endpoint_num(desc)) {
		EPDBG(ep, "EP number mismatch !\n");
625 | return -EINVAL; |
626 | } |
627 | |
628 | if (ep->epn.enabled) { |
629 | EPDBG(ep, "Already enabled\n" ); |
630 | return -EBUSY; |
631 | } |
632 | dev = ep->dev; |
633 | vhub = ep->vhub; |
634 | |
635 | /* Check device state */ |
636 | if (!dev->driver) { |
637 | EPDBG(ep, "Bogus device state: driver=%p speed=%d\n" , |
638 | dev->driver, dev->gadget.speed); |
639 | return -ESHUTDOWN; |
640 | } |
641 | |
642 | /* Grab some info from the descriptor */ |
	ep->epn.is_in = usb_endpoint_dir_in(desc);
	ep->ep.maxpacket = maxpacket;
	type = usb_endpoint_type(desc);
646 | ep->epn.d_next = ep->epn.d_last = 0; |
647 | ep->epn.is_iso = false; |
648 | ep->epn.stalled = false; |
649 | ep->epn.wedged = false; |
650 | |
651 | EPDBG(ep, "Enabling [%s] %s num %d maxpacket=%d\n" , |
652 | ep->epn.is_in ? "in" : "out" , usb_ep_type_string(type), |
653 | usb_endpoint_num(desc), maxpacket); |
654 | |
655 | /* Can we use DMA descriptor mode ? */ |
656 | ep->epn.desc_mode = ep->epn.descs && ep->epn.is_in; |
657 | if (ep->epn.desc_mode) |
658 | memset(ep->epn.descs, 0, 8 * AST_VHUB_DESCS_COUNT); |
659 | |
660 | /* |
661 | * Large send function can send up to 8 packets from |
662 | * one descriptor with a limit of 4095 bytes. |
663 | */ |
664 | ep->epn.chunk_max = ep->ep.maxpacket; |
665 | if (ep->epn.is_in) { |
666 | ep->epn.chunk_max <<= 3; |
667 | while (ep->epn.chunk_max > 4095) |
668 | ep->epn.chunk_max -= ep->ep.maxpacket; |
669 | } |
670 | |
671 | switch(type) { |
672 | case USB_ENDPOINT_XFER_CONTROL: |
673 | EPDBG(ep, "Only one control endpoint\n" ); |
674 | return -EINVAL; |
675 | case USB_ENDPOINT_XFER_INT: |
676 | ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_INT); |
677 | break; |
678 | case USB_ENDPOINT_XFER_BULK: |
679 | ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_BULK); |
680 | break; |
681 | case USB_ENDPOINT_XFER_ISOC: |
682 | ep_conf = VHUB_EP_CFG_SET_TYPE(EP_TYPE_ISO); |
683 | ep->epn.is_iso = true; |
684 | break; |
685 | default: |
686 | return -EINVAL; |
687 | } |
688 | |
689 | /* Encode the rest of the EP config register */ |
690 | if (maxpacket < 1024) |
691 | ep_conf |= VHUB_EP_CFG_SET_MAX_PKT(maxpacket); |
692 | if (!ep->epn.is_in) |
693 | ep_conf |= VHUB_EP_CFG_DIR_OUT; |
694 | ep_conf |= VHUB_EP_CFG_SET_EP_NUM(usb_endpoint_num(desc)); |
695 | ep_conf |= VHUB_EP_CFG_ENABLE; |
696 | ep_conf |= VHUB_EP_CFG_SET_DEV(dev->index + 1); |
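	/*
	 * Worked example (illustrative): a bulk IN endpoint 1 with a
	 * 512-byte maxpacket on the hub's first downstream device gets
	 * EP_TYPE_BULK, MAX_PKT=512, EP_NUM=1, DEV=1 and the enable bit,
	 * with the OUT direction bit left clear.
	 */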
697 | EPVDBG(ep, "config=%08x\n" , ep_conf); |
698 | |
699 | spin_lock_irqsave(&vhub->lock, flags); |
700 | |
701 | /* Disable HW and reset DMA */ |
	writel(0, ep->epn.regs + AST_VHUB_EP_CONFIG);
	writel(VHUB_EP_DMA_CTRL_RESET,
	       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
705 | |
706 | /* Configure and enable */ |
	writel(ep_conf, ep->epn.regs + AST_VHUB_EP_CONFIG);
708 | |
709 | if (ep->epn.desc_mode) { |
710 | /* Clear DMA status, including the DMA read ptr */ |
		writel(0, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);

		/* Set descriptor base */
		writel(ep->epn.descs_dma,
		       ep->epn.regs + AST_VHUB_EP_DESC_BASE);
716 | |
717 | /* Set base DMA config value */ |
718 | ep->epn.dma_conf = VHUB_EP_DMA_DESC_MODE; |
719 | if (ep->epn.is_in) |
720 | ep->epn.dma_conf |= VHUB_EP_DMA_IN_LONG_MODE; |
721 | |
722 | /* First reset and disable all operations */ |
		writel(ep->epn.dma_conf | VHUB_EP_DMA_CTRL_RESET,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);

		/* Enable descriptor mode */
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
729 | } else { |
730 | /* Set base DMA config value */ |
731 | ep->epn.dma_conf = VHUB_EP_DMA_SINGLE_STAGE; |
732 | |
733 | /* Reset and switch to single stage mode */ |
		writel(ep->epn.dma_conf | VHUB_EP_DMA_CTRL_RESET,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
		writel(ep->epn.dma_conf,
		       ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
		writel(0, ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
739 | } |
740 | |
741 | /* Cleanup data toggle just in case */ |
742 | writel(VHUB_EP_TOGGLE_SET_EPNUM(ep->epn.g_idx), |
	       vhub->regs + AST_VHUB_EP_TOGGLE);
744 | |
745 | /* Cleanup and enable ACK interrupt */ |
746 | imask = VHUB_EP_IRQ(ep->epn.g_idx); |
	writel(imask, vhub->regs + AST_VHUB_EP_ACK_ISR);
	ep_ier = readl(vhub->regs + AST_VHUB_EP_ACK_IER);
	ep_ier |= imask;
	writel(ep_ier, vhub->regs + AST_VHUB_EP_ACK_IER);
751 | |
752 | /* Woot, we are online ! */ |
753 | ep->epn.enabled = true; |
754 | |
	spin_unlock_irqrestore(&vhub->lock, flags);
756 | |
757 | return 0; |
758 | } |
759 | |
760 | static void ast_vhub_epn_dispose(struct usb_ep *u_ep) |
761 | { |
762 | struct ast_vhub_ep *ep = to_ast_ep(u_ep); |
763 | |
764 | if (WARN_ON(!ep->dev || !ep->d_idx)) |
765 | return; |
766 | |
767 | EPDBG(ep, "Releasing endpoint\n" ); |
768 | |
769 | /* Take it out of the EP list */ |
	list_del_init(&ep->ep.ep_list);
771 | |
772 | /* Mark the address free in the device */ |
773 | ep->dev->epns[ep->d_idx - 1] = NULL; |
774 | |
775 | /* Free name & DMA buffers */ |
	kfree(ep->ep.name);
	ep->ep.name = NULL;
	dma_free_coherent(&ep->vhub->pdev->dev,
			  AST_VHUB_EPn_MAX_PACKET +
			  8 * AST_VHUB_DESCS_COUNT,
			  ep->buf, ep->buf_dma);
782 | ep->buf = NULL; |
783 | ep->epn.descs = NULL; |
784 | |
785 | /* Mark free */ |
786 | ep->dev = NULL; |
787 | } |
788 | |
789 | static const struct usb_ep_ops ast_vhub_epn_ops = { |
790 | .enable = ast_vhub_epn_enable, |
791 | .disable = ast_vhub_epn_disable, |
792 | .dispose = ast_vhub_epn_dispose, |
793 | .queue = ast_vhub_epn_queue, |
794 | .dequeue = ast_vhub_epn_dequeue, |
795 | .set_halt = ast_vhub_epn_set_halt, |
796 | .set_wedge = ast_vhub_epn_set_wedge, |
797 | .alloc_request = ast_vhub_alloc_request, |
798 | .free_request = ast_vhub_free_request, |
799 | }; |
800 | |
801 | struct ast_vhub_ep *ast_vhub_alloc_epn(struct ast_vhub_dev *d, u8 addr) |
802 | { |
803 | struct ast_vhub *vhub = d->vhub; |
804 | struct ast_vhub_ep *ep; |
805 | unsigned long flags; |
806 | int i; |
807 | |
808 | /* Find a free one (no device) */ |
809 | spin_lock_irqsave(&vhub->lock, flags); |
810 | for (i = 0; i < vhub->max_epns; i++) |
811 | if (vhub->epns[i].dev == NULL) |
812 | break; |
813 | if (i >= vhub->max_epns) { |
		spin_unlock_irqrestore(&vhub->lock, flags);
815 | return NULL; |
816 | } |
817 | |
818 | /* Set it up */ |
819 | ep = &vhub->epns[i]; |
820 | ep->dev = d; |
	spin_unlock_irqrestore(&vhub->lock, flags);
822 | |
823 | DDBG(d, "Allocating gen EP %d for addr %d\n" , i, addr); |
824 | INIT_LIST_HEAD(list: &ep->queue); |
825 | ep->d_idx = addr; |
826 | ep->vhub = vhub; |
827 | ep->ep.ops = &ast_vhub_epn_ops; |
828 | ep->ep.name = kasprintf(GFP_KERNEL, fmt: "ep%d" , addr); |
829 | d->epns[addr-1] = ep; |
830 | ep->epn.g_idx = i; |
831 | ep->epn.regs = vhub->regs + 0x200 + (i * 0x10); |
832 | |
	ep->buf = dma_alloc_coherent(&vhub->pdev->dev,
				     AST_VHUB_EPn_MAX_PACKET +
				     8 * AST_VHUB_DESCS_COUNT,
				     &ep->buf_dma, GFP_KERNEL);
	if (!ep->buf) {
		kfree(ep->ep.name);
839 | ep->ep.name = NULL; |
840 | return NULL; |
841 | } |
842 | ep->epn.descs = ep->buf + AST_VHUB_EPn_MAX_PACKET; |
843 | ep->epn.descs_dma = ep->buf_dma + AST_VHUB_EPn_MAX_PACKET; |
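	/*
	 * Layout note (derived from the allocation above): the single
	 * coherent allocation holds the AST_VHUB_EPn_MAX_PACKET staging
	 * buffer first, immediately followed by the ring of
	 * AST_VHUB_DESCS_COUNT 8-byte descriptors used in descriptor mode.
	 */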
844 | |
845 | usb_ep_set_maxpacket_limit(ep: &ep->ep, AST_VHUB_EPn_MAX_PACKET); |
846 | list_add_tail(new: &ep->ep.ep_list, head: &d->gadget.ep_list); |
847 | ep->ep.caps.type_iso = true; |
848 | ep->ep.caps.type_bulk = true; |
849 | ep->ep.caps.type_int = true; |
850 | ep->ep.caps.dir_in = true; |
851 | ep->ep.caps.dir_out = true; |
852 | |
853 | return ep; |
854 | } |
855 | |