// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 */

/*
 * This file contains the Descriptor DMA implementation for Host mode
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"

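/*
 * The frame list and descriptor list sizes are powers of two, so the
 * index helpers below implement modular arithmetic with a simple mask.
 * For example, with FRLISTEN_64_SIZE == 64, dwc2_frame_list_idx(0x47)
 * yields entry 0x07.
 */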
static u16 dwc2_frame_list_idx(u16 frame)
{
	return frame & (FRLISTEN_64_SIZE - 1);
}

static u16 dwc2_desclist_idx_inc(u16 idx, u16 inc, u8 speed)
{
	return (idx + inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static u16 dwc2_desclist_idx_dec(u16 idx, u16 inc, u8 speed)
{
	return (idx - inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static u16 dwc2_max_desc_num(struct dwc2_qh *qh)
{
	return (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
		qh->dev_speed == USB_SPEED_HIGH) ?
		MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC;
}

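/*
 * For a high-speed endpoint qh->host_interval is expressed in microframes,
 * while the frame list advances one entry per full frame (8 microframes),
 * so the increment below is effectively DIV_ROUND_UP(interval, 8); e.g. an
 * interval of 16 microframes steps 2 frame list entries.
 */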
static u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
{
	return qh->dev_speed == USB_SPEED_HIGH ?
	       (qh->host_interval + 8 - 1) / 8 : qh->host_interval;
}

static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				gfp_t flags)
{
	struct kmem_cache *desc_cache;

	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
	    qh->dev_speed == USB_SPEED_HIGH)
		desc_cache = hsotg->desc_hsisoc_cache;
	else
		desc_cache = hsotg->desc_gen_cache;

	qh->desc_list_sz = sizeof(struct dwc2_dma_desc) *
			   dwc2_max_desc_num(qh);

	qh->desc_list = kmem_cache_zalloc(desc_cache, flags | GFP_DMA);
	if (!qh->desc_list)
		return -ENOMEM;

	qh->desc_list_dma = dma_map_single(hsotg->dev, qh->desc_list,
					   qh->desc_list_sz,
					   DMA_TO_DEVICE);

	qh->n_bytes = kcalloc(dwc2_max_desc_num(qh), sizeof(u32), flags);
	if (!qh->n_bytes) {
		dma_unmap_single(hsotg->dev, qh->desc_list_dma,
				 qh->desc_list_sz,
				 DMA_FROM_DEVICE);
		kmem_cache_free(desc_cache, qh->desc_list);
		qh->desc_list = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	struct kmem_cache *desc_cache;

	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
	    qh->dev_speed == USB_SPEED_HIGH)
		desc_cache = hsotg->desc_hsisoc_cache;
	else
		desc_cache = hsotg->desc_gen_cache;

	if (qh->desc_list) {
		dma_unmap_single(hsotg->dev, qh->desc_list_dma,
				 qh->desc_list_sz, DMA_FROM_DEVICE);
		kmem_cache_free(desc_cache, qh->desc_list);
		qh->desc_list = NULL;
	}

	kfree(qh->n_bytes);
	qh->n_bytes = NULL;
}

static int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags)
{
	if (hsotg->frame_list)
		return 0;

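	/* Each frame list entry is a 32-bit word, one bit per host channel */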
	hsotg->frame_list_sz = 4 * FRLISTEN_64_SIZE;
	hsotg->frame_list = kzalloc(hsotg->frame_list_sz, GFP_ATOMIC | GFP_DMA);
	if (!hsotg->frame_list)
		return -ENOMEM;

	hsotg->frame_list_dma = dma_map_single(hsotg->dev, hsotg->frame_list,
					       hsotg->frame_list_sz,
					       DMA_TO_DEVICE);

	return 0;
}

static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
{
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	if (!hsotg->frame_list) {
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	dma_unmap_single(hsotg->dev, hsotg->frame_list_dma,
			 hsotg->frame_list_sz, DMA_FROM_DEVICE);

	kfree(hsotg->frame_list);
	hsotg->frame_list = NULL;

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = dwc2_readl(hsotg, HCFG);
	if (hcfg & HCFG_PERSCHEDENA) {
		/* already enabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	dwc2_writel(hsotg, hsotg->frame_list_dma, HFLBADDR);

	hcfg &= ~HCFG_FRLISTEN_MASK;
	hcfg |= fr_list_en | HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Enabling Periodic schedule\n");
	dwc2_writel(hsotg, hcfg, HCFG);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

static void dwc2_per_sched_disable(struct dwc2_hsotg *hsotg)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = dwc2_readl(hsotg, HCFG);
	if (!(hcfg & HCFG_PERSCHEDENA)) {
		/* already disabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	hcfg &= ~HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Disabling Periodic schedule\n");
	dwc2_writel(hsotg, hcfg, HCFG);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

/*
 * Activates/Deactivates FrameList entries for the channel based on endpoint
 * servicing period
 */
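/*
 * For example, an interrupt endpoint with a servicing period of 4 frames
 * sets (or clears) bit chan->hc_num in frame list entries i, i+4, i+8, ...,
 * wrapping modulo FRLISTEN_64_SIZE.
 */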
static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				   int enable)
{
	struct dwc2_host_chan *chan;
	u16 i, j, inc;

	if (!hsotg) {
		pr_err("hsotg = %p\n", hsotg);
		return;
	}

	if (!qh->channel) {
		dev_err(hsotg->dev, "qh->channel = %p\n", qh->channel);
		return;
	}

	if (!hsotg->frame_list) {
		dev_err(hsotg->dev, "hsotg->frame_list = %p\n",
			hsotg->frame_list);
		return;
	}

	chan = qh->channel;
	inc = dwc2_frame_incr_val(qh);
	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
		i = dwc2_frame_list_idx(qh->next_active_frame);
	else
		i = 0;

	j = i;
	do {
		if (enable)
			hsotg->frame_list[j] |= 1 << chan->hc_num;
		else
			hsotg->frame_list[j] &= ~(1 << chan->hc_num);
		j = (j + inc) & (FRLISTEN_64_SIZE - 1);
	} while (j != i);

	/*
	 * Sync frame list since controller will access it if periodic
	 * channel is currently enabled.
	 */
	dma_sync_single_for_device(hsotg->dev,
				   hsotg->frame_list_dma,
				   hsotg->frame_list_sz,
				   DMA_TO_DEVICE);

	if (!enable)
		return;

	chan->schinfo = 0;
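	/*
	 * schinfo is a per-microframe scheduling bitmap. For example, a
	 * high-speed interval of 2 microframes gives inc = 4 and sets
	 * bits 0, 2, 4 and 6, i.e. schinfo = 0x55.
	 */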
	if (chan->speed == USB_SPEED_HIGH && qh->host_interval) {
		j = 1;
		/* TODO - check this */
		inc = (8 + qh->host_interval - 1) / qh->host_interval;
		for (i = 0; i < inc; i++) {
			chan->schinfo |= j;
			j = j << qh->host_interval;
		}
	} else {
		chan->schinfo = 0xff;
	}
}

static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
				      struct dwc2_qh *qh)
{
	struct dwc2_host_chan *chan = qh->channel;

	if (dwc2_qh_is_non_per(qh)) {
		if (hsotg->params.uframe_sched)
			hsotg->available_host_channels++;
		else
			hsotg->non_periodic_channels--;
	} else {
		dwc2_update_frame_list(hsotg, qh, 0);
		hsotg->available_host_channels++;
	}

	/*
	 * The condition is added to prevent double cleanup try in case of
	 * device disconnect. See channel cleanup in dwc2_hcd_disconnect().
	 */
	if (chan->qh) {
		if (!list_empty(&chan->hc_list_entry))
			list_del(&chan->hc_list_entry);
		dwc2_hc_cleanup(hsotg, chan);
		list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
		chan->qh = NULL;
	}

	qh->channel = NULL;
	qh->ntd = 0;

	if (qh->desc_list)
		memset(qh->desc_list, 0, sizeof(struct dwc2_dma_desc) *
		       dwc2_max_desc_num(qh));
}

/**
 * dwc2_hcd_qh_init_ddma() - Initializes a QH structure's Descriptor DMA
 * related members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: The QH to init
 * @mem_flags: Indicates the type of memory allocation
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * Allocates memory for the descriptor list. For the first periodic QH,
 * allocates memory for the FrameList and enables periodic scheduling.
 */
int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			  gfp_t mem_flags)
{
	int retval;

	if (qh->do_split) {
		dev_err(hsotg->dev,
			"SPLIT Transfers are not supported in Descriptor DMA mode.\n");
		retval = -EINVAL;
		goto err0;
	}

	retval = dwc2_desc_list_alloc(hsotg, qh, mem_flags);
	if (retval)
		goto err0;

	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	    qh->ep_type == USB_ENDPOINT_XFER_INT) {
		if (!hsotg->frame_list) {
			retval = dwc2_frame_list_alloc(hsotg, mem_flags);
			if (retval)
				goto err1;
			/* Enable periodic schedule on first periodic QH */
			dwc2_per_sched_enable(hsotg, HCFG_FRLISTEN_64);
		}
	}

	qh->ntd = 0;
	return 0;

err1:
	dwc2_desc_list_free(hsotg, qh);
err0:
	return retval;
}

/**
 * dwc2_hcd_qh_free_ddma() - Frees a QH structure's Descriptor DMA related
 * members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: The QH to free
 *
 * Frees descriptor list memory associated with the QH. If the QH is periodic
 * and the last one, frees the FrameList memory and disables periodic
 * scheduling.
 */
void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	unsigned long flags;

	dwc2_desc_list_free(hsotg, qh);

	/*
	 * The channel may still be assigned here in some cases. This has
	 * been seen on isochronous URB dequeue: the channel was halted but
	 * no subsequent ChHalted interrupt arrived to release it, so when
	 * this function is later reached from the endpoint disable routine
	 * the channel remains assigned.
	 */
	spin_lock_irqsave(&hsotg->lock, flags);
	if (qh->channel)
		dwc2_release_channel_ddma(hsotg, qh);
	spin_unlock_irqrestore(&hsotg->lock, flags);

	if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	     qh->ep_type == USB_ENDPOINT_XFER_INT) &&
	    (hsotg->params.uframe_sched ||
	     !hsotg->periodic_channels) && hsotg->frame_list) {
		dwc2_per_sched_disable(hsotg);
		dwc2_frame_list_free(hsotg);
	}
}

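/*
 * For high-speed isochronous, each frame owns a set of 8 descriptors
 * (one per microframe), so with MAX_DMA_DESC_NUM_HS_ISOC == 256 a frame
 * index n maps to descriptor (n % 32) * 8.
 */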
static u8 dwc2_frame_to_desc_idx(struct dwc2_qh *qh, u16 frame_idx)
{
	if (qh->dev_speed == USB_SPEED_HIGH)
		/* Descriptor set (8 descriptors) index which is 8-aligned */
		return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
	else
		return frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
}

/*
 * Determine the starting frame for an isochronous transfer.
 * A few frames are skipped to prevent a race condition with the HC.
 */
static u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 *skip_frames)
{
	u16 frame;

	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);

	/*
	 * next_active_frame is always frame number (not uFrame) both in FS
	 * and HS!
	 */

	/*
	 * skip_frames is used to limit the number of activated descriptors,
	 * to avoid the situation where the HC services the last activated
	 * descriptor first.
	 * Example for FS:
	 * Current frame is 1, scheduled frame is 3. Since the HC always
	 * fetches the descriptor corresponding to curr_frame+1, the
	 * descriptor corresponding to frame 2 will be fetched. If the number
	 * of descriptors is max=64 (or greater), the list will be fully
	 * programmed with Active descriptors and it is possible (though
	 * rare) that the latest descriptor (considering rollback)
	 * corresponding to frame 2 will be serviced first. The HS case is
	 * more probable because, in fact, up to 11 uframes (16 in the code)
	 * may be skipped.
	 */
	if (qh->dev_speed == USB_SPEED_HIGH) {
		/*
		 * Consider the uframe counter also, to start the transfer
		 * asap. If half of the frame has elapsed skip 2 frames,
		 * otherwise just 1 frame.
		 * The starting descriptor index must be 8-aligned, so if the
		 * current frame is near completion the next one is skipped
		 * as well.
		 */
		if (dwc2_micro_frame_num(hsotg->frame_number) >= 5) {
			*skip_frames = 2 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		} else {
			*skip_frames = 1 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		}

		frame = dwc2_full_frame_num(frame);
	} else {
		/*
		 * Two frames are skipped for FS - the current and the next.
		 * But for descriptor programming, 1 frame (descriptor) is
		 * enough, see the example above.
		 */
		*skip_frames = 1;
		frame = dwc2_frame_num_inc(hsotg->frame_number, 2);
	}

	return frame;
}

/*
 * Calculate initial descriptor index for isochronous transfer based on
 * scheduled frame
 */
static u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh)
{
	u16 frame, fr_idx, fr_idx_tmp, skip_frames;

	/*
	 * With the current ISOC processing algorithm the channel is being
	 * released when there are no more QTDs in the list (qh->ntd == 0).
	 * Thus this function is called only when qh->ntd == 0 and
	 * qh->channel == NULL.
	 *
	 * So the qh->channel != NULL branch is not used; it is intentionally
	 * kept in the source file. It would be required for another possible
	 * approach: do not disable and release the channel when the ISOC
	 * session is completed, just move the QH to the inactive schedule
	 * until a new QTD arrives. On a new QTD, the QH is moved back to the
	 * 'ready' schedule, and the starting frame and therefore the starting
	 * desc_index are recalculated. In this case the channel is released
	 * only on ep_disable.
	 */

	/*
	 * Calculate starting descriptor index. For INTERRUPT endpoint it is
	 * always 0.
	 */
	if (qh->channel) {
		frame = dwc2_calc_starting_frame(hsotg, qh, &skip_frames);
		/*
		 * Calculate initial descriptor index based on FrameList
		 * current bitmap and servicing period
		 */
		fr_idx_tmp = dwc2_frame_list_idx(frame);
		fr_idx = (FRLISTEN_64_SIZE +
			  dwc2_frame_list_idx(qh->next_active_frame) -
			  fr_idx_tmp) % dwc2_frame_incr_val(qh);
		fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE;
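		/*
		 * Worked example: with a servicing period of 4 entries,
		 * next_active_frame mapping to entry 2 and the newly
		 * calculated frame mapping to entry 7, this gives
		 * fr_idx = ((64 + 2 - 7) % 4 + 7) % 64 = 10: the first
		 * entry at or after 7 that preserves the endpoint's phase.
		 */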
	} else {
		qh->next_active_frame = dwc2_calc_starting_frame(hsotg, qh,
								 &skip_frames);
		fr_idx = dwc2_frame_list_idx(qh->next_active_frame);
	}

	qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx);

	return skip_frames;
}

#define ISOC_URB_GIVEBACK_ASAP

#define MAX_ISOC_XFER_SIZE_FS	1023
#define MAX_ISOC_XFER_SIZE_HS	3072
#define DESCNUM_THRESHOLD	4
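
/*
 * With ISOC_URB_GIVEBACK_ASAP defined, IOC is set on the descriptor that
 * completes the last frame of each URB, so URBs are given back as soon as
 * they finish. The transfer size limits match the USB 2.0 maximums: 1023
 * bytes for a full-speed isochronous packet and 3 x 1024 bytes per
 * microframe for high-bandwidth high-speed isochronous endpoints.
 */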

static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					 struct dwc2_qtd *qtd,
					 struct dwc2_qh *qh, u32 max_xfer_size,
					 u16 idx)
{
	struct dwc2_dma_desc *dma_desc = &qh->desc_list[idx];
	struct dwc2_hcd_iso_packet_desc *frame_desc;

	memset(dma_desc, 0, sizeof(*dma_desc));
	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];

	if (frame_desc->length > max_xfer_size)
		qh->n_bytes[idx] = max_xfer_size;
	else
		qh->n_bytes[idx] = frame_desc->length;

	dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
	dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT &
			   HOST_DMA_ISOC_NBYTES_MASK;

	/* Set active bit */
	dma_desc->status |= HOST_DMA_A;

	qh->ntd++;
	qtd->isoc_frame_index_last++;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for each descriptor corresponding to last frame of URB */
	if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
		dma_desc->status |= HOST_DMA_IOC;
#endif

	dma_sync_single_for_device(hsotg->dev,
				   qh->desc_list_dma +
				   (idx * sizeof(struct dwc2_dma_desc)),
				   sizeof(struct dwc2_dma_desc),
				   DMA_TO_DEVICE);
}

static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 skip_frames)
{
	struct dwc2_qtd *qtd;
	u32 max_xfer_size;
	u16 idx, inc, n_desc = 0, ntd_max = 0;
	u16 cur_idx;
	u16 next_idx;

	idx = qh->td_last;
	inc = qh->host_interval;
	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
	cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
	next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed);

	/*
	 * Ensure the current frame number didn't overstep the last scheduled
	 * descriptor. If it happened, the only way to recover is to move
	 * qh->td_last to current frame number + 1, so that the next isoc
	 * descriptor is scheduled on frame number + 1 and not on a past
	 * frame.
	 */
	if (dwc2_frame_idx_num_gt(cur_idx, next_idx) || (cur_idx == next_idx)) {
		if (inc < 32) {
			dev_vdbg(hsotg->dev,
				 "current frame number overstep last descriptor\n");
			qh->td_last = dwc2_desclist_idx_inc(cur_idx, inc,
							    qh->dev_speed);
			idx = qh->td_last;
		}
	}

	if (qh->host_interval) {
		ntd_max = (dwc2_max_desc_num(qh) + qh->host_interval - 1) /
				qh->host_interval;
		if (skip_frames && !qh->channel)
			ntd_max -= skip_frames / qh->host_interval;
	}

	max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ?
			MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;

	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
		if (qtd->in_process &&
		    qtd->isoc_frame_index_last ==
		    qtd->urb->packet_count)
			continue;

		qtd->isoc_td_first = idx;
		while (qh->ntd < ntd_max && qtd->isoc_frame_index_last <
						qtd->urb->packet_count) {
			dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh,
						     max_xfer_size, idx);
			idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed);
			n_desc++;
		}
		qtd->isoc_td_last = idx;
		qtd->in_process = 1;
	}

	qh->td_last = idx;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for last descriptor if descriptor list is full */
	if (qh->ntd == ntd_max) {
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
		qh->desc_list[idx].status |= HOST_DMA_IOC;
		dma_sync_single_for_device(hsotg->dev,
					   qh->desc_list_dma + (idx *
					   sizeof(struct dwc2_dma_desc)),
					   sizeof(struct dwc2_dma_desc),
					   DMA_TO_DEVICE);
	}
#else
	/*
	 * Set the IOC bit only for one descriptor. Always try to be ahead of
	 * HW processing, i.e. on IOC generation the driver activates the next
	 * descriptor but the core continues to process descriptors following
	 * the one with IOC set.
	 */

	if (n_desc > DESCNUM_THRESHOLD)
		/*
		 * Move IOC "up". Required even if there is only one QTD
		 * in the list, because QTDs might continue to be queued,
		 * but during the activation only one was queued. Actually
		 * more than one QTD might be in the list if this function
		 * is called from XferCompletion - QTDs were queued during
		 * HW processing of the previous descriptor chunk.
		 */
		idx = dwc2_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
					    qh->dev_speed);
	else
		/*
		 * Set the IOC for the latest descriptor if either the number
		 * of descriptors is not greater than the threshold or no more
		 * new descriptors were activated
		 */
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);

	qh->desc_list[idx].status |= HOST_DMA_IOC;
	dma_sync_single_for_device(hsotg->dev,
				   qh->desc_list_dma +
				   (idx * sizeof(struct dwc2_dma_desc)),
				   sizeof(struct dwc2_dma_desc),
				   DMA_TO_DEVICE);
#endif
}

static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan,
				    struct dwc2_qtd *qtd, struct dwc2_qh *qh,
				    int n_desc)
{
	struct dwc2_dma_desc *dma_desc = &qh->desc_list[n_desc];
	int len = chan->xfer_len;

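	/*
	 * Clamp below the NBYTES field limit, leaving max_packet - 1 bytes
	 * of headroom so that rounding an IN transfer up to a whole number
	 * of packets below cannot overflow the field.
	 */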
	if (len > HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1))
		len = HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1);

	if (chan->ep_is_in) {
		int num_packets;

		if (len > 0 && chan->max_packet)
			num_packets = (len + chan->max_packet - 1)
					/ chan->max_packet;
		else
			/* Need 1 packet for transfer length of 0 */
			num_packets = 1;

		/* Always program an integral # of packets for IN transfers */
		len = num_packets * chan->max_packet;
	}

	dma_desc->status = len << HOST_DMA_NBYTES_SHIFT & HOST_DMA_NBYTES_MASK;
	qh->n_bytes[n_desc] = len;

	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL &&
	    qtd->control_phase == DWC2_CONTROL_SETUP)
		dma_desc->status |= HOST_DMA_SUP;

	dma_desc->buf = (u32)chan->xfer_dma;

	dma_sync_single_for_device(hsotg->dev,
				   qh->desc_list_dma +
				   (n_desc * sizeof(struct dwc2_dma_desc)),
				   sizeof(struct dwc2_dma_desc),
				   DMA_TO_DEVICE);

	/*
	 * Last (or only) descriptor of IN transfer with actual size less
	 * than MaxPacket
	 */
	if (len > chan->xfer_len) {
		chan->xfer_len = 0;
	} else {
		chan->xfer_dma += len;
		chan->xfer_len -= len;
	}
}

static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh)
{
	struct dwc2_qtd *qtd;
	struct dwc2_host_chan *chan = qh->channel;
	int n_desc = 0;

	dev_vdbg(hsotg->dev, "%s(): qh=%p dma=%08lx len=%d\n", __func__, qh,
		 (unsigned long)chan->xfer_dma, chan->xfer_len);

	/*
	 * Start with chan->xfer_dma initialized in assign_and_init_hc(); then,
	 * if the SG transfer consists of multiple URBs, this pointer is
	 * re-assigned to the buffer of the currently processed QTD. For a
	 * non-SG request there is always one QTD active.
	 */

	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
		dev_vdbg(hsotg->dev, "qtd=%p\n", qtd);

		if (n_desc) {
			/* SG request - more than 1 QTD */
			chan->xfer_dma = qtd->urb->dma +
					qtd->urb->actual_length;
			chan->xfer_len = qtd->urb->length -
					qtd->urb->actual_length;
			dev_vdbg(hsotg->dev, "buf=%08lx len=%d\n",
				 (unsigned long)chan->xfer_dma, chan->xfer_len);
		}

		qtd->n_desc = 0;
		do {
			if (n_desc > 1) {
				qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
				dev_vdbg(hsotg->dev,
					 "set A bit in desc %d (%p)\n",
					 n_desc - 1,
					 &qh->desc_list[n_desc - 1]);
				dma_sync_single_for_device(hsotg->dev,
							   qh->desc_list_dma +
							   ((n_desc - 1) *
							   sizeof(struct dwc2_dma_desc)),
							   sizeof(struct dwc2_dma_desc),
							   DMA_TO_DEVICE);
			}
			dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
			dev_vdbg(hsotg->dev,
				 "desc %d (%p) buf=%08x status=%08x\n",
				 n_desc, &qh->desc_list[n_desc],
				 qh->desc_list[n_desc].buf,
				 qh->desc_list[n_desc].status);
			qtd->n_desc++;
			n_desc++;
		} while (chan->xfer_len > 0 &&
			 n_desc != MAX_DMA_DESC_NUM_GENERIC);

		dev_vdbg(hsotg->dev, "n_desc=%d\n", n_desc);
		qtd->in_process = 1;
		if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL)
			break;
		if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
			break;
	}

	if (n_desc) {
		qh->desc_list[n_desc - 1].status |=
				HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
		dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
			 n_desc - 1, &qh->desc_list[n_desc - 1]);
		dma_sync_single_for_device(hsotg->dev,
					   qh->desc_list_dma + (n_desc - 1) *
					   sizeof(struct dwc2_dma_desc),
					   sizeof(struct dwc2_dma_desc),
					   DMA_TO_DEVICE);
		if (n_desc > 1) {
			qh->desc_list[0].status |= HOST_DMA_A;
			dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
				 &qh->desc_list[0]);
			dma_sync_single_for_device(hsotg->dev,
						   qh->desc_list_dma,
						   sizeof(struct dwc2_dma_desc),
						   DMA_TO_DEVICE);
		}
		chan->ntd = n_desc;
	}
}

/**
 * dwc2_hcd_start_xfer_ddma() - Starts a transfer in Descriptor DMA mode
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: The QH to init
 *
 * For Control and Bulk endpoints, initializes the descriptor list and starts
 * the transfer. For Interrupt and Isochronous endpoints, initializes the
 * descriptor list then updates the FrameList, marking appropriate entries
 * as active.
 *
 * For Isochronous endpoints the starting descriptor index is calculated based
 * on the scheduled frame, but only on the first transfer descriptor within a
 * session. Then the transfer is started via enabling the channel.
 *
 * For Isochronous endpoints the channel is not halted on XferComplete
 * interrupt so it remains assigned to the endpoint (QH) until the session
 * is done.
 */
void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	/* Channel is already assigned */
	struct dwc2_host_chan *chan = qh->channel;
	u16 skip_frames = 0;

	switch (chan->ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		dwc2_init_non_isoc_dma_desc(hsotg, qh);
		dwc2_hc_start_transfer_ddma(hsotg, chan);
		break;
	case USB_ENDPOINT_XFER_INT:
		dwc2_init_non_isoc_dma_desc(hsotg, qh);
		dwc2_update_frame_list(hsotg, qh, 1);
		dwc2_hc_start_transfer_ddma(hsotg, chan);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (!qh->ntd)
			skip_frames = dwc2_recalc_initial_desc_idx(hsotg, qh);
		dwc2_init_isoc_dma_desc(hsotg, qh, skip_frames);

		if (!chan->xfer_started) {
			dwc2_update_frame_list(hsotg, qh, 1);

			/*
			 * Always set to max, instead of actual size. Otherwise
			 * ntd would have to be changed while the channel is
			 * enabled, which is not recommended.
			 */
			chan->ntd = dwc2_max_desc_num(qh);

			/* Enable channel only once for ISOC */
			dwc2_hc_start_transfer_ddma(hsotg, chan);
		}

		break;
	default:
		break;
	}
}

#define DWC2_CMPL_DONE		1
#define DWC2_CMPL_STOP		2
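
/*
 * Return codes of dwc2_cmpl_host_isoc_dma_desc(): DWC2_CMPL_DONE means the
 * last frame of the URB completed and the URB was given back;
 * DWC2_CMPL_STOP means a descriptor with the IOC bit set was reached, so
 * the caller should stop scanning the list.
 */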

static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					struct dwc2_host_chan *chan,
					struct dwc2_qtd *qtd,
					struct dwc2_qh *qh, u16 idx)
{
	struct dwc2_dma_desc *dma_desc;
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	u16 remain = 0;
	int rc = 0;

	if (!qtd->urb)
		return -EINVAL;

	dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
				sizeof(struct dwc2_dma_desc)),
				sizeof(struct dwc2_dma_desc),
				DMA_FROM_DEVICE);

	dma_desc = &qh->desc_list[idx];

	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
	dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
	if (chan->ep_is_in)
		remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
			 HOST_DMA_ISOC_NBYTES_SHIFT;

	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
		/*
		 * XactError, or unable to complete all the transactions
		 * in the scheduled micro-frame/frame, both indicated by
		 * HOST_DMA_STS_PKTERR
		 */
		qtd->urb->error_count++;
		frame_desc->actual_length = qh->n_bytes[idx] - remain;
		frame_desc->status = -EPROTO;
	} else {
		/* Success */
		frame_desc->actual_length = qh->n_bytes[idx] - remain;
		frame_desc->status = 0;
	}

	if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
		/*
		 * urb->status is not used for isoc transfers here. The
		 * individual frame_desc statuses are used instead.
		 */
		dwc2_host_complete(hsotg, qtd, 0);
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);

		/*
		 * This check is necessary because urb_dequeue can be called
		 * from an urb complete callback (the sound driver for
		 * example). All pending URBs are dequeued there, so no need
		 * for further processing.
		 */
		if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE)
			return -1;
		rc = DWC2_CMPL_DONE;
	}

	qh->ntd--;

	/* Stop if IOC requested descriptor reached */
	if (dma_desc->status & HOST_DMA_IOC)
		rc = DWC2_CMPL_STOP;

	return rc;
}

static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
					 struct dwc2_host_chan *chan,
					 enum dwc2_halt_status halt_status)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	struct dwc2_qtd *qtd, *qtd_tmp;
	struct dwc2_qh *qh;
	u16 idx;
	int rc;

	qh = chan->qh;
	idx = qh->td_first;

	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
			qtd->in_process = 0;
		return;
	}

	if (halt_status == DWC2_HC_XFER_AHB_ERR ||
	    halt_status == DWC2_HC_XFER_BABBLE_ERR) {
		/*
		 * The channel is halted in these error cases, which are
		 * considered serious issues.
		 * Complete all URBs marking all frames as failed,
		 * irrespective of whether some of the descriptors (frames)
		 * succeeded or not. Pass the error code to the completion
		 * routine as well, to update urb->status; some class drivers
		 * may use it to stop queuing transfer requests.
		 */
		int err = halt_status == DWC2_HC_XFER_AHB_ERR ?
			  -EIO : -EOVERFLOW;

		list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
					 qtd_list_entry) {
			if (qtd->urb) {
				for (idx = 0; idx < qtd->urb->packet_count;
				     idx++) {
					frame_desc = &qtd->urb->iso_descs[idx];
					frame_desc->status = err;
				}

				dwc2_host_complete(hsotg, qtd, err);
			}

			dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		}

		return;
	}

	list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
		if (!qtd->in_process)
			break;

		/*
		 * Ensure idx corresponds to the descriptor where the first
		 * URB of this QTD was added. In fact, during isoc desc init,
		 * dwc2 may skip an index if the current frame number is
		 * already past it.
		 */
		if (idx != qtd->isoc_td_first) {
			dev_vdbg(hsotg->dev,
				 "try to complete %d instead of %d\n",
				 idx, qtd->isoc_td_first);
			idx = qtd->isoc_td_first;
		}

		do {
			struct dwc2_qtd *qtd_next;
			u16 cur_idx;

			rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd, qh,
							  idx);
			if (rc < 0)
				return;
			idx = dwc2_desclist_idx_inc(idx, qh->host_interval,
						    chan->speed);
			if (!rc)
				continue;

			if (rc == DWC2_CMPL_DONE)
				break;

			/* rc == DWC2_CMPL_STOP */

			if (qh->host_interval >= 32)
				goto stop_scan;

			qh->td_first = idx;
			cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
			qtd_next = list_first_entry(&qh->qtd_list,
						    struct dwc2_qtd,
						    qtd_list_entry);
			if (dwc2_frame_idx_num_gt(cur_idx,
						  qtd_next->isoc_td_last))
				break;

			goto stop_scan;

		} while (idx != qh->td_first);
	}

stop_scan:
	qh->td_first = idx;
}

static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
					       struct dwc2_host_chan *chan,
					       struct dwc2_qtd *qtd,
					       struct dwc2_dma_desc *dma_desc,
					       enum dwc2_halt_status halt_status,
					       u32 n_bytes, int *xfer_done)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	u16 remain = 0;

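	/*
	 * For IN transfers the controller writes the residue (bytes not
	 * transferred) back into the NBYTES field; the actual length is the
	 * programmed length minus this remainder.
	 */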
	if (chan->ep_is_in)
		remain = (dma_desc->status & HOST_DMA_NBYTES_MASK) >>
			 HOST_DMA_NBYTES_SHIFT;

	dev_vdbg(hsotg->dev, "remain=%d dwc2_urb=%p\n", remain, urb);

	if (halt_status == DWC2_HC_XFER_AHB_ERR) {
		dev_err(hsotg->dev, "EIO\n");
		urb->status = -EIO;
		return 1;
	}

	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
		switch (halt_status) {
		case DWC2_HC_XFER_STALL:
			dev_vdbg(hsotg->dev, "Stall\n");
			urb->status = -EPIPE;
			break;
		case DWC2_HC_XFER_BABBLE_ERR:
			dev_err(hsotg->dev, "Babble\n");
			urb->status = -EOVERFLOW;
			break;
		case DWC2_HC_XFER_XACT_ERR:
			dev_err(hsotg->dev, "XactErr\n");
			urb->status = -EPROTO;
			break;
		default:
			dev_err(hsotg->dev,
				"%s: Unhandled descriptor error status (%d)\n",
				__func__, halt_status);
			break;
		}
		return 1;
	}

	if (dma_desc->status & HOST_DMA_A) {
		dev_vdbg(hsotg->dev,
			 "Active descriptor encountered on channel %d\n",
			 chan->hc_num);
		return 0;
	}

	if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL) {
		if (qtd->control_phase == DWC2_CONTROL_DATA) {
			urb->actual_length += n_bytes - remain;
			if (remain || urb->actual_length >= urb->length) {
				/*
				 * For the Control Data stage do not set
				 * urb->status to 0, to prevent the URB
				 * callback. Set it when the Status phase
				 * is done. See below.
				 */
				*xfer_done = 1;
			}
		} else if (qtd->control_phase == DWC2_CONTROL_STATUS) {
			urb->status = 0;
			*xfer_done = 1;
		}
		/* No handling for SETUP stage */
	} else {
		/* BULK and INTR */
		urb->actual_length += n_bytes - remain;
		dev_vdbg(hsotg->dev, "length=%d actual=%d\n", urb->length,
			 urb->actual_length);
		if (remain || urb->actual_length >= urb->length) {
			urb->status = 0;
			*xfer_done = 1;
		}
	}

	return 0;
}

static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
				      struct dwc2_host_chan *chan,
				      int chnum, struct dwc2_qtd *qtd,
				      int desc_num,
				      enum dwc2_halt_status halt_status,
				      int *xfer_done)
{
	struct dwc2_qh *qh = chan->qh;
	struct dwc2_hcd_urb *urb = qtd->urb;
	struct dwc2_dma_desc *dma_desc;
	u32 n_bytes;
	int failed;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (!urb)
		return -EINVAL;

	dma_sync_single_for_cpu(hsotg->dev,
				qh->desc_list_dma + (desc_num *
				sizeof(struct dwc2_dma_desc)),
				sizeof(struct dwc2_dma_desc),
				DMA_FROM_DEVICE);

	dma_desc = &qh->desc_list[desc_num];
	n_bytes = qh->n_bytes[desc_num];
	dev_vdbg(hsotg->dev,
		 "qtd=%p dwc2_urb=%p desc_num=%d desc=%p n_bytes=%d\n",
		 qtd, urb, desc_num, dma_desc, n_bytes);
	failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
						     halt_status, n_bytes,
						     xfer_done);
	if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
		dwc2_host_complete(hsotg, qtd, urb->status);
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x\n",
			 failed, *xfer_done);
		return failed;
	}

	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL) {
		switch (qtd->control_phase) {
		case DWC2_CONTROL_SETUP:
			if (urb->length > 0)
				qtd->control_phase = DWC2_CONTROL_DATA;
			else
				qtd->control_phase = DWC2_CONTROL_STATUS;
			dev_vdbg(hsotg->dev,
				 " Control setup transaction done\n");
			break;
		case DWC2_CONTROL_DATA:
			if (*xfer_done) {
				qtd->control_phase = DWC2_CONTROL_STATUS;
				dev_vdbg(hsotg->dev,
					 " Control data transfer done\n");
			} else if (desc_num + 1 == qtd->n_desc) {
				/*
				 * Last descriptor for Control data stage which
				 * is not completed yet
				 */
				dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
							  qtd);
			}
			break;
		default:
			break;
		}
	}

	return 0;
}

static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
					     struct dwc2_host_chan *chan,
					     int chnum,
					     enum dwc2_halt_status halt_status)
{
	struct list_head *qtd_item, *qtd_tmp;
	struct dwc2_qh *qh = chan->qh;
	struct dwc2_qtd *qtd = NULL;
	int xfer_done;
	int desc_num = 0;

	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
			qtd->in_process = 0;
		return;
	}

	list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
		int i;
		int qtd_desc_count;

		qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
		xfer_done = 0;
		qtd_desc_count = qtd->n_desc;

		for (i = 0; i < qtd_desc_count; i++) {
			if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
						       desc_num, halt_status,
						       &xfer_done)) {
				qtd = NULL;
				goto stop_scan;
			}

			desc_num++;
		}
	}

stop_scan:
	if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
		/*
		 * Resetting the data toggle for bulk and interrupt endpoints
		 * in case of stall. See handle_hc_stall_intr().
		 */
		if (halt_status == DWC2_HC_XFER_STALL)
			qh->data_toggle = DWC2_HC_PID_DATA0;
		else
			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, NULL);
	}

	if (halt_status == DWC2_HC_XFER_COMPLETE) {
		if (chan->hcint & HCINTMSK_NYET) {
			/*
			 * Got a NYET on the last transaction of the transfer.
			 * It means that the endpoint should be in the PING
			 * state at the beginning of the next transfer.
			 */
			qh->ping_state = 1;
		}
	}
}

/**
 * dwc2_hcd_complete_xfer_ddma() - Scans the descriptor list, updates the URB's
 * status and calls the completion routine for the URB if it's done. Called
 * from interrupt handlers.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @chan: Host channel the transfer is completed on
 * @chnum: Index of Host channel registers
 * @halt_status: Reason the channel is being halted or just XferComplete
 * for isochronous transfers
 *
 * Releases the channel to be used by other transfers.
 * In case of an Isochronous endpoint the channel is not halted until the end
 * of the session, i.e. until the QTD list is empty.
 * If a periodic channel is released, the FrameList is updated accordingly.
 * Calls transaction selection routines to activate pending transfers.
 */
void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan, int chnum,
				 enum dwc2_halt_status halt_status)
{
	struct dwc2_qh *qh = chan->qh;
	int continue_isoc_xfer = 0;
	enum dwc2_transaction_type tr_type;

	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
		dwc2_complete_isoc_xfer_ddma(hsotg, chan, halt_status);

		/* Release the channel if halted or session completed */
		if (halt_status != DWC2_HC_XFER_COMPLETE ||
		    list_empty(&qh->qtd_list)) {
			struct dwc2_qtd *qtd, *qtd_tmp;

			/*
			 * Kill all remaining QTDs since the channel has been
			 * halted.
			 */
			list_for_each_entry_safe(qtd, qtd_tmp,
						 &qh->qtd_list,
						 qtd_list_entry) {
				dwc2_host_complete(hsotg, qtd,
						   -ECONNRESET);
				dwc2_hcd_qtd_unlink_and_free(hsotg,
							     qtd, qh);
			}

			/* Halt the channel if session completed */
			if (halt_status == DWC2_HC_XFER_COMPLETE)
				dwc2_hc_halt(hsotg, chan, halt_status);
			dwc2_release_channel_ddma(hsotg, qh);
			dwc2_hcd_qh_unlink(hsotg, qh);
		} else {
			/* Keep in assigned schedule to continue transfer */
			list_move_tail(&qh->qh_list_entry,
				       &hsotg->periodic_sched_assigned);
			/*
			 * If the channel has been halted during giveback of an
			 * urb then prevent any new scheduling.
			 */
			if (!chan->halt_status)
				continue_isoc_xfer = 1;
		}
		/*
		 * Todo: Consider the case when period exceeds FrameList size.
		 * Frame Rollover interrupt should be used.
		 */
	} else {
		/*
		 * Scan descriptor list to complete the URB(s), then release
		 * the channel
		 */
		dwc2_complete_non_isoc_xfer_ddma(hsotg, chan, chnum,
						 halt_status);
		dwc2_release_channel_ddma(hsotg, qh);
		dwc2_hcd_qh_unlink(hsotg, qh);

		if (!list_empty(&qh->qtd_list)) {
			/*
			 * Add back to inactive non-periodic schedule on normal
			 * completion
			 */
			dwc2_hcd_qh_add(hsotg, qh);
		}
	}

	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE || continue_isoc_xfer) {
		if (continue_isoc_xfer) {
			if (tr_type == DWC2_TRANSACTION_NONE)
				tr_type = DWC2_TRANSACTION_PERIODIC;
			else if (tr_type == DWC2_TRANSACTION_NON_PERIODIC)
				tr_type = DWC2_TRANSACTION_ALL;
		}
		dwc2_hcd_queue_transactions(hsotg, tr_type);
	}
}