Lines matching refs:etd (all references to the identifier 'etd')
57 #define DEBUG_LOG_FRAME(imx21, etd, event) \ argument
58 (etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
60 #define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0) argument
140 struct imx21 *imx21, struct etd_priv *etd, int status);
142 static void free_dmem(struct imx21 *imx21, struct etd_priv *etd);
151 struct etd_priv *etd = imx21->etd; in alloc_etd() local
153 for (i = 0; i < USB_NUM_ETD; i++, etd++) { in alloc_etd()
154 if (etd->alloc == 0) { in alloc_etd()
155 memset(etd, 0, sizeof(imx21->etd[0])); in alloc_etd()
156 etd->alloc = 1; in alloc_etd()
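
The alloc_etd() matches above show a linear scan of the driver's ETD table for an entry with alloc == 0, which is then zeroed and marked in use. A minimal self-contained sketch of that scan follows; the pool size, the reduced struct etd_priv and the standalone function name are illustrative assumptions, while the real code walks imx21->etd[] with a moving pointer:

    #include <string.h>

    #define USB_NUM_ETD 32                  /* pool size assumed for illustration */

    struct etd_priv {
        int alloc;                          /* nonzero while the ETD is in use */
        /* ... the real struct carries much more per-ETD state ... */
    };

    static struct etd_priv etds[USB_NUM_ETD];

    static int alloc_etd_slot(void)
    {
        int i;

        for (i = 0; i < USB_NUM_ETD; i++) {
            if (etds[i].alloc == 0) {
                memset(&etds[i], 0, sizeof(etds[0]));
                etds[i].alloc = 1;
                return i;                   /* the index is the ETD number */
            }
        }
        return -1;                          /* no free ETD left */
    }
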
167 struct etd_priv *etd = &imx21->etd[num]; in disactivate_etd() local
174 etd->active_count = 0; in disactivate_etd()
176 DEBUG_LOG_FRAME(imx21, etd, disactivated); in disactivate_etd()
181 struct etd_priv *etd = imx21->etd + num; in reset_etd() local
188 etd->urb = NULL; in reset_etd()
189 etd->ep = NULL; in reset_etd()
190 etd->td = NULL; in reset_etd()
191 etd->bounce_buffer = NULL; in reset_etd()
203 if (imx21->etd[num].alloc == 0) { in free_etd()
210 memset(&imx21->etd[num], 0, sizeof(imx21->etd[0])); in free_etd()
257 struct etd_priv *etd = &imx21->etd[etd_num]; in activate_etd() local
259 if (etd->dma_handle && unsuitable_for_dma(etd->dma_handle)) { in activate_etd()
261 if (etd->len <= etd->dmem_size) { in activate_etd()
265 etd->dmem_offset, in activate_etd()
266 etd->cpu_buffer, etd->len); in activate_etd()
268 etd->dma_handle = 0; in activate_etd()
276 etd->bounce_buffer = kmalloc(etd->len, in activate_etd()
280 etd->bounce_buffer = kmemdup(etd->cpu_buffer, in activate_etd()
281 etd->len, in activate_etd()
284 if (!etd->bounce_buffer) { in activate_etd()
289 etd->dma_handle = in activate_etd()
291 etd->bounce_buffer, in activate_etd()
292 etd->len, in activate_etd()
294 if (dma_mapping_error(imx21->dev, etd->dma_handle)) { in activate_etd()
306 if (etd->dma_handle) { in activate_etd()
310 writel(etd->dma_handle, imx21->regs + USB_ETDSMSA(etd_num)); in activate_etd()
320 DEBUG_LOG_FRAME(imx21, etd, activated); in activate_etd()
323 if (!etd->active_count) { in activate_etd()
325 etd->activated_frame = readl(imx21->regs + USBH_FRMNUB); in activate_etd()
326 etd->disactivated_frame = -1; in activate_etd()
327 etd->last_int_frame = -1; in activate_etd()
328 etd->last_req_frame = -1; in activate_etd()
331 etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i); in activate_etd()
335 etd->active_count = 1; in activate_etd()
340 kfree(etd->bounce_buffer); in activate_etd()
343 free_dmem(imx21, etd); in activate_etd()
344 nonisoc_urb_completed_for_etd(imx21, etd, -ENOMEM); in activate_etd()
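
The activate_etd() matches trace a fallback for buffers that cannot be handed to the DMA engine directly: a transfer that fits in the ETD's DMEM window drops to PIO (dma_handle is cleared), anything larger is staged through a bounce buffer (kmalloc'd empty for reads, kmemdup'd from the caller's data for writes) which is then DMA-mapped, and a failed allocation or mapping ends with free_dmem() and completion of the URB with -ENOMEM. A reduced userspace model of that decision, with simplified names and an invented alignment test standing in for the driver's unsuitable_for_dma():

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>
    #include <stdbool.h>

    struct xfer {
        void *cpu_buffer;      /* URB buffer as seen by the CPU */
        void *bounce_buffer;   /* staging copy, only when DMA needs one */
        size_t len;            /* transfer length */
        size_t dmem_size;      /* on-chip DMEM reserved for this ETD */
        bool use_dma;          /* cleared when we fall back to PIO */
        bool is_out;           /* host-to-device direction */
    };

    /* Stand-in test; the driver's actual criterion is not shown in the listing. */
    static bool unsuitable_for_dma(const void *buf)
    {
        return ((uintptr_t)buf & 3) != 0;
    }

    static int prepare_transfer(struct xfer *x)
    {
        if (!unsuitable_for_dma(x->cpu_buffer))
            return 0;                  /* DMA the buffer directly */

        if (x->len <= x->dmem_size) {
            x->use_dma = false;        /* small enough: PIO via DMEM
                                          (the driver copies OUT data into DMEM here) */
            return 0;
        }

        /* Large transfer: stage it in a bounce buffer.  For OUT the data is
         * copied now (kmemdup in the driver); for IN the buffer is only
         * allocated and filled on completion. */
        x->bounce_buffer = malloc(x->len);
        if (!x->bounce_buffer)
            return -1;                 /* driver frees DMEM and completes
                                          the URB with -ENOMEM here */
        if (x->is_out)
            memcpy(x->bounce_buffer, x->cpu_buffer, x->len);
        return 0;
    }
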
393 struct etd_priv *etd, u32 dmem_offset) in activate_queued_etd() argument
395 struct urb_priv *urb_priv = etd->urb->hcpriv; in activate_queued_etd()
396 int etd_num = etd - &imx21->etd[0]; in activate_queued_etd()
405 etd->dmem_offset = dmem_offset; in activate_queued_etd()
410 static void free_dmem(struct imx21 *imx21, struct etd_priv *etd) in free_dmem() argument
417 if (!etd->dmem_size) in free_dmem()
419 etd->dmem_size = 0; in free_dmem()
421 offset = etd->dmem_offset; in free_dmem()
439 list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) { in free_dmem()
440 offset = alloc_dmem(imx21, etd->dmem_size, etd->ep); in free_dmem()
442 list_del(&etd->queue); in free_dmem()
443 activate_queued_etd(imx21, etd, (u32)offset); in free_dmem()
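
free_dmem() does more than return this ETD's DMEM: once the space is back in the allocator it walks imx21->queue_for_dmem and starts every queued ETD whose request can now be satisfied (alloc_dmem() succeeds, the entry is unlinked, activate_queued_etd() runs). A sketch of that drain loop using a simplified singly linked queue; alloc_dmem(), release_dmem() and start_etd() are stand-in declarations, not the driver's signatures:

    struct waiting_etd {
        struct waiting_etd *next;
        unsigned int dmem_size;        /* bytes of DMEM it still needs */
    };

    extern int  alloc_dmem(unsigned int size);     /* >= 0 offset, < 0 if full */
    extern void release_dmem(unsigned int offset, unsigned int size);
    extern void start_etd(struct waiting_etd *etd, unsigned int dmem_offset);

    static void dmem_released(unsigned int offset, unsigned int size,
                              struct waiting_etd **queue)
    {
        struct waiting_etd **link = queue;

        release_dmem(offset, size);

        /* Retry every queued ETD; start those whose request now fits. */
        while (*link) {
            struct waiting_etd *etd = *link;
            int new_offset = alloc_dmem(etd->dmem_size);

            if (new_offset >= 0) {
                *link = etd->next;             /* unlink from the queue */
                start_etd(etd, new_offset);
            } else {
                link = &etd->next;             /* keep it waiting */
            }
        }
    }
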
474 int etd_num = ep_priv->etd[i]; in ep_idle()
475 struct etd_priv *etd; in ep_idle() local
479 etd = &imx21->etd[etd_num]; in ep_idle()
480 ep_priv->etd[i] = -1; in ep_idle()
482 free_dmem(imx21, etd); /* for isoc */ in ep_idle()
496 ep_priv->etd[i] = etd_num; in ep_idle()
530 struct imx21 *imx21, struct etd_priv *etd, int status) in nonisoc_urb_completed_for_etd() argument
532 struct usb_host_endpoint *ep = etd->ep; in nonisoc_urb_completed_for_etd()
534 urb_done(imx21->hcd, etd->urb, status); in nonisoc_urb_completed_for_etd()
535 etd->urb = NULL; in nonisoc_urb_completed_for_etd()
556 struct etd_priv *etd; in schedule_isoc_etds() local
569 etd_num = ep_priv->etd[i]; in schedule_isoc_etds()
573 etd = &imx21->etd[etd_num]; in schedule_isoc_etds()
574 if (etd->urb) in schedule_isoc_etds()
595 etd->td = td; in schedule_isoc_etds()
596 etd->ep = td->ep; in schedule_isoc_etds()
597 etd->urb = td->urb; in schedule_isoc_etds()
598 etd->len = td->len; in schedule_isoc_etds()
599 etd->dma_handle = td->dma_handle; in schedule_isoc_etds()
600 etd->cpu_buffer = td->cpu_buffer; in schedule_isoc_etds()
605 setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size); in schedule_isoc_etds()
606 etd_writel(imx21, etd_num, 1, etd->dmem_offset); in schedule_isoc_etds()
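
The schedule_isoc_etds() matches show each pending isochronous TD being bound to the first of the endpoint's ETDs that is not already carrying a URB; the TD's buffer, length and DMA handle are copied into the ETD before its dwords are programmed. A reduced sketch of that binding step, with the per-endpoint ETD count and both struct layouts trimmed to the fields referenced above:

    #define NUM_ISO_ETDS 2                 /* assumed per-endpoint ETD count */

    struct iso_td  { void *urb; void *cpu_buffer; unsigned long dma_handle; int len; };
    struct iso_etd { void *urb; struct iso_td *td; unsigned long dma_handle;
                     void *cpu_buffer; int len; };

    /* Returns the index of the ETD the TD was bound to, or -1 if all are busy. */
    static int bind_td_to_free_etd(struct iso_etd etds[NUM_ISO_ETDS],
                                   struct iso_td *td)
    {
        int i;

        for (i = 0; i < NUM_ISO_ETDS; i++) {
            struct iso_etd *etd = &etds[i];

            if (etd->urb)
                continue;                  /* ETD still busy with a transfer */
            etd->td = td;
            etd->urb = td->urb;
            etd->len = td->len;
            etd->dma_handle = td->dma_handle;
            etd->cpu_buffer = td->cpu_buffer;
            return i;                      /* caller programs the ETD dwords */
        }
        return -1;                         /* nothing free this frame */
    }
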
622 struct etd_priv *etd = imx21->etd + etd_num; in isoc_etd_done() local
623 struct urb *urb = etd->urb; in isoc_etd_done()
625 struct td *td = etd->td; in isoc_etd_done()
626 struct usb_host_endpoint *ep = etd->ep; in isoc_etd_done()
660 if (!etd->dma_handle) in isoc_etd_done()
661 memcpy_fromio(etd->cpu_buffer, in isoc_etd_done()
662 imx21->regs + USBOTG_DMEM + etd->dmem_offset, in isoc_etd_done()
670 etd->td = NULL; in isoc_etd_done()
671 etd->urb = NULL; in isoc_etd_done()
672 etd->ep = NULL; in isoc_etd_done()
691 ep_priv->etd[i] = -1; in alloc_isoc_ep()
706 if (ep_priv->etd[i] < 0) { in alloc_isoc_etds()
711 ep_priv->etd[i] = etd_num; in alloc_isoc_etds()
712 imx21->etd[etd_num].ep = ep_priv->ep; in alloc_isoc_etds()
720 free_etd(imx21, ep_priv->etd[j]); in alloc_isoc_etds()
721 ep_priv->etd[j] = -1; in alloc_isoc_etds()
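
alloc_isoc_etds() uses an all-or-nothing pattern: every per-endpoint slot that still reads -1 gets an ETD, and if any allocation fails the slots filled so far are released again and reset to -1. A compact sketch of that rollback, with alloc_etd()/free_etd() reduced to extern stand-ins that drop the driver's imx21 argument:

    extern int  alloc_etd(void);           /* >= 0 ETD number, < 0 on failure */
    extern void free_etd(int etd_num);

    static int alloc_all_etds(int *slots, int count)
    {
        int i, j;

        for (i = 0; i < count; i++) {
            if (slots[i] >= 0)
                continue;                  /* slot already has an ETD */
            slots[i] = alloc_etd();
            if (slots[i] < 0)
                goto rollback;
        }
        return 0;

    rollback:
        for (j = 0; j < i; j++) {
            free_etd(slots[j]);
            slots[j] = -1;
        }
        return -1;                         /* all-or-nothing: report the failure */
    }
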
780 struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]]; in imx21_hc_urb_enqueue_isoc() local
782 if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) { in imx21_hc_urb_enqueue_isoc()
785 etd->dmem_size, maxpacket); in imx21_hc_urb_enqueue_isoc()
790 if (etd->dmem_size == 0) { in imx21_hc_urb_enqueue_isoc()
791 etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep); in imx21_hc_urb_enqueue_isoc()
792 if (etd->dmem_offset < 0) { in imx21_hc_urb_enqueue_isoc()
797 etd->dmem_size = maxpacket; in imx21_hc_urb_enqueue_isoc()
876 int etd_num = ep_priv->etd[i]; in dequeue_isoc_urb()
877 if (etd_num != -1 && imx21->etd[etd_num].urb == urb) { in dequeue_isoc_urb()
878 struct etd_priv *etd = imx21->etd + etd_num; in dequeue_isoc_urb() local
881 free_dmem(imx21, etd); in dequeue_isoc_urb()
904 int etd_num = ep_priv->etd[0]; in schedule_nonisoc_etd()
905 struct etd_priv *etd; in schedule_nonisoc_etd() local
922 etd = &imx21->etd[etd_num]; in schedule_nonisoc_etd()
933 etd->dma_handle = urb->setup_dma; in schedule_nonisoc_etd()
934 etd->cpu_buffer = urb->setup_packet; in schedule_nonisoc_etd()
950 etd->dma_handle = urb->transfer_dma; in schedule_nonisoc_etd()
951 etd->cpu_buffer = urb->transfer_buffer; in schedule_nonisoc_etd()
970 etd->urb = urb; in schedule_nonisoc_etd()
971 etd->ep = urb_priv->ep; in schedule_nonisoc_etd()
972 etd->len = count; in schedule_nonisoc_etd()
1002 etd->dma_handle = 0; in schedule_nonisoc_etd()
1005 etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket; in schedule_nonisoc_etd()
1006 etd->dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep); in schedule_nonisoc_etd()
1007 if (etd->dmem_offset < 0) { in schedule_nonisoc_etd()
1013 list_add_tail(&etd->queue, &imx21->queue_for_dmem); in schedule_nonisoc_etd()
1018 (((u32) etd->dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) | in schedule_nonisoc_etd()
1019 (u32) etd->dmem_offset); in schedule_nonisoc_etd()
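
For non-isochronous transfers the matches show the DMEM window being sized at one packet for short transfers and two packets otherwise, with ETD dword 1 holding the X buffer at dmem_offset and the Y buffer one packet above it (double buffering). A small sketch of that computation; the DW1_YBUFSRTAD shift value is an assumption, only the formula comes from the listing:

    #include <stdint.h>

    static uint32_t etd_dword1(uint32_t dmem_offset, uint32_t maxpacket,
                               uint32_t count, uint32_t *dmem_size)
    {
        enum { DW1_YBUFSRTAD = 16 };       /* bit position assumed for illustration */

        /* One packet of DMEM is enough unless the transfer spans several packets. */
        *dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket;

        /* X buffer starts at dmem_offset, Y buffer one packet later. */
        return ((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset;
    }
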
1033 struct etd_priv *etd = &imx21->etd[etd_num]; in nonisoc_etd_done() local
1034 struct urb *urb = etd->urb; in nonisoc_etd_done()
1046 bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff); in nonisoc_etd_done()
1057 if (etd->bounce_buffer) { in nonisoc_etd_done()
1058 memcpy(etd->cpu_buffer, etd->bounce_buffer, bytes_xfrd); in nonisoc_etd_done()
1060 etd->dma_handle, etd->len, DMA_FROM_DEVICE); in nonisoc_etd_done()
1061 } else if (!etd->dma_handle && bytes_xfrd) {/* PIO */ in nonisoc_etd_done()
1062 memcpy_fromio(etd->cpu_buffer, in nonisoc_etd_done()
1063 imx21->regs + USBOTG_DMEM + etd->dmem_offset, in nonisoc_etd_done()
1068 kfree(etd->bounce_buffer); in nonisoc_etd_done()
1069 etd->bounce_buffer = NULL; in nonisoc_etd_done()
1070 free_dmem(imx21, etd); in nonisoc_etd_done()
1128 nonisoc_urb_completed_for_etd(imx21, etd, cc_to_error[cc]); in nonisoc_etd_done()
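
nonisoc_etd_done() computes the transferred length from the residual count in ETD dword 3 (etd->len minus the remaining byte count) and then moves IN data back to the URB buffer along one of two routes: from the bounce buffer when DMA was staged through one, or straight out of DMEM with memcpy_fromio() in the PIO case. A reduced model of that copy-back; the names and struct are illustrative, and the real code additionally unmaps the DMA buffer and frees the DMEM:

    #include <stddef.h>
    #include <stdlib.h>
    #include <string.h>

    struct done_xfer {
        void *cpu_buffer;      /* URB buffer */
        void *bounce_buffer;   /* set only when DMA needed staging */
        const void *dmem;      /* mapped DMEM window used for PIO */
        int used_dma;
    };

    static void complete_in_transfer(struct done_xfer *x, size_t bytes_xfrd)
    {
        if (x->bounce_buffer) {
            /* DMA landed in the bounce buffer; copy back and drop it. */
            memcpy(x->cpu_buffer, x->bounce_buffer, bytes_xfrd);
            free(x->bounce_buffer);
            x->bounce_buffer = NULL;
        } else if (!x->used_dma && bytes_xfrd) {
            /* PIO: the controller left the received data in DMEM. */
            memcpy(x->cpu_buffer, x->dmem, bytes_xfrd);
        }
    }
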
1146 ep_priv->etd[i] = -1; in alloc_ep()
1158 struct etd_priv *etd; in imx21_hc_urb_enqueue() local
1210 if (ep_priv->etd[0] < 0) { in imx21_hc_urb_enqueue()
1218 ep_priv->etd[0] = alloc_etd(imx21); in imx21_hc_urb_enqueue()
1219 if (ep_priv->etd[0] < 0) { in imx21_hc_urb_enqueue()
1230 etd = &imx21->etd[ep_priv->etd[0]]; in imx21_hc_urb_enqueue()
1231 if (etd->urb == NULL) { in imx21_hc_urb_enqueue()
1232 DEBUG_LOG_FRAME(imx21, etd, last_req); in imx21_hc_urb_enqueue()
1274 int etd_num = ep_priv->etd[0]; in imx21_hc_urb_dequeue()
1276 struct etd_priv *etd = &imx21->etd[etd_num]; in imx21_hc_urb_dequeue() local
1279 free_dmem(imx21, etd); in imx21_hc_urb_dequeue()
1280 etd->urb = NULL; in imx21_hc_urb_dequeue()
1281 kfree(etd->bounce_buffer); in imx21_hc_urb_dequeue()
1282 etd->bounce_buffer = NULL; in imx21_hc_urb_dequeue()
1312 struct etd_priv *etd = &imx21->etd[etd_num]; in process_etds() local
1316 DEBUG_LOG_FRAME(imx21, etd, last_int); in process_etds()
1337 if (etd->active_count && !enabled) /* suspicious... */ in process_etds()
1340 if (!sof || enabled || !etd->active_count) in process_etds()
1347 if (++etd->active_count < 10) in process_etds()
1361 etd->activated_frame, in process_etds()
1362 etd->disactivated_frame, in process_etds()
1363 etd->last_int_frame, in process_etds()
1364 etd->last_req_frame, in process_etds()
1368 etd->active_count = 0; in process_etds()
1372 if (etd->ep == NULL || etd->urb == NULL) { in process_etds()
1376 etd_num, etd->ep, etd->urb); in process_etds()
1381 if (usb_pipeisoc(etd->urb->pipe)) in process_etds()
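
The process_etds() matches implement a watchdog for ETDs whose done interrupt never fires: an ETD that is still marked active while its interrupt is disabled is re-examined on every start-of-frame, and after roughly ten such frames it is forced down the normal completion path, with the captured activated/disactivated/last_int/last_req frame numbers available for debugging. A schematic model of that decision; the threshold of 10 comes from the listing, while the surrounding register reads and debug logging are omitted:

    struct etd_state {
        int active_count;      /* frames this ETD has looked stuck */
        int int_enabled;       /* done interrupt currently enabled? */
    };

    /* Called per ETD from the interrupt/SOF handler.  Returns nonzero when
     * the ETD should be run through its completion handler now. */
    static int etd_should_complete(struct etd_state *etd, int done, int sof)
    {
        if (!done) {
            /* Done bit not set: only the SOF path with a still-active ETD
             * and a disabled done interrupt is treated as suspicious. */
            if (!sof || etd->int_enabled || !etd->active_count)
                return 0;
            if (++etd->active_count < 10)
                return 0;              /* give it a few more frames */
        }

        etd->active_count = 0;         /* completing now, one way or the other */
        return 1;
    }
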
1435 if (ep_priv->etd[i] > -1) in imx21_hc_endpoint_disable()
1437 ep_priv->etd[i]); in imx21_hc_endpoint_disable()
1439 free_etd(imx21, ep_priv->etd[i]); in imx21_hc_endpoint_disable()
1446 if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) { in imx21_hc_endpoint_disable()