Lines matching refs: etd
70 #define DEBUG_LOG_FRAME(imx21, etd, event) \ argument
71 (etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
73 #define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0) argument
153 struct imx21 *imx21, struct etd_priv *etd, int status);
155 static void free_dmem(struct imx21 *imx21, struct etd_priv *etd);
164 struct etd_priv *etd = imx21->etd; in alloc_etd() local
166 for (i = 0; i < USB_NUM_ETD; i++, etd++) { in alloc_etd()
167 if (etd->alloc == 0) { in alloc_etd()
168 memset(etd, 0, sizeof(imx21->etd[0])); in alloc_etd()
169 etd->alloc = 1; in alloc_etd()
180 struct etd_priv *etd = &imx21->etd[num]; in disactivate_etd() local
187 etd->active_count = 0; in disactivate_etd()
189 DEBUG_LOG_FRAME(imx21, etd, disactivated); in disactivate_etd()
194 struct etd_priv *etd = imx21->etd + num; in reset_etd() local
201 etd->urb = NULL; in reset_etd()
202 etd->ep = NULL; in reset_etd()
203 etd->td = NULL; in reset_etd()
204 etd->bounce_buffer = NULL; in reset_etd()
216 if (imx21->etd[num].alloc == 0) { in free_etd()
223 memset(&imx21->etd[num], 0, sizeof(imx21->etd[0])); in free_etd()
270 struct etd_priv *etd = &imx21->etd[etd_num]; in activate_etd() local
272 if (etd->dma_handle && unsuitable_for_dma(etd->dma_handle)) { in activate_etd()
274 if (etd->len <= etd->dmem_size) { in activate_etd()
278 etd->dmem_offset, in activate_etd()
279 etd->cpu_buffer, etd->len); in activate_etd()
281 etd->dma_handle = 0; in activate_etd()
289 etd->bounce_buffer = kmalloc(etd->len, in activate_etd()
293 etd->bounce_buffer = kmemdup(etd->cpu_buffer, in activate_etd()
294 etd->len, in activate_etd()
297 if (!etd->bounce_buffer) { in activate_etd()
302 etd->dma_handle = in activate_etd()
304 etd->bounce_buffer, in activate_etd()
305 etd->len, in activate_etd()
307 if (dma_mapping_error(imx21->dev, etd->dma_handle)) { in activate_etd()
319 if (etd->dma_handle) { in activate_etd()
323 writel(etd->dma_handle, imx21->regs + USB_ETDSMSA(etd_num)); in activate_etd()
333 DEBUG_LOG_FRAME(imx21, etd, activated); in activate_etd()
336 if (!etd->active_count) { in activate_etd()
338 etd->activated_frame = readl(imx21->regs + USBH_FRMNUB); in activate_etd()
339 etd->disactivated_frame = -1; in activate_etd()
340 etd->last_int_frame = -1; in activate_etd()
341 etd->last_req_frame = -1; in activate_etd()
344 etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i); in activate_etd()
348 etd->active_count = 1; in activate_etd()
353 kfree(etd->bounce_buffer); in activate_etd()
356 free_dmem(imx21, etd); in activate_etd()
357 nonisoc_urb_completed_for_etd(imx21, etd, -ENOMEM); in activate_etd()
406 struct etd_priv *etd, u32 dmem_offset) in activate_queued_etd() argument
408 struct urb_priv *urb_priv = etd->urb->hcpriv; in activate_queued_etd()
409 int etd_num = etd - &imx21->etd[0]; in activate_queued_etd()
418 etd->dmem_offset = dmem_offset; in activate_queued_etd()
423 static void free_dmem(struct imx21 *imx21, struct etd_priv *etd) in free_dmem() argument
430 if (!etd->dmem_size) in free_dmem()
432 etd->dmem_size = 0; in free_dmem()
434 offset = etd->dmem_offset; in free_dmem()
452 list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) { in free_dmem()
453 offset = alloc_dmem(imx21, etd->dmem_size, etd->ep); in free_dmem()
455 list_del(&etd->queue); in free_dmem()
456 activate_queued_etd(imx21, etd, (u32)offset); in free_dmem()
487 int etd_num = ep_priv->etd[i]; in ep_idle()
488 struct etd_priv *etd; in ep_idle() local
492 etd = &imx21->etd[etd_num]; in ep_idle()
493 ep_priv->etd[i] = -1; in ep_idle()
495 free_dmem(imx21, etd); /* for isoc */ in ep_idle()
509 ep_priv->etd[i] = etd_num; in ep_idle()
543 struct imx21 *imx21, struct etd_priv *etd, int status) in nonisoc_urb_completed_for_etd() argument
545 struct usb_host_endpoint *ep = etd->ep; in nonisoc_urb_completed_for_etd()
547 urb_done(imx21->hcd, etd->urb, status); in nonisoc_urb_completed_for_etd()
548 etd->urb = NULL; in nonisoc_urb_completed_for_etd()
569 struct etd_priv *etd; in schedule_isoc_etds() local
582 etd_num = ep_priv->etd[i]; in schedule_isoc_etds()
586 etd = &imx21->etd[etd_num]; in schedule_isoc_etds()
587 if (etd->urb) in schedule_isoc_etds()
608 etd->td = td; in schedule_isoc_etds()
609 etd->ep = td->ep; in schedule_isoc_etds()
610 etd->urb = td->urb; in schedule_isoc_etds()
611 etd->len = td->len; in schedule_isoc_etds()
612 etd->dma_handle = td->dma_handle; in schedule_isoc_etds()
613 etd->cpu_buffer = td->cpu_buffer; in schedule_isoc_etds()
618 setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size); in schedule_isoc_etds()
619 etd_writel(imx21, etd_num, 1, etd->dmem_offset); in schedule_isoc_etds()
635 struct etd_priv *etd = imx21->etd + etd_num; in isoc_etd_done() local
636 struct urb *urb = etd->urb; in isoc_etd_done()
638 struct td *td = etd->td; in isoc_etd_done()
639 struct usb_host_endpoint *ep = etd->ep; in isoc_etd_done()
673 if (!etd->dma_handle) in isoc_etd_done()
674 memcpy_fromio(etd->cpu_buffer, in isoc_etd_done()
675 imx21->regs + USBOTG_DMEM + etd->dmem_offset, in isoc_etd_done()
683 etd->td = NULL; in isoc_etd_done()
684 etd->urb = NULL; in isoc_etd_done()
685 etd->ep = NULL; in isoc_etd_done()
704 ep_priv->etd[i] = -1; in alloc_isoc_ep()
719 if (ep_priv->etd[i] < 0) { in alloc_isoc_etds()
724 ep_priv->etd[i] = etd_num; in alloc_isoc_etds()
725 imx21->etd[etd_num].ep = ep_priv->ep; in alloc_isoc_etds()
733 free_etd(imx21, ep_priv->etd[j]); in alloc_isoc_etds()
734 ep_priv->etd[j] = -1; in alloc_isoc_etds()
793 struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]]; in imx21_hc_urb_enqueue_isoc() local
795 if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) { in imx21_hc_urb_enqueue_isoc()
798 etd->dmem_size, maxpacket); in imx21_hc_urb_enqueue_isoc()
803 if (etd->dmem_size == 0) { in imx21_hc_urb_enqueue_isoc()
804 etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep); in imx21_hc_urb_enqueue_isoc()
805 if (etd->dmem_offset < 0) { in imx21_hc_urb_enqueue_isoc()
810 etd->dmem_size = maxpacket; in imx21_hc_urb_enqueue_isoc()
889 int etd_num = ep_priv->etd[i]; in dequeue_isoc_urb()
890 if (etd_num != -1 && imx21->etd[etd_num].urb == urb) { in dequeue_isoc_urb()
891 struct etd_priv *etd = imx21->etd + etd_num; in dequeue_isoc_urb() local
894 free_dmem(imx21, etd); in dequeue_isoc_urb()
917 int etd_num = ep_priv->etd[0]; in schedule_nonisoc_etd()
918 struct etd_priv *etd; in schedule_nonisoc_etd() local
935 etd = &imx21->etd[etd_num]; in schedule_nonisoc_etd()
946 etd->dma_handle = urb->setup_dma; in schedule_nonisoc_etd()
947 etd->cpu_buffer = urb->setup_packet; in schedule_nonisoc_etd()
963 etd->dma_handle = urb->transfer_dma; in schedule_nonisoc_etd()
964 etd->cpu_buffer = urb->transfer_buffer; in schedule_nonisoc_etd()
983 etd->urb = urb; in schedule_nonisoc_etd()
984 etd->ep = urb_priv->ep; in schedule_nonisoc_etd()
985 etd->len = count; in schedule_nonisoc_etd()
1015 etd->dma_handle = 0; in schedule_nonisoc_etd()
1018 etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket; in schedule_nonisoc_etd()
1019 etd->dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep); in schedule_nonisoc_etd()
1020 if (etd->dmem_offset < 0) { in schedule_nonisoc_etd()
1026 list_add_tail(&etd->queue, &imx21->queue_for_dmem); in schedule_nonisoc_etd()
1031 (((u32) etd->dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) | in schedule_nonisoc_etd()
1032 (u32) etd->dmem_offset); in schedule_nonisoc_etd()
1046 struct etd_priv *etd = &imx21->etd[etd_num]; in nonisoc_etd_done() local
1047 struct urb *urb = etd->urb; in nonisoc_etd_done()
1059 bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff); in nonisoc_etd_done()
1070 if (etd->bounce_buffer) { in nonisoc_etd_done()
1071 memcpy(etd->cpu_buffer, etd->bounce_buffer, bytes_xfrd); in nonisoc_etd_done()
1073 etd->dma_handle, etd->len, DMA_FROM_DEVICE); in nonisoc_etd_done()
1074 } else if (!etd->dma_handle && bytes_xfrd) {/* PIO */ in nonisoc_etd_done()
1075 memcpy_fromio(etd->cpu_buffer, in nonisoc_etd_done()
1076 imx21->regs + USBOTG_DMEM + etd->dmem_offset, in nonisoc_etd_done()
1081 kfree(etd->bounce_buffer); in nonisoc_etd_done()
1082 etd->bounce_buffer = NULL; in nonisoc_etd_done()
1083 free_dmem(imx21, etd); in nonisoc_etd_done()
1141 nonisoc_urb_completed_for_etd(imx21, etd, cc_to_error[cc]); in nonisoc_etd_done()
1159 ep_priv->etd[i] = -1; in alloc_ep()
1171 struct etd_priv *etd; in imx21_hc_urb_enqueue() local
1223 if (ep_priv->etd[0] < 0) { in imx21_hc_urb_enqueue()
1231 ep_priv->etd[0] = alloc_etd(imx21); in imx21_hc_urb_enqueue()
1232 if (ep_priv->etd[0] < 0) { in imx21_hc_urb_enqueue()
1243 etd = &imx21->etd[ep_priv->etd[0]]; in imx21_hc_urb_enqueue()
1244 if (etd->urb == NULL) { in imx21_hc_urb_enqueue()
1245 DEBUG_LOG_FRAME(imx21, etd, last_req); in imx21_hc_urb_enqueue()
1287 int etd_num = ep_priv->etd[0]; in imx21_hc_urb_dequeue()
1289 struct etd_priv *etd = &imx21->etd[etd_num]; in imx21_hc_urb_dequeue() local
1292 free_dmem(imx21, etd); in imx21_hc_urb_dequeue()
1293 etd->urb = NULL; in imx21_hc_urb_dequeue()
1294 kfree(etd->bounce_buffer); in imx21_hc_urb_dequeue()
1295 etd->bounce_buffer = NULL; in imx21_hc_urb_dequeue()
1325 struct etd_priv *etd = &imx21->etd[etd_num]; in process_etds() local
1329 DEBUG_LOG_FRAME(imx21, etd, last_int); in process_etds()
1350 if (etd->active_count && !enabled) /* suspicious... */ in process_etds()
1353 if (!sof || enabled || !etd->active_count) in process_etds()
1360 if (++etd->active_count < 10) in process_etds()
1374 etd->activated_frame, in process_etds()
1375 etd->disactivated_frame, in process_etds()
1376 etd->last_int_frame, in process_etds()
1377 etd->last_req_frame, in process_etds()
1381 etd->active_count = 0; in process_etds()
1385 if (etd->ep == NULL || etd->urb == NULL) { in process_etds()
1389 etd_num, etd->ep, etd->urb); in process_etds()
1394 if (usb_pipeisoc(etd->urb->pipe)) in process_etds()
1448 if (ep_priv->etd[i] > -1) in imx21_hc_endpoint_disable()
1450 ep_priv->etd[i]); in imx21_hc_endpoint_disable()
1452 free_etd(imx21, ep_priv->etd[i]); in imx21_hc_endpoint_disable()
1459 if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) { in imx21_hc_endpoint_disable()
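The alloc_etd()/free_etd() matches above (source lines 164-223) show the driver's ETD bookkeeping: a fixed array of etd_priv slots indexed by hardware ETD number, with ->alloc marking a slot as in use. What follows is a minimal standalone sketch of that pattern, assembled only from the fragments shown in the listing; the value of USB_NUM_ETD, the stand-in field types, and the exact error handling are assumptions, not the driver's code.

/*
 * Sketch of the ETD slot allocate/free pattern seen in alloc_etd()
 * and free_etd() above: claim the first slot whose ->alloc is zero,
 * and clear a slot on free after checking it was actually allocated.
 */
#include <stdio.h>
#include <string.h>

#define USB_NUM_ETD 32                  /* assumed; hardware-defined in the real driver */

struct etd_priv {
        int alloc;                      /* non-zero while the slot is claimed */
        void *urb;                      /* opaque stand-ins for the driver's pointers */
        void *ep;
        int active_count;
};

struct imx21 {
        struct etd_priv etd[USB_NUM_ETD];
};

/* Claim the first unused ETD slot, as in alloc_etd(); -1 if none is free. */
static int alloc_etd(struct imx21 *imx21)
{
        int i;
        struct etd_priv *etd = imx21->etd;

        for (i = 0; i < USB_NUM_ETD; i++, etd++) {
                if (etd->alloc == 0) {
                        memset(etd, 0, sizeof(imx21->etd[0]));
                        etd->alloc = 1;
                        return i;
                }
        }
        return -1;
}

/* Release a slot, as in free_etd(): refuse if it was never allocated. */
static void free_etd(struct imx21 *imx21, int num)
{
        if (num < 0)
                return;
        if (imx21->etd[num].alloc == 0) {
                fprintf(stderr, "ETD %d freed but not allocated\n", num);
                return;
        }
        memset(&imx21->etd[num], 0, sizeof(imx21->etd[0]));
}

int main(void)
{
        struct imx21 hc = { 0 };
        int a = alloc_etd(&hc);
        int b = alloc_etd(&hc);

        printf("allocated slots %d and %d\n", a, b);
        free_etd(&hc, a);
        printf("slot %d free again: alloc=%d\n", a, hc.etd[a].alloc);
        return 0;
}

The remaining matches (activate_etd(), free_dmem(), schedule_*_etd(), process_etds()) layer URB scheduling, DMEM allocation, and the bounce-buffer fallback on top of this same slot array, but those fragments are shown only partially here, so they are not reconstructed in the sketch.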