Lines Matching refs:xfer

122 	struct wa_xfer *xfer;		/* out xfer */  member
168 static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
170 static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
173 static inline void wa_xfer_init(struct wa_xfer *xfer) in wa_xfer_init() argument
175 kref_init(&xfer->refcnt); in wa_xfer_init()
176 INIT_LIST_HEAD(&xfer->list_node); in wa_xfer_init()
177 spin_lock_init(&xfer->lock); in wa_xfer_init()
188 struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt); in wa_xfer_destroy() local
189 if (xfer->seg) { in wa_xfer_destroy()
191 for (cnt = 0; cnt < xfer->segs; cnt++) { in wa_xfer_destroy()
192 struct wa_seg *seg = xfer->seg[cnt]; in wa_xfer_destroy()
202 kfree(xfer->seg); in wa_xfer_destroy()
204 kfree(xfer); in wa_xfer_destroy()
207 static void wa_xfer_get(struct wa_xfer *xfer) in wa_xfer_get() argument
209 kref_get(&xfer->refcnt); in wa_xfer_get()
212 static void wa_xfer_put(struct wa_xfer *xfer) in wa_xfer_put() argument
214 kref_put(&xfer->refcnt, wa_xfer_destroy); in wa_xfer_put()
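
The matches above (wa_xfer_init, wa_xfer_destroy, wa_xfer_get, wa_xfer_put) outline the reference-counted lifecycle of a transfer in the wire-adapter (HWA) transfer engine these lines come from: a kref is initialised to 1 at allocation, every outstanding user takes a reference, and the last put runs the destroy callback that frees the segment array and the transfer itself. A minimal sketch of that pattern, using a simplified stand-in struct rather than the real struct wa_xfer:

	#include <linux/kernel.h>
	#include <linux/kref.h>
	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	/* Simplified stand-in for struct wa_xfer: lifecycle fields only. */
	struct xfer_sketch {
		struct kref refcnt;
		struct list_head list_node;
		spinlock_t lock;
	};

	static void xfer_sketch_init(struct xfer_sketch *x)
	{
		kref_init(&x->refcnt);		/* refcount starts at 1 */
		INIT_LIST_HEAD(&x->list_node);
		spin_lock_init(&x->lock);
	}

	/* Called by kref_put() when the last reference is dropped. */
	static void xfer_sketch_destroy(struct kref *kref)
	{
		struct xfer_sketch *x = container_of(kref, struct xfer_sketch, refcnt);

		kfree(x);	/* the real wa_xfer_destroy() also frees xfer->seg[] */
	}

	static void xfer_sketch_get(struct xfer_sketch *x)
	{
		kref_get(&x->refcnt);
	}

	static void xfer_sketch_put(struct xfer_sketch *x)
	{
		kref_put(&x->refcnt, xfer_sketch_destroy);
	}
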
280 static void wa_xfer_giveback(struct wa_xfer *xfer) in wa_xfer_giveback() argument
284 spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags); in wa_xfer_giveback()
285 list_del_init(&xfer->list_node); in wa_xfer_giveback()
286 usb_hcd_unlink_urb_from_ep(&(xfer->wa->wusb->usb_hcd), xfer->urb); in wa_xfer_giveback()
287 spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags); in wa_xfer_giveback()
289 wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result); in wa_xfer_giveback()
290 wa_put(xfer->wa); in wa_xfer_giveback()
291 wa_xfer_put(xfer); in wa_xfer_giveback()
299 static void wa_xfer_completion(struct wa_xfer *xfer) in wa_xfer_completion() argument
301 if (xfer->wusb_dev) in wa_xfer_completion()
302 wusb_dev_put(xfer->wusb_dev); in wa_xfer_completion()
303 rpipe_put(xfer->ep->hcpriv); in wa_xfer_completion()
304 wa_xfer_giveback(xfer); in wa_xfer_completion()
315 static void wa_xfer_id_init(struct wa_xfer *xfer) in wa_xfer_id_init() argument
317 xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count); in wa_xfer_id_init()
321 static inline u32 wa_xfer_id(struct wa_xfer *xfer) in wa_xfer_id() argument
323 return xfer->id; in wa_xfer_id()
327 static inline __le32 wa_xfer_id_le32(struct wa_xfer *xfer) in wa_xfer_id_le32() argument
329 return cpu_to_le32(xfer->id); in wa_xfer_id_le32()
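
wa_xfer_id_init(), wa_xfer_id() and wa_xfer_id_le32() show how each transfer is stamped with a host-unique id from an atomic counter and how that id is converted to the little-endian form placed in wire headers (dwTransferID). A small sketch of the same scheme; the counter name here is invented:

	#include <linux/atomic.h>
	#include <linux/types.h>
	#include <asm/byteorder.h>

	static atomic_t sketch_xfer_id_count = ATOMIC_INIT(0);	/* stands in for wa->xfer_id_count */

	static u32 sketch_xfer_id_alloc(void)
	{
		/* atomic_add_return() hands out unique, monotonically increasing ids. */
		return atomic_add_return(1, &sketch_xfer_id_count);
	}

	static __le32 sketch_xfer_id_wire(u32 id)
	{
		return cpu_to_le32(id);		/* transfer headers are little-endian */
	}
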
337 static unsigned __wa_xfer_is_done(struct wa_xfer *xfer) in __wa_xfer_is_done() argument
339 struct device *dev = &xfer->wa->usb_iface->dev; in __wa_xfer_is_done()
342 struct urb *urb = xfer->urb; in __wa_xfer_is_done()
345 result = xfer->segs_done == xfer->segs_submitted; in __wa_xfer_is_done()
349 for (cnt = 0; cnt < xfer->segs; cnt++) { in __wa_xfer_is_done()
350 seg = xfer->seg[cnt]; in __wa_xfer_is_done()
355 xfer, wa_xfer_id(xfer), cnt, in __wa_xfer_is_done()
361 if (!(usb_pipeisoc(xfer->urb->pipe)) in __wa_xfer_is_done()
362 && seg->result < xfer->seg_size in __wa_xfer_is_done()
363 && cnt != xfer->segs-1) in __wa_xfer_is_done()
367 xfer, wa_xfer_id(xfer), seg->index, found_short, in __wa_xfer_is_done()
371 xfer->result = seg->result; in __wa_xfer_is_done()
373 xfer, wa_xfer_id(xfer), seg->index, seg->result, in __wa_xfer_is_done()
377 xfer->result = seg->result; in __wa_xfer_is_done()
379 xfer, wa_xfer_id(xfer), seg->index, seg->result, in __wa_xfer_is_done()
384 xfer, wa_xfer_id(xfer), cnt, seg->status); in __wa_xfer_is_done()
385 xfer->result = -EINVAL; in __wa_xfer_is_done()
389 xfer->result = 0; in __wa_xfer_is_done()
402 static unsigned __wa_xfer_mark_seg_as_done(struct wa_xfer *xfer, in __wa_xfer_mark_seg_as_done() argument
406 xfer->segs_done++; in __wa_xfer_mark_seg_as_done()
409 return __wa_xfer_is_done(xfer); in __wa_xfer_mark_seg_as_done()
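
__wa_xfer_is_done() treats a transfer as complete only once segs_done has caught up with segs_submitted, then folds each segment's outcome into xfer->result (an errored segment propagates its result, an unexpected status yields -EINVAL, and a short non-final segment ends the scan); __wa_xfer_mark_seg_as_done() bumps the counter and re-runs that check under xfer->lock. The sketch below mirrors only the counting and the error fold; field, type and status names are invented and the short-read handling of the real function is omitted:

	#include <linux/errno.h>

	enum seg_state_sketch { SEG_SK_READY, SEG_SK_DONE, SEG_SK_ERROR };

	struct seg_done_sketch {
		enum seg_state_sketch status;
		int result;			/* bytes moved, or a negative errno */
	};

	struct xfer_done_sketch {
		unsigned int segs, segs_submitted, segs_done;
		struct seg_done_sketch **seg;
		int result;
	};

	/* Caller holds the transfer lock, as in __wa_xfer_is_done(). */
	static unsigned int sketch_xfer_is_done(struct xfer_done_sketch *x)
	{
		unsigned int cnt;

		if (x->segs_done != x->segs_submitted)
			return 0;			/* segments still outstanding */

		x->result = 0;
		for (cnt = 0; cnt < x->segs; cnt++) {
			struct seg_done_sketch *seg = x->seg[cnt];

			if (seg->status == SEG_SK_ERROR) {
				x->result = seg->result; /* propagate the segment error */
				break;
			}
			if (seg->status != SEG_SK_DONE) {
				x->result = -EINVAL;	/* unexpected segment state */
				break;
			}
		}
		return 1;
	}

	static unsigned int sketch_mark_seg_as_done(struct xfer_done_sketch *x,
						    struct seg_done_sketch *seg,
						    enum seg_state_sketch status)
	{
		seg->status = status;
		x->segs_done++;
		return sketch_xfer_is_done(x);
	}
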
454 struct wa_xfer *xfer; in __wa_xfer_abort_cb() local
457 xfer = wa_xfer_get_by_id(wa, le32_to_cpu(b->cmd.dwTransferID)); in __wa_xfer_abort_cb()
460 if (xfer) { in __wa_xfer_abort_cb()
463 struct wa_rpipe *rpipe = xfer->ep->hcpriv; in __wa_xfer_abort_cb()
466 __func__, xfer, wa_xfer_id(xfer)); in __wa_xfer_abort_cb()
467 spin_lock_irqsave(&xfer->lock, flags); in __wa_xfer_abort_cb()
469 while (seg_index < xfer->segs) { in __wa_xfer_abort_cb()
470 struct wa_seg *seg = xfer->seg[seg_index]; in __wa_xfer_abort_cb()
480 wa_complete_remaining_xfer_segs(xfer, seg_index, in __wa_xfer_abort_cb()
482 done = __wa_xfer_is_done(xfer); in __wa_xfer_abort_cb()
483 spin_unlock_irqrestore(&xfer->lock, flags); in __wa_xfer_abort_cb()
485 wa_xfer_completion(xfer); in __wa_xfer_abort_cb()
487 wa_xfer_put(xfer); in __wa_xfer_abort_cb()
508 static int __wa_xfer_abort(struct wa_xfer *xfer) in __wa_xfer_abort() argument
511 struct device *dev = &xfer->wa->usb_iface->dev; in __wa_xfer_abort()
513 struct wa_rpipe *rpipe = xfer->ep->hcpriv; in __wa_xfer_abort()
521 b->cmd.dwTransferID = wa_xfer_id_le32(xfer); in __wa_xfer_abort()
522 b->wa = wa_get(xfer->wa); in __wa_xfer_abort()
525 usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev, in __wa_xfer_abort()
526 usb_sndbulkpipe(xfer->wa->usb_dev, in __wa_xfer_abort()
527 xfer->wa->dto_epd->bEndpointAddress), in __wa_xfer_abort()
536 wa_put(xfer->wa); in __wa_xfer_abort()
539 xfer, result); in __wa_xfer_abort()
550 static int __wa_seg_calculate_isoc_frame_count(struct wa_xfer *xfer, in __wa_seg_calculate_isoc_frame_count() argument
556 xfer->urb->iso_frame_desc; in __wa_seg_calculate_isoc_frame_count()
558 while ((index < xfer->urb->number_of_packets) in __wa_seg_calculate_isoc_frame_count()
560 <= xfer->seg_size)) { in __wa_seg_calculate_isoc_frame_count()
568 if ((xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) in __wa_seg_calculate_isoc_frame_count()
569 && (xfer->is_inbound == 0) in __wa_seg_calculate_isoc_frame_count()
592 static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer, in __wa_xfer_setup_sizes() argument
596 struct device *dev = &xfer->wa->usb_iface->dev; in __wa_xfer_setup_sizes()
598 struct urb *urb = xfer->urb; in __wa_xfer_setup_sizes()
599 struct wa_rpipe *rpipe = xfer->ep->hcpriv; in __wa_xfer_setup_sizes()
620 xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0; in __wa_xfer_setup_sizes()
621 xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0; in __wa_xfer_setup_sizes()
624 xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks) in __wa_xfer_setup_sizes()
625 * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1); in __wa_xfer_setup_sizes()
629 if (xfer->seg_size < maxpktsize) { in __wa_xfer_setup_sizes()
632 xfer->seg_size, maxpktsize); in __wa_xfer_setup_sizes()
636 xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize; in __wa_xfer_setup_sizes()
640 xfer->segs = 0; in __wa_xfer_setup_sizes()
647 index += __wa_seg_calculate_isoc_frame_count(xfer, in __wa_xfer_setup_sizes()
649 ++xfer->segs; in __wa_xfer_setup_sizes()
652 xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length, in __wa_xfer_setup_sizes()
653 xfer->seg_size); in __wa_xfer_setup_sizes()
654 if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL) in __wa_xfer_setup_sizes()
655 xfer->segs = 1; in __wa_xfer_setup_sizes()
658 if (xfer->segs > WA_SEGS_MAX) { in __wa_xfer_setup_sizes()
660 (urb->transfer_buffer_length/xfer->seg_size), in __wa_xfer_setup_sizes()
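
__wa_xfer_setup_sizes() derives the segment size from the RPipe buffer (wBlocks blocks of 2^(bRPipeBlockSize - 1) bytes), rounds it down to a whole number of wMaxPacketSize packets, and splits the URB buffer into DIV_ROUND_UP(length, seg_size) segments, with control transfers getting at least one segment and the total capped at WA_SEGS_MAX. A hedged sketch of that arithmetic for the non-isochronous case; parameter names are invented, the fallback WA_SEGS_MAX value is an assumption, and the driver's exact errno choices are not restated:

	#include <linux/errno.h>
	#include <linux/kernel.h>	/* DIV_ROUND_UP() */
	#include <linux/types.h>

	#ifndef WA_SEGS_MAX
	#define WA_SEGS_MAX 128		/* assumption: stand-in for the driver's cap */
	#endif

	static int sketch_setup_sizes(unsigned int rpipe_blocks,	/* wBlocks */
				      unsigned int block_size_exp,	/* bRPipeBlockSize */
				      unsigned int maxpktsize,		/* wMaxPacketSize */
				      unsigned int buf_len, bool is_ctl,
				      unsigned int *seg_size,
				      unsigned int *segs)
	{
		/* RPipe buffer capacity: wBlocks blocks of 2^(bRPipeBlockSize-1) bytes. */
		*seg_size = rpipe_blocks << (block_size_exp - 1);
		if (*seg_size < maxpktsize)
			return -EINVAL;		/* cannot even fit one max packet */

		/* Keep every segment an exact multiple of the endpoint packet size. */
		*seg_size = (*seg_size / maxpktsize) * maxpktsize;

		*segs = DIV_ROUND_UP(buf_len, *seg_size);
		if (*segs == 0 && is_ctl)
			*segs = 1;		/* zero-length control: setup stage only */
		if (*segs > WA_SEGS_MAX)
			return -EINVAL;
		return 0;
	}
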
671 struct wa_xfer *xfer, in __wa_setup_isoc_packet_descr() argument
674 xfer->urb->iso_frame_desc; in __wa_setup_isoc_packet_descr()
692 static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer, in __wa_xfer_setup_hdr0() argument
697 struct wa_rpipe *rpipe = xfer->ep->hcpriv; in __wa_xfer_setup_hdr0()
698 struct wa_seg *seg = xfer->seg[0]; in __wa_xfer_setup_hdr0()
704 xfer_hdr0->dwTransferID = wa_xfer_id_le32(xfer); in __wa_xfer_setup_hdr0()
710 xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0; in __wa_xfer_setup_hdr0()
711 memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet, in __wa_xfer_setup_hdr0()
726 __wa_setup_isoc_packet_descr(packet_desc, xfer, seg); in __wa_xfer_setup_hdr0()
749 struct wa_xfer *xfer = seg->xfer; in wa_seg_dto_cb() local
763 spin_lock_irqsave(&xfer->lock, flags); in wa_seg_dto_cb()
764 wa = xfer->wa; in wa_seg_dto_cb()
766 if (usb_pipeisoc(xfer->urb->pipe)) { in wa_seg_dto_cb()
784 wa_xfer_id(xfer), seg->index, seg->isoc_frame_index, in wa_seg_dto_cb()
787 spin_unlock_irqrestore(&xfer->lock, flags); in wa_seg_dto_cb()
791 spin_lock_irqsave(&xfer->lock, flags); in wa_seg_dto_cb()
795 wa_xfer_id(xfer), seg->index, seg->result); in wa_seg_dto_cb()
804 __wa_populate_dto_urb_isoc(xfer, seg, in wa_seg_dto_cb()
809 wa_xfer_get(xfer); in wa_seg_dto_cb()
813 wa_xfer_id(xfer), seg->index, result); in wa_seg_dto_cb()
814 spin_unlock_irqrestore(&xfer->lock, flags); in wa_seg_dto_cb()
818 spin_unlock_irqrestore(&xfer->lock, flags); in wa_seg_dto_cb()
833 wa_xfer_id(xfer), seg->index, urb->status); in wa_seg_dto_cb()
838 wa_xfer_put(xfer); in wa_seg_dto_cb()
843 wa_xfer_put(xfer); in wa_seg_dto_cb()
845 spin_lock_irqsave(&xfer->lock, flags); in wa_seg_dto_cb()
846 rpipe = xfer->ep->hcpriv; in wa_seg_dto_cb()
854 __wa_xfer_abort(xfer); in wa_seg_dto_cb()
856 done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR); in wa_seg_dto_cb()
858 spin_unlock_irqrestore(&xfer->lock, flags); in wa_seg_dto_cb()
864 wa_xfer_completion(xfer); in wa_seg_dto_cb()
868 wa_xfer_put(xfer); in wa_seg_dto_cb()
886 struct wa_xfer *xfer = seg->xfer; in wa_seg_iso_pack_desc_cb() local
896 spin_lock_irqsave(&xfer->lock, flags); in wa_seg_iso_pack_desc_cb()
897 wa = xfer->wa; in wa_seg_iso_pack_desc_cb()
900 wa_xfer_id(xfer), seg->index); in wa_seg_iso_pack_desc_cb()
901 if (xfer->is_inbound && seg->status < WA_SEG_PENDING) in wa_seg_iso_pack_desc_cb()
903 spin_unlock_irqrestore(&xfer->lock, flags); in wa_seg_iso_pack_desc_cb()
909 spin_lock_irqsave(&xfer->lock, flags); in wa_seg_iso_pack_desc_cb()
910 wa = xfer->wa; in wa_seg_iso_pack_desc_cb()
912 rpipe = xfer->ep->hcpriv; in wa_seg_iso_pack_desc_cb()
914 wa_xfer_id(xfer), seg->index, urb->status); in wa_seg_iso_pack_desc_cb()
923 __wa_xfer_abort(xfer); in wa_seg_iso_pack_desc_cb()
925 done = __wa_xfer_mark_seg_as_done(xfer, seg, in wa_seg_iso_pack_desc_cb()
928 spin_unlock_irqrestore(&xfer->lock, flags); in wa_seg_iso_pack_desc_cb()
930 wa_xfer_completion(xfer); in wa_seg_iso_pack_desc_cb()
935 wa_xfer_put(xfer); in wa_seg_iso_pack_desc_cb()
959 struct wa_xfer *xfer = seg->xfer; in wa_seg_tr_cb() local
969 spin_lock_irqsave(&xfer->lock, flags); in wa_seg_tr_cb()
970 wa = xfer->wa; in wa_seg_tr_cb()
973 xfer, wa_xfer_id(xfer), seg->index); in wa_seg_tr_cb()
974 if (xfer->is_inbound && in wa_seg_tr_cb()
976 !(usb_pipeisoc(xfer->urb->pipe))) in wa_seg_tr_cb()
978 spin_unlock_irqrestore(&xfer->lock, flags); in wa_seg_tr_cb()
984 spin_lock_irqsave(&xfer->lock, flags); in wa_seg_tr_cb()
985 wa = xfer->wa; in wa_seg_tr_cb()
987 rpipe = xfer->ep->hcpriv; in wa_seg_tr_cb()
990 xfer, wa_xfer_id(xfer), seg->index, in wa_seg_tr_cb()
1001 __wa_xfer_abort(xfer); in wa_seg_tr_cb()
1003 done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR); in wa_seg_tr_cb()
1004 spin_unlock_irqrestore(&xfer->lock, flags); in wa_seg_tr_cb()
1006 wa_xfer_completion(xfer); in wa_seg_tr_cb()
1011 wa_xfer_put(xfer); in wa_seg_tr_cb()
1099 static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer, in __wa_populate_dto_urb_isoc() argument
1106 seg->dto_urb->transfer_dma = xfer->urb->transfer_dma + in __wa_populate_dto_urb_isoc()
1107 xfer->urb->iso_frame_desc[curr_iso_frame].offset; in __wa_populate_dto_urb_isoc()
1109 if (xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) in __wa_populate_dto_urb_isoc()
1113 xfer->urb->iso_frame_desc[curr_iso_frame].length; in __wa_populate_dto_urb_isoc()
1119 static int __wa_populate_dto_urb(struct wa_xfer *xfer, in __wa_populate_dto_urb() argument
1124 if (xfer->is_dma) { in __wa_populate_dto_urb()
1126 xfer->urb->transfer_dma + buf_itr_offset; in __wa_populate_dto_urb()
1137 if (xfer->urb->transfer_buffer) { in __wa_populate_dto_urb()
1139 xfer->urb->transfer_buffer + in __wa_populate_dto_urb()
1153 xfer->urb->sg, in __wa_populate_dto_urb()
1173 static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size) in __wa_xfer_setup_segs() argument
1176 size_t alloc_size = sizeof(*xfer->seg[0]) in __wa_xfer_setup_segs()
1177 - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size; in __wa_xfer_setup_segs()
1178 struct usb_device *usb_dev = xfer->wa->usb_dev; in __wa_xfer_setup_segs()
1179 const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd; in __wa_xfer_setup_segs()
1184 xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC); in __wa_xfer_setup_segs()
1185 if (xfer->seg == NULL) in __wa_xfer_setup_segs()
1188 buf_size = xfer->urb->transfer_buffer_length; in __wa_xfer_setup_segs()
1189 for (cnt = 0; cnt < xfer->segs; cnt++) { in __wa_xfer_setup_segs()
1197 if (usb_pipeisoc(xfer->urb->pipe)) { in __wa_xfer_setup_segs()
1199 __wa_seg_calculate_isoc_frame_count(xfer, in __wa_xfer_setup_segs()
1206 seg = xfer->seg[cnt] = kmalloc(alloc_size + iso_pkt_descr_size, in __wa_xfer_setup_segs()
1211 seg->xfer = xfer; in __wa_xfer_setup_segs()
1218 buf_itr_size = min(buf_size, xfer->seg_size); in __wa_xfer_setup_segs()
1220 if (usb_pipeisoc(xfer->urb->pipe)) { in __wa_xfer_setup_segs()
1247 if (xfer->is_inbound == 0 && buf_size > 0) { in __wa_xfer_setup_segs()
1258 if (usb_pipeisoc(xfer->urb->pipe)) { in __wa_xfer_setup_segs()
1265 __wa_populate_dto_urb_isoc(xfer, seg, in __wa_xfer_setup_segs()
1269 result = __wa_populate_dto_urb(xfer, seg, in __wa_xfer_setup_segs()
1288 usb_free_urb(xfer->seg[cnt]->dto_urb); in __wa_xfer_setup_segs()
1290 usb_free_urb(xfer->seg[cnt]->isoc_pack_desc_urb); in __wa_xfer_setup_segs()
1292 kfree(xfer->seg[cnt]); in __wa_xfer_setup_segs()
1293 xfer->seg[cnt] = NULL; in __wa_xfer_setup_segs()
1309 static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb) in __wa_xfer_setup() argument
1312 struct device *dev = &xfer->wa->usb_iface->dev; in __wa_xfer_setup()
1317 result = __wa_xfer_setup_sizes(xfer, &xfer_type); in __wa_xfer_setup()
1321 result = __wa_xfer_setup_segs(xfer, xfer_hdr_size); in __wa_xfer_setup()
1324 xfer, xfer->segs, result); in __wa_xfer_setup()
1328 xfer_hdr0 = &xfer->seg[0]->xfer_hdr; in __wa_xfer_setup()
1329 wa_xfer_id_init(xfer); in __wa_xfer_setup()
1330 __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size); in __wa_xfer_setup()
1336 cpu_to_le32(xfer->seg[0]->isoc_size); in __wa_xfer_setup()
1337 for (cnt = 1; cnt < xfer->segs; cnt++) { in __wa_xfer_setup()
1339 struct wa_seg *seg = xfer->seg[cnt]; in __wa_xfer_setup()
1356 __wa_setup_isoc_packet_descr(packet_desc, xfer, seg); in __wa_xfer_setup()
1361 xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ? in __wa_xfer_setup()
1362 cpu_to_le32(xfer->seg_size) : in __wa_xfer_setup()
1364 transfer_size -= xfer->seg_size; in __wa_xfer_setup()
1365 for (cnt = 1; cnt < xfer->segs; cnt++) { in __wa_xfer_setup()
1366 xfer_hdr = &xfer->seg[cnt]->xfer_hdr; in __wa_xfer_setup()
1370 transfer_size > xfer->seg_size ? in __wa_xfer_setup()
1371 cpu_to_le32(xfer->seg_size) in __wa_xfer_setup()
1373 xfer->seg[cnt]->status = WA_SEG_READY; in __wa_xfer_setup()
1374 transfer_size -= xfer->seg_size; in __wa_xfer_setup()
1389 static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer, in __wa_seg_submit() argument
1401 wa_xfer_get(xfer); in __wa_seg_submit()
1407 __func__, xfer, seg->index, result); in __wa_seg_submit()
1408 wa_xfer_put(xfer); in __wa_seg_submit()
1413 wa_xfer_get(xfer); in __wa_seg_submit()
1418 __func__, xfer, seg->index, result); in __wa_seg_submit()
1419 wa_xfer_put(xfer); in __wa_seg_submit()
1425 struct wahc *wa = xfer->wa; in __wa_seg_submit()
1426 wa_xfer_get(xfer); in __wa_seg_submit()
1430 __func__, xfer, seg->index, result); in __wa_seg_submit()
1431 wa_xfer_put(xfer); in __wa_seg_submit()
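
The wa_xfer_get()/wa_xfer_put() pairs in __wa_seg_submit() show the reference discipline for in-flight URBs: a reference is taken before each usb_submit_urb() so the completion callback always finds the transfer alive, and it is dropped straight away when a submission fails and the callback will never run. A generic sketch of that pattern (the helper name and the release argument are illustrative, not the driver's):

	#include <linux/kref.h>
	#include <linux/usb.h>

	/* One reference per in-flight URB; dropped immediately on submit failure. */
	static int sketch_submit_with_ref(struct kref *xfer_ref, struct urb *urb,
					  gfp_t gfp, void (*release)(struct kref *))
	{
		int result;

		kref_get(xfer_ref);		/* owned by the URB completion callback */
		result = usb_submit_urb(urb, gfp);
		if (result < 0)
			kref_put(xfer_ref, release);	/* callback will not run */
		return result;
	}
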
1469 struct wa_xfer *xfer; in __wa_xfer_delayed_run() local
1481 xfer = seg->xfer; in __wa_xfer_delayed_run()
1487 wa_xfer_get(xfer); in __wa_xfer_delayed_run()
1488 result = __wa_seg_submit(rpipe, xfer, seg, &dto_done); in __wa_xfer_delayed_run()
1493 xfer, wa_xfer_id(xfer), seg->index, in __wa_xfer_delayed_run()
1499 spin_lock_irqsave(&xfer->lock, flags); in __wa_xfer_delayed_run()
1500 __wa_xfer_abort(xfer); in __wa_xfer_delayed_run()
1505 xfer->segs_done++; in __wa_xfer_delayed_run()
1506 done = __wa_xfer_is_done(xfer); in __wa_xfer_delayed_run()
1507 spin_unlock_irqrestore(&xfer->lock, flags); in __wa_xfer_delayed_run()
1509 wa_xfer_completion(xfer); in __wa_xfer_delayed_run()
1512 wa_xfer_put(xfer); in __wa_xfer_delayed_run()
1554 static int __wa_xfer_submit(struct wa_xfer *xfer) in __wa_xfer_submit() argument
1557 struct wahc *wa = xfer->wa; in __wa_xfer_submit()
1562 struct wa_rpipe *rpipe = xfer->ep->hcpriv; in __wa_xfer_submit()
1568 list_add_tail(&xfer->list_node, &wa->xfer_list); in __wa_xfer_submit()
1574 for (cnt = 0; cnt < xfer->segs; cnt++) { in __wa_xfer_submit()
1579 seg = xfer->seg[cnt]; in __wa_xfer_submit()
1588 result = __wa_seg_submit(rpipe, xfer, seg, in __wa_xfer_submit()
1591 xfer, wa_xfer_id(xfer), cnt, available, in __wa_xfer_submit()
1597 __wa_xfer_abort(xfer); in __wa_xfer_submit()
1605 xfer, wa_xfer_id(xfer), cnt, available, empty); in __wa_xfer_submit()
1609 xfer->segs_submitted++; in __wa_xfer_submit()
1652 static int wa_urb_enqueue_b(struct wa_xfer *xfer) in wa_urb_enqueue_b() argument
1656 struct urb *urb = xfer->urb; in wa_urb_enqueue_b()
1657 struct wahc *wa = xfer->wa; in wa_urb_enqueue_b()
1662 result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp); in wa_urb_enqueue_b()
1684 spin_lock_irqsave(&xfer->lock, flags); in wa_urb_enqueue_b()
1685 xfer->wusb_dev = wusb_dev; in wa_urb_enqueue_b()
1692 result = __wa_xfer_setup(xfer, urb); in wa_urb_enqueue_b()
1702 wa_xfer_get(xfer); in wa_urb_enqueue_b()
1703 result = __wa_xfer_submit(xfer); in wa_urb_enqueue_b()
1708 spin_unlock_irqrestore(&xfer->lock, flags); in wa_urb_enqueue_b()
1709 wa_xfer_put(xfer); in wa_urb_enqueue_b()
1719 spin_unlock_irqrestore(&xfer->lock, flags); in wa_urb_enqueue_b()
1724 rpipe_put(xfer->ep->hcpriv); in wa_urb_enqueue_b()
1726 xfer->result = result; in wa_urb_enqueue_b()
1730 done = __wa_xfer_is_done(xfer); in wa_urb_enqueue_b()
1731 xfer->result = result; in wa_urb_enqueue_b()
1732 spin_unlock_irqrestore(&xfer->lock, flags); in wa_urb_enqueue_b()
1734 wa_xfer_completion(xfer); in wa_urb_enqueue_b()
1735 wa_xfer_put(xfer); in wa_urb_enqueue_b()
1753 struct wa_xfer *xfer, *next; in wa_urb_enqueue_run() local
1767 list_for_each_entry_safe(xfer, next, &tmp_list, list_node) { in wa_urb_enqueue_run()
1768 list_del_init(&xfer->list_node); in wa_urb_enqueue_run()
1770 urb = xfer->urb; in wa_urb_enqueue_run()
1771 if (wa_urb_enqueue_b(xfer) < 0) in wa_urb_enqueue_run()
1772 wa_xfer_giveback(xfer); in wa_urb_enqueue_run()
1784 struct wa_xfer *xfer, *next; in wa_process_errored_transfers_run() local
1799 list_for_each_entry_safe(xfer, next, &tmp_list, list_node) { in wa_process_errored_transfers_run()
1804 spin_lock_irqsave(&xfer->lock, flags); in wa_process_errored_transfers_run()
1805 ep = xfer->ep; in wa_process_errored_transfers_run()
1807 spin_unlock_irqrestore(&xfer->lock, flags); in wa_process_errored_transfers_run()
1813 wa_xfer_completion(xfer); in wa_process_errored_transfers_run()
1838 struct wa_xfer *xfer; in wa_urb_enqueue() local
1857 xfer = kzalloc(sizeof(*xfer), gfp); in wa_urb_enqueue()
1858 if (xfer == NULL) in wa_urb_enqueue()
1864 wa_xfer_init(xfer); in wa_urb_enqueue()
1865 xfer->wa = wa_get(wa); in wa_urb_enqueue()
1866 xfer->urb = urb; in wa_urb_enqueue()
1867 xfer->gfp = gfp; in wa_urb_enqueue()
1868 xfer->ep = ep; in wa_urb_enqueue()
1869 urb->hcpriv = xfer; in wa_urb_enqueue()
1872 xfer, urb, urb->pipe, urb->transfer_buffer_length, in wa_urb_enqueue()
1880 list_add_tail(&xfer->list_node, &wa->xfer_delayed_list); in wa_urb_enqueue()
1884 result = wa_urb_enqueue_b(xfer); in wa_urb_enqueue()
1893 wa_put(xfer->wa); in wa_urb_enqueue()
1894 wa_xfer_put(xfer); in wa_urb_enqueue()
1904 kfree(xfer); in wa_urb_enqueue()
1935 struct wa_xfer *xfer; in wa_urb_dequeue() local
1956 xfer = urb->hcpriv; in wa_urb_dequeue()
1957 if (xfer == NULL) in wa_urb_dequeue()
1959 spin_lock_irqsave(&xfer->lock, flags); in wa_urb_dequeue()
1960 pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer)); in wa_urb_dequeue()
1961 rpipe = xfer->ep->hcpriv; in wa_urb_dequeue()
1964 __func__, xfer, wa_xfer_id(xfer), in wa_urb_dequeue()
1973 if (__wa_xfer_is_done(xfer)) { in wa_urb_dequeue()
1975 xfer, wa_xfer_id(xfer)); in wa_urb_dequeue()
1981 if (!list_empty(&xfer->list_node) && xfer->seg == NULL) in wa_urb_dequeue()
1984 if (xfer->seg == NULL) /* still hasn't reached */ in wa_urb_dequeue()
1987 xfer_abort_pending = __wa_xfer_abort(xfer) >= 0; in wa_urb_dequeue()
1993 for (cnt = 0; cnt < xfer->segs; cnt++) { in wa_urb_dequeue()
1994 seg = xfer->seg[cnt]; in wa_urb_dequeue()
1996 __func__, wa_xfer_id(xfer), cnt, seg->status); in wa_urb_dequeue()
2001 xfer, cnt, seg->status); in wa_urb_dequeue()
2014 xfer->segs_done++; in wa_urb_dequeue()
2047 xfer->segs_done++; in wa_urb_dequeue()
2053 xfer->result = urb->status; /* -ENOENT or -ECONNRESET */ in wa_urb_dequeue()
2054 done = __wa_xfer_is_done(xfer); in wa_urb_dequeue()
2055 spin_unlock_irqrestore(&xfer->lock, flags); in wa_urb_dequeue()
2057 wa_xfer_completion(xfer); in wa_urb_dequeue()
2060 wa_xfer_put(xfer); in wa_urb_dequeue()
2064 spin_unlock_irqrestore(&xfer->lock, flags); in wa_urb_dequeue()
2065 wa_xfer_put(xfer); in wa_urb_dequeue()
2069 list_del_init(&xfer->list_node); in wa_urb_dequeue()
2071 xfer->result = urb->status; in wa_urb_dequeue()
2072 spin_unlock_irqrestore(&xfer->lock, flags); in wa_urb_dequeue()
2073 wa_xfer_giveback(xfer); in wa_urb_dequeue()
2074 wa_xfer_put(xfer); in wa_urb_dequeue()
2138 static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer, in wa_complete_remaining_xfer_segs() argument
2142 struct wa_rpipe *rpipe = xfer->ep->hcpriv; in wa_complete_remaining_xfer_segs()
2144 for (index = starting_index; index < xfer->segs_submitted; index++) { in wa_complete_remaining_xfer_segs()
2145 struct wa_seg *current_seg = xfer->seg[index]; in wa_complete_remaining_xfer_segs()
2159 xfer->segs_done++; in wa_complete_remaining_xfer_segs()
2166 __func__, wa_xfer_id(xfer), index, in wa_complete_remaining_xfer_segs()
2175 struct urb *buf_in_urb, struct wa_xfer *xfer, struct wa_seg *seg) in __wa_populate_buf_in_urb_isoc() argument
2180 xfer->urb->iso_frame_desc; in __wa_populate_buf_in_urb_isoc()
2216 buf_in_urb->transfer_dma = xfer->urb->transfer_dma + in __wa_populate_buf_in_urb_isoc()
2230 static int wa_populate_buf_in_urb(struct urb *buf_in_urb, struct wa_xfer *xfer, in wa_populate_buf_in_urb() argument
2234 struct wa_seg *seg = xfer->seg[seg_idx]; in wa_populate_buf_in_urb()
2240 if (xfer->is_dma) { in wa_populate_buf_in_urb()
2241 buf_in_urb->transfer_dma = xfer->urb->transfer_dma in wa_populate_buf_in_urb()
2242 + (seg_idx * xfer->seg_size); in wa_populate_buf_in_urb()
2251 if (xfer->urb->transfer_buffer) { in wa_populate_buf_in_urb()
2253 xfer->urb->transfer_buffer in wa_populate_buf_in_urb()
2254 + (seg_idx * xfer->seg_size); in wa_populate_buf_in_urb()
2263 xfer->urb->sg, in wa_populate_buf_in_urb()
2264 seg_idx * xfer->seg_size, in wa_populate_buf_in_urb()
2288 static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer, in wa_xfer_result_chew() argument
2303 spin_lock_irqsave(&xfer->lock, flags); in wa_xfer_result_chew()
2305 if (unlikely(seg_idx >= xfer->segs)) in wa_xfer_result_chew()
2307 seg = xfer->seg[seg_idx]; in wa_xfer_result_chew()
2308 rpipe = xfer->ep->hcpriv; in wa_xfer_result_chew()
2311 xfer, wa_xfer_id(xfer), seg_idx, usb_status, seg->status); in wa_xfer_result_chew()
2320 xfer, seg_idx, seg->status); in wa_xfer_result_chew()
2326 xfer, xfer->id, seg->index, usb_status); in wa_xfer_result_chew()
2340 wa_complete_remaining_xfer_segs(xfer, seg->index + 1, in wa_xfer_result_chew()
2342 if (usb_pipeisoc(xfer->urb->pipe) in wa_xfer_result_chew()
2345 wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer); in wa_xfer_result_chew()
2348 } else if (xfer->is_inbound && !usb_pipeisoc(xfer->urb->pipe) in wa_xfer_result_chew()
2352 result = wa_populate_buf_in_urb(buf_in_urb, xfer, seg_idx, in wa_xfer_result_chew()
2366 done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE); in wa_xfer_result_chew()
2368 spin_unlock_irqrestore(&xfer->lock, flags); in wa_xfer_result_chew()
2370 wa_xfer_completion(xfer); in wa_xfer_result_chew()
2383 xfer, seg_idx, result); in wa_xfer_result_chew()
2388 __wa_xfer_abort(xfer); in wa_xfer_result_chew()
2391 xfer->segs_done++; in wa_xfer_result_chew()
2393 wa_complete_remaining_xfer_segs(xfer, seg->index + 1, seg->status); in wa_xfer_result_chew()
2394 done = __wa_xfer_is_done(xfer); in wa_xfer_result_chew()
2400 usb_endpoint_xfer_control(&xfer->ep->desc) && in wa_xfer_result_chew()
2406 list_move_tail(&xfer->list_node, &wa->xfer_errored_list); in wa_xfer_result_chew()
2408 spin_unlock_irqrestore(&xfer->lock, flags); in wa_xfer_result_chew()
2411 spin_unlock_irqrestore(&xfer->lock, flags); in wa_xfer_result_chew()
2413 wa_xfer_completion(xfer); in wa_xfer_result_chew()
2421 spin_unlock_irqrestore(&xfer->lock, flags); in wa_xfer_result_chew()
2422 wa_urb_dequeue(wa, xfer->urb, -ENOENT); in wa_xfer_result_chew()
2424 dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx); in wa_xfer_result_chew()
2434 spin_unlock_irqrestore(&xfer->lock, flags); in wa_xfer_result_chew()
2447 struct wa_xfer *xfer; in wa_process_iso_packet_status() local
2464 xfer = wa_xfer_get_by_id(wa, wa->dti_isoc_xfer_in_progress); in wa_process_iso_packet_status()
2465 if (xfer == NULL) { in wa_process_iso_packet_status()
2470 spin_lock_irqsave(&xfer->lock, flags); in wa_process_iso_packet_status()
2471 if (unlikely(wa->dti_isoc_xfer_seg >= xfer->segs)) in wa_process_iso_packet_status()
2473 seg = xfer->seg[wa->dti_isoc_xfer_seg]; in wa_process_iso_packet_status()
2474 rpipe = xfer->ep->hcpriv; in wa_process_iso_packet_status()
2490 xfer->urb->start_frame = in wa_process_iso_packet_status()
2494 xfer->urb->iso_frame_desc; in wa_process_iso_packet_status()
2512 if (xfer->is_inbound && data_frame_count) { in wa_process_iso_packet_status()
2528 buf_in_urb, xfer, seg); in wa_process_iso_packet_status()
2540 &(xfer->urb->iso_frame_desc[urb_frame_index]); in wa_process_iso_packet_status()
2564 done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE); in wa_process_iso_packet_status()
2566 spin_unlock_irqrestore(&xfer->lock, flags); in wa_process_iso_packet_status()
2572 wa_xfer_completion(xfer); in wa_process_iso_packet_status()
2575 wa_xfer_put(xfer); in wa_process_iso_packet_status()
2579 spin_unlock_irqrestore(&xfer->lock, flags); in wa_process_iso_packet_status()
2580 wa_xfer_put(xfer); in wa_process_iso_packet_status()
2598 struct wa_xfer *xfer = seg->xfer; in wa_buf_in_cb() local
2611 spin_lock_irqsave(&xfer->lock, flags); in wa_buf_in_cb()
2612 wa = xfer->wa; in wa_buf_in_cb()
2616 rpipe = xfer->ep->hcpriv; in wa_buf_in_cb()
2618 if (usb_pipeisoc(xfer->urb->pipe)) { in wa_buf_in_cb()
2620 xfer->urb->iso_frame_desc; in wa_buf_in_cb()
2641 spin_unlock_irqrestore(&xfer->lock, flags); in wa_buf_in_cb()
2645 spin_lock_irqsave(&xfer->lock, flags); in wa_buf_in_cb()
2653 xfer, seg); in wa_buf_in_cb()
2676 xfer, wa_xfer_id(xfer), seg->index, in wa_buf_in_cb()
2679 done = __wa_xfer_mark_seg_as_done(xfer, seg, in wa_buf_in_cb()
2682 spin_unlock_irqrestore(&xfer->lock, flags); in wa_buf_in_cb()
2684 wa_xfer_completion(xfer); in wa_buf_in_cb()
2698 spin_lock_irqsave(&xfer->lock, flags); in wa_buf_in_cb()
2701 xfer, wa_xfer_id(xfer), seg->index, in wa_buf_in_cb()
2712 done = __wa_xfer_mark_seg_as_done(xfer, seg, in wa_buf_in_cb()
2715 __wa_xfer_abort(xfer); in wa_buf_in_cb()
2716 spin_unlock_irqrestore(&xfer->lock, flags); in wa_buf_in_cb()
2718 wa_xfer_completion(xfer); in wa_buf_in_cb()
2776 struct wa_xfer *xfer; in wa_dti_cb() local
2807 xfer = wa_xfer_get_by_id(wa, xfer_id); in wa_dti_cb()
2808 if (xfer == NULL) { in wa_dti_cb()
2814 wa_xfer_result_chew(wa, xfer, xfer_result); in wa_dti_cb()
2815 wa_xfer_put(xfer); in wa_dti_cb()
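
wa_xfer_get_by_id() is called here and in __wa_xfer_abort_cb()/wa_process_iso_packet_status() to translate a dwTransferID from the wire back into a referenced transfer; its body does not appear among these matches, so the sketch below only shows the pattern such a lookup would normally follow (walk wa->xfer_list under xfer_list_lock, compare ids, take a reference before returning), with simplified, invented types:

	#include <linux/kref.h>
	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct host_sketch {
		spinlock_t xfer_list_lock;
		struct list_head xfer_list;
	};

	struct xfer_entry_sketch {
		struct kref refcnt;
		struct list_head list_node;
		u32 id;
	};

	/* Assumed lookup pattern; the caller drops the reference with kref_put(). */
	static struct xfer_entry_sketch *sketch_get_by_id(struct host_sketch *wa, u32 id)
	{
		struct xfer_entry_sketch *x, *found = NULL;
		unsigned long flags;

		spin_lock_irqsave(&wa->xfer_list_lock, flags);
		list_for_each_entry(x, &wa->xfer_list, list_node) {
			if (x->id == id) {
				kref_get(&x->refcnt);
				found = x;
				break;
			}
		}
		spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
		return found;
	}
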