Lines matching refs: hwreq
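
Judging by the function names (add_td_to_list, _hardware_enqueue, trace_ci_prepare_td, ...), these references appear to come from the chipidea USB device controller driver, drivers/usb/chipidea/udc.c in the Linux kernel. Every match below dereferences the same wrapper object, so a paraphrase of it is useful up front; only the members visible in this listing are shown, and the authoritative definition lives in the driver's ci.h:

```c
#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/usb/gadget.h>

/*
 * Paraphrase of the request wrapper every match below dereferences;
 * members not visible in this listing are omitted.
 */
struct ci_hw_req {
	struct usb_request req;	/* gadget request: length, dma, actual, status, sg, ... */
	struct list_head queue;	/* link in the endpoint queue (hwep->qh.queue) */
	struct list_head tds;	/* chain of td_node transfer descriptors */
	struct sg_table sgt;	/* bounce table used by sglist_do_bounce()/debounce() */
};
```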
358 static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq, in add_td_to_list() argument
379 u32 mul = hwreq->req.length / hwep->ep.maxpacket; in add_td_to_list()
381 if (hwreq->req.length == 0 in add_td_to_list()
382 || hwreq->req.length % hwep->ep.maxpacket) in add_td_to_list()
388 temp = (u32) (sg_dma_address(s) + hwreq->req.actual); in add_td_to_list()
391 temp = (u32) (hwreq->req.dma + hwreq->req.actual); in add_td_to_list()
403 hwreq->req.actual += length; in add_td_to_list()
405 if (!list_empty(&hwreq->tds)) { in add_td_to_list()
407 lastnode = list_entry(hwreq->tds.prev, in add_td_to_list()
413 list_add_tail(&node->td, &hwreq->tds); in add_td_to_list()
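
add_td_to_list() appends one transfer descriptor (TD) to the request's hwreq->tds chain, advancing hwreq->req.actual by the bytes the new TD covers (line 403). Two of its computations are worth spelling out as standalone models; all names below are illustrative, not the driver's:

```c
#include <linux/types.h>

/*
 * Lines 379-382: packets per isochronous transaction is
 * length / maxpacket, rounded up for a remainder or for a
 * zero-length request.
 */
static u32 iso_mult(u32 length, u32 maxpacket)
{
	u32 mul = length / maxpacket;

	if (length == 0 || length % maxpacket)
		mul++;
	return mul;
}

/*
 * Lines 388-391: the TD buffer pointer is either the scatterlist
 * entry's DMA address or the request's linear mapping, offset in
 * both cases by the bytes already consumed (req.actual).
 */
static u32 td_buffer(u64 base, u32 actual)
{
	return (u32)(base + actual);
}
```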
428 struct ci_hw_req *hwreq) in prepare_td_for_non_sg() argument
430 unsigned int rest = hwreq->req.length; in prepare_td_for_non_sg()
435 ret = add_td_to_list(hwep, hwreq, 0, NULL); in prepare_td_for_non_sg()
444 if (hwreq->req.dma % PAGE_SIZE) in prepare_td_for_non_sg()
448 unsigned int count = min(hwreq->req.length - hwreq->req.actual, in prepare_td_for_non_sg()
451 ret = add_td_to_list(hwep, hwreq, count, NULL); in prepare_td_for_non_sg()
458 if (hwreq->req.zero && hwreq->req.length && hwep->dir == TX in prepare_td_for_non_sg()
459 && (hwreq->req.length % hwep->ep.maxpacket == 0)) { in prepare_td_for_non_sg()
460 ret = add_td_to_list(hwep, hwreq, 0, NULL); in prepare_td_for_non_sg()
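
For a linearly mapped request, prepare_td_for_non_sg() walks the remaining length (line 430) and emits one TD per chunk until the request is covered (lines 448-451); a zero-length request gets a single empty TD up front (line 435). The chunk size is bounded by how many pages a single TD can address, and a buffer that does not start page aligned (line 444) gives up one page of that capacity so later TDs stay aligned. A standalone model of the sizing, assuming the driver's usual five-pages-per-TD layout and 4 KiB pages (both assumptions here):

```c
#include <linux/types.h>

#define SKETCH_PAGE_SIZE	4096u	/* assumption for illustration */
#define SKETCH_TD_PAGE_COUNT	5u	/* assumption for illustration */

/* Model of the per-TD capacity calculation in prepare_td_for_non_sg(). */
static u32 td_chunk(u64 req_dma, u32 remaining)
{
	u32 pages = SKETCH_TD_PAGE_COUNT;
	u32 cap;

	if (req_dma % SKETCH_PAGE_SIZE)
		pages--;		/* first page only partially usable */

	cap = pages * SKETCH_PAGE_SIZE;
	return remaining < cap ? remaining : cap;
}
```

The trailing add_td_to_list(hwep, hwreq, 0, NULL) at line 460 appends a zero-length TD so that a TX request with req.zero set, whose length is an exact multiple of maxpacket, is terminated on the wire by a short (zero-length) packet.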
468 static int prepare_td_per_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq, in prepare_td_per_sg() argument
474 hwreq->req.actual = 0; in prepare_td_per_sg()
479 ret = add_td_to_list(hwep, hwreq, count, s); in prepare_td_per_sg()
508 static int prepare_td_for_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq) in prepare_td_for_sg() argument
510 struct usb_request *req = &hwreq->req; in prepare_td_for_sg()
530 ret = prepare_td_per_sg(hwep, hwreq, s); in prepare_td_for_sg()
534 node = list_entry(hwreq->tds.prev, in prepare_td_for_sg()
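
For scatter/gather requests, prepare_td_for_sg() walks the mapped entries and hands each one to prepare_td_per_sg(), which resets req.actual per entry (line 474) so that add_td_to_list() offsets into the current segment via sg_dma_address(s) + req.actual (line 388) rather than into req.dma. A simplified shape of the walk, with error handling and the last-node bookkeeping at line 534 elided; the for_each_sg form is an equivalent rewrite of the driver's loop, not its literal code:

```c
#include <linux/scatterlist.h>
#include <linux/usb/gadget.h>
#include "ci.h"	/* driver-local: struct ci_hw_ep, struct ci_hw_req */

static int sketch_prepare_td_for_sg(struct ci_hw_ep *hwep,
				    struct ci_hw_req *hwreq)
{
	struct usb_request *req = &hwreq->req;
	struct scatterlist *s;
	int ret, i;

	for_each_sg(req->sg, s, req->num_mapped_sgs, i) {
		/* one TD chain segment per mapped scatterlist entry */
		ret = prepare_td_per_sg(hwep, hwreq, s);
		if (ret)
			return ret;
	}
	return 0;
}
```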
587 static int sglist_do_bounce(struct ci_hw_req *hwreq, int index, in sglist_do_bounce() argument
596 ret = sg_alloc_table(&hwreq->sgt, nents, GFP_KERNEL); in sglist_do_bounce()
600 sg = src = hwreq->req.sg; in sglist_do_bounce()
601 num_sgs = hwreq->req.num_sgs; in sglist_do_bounce()
602 rest = hwreq->req.length; in sglist_do_bounce()
603 dst = hwreq->sgt.sgl; in sglist_do_bounce()
616 sg_free_table(&hwreq->sgt); in sglist_do_bounce()
622 hwreq->req.sg = hwreq->sgt.sgl; in sglist_do_bounce()
623 hwreq->req.num_sgs = nents; in sglist_do_bounce()
624 hwreq->sgt.sgl = sg; in sglist_do_bounce()
625 hwreq->sgt.nents = num_sgs; in sglist_do_bounce()
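
sglist_do_bounce() exists for scatterlists the controller cannot consume directly: it allocates a fresh table (line 596, freed again on the failure path at line 616), copies the affected data into it for TX (the copy argument at line 690 below is hwep->dir == TX), and then swaps the lists so the DMA path sees the bounced one while the caller's original is parked in hwreq->sgt for later restoration (lines 622-625). A standalone model of that swap, with illustrative stand-ins for struct usb_request and struct sg_table:

```c
/* Illustrative types, not the kernel's. */
struct sketch_req { void *sg; unsigned int num_sgs; };
struct sketch_sgt { void *sgl; unsigned int nents; };

/*
 * Model of the swap at lines 622-625: the DMA path is handed the
 * bounced list while the caller's original is parked for
 * sglist_do_debounce().
 */
static void swap_in_bounced(struct sketch_req *req, struct sketch_sgt *sgt,
			    void *bounced_sgl, unsigned int bounced_nents)
{
	void *orig_sg = req->sg;
	unsigned int orig_num = req->num_sgs;

	req->sg = bounced_sgl;
	req->num_sgs = bounced_nents;
	sgt->sgl = orig_sg;	/* stashed for restoration */
	sgt->nents = orig_num;
}
```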
635 static void sglist_do_debounce(struct ci_hw_req *hwreq, bool copy) in sglist_do_debounce() argument
641 sg = hwreq->req.sg; in sglist_do_debounce()
642 num_sgs = hwreq->req.num_sgs; in sglist_do_debounce()
647 dst = hwreq->sgt.sgl; in sglist_do_debounce()
651 nents = hwreq->sgt.nents - num_sgs + 1; in sglist_do_debounce()
655 hwreq->req.sg = hwreq->sgt.sgl; in sglist_do_debounce()
656 hwreq->req.num_sgs = hwreq->sgt.nents; in sglist_do_debounce()
657 hwreq->sgt.sgl = sg; in sglist_do_debounce()
658 hwreq->sgt.nents = num_sgs; in sglist_do_debounce()
661 sg_free_table(&hwreq->sgt); in sglist_do_debounce()
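
sglist_do_debounce() is the mirror image: when copy is true (RX completion, line 891 below) the bounced bytes are copied back into the tail of the caller's original entries, then the same four-assignment swap restores req.sg/num_sgs (lines 655-658) and leaves the bounce list in hwreq->sgt so sg_free_table() at line 661 releases it. The restore half, in the same illustrative types as above:

```c
/*
 * Model of the restore at lines 655-658: the caller gets its original
 * list back, and the bounce list lands in sgt so sg_free_table()
 * frees the right table.
 */
static void swap_back_original(struct sketch_req *req, struct sketch_sgt *sgt)
{
	void *bounced_sgl = req->sg;
	unsigned int bounced_nents = req->num_sgs;

	req->sg = sgt->sgl;
	req->num_sgs = sgt->nents;
	sgt->sgl = bounced_sgl;
	sgt->nents = bounced_nents;
}
```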
671 static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq) in _hardware_enqueue() argument
680 if (hwreq->req.status == -EALREADY) in _hardware_enqueue()
683 hwreq->req.status = -EALREADY; in _hardware_enqueue()
685 if (hwreq->req.num_sgs && hwreq->req.length && in _hardware_enqueue()
688 &hwreq->req); in _hardware_enqueue()
689 if (ret < hwreq->req.num_sgs) { in _hardware_enqueue()
690 ret = sglist_do_bounce(hwreq, ret, hwep->dir == TX, in _hardware_enqueue()
698 &hwreq->req, hwep->dir); in _hardware_enqueue()
702 if (hwreq->sgt.sgl) { in _hardware_enqueue()
704 sg = sg_last(hwreq->req.sg, hwreq->req.num_sgs); in _hardware_enqueue()
708 if (hwreq->req.num_mapped_sgs) in _hardware_enqueue()
709 ret = prepare_td_for_sg(hwep, hwreq); in _hardware_enqueue()
711 ret = prepare_td_for_non_sg(hwep, hwreq); in _hardware_enqueue()
716 lastnode = list_entry(hwreq->tds.prev, in _hardware_enqueue()
720 if (!hwreq->req.no_interrupt) in _hardware_enqueue()
723 list_for_each_entry_safe(firstnode, lastnode, &hwreq->tds, td) in _hardware_enqueue()
724 trace_ci_prepare_td(hwep, hwreq, firstnode); in _hardware_enqueue()
726 firstnode = list_first_entry(&hwreq->tds, struct td_node, td); in _hardware_enqueue()
730 hwreq->req.actual = 0; in _hardware_enqueue()
775 u32 mul = hwreq->req.length / hwep->ep.maxpacket; in _hardware_enqueue()
777 if (hwreq->req.length == 0 in _hardware_enqueue()
778 || hwreq->req.length % hwep->ep.maxpacket) in _hardware_enqueue()
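
_hardware_enqueue() uses -EALREADY as an in-flight sentinel: a request still carrying it is already on the hardware queue and is rejected (lines 680-683), and _hardware_dequeue() later replaces it with the real result. After the optional bounce, the request is DMA mapped (the call whose argument tail shows at line 698), TDs are built by the sg or non-sg path (lines 708-711), the last TD gets an interrupt-on-complete mark unless no_interrupt is set (lines 716-720), and the isochronous multiplier computed again at lines 775-778 is programmed into the queue head before priming. A standalone model of the sentinel handshake:

```c
#include <linux/errno.h>

/*
 * Model of lines 680-683: a status of -EALREADY means "on the hardware
 * queue"; it is set here and only replaced by _hardware_dequeue() once
 * the transfer resolves.
 */
static int mark_in_flight(int *status)
{
	if (*status == -EALREADY)
		return -EALREADY;	/* already queued to hardware */
	*status = -EALREADY;
	return 0;
}
```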
820 static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq) in _hardware_dequeue() argument
825 unsigned actual = hwreq->req.length; in _hardware_dequeue()
829 if (hwreq->req.status != -EALREADY) in _hardware_dequeue()
832 hwreq->req.status = 0; in _hardware_dequeue()
834 list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) { in _hardware_dequeue()
836 trace_ci_complete_td(hwep, hwreq, node); in _hardware_dequeue()
844 hwreq->req.status = -EALREADY; in _hardware_dequeue()
852 hwreq->req.status = tmptoken & TD_STATUS; in _hardware_dequeue()
853 if ((TD_STATUS_HALTED & hwreq->req.status)) { in _hardware_dequeue()
854 hwreq->req.status = -EPIPE; in _hardware_dequeue()
856 } else if ((TD_STATUS_DT_ERR & hwreq->req.status)) { in _hardware_dequeue()
857 hwreq->req.status = -EPROTO; in _hardware_dequeue()
859 } else if ((TD_STATUS_TR_ERR & hwreq->req.status)) { in _hardware_dequeue()
861 hwreq->req.status = 0; in _hardware_dequeue()
863 hwreq->req.status = -EILSEQ; in _hardware_dequeue()
870 hwreq->req.status = -EPROTO; in _hardware_dequeue()
887 &hwreq->req, hwep->dir); in _hardware_dequeue()
890 if (hwreq->sgt.sgl) in _hardware_dequeue()
891 sglist_do_debounce(hwreq, hwep->dir == RX); in _hardware_dequeue()
893 hwreq->req.actual += actual; in _hardware_dequeue()
895 if (hwreq->req.status) in _hardware_dequeue()
896 return hwreq->req.status; in _hardware_dequeue()
898 return hwreq->req.actual; in _hardware_dequeue()
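
_hardware_dequeue() walks the TD chain (line 834); while any TD is still active it re-arms the -EALREADY sentinel (line 844) and gives up, otherwise it folds the hardware token into an errno: HALTED becomes -EPIPE, a data toggle error -EPROTO, and a transaction error -EILSEQ except on isochronous endpoints, where it is ignored (lines 852-863); a TX TD that finishes with bytes still remaining is also reported as -EPROTO (line 870). The byte count starts at the full length (line 825) and, after unconsumed remainders are subtracted, is folded into req.actual (line 893). A standalone model of the token decoding; the bit positions follow the EHCI-style dTD status byte this family of controllers uses, but treat the SK_* masks as assumptions, with the driver's TD_STATUS_* definitions authoritative:

```c
#include <linux/errno.h>
#include <linux/types.h>

#define SK_TD_STATUS_HALTED	(1u << 6)	/* assumed bit position */
#define SK_TD_STATUS_DT_ERR	(1u << 5)	/* assumed bit position */
#define SK_TD_STATUS_TR_ERR	(1u << 3)	/* assumed bit position */

/* Model of the decoding at lines 852-863. */
static int td_token_to_errno(u32 token, bool is_isoc)
{
	if (token & SK_TD_STATUS_HALTED)
		return -EPIPE;		/* endpoint halted */
	if (token & SK_TD_STATUS_DT_ERR)
		return -EPROTO;		/* data toggle error */
	if (token & SK_TD_STATUS_TR_ERR)
		return is_isoc ? 0 : -EILSEQ;	/* isoc errors are ignored */
	return 0;
}
```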
921 struct ci_hw_req *hwreq = list_entry(hwep->qh.queue.next, in _ep_nuke() local
924 list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) { in _ep_nuke()
931 list_del_init(&hwreq->queue); in _ep_nuke()
932 hwreq->req.status = -ESHUTDOWN; in _ep_nuke()
934 if (hwreq->req.complete != NULL) { in _ep_nuke()
936 usb_gadget_giveback_request(&hwep->ep, &hwreq->req); in _ep_nuke()
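
_ep_nuke() drains everything still queued on the endpoint: each request is unlinked (line 931) and completed with -ESHUTDOWN (line 932) so the gadget driver can tell teardown apart from a transfer failure. A simplified shape of the drain, with locking and td_node cleanup elided; the _safe iterator is required because the body unlinks the node it stands on:

```c
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/usb/gadget.h>
#include "ci.h"	/* driver-local: struct ci_hw_ep, struct ci_hw_req */

static void sketch_ep_nuke(struct ci_hw_ep *hwep)
{
	struct ci_hw_req *hwreq, *tmp;

	list_for_each_entry_safe(hwreq, tmp, &hwep->qh.queue, queue) {
		list_del_init(&hwreq->queue);
		hwreq->req.status = -ESHUTDOWN;	/* teardown, not failure */
		if (hwreq->req.complete)
			usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
	}
}
```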
1096 struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req); in _ep_queue() local
1115 hwreq->req.length > hwep->ep.mult * hwep->ep.maxpacket) { in _ep_queue()
1121 hwreq->req.length > CI_MAX_REQ_SIZE) { in _ep_queue()
1127 if (!list_empty(&hwreq->queue)) { in _ep_queue()
1133 hwreq->req.status = -EINPROGRESS; in _ep_queue()
1134 hwreq->req.actual = 0; in _ep_queue()
1136 retval = _hardware_enqueue(hwep, hwreq); in _ep_queue()
1141 list_add_tail(&hwreq->queue, &hwep->qh.queue); in _ep_queue()
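
_ep_queue() admission-checks a request before handing it to the hardware: an isochronous request may not exceed mult * maxpacket (line 1115), on controllers with a short-packet limit the length is capped at CI_MAX_REQ_SIZE (line 1121), and a request whose hwreq->queue link is still populated is already queued and rejected (line 1127). Only then are status and actual initialised (lines 1133-1134) and the request enqueued and linked onto the endpoint queue (lines 1136-1141). A standalone model of the checks; the errno choices mirror common gadget conventions and should be treated as illustrative:

```c
#include <linux/errno.h>
#include <linux/types.h>

static int sketch_ep_queue_checks(unsigned int length, unsigned int iso_cap,
				  unsigned int max_req_size, bool linked)
{
	if (iso_cap && length > iso_cap)	/* iso: > mult * maxpacket */
		return -EMSGSIZE;
	if (length > max_req_size)		/* > CI_MAX_REQ_SIZE */
		return -EMSGSIZE;
	if (linked)				/* hwreq->queue not empty */
		return -EBUSY;
	return 0;
}
```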
1274 struct ci_hw_req *hwreq, *hwreqtemp; in isr_tr_complete_low() local
1278 list_for_each_entry_safe(hwreq, hwreqtemp, &hwep->qh.queue, in isr_tr_complete_low()
1280 retval = _hardware_dequeue(hwep, hwreq); in isr_tr_complete_low()
1283 list_del_init(&hwreq->queue); in isr_tr_complete_low()
1284 if (hwreq->req.complete != NULL) { in isr_tr_complete_low()
1287 hwreq->req.length) in isr_tr_complete_low()
1289 usb_gadget_giveback_request(&hweptemp->ep, &hwreq->req); in isr_tr_complete_low()
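
On a transfer-complete interrupt, isr_tr_complete_low() walks the endpoint queue in order; because the hardware also completes in order, the walk stops at the first request _hardware_dequeue() reports as still in flight. Completed requests are unlinked and given back (lines 1283-1289). A simplified shape, with locking and the hweptemp indirection for the control endpoint (lines 1287-1289) elided:

```c
#include <linux/list.h>
#include <linux/usb/gadget.h>
#include "ci.h"	/* driver-local: struct ci_hw_ep, struct ci_hw_req */

static void sketch_tr_complete(struct ci_hw_ep *hwep)
{
	struct ci_hw_req *hwreq, *tmp;

	list_for_each_entry_safe(hwreq, tmp, &hwep->qh.queue, queue) {
		if (_hardware_dequeue(hwep, hwreq) < 0)
			break;			/* still in flight, stop */
		list_del_init(&hwreq->queue);
		if (hwreq->req.complete)
			usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
	}
}
```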
1636 struct ci_hw_req *hwreq; in ep_alloc_request() local
1641 hwreq = kzalloc(sizeof(struct ci_hw_req), gfp_flags); in ep_alloc_request()
1642 if (hwreq != NULL) { in ep_alloc_request()
1643 INIT_LIST_HEAD(&hwreq->queue); in ep_alloc_request()
1644 INIT_LIST_HEAD(&hwreq->tds); in ep_alloc_request()
1647 return (hwreq == NULL) ? NULL : &hwreq->req; in ep_alloc_request()
1658 struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req); in ep_free_request() local
1664 } else if (!list_empty(&hwreq->queue)) { in ep_free_request()
1671 list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) { in ep_free_request()
1678 kfree(hwreq); in ep_free_request()
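
ep_alloc_request() and ep_free_request() bracket the request lifecycle: the gadget layer only ever sees the embedded struct usb_request, so allocation returns &hwreq->req (line 1647) and free recovers the wrapper with container_of() (line 1658). A close paraphrase, simplified in that the real ep_free_request() also refuses a still-queued request (line 1664) and releases each td_node first (line 1671):

```c
#include <linux/slab.h>
#include <linux/usb/gadget.h>
#include "ci.h"	/* driver-local: struct ci_hw_req */

static struct usb_request *sketch_alloc_request(gfp_t gfp_flags)
{
	struct ci_hw_req *hwreq = kzalloc(sizeof(*hwreq), gfp_flags);

	if (!hwreq)
		return NULL;
	INIT_LIST_HEAD(&hwreq->queue);
	INIT_LIST_HEAD(&hwreq->tds);
	return &hwreq->req;	/* gadget layer sees only the embedded member */
}

static void sketch_free_request(struct usb_request *req)
{
	struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);

	kfree(hwreq);
}
```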
1716 struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req); in ep_dequeue() local
1720 if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY || in ep_dequeue()
1721 hwep->ep.desc == NULL || list_empty(&hwreq->queue) || in ep_dequeue()
1729 list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) { in ep_dequeue()
1736 list_del_init(&hwreq->queue); in ep_dequeue()
1740 if (hwreq->sgt.sgl) in ep_dequeue()
1741 sglist_do_debounce(hwreq, false); in ep_dequeue()
1745 if (hwreq->req.complete != NULL) { in ep_dequeue()
1747 usb_gadget_giveback_request(&hwep->ep, &hwreq->req); in ep_dequeue()
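
ep_dequeue() cancels a single in-flight request: the guard at lines 1720-1721 only admits a request whose status is still the -EALREADY sentinel and whose queue link is populated, on an endpoint that still has a descriptor. Its TDs are freed (line 1729), the request is unlinked (line 1736), any bounce list is restored without a copy-back since a cancelled transfer carries no useful data (line 1741), and the completion is invoked (line 1747), conventionally with a cancellation status such as -ECONNRESET. A standalone model of the guard:

```c
#include <linux/errno.h>
#include <linux/types.h>

/*
 * Model of lines 1720-1721: only a request genuinely in flight
 * (status still -EALREADY), still linked, and on a configured
 * endpoint may be cancelled; anything else is a caller error.
 */
static int sketch_can_dequeue(int status, bool linked, bool ep_configured)
{
	if (status != -EALREADY || !linked || !ep_configured)
		return -EINVAL;
	return 0;
}
```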