Lines Matching refs:endp

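The listing below is a cross-reference of every line mentioning endp in what appears to be the Linux u132-hcd host-controller driver; each entry gives the source line number, the matching code, and the enclosing function. Most references are to members of struct u132_endp, the driver's per-endpoint state. The following is a best-effort reconstruction of that structure assembled purely from the member accesses in the matches; field order, exact types, bitfield packing and the queue-size constants are all assumptions:

#define ENDP_QUEUE_SHIFT 3                      /* assumed width */
#define ENDP_QUEUE_SIZE  (1 << ENDP_QUEUE_SHIFT)
#define ENDP_QUEUE_MASK  (ENDP_QUEUE_SIZE - 1)

struct u132_endp {
        struct kref kref;               /* lifetime, see u132_endp_delete() */
        struct u132 *u132;              /* owning host controller */
        struct usb_host_endpoint *hep;  /* core endpoint, hep->hcpriv == this */
        struct u132_ring *ring;         /* hardware ring servicing this endp */
        struct list_head endp_ring;     /* links endpoints on the same ring */
        struct delayed_work scheduler;  /* runs u132_hcd_endp_work_scheduler() */
        u8 endp_number;                 /* 1-based index into u132->endp[] */
        u8 udev_number;
        u8 usb_addr;                    /* device address on the bus */
        u8 usb_endp;                    /* endpoint number within the device */
        u8 toggle_bits;                 /* 0x2 is used as "toggle unknown" */
        u8 pipetype;                    /* PIPE_CONTROL/BULK/INTERRUPT */
        unsigned active:1;              /* a transfer is in flight */
        unsigned delayed:1;             /* interrupt endp waits until jiffies */
        unsigned dequeueing:1;          /* an unlink is in progress */
        unsigned edset_flush:1;         /* scheduler must flush the hardware ED */
        unsigned input:1;
        unsigned output:1;
        unsigned long jiffies;          /* earliest time to (re)schedule */
        struct {
                spinlock_t slock;
        } queue_lock;                   /* guards the URB queue below */
        u16 queue_size;                 /* URBs queued, including overflow */
        u16 queue_next;                 /* free-running head counter */
        u16 queue_last;                 /* free-running tail counter */
        struct urb *urb_list[ENDP_QUEUE_SIZE];
        struct list_head urb_more;      /* overflow beyond ENDP_QUEUE_SIZE */
};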
201 	struct u132_endp *endp[MAX_U132_ENDPS];  member
328 struct u132_endp *endp = kref_to_u132_endp(kref); in u132_endp_delete() local
329 struct u132 *u132 = endp->u132; in u132_endp_delete()
330 u8 usb_addr = endp->usb_addr; in u132_endp_delete()
331 u8 usb_endp = endp->usb_endp; in u132_endp_delete()
334 u8 endp_number = endp->endp_number; in u132_endp_delete()
335 struct usb_host_endpoint *hep = endp->hep; in u132_endp_delete()
336 struct u132_ring *ring = endp->ring; in u132_endp_delete()
337 struct list_head *head = &endp->endp_ring; in u132_endp_delete()
339 if (endp == ring->curr_endp) { in u132_endp_delete()
351 if (endp->input) { in u132_endp_delete()
355 if (endp->output) { in u132_endp_delete()
359 u132->endp[endp_number - 1] = NULL; in u132_endp_delete()
361 kfree(endp); in u132_endp_delete()
365 static inline void u132_endp_put_kref(struct u132 *u132, struct u132_endp *endp) in u132_endp_put_kref() argument
367 kref_put(&endp->kref, u132_endp_delete); in u132_endp_put_kref()
370 static inline void u132_endp_get_kref(struct u132 *u132, struct u132_endp *endp) in u132_endp_get_kref() argument
372 kref_get(&endp->kref); in u132_endp_get_kref()
376 struct u132_endp *endp) in u132_endp_init_kref() argument
378 kref_init(&endp->kref); in u132_endp_init_kref()
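u132_endp_delete() is the kref release callback: it runs only when the last reference to the endpoint drops, at which point it detaches the endpoint from its ring, clears the back-pointer in the controller's endp[] table, and frees the memory. A minimal sketch of the release path, assuming kref_to_u132_endp() is the usual container_of() wrapper:

#include <linux/kref.h>
#include <linux/slab.h>

static inline struct u132_endp *kref_to_u132_endp(struct kref *kref)
{
        return container_of(kref, struct u132_endp, kref);
}

static void u132_endp_delete(struct kref *kref)
{
        struct u132_endp *endp = kref_to_u132_endp(kref);
        struct u132 *u132 = endp->u132;

        /* detach from the ring, advancing ring->curr_endp past this
         * endpoint if it was the current one, and drop it from the
         * endp_ring list */
        u132->endp[endp->endp_number - 1] = NULL;
        kfree(endp);
}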
382 static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp, in u132_endp_queue_work() argument
385 if (queue_delayed_work(workqueue, &endp->scheduler, delta)) in u132_endp_queue_work()
386 kref_get(&endp->kref); in u132_endp_queue_work()
389 static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp) in u132_endp_cancel_work() argument
391 if (cancel_delayed_work(&endp->scheduler)) in u132_endp_cancel_work()
392 kref_put(&endp->kref, u132_endp_delete); in u132_endp_cancel_work()
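These two helpers keep the reference count balanced against the workqueue: queue_delayed_work() returns true only when the work was not already pending, so a reference is taken exactly once per queued instance, and cancel_delayed_work() returns true only when it actually removed a pending instance, so the matching reference is dropped. The third leg of the pattern is the handler itself, which on idle paths drops the queue-time reference directly, and on busy paths hands it on to the command it issues (u132_hcd_giveback_urb() performs the eventual put). A sketch of the handler side:

static void u132_hcd_endp_work_scheduler(struct work_struct *work)
{
        struct u132_endp *endp =
                container_of(work, struct u132_endp, scheduler.work);
        struct u132 *u132 = endp->u132;

        if (endp->queue_next == endp->queue_last) {
                /* nothing queued: drop the queue-time reference and idle */
                u132_endp_put_kref(u132, endp);
                return;
        }
        /* otherwise issue an edset_*() command; the reference travels
         * with it and is dropped when the URB is finally given back */
}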
504 static void u132_hcd_giveback_urb(struct u132 *u132, struct u132_endp *endp, in u132_hcd_giveback_urb() argument
511 spin_lock_irqsave(&endp->queue_lock.slock, irqs); in u132_hcd_giveback_urb()
513 endp->queue_next += 1; in u132_hcd_giveback_urb()
514 if (ENDP_QUEUE_SIZE > --endp->queue_size) { in u132_hcd_giveback_urb()
515 endp->active = 0; in u132_hcd_giveback_urb()
516 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); in u132_hcd_giveback_urb()
518 struct list_head *next = endp->urb_more.next; in u132_hcd_giveback_urb()
522 endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = in u132_hcd_giveback_urb()
524 endp->active = 0; in u132_hcd_giveback_urb()
525 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); in u132_hcd_giveback_urb()
529 ring = endp->ring; in u132_hcd_giveback_urb()
534 u132_endp_put_kref(u132, endp); in u132_hcd_giveback_urb()
538 static void u132_hcd_forget_urb(struct u132 *u132, struct u132_endp *endp, in u132_hcd_forget_urb() argument
541 u132_endp_put_kref(u132, endp); in u132_hcd_forget_urb()
544 static void u132_hcd_abandon_urb(struct u132 *u132, struct u132_endp *endp, in u132_hcd_abandon_urb() argument
550 spin_lock_irqsave(&endp->queue_lock.slock, irqs); in u132_hcd_abandon_urb()
552 endp->queue_next += 1; in u132_hcd_abandon_urb()
553 if (ENDP_QUEUE_SIZE > --endp->queue_size) { in u132_hcd_abandon_urb()
554 endp->active = 0; in u132_hcd_abandon_urb()
555 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); in u132_hcd_abandon_urb()
557 struct list_head *next = endp->urb_more.next; in u132_hcd_abandon_urb()
561 endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = in u132_hcd_abandon_urb()
563 endp->active = 0; in u132_hcd_abandon_urb()
564 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); in u132_hcd_abandon_urb()
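u132_hcd_giveback_urb() and u132_hcd_abandon_urb() share the same retire-the-head logic for the two-level URB queue: urb_list[] is a power-of-two ring indexed by masking free-running u16 counters, and urb_more is an unbounded overflow list used once the ring is full. A hypothetical helper condensing the shared fragment, with struct u132_urbq inferred from the list usage:

struct u132_urbq {
        struct list_head urb_more;
        struct urb *urb;
};

static void u132_endp_retire_head(struct u132_endp *endp)
{
        endp->queue_next += 1;
        if (ENDP_QUEUE_SIZE > --endp->queue_size) {
                /* everything left already fits in urb_list[] */
                endp->active = 0;
        } else {
                /* promote one overflowed URB from urb_more into the ring */
                struct list_head *next = endp->urb_more.next;
                struct u132_urbq *urbq =
                        list_entry(next, struct u132_urbq, urb_more);

                list_del(next);
                endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] =
                        urbq->urb;
                endp->active = 0;
                kfree(urbq);
        }
}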
571 struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits, in edset_input() argument
572 void (*callback) (void *endp, struct urb *urb, u8 *buf, int len, in edset_input()
576 return usb_ftdi_elan_edset_input(u132->platform_dev, ring->number, endp, in edset_input()
577 urb, address, endp->usb_endp, toggle_bits, callback); in edset_input()
581 struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits, in edset_setup() argument
582 void (*callback) (void *endp, struct urb *urb, u8 *buf, int len, in edset_setup()
586 return usb_ftdi_elan_edset_setup(u132->platform_dev, ring->number, endp, in edset_setup()
587 urb, address, endp->usb_endp, toggle_bits, callback); in edset_setup()
591 struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits, in edset_single() argument
592 void (*callback) (void *endp, struct urb *urb, u8 *buf, int len, in edset_single()
597 endp, urb, address, endp->usb_endp, toggle_bits, callback); in edset_single()
601 struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits, in edset_output() argument
602 void (*callback) (void *endp, struct urb *urb, u8 *buf, int len, in edset_output()
607 endp, urb, address, endp->usb_endp, toggle_bits, callback); in edset_output()
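The four edset_* wrappers (input, setup, single, output) are thin shims over the usb_ftdi_elan_edset_* entry points exported by the companion ftdi-elan module; each fills in the platform device, the ring number, and the endpoint number from endp->usb_endp. One wrapper reconstructed in full; the tail of the callback signature is inferred from the completion handlers below and is an assumption:

static inline int edset_input(struct u132 *u132, struct u132_ring *ring,
        struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
        void (*callback)(void *endp, struct urb *urb, u8 *buf, int len,
                int toggle_bits, int error_count, int condition_code,
                int repeat_number, int halted, int skipped, int actual,
                int non_null))
{
        return usb_ftdi_elan_edset_input(u132->platform_dev, ring->number,
                endp, urb, address, endp->usb_endp, toggle_bits, callback);
}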
619 struct u132_endp *endp = data; in u132_hcd_interrupt_recv() local
620 struct u132 *u132 = endp->u132; in u132_hcd_interrupt_recv()
621 u8 address = u132->addr[endp->usb_addr].address; in u132_hcd_interrupt_recv()
628 u132_hcd_forget_urb(u132, endp, urb, -ENODEV); in u132_hcd_interrupt_recv()
630 } else if (endp->dequeueing) { in u132_hcd_interrupt_recv()
631 endp->dequeueing = 0; in u132_hcd_interrupt_recv()
633 u132_hcd_giveback_urb(u132, endp, urb, -EINTR); in u132_hcd_interrupt_recv()
639 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); in u132_hcd_interrupt_recv()
642 struct u132_ring *ring = endp->ring; in u132_hcd_interrupt_recv()
653 endp->toggle_bits = toggle_bits; in u132_hcd_interrupt_recv()
654 usb_settoggle(udev->usb_device, endp->usb_endp, 0, in u132_hcd_interrupt_recv()
659 retval = edset_single(u132, ring, endp, urb, in u132_hcd_interrupt_recv()
660 address, endp->toggle_bits, in u132_hcd_interrupt_recv()
663 u132_hcd_giveback_urb(u132, endp, urb, in u132_hcd_interrupt_recv()
667 endp->active = 0; in u132_hcd_interrupt_recv()
668 endp->jiffies = jiffies + in u132_hcd_interrupt_recv()
673 u132_endp_put_kref(u132, endp); in u132_hcd_interrupt_recv()
678 endp->toggle_bits = toggle_bits; in u132_hcd_interrupt_recv()
679 usb_settoggle(udev->usb_device, endp->usb_endp, 0, in u132_hcd_interrupt_recv()
682 u132_hcd_giveback_urb(u132, endp, urb, 0); in u132_hcd_interrupt_recv()
686 endp->toggle_bits = toggle_bits; in u132_hcd_interrupt_recv()
687 usb_settoggle(udev->usb_device, endp->usb_endp, in u132_hcd_interrupt_recv()
690 endp->toggle_bits = 0x2; in u132_hcd_interrupt_recv()
691 usb_settoggle(udev->usb_device, endp->usb_endp, in u132_hcd_interrupt_recv()
694 endp->toggle_bits = 0x2; in u132_hcd_interrupt_recv()
695 usb_settoggle(udev->usb_device, endp->usb_endp, in u132_hcd_interrupt_recv()
702 u132_hcd_giveback_urb(u132, endp, urb, in u132_hcd_interrupt_recv()
710 u132_hcd_giveback_urb(u132, endp, urb, 0); in u132_hcd_interrupt_recv()
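u132_hcd_interrupt_recv() and every *_sent/*_recv handler that follows open with the same triage: if the controller is going away the URB is forgotten, if an unlink raced with completion the URB is given back with -EINTR, and otherwise normal completion handling runs. A sketch of the shared prologue under an assumed u132->going state counter (the specific conditions are not visible in the fragments, only the resulting error codes; the handler name is hypothetical):

static void u132_hcd_example_recv(void *data, struct urb *urb, u8 *buf,
        int len, int toggle_bits, int error_count, int condition_code,
        int repeat_number, int halted, int skipped, int actual, int non_null)
{
        struct u132_endp *endp = data;
        struct u132 *u132 = endp->u132;

        if (u132->going > 1) {
                /* host controller is being removed */
                u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
        } else if (endp->dequeueing) {
                /* an unlink raced with this completion */
                endp->dequeueing = 0;
                u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
        } else if (u132->going > 0) {
                /* controller shutting down: fail the URB */
                u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
        } else {
                /* normal completion: record the new data toggle, account
                 * the transferred bytes, then either resubmit the next
                 * chunk via edset_*() or give the URB back */
                endp->toggle_bits = toggle_bits;
                u132_hcd_giveback_urb(u132, endp, urb, 0);
        }
}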
719 struct u132_endp *endp = data; in u132_hcd_bulk_output_sent() local
720 struct u132 *u132 = endp->u132; in u132_hcd_bulk_output_sent()
721 u8 address = u132->addr[endp->usb_addr].address; in u132_hcd_bulk_output_sent()
727 u132_hcd_forget_urb(u132, endp, urb, -ENODEV); in u132_hcd_bulk_output_sent()
729 } else if (endp->dequeueing) { in u132_hcd_bulk_output_sent()
730 endp->dequeueing = 0; in u132_hcd_bulk_output_sent()
732 u132_hcd_giveback_urb(u132, endp, urb, -EINTR); in u132_hcd_bulk_output_sent()
738 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); in u132_hcd_bulk_output_sent()
741 struct u132_ring *ring = endp->ring; in u132_hcd_bulk_output_sent()
743 endp->toggle_bits = toggle_bits; in u132_hcd_bulk_output_sent()
747 retval = edset_output(u132, ring, endp, urb, address, in u132_hcd_bulk_output_sent()
748 endp->toggle_bits, u132_hcd_bulk_output_sent); in u132_hcd_bulk_output_sent()
750 u132_hcd_giveback_urb(u132, endp, urb, retval); in u132_hcd_bulk_output_sent()
754 u132_hcd_giveback_urb(u132, endp, urb, 0); in u132_hcd_bulk_output_sent()
761 u132_hcd_giveback_urb(u132, endp, urb, 0); in u132_hcd_bulk_output_sent()
770 struct u132_endp *endp = data; in u132_hcd_bulk_input_recv() local
771 struct u132 *u132 = endp->u132; in u132_hcd_bulk_input_recv()
772 u8 address = u132->addr[endp->usb_addr].address; in u132_hcd_bulk_input_recv()
779 u132_hcd_forget_urb(u132, endp, urb, -ENODEV); in u132_hcd_bulk_input_recv()
781 } else if (endp->dequeueing) { in u132_hcd_bulk_input_recv()
782 endp->dequeueing = 0; in u132_hcd_bulk_input_recv()
784 u132_hcd_giveback_urb(u132, endp, urb, -EINTR); in u132_hcd_bulk_input_recv()
790 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); in u132_hcd_bulk_input_recv()
793 struct u132_ring *ring = endp->ring; in u132_hcd_bulk_input_recv()
805 endp->toggle_bits = toggle_bits; in u132_hcd_bulk_input_recv()
806 usb_settoggle(udev->usb_device, endp->usb_endp, 0, in u132_hcd_bulk_input_recv()
810 ring->number, endp, urb, address, in u132_hcd_bulk_input_recv()
811 endp->usb_endp, endp->toggle_bits, in u132_hcd_bulk_input_recv()
814 u132_hcd_giveback_urb(u132, endp, urb, retval); in u132_hcd_bulk_input_recv()
817 endp->toggle_bits = toggle_bits; in u132_hcd_bulk_input_recv()
818 usb_settoggle(udev->usb_device, endp->usb_endp, 0, in u132_hcd_bulk_input_recv()
821 u132_hcd_giveback_urb(u132, endp, urb, in u132_hcd_bulk_input_recv()
826 endp->toggle_bits = toggle_bits; in u132_hcd_bulk_input_recv()
827 usb_settoggle(udev->usb_device, endp->usb_endp, 0, in u132_hcd_bulk_input_recv()
830 u132_hcd_giveback_urb(u132, endp, urb, 0); in u132_hcd_bulk_input_recv()
833 endp->toggle_bits = toggle_bits; in u132_hcd_bulk_input_recv()
834 usb_settoggle(udev->usb_device, endp->usb_endp, 0, in u132_hcd_bulk_input_recv()
840 u132_hcd_giveback_urb(u132, endp, urb, 0); in u132_hcd_bulk_input_recv()
843 endp->toggle_bits = 0x2; in u132_hcd_bulk_input_recv()
844 usb_settoggle(udev->usb_device, endp->usb_endp, 0, 0); in u132_hcd_bulk_input_recv()
846 u132_hcd_giveback_urb(u132, endp, urb, in u132_hcd_bulk_input_recv()
850 endp->toggle_bits = 0x2; in u132_hcd_bulk_input_recv()
851 usb_settoggle(udev->usb_device, endp->usb_endp, 0, 0); in u132_hcd_bulk_input_recv()
856 u132_hcd_giveback_urb(u132, endp, urb, in u132_hcd_bulk_input_recv()
864 u132_hcd_giveback_urb(u132, endp, urb, 0); in u132_hcd_bulk_input_recv()
873 struct u132_endp *endp = data; in u132_hcd_configure_empty_sent() local
874 struct u132 *u132 = endp->u132; in u132_hcd_configure_empty_sent()
880 u132_hcd_forget_urb(u132, endp, urb, -ENODEV); in u132_hcd_configure_empty_sent()
882 } else if (endp->dequeueing) { in u132_hcd_configure_empty_sent()
883 endp->dequeueing = 0; in u132_hcd_configure_empty_sent()
885 u132_hcd_giveback_urb(u132, endp, urb, -EINTR); in u132_hcd_configure_empty_sent()
891 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); in u132_hcd_configure_empty_sent()
895 u132_hcd_giveback_urb(u132, endp, urb, 0); in u132_hcd_configure_empty_sent()
901 u132_hcd_giveback_urb(u132, endp, urb, 0); in u132_hcd_configure_empty_sent()
910 struct u132_endp *endp = data; in u132_hcd_configure_input_recv() local
911 struct u132 *u132 = endp->u132; in u132_hcd_configure_input_recv()
912 u8 address = u132->addr[endp->usb_addr].address; in u132_hcd_configure_input_recv()
918 u132_hcd_forget_urb(u132, endp, urb, -ENODEV); in u132_hcd_configure_input_recv()
920 } else if (endp->dequeueing) { in u132_hcd_configure_input_recv()
921 endp->dequeueing = 0; in u132_hcd_configure_input_recv()
923 u132_hcd_giveback_urb(u132, endp, urb, -EINTR); in u132_hcd_configure_input_recv()
929 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); in u132_hcd_configure_input_recv()
932 struct u132_ring *ring = endp->ring; in u132_hcd_configure_input_recv()
947 ring->number, endp, urb, address, in u132_hcd_configure_input_recv()
948 endp->usb_endp, 0x3, in u132_hcd_configure_input_recv()
951 u132_hcd_giveback_urb(u132, endp, urb, retval); in u132_hcd_configure_input_recv()
957 u132_hcd_giveback_urb(u132, endp, urb, in u132_hcd_configure_input_recv()
965 u132_hcd_giveback_urb(u132, endp, urb, in u132_hcd_configure_input_recv()
973 u132_hcd_giveback_urb(u132, endp, urb, 0); in u132_hcd_configure_input_recv()
982 struct u132_endp *endp = data; in u132_hcd_configure_empty_recv() local
983 struct u132 *u132 = endp->u132; in u132_hcd_configure_empty_recv()
989 u132_hcd_forget_urb(u132, endp, urb, -ENODEV); in u132_hcd_configure_empty_recv()
991 } else if (endp->dequeueing) { in u132_hcd_configure_empty_recv()
992 endp->dequeueing = 0; in u132_hcd_configure_empty_recv()
994 u132_hcd_giveback_urb(u132, endp, urb, -EINTR); in u132_hcd_configure_empty_recv()
1000 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); in u132_hcd_configure_empty_recv()
1004 u132_hcd_giveback_urb(u132, endp, urb, 0); in u132_hcd_configure_empty_recv()
1010 u132_hcd_giveback_urb(u132, endp, urb, 0); in u132_hcd_configure_empty_recv()
1019 struct u132_endp *endp = data; in u132_hcd_configure_setup_sent() local
1020 struct u132 *u132 = endp->u132; in u132_hcd_configure_setup_sent()
1021 u8 address = u132->addr[endp->usb_addr].address; in u132_hcd_configure_setup_sent()
1027 u132_hcd_forget_urb(u132, endp, urb, -ENODEV); in u132_hcd_configure_setup_sent()
1029 } else if (endp->dequeueing) { in u132_hcd_configure_setup_sent()
1030 endp->dequeueing = 0; in u132_hcd_configure_setup_sent()
1032 u132_hcd_giveback_urb(u132, endp, urb, -EINTR); in u132_hcd_configure_setup_sent()
1038 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); in u132_hcd_configure_setup_sent()
1043 struct u132_ring *ring = endp->ring; in u132_hcd_configure_setup_sent()
1046 ring->number, endp, urb, address, in u132_hcd_configure_setup_sent()
1047 endp->usb_endp, 0, in u132_hcd_configure_setup_sent()
1050 u132_hcd_giveback_urb(u132, endp, urb, retval); in u132_hcd_configure_setup_sent()
1054 struct u132_ring *ring = endp->ring; in u132_hcd_configure_setup_sent()
1057 ring->number, endp, urb, address, in u132_hcd_configure_setup_sent()
1058 endp->usb_endp, 0, in u132_hcd_configure_setup_sent()
1061 u132_hcd_giveback_urb(u132, endp, urb, retval); in u132_hcd_configure_setup_sent()
1068 u132_hcd_giveback_urb(u132, endp, urb, 0); in u132_hcd_configure_setup_sent()
1077 struct u132_endp *endp = data; in u132_hcd_enumeration_empty_recv() local
1078 struct u132 *u132 = endp->u132; in u132_hcd_enumeration_empty_recv()
1079 u8 address = u132->addr[endp->usb_addr].address; in u132_hcd_enumeration_empty_recv()
1086 u132_hcd_forget_urb(u132, endp, urb, -ENODEV); in u132_hcd_enumeration_empty_recv()
1088 } else if (endp->dequeueing) { in u132_hcd_enumeration_empty_recv()
1089 endp->dequeueing = 0; in u132_hcd_enumeration_empty_recv()
1091 u132_hcd_giveback_urb(u132, endp, urb, -EINTR); in u132_hcd_enumeration_empty_recv()
1097 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); in u132_hcd_enumeration_empty_recv()
1101 endp->usb_addr = udev->usb_addr; in u132_hcd_enumeration_empty_recv()
1103 u132_hcd_giveback_urb(u132, endp, urb, 0); in u132_hcd_enumeration_empty_recv()
1109 u132_hcd_giveback_urb(u132, endp, urb, 0); in u132_hcd_enumeration_empty_recv()
1118 struct u132_endp *endp = data; in u132_hcd_enumeration_address_sent() local
1119 struct u132 *u132 = endp->u132; in u132_hcd_enumeration_address_sent()
1125 u132_hcd_forget_urb(u132, endp, urb, -ENODEV); in u132_hcd_enumeration_address_sent()
1127 } else if (endp->dequeueing) { in u132_hcd_enumeration_address_sent()
1128 endp->dequeueing = 0; in u132_hcd_enumeration_address_sent()
1130 u132_hcd_giveback_urb(u132, endp, urb, -EINTR); in u132_hcd_enumeration_address_sent()
1136 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); in u132_hcd_enumeration_address_sent()
1140 struct u132_ring *ring = endp->ring; in u132_hcd_enumeration_address_sent()
1143 ring->number, endp, urb, 0, endp->usb_endp, 0, in u132_hcd_enumeration_address_sent()
1146 u132_hcd_giveback_urb(u132, endp, urb, retval); in u132_hcd_enumeration_address_sent()
1152 u132_hcd_giveback_urb(u132, endp, urb, 0); in u132_hcd_enumeration_address_sent()
1161 struct u132_endp *endp = data; in u132_hcd_initial_empty_sent() local
1162 struct u132 *u132 = endp->u132; in u132_hcd_initial_empty_sent()
1168 u132_hcd_forget_urb(u132, endp, urb, -ENODEV); in u132_hcd_initial_empty_sent()
1170 } else if (endp->dequeueing) { in u132_hcd_initial_empty_sent()
1171 endp->dequeueing = 0; in u132_hcd_initial_empty_sent()
1173 u132_hcd_giveback_urb(u132, endp, urb, -EINTR); in u132_hcd_initial_empty_sent()
1179 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); in u132_hcd_initial_empty_sent()
1183 u132_hcd_giveback_urb(u132, endp, urb, 0); in u132_hcd_initial_empty_sent()
1189 u132_hcd_giveback_urb(u132, endp, urb, 0); in u132_hcd_initial_empty_sent()
1198 struct u132_endp *endp = data; in u132_hcd_initial_input_recv() local
1199 struct u132 *u132 = endp->u132; in u132_hcd_initial_input_recv()
1200 u8 address = u132->addr[endp->usb_addr].address; in u132_hcd_initial_input_recv()
1206 u132_hcd_forget_urb(u132, endp, urb, -ENODEV); in u132_hcd_initial_input_recv()
1208 } else if (endp->dequeueing) { in u132_hcd_initial_input_recv()
1209 endp->dequeueing = 0; in u132_hcd_initial_input_recv()
1211 u132_hcd_giveback_urb(u132, endp, urb, -EINTR); in u132_hcd_initial_input_recv()
1217 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); in u132_hcd_initial_input_recv()
1221 struct u132_ring *ring = endp->ring; in u132_hcd_initial_input_recv()
1232 ring->number, endp, urb, address, endp->usb_endp, 0x3, in u132_hcd_initial_input_recv()
1235 u132_hcd_giveback_urb(u132, endp, urb, retval); in u132_hcd_initial_input_recv()
1241 u132_hcd_giveback_urb(u132, endp, urb, 0); in u132_hcd_initial_input_recv()
1250 struct u132_endp *endp = data; in u132_hcd_initial_setup_sent() local
1251 struct u132 *u132 = endp->u132; in u132_hcd_initial_setup_sent()
1252 u8 address = u132->addr[endp->usb_addr].address; in u132_hcd_initial_setup_sent()
1258 u132_hcd_forget_urb(u132, endp, urb, -ENODEV); in u132_hcd_initial_setup_sent()
1260 } else if (endp->dequeueing) { in u132_hcd_initial_setup_sent()
1261 endp->dequeueing = 0; in u132_hcd_initial_setup_sent()
1263 u132_hcd_giveback_urb(u132, endp, urb, -EINTR); in u132_hcd_initial_setup_sent()
1269 u132_hcd_giveback_urb(u132, endp, urb, -ENODEV); in u132_hcd_initial_setup_sent()
1273 struct u132_ring *ring = endp->ring; in u132_hcd_initial_setup_sent()
1276 ring->number, endp, urb, address, endp->usb_endp, 0, in u132_hcd_initial_setup_sent()
1279 u132_hcd_giveback_urb(u132, endp, urb, retval); in u132_hcd_initial_setup_sent()
1285 u132_hcd_giveback_urb(u132, endp, urb, 0); in u132_hcd_initial_setup_sent()
1305 struct u132_endp *endp, *last_endp = ring->curr_endp; in u132_hcd_ring_work_scheduler() local
1307 list_for_each_entry(endp, &last_endp->endp_ring, endp_ring) { in u132_hcd_ring_work_scheduler()
1308 if (endp->queue_next == endp->queue_last) { in u132_hcd_ring_work_scheduler()
1309 } else if ((endp->delayed == 0) in u132_hcd_ring_work_scheduler()
1310 || time_after_eq(jiffies, endp->jiffies)) { in u132_hcd_ring_work_scheduler()
1311 ring->curr_endp = endp; in u132_hcd_ring_work_scheduler()
1318 unsigned long delta = endp->jiffies - jiffies; in u132_hcd_ring_work_scheduler()
1355 struct u132_endp *endp = in u132_hcd_endp_work_scheduler() local
1357 struct u132 *u132 = endp->u132; in u132_hcd_endp_work_scheduler()
1359 ring = endp->ring; in u132_hcd_endp_work_scheduler()
1360 if (endp->edset_flush) { in u132_hcd_endp_work_scheduler()
1361 endp->edset_flush = 0; in u132_hcd_endp_work_scheduler()
1362 if (endp->dequeueing) in u132_hcd_endp_work_scheduler()
1364 ring->number, endp); in u132_hcd_endp_work_scheduler()
1366 u132_endp_put_kref(u132, endp); in u132_hcd_endp_work_scheduler()
1368 } else if (endp->active) { in u132_hcd_endp_work_scheduler()
1370 u132_endp_put_kref(u132, endp); in u132_hcd_endp_work_scheduler()
1374 u132_endp_put_kref(u132, endp); in u132_hcd_endp_work_scheduler()
1376 } else if (endp->queue_next == endp->queue_last) { in u132_hcd_endp_work_scheduler()
1378 u132_endp_put_kref(u132, endp); in u132_hcd_endp_work_scheduler()
1380 } else if (endp->pipetype == PIPE_INTERRUPT) { in u132_hcd_endp_work_scheduler()
1381 u8 address = u132->addr[endp->usb_addr].address; in u132_hcd_endp_work_scheduler()
1384 u132_endp_put_kref(u132, endp); in u132_hcd_endp_work_scheduler()
1388 struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK & in u132_hcd_endp_work_scheduler()
1389 endp->queue_next]; in u132_hcd_endp_work_scheduler()
1390 endp->active = 1; in u132_hcd_endp_work_scheduler()
1391 ring->curr_endp = endp; in u132_hcd_endp_work_scheduler()
1394 retval = edset_single(u132, ring, endp, urb, address, in u132_hcd_endp_work_scheduler()
1395 endp->toggle_bits, u132_hcd_interrupt_recv); in u132_hcd_endp_work_scheduler()
1397 u132_hcd_giveback_urb(u132, endp, urb, retval); in u132_hcd_endp_work_scheduler()
1400 } else if (endp->pipetype == PIPE_CONTROL) { in u132_hcd_endp_work_scheduler()
1401 u8 address = u132->addr[endp->usb_addr].address; in u132_hcd_endp_work_scheduler()
1404 u132_endp_put_kref(u132, endp); in u132_hcd_endp_work_scheduler()
1408 struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK & in u132_hcd_endp_work_scheduler()
1409 endp->queue_next]; in u132_hcd_endp_work_scheduler()
1410 endp->active = 1; in u132_hcd_endp_work_scheduler()
1411 ring->curr_endp = endp; in u132_hcd_endp_work_scheduler()
1414 retval = edset_setup(u132, ring, endp, urb, address, in u132_hcd_endp_work_scheduler()
1417 u132_hcd_giveback_urb(u132, endp, urb, retval); in u132_hcd_endp_work_scheduler()
1419 } else if (endp->usb_addr == 0) { in u132_hcd_endp_work_scheduler()
1421 struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK & in u132_hcd_endp_work_scheduler()
1422 endp->queue_next]; in u132_hcd_endp_work_scheduler()
1423 endp->active = 1; in u132_hcd_endp_work_scheduler()
1424 ring->curr_endp = endp; in u132_hcd_endp_work_scheduler()
1427 retval = edset_setup(u132, ring, endp, urb, 0, 0x2, in u132_hcd_endp_work_scheduler()
1430 u132_hcd_giveback_urb(u132, endp, urb, retval); in u132_hcd_endp_work_scheduler()
1434 struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK & in u132_hcd_endp_work_scheduler()
1435 endp->queue_next]; in u132_hcd_endp_work_scheduler()
1436 address = u132->addr[endp->usb_addr].address; in u132_hcd_endp_work_scheduler()
1437 endp->active = 1; in u132_hcd_endp_work_scheduler()
1438 ring->curr_endp = endp; in u132_hcd_endp_work_scheduler()
1441 retval = edset_setup(u132, ring, endp, urb, address, in u132_hcd_endp_work_scheduler()
1444 u132_hcd_giveback_urb(u132, endp, urb, retval); in u132_hcd_endp_work_scheduler()
1448 if (endp->input) { in u132_hcd_endp_work_scheduler()
1449 u8 address = u132->addr[endp->usb_addr].address; in u132_hcd_endp_work_scheduler()
1452 u132_endp_put_kref(u132, endp); in u132_hcd_endp_work_scheduler()
1456 struct urb *urb = endp->urb_list[ in u132_hcd_endp_work_scheduler()
1457 ENDP_QUEUE_MASK & endp->queue_next]; in u132_hcd_endp_work_scheduler()
1458 endp->active = 1; in u132_hcd_endp_work_scheduler()
1459 ring->curr_endp = endp; in u132_hcd_endp_work_scheduler()
1462 retval = edset_input(u132, ring, endp, urb, in u132_hcd_endp_work_scheduler()
1463 address, endp->toggle_bits, in u132_hcd_endp_work_scheduler()
1467 u132_hcd_giveback_urb(u132, endp, urb, in u132_hcd_endp_work_scheduler()
1472 u8 address = u132->addr[endp->usb_addr].address; in u132_hcd_endp_work_scheduler()
1475 u132_endp_put_kref(u132, endp); in u132_hcd_endp_work_scheduler()
1479 struct urb *urb = endp->urb_list[ in u132_hcd_endp_work_scheduler()
1480 ENDP_QUEUE_MASK & endp->queue_next]; in u132_hcd_endp_work_scheduler()
1481 endp->active = 1; in u132_hcd_endp_work_scheduler()
1482 ring->curr_endp = endp; in u132_hcd_endp_work_scheduler()
1485 retval = edset_output(u132, ring, endp, urb, in u132_hcd_endp_work_scheduler()
1486 address, endp->toggle_bits, in u132_hcd_endp_work_scheduler()
1490 u132_hcd_giveback_urb(u132, endp, urb, in u132_hcd_endp_work_scheduler()
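u132_hcd_endp_work_scheduler() is the per-endpoint dispatcher queued via endp->scheduler: it first honours any pending flush, bails out if a transfer is already active or the queue is empty, and otherwise claims the ring and issues one command whose completion callback depends on the pipe type. A condensed sketch of the dispatch (hypothetical helper name; locking, the shutdown checks and the control-pipe sub-cases for enumeration versus configuration are simplified away):

static void endp_scheduler_body(struct u132 *u132, struct u132_endp *endp)
{
        struct u132_ring *ring = endp->ring;

        if (endp->edset_flush) {
                endp->edset_flush = 0;
                if (endp->dequeueing)
                        usb_ftdi_elan_edset_flush(u132->platform_dev,
                                                  ring->number, endp);
                u132_endp_put_kref(u132, endp);
        } else if (endp->active ||
                   endp->queue_next == endp->queue_last) {
                u132_endp_put_kref(u132, endp);         /* nothing to do */
        } else {
                struct urb *urb =
                        endp->urb_list[ENDP_QUEUE_MASK & endp->queue_next];
                u8 address = u132->addr[endp->usb_addr].address;
                int retval;

                endp->active = 1;
                ring->curr_endp = endp;

                if (endp->pipetype == PIPE_INTERRUPT)
                        retval = edset_single(u132, ring, endp, urb, address,
                                        endp->toggle_bits,
                                        u132_hcd_interrupt_recv);
                else if (endp->pipetype == PIPE_CONTROL)
                        /* the driver picks among the enumeration, configure
                         * and initial setup callbacks above according to
                         * the device's address state */
                        retval = edset_setup(u132, ring, endp, urb, address,
                                        0x2, u132_hcd_initial_setup_sent);
                else if (endp->input)
                        retval = edset_input(u132, ring, endp, urb, address,
                                        endp->toggle_bits,
                                        u132_hcd_bulk_input_recv);
                else
                        retval = edset_output(u132, ring, endp, urb, address,
                                        endp->toggle_bits,
                                        u132_hcd_bulk_output_sent);

                if (retval != 0)
                        u132_hcd_giveback_urb(u132, endp, urb, retval);
        }
}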
1856 struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags); in create_endpoint_and_queue_int() local
1858 if (!endp) in create_endpoint_and_queue_int()
1861 spin_lock_init(&endp->queue_lock.slock); in create_endpoint_and_queue_int()
1862 spin_lock_irqsave(&endp->queue_lock.slock, irqs); in create_endpoint_and_queue_int()
1865 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); in create_endpoint_and_queue_int()
1866 kfree(endp); in create_endpoint_and_queue_int()
1871 urb->ep->hcpriv = u132->endp[endp_number - 1] = endp; in create_endpoint_and_queue_int()
1872 INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler); in create_endpoint_and_queue_int()
1873 INIT_LIST_HEAD(&endp->urb_more); in create_endpoint_and_queue_int()
1874 ring = endp->ring = &u132->ring[0]; in create_endpoint_and_queue_int()
1876 list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring); in create_endpoint_and_queue_int()
1878 INIT_LIST_HEAD(&endp->endp_ring); in create_endpoint_and_queue_int()
1879 ring->curr_endp = endp; in create_endpoint_and_queue_int()
1882 endp->dequeueing = 0; in create_endpoint_and_queue_int()
1883 endp->edset_flush = 0; in create_endpoint_and_queue_int()
1884 endp->active = 0; in create_endpoint_and_queue_int()
1885 endp->delayed = 0; in create_endpoint_and_queue_int()
1886 endp->endp_number = endp_number; in create_endpoint_and_queue_int()
1887 endp->u132 = u132; in create_endpoint_and_queue_int()
1888 endp->hep = urb->ep; in create_endpoint_and_queue_int()
1889 endp->pipetype = usb_pipetype(urb->pipe); in create_endpoint_and_queue_int()
1890 u132_endp_init_kref(u132, endp); in create_endpoint_and_queue_int()
1892 endp->toggle_bits = 0x2; in create_endpoint_and_queue_int()
1894 endp->input = 1; in create_endpoint_and_queue_int()
1895 endp->output = 0; in create_endpoint_and_queue_int()
1899 endp->toggle_bits = 0x2; in create_endpoint_and_queue_int()
1901 endp->input = 0; in create_endpoint_and_queue_int()
1902 endp->output = 1; in create_endpoint_and_queue_int()
1907 endp->delayed = 1; in create_endpoint_and_queue_int()
1908 endp->jiffies = jiffies + msecs_to_jiffies(urb->interval); in create_endpoint_and_queue_int()
1909 endp->udev_number = address; in create_endpoint_and_queue_int()
1910 endp->usb_addr = usb_addr; in create_endpoint_and_queue_int()
1911 endp->usb_endp = usb_endp; in create_endpoint_and_queue_int()
1912 endp->queue_size = 1; in create_endpoint_and_queue_int()
1913 endp->queue_last = 0; in create_endpoint_and_queue_int()
1914 endp->queue_next = 0; in create_endpoint_and_queue_int()
1915 endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb; in create_endpoint_and_queue_int()
1916 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); in create_endpoint_and_queue_int()
1917 u132_endp_queue_work(u132, endp, msecs_to_jiffies(urb->interval)); in create_endpoint_and_queue_int()
1923 struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr, in queue_int_on_old_endpoint() argument
1927 endp->delayed = 1; in queue_int_on_old_endpoint()
1928 endp->jiffies = jiffies + msecs_to_jiffies(urb->interval); in queue_int_on_old_endpoint()
1929 if (endp->queue_size++ < ENDP_QUEUE_SIZE) { in queue_int_on_old_endpoint()
1930 endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb; in queue_int_on_old_endpoint()
1935 endp->queue_size -= 1; in queue_int_on_old_endpoint()
1938 list_add_tail(&urbq->urb_more, &endp->urb_more); in queue_int_on_old_endpoint()
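queue_int_on_old_endpoint() is the enqueue side of the same two-level queue: the slot counter is bumped optimistically, the URB goes into the ring if it fits, and otherwise a u132_urbq wrapper (sketched earlier) is chained onto urb_more; for interrupt pipes the endpoint is also marked delayed with a jiffies deadline so the scheduler paces it by urb->interval. A hypothetical helper for the shared part:

static int u132_endp_queue_urb(struct u132_endp *endp, struct urb *urb,
                               gfp_t mem_flags)
{
        if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
                endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
        } else {
                struct u132_urbq *urbq = kmalloc(sizeof(*urbq), mem_flags);

                if (!urbq) {
                        endp->queue_size -= 1;  /* undo the optimistic bump */
                        return -ENOMEM;
                }
                urbq->urb = urb;
                list_add_tail(&urbq->urb_more, &endp->urb_more);
        }
        return 0;
}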
1955 struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags); in create_endpoint_and_queue_bulk() local
1957 if (!endp) in create_endpoint_and_queue_bulk()
1960 spin_lock_init(&endp->queue_lock.slock); in create_endpoint_and_queue_bulk()
1961 spin_lock_irqsave(&endp->queue_lock.slock, irqs); in create_endpoint_and_queue_bulk()
1964 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); in create_endpoint_and_queue_bulk()
1965 kfree(endp); in create_endpoint_and_queue_bulk()
1970 urb->ep->hcpriv = u132->endp[endp_number - 1] = endp; in create_endpoint_and_queue_bulk()
1971 INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler); in create_endpoint_and_queue_bulk()
1972 INIT_LIST_HEAD(&endp->urb_more); in create_endpoint_and_queue_bulk()
1973 endp->dequeueing = 0; in create_endpoint_and_queue_bulk()
1974 endp->edset_flush = 0; in create_endpoint_and_queue_bulk()
1975 endp->active = 0; in create_endpoint_and_queue_bulk()
1976 endp->delayed = 0; in create_endpoint_and_queue_bulk()
1977 endp->endp_number = endp_number; in create_endpoint_and_queue_bulk()
1978 endp->u132 = u132; in create_endpoint_and_queue_bulk()
1979 endp->hep = urb->ep; in create_endpoint_and_queue_bulk()
1980 endp->pipetype = usb_pipetype(urb->pipe); in create_endpoint_and_queue_bulk()
1981 u132_endp_init_kref(u132, endp); in create_endpoint_and_queue_bulk()
1983 endp->toggle_bits = 0x2; in create_endpoint_and_queue_bulk()
1986 endp->input = 1; in create_endpoint_and_queue_bulk()
1987 endp->output = 0; in create_endpoint_and_queue_bulk()
1991 endp->toggle_bits = 0x2; in create_endpoint_and_queue_bulk()
1994 endp->input = 0; in create_endpoint_and_queue_bulk()
1995 endp->output = 1; in create_endpoint_and_queue_bulk()
1999 ring = endp->ring = &u132->ring[ring_number - 1]; in create_endpoint_and_queue_bulk()
2001 list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring); in create_endpoint_and_queue_bulk()
2003 INIT_LIST_HEAD(&endp->endp_ring); in create_endpoint_and_queue_bulk()
2004 ring->curr_endp = endp; in create_endpoint_and_queue_bulk()
2008 endp->udev_number = address; in create_endpoint_and_queue_bulk()
2009 endp->usb_addr = usb_addr; in create_endpoint_and_queue_bulk()
2010 endp->usb_endp = usb_endp; in create_endpoint_and_queue_bulk()
2011 endp->queue_size = 1; in create_endpoint_and_queue_bulk()
2012 endp->queue_last = 0; in create_endpoint_and_queue_bulk()
2013 endp->queue_next = 0; in create_endpoint_and_queue_bulk()
2014 endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb; in create_endpoint_and_queue_bulk()
2015 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); in create_endpoint_and_queue_bulk()
2016 u132_endp_queue_work(u132, endp, 0); in create_endpoint_and_queue_bulk()
2022 struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr, in queue_bulk_on_old_endpoint() argument
2026 if (endp->queue_size++ < ENDP_QUEUE_SIZE) { in queue_bulk_on_old_endpoint()
2027 endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb; in queue_bulk_on_old_endpoint()
2032 endp->queue_size -= 1; in queue_bulk_on_old_endpoint()
2035 list_add_tail(&urbq->urb_more, &endp->urb_more); in queue_bulk_on_old_endpoint()
2051 struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags); in create_endpoint_and_queue_control() local
2053 if (!endp) in create_endpoint_and_queue_control()
2056 spin_lock_init(&endp->queue_lock.slock); in create_endpoint_and_queue_control()
2057 spin_lock_irqsave(&endp->queue_lock.slock, irqs); in create_endpoint_and_queue_control()
2060 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); in create_endpoint_and_queue_control()
2061 kfree(endp); in create_endpoint_and_queue_control()
2066 urb->ep->hcpriv = u132->endp[endp_number - 1] = endp; in create_endpoint_and_queue_control()
2067 INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler); in create_endpoint_and_queue_control()
2068 INIT_LIST_HEAD(&endp->urb_more); in create_endpoint_and_queue_control()
2069 ring = endp->ring = &u132->ring[0]; in create_endpoint_and_queue_control()
2071 list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring); in create_endpoint_and_queue_control()
2073 INIT_LIST_HEAD(&endp->endp_ring); in create_endpoint_and_queue_control()
2074 ring->curr_endp = endp; in create_endpoint_and_queue_control()
2077 endp->dequeueing = 0; in create_endpoint_and_queue_control()
2078 endp->edset_flush = 0; in create_endpoint_and_queue_control()
2079 endp->active = 0; in create_endpoint_and_queue_control()
2080 endp->delayed = 0; in create_endpoint_and_queue_control()
2081 endp->endp_number = endp_number; in create_endpoint_and_queue_control()
2082 endp->u132 = u132; in create_endpoint_and_queue_control()
2083 endp->hep = urb->ep; in create_endpoint_and_queue_control()
2084 u132_endp_init_kref(u132, endp); in create_endpoint_and_queue_control()
2085 u132_endp_get_kref(u132, endp); in create_endpoint_and_queue_control()
2089 endp->udev_number = address; in create_endpoint_and_queue_control()
2090 endp->usb_addr = usb_addr; in create_endpoint_and_queue_control()
2091 endp->usb_endp = usb_endp; in create_endpoint_and_queue_control()
2092 endp->input = 1; in create_endpoint_and_queue_control()
2093 endp->output = 1; in create_endpoint_and_queue_control()
2094 endp->pipetype = usb_pipetype(urb->pipe); in create_endpoint_and_queue_control()
2100 endp->queue_size = 1; in create_endpoint_and_queue_control()
2101 endp->queue_last = 0; in create_endpoint_and_queue_control()
2102 endp->queue_next = 0; in create_endpoint_and_queue_control()
2103 endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb; in create_endpoint_and_queue_control()
2104 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); in create_endpoint_and_queue_control()
2105 u132_endp_queue_work(u132, endp, 0); in create_endpoint_and_queue_control()
2110 endp->udev_number = address; in create_endpoint_and_queue_control()
2111 endp->usb_addr = usb_addr; in create_endpoint_and_queue_control()
2112 endp->usb_endp = usb_endp; in create_endpoint_and_queue_control()
2113 endp->input = 1; in create_endpoint_and_queue_control()
2114 endp->output = 1; in create_endpoint_and_queue_control()
2115 endp->pipetype = usb_pipetype(urb->pipe); in create_endpoint_and_queue_control()
2121 endp->queue_size = 1; in create_endpoint_and_queue_control()
2122 endp->queue_last = 0; in create_endpoint_and_queue_control()
2123 endp->queue_next = 0; in create_endpoint_and_queue_control()
2124 endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb; in create_endpoint_and_queue_control()
2125 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); in create_endpoint_and_queue_control()
2126 u132_endp_queue_work(u132, endp, 0); in create_endpoint_and_queue_control()
2133 struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr, in queue_control_on_old_endpoint() argument
2139 if (endp->queue_size++ < ENDP_QUEUE_SIZE) { in queue_control_on_old_endpoint()
2140 endp->urb_list[ENDP_QUEUE_MASK & in queue_control_on_old_endpoint()
2141 endp->queue_last++] = urb; in queue_control_on_old_endpoint()
2147 endp->queue_size -= 1; in queue_control_on_old_endpoint()
2151 &endp->urb_more); in queue_control_on_old_endpoint()
2167 endp->udev_number = i; in queue_control_on_old_endpoint()
2172 endp->endp_number; in queue_control_on_old_endpoint()
2175 endp->endp_number; in queue_control_on_old_endpoint()
2189 if (endp->queue_size++ < ENDP_QUEUE_SIZE) { in queue_control_on_old_endpoint()
2190 endp->urb_list[ENDP_QUEUE_MASK & in queue_control_on_old_endpoint()
2191 endp->queue_last++] = urb; in queue_control_on_old_endpoint()
2197 endp->queue_size -= 1; in queue_control_on_old_endpoint()
2201 &endp->urb_more); in queue_control_on_old_endpoint()
2213 if (endp->queue_size++ < ENDP_QUEUE_SIZE) { in queue_control_on_old_endpoint()
2214 endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = in queue_control_on_old_endpoint()
2220 endp->queue_size -= 1; in queue_control_on_old_endpoint()
2223 list_add_tail(&urbq->urb_more, &endp->urb_more); in queue_control_on_old_endpoint()
2256 struct u132_endp *endp = urb->ep->hcpriv; in u132_urb_enqueue() local
2258 if (endp) { in u132_urb_enqueue()
2261 spin_lock_irqsave(&endp->queue_lock.slock, in u132_urb_enqueue()
2267 usb_dev, endp, in u132_urb_enqueue()
2274 spin_unlock_irqrestore(&endp->queue_lock.slock, in u132_urb_enqueue()
2279 u132_endp_queue_work(u132, endp, in u132_urb_enqueue()
2298 struct u132_endp *endp = urb->ep->hcpriv; in u132_urb_enqueue() local
2300 if (endp) { in u132_urb_enqueue()
2303 spin_lock_irqsave(&endp->queue_lock.slock, in u132_urb_enqueue()
2309 usb_dev, endp, in u132_urb_enqueue()
2316 spin_unlock_irqrestore(&endp->queue_lock.slock, in u132_urb_enqueue()
2321 u132_endp_queue_work(u132, endp, 0); in u132_urb_enqueue()
2331 struct u132_endp *endp = urb->ep->hcpriv; in u132_urb_enqueue() local
2349 if (endp) { in u132_urb_enqueue()
2352 spin_lock_irqsave(&endp->queue_lock.slock, in u132_urb_enqueue()
2358 endp, usb_addr, in u132_urb_enqueue()
2364 spin_unlock_irqrestore(&endp->queue_lock.slock, in u132_urb_enqueue()
2369 u132_endp_queue_work(u132, endp, 0); in u132_urb_enqueue()
2383 struct u132_endp *endp, struct urb *urb) in dequeue_from_overflow_chain() argument
2387 list_for_each_entry(urbq, &endp->urb_more, urb_more) { in dequeue_from_overflow_chain()
2391 endp->queue_size -= 1; in dequeue_from_overflow_chain()
2399 "\n", urb, endp->endp_number, endp, endp->ring->number, in dequeue_from_overflow_chain()
2400 endp->input ? 'I' : ' ', endp->output ? 'O' : ' ', in dequeue_from_overflow_chain()
2401 endp->usb_endp, endp->usb_addr, endp->queue_size, in dequeue_from_overflow_chain()
2402 endp->queue_next, endp->queue_last); in dequeue_from_overflow_chain()
2406 static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp, in u132_endp_urb_dequeue() argument
2412 spin_lock_irqsave(&endp->queue_lock.slock, irqs); in u132_endp_urb_dequeue()
2415 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); in u132_endp_urb_dequeue()
2418 if (endp->queue_size == 0) { in u132_endp_urb_dequeue()
2421 endp->endp_number, endp, endp->ring->number, in u132_endp_urb_dequeue()
2422 endp->input ? 'I' : ' ', endp->output ? 'O' : ' ', in u132_endp_urb_dequeue()
2423 endp->usb_endp, endp->usb_addr); in u132_endp_urb_dequeue()
2424 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); in u132_endp_urb_dequeue()
2427 if (urb == endp->urb_list[ENDP_QUEUE_MASK & endp->queue_next]) { in u132_endp_urb_dequeue()
2428 if (endp->active) { in u132_endp_urb_dequeue()
2429 endp->dequeueing = 1; in u132_endp_urb_dequeue()
2430 endp->edset_flush = 1; in u132_endp_urb_dequeue()
2431 u132_endp_queue_work(u132, endp, 0); in u132_endp_urb_dequeue()
2432 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); in u132_endp_urb_dequeue()
2435 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); in u132_endp_urb_dequeue()
2436 u132_hcd_abandon_urb(u132, endp, urb, status); in u132_endp_urb_dequeue()
2441 u16 queue_size = endp->queue_size; in u132_endp_urb_dequeue()
2442 u16 queue_scan = endp->queue_next; in u132_endp_urb_dequeue()
2445 if (urb == endp->urb_list[ENDP_QUEUE_MASK & in u132_endp_urb_dequeue()
2447 urb_slot = &endp->urb_list[ENDP_QUEUE_MASK & in u132_endp_urb_dequeue()
2453 *urb_slot = endp->urb_list[ENDP_QUEUE_MASK & in u132_endp_urb_dequeue()
2455 urb_slot = &endp->urb_list[ENDP_QUEUE_MASK & in u132_endp_urb_dequeue()
2462 endp->queue_size -= 1; in u132_endp_urb_dequeue()
2463 if (list_empty(&endp->urb_more)) { in u132_endp_urb_dequeue()
2464 spin_unlock_irqrestore(&endp->queue_lock.slock, in u132_endp_urb_dequeue()
2467 struct list_head *next = endp->urb_more.next; in u132_endp_urb_dequeue()
2472 spin_unlock_irqrestore(&endp->queue_lock.slock, in u132_endp_urb_dequeue()
2479 } else if (list_empty(&endp->urb_more)) { in u132_endp_urb_dequeue()
2483 endp->endp_number, endp, endp->ring->number, in u132_endp_urb_dequeue()
2484 endp->input ? 'I' : ' ', in u132_endp_urb_dequeue()
2485 endp->output ? 'O' : ' ', endp->usb_endp, in u132_endp_urb_dequeue()
2486 endp->usb_addr, endp->queue_size, in u132_endp_urb_dequeue()
2487 endp->queue_next, endp->queue_last); in u132_endp_urb_dequeue()
2488 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); in u132_endp_urb_dequeue()
2494 retval = dequeue_from_overflow_chain(u132, endp, in u132_endp_urb_dequeue()
2496 spin_unlock_irqrestore(&endp->queue_lock.slock, irqs); in u132_endp_urb_dequeue()
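u132_endp_urb_dequeue() distinguishes three cases: the victim URB is at the head of the queue with a transfer in flight (mark dequeueing and edset_flush and let the scheduler flush the hardware), it is at the head but idle (abandon it directly), or it sits somewhere in the middle, in which case the ring is scanned and the entries behind it are shifted down a slot to close the gap before the URB is given back outside the lock. A sketch of that scan; the tail-counter bookkeeping is elided and assumed, and endp->queue_lock.slock is held throughout:

u16 queue_size = endp->queue_size;
u16 queue_scan = endp->queue_next;
struct urb **urb_slot = NULL;

/* find the slot holding the victim URB */
while (queue_size-- > 0) {
        if (urb == endp->urb_list[ENDP_QUEUE_MASK & queue_scan]) {
                urb_slot = &endp->urb_list[ENDP_QUEUE_MASK & queue_scan];
                break;
        }
        queue_scan += 1;
}

/* shift every later entry down one slot to close the gap */
while (urb_slot && queue_size-- > 0) {
        *urb_slot = endp->urb_list[ENDP_QUEUE_MASK & ++queue_scan];
        urb_slot = &endp->urb_list[ENDP_QUEUE_MASK & queue_scan];
}

if (urb_slot)
        endp->queue_size -= 1;  /* one fewer URB queued */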
2516 struct u132_endp *endp = u132->endp[endp_number - 1]; in u132_urb_dequeue() local
2517 return u132_endp_urb_dequeue(u132, endp, urb, status); in u132_urb_dequeue()
2520 struct u132_endp *endp = u132->endp[endp_number - 1]; in u132_urb_dequeue() local
2521 return u132_endp_urb_dequeue(u132, endp, urb, status); in u132_urb_dequeue()
2535 struct u132_endp *endp = hep->hcpriv; in u132_endpoint_disable() local
2536 if (endp) in u132_endpoint_disable()
2537 u132_endp_put_kref(u132, endp); in u132_endpoint_disable()
2982 struct u132_endp *endp = u132->endp[endps]; in u132_remove() local
2983 if (endp) in u132_remove()
2984 u132_endp_cancel_work(u132, endp); in u132_remove()
3050 u132->endp[endps] = NULL; in u132_initialise()