Lines Matching refs: sdev

14 void stub_enqueue_ret_unlink(struct stub_device *sdev, __u32 seqnum,  in stub_enqueue_ret_unlink()  argument
21 usbip_event_add(&sdev->ud, VDEV_EVENT_ERROR_MALLOC); in stub_enqueue_ret_unlink()
28 list_add_tail(&unlink->list, &sdev->unlink_tx); in stub_enqueue_ret_unlink()
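
Taken together, the matches at lines 14, 21 and 28 span nearly all of stub_enqueue_ret_unlink(). A minimal reconstruction follows; the allocation and field assignments are assumptions not visible in the listing, and the function presumably runs under sdev->priv_lock, since stub_complete() (line 91 below) takes that lock before calling it.

void stub_enqueue_ret_unlink(struct stub_device *sdev, __u32 seqnum,
			     __u32 status)
{
	struct stub_unlink *unlink;

	/* GFP_ATOMIC: may be called from urb completion context */
	unlink = kzalloc(sizeof(struct stub_unlink), GFP_ATOMIC);
	if (!unlink) {
		usbip_event_add(&sdev->ud, VDEV_EVENT_ERROR_MALLOC);
		return;
	}

	unlink->seqnum = seqnum;
	unlink->status = status;

	/* hand the pending RET_UNLINK reply over to the tx thread */
	list_add_tail(&unlink->list, &sdev->unlink_tx);
}
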
43 struct stub_device *sdev = priv->sdev; in stub_complete() local
91 spin_lock_irqsave(&sdev->priv_lock, flags); in stub_complete()
92 if (sdev->ud.tcp_socket == NULL) { in stub_complete()
96 stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status); in stub_complete()
99 list_move_tail(&priv->list, &sdev->priv_tx); in stub_complete()
101 spin_unlock_irqrestore(&sdev->priv_lock, flags); in stub_complete()
104 wake_up(&sdev->tx_waitq); in stub_complete()
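
The stub_complete() matches (lines 43 through 104) outline the handler's tail: under priv_lock, a completed urb is dropped if the connection is gone, turned into a RET_UNLINK reply if it was unlinked, or queued on priv_tx for transmission, after which the tx thread is woken. A sketch with the urb status handling elided; the priv->unlinking flag name is an assumption:

void stub_complete(struct urb *urb)
{
	struct stub_priv *priv = (struct stub_priv *) urb->context;
	struct stub_device *sdev = priv->sdev;	/* line 43 */
	unsigned long flags;

	/* ... urb->status logging elided ... */

	spin_lock_irqsave(&sdev->priv_lock, flags);
	if (sdev->ud.tcp_socket == NULL) {
		/* connection closed: nothing to send back */
	} else if (priv->unlinking) {
		/* answer the unlink instead of the submit (line 96) */
		stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status);
	} else {
		/* queue the completed urb for stub_send_ret_submit() */
		list_move_tail(&priv->list, &sdev->priv_tx);
	}
	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	/* wake the tx thread sleeping in stub_tx_loop() */
	wake_up(&sdev->tx_waitq);
}
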
132 static struct stub_priv *dequeue_from_priv_tx(struct stub_device *sdev) in dequeue_from_priv_tx() argument
137 spin_lock_irqsave(&sdev->priv_lock, flags); in dequeue_from_priv_tx()
139 list_for_each_entry_safe(priv, tmp, &sdev->priv_tx, list) { in dequeue_from_priv_tx()
140 list_move_tail(&priv->list, &sdev->priv_free); in dequeue_from_priv_tx()
141 spin_unlock_irqrestore(&sdev->priv_lock, flags); in dequeue_from_priv_tx()
145 spin_unlock_irqrestore(&sdev->priv_lock, flags); in dequeue_from_priv_tx()
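
dequeue_from_priv_tx() is nearly complete in the matched lines; only the local declarations and the fall-through return are filled in here:

static struct stub_priv *dequeue_from_priv_tx(struct stub_device *sdev)
{
	unsigned long flags;
	struct stub_priv *priv, *tmp;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	/* pop the first completed urb, parking it on priv_free so the
	 * send loop can release it after transmission */
	list_for_each_entry_safe(priv, tmp, &sdev->priv_tx, list) {
		list_move_tail(&priv->list, &sdev->priv_free);
		spin_unlock_irqrestore(&sdev->priv_lock, flags);
		return priv;
	}

	spin_unlock_irqrestore(&sdev->priv_lock, flags);
	return NULL;
}
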
150 static int stub_send_ret_submit(struct stub_device *sdev) in stub_send_ret_submit() argument
160 while ((priv = dequeue_from_priv_tx(sdev)) != NULL) { in stub_send_ret_submit()
177 dev_err(&sdev->udev->dev, in stub_send_ret_submit()
196 usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_MALLOC); in stub_send_ret_submit()
287 dev_err(&sdev->udev->dev, in stub_send_ret_submit()
292 usbip_event_add(&sdev->ud, in stub_send_ret_submit()
304 usbip_event_add(&sdev->ud, in stub_send_ret_submit()
316 ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg, in stub_send_ret_submit()
319 dev_err(&sdev->udev->dev, in stub_send_ret_submit()
324 usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP); in stub_send_ret_submit()
334 spin_lock_irqsave(&sdev->priv_lock, flags); in stub_send_ret_submit()
335 list_for_each_entry_safe(priv, tmp, &sdev->priv_free, list) { in stub_send_ret_submit()
338 spin_unlock_irqrestore(&sdev->priv_lock, flags); in stub_send_ret_submit()
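
The stub_send_ret_submit() matches show the send loop's skeleton: drain priv_tx through dequeue_from_priv_tx(), kernel_sendmsg() each reply on the device's TCP socket, raise SDEV_EVENT_ERROR_MALLOC or SDEV_EVENT_ERROR_TCP on failure, then free everything parked on priv_free. A condensed sketch; isochronous handling and the allocation-failure paths behind lines 196 and 287-304 are elided, and the setup_ret_submit_pdu() and stub_free_priv_and_urb() helper names are assumptions:

static int stub_send_ret_submit(struct stub_device *sdev)
{
	struct stub_priv *priv, *tmp;
	unsigned long flags;
	size_t total_size = 0;

	while ((priv = dequeue_from_priv_tx(sdev)) != NULL) {
		struct urb *urb = priv->urb;
		struct usbip_header pdu_header;
		struct msghdr msg;
		struct kvec iov[2];
		size_t txsize = 0;
		int iovnum = 0;
		int ret;

		memset(&pdu_header, 0, sizeof(pdu_header));
		memset(&msg, 0, sizeof(msg));

		/* 1. RET_SUBMIT header, converted to network byte order */
		setup_ret_submit_pdu(&pdu_header, urb);
		usbip_header_correct_endian(&pdu_header, 1);
		iov[iovnum].iov_base = &pdu_header;
		iov[iovnum].iov_len  = sizeof(pdu_header);
		iovnum++;
		txsize += sizeof(pdu_header);

		/* 2. payload for IN transfers */
		if (usb_pipein(urb->pipe) && urb->actual_length > 0) {
			iov[iovnum].iov_base = urb->transfer_buffer;
			iov[iovnum].iov_len  = urb->actual_length;
			iovnum++;
			txsize += urb->actual_length;
		}

		ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg,
				     iov, iovnum, txsize);
		if (ret != txsize) {
			dev_err(&sdev->udev->dev,
				"sendmsg failed!, retval %d for %zd\n",
				ret, txsize);
			usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
			return -1;
		}
		total_size += txsize;
	}

	/* release urbs parked on priv_free by dequeue_from_priv_tx() */
	spin_lock_irqsave(&sdev->priv_lock, flags);
	list_for_each_entry_safe(priv, tmp, &sdev->priv_free, list)
		stub_free_priv_and_urb(priv);
	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return total_size;
}
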
343 static struct stub_unlink *dequeue_from_unlink_tx(struct stub_device *sdev) in dequeue_from_unlink_tx() argument
348 spin_lock_irqsave(&sdev->priv_lock, flags); in dequeue_from_unlink_tx()
350 list_for_each_entry_safe(unlink, tmp, &sdev->unlink_tx, list) { in dequeue_from_unlink_tx()
351 list_move_tail(&unlink->list, &sdev->unlink_free); in dequeue_from_unlink_tx()
352 spin_unlock_irqrestore(&sdev->priv_lock, flags); in dequeue_from_unlink_tx()
356 spin_unlock_irqrestore(&sdev->priv_lock, flags); in dequeue_from_unlink_tx()
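
dequeue_from_unlink_tx() mirrors dequeue_from_priv_tx(), popping from unlink_tx onto unlink_free under the same priv_lock:

static struct stub_unlink *dequeue_from_unlink_tx(struct stub_device *sdev)
{
	unsigned long flags;
	struct stub_unlink *unlink, *tmp;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	list_for_each_entry_safe(unlink, tmp, &sdev->unlink_tx, list) {
		list_move_tail(&unlink->list, &sdev->unlink_free);
		spin_unlock_irqrestore(&sdev->priv_lock, flags);
		return unlink;
	}

	spin_unlock_irqrestore(&sdev->priv_lock, flags);
	return NULL;
}
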
361 static int stub_send_ret_unlink(struct stub_device *sdev) in stub_send_ret_unlink() argument
372 while ((unlink = dequeue_from_unlink_tx(sdev)) != NULL) { in stub_send_ret_unlink()
391 ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg, iov, in stub_send_ret_unlink()
394 dev_err(&sdev->udev->dev, in stub_send_ret_unlink()
397 usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP); in stub_send_ret_unlink()
405 spin_lock_irqsave(&sdev->priv_lock, flags); in stub_send_ret_unlink()
407 list_for_each_entry_safe(unlink, tmp, &sdev->unlink_free, list) { in stub_send_ret_unlink()
412 spin_unlock_irqrestore(&sdev->priv_lock, flags); in stub_send_ret_unlink()
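
stub_send_ret_unlink() follows the same shape with a header-only payload, then frees the records parked on unlink_free. A sketch; the setup_ret_unlink_pdu() helper name is an assumption:

static int stub_send_ret_unlink(struct stub_device *sdev)
{
	struct stub_unlink *unlink, *tmp;
	unsigned long flags;
	size_t total_size = 0;

	while ((unlink = dequeue_from_unlink_tx(sdev)) != NULL) {
		struct usbip_header pdu_header;
		struct msghdr msg;
		struct kvec iov[1];
		size_t txsize = sizeof(pdu_header);
		int ret;

		memset(&pdu_header, 0, sizeof(pdu_header));
		memset(&msg, 0, sizeof(msg));
		memset(&iov, 0, sizeof(iov));

		/* RET_UNLINK carries only the header with the status */
		setup_ret_unlink_pdu(&pdu_header, unlink);
		usbip_header_correct_endian(&pdu_header, 1);

		iov[0].iov_base = &pdu_header;
		iov[0].iov_len  = txsize;

		ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg, iov,
				     1, txsize);
		if (ret != txsize) {
			dev_err(&sdev->udev->dev,
				"sendmsg failed!, retval %d for %zd\n",
				ret, txsize);
			usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
			return -1;
		}
		total_size += txsize;
	}

	/* drop the transmitted unlink records */
	spin_lock_irqsave(&sdev->priv_lock, flags);
	list_for_each_entry_safe(unlink, tmp, &sdev->unlink_free, list) {
		list_del(&unlink->list);
		kfree(unlink);
	}
	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return total_size;
}
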
420 struct stub_device *sdev = container_of(ud, struct stub_device, ud); in stub_tx_loop() local
440 if (stub_send_ret_submit(sdev) < 0) in stub_tx_loop()
443 if (stub_send_ret_unlink(sdev) < 0) in stub_tx_loop()
446 wait_event_interruptible(sdev->tx_waitq, in stub_tx_loop()
447 (!list_empty(&sdev->priv_tx) || in stub_tx_loop()
448 !list_empty(&sdev->unlink_tx) || in stub_tx_loop()
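
Finally, the stub_tx_loop() matches outline the tx kthread itself: flush completed submits, then unlink replies, then sleep on tx_waitq until stub_complete() (line 104) or the rx side queues more work. The kthread and event-check scaffolding here is an assumption:

int stub_tx_loop(void *data)
{
	struct usbip_device *ud = data;
	struct stub_device *sdev = container_of(ud, struct stub_device, ud);

	while (!kthread_should_stop()) {
		if (usbip_event_happened(ud))
			break;

		/* RET_SUBMITs are sent before RET_UNLINKs */
		if (stub_send_ret_submit(sdev) < 0)
			break;
		if (stub_send_ret_unlink(sdev) < 0)
			break;

		wait_event_interruptible(sdev->tx_waitq,
					 (!list_empty(&sdev->priv_tx) ||
					  !list_empty(&sdev->unlink_tx) ||
					  kthread_should_stop()));
	}

	return 0;
}
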