Lines Matching refs:vsock
Each hit below shows the source line number, the matching source line, and the enclosing function.
72 struct vhost_vsock *vsock; in vhost_vsock_get() local
74 hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) { in vhost_vsock_get()
75 u32 other_cid = vsock->guest_cid; in vhost_vsock_get()
82 return vsock; in vhost_vsock_get()
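
The hits at 72-82 fall in vhost_vsock_get(), the RCU hash lookup that resolves a guest CID to its vhost_vsock instance. A minimal reconstruction from these lines; the skip of not-yet-assigned CIDs is an assumption filled in from context:

/* Callers must hold vhost_vsock_mutex or the RCU read lock. */
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID assigned yet (assumed). */
		if (other_cid == 0)
			continue;

		if (other_cid == guest_cid)
			return vsock;
	}

	return NULL;
}
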
90 vhost_transport_do_send_pkt(struct vhost_vsock *vsock, in vhost_transport_do_send_pkt() argument
93 struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX]; in vhost_transport_do_send_pkt()
107 vhost_disable_notify(&vsock->dev, vq); in vhost_transport_do_send_pkt()
120 skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue); in vhost_transport_do_send_pkt()
123 vhost_enable_notify(&vsock->dev, vq); in vhost_transport_do_send_pkt()
130 virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb); in vhost_transport_do_send_pkt()
135 virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb); in vhost_transport_do_send_pkt()
139 if (unlikely(vhost_enable_notify(&vsock->dev, vq))) { in vhost_transport_do_send_pkt()
140 vhost_disable_notify(&vsock->dev, vq); in vhost_transport_do_send_pkt()
233 virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb); in vhost_transport_do_send_pkt()
238 val = atomic_dec_return(&vsock->queued_replies); in vhost_transport_do_send_pkt()
251 vhost_signal(&vsock->dev, vq); in vhost_transport_do_send_pkt()
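
The hits at 90-251 belong to vhost_transport_do_send_pkt(), the loop that drains send_pkt_queue into the guest's RX virtqueue. A condensed control-flow skeleton built around the matched lines; descriptor setup, the header/payload copy, and the loop-weight bookkeeping are elided or assumed:

static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	int pkts = 0, total_len = 0;
	bool restart_tx = false;
	bool added = false;

	mutex_lock(&vq->mutex);
	/* ... backend and metadata-prefetch checks elided ... */

	/* 107: suppress guest kicks while we process the ring */
	vhost_disable_notify(&vsock->dev, vq);

	do {
		struct sk_buff *skb;
		unsigned int out, in;
		int head;

		skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue); /* 120 */
		if (!skb) {
			vhost_enable_notify(&vsock->dev, vq);	/* 123 */
			break;	/* queue drained */
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {	/* error: put the skb back and bail (130) */
			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
			break;
		}
		if (head == vq->num) {	/* no buffer available: requeue (135) */
			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
			/* 139-140: re-arm notify, then re-check for a race */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		/* ... copy header + payload into the guest buffer; a short
		 * copy requeues the skb the same way (233) ... */

		if (virtio_vsock_skb_reply(skb)) {
			int val;

			val = atomic_dec_return(&vsock->queued_replies); /* 238 */
			/* Restart TX if it was throttled on replies (assumed) */
			if (val + 1 == tx_vq->num)
				restart_tx = true;
		}
		added = true;
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));

	if (added)
		vhost_signal(&vsock->dev, vq);	/* 251: notify the guest */

	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}
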
263 struct vhost_vsock *vsock; in vhost_transport_send_pkt_work() local
265 vsock = container_of(work, struct vhost_vsock, send_pkt_work); in vhost_transport_send_pkt_work()
266 vq = &vsock->vqs[VSOCK_VQ_RX]; in vhost_transport_send_pkt_work()
268 vhost_transport_do_send_pkt(vsock, vq); in vhost_transport_send_pkt_work()
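
263-268 are the worker-side glue: the vhost_work callback recovers its device with container_of() and drains the queue into the RX virtqueue. This one is short enough to reconstruct in full (only the declarations at 263 are abbreviated by the search output):

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}
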
275 struct vhost_vsock *vsock; in vhost_transport_send_pkt() local
281 vsock = vhost_vsock_get(le64_to_cpu(hdr->dst_cid)); in vhost_transport_send_pkt()
282 if (!vsock) { in vhost_transport_send_pkt()
289 atomic_inc(&vsock->queued_replies); in vhost_transport_send_pkt()
291 virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb); in vhost_transport_send_pkt()
292 vhost_vq_work_queue(&vsock->vqs[VSOCK_VQ_RX], &vsock->send_pkt_work); in vhost_transport_send_pkt()
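
275-292 are vhost_transport_send_pkt(), the transport entry point: look up the destination device by CID under RCU, account a pending reply if needed, queue the skb, and kick the worker. A sketch assuming the usual RCU bracketing and the reply-flag guard around the atomic_inc() (neither appears in the hits):

static int
vhost_transport_send_pkt(struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vhost_vsock *vsock;
	int len = skb->len;

	rcu_read_lock();

	/* 281: find the vhost_vsock by guest context ID */
	vsock = vhost_vsock_get(le64_to_cpu(hdr->dst_cid));
	if (!vsock) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -ENODEV;
	}

	if (virtio_vsock_skb_reply(skb))
		atomic_inc(&vsock->queued_replies);	/* 289 */

	virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
	vhost_vq_work_queue(&vsock->vqs[VSOCK_VQ_RX], &vsock->send_pkt_work);

	rcu_read_unlock();
	return len;
}
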
301 struct vhost_vsock *vsock; in vhost_transport_cancel_pkt() local
308 vsock = vhost_vsock_get(vsk->remote_addr.svm_cid); in vhost_transport_cancel_pkt()
309 if (!vsock) in vhost_transport_cancel_pkt()
312 cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue); in vhost_transport_cancel_pkt()
315 struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX]; in vhost_transport_cancel_pkt()
318 new_cnt = atomic_sub_return(cnt, &vsock->queued_replies); in vhost_transport_cancel_pkt()
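
301-318 are vhost_transport_cancel_pkt(): purge any queued skbs that belong to the canceled socket, then subtract the purged count from queued_replies. The exact wakeup condition on the TX ring is an assumption; the idea is to reschedule TX processing if the purge just dropped the reply count back below the ring size:

static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	int cnt = 0;
	int ret = -ENODEV;

	rcu_read_lock();

	/* 308: find the vhost_vsock by the peer's context ID */
	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
	if (!vsock)
		goto out;

	cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		/* Un-throttle TX if we just crossed back under vq->num
		 * (condition assumed from context).
		 */
		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
			vhost_poll_queue(&tx_vq->poll);
	}

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
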
391 static bool vhost_vsock_more_replies(struct vhost_vsock *vsock) in vhost_vsock_more_replies() argument
393 struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX]; in vhost_vsock_more_replies()
397 val = atomic_read(&vsock->queued_replies); in vhost_vsock_more_replies()
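
391-397 are the throttle check the TX path relies on: stop consuming TX descriptors once the number of queued replies reaches the TX ring size, so a guest cannot force unbounded host-side queuing. The barrier and the comparison are assumptions consistent with the atomic accounting seen above:

static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}
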
465 struct vhost_vsock *vsock; in vhost_transport_seqpacket_allow() local
469 vsock = vhost_vsock_get(remote_cid); in vhost_transport_seqpacket_allow()
471 if (vsock) in vhost_transport_seqpacket_allow()
472 seqpacket_allow = vsock->seqpacket_allow; in vhost_transport_seqpacket_allow()
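
465-472 are vhost_transport_seqpacket_allow(): another RCU lookup by CID, this time just to read the seqpacket_allow flag latched at feature-set time. Reconstructed with the assumed RCU bracketing:

static bool vhost_transport_seqpacket_allow(u32 remote_cid)
{
	struct vhost_vsock *vsock;
	bool seqpacket_allow = false;

	rcu_read_lock();
	vsock = vhost_vsock_get(remote_cid);

	if (vsock)
		seqpacket_allow = vsock->seqpacket_allow;

	rcu_read_unlock();

	return seqpacket_allow;
}
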
483 struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock, in vhost_vsock_handle_tx_kick() local
498 vhost_disable_notify(&vsock->dev, vq); in vhost_vsock_handle_tx_kick()
502 if (!vhost_vsock_more_replies(vsock)) { in vhost_vsock_handle_tx_kick()
516 if (unlikely(vhost_enable_notify(&vsock->dev, vq))) { in vhost_vsock_handle_tx_kick()
517 vhost_disable_notify(&vsock->dev, vq); in vhost_vsock_handle_tx_kick()
537 if (le64_to_cpu(hdr->src_cid) == vsock->guest_cid && in vhost_vsock_handle_tx_kick()
550 vhost_signal(&vsock->dev, vq); in vhost_vsock_handle_tx_kick()
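
483-550 are vhost_vsock_handle_tx_kick(), the mirror-image loop: pull buffers off the guest's TX ring, turn each into an skb, and hand correctly addressed packets to the core transport. A condensed skeleton; skb allocation and error paths are elided, and the dst_cid half of the address check is an assumption:

static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);	/* 483 */
	int head, pkts = 0, total_len = 0;
	struct virtio_vsock_hdr *hdr;
	unsigned int out, in;
	struct sk_buff *skb;
	bool added = false;

	mutex_lock(&vq->mutex);
	/* ... backend and metadata-prefetch checks elided ... */
	vhost_disable_notify(&vsock->dev, vq);	/* 498 */

	do {
		if (!vhost_vsock_more_replies(vsock)) {	/* 502 */
			/* Stop tx until the device processes already
			 * pending replies.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;
		if (head == vq->num) {
			/* 516-517: re-arm notify and re-check for a race */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		/* ... build skb from the guest buffer; hdr = its header ... */

		/* 537: only accept correctly addressed packets */
		if (le64_to_cpu(hdr->src_cid) == vsock->guest_cid &&
		    le64_to_cpu(hdr->dst_cid) ==
		    vhost_transport_get_local_cid())
			virtio_transport_recv_pkt(&vhost_transport, skb);
		else
			kfree_skb(skb);

		vhost_add_used(vq, head, 0);
		added = true;
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);	/* 550: ack used buffers */

	mutex_unlock(&vq->mutex);
}
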
560 struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock, in vhost_vsock_handle_rx_kick() local
563 vhost_transport_do_send_pkt(vsock, vq); in vhost_vsock_handle_rx_kick()
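
560-563 show that the RX kick handler is trivial: a guest kick on the RX ring just means buffers became available, so the same send routine runs again:

static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}
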
566 static int vhost_vsock_start(struct vhost_vsock *vsock) in vhost_vsock_start() argument
572 mutex_lock(&vsock->dev.mutex); in vhost_vsock_start()
574 ret = vhost_dev_check_owner(&vsock->dev); in vhost_vsock_start()
578 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { in vhost_vsock_start()
579 vq = &vsock->vqs[i]; in vhost_vsock_start()
589 vhost_vq_set_backend(vq, vsock); in vhost_vsock_start()
601 vhost_vq_work_queue(&vsock->vqs[VSOCK_VQ_RX], &vsock->send_pkt_work); in vhost_vsock_start()
603 mutex_unlock(&vsock->dev.mutex); in vhost_vsock_start()
610 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { in vhost_vsock_start()
611 vq = &vsock->vqs[i]; in vhost_vsock_start()
618 mutex_unlock(&vsock->dev.mutex); in vhost_vsock_start()
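
566-618 are vhost_vsock_start(): under the device mutex, verify ownership, attach the device as backend on each virtqueue, and finally kick the send worker in case packets were queued before the device was started. A sketch; the access check, vq init, and the unwind path are filled in as assumptions:

static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq;
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);	/* 572 */

	ret = vhost_dev_check_owner(&vsock->dev);	/* 574 */
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {	/* 578 */
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			goto err_vq;
		}

		if (!vhost_vq_get_backend(vq)) {
			vhost_vq_set_backend(vq, vsock);	/* 589 */
			ret = vhost_vq_init_access(vq);
			if (ret)
				goto err_vq;
		}

		mutex_unlock(&vq->mutex);
	}

	/* 601: flush out packets queued before the device was started */
	vhost_vq_work_queue(&vsock->vqs[VSOCK_VQ_RX], &vsock->send_pkt_work);

	mutex_unlock(&vsock->dev.mutex);	/* 603 */
	return 0;

err_vq:
	vhost_vq_set_backend(vq, NULL);
	mutex_unlock(&vq->mutex);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {	/* 610 */
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vhost_vq_set_backend(vq, NULL);
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);	/* 618 */
	return ret;
}
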
622 static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner) in vhost_vsock_stop() argument
627 mutex_lock(&vsock->dev.mutex); in vhost_vsock_stop()
630 ret = vhost_dev_check_owner(&vsock->dev); in vhost_vsock_stop()
635 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { in vhost_vsock_stop()
636 struct vhost_virtqueue *vq = &vsock->vqs[i]; in vhost_vsock_stop()
644 mutex_unlock(&vsock->dev.mutex); in vhost_vsock_stop()
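
622-644 are the inverse, vhost_vsock_stop(): clear the backend pointer on every virtqueue so the kick handlers become no-ops. The check_owner flag exists because the release path must be able to stop the device even when the caller is not the owner; the conditional is an assumption consistent with the two call sites below (749 and 854):

static int vhost_vsock_stop(struct vhost_vsock *vsock, bool check_owner)
{
	size_t i;
	int ret = 0;

	mutex_lock(&vsock->dev.mutex);	/* 627 */

	if (check_owner) {
		ret = vhost_dev_check_owner(&vsock->dev);	/* 630 */
		if (ret)
			goto err;
	}

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {	/* 635 */
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vhost_vq_set_backend(vq, NULL);
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);	/* 644 */
	return ret;
}
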
648 static void vhost_vsock_free(struct vhost_vsock *vsock) in vhost_vsock_free() argument
650 kvfree(vsock); in vhost_vsock_free()
656 struct vhost_vsock *vsock; in vhost_vsock_dev_open() local
662 vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL); in vhost_vsock_dev_open()
663 if (!vsock) in vhost_vsock_dev_open()
666 vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL); in vhost_vsock_dev_open()
672 vsock->guest_cid = 0; /* no CID assigned yet */ in vhost_vsock_dev_open()
673 vsock->seqpacket_allow = false; in vhost_vsock_dev_open()
675 atomic_set(&vsock->queued_replies, 0); in vhost_vsock_dev_open()
677 vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX]; in vhost_vsock_dev_open()
678 vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX]; in vhost_vsock_dev_open()
679 vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick; in vhost_vsock_dev_open()
680 vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick; in vhost_vsock_dev_open()
682 vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), in vhost_vsock_dev_open()
686 file->private_data = vsock; in vhost_vsock_dev_open()
687 skb_queue_head_init(&vsock->send_pkt_queue); in vhost_vsock_dev_open()
688 vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work); in vhost_vsock_dev_open()
692 vhost_vsock_free(vsock); in vhost_vsock_dev_open()
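
656-692 are vhost_vsock_dev_open(), which builds the whole device on open: a kvmalloc'd vhost_vsock (large struct, hence the vmalloc fallback), the vq pointer array for vhost_dev_init(), kick handlers, and the send queue plus worker. A sketch; the exact vhost_dev_init() argument list (iov limit, weights, IOTLB callback) is elided by the hits and assumed here:

static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* 662: large struct, fall back to vmalloc rather than fail */
	vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!vsock)
		return -ENOMEM;

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->guest_cid = 0; /* no CID assigned yet */
	vsock->seqpacket_allow = false;

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	/* 682: argument list assumed from context */
	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
		       UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
		       VHOST_VSOCK_WEIGHT, true, NULL);

	file->private_data = vsock;
	skb_queue_head_init(&vsock->send_pkt_queue);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
	return 0;

out:
	vhost_vsock_free(vsock);	/* 692 */
	return ret;
}
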
696 static void vhost_vsock_flush(struct vhost_vsock *vsock) in vhost_vsock_flush() argument
698 vhost_dev_flush(&vsock->dev); in vhost_vsock_flush()
729 struct vhost_vsock *vsock = file->private_data; in vhost_vsock_dev_release() local
732 if (vsock->guest_cid) in vhost_vsock_dev_release()
733 hash_del_rcu(&vsock->hash); in vhost_vsock_dev_release()
749 vhost_vsock_stop(vsock, false); in vhost_vsock_dev_release()
750 vhost_vsock_flush(vsock); in vhost_vsock_dev_release()
751 vhost_dev_stop(&vsock->dev); in vhost_vsock_dev_release()
753 virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue); in vhost_vsock_dev_release()
755 vhost_dev_cleanup(&vsock->dev); in vhost_vsock_dev_release()
756 kfree(vsock->dev.vqs); in vhost_vsock_dev_release()
757 vhost_vsock_free(vsock); in vhost_vsock_dev_release()
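
729-757 are the teardown path in vhost_vsock_dev_release(): unhash the CID, wait for concurrent RCU lookups to finish, stop and flush the device, purge any undelivered skbs, and free everything in reverse order of open. The synchronize_rcu() and the orphan-socket reset between unhash and stop are assumptions filled in from context:

static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;	/* 729 */

	mutex_lock(&vhost_vsock_mutex);
	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);	/* 733 */
	mutex_unlock(&vhost_vsock_mutex);

	/* Wait for readers still walking the hash under RCU (assumed). */
	synchronize_rcu();

	/* Reset connected sockets orphaned by the dying device (assumed). */
	vsock_for_each_connected_socket(&vhost_transport.transport,
					vhost_vsock_reset_orphans);

	/* Release path: stop unconditionally, don't check the owner. */
	vhost_vsock_stop(vsock, false);	/* 749 */
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);	/* 753 */

	vhost_dev_cleanup(&vsock->dev);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);	/* 757 */
	return 0;
}
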
761 static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid) in vhost_vsock_set_cid() argument
783 if (other && other != vsock) { in vhost_vsock_set_cid()
788 if (vsock->guest_cid) in vhost_vsock_set_cid()
789 hash_del_rcu(&vsock->hash); in vhost_vsock_set_cid()
791 vsock->guest_cid = guest_cid; in vhost_vsock_set_cid()
792 hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid); in vhost_vsock_set_cid()
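
761-792 are vhost_vsock_set_cid(): reject the CID if it collides with another device, then atomically move this device to the new hash bucket. The reserved-CID validation and the guard against CIDs already claimed by the guest-to-host transport are assumptions; the hits only show the collision check and the rehash:

static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs (assumed validation). */
	if (guest_cid <= VMADDR_CID_HOST || guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported (assumed). */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse CIDs already used by the g2h transport (assumed). */
	if (vsock_find_cid(guest_cid))
		return -EADDRINUSE;

	/* Refuse if the CID is already in use by another device. */
	mutex_lock(&vhost_vsock_mutex);
	other = vhost_vsock_get(guest_cid);
	if (other && other != vsock) {	/* 783 */
		mutex_unlock(&vhost_vsock_mutex);
		return -EADDRINUSE;
	}

	if (vsock->guest_cid)	/* 788: drop the old hash entry */
		hash_del_rcu(&vsock->hash);

	vsock->guest_cid = guest_cid;
	hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
	mutex_unlock(&vhost_vsock_mutex);

	return 0;
}
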
798 static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features) in vhost_vsock_set_features() argument
806 mutex_lock(&vsock->dev.mutex); in vhost_vsock_set_features()
808 !vhost_log_access_ok(&vsock->dev)) { in vhost_vsock_set_features()
813 if (vhost_init_device_iotlb(&vsock->dev)) in vhost_vsock_set_features()
817 vsock->seqpacket_allow = features & (1ULL << VIRTIO_VSOCK_F_SEQPACKET); in vhost_vsock_set_features()
819 for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { in vhost_vsock_set_features()
820 vq = &vsock->vqs[i]; in vhost_vsock_set_features()
825 mutex_unlock(&vsock->dev.mutex); in vhost_vsock_set_features()
829 mutex_unlock(&vsock->dev.mutex); in vhost_vsock_set_features()
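
798-829 are vhost_vsock_set_features(): validate the feature bits against what logging and the device IOTLB require, latch seqpacket support, then propagate the acked features to every virtqueue under its own mutex. The unsupported-bits guard and the acked_features assignment are assumed; the hits show the locking, the IOTLB init, and the seqpacket latch:

static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)	/* assumed guard */
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);	/* 806 */
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev))	/* 808 */
		goto err;

	if (features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)) {
		if (vhost_init_device_iotlb(&vsock->dev))	/* 813 */
			goto err;
	}

	vsock->seqpacket_allow = features & (1ULL << VIRTIO_VSOCK_F_SEQPACKET);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {	/* 819 */
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;	/* assumed */
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;

err:
	mutex_unlock(&vsock->dev.mutex);	/* 829 */
	return -EFAULT;
}
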
836 struct vhost_vsock *vsock = f->private_data; in vhost_vsock_dev_ioctl() local
847 return vhost_vsock_set_cid(vsock, guest_cid); in vhost_vsock_dev_ioctl()
852 return vhost_vsock_start(vsock); in vhost_vsock_dev_ioctl()
854 return vhost_vsock_stop(vsock, true); in vhost_vsock_dev_ioctl()
863 return vhost_vsock_set_features(vsock, features); in vhost_vsock_dev_ioctl()
874 vhost_set_backend_features(&vsock->dev, features); in vhost_vsock_dev_ioctl()
877 mutex_lock(&vsock->dev.mutex); in vhost_vsock_dev_ioctl()
878 r = vhost_dev_ioctl(&vsock->dev, ioctl, argp); in vhost_vsock_dev_ioctl()
880 r = vhost_vring_ioctl(&vsock->dev, ioctl, argp); in vhost_vsock_dev_ioctl()
882 vhost_vsock_flush(vsock); in vhost_vsock_dev_ioctl()
883 mutex_unlock(&vsock->dev.mutex); in vhost_vsock_dev_ioctl()
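
836-883 are the ioctl dispatcher. Device-specific commands (set CID, start/stop, set features) are handled inline; anything else falls through to the generic vhost device and vring ioctls under the device mutex. A condensed sketch; the GET_FEATURES and backend-features cases are elided, and the copy_from_user plumbing is assumed:

static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;	/* 836 */
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);	/* 847 */
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);	/* 852 */
		else
			return vhost_vsock_stop(vsock, true);	/* 854 */
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features); /* 863 */
	/* ... VHOST_GET_FEATURES and VHOST_{GET,SET}_BACKEND_FEATURES
	 * elided; the SET path ends in vhost_set_backend_features() (874).
	 */
	default:
		mutex_lock(&vsock->dev.mutex);	/* 877 */
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);	/* 882 */
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}
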
891 struct vhost_vsock *vsock = file->private_data; in vhost_vsock_chr_read_iter() local
892 struct vhost_dev *dev = &vsock->dev; in vhost_vsock_chr_read_iter()
902 struct vhost_vsock *vsock = file->private_data; in vhost_vsock_chr_write_iter() local
903 struct vhost_dev *dev = &vsock->dev; in vhost_vsock_chr_write_iter()
910 struct vhost_vsock *vsock = file->private_data; in vhost_vsock_chr_poll() local
911 struct vhost_dev *dev = &vsock->dev; in vhost_vsock_chr_poll()
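
891-911 are thin wrappers that forward the misc-device file operations to the generic vhost IOTLB message channel; each just digs the vhost_dev out of private_data. Reconstructed assuming the standard vhost_chr_* helpers these wrappers normally delegate to:

static ssize_t vhost_vsock_chr_read_iter(struct kiocb *iocb,
					 struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vsock *vsock = file->private_data;	/* 891 */
	struct vhost_dev *dev = &vsock->dev;
	int noblock = file->f_flags & O_NONBLOCK;

	return vhost_chr_read_iter(dev, to, noblock);
}

static ssize_t vhost_vsock_chr_write_iter(struct kiocb *iocb,
					  struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vsock *vsock = file->private_data;	/* 902 */
	struct vhost_dev *dev = &vsock->dev;

	return vhost_chr_write_iter(dev, from);
}

static __poll_t vhost_vsock_chr_poll(struct file *file, poll_table *wait)
{
	struct vhost_vsock *vsock = file->private_data;	/* 910 */
	struct vhost_dev *dev = &vsock->dev;

	return vhost_chr_poll(file, dev, wait);
}
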