Lines matching refs:vq (cross-reference listing over drivers/virtio/virtio_ring.c; each entry shows the file line number, the matching code, and the enclosing function)
22 dev_err(&(_vq)->vq.vdev->dev, \
23 "%s:"fmt, (_vq)->vq.name, ##args); \
31 (_vq)->vq.name, (_vq)->in_use); \
59 dev_err(&_vq->vq.vdev->dev, \
60 "%s:"fmt, (_vq)->vq.name, ##args); \
63 #define START_USE(vq) argument
64 #define END_USE(vq) argument
65 #define LAST_ADD_TIME_UPDATE(vq) argument
66 #define LAST_ADD_TIME_CHECK(vq) argument
67 #define LAST_ADD_TIME_INVALID(vq) argument
155 struct virtqueue vq; member
200 bool (*notify)(struct virtqueue *vq);
234 #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
236 static inline bool virtqueue_use_indirect(struct vring_virtqueue *vq, in virtqueue_use_indirect() argument
243 return (vq->indirect && total_sg > 1 && vq->vq.num_free); in virtqueue_use_indirect()
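--- note: from the two fragments above, the full helper plausibly reads as follows; a hedged reconstruction, not verified against the tree:

static inline bool virtqueue_use_indirect(struct vring_virtqueue *vq,
                                          unsigned int total_sg)
{
        /*
         * Use an indirect descriptor table only when the feature was
         * negotiated, the request needs more than one descriptor, and
         * the ring still has a free slot to hold the table pointer.
         */
        return (vq->indirect && total_sg > 1 && vq->vq.num_free);
}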
352 static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq) in vring_dma_dev() argument
354 return vq->dma_dev; in vring_dma_dev()
358 static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq, in vring_map_one_sg() argument
362 if (!vq->use_dma_api) { in vring_map_one_sg()
377 return dma_map_page(vring_dma_dev(vq), in vring_map_one_sg()
382 static dma_addr_t vring_map_single(const struct vring_virtqueue *vq, in vring_map_single() argument
386 if (!vq->use_dma_api) in vring_map_single()
389 return dma_map_single(vring_dma_dev(vq), in vring_map_single()
393 static int vring_mapping_error(const struct vring_virtqueue *vq, in vring_mapping_error() argument
396 if (!vq->use_dma_api) in vring_mapping_error()
399 return dma_mapping_error(vring_dma_dev(vq), addr); in vring_mapping_error()
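--- note: all three helpers share one shape: bypass the DMA API when use_dma_api is clear, otherwise map through the queue's DMA device. A sketch of vring_map_single along those lines; the !use_dma_api branch returning a physical address is an assumption based on how the split/packed paths consume these values:

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
                                   void *cpu_addr, size_t size,
                                   enum dma_data_direction direction)
{
        if (!vq->use_dma_api)
                return (dma_addr_t)virt_to_phys(cpu_addr);

        return dma_map_single(vring_dma_dev(vq),
                              cpu_addr, size, direction);
}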
402 static void virtqueue_init(struct vring_virtqueue *vq, u32 num) in virtqueue_init() argument
404 vq->vq.num_free = num; in virtqueue_init()
406 if (vq->packed_ring) in virtqueue_init()
407 vq->last_used_idx = 0 | (1 << VRING_PACKED_EVENT_F_WRAP_CTR); in virtqueue_init()
409 vq->last_used_idx = 0; in virtqueue_init()
411 vq->event_triggered = false; in virtqueue_init()
412 vq->num_added = 0; in virtqueue_init()
415 vq->in_use = false; in virtqueue_init()
416 vq->last_add_time_valid = false; in virtqueue_init()
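--- note: for the packed ring, file line 407 folds the used wrap counter into the top bit of last_used_idx. Hypothetical decode helpers, shown only to illustrate that encoding (names and bodies are assumptions):

static inline bool packed_used_wrap_counter(u16 last_used_idx)
{
        /* Wrap counter lives in bit VRING_PACKED_EVENT_F_WRAP_CTR (15). */
        return !!(last_used_idx & (1 << VRING_PACKED_EVENT_F_WRAP_CTR));
}

static inline u16 packed_last_used(u16 last_used_idx)
{
        /* The remaining low bits are the actual ring index. */
        return last_used_idx & ~(-(1 << VRING_PACKED_EVENT_F_WRAP_CTR));
}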
425 static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq, in vring_unmap_one_split_indirect() argument
430 if (!vq->use_dma_api) in vring_unmap_one_split_indirect()
433 flags = virtio16_to_cpu(vq->vq.vdev, desc->flags); in vring_unmap_one_split_indirect()
435 dma_unmap_page(vring_dma_dev(vq), in vring_unmap_one_split_indirect()
436 virtio64_to_cpu(vq->vq.vdev, desc->addr), in vring_unmap_one_split_indirect()
437 virtio32_to_cpu(vq->vq.vdev, desc->len), in vring_unmap_one_split_indirect()
442 static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq, in vring_unmap_one_split() argument
445 struct vring_desc_extra *extra = vq->split.desc_extra; in vring_unmap_one_split()
448 if (!vq->use_dma_api) in vring_unmap_one_split()
454 dma_unmap_single(vring_dma_dev(vq), in vring_unmap_one_split()
460 dma_unmap_page(vring_dma_dev(vq), in vring_unmap_one_split()
494 static inline unsigned int virtqueue_add_desc_split(struct virtqueue *vq, in virtqueue_add_desc_split() argument
502 struct vring_virtqueue *vring = to_vvq(vq); in virtqueue_add_desc_split()
506 desc[i].flags = cpu_to_virtio16(vq->vdev, flags); in virtqueue_add_desc_split()
507 desc[i].addr = cpu_to_virtio64(vq->vdev, addr); in virtqueue_add_desc_split()
508 desc[i].len = cpu_to_virtio32(vq->vdev, len); in virtqueue_add_desc_split()
512 desc[i].next = cpu_to_virtio16(vq->vdev, next); in virtqueue_add_desc_split()
518 next = virtio16_to_cpu(vq->vdev, desc[i].next); in virtqueue_add_desc_split()
532 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_add_split() local
539 START_USE(vq); in virtqueue_add_split()
542 BUG_ON(ctx && vq->indirect); in virtqueue_add_split()
544 if (unlikely(vq->broken)) { in virtqueue_add_split()
545 END_USE(vq); in virtqueue_add_split()
549 LAST_ADD_TIME_UPDATE(vq); in virtqueue_add_split()
553 head = vq->free_head; in virtqueue_add_split()
555 if (virtqueue_use_indirect(vq, total_sg)) in virtqueue_add_split()
559 WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect); in virtqueue_add_split()
570 desc = vq->split.vring.desc; in virtqueue_add_split()
575 if (unlikely(vq->vq.num_free < descs_used)) { in virtqueue_add_split()
577 descs_used, vq->vq.num_free); in virtqueue_add_split()
582 vq->notify(&vq->vq); in virtqueue_add_split()
585 END_USE(vq); in virtqueue_add_split()
591 dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE); in virtqueue_add_split()
592 if (vring_mapping_error(vq, addr)) in virtqueue_add_split()
606 dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE); in virtqueue_add_split()
607 if (vring_mapping_error(vq, addr)) in virtqueue_add_split()
623 if (!indirect && vq->use_dma_api) in virtqueue_add_split()
624 vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &= in virtqueue_add_split()
630 vq, desc, total_sg * sizeof(struct vring_desc), in virtqueue_add_split()
632 if (vring_mapping_error(vq, addr)) in virtqueue_add_split()
635 virtqueue_add_desc_split(_vq, vq->split.vring.desc, in virtqueue_add_split()
643 vq->vq.num_free -= descs_used; in virtqueue_add_split()
647 vq->free_head = vq->split.desc_extra[head].next; in virtqueue_add_split()
649 vq->free_head = i; in virtqueue_add_split()
652 vq->split.desc_state[head].data = data; in virtqueue_add_split()
654 vq->split.desc_state[head].indir_desc = desc; in virtqueue_add_split()
656 vq->split.desc_state[head].indir_desc = ctx; in virtqueue_add_split()
660 avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1); in virtqueue_add_split()
661 vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head); in virtqueue_add_split()
665 virtio_wmb(vq->weak_barriers); in virtqueue_add_split()
666 vq->split.avail_idx_shadow++; in virtqueue_add_split()
667 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev, in virtqueue_add_split()
668 vq->split.avail_idx_shadow); in virtqueue_add_split()
669 vq->num_added++; in virtqueue_add_split()
671 pr_debug("Added buffer head %i to %p\n", head, vq); in virtqueue_add_split()
672 END_USE(vq); in virtqueue_add_split()
676 if (unlikely(vq->num_added == (1 << 16) - 1)) in virtqueue_add_split()
693 vring_unmap_one_split_indirect(vq, &desc[i]); in virtqueue_add_split()
696 i = vring_unmap_one_split(vq, i); in virtqueue_add_split()
702 END_USE(vq); in virtqueue_add_split()
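--- note: virtqueue_add_split() maps the first out_sgs buffers DMA_TO_DEVICE and the remaining in_sgs buffers DMA_FROM_DEVICE before publishing the chain. From a driver's side the same out/in split is expressed through virtqueue_add_sgs(); a minimal hedged sketch (req, resp and their types are illustrative):

struct scatterlist hdr, status, *sgs[2];
int err;

sg_init_one(&hdr, req, sizeof(*req));       /* device-readable */
sg_init_one(&status, resp, sizeof(*resp));  /* device-writable */
sgs[0] = &hdr;
sgs[1] = &status;

/* One out_sg followed by one in_sg, matching the DMA_TO_DEVICE /
 * DMA_FROM_DEVICE mapping order seen in virtqueue_add_split(). */
err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
if (!err)
        virtqueue_kick(vq);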
708 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_kick_prepare_split() local
712 START_USE(vq); in virtqueue_kick_prepare_split()
715 virtio_mb(vq->weak_barriers); in virtqueue_kick_prepare_split()
717 old = vq->split.avail_idx_shadow - vq->num_added; in virtqueue_kick_prepare_split()
718 new = vq->split.avail_idx_shadow; in virtqueue_kick_prepare_split()
719 vq->num_added = 0; in virtqueue_kick_prepare_split()
721 LAST_ADD_TIME_CHECK(vq); in virtqueue_kick_prepare_split()
722 LAST_ADD_TIME_INVALID(vq); in virtqueue_kick_prepare_split()
724 if (vq->event) { in virtqueue_kick_prepare_split()
726 vring_avail_event(&vq->split.vring)), in virtqueue_kick_prepare_split()
729 needs_kick = !(vq->split.vring.used->flags & in virtqueue_kick_prepare_split()
733 END_USE(vq); in virtqueue_kick_prepare_split()
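--- note: when the event-index feature is active (vq->event), the test spanning file lines 724-726 is built on vring_need_event() from the UAPI header, which fires only if the new avail index crossed the device's advertised event index:

/* From include/uapi/linux/virtio_ring.h. */
static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
{
        /* True when event_idx lies in the window (old, new_idx], computed
         * in modulo-2^16 arithmetic so index wraparound is handled. */
        return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}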
737 static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head, in detach_buf_split() argument
741 __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT); in detach_buf_split()
744 vq->split.desc_state[head].data = NULL; in detach_buf_split()
749 while (vq->split.vring.desc[i].flags & nextflag) { in detach_buf_split()
750 vring_unmap_one_split(vq, i); in detach_buf_split()
751 i = vq->split.desc_extra[i].next; in detach_buf_split()
752 vq->vq.num_free++; in detach_buf_split()
755 vring_unmap_one_split(vq, i); in detach_buf_split()
756 vq->split.desc_extra[i].next = vq->free_head; in detach_buf_split()
757 vq->free_head = head; in detach_buf_split()
760 vq->vq.num_free++; in detach_buf_split()
762 if (vq->indirect) { in detach_buf_split()
764 vq->split.desc_state[head].indir_desc; in detach_buf_split()
771 len = vq->split.desc_extra[head].len; in detach_buf_split()
773 BUG_ON(!(vq->split.desc_extra[head].flags & in detach_buf_split()
778 vring_unmap_one_split_indirect(vq, &indir_desc[j]); in detach_buf_split()
781 vq->split.desc_state[head].indir_desc = NULL; in detach_buf_split()
783 *ctx = vq->split.desc_state[head].indir_desc; in detach_buf_split()
787 static inline bool more_used_split(const struct vring_virtqueue *vq) in more_used_split() argument
789 return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, in more_used_split()
790 vq->split.vring.used->idx); in more_used_split()
797 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_buf_ctx_split() local
802 START_USE(vq); in virtqueue_get_buf_ctx_split()
804 if (unlikely(vq->broken)) { in virtqueue_get_buf_ctx_split()
805 END_USE(vq); in virtqueue_get_buf_ctx_split()
809 if (!more_used_split(vq)) { in virtqueue_get_buf_ctx_split()
811 END_USE(vq); in virtqueue_get_buf_ctx_split()
816 virtio_rmb(vq->weak_barriers); in virtqueue_get_buf_ctx_split()
818 last_used = (vq->last_used_idx & (vq->split.vring.num - 1)); in virtqueue_get_buf_ctx_split()
820 vq->split.vring.used->ring[last_used].id); in virtqueue_get_buf_ctx_split()
822 vq->split.vring.used->ring[last_used].len); in virtqueue_get_buf_ctx_split()
824 if (unlikely(i >= vq->split.vring.num)) { in virtqueue_get_buf_ctx_split()
825 BAD_RING(vq, "id %u out of range\n", i); in virtqueue_get_buf_ctx_split()
828 if (unlikely(!vq->split.desc_state[i].data)) { in virtqueue_get_buf_ctx_split()
829 BAD_RING(vq, "id %u is not a head!\n", i); in virtqueue_get_buf_ctx_split()
834 ret = vq->split.desc_state[i].data; in virtqueue_get_buf_ctx_split()
835 detach_buf_split(vq, i, ctx); in virtqueue_get_buf_ctx_split()
836 vq->last_used_idx++; in virtqueue_get_buf_ctx_split()
840 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) in virtqueue_get_buf_ctx_split()
841 virtio_store_mb(vq->weak_barriers, in virtqueue_get_buf_ctx_split()
842 &vring_used_event(&vq->split.vring), in virtqueue_get_buf_ctx_split()
843 cpu_to_virtio16(_vq->vdev, vq->last_used_idx)); in virtqueue_get_buf_ctx_split()
845 LAST_ADD_TIME_INVALID(vq); in virtqueue_get_buf_ctx_split()
847 END_USE(vq); in virtqueue_get_buf_ctx_split()
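--- note: a consumer typically drains completed buffers from its callback by looping over virtqueue_get_buf(), which wraps the _ctx variant above. A minimal hedged sketch; process_buf() is a hypothetical driver hook:

static void my_vq_callback(struct virtqueue *vq)
{
        unsigned int len;
        void *buf;

        /* Returns the cookie passed to virtqueue_add_*(), or NULL once
         * no more used buffers are pending. */
        while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
                process_buf(buf, len);
}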
853 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_disable_cb_split() local
855 if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) { in virtqueue_disable_cb_split()
856 vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; in virtqueue_disable_cb_split()
857 if (vq->event) in virtqueue_disable_cb_split()
859 vring_used_event(&vq->split.vring) = 0x0; in virtqueue_disable_cb_split()
861 vq->split.vring.avail->flags = in virtqueue_disable_cb_split()
863 vq->split.avail_flags_shadow); in virtqueue_disable_cb_split()
869 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_prepare_split() local
872 START_USE(vq); in virtqueue_enable_cb_prepare_split()
879 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { in virtqueue_enable_cb_prepare_split()
880 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; in virtqueue_enable_cb_prepare_split()
881 if (!vq->event) in virtqueue_enable_cb_prepare_split()
882 vq->split.vring.avail->flags = in virtqueue_enable_cb_prepare_split()
884 vq->split.avail_flags_shadow); in virtqueue_enable_cb_prepare_split()
886 vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev, in virtqueue_enable_cb_prepare_split()
887 last_used_idx = vq->last_used_idx); in virtqueue_enable_cb_prepare_split()
888 END_USE(vq); in virtqueue_enable_cb_prepare_split()
894 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_poll_split() local
897 vq->split.vring.used->idx); in virtqueue_poll_split()
902 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_delayed_split() local
905 START_USE(vq); in virtqueue_enable_cb_delayed_split()
912 if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { in virtqueue_enable_cb_delayed_split()
913 vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; in virtqueue_enable_cb_delayed_split()
914 if (!vq->event) in virtqueue_enable_cb_delayed_split()
915 vq->split.vring.avail->flags = in virtqueue_enable_cb_delayed_split()
917 vq->split.avail_flags_shadow); in virtqueue_enable_cb_delayed_split()
920 bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4; in virtqueue_enable_cb_delayed_split()
922 virtio_store_mb(vq->weak_barriers, in virtqueue_enable_cb_delayed_split()
923 &vring_used_event(&vq->split.vring), in virtqueue_enable_cb_delayed_split()
924 cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs)); in virtqueue_enable_cb_delayed_split()
926 if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx) in virtqueue_enable_cb_delayed_split()
927 - vq->last_used_idx) > bufs)) { in virtqueue_enable_cb_delayed_split()
928 END_USE(vq); in virtqueue_enable_cb_delayed_split()
932 END_USE(vq); in virtqueue_enable_cb_delayed_split()
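--- note: the expression at file line 920 arms used_event roughly three quarters of the way through the outstanding buffers. A worked example with assumed values:

/* Illustrative numbers only:
 *   avail_idx_shadow = 40, last_used_idx = 0
 *   bufs       = (40 - 0) * 3 / 4 = 30
 *   used_event = last_used_idx + bufs = 30
 * The device then suppresses interrupts until it marks index 30 used,
 * i.e. until ~3/4 of the pending work has completed. */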
938 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_detach_unused_buf_split() local
942 START_USE(vq); in virtqueue_detach_unused_buf_split()
944 for (i = 0; i < vq->split.vring.num; i++) { in virtqueue_detach_unused_buf_split()
945 if (!vq->split.desc_state[i].data) in virtqueue_detach_unused_buf_split()
948 buf = vq->split.desc_state[i].data; in virtqueue_detach_unused_buf_split()
949 detach_buf_split(vq, i, NULL); in virtqueue_detach_unused_buf_split()
950 vq->split.avail_idx_shadow--; in virtqueue_detach_unused_buf_split()
951 vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev, in virtqueue_detach_unused_buf_split()
952 vq->split.avail_idx_shadow); in virtqueue_detach_unused_buf_split()
953 END_USE(vq); in virtqueue_detach_unused_buf_split()
957 BUG_ON(vq->vq.num_free != vq->split.vring.num); in virtqueue_detach_unused_buf_split()
959 END_USE(vq); in virtqueue_detach_unused_buf_split()
964 struct vring_virtqueue *vq) in virtqueue_vring_init_split() argument
968 vdev = vq->vq.vdev; in virtqueue_vring_init_split()
974 if (!vq->vq.callback) { in virtqueue_vring_init_split()
976 if (!vq->event) in virtqueue_vring_init_split()
982 static void virtqueue_reinit_split(struct vring_virtqueue *vq) in virtqueue_reinit_split() argument
986 num = vq->split.vring.num; in virtqueue_reinit_split()
988 vq->split.vring.avail->flags = 0; in virtqueue_reinit_split()
989 vq->split.vring.avail->idx = 0; in virtqueue_reinit_split()
992 vq->split.vring.avail->ring[num] = 0; in virtqueue_reinit_split()
994 vq->split.vring.used->flags = 0; in virtqueue_reinit_split()
995 vq->split.vring.used->idx = 0; in virtqueue_reinit_split()
998 *(__virtio16 *)&(vq->split.vring.used->ring[num]) = 0; in virtqueue_reinit_split()
1000 virtqueue_init(vq, num); in virtqueue_reinit_split()
1002 virtqueue_vring_init_split(&vq->split, vq); in virtqueue_reinit_split()
1005 static void virtqueue_vring_attach_split(struct vring_virtqueue *vq, in virtqueue_vring_attach_split() argument
1008 vq->split = *vring_split; in virtqueue_vring_attach_split()
1011 vq->free_head = 0; in virtqueue_vring_attach_split()
1117 struct virtqueue *vq; in vring_create_virtqueue_split() local
1125 vq = __vring_new_virtqueue(index, &vring_split, vdev, weak_barriers, in vring_create_virtqueue_split()
1127 if (!vq) { in vring_create_virtqueue_split()
1132 to_vvq(vq)->we_own_ring = true; in vring_create_virtqueue_split()
1134 return vq; in vring_create_virtqueue_split()
1140 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_resize_split() local
1145 vq->split.vring_align, in virtqueue_resize_split()
1146 vq->split.may_reduce_num, in virtqueue_resize_split()
1147 vring_dma_dev(vq)); in virtqueue_resize_split()
1155 vring_free(&vq->vq); in virtqueue_resize_split()
1157 virtqueue_vring_init_split(&vring_split, vq); in virtqueue_resize_split()
1159 virtqueue_init(vq, vring_split.vring.num); in virtqueue_resize_split()
1160 virtqueue_vring_attach_split(vq, &vring_split); in virtqueue_resize_split()
1165 vring_free_split(&vring_split, vdev, vring_dma_dev(vq)); in virtqueue_resize_split()
1167 virtqueue_reinit_split(vq); in virtqueue_resize_split()
1185 static void vring_unmap_extra_packed(const struct vring_virtqueue *vq, in vring_unmap_extra_packed() argument
1190 if (!vq->use_dma_api) in vring_unmap_extra_packed()
1196 dma_unmap_single(vring_dma_dev(vq), in vring_unmap_extra_packed()
1201 dma_unmap_page(vring_dma_dev(vq), in vring_unmap_extra_packed()
1208 static void vring_unmap_desc_packed(const struct vring_virtqueue *vq, in vring_unmap_desc_packed() argument
1213 if (!vq->use_dma_api) in vring_unmap_desc_packed()
1218 dma_unmap_page(vring_dma_dev(vq), in vring_unmap_desc_packed()
1242 static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq, in virtqueue_add_indirect_packed() argument
1256 head = vq->packed.next_avail_idx; in virtqueue_add_indirect_packed()
1261 if (unlikely(vq->vq.num_free < 1)) { in virtqueue_add_indirect_packed()
1264 END_USE(vq); in virtqueue_add_indirect_packed()
1269 id = vq->free_head; in virtqueue_add_indirect_packed()
1270 BUG_ON(id == vq->packed.vring.num); in virtqueue_add_indirect_packed()
1274 addr = vring_map_one_sg(vq, sg, n < out_sgs ? in virtqueue_add_indirect_packed()
1276 if (vring_mapping_error(vq, addr)) in virtqueue_add_indirect_packed()
1288 addr = vring_map_single(vq, desc, in virtqueue_add_indirect_packed()
1291 if (vring_mapping_error(vq, addr)) in virtqueue_add_indirect_packed()
1294 vq->packed.vring.desc[head].addr = cpu_to_le64(addr); in virtqueue_add_indirect_packed()
1295 vq->packed.vring.desc[head].len = cpu_to_le32(total_sg * in virtqueue_add_indirect_packed()
1297 vq->packed.vring.desc[head].id = cpu_to_le16(id); in virtqueue_add_indirect_packed()
1299 if (vq->use_dma_api) { in virtqueue_add_indirect_packed()
1300 vq->packed.desc_extra[id].addr = addr; in virtqueue_add_indirect_packed()
1301 vq->packed.desc_extra[id].len = total_sg * in virtqueue_add_indirect_packed()
1303 vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT | in virtqueue_add_indirect_packed()
1304 vq->packed.avail_used_flags; in virtqueue_add_indirect_packed()
1312 virtio_wmb(vq->weak_barriers); in virtqueue_add_indirect_packed()
1313 vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT | in virtqueue_add_indirect_packed()
1314 vq->packed.avail_used_flags); in virtqueue_add_indirect_packed()
1317 vq->vq.num_free -= 1; in virtqueue_add_indirect_packed()
1321 if (n >= vq->packed.vring.num) { in virtqueue_add_indirect_packed()
1323 vq->packed.avail_wrap_counter ^= 1; in virtqueue_add_indirect_packed()
1324 vq->packed.avail_used_flags ^= in virtqueue_add_indirect_packed()
1328 vq->packed.next_avail_idx = n; in virtqueue_add_indirect_packed()
1329 vq->free_head = vq->packed.desc_extra[id].next; in virtqueue_add_indirect_packed()
1332 vq->packed.desc_state[id].num = 1; in virtqueue_add_indirect_packed()
1333 vq->packed.desc_state[id].data = data; in virtqueue_add_indirect_packed()
1334 vq->packed.desc_state[id].indir_desc = desc; in virtqueue_add_indirect_packed()
1335 vq->packed.desc_state[id].last = id; in virtqueue_add_indirect_packed()
1337 vq->num_added += 1; in virtqueue_add_indirect_packed()
1339 pr_debug("Added buffer head %i to %p\n", head, vq); in virtqueue_add_indirect_packed()
1340 END_USE(vq); in virtqueue_add_indirect_packed()
1348 vring_unmap_desc_packed(vq, &desc[i]); in virtqueue_add_indirect_packed()
1352 END_USE(vq); in virtqueue_add_indirect_packed()
1365 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_add_packed() local
1373 START_USE(vq); in virtqueue_add_packed()
1376 BUG_ON(ctx && vq->indirect); in virtqueue_add_packed()
1378 if (unlikely(vq->broken)) { in virtqueue_add_packed()
1379 END_USE(vq); in virtqueue_add_packed()
1383 LAST_ADD_TIME_UPDATE(vq); in virtqueue_add_packed()
1387 if (virtqueue_use_indirect(vq, total_sg)) { in virtqueue_add_packed()
1388 err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs, in virtqueue_add_packed()
1391 END_USE(vq); in virtqueue_add_packed()
1398 head = vq->packed.next_avail_idx; in virtqueue_add_packed()
1399 avail_used_flags = vq->packed.avail_used_flags; in virtqueue_add_packed()
1401 WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect); in virtqueue_add_packed()
1403 desc = vq->packed.vring.desc; in virtqueue_add_packed()
1407 if (unlikely(vq->vq.num_free < descs_used)) { in virtqueue_add_packed()
1409 descs_used, vq->vq.num_free); in virtqueue_add_packed()
1410 END_USE(vq); in virtqueue_add_packed()
1414 id = vq->free_head; in virtqueue_add_packed()
1415 BUG_ON(id == vq->packed.vring.num); in virtqueue_add_packed()
1421 dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ? in virtqueue_add_packed()
1423 if (vring_mapping_error(vq, addr)) in virtqueue_add_packed()
1426 flags = cpu_to_le16(vq->packed.avail_used_flags | in virtqueue_add_packed()
1438 if (unlikely(vq->use_dma_api)) { in virtqueue_add_packed()
1439 vq->packed.desc_extra[curr].addr = addr; in virtqueue_add_packed()
1440 vq->packed.desc_extra[curr].len = sg->length; in virtqueue_add_packed()
1441 vq->packed.desc_extra[curr].flags = in virtqueue_add_packed()
1445 curr = vq->packed.desc_extra[curr].next; in virtqueue_add_packed()
1447 if ((unlikely(++i >= vq->packed.vring.num))) { in virtqueue_add_packed()
1449 vq->packed.avail_used_flags ^= in virtqueue_add_packed()
1457 vq->packed.avail_wrap_counter ^= 1; in virtqueue_add_packed()
1460 vq->vq.num_free -= descs_used; in virtqueue_add_packed()
1463 vq->packed.next_avail_idx = i; in virtqueue_add_packed()
1464 vq->free_head = curr; in virtqueue_add_packed()
1467 vq->packed.desc_state[id].num = descs_used; in virtqueue_add_packed()
1468 vq->packed.desc_state[id].data = data; in virtqueue_add_packed()
1469 vq->packed.desc_state[id].indir_desc = ctx; in virtqueue_add_packed()
1470 vq->packed.desc_state[id].last = prev; in virtqueue_add_packed()
1477 virtio_wmb(vq->weak_barriers); in virtqueue_add_packed()
1478 vq->packed.vring.desc[head].flags = head_flags; in virtqueue_add_packed()
1479 vq->num_added += descs_used; in virtqueue_add_packed()
1481 pr_debug("Added buffer head %i to %p\n", head, vq); in virtqueue_add_packed()
1482 END_USE(vq); in virtqueue_add_packed()
1489 curr = vq->free_head; in virtqueue_add_packed()
1491 vq->packed.avail_used_flags = avail_used_flags; in virtqueue_add_packed()
1496 vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]); in virtqueue_add_packed()
1497 curr = vq->packed.desc_extra[curr].next; in virtqueue_add_packed()
1499 if (i >= vq->packed.vring.num) in virtqueue_add_packed()
1503 END_USE(vq); in virtqueue_add_packed()
1509 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_kick_prepare_packed() local
1520 START_USE(vq); in virtqueue_kick_prepare_packed()
1526 virtio_mb(vq->weak_barriers); in virtqueue_kick_prepare_packed()
1528 old = vq->packed.next_avail_idx - vq->num_added; in virtqueue_kick_prepare_packed()
1529 new = vq->packed.next_avail_idx; in virtqueue_kick_prepare_packed()
1530 vq->num_added = 0; in virtqueue_kick_prepare_packed()
1532 snapshot.u32 = *(u32 *)vq->packed.vring.device; in virtqueue_kick_prepare_packed()
1535 LAST_ADD_TIME_CHECK(vq); in virtqueue_kick_prepare_packed()
1536 LAST_ADD_TIME_INVALID(vq); in virtqueue_kick_prepare_packed()
1547 if (wrap_counter != vq->packed.avail_wrap_counter) in virtqueue_kick_prepare_packed()
1548 event_idx -= vq->packed.vring.num; in virtqueue_kick_prepare_packed()
1552 END_USE(vq); in virtqueue_kick_prepare_packed()
1556 static void detach_buf_packed(struct vring_virtqueue *vq, in detach_buf_packed() argument
1563 state = &vq->packed.desc_state[id]; in detach_buf_packed()
1568 vq->packed.desc_extra[state->last].next = vq->free_head; in detach_buf_packed()
1569 vq->free_head = id; in detach_buf_packed()
1570 vq->vq.num_free += state->num; in detach_buf_packed()
1572 if (unlikely(vq->use_dma_api)) { in detach_buf_packed()
1575 vring_unmap_extra_packed(vq, in detach_buf_packed()
1576 &vq->packed.desc_extra[curr]); in detach_buf_packed()
1577 curr = vq->packed.desc_extra[curr].next; in detach_buf_packed()
1581 if (vq->indirect) { in detach_buf_packed()
1589 if (vq->use_dma_api) { in detach_buf_packed()
1590 len = vq->packed.desc_extra[id].len; in detach_buf_packed()
1593 vring_unmap_desc_packed(vq, &desc[i]); in detach_buf_packed()
1602 static inline bool is_used_desc_packed(const struct vring_virtqueue *vq, in is_used_desc_packed() argument
1608 flags = le16_to_cpu(vq->packed.vring.desc[idx].flags); in is_used_desc_packed()
1615 static inline bool more_used_packed(const struct vring_virtqueue *vq) in more_used_packed() argument
1621 last_used_idx = READ_ONCE(vq->last_used_idx); in more_used_packed()
1624 return is_used_desc_packed(vq, last_used, used_wrap_counter); in more_used_packed()
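--- note: piecing together the fragments at file lines 1602-1624, is_used_desc_packed() plausibly compares the descriptor's AVAIL and USED flag bits against the expected wrap counter; a hedged reconstruction:

static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
                                       u16 idx, bool used_wrap_counter)
{
        bool avail, used;
        u16 flags;

        flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
        avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
        used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));

        /* A descriptor is used once both bits equal the wrap counter. */
        return avail == used && used == used_wrap_counter;
}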
1631 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_buf_ctx_packed() local
1636 START_USE(vq); in virtqueue_get_buf_ctx_packed()
1638 if (unlikely(vq->broken)) { in virtqueue_get_buf_ctx_packed()
1639 END_USE(vq); in virtqueue_get_buf_ctx_packed()
1643 if (!more_used_packed(vq)) { in virtqueue_get_buf_ctx_packed()
1645 END_USE(vq); in virtqueue_get_buf_ctx_packed()
1650 virtio_rmb(vq->weak_barriers); in virtqueue_get_buf_ctx_packed()
1652 last_used_idx = READ_ONCE(vq->last_used_idx); in virtqueue_get_buf_ctx_packed()
1655 id = le16_to_cpu(vq->packed.vring.desc[last_used].id); in virtqueue_get_buf_ctx_packed()
1656 *len = le32_to_cpu(vq->packed.vring.desc[last_used].len); in virtqueue_get_buf_ctx_packed()
1658 if (unlikely(id >= vq->packed.vring.num)) { in virtqueue_get_buf_ctx_packed()
1659 BAD_RING(vq, "id %u out of range\n", id); in virtqueue_get_buf_ctx_packed()
1662 if (unlikely(!vq->packed.desc_state[id].data)) { in virtqueue_get_buf_ctx_packed()
1663 BAD_RING(vq, "id %u is not a head!\n", id); in virtqueue_get_buf_ctx_packed()
1668 ret = vq->packed.desc_state[id].data; in virtqueue_get_buf_ctx_packed()
1669 detach_buf_packed(vq, id, ctx); in virtqueue_get_buf_ctx_packed()
1671 last_used += vq->packed.desc_state[id].num; in virtqueue_get_buf_ctx_packed()
1672 if (unlikely(last_used >= vq->packed.vring.num)) { in virtqueue_get_buf_ctx_packed()
1673 last_used -= vq->packed.vring.num; in virtqueue_get_buf_ctx_packed()
1678 WRITE_ONCE(vq->last_used_idx, last_used); in virtqueue_get_buf_ctx_packed()
1685 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC) in virtqueue_get_buf_ctx_packed()
1686 virtio_store_mb(vq->weak_barriers, in virtqueue_get_buf_ctx_packed()
1687 &vq->packed.vring.driver->off_wrap, in virtqueue_get_buf_ctx_packed()
1688 cpu_to_le16(vq->last_used_idx)); in virtqueue_get_buf_ctx_packed()
1690 LAST_ADD_TIME_INVALID(vq); in virtqueue_get_buf_ctx_packed()
1692 END_USE(vq); in virtqueue_get_buf_ctx_packed()
1698 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_disable_cb_packed() local
1700 if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) { in virtqueue_disable_cb_packed()
1701 vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE; in virtqueue_disable_cb_packed()
1702 vq->packed.vring.driver->flags = in virtqueue_disable_cb_packed()
1703 cpu_to_le16(vq->packed.event_flags_shadow); in virtqueue_disable_cb_packed()
1709 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_prepare_packed() local
1711 START_USE(vq); in virtqueue_enable_cb_prepare_packed()
1718 if (vq->event) { in virtqueue_enable_cb_prepare_packed()
1719 vq->packed.vring.driver->off_wrap = in virtqueue_enable_cb_prepare_packed()
1720 cpu_to_le16(vq->last_used_idx); in virtqueue_enable_cb_prepare_packed()
1725 virtio_wmb(vq->weak_barriers); in virtqueue_enable_cb_prepare_packed()
1728 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { in virtqueue_enable_cb_prepare_packed()
1729 vq->packed.event_flags_shadow = vq->event ? in virtqueue_enable_cb_prepare_packed()
1732 vq->packed.vring.driver->flags = in virtqueue_enable_cb_prepare_packed()
1733 cpu_to_le16(vq->packed.event_flags_shadow); in virtqueue_enable_cb_prepare_packed()
1736 END_USE(vq); in virtqueue_enable_cb_prepare_packed()
1737 return vq->last_used_idx; in virtqueue_enable_cb_prepare_packed()
1742 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_poll_packed() local
1749 return is_used_desc_packed(vq, used_idx, wrap_counter); in virtqueue_poll_packed()
1754 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_delayed_packed() local
1758 START_USE(vq); in virtqueue_enable_cb_delayed_packed()
1765 if (vq->event) { in virtqueue_enable_cb_delayed_packed()
1767 bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4; in virtqueue_enable_cb_delayed_packed()
1768 last_used_idx = READ_ONCE(vq->last_used_idx); in virtqueue_enable_cb_delayed_packed()
1772 if (used_idx >= vq->packed.vring.num) { in virtqueue_enable_cb_delayed_packed()
1773 used_idx -= vq->packed.vring.num; in virtqueue_enable_cb_delayed_packed()
1777 vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx | in virtqueue_enable_cb_delayed_packed()
1784 virtio_wmb(vq->weak_barriers); in virtqueue_enable_cb_delayed_packed()
1787 if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { in virtqueue_enable_cb_delayed_packed()
1788 vq->packed.event_flags_shadow = vq->event ? in virtqueue_enable_cb_delayed_packed()
1791 vq->packed.vring.driver->flags = in virtqueue_enable_cb_delayed_packed()
1792 cpu_to_le16(vq->packed.event_flags_shadow); in virtqueue_enable_cb_delayed_packed()
1799 virtio_mb(vq->weak_barriers); in virtqueue_enable_cb_delayed_packed()
1801 last_used_idx = READ_ONCE(vq->last_used_idx); in virtqueue_enable_cb_delayed_packed()
1804 if (is_used_desc_packed(vq, used_idx, wrap_counter)) { in virtqueue_enable_cb_delayed_packed()
1805 END_USE(vq); in virtqueue_enable_cb_delayed_packed()
1809 END_USE(vq); in virtqueue_enable_cb_delayed_packed()
1815 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_detach_unused_buf_packed() local
1819 START_USE(vq); in virtqueue_detach_unused_buf_packed()
1821 for (i = 0; i < vq->packed.vring.num; i++) { in virtqueue_detach_unused_buf_packed()
1822 if (!vq->packed.desc_state[i].data) in virtqueue_detach_unused_buf_packed()
1825 buf = vq->packed.desc_state[i].data; in virtqueue_detach_unused_buf_packed()
1826 detach_buf_packed(vq, i, NULL); in virtqueue_detach_unused_buf_packed()
1827 END_USE(vq); in virtqueue_detach_unused_buf_packed()
1831 BUG_ON(vq->vq.num_free != vq->packed.vring.num); in virtqueue_detach_unused_buf_packed()
1833 END_USE(vq); in virtqueue_detach_unused_buf_packed()
1978 static void virtqueue_vring_attach_packed(struct vring_virtqueue *vq, in virtqueue_vring_attach_packed() argument
1981 vq->packed = *vring_packed; in virtqueue_vring_attach_packed()
1984 vq->free_head = 0; in virtqueue_vring_attach_packed()
1987 static void virtqueue_reinit_packed(struct vring_virtqueue *vq) in virtqueue_reinit_packed() argument
1989 memset(vq->packed.vring.device, 0, vq->packed.event_size_in_bytes); in virtqueue_reinit_packed()
1990 memset(vq->packed.vring.driver, 0, vq->packed.event_size_in_bytes); in virtqueue_reinit_packed()
1993 memset(vq->packed.vring.desc, 0, vq->packed.ring_size_in_bytes); in virtqueue_reinit_packed()
1995 virtqueue_init(vq, vq->packed.vring.num); in virtqueue_reinit_packed()
1996 virtqueue_vring_init_packed(&vq->packed, !!vq->vq.callback); in virtqueue_reinit_packed()
2013 struct vring_virtqueue *vq; in vring_create_virtqueue_packed() local
2019 vq = kmalloc(sizeof(*vq), GFP_KERNEL); in vring_create_virtqueue_packed()
2020 if (!vq) in vring_create_virtqueue_packed()
2023 vq->vq.callback = callback; in vring_create_virtqueue_packed()
2024 vq->vq.vdev = vdev; in vring_create_virtqueue_packed()
2025 vq->vq.name = name; in vring_create_virtqueue_packed()
2026 vq->vq.index = index; in vring_create_virtqueue_packed()
2027 vq->vq.reset = false; in vring_create_virtqueue_packed()
2028 vq->we_own_ring = true; in vring_create_virtqueue_packed()
2029 vq->notify = notify; in vring_create_virtqueue_packed()
2030 vq->weak_barriers = weak_barriers; in vring_create_virtqueue_packed()
2032 vq->broken = true; in vring_create_virtqueue_packed()
2034 vq->broken = false; in vring_create_virtqueue_packed()
2036 vq->packed_ring = true; in vring_create_virtqueue_packed()
2037 vq->dma_dev = dma_dev; in vring_create_virtqueue_packed()
2038 vq->use_dma_api = vring_use_dma_api(vdev); in vring_create_virtqueue_packed()
2040 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && in vring_create_virtqueue_packed()
2042 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); in vring_create_virtqueue_packed()
2045 vq->weak_barriers = false; in vring_create_virtqueue_packed()
2053 virtqueue_init(vq, num); in vring_create_virtqueue_packed()
2054 virtqueue_vring_attach_packed(vq, &vring_packed); in vring_create_virtqueue_packed()
2057 list_add_tail(&vq->vq.list, &vdev->vqs); in vring_create_virtqueue_packed()
2059 return &vq->vq; in vring_create_virtqueue_packed()
2062 kfree(vq); in vring_create_virtqueue_packed()
2072 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_resize_packed() local
2076 if (vring_alloc_queue_packed(&vring_packed, vdev, num, vring_dma_dev(vq))) in virtqueue_resize_packed()
2083 vring_free(&vq->vq); in virtqueue_resize_packed()
2085 virtqueue_vring_init_packed(&vring_packed, !!vq->vq.callback); in virtqueue_resize_packed()
2087 virtqueue_init(vq, vring_packed.vring.num); in virtqueue_resize_packed()
2088 virtqueue_vring_attach_packed(vq, &vring_packed); in virtqueue_resize_packed()
2093 vring_free_packed(&vring_packed, vdev, vring_dma_dev(vq)); in virtqueue_resize_packed()
2095 virtqueue_reinit_packed(vq); in virtqueue_resize_packed()
2113 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_add() local
2115 return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg, in virtqueue_add()
2169 int virtqueue_add_outbuf(struct virtqueue *vq, in virtqueue_add_outbuf() argument
2174 return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp); in virtqueue_add_outbuf()
2191 int virtqueue_add_inbuf(struct virtqueue *vq, in virtqueue_add_inbuf() argument
2196 return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp); in virtqueue_add_inbuf()
2214 int virtqueue_add_inbuf_ctx(struct virtqueue *vq, in virtqueue_add_inbuf_ctx() argument
2220 return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp); in virtqueue_add_inbuf_ctx()
2237 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_kick_prepare() local
2239 return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) : in virtqueue_kick_prepare()
2254 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_notify() local
2256 if (unlikely(vq->broken)) in virtqueue_notify()
2260 if (!vq->notify(_vq)) { in virtqueue_notify()
2261 vq->broken = true; in virtqueue_notify()
2280 bool virtqueue_kick(struct virtqueue *vq) in virtqueue_kick() argument
2282 if (virtqueue_kick_prepare(vq)) in virtqueue_kick()
2283 return virtqueue_notify(vq); in virtqueue_kick()
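--- note: virtqueue_kick() is just the prepare/notify pair; a driver holding a lock across the add can split them so the potentially expensive notification runs unlocked. A hedged sketch (priv and its lock are illustrative, error handling elided):

unsigned long flags;
bool kick;
int err;

spin_lock_irqsave(&priv->lock, flags);
err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
kick = virtqueue_kick_prepare(vq);
spin_unlock_irqrestore(&priv->lock, flags);

if (kick)
        virtqueue_notify(vq);   /* may trap to the hypervisor */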
2308 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_buf_ctx() local
2310 return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) : in virtqueue_get_buf_ctx()
2331 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_disable_cb() local
2336 if (vq->event_triggered) in virtqueue_disable_cb()
2339 if (vq->packed_ring) in virtqueue_disable_cb()
2360 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_prepare() local
2362 if (vq->event_triggered) in virtqueue_enable_cb_prepare()
2363 vq->event_triggered = false; in virtqueue_enable_cb_prepare()
2365 return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) : in virtqueue_enable_cb_prepare()
2381 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_poll() local
2383 if (unlikely(vq->broken)) in virtqueue_poll()
2386 virtio_mb(vq->weak_barriers); in virtqueue_poll()
2387 return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) : in virtqueue_poll()
2426 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_enable_cb_delayed() local
2428 if (vq->event_triggered) in virtqueue_enable_cb_delayed()
2429 vq->event_triggered = false; in virtqueue_enable_cb_delayed()
2431 return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) : in virtqueue_enable_cb_delayed()
2446 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_detach_unused_buf() local
2448 return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) : in virtqueue_detach_unused_buf()
2453 static inline bool more_used(const struct vring_virtqueue *vq) in more_used() argument
2455 return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq); in more_used()
2468 struct vring_virtqueue *vq = to_vvq(_vq); in vring_interrupt() local
2470 if (!more_used(vq)) { in vring_interrupt()
2471 pr_debug("virtqueue interrupt with no work for %p\n", vq); in vring_interrupt()
2475 if (unlikely(vq->broken)) { in vring_interrupt()
2477 dev_warn_once(&vq->vq.vdev->dev, in vring_interrupt()
2486 if (vq->event) in vring_interrupt()
2487 vq->event_triggered = true; in vring_interrupt()
2489 pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback); in vring_interrupt()
2490 if (vq->vq.callback) in vring_interrupt()
2491 vq->vq.callback(&vq->vq); in vring_interrupt()
2508 struct vring_virtqueue *vq; in __vring_new_virtqueue() local
2514 vq = kmalloc(sizeof(*vq), GFP_KERNEL); in __vring_new_virtqueue()
2515 if (!vq) in __vring_new_virtqueue()
2518 vq->packed_ring = false; in __vring_new_virtqueue()
2519 vq->vq.callback = callback; in __vring_new_virtqueue()
2520 vq->vq.vdev = vdev; in __vring_new_virtqueue()
2521 vq->vq.name = name; in __vring_new_virtqueue()
2522 vq->vq.index = index; in __vring_new_virtqueue()
2523 vq->vq.reset = false; in __vring_new_virtqueue()
2524 vq->we_own_ring = false; in __vring_new_virtqueue()
2525 vq->notify = notify; in __vring_new_virtqueue()
2526 vq->weak_barriers = weak_barriers; in __vring_new_virtqueue()
2528 vq->broken = true; in __vring_new_virtqueue()
2530 vq->broken = false; in __vring_new_virtqueue()
2532 vq->dma_dev = dma_dev; in __vring_new_virtqueue()
2533 vq->use_dma_api = vring_use_dma_api(vdev); in __vring_new_virtqueue()
2535 vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) && in __vring_new_virtqueue()
2537 vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); in __vring_new_virtqueue()
2540 vq->weak_barriers = false; in __vring_new_virtqueue()
2544 kfree(vq); in __vring_new_virtqueue()
2548 virtqueue_vring_init_split(vring_split, vq); in __vring_new_virtqueue()
2550 virtqueue_init(vq, vring_split->vring.num); in __vring_new_virtqueue()
2551 virtqueue_vring_attach_split(vq, vring_split); in __vring_new_virtqueue()
2554 list_add_tail(&vq->vq.list, &vdev->vqs); in __vring_new_virtqueue()
2556 return &vq->vq; in __vring_new_virtqueue()
2633 void (*recycle)(struct virtqueue *vq, void *buf)) in virtqueue_resize() argument
2635 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_resize() local
2636 struct virtio_device *vdev = vq->vq.vdev; in virtqueue_resize()
2640 if (!vq->we_own_ring) in virtqueue_resize()
2643 if (num > vq->vq.num_max) in virtqueue_resize()
2649 if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num) in virtqueue_resize()
2665 if (vq->packed_ring) in virtqueue_resize()
2685 bool (*notify)(struct virtqueue *vq), in vring_new_virtqueue() argument
2686 void (*callback)(struct virtqueue *vq), in vring_new_virtqueue() argument
2703 struct vring_virtqueue *vq = to_vvq(_vq); in vring_free() local
2705 if (vq->we_own_ring) { in vring_free()
2706 if (vq->packed_ring) { in vring_free()
2707 vring_free_queue(vq->vq.vdev, in vring_free()
2708 vq->packed.ring_size_in_bytes, in vring_free()
2709 vq->packed.vring.desc, in vring_free()
2710 vq->packed.ring_dma_addr, in vring_free()
2711 vring_dma_dev(vq)); in vring_free()
2713 vring_free_queue(vq->vq.vdev, in vring_free()
2714 vq->packed.event_size_in_bytes, in vring_free()
2715 vq->packed.vring.driver, in vring_free()
2716 vq->packed.driver_event_dma_addr, in vring_free()
2717 vring_dma_dev(vq)); in vring_free()
2719 vring_free_queue(vq->vq.vdev, in vring_free()
2720 vq->packed.event_size_in_bytes, in vring_free()
2721 vq->packed.vring.device, in vring_free()
2722 vq->packed.device_event_dma_addr, in vring_free()
2723 vring_dma_dev(vq)); in vring_free()
2725 kfree(vq->packed.desc_state); in vring_free()
2726 kfree(vq->packed.desc_extra); in vring_free()
2728 vring_free_queue(vq->vq.vdev, in vring_free()
2729 vq->split.queue_size_in_bytes, in vring_free()
2730 vq->split.vring.desc, in vring_free()
2731 vq->split.queue_dma_addr, in vring_free()
2732 vring_dma_dev(vq)); in vring_free()
2735 if (!vq->packed_ring) { in vring_free()
2736 kfree(vq->split.desc_state); in vring_free()
2737 kfree(vq->split.desc_extra); in vring_free()
2743 struct vring_virtqueue *vq = to_vvq(_vq); in vring_del_virtqueue() local
2745 spin_lock(&vq->vq.vdev->vqs_list_lock); in vring_del_virtqueue()
2747 spin_unlock(&vq->vq.vdev->vqs_list_lock); in vring_del_virtqueue()
2751 kfree(vq); in vring_del_virtqueue()
2792 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_vring_size() local
2794 return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num; in virtqueue_get_vring_size()
2803 struct vring_virtqueue *vq = to_vvq(_vq); in __virtqueue_break() local
2806 WRITE_ONCE(vq->broken, true); in __virtqueue_break()
2815 struct vring_virtqueue *vq = to_vvq(_vq); in __virtqueue_unbreak() local
2818 WRITE_ONCE(vq->broken, false); in __virtqueue_unbreak()
2824 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_is_broken() local
2826 return READ_ONCE(vq->broken); in virtqueue_is_broken()
2840 struct vring_virtqueue *vq = to_vvq(_vq); in virtio_break_device() local
2843 WRITE_ONCE(vq->broken, true); in virtio_break_device()
2862 struct vring_virtqueue *vq = to_vvq(_vq); in __virtio_unbreak_device() local
2865 WRITE_ONCE(vq->broken, false); in __virtio_unbreak_device()
2873 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_desc_addr() local
2875 BUG_ON(!vq->we_own_ring); in virtqueue_get_desc_addr()
2877 if (vq->packed_ring) in virtqueue_get_desc_addr()
2878 return vq->packed.ring_dma_addr; in virtqueue_get_desc_addr()
2880 return vq->split.queue_dma_addr; in virtqueue_get_desc_addr()
2886 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_avail_addr() local
2888 BUG_ON(!vq->we_own_ring); in virtqueue_get_avail_addr()
2890 if (vq->packed_ring) in virtqueue_get_avail_addr()
2891 return vq->packed.driver_event_dma_addr; in virtqueue_get_avail_addr()
2893 return vq->split.queue_dma_addr + in virtqueue_get_avail_addr()
2894 ((char *)vq->split.vring.avail - (char *)vq->split.vring.desc); in virtqueue_get_avail_addr()
2900 struct vring_virtqueue *vq = to_vvq(_vq); in virtqueue_get_used_addr() local
2902 BUG_ON(!vq->we_own_ring); in virtqueue_get_used_addr()
2904 if (vq->packed_ring) in virtqueue_get_used_addr()
2905 return vq->packed.device_event_dma_addr; in virtqueue_get_used_addr()
2907 return vq->split.queue_dma_addr + in virtqueue_get_used_addr()
2908 ((char *)vq->split.vring.used - (char *)vq->split.vring.desc); in virtqueue_get_used_addr()
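--- note: the pointer arithmetic above relies on the split ring's contiguous layout from the virtio spec; roughly:

/* Split-ring layout behind the offset computations above (per the
 * virtio spec; avail/used trailers depend on VIRTIO_RING_F_EVENT_IDX):
 *
 *   struct vring_desc  desc[num];   // 16 bytes each, at queue_dma_addr
 *   struct vring_avail avail;       // flags, idx, ring[num] (+ used_event)
 *   ... padding up to the ring alignment ...
 *   struct vring_used  used;        // flags, idx, ring[num] (+ avail_event)
 */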
2913 const struct vring *virtqueue_get_vring(struct virtqueue *vq) in virtqueue_get_vring() argument
2915 return &to_vvq(vq)->split.vring; in virtqueue_get_vring()