/drivers/platform/mellanox/

mlxbf-tmfifo.c
   255  size = vring_size(vring->num, vring->align);   in mlxbf_tmfifo_free_vrings()
   257  vring->va, vring->dma);   in mlxbf_tmfifo_free_vrings()
   288  size = vring_size(vring->num, vring->align);   in mlxbf_tmfifo_alloc_vrings()
   330  const struct vring *vr = virtqueue_get_vring(vring->vq);   in mlxbf_tmfifo_get_next_desc()
   354  const struct vring *vr = virtqueue_get_vring(vring->vq);   in mlxbf_tmfifo_release_desc()
   725  vring->rem_len = vring->pkt_len;   in mlxbf_tmfifo_rxtx_header()
   726  fifo->vring[is_rx] = vring;   in mlxbf_tmfifo_rxtx_header()
   792  if (!IS_VRING_DROP(vring) && vring->rem_len > 0 &&   in mlxbf_tmfifo_rxtx_one_desc()
   860  vring->fifo->vring[0] = NULL;   in mlxbf_tmfifo_check_tx_timeout()
   888  if (fifo->vring[is_rx] && fifo->vring[is_rx] != vring)   in mlxbf_tmfifo_rxtx()
   [all …]

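The mlxbf-tmfifo hits size each ring with vring_size() and keep a va/dma pair for it, i.e. the ring memory is a coherent DMA allocation. A minimal sketch of that alloc/free pattern under the generic DMA API; struct example_vring and both helper names are hypothetical stand-ins rather than the driver's own:

```c
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/virtio_ring.h>	/* vring_size() */

/* Hypothetical per-ring bookkeeping, loosely mirroring the fields in the hits. */
struct example_vring {
	void *va;		/* CPU address of the ring memory */
	dma_addr_t dma;		/* bus address programmed into the device */
	unsigned int num;	/* number of descriptors */
	unsigned int align;	/* ring alignment */
};

static int example_alloc_vring(struct device *dev, struct example_vring *vring)
{
	/* Bytes needed for the descriptor table, avail ring and used ring. */
	size_t size = vring_size(vring->num, vring->align);

	vring->va = dma_alloc_coherent(dev, size, &vring->dma, GFP_KERNEL);
	return vring->va ? 0 : -ENOMEM;
}

static void example_free_vring(struct device *dev, struct example_vring *vring)
{
	size_t size = vring_size(vring->num, vring->align);

	if (vring->va) {
		dma_free_coherent(dev, size, vring->va, vring->dma);
		vring->va = NULL;
	}
}
```

Because vring_size() is a pure function of (num, align), the free path can recompute the size instead of storing it, which is what the alloc and free hits above both do.
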
/drivers/net/wireless/ath/wil6210/

txrx.c
   122  vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);   in wil_vring_alloc()
   166  vring->va, &vring->pa, vring->ctx);   in wil_vring_alloc()
   200  vring_index, vring->size, vring->va,   in wil_vring_free()
   201  &vring->pa, vring->ctx);   in wil_vring_free()
   204  vring->size, vring->va,   in wil_vring_free()
   205  &vring->pa, vring->ctx);   in wil_vring_free()
   216  &vring->va[vring->swtail].tx.legacy;   in wil_vring_free()
   218  ctx = &vring->ctx[vring->swtail];   in wil_vring_free()
   234  &vring->va[vring->swhead].rx.legacy;   in wil_vring_free()
   236  ctx = &vring->ctx[vring->swhead];   in wil_vring_free()
   [all …]

trace.h
   212  TP_PROTO(u8 vring, u16 index, unsigned int len, u8 frags),
   213  TP_ARGS(vring, index, len, frags),
   215  __field(u8, vring)
   221  __entry->vring = vring;
   227  __entry->vring, __entry->index, __entry->len, __entry->frags)
   231  TP_PROTO(u8 vring, u16 index, unsigned int len, u8 err),
   232  TP_ARGS(vring, index, len, err),
   234  __field(u8, vring)
   240  __entry->vring = vring;
   246  __entry->vring, __entry->index, __entry->len,

wmi.c
  2609  int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring)   in wmi_rx_chain_add() argument
  2619  .ring_mem_base = cpu_to_le64(vring->pa),   in wmi_rx_chain_add()
  2620  .ring_size = cpu_to_le16(vring->size),   in wmi_rx_chain_add()
  2667  vring->hwtail = le32_to_cpu(evt.evt.rx_ring_tail_ptr);   in wmi_rx_chain_add()
  2670  le32_to_cpu(evt.evt.status), vring->hwtail);   in wmi_rx_chain_add()

wil6210.h
  1259  int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring);

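The trace.h hits are fragments of two wil6210 trace events whose fields (vring, index, len, and frags or err) are visible above. A hedged reconstruction of one such TRACE_EVENT skeleton; the system and event names here are made up, and TRACE_INCLUDE_FILE must match the real header's basename:

```c
/* Hypothetical trace header skeleton; only the field list comes from the hits. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM example_wil

#if !defined(_EXAMPLE_WIL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _EXAMPLE_WIL_TRACE_H

#include <linux/tracepoint.h>

TRACE_EVENT(example_tx_done,
	TP_PROTO(u8 vring, u16 index, unsigned int len, u8 frags),
	TP_ARGS(vring, index, len, frags),
	TP_STRUCT__entry(
		__field(u8, vring)
		__field(u16, index)
		__field(unsigned int, len)
		__field(u8, frags)
	),
	TP_fast_assign(
		__entry->vring = vring;
		__entry->index = index;
		__entry->len = len;
		__entry->frags = frags;
	),
	TP_printk("vring %d index %d len %d frags %d",
		  __entry->vring, __entry->index, __entry->len, __entry->frags)
);

#endif /* _EXAMPLE_WIL_TRACE_H */

/* This must come last so the event above is expanded into real code. */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>
```
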
/drivers/virtio/

virtio_ring.c
    99  struct vring vring;   member
   133  } vring;   member
   579  desc = vq->split.vring.desc;   in virtqueue_add_split()
  1012  num = vq->split.vring.num;   in virtqueue_reinit_split()
  1070  vring_split->vring.desc,   in vring_free_split()
  2812  if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num)   in virtqueue_resize()
  2910  vq->packed.vring.desc,   in vring_free()
  2931  vq->split.vring.desc,   in vring_free()
  3014  return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;   in virtqueue_get_vring_size()
  3114  ((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);   in virtqueue_get_avail_addr()
  [all …]

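Inside virtio_ring.c the struct vring embedded at lines 99/133 is the split or packed ring that the later hits dereference, and virtqueue_get_avail_addr() computes the avail ring's position relative to the descriptor table. A small illustrative sketch of the split layout using the uapi helpers vring_init() and vring_size(); the function itself is not part of the driver:

```c
#include <linux/printk.h>
#include <linux/virtio_ring.h>	/* struct vring, vring_init(), vring_size() */

/*
 * Map one contiguous buffer onto the three split-ring areas. Assumes num is
 * a power of two and p points at least vring_size(num, align) bytes of memory.
 */
static void example_layout_split_ring(void *p, unsigned int num,
				      unsigned long align)
{
	struct vring vr;

	vring_init(&vr, num, p, align);

	/* The descriptor table sits at the start of the buffer... */
	pr_info("desc  at offset %zu\n", (size_t)((char *)vr.desc - (char *)p));
	/* ...the available ring follows it immediately... */
	pr_info("avail at offset %zu\n", (size_t)((char *)vr.avail - (char *)p));
	/* ...and the used ring starts on the next 'align' boundary. */
	pr_info("used  at offset %zu\n", (size_t)((char *)vr.used - (char *)p));
}
```

The avail offset printed here is the same quantity virtqueue_get_avail_addr() computes at line 3114, and vring_size(num, align) is the total footprint that the allocation paths elsewhere in this listing request.
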
/drivers/vdpa/virtio_pci/

vp_vdpa.c
    37  struct vp_vring *vring;   member
   102  int irq = vp_vdpa->vring[idx].irq;   in vp_vdpa_get_vq_irq()
   120  &vp_vdpa->vring[i]);   in vp_vdpa_free_irq()
   139  struct vp_vring *vring = arg;   in vp_vdpa_vq_handler() local
   141  if (vring->cb.callback)   in vp_vdpa_vq_handler()
   142  return vring->cb.callback(vring->cb.private);   in vp_vdpa_vq_handler()
   191  &vp_vdpa->vring[i]);   in vp_vdpa_request_irq()
   198  vp_vdpa->vring[i].irq = irq;   in vp_vdpa_request_irq()
   318  vp_vdpa->vring[qid].cb = *cb;   in vp_vdpa_set_vq_cb()
   551  if (!vp_vdpa->vring) {   in vp_vdpa_dev_add()
   [all …]

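vp_vdpa keeps one struct vp_vring per queue holding the registered struct vdpa_callback and the MSI-X interrupt, and its handler simply forwards to that callback. A minimal sketch of the same pattern; struct example_vring and the request helper are hypothetical, only the vdpa_callback and IRQ APIs are real:

```c
#include <linux/interrupt.h>
#include <linux/vdpa.h>		/* struct vdpa_callback */

/* Hypothetical per-queue state mirroring the pattern in the hits. */
struct example_vring {
	struct vdpa_callback cb;	/* installed via the set_vq_cb() op */
	int irq;
};

static irqreturn_t example_vq_handler(int irq, void *arg)
{
	struct example_vring *vring = arg;

	/* Forward the interrupt to whoever bound a callback to this queue. */
	if (vring->cb.callback)
		return vring->cb.callback(vring->cb.private);

	return IRQ_HANDLED;
}

/* Registration side: one vector per queue, the ring itself as dev_id. */
static int example_request_vq_irq(struct device *dev, int irq,
				  struct example_vring *vring)
{
	vring->irq = irq;
	return devm_request_irq(dev, irq, example_vq_handler, 0,
				"example-vq", vring);
}
```

eni_vdpa and ifcvf below follow the same shape, differing mainly in how the vectors are allocated and named.
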
/drivers/vdpa/ifcvf/

ifcvf_main.c
    32  struct vring_info *vring = arg;   in ifcvf_vq_intr_handler() local
    34  if (vring->cb.callback)   in ifcvf_vq_intr_handler()
    35  return vring->cb.callback(vring->cb.private);   in ifcvf_vq_intr_handler()
    43  struct vring_info *vring;   in ifcvf_vqs_reused_intr_handler() local
    47  vring = &vf->vring[i];   in ifcvf_vqs_reused_intr_handler()
    48  if (vring->cb.callback)   in ifcvf_vqs_reused_intr_handler()
    49  vring->cb.callback(vring->cb.private);   in ifcvf_vqs_reused_intr_handler()
    79  devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);   in ifcvf_free_per_vq_irq()
   171  &vf->vring[i]);   in ifcvf_request_per_vq_irq()
   177  vf->vring[i].irq = irq;   in ifcvf_request_per_vq_irq()
   [all …]

ifcvf_base.c
   167  hw->vring = kzalloc(sizeof(struct vring_info) * hw->nr_vring, GFP_KERNEL);   in ifcvf_init_hw()
   168  if (!hw->vring)   in ifcvf_init_hw()
   174  hw->vring[i].notify_addr = hw->notify_base +   in ifcvf_init_hw()
   176  hw->vring[i].notify_pa = hw->notify_base_pa +   in ifcvf_init_hw()
   178  hw->vring[i].irq = -EINVAL;   in ifcvf_init_hw()
   395  hw->vring[qid].cb.callback = NULL;   in ifcvf_reset_vring()
   396  hw->vring[qid].cb.private = NULL;   in ifcvf_reset_vring()
   430  vp_iowrite16(qid, hw->vring[qid].notify_addr);   in ifcvf_notify_queue()

ifcvf_base.h
    80  struct vring_info *vring;   member

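ifcvf_init_hw() caches a per-queue notify address derived from the device's notify base, and ifcvf_notify_queue() kicks a queue by writing the queue index to that address (line 430). A sketch of the doorbell arithmetic as defined by the virtio PCI modern transport; the struct and helpers are hypothetical:

```c
#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical mirror of the per-queue state in the hits. */
struct example_vring_info {
	void __iomem *notify_addr;	/* this queue's doorbell */
	int irq;
};

/*
 * Per the virtio PCI "modern" layout, a queue's doorbell lives at
 * notify_base + queue_notify_off * notify_off_multiplier (a byte offset).
 */
static void __iomem *example_notify_addr(void __iomem *notify_base,
					 u16 queue_notify_off,
					 u32 notify_off_multiplier)
{
	return notify_base + (u32)queue_notify_off * notify_off_multiplier;
}

/* Kicking the queue is just writing its index to the doorbell. */
static void example_notify_queue(struct example_vring_info *vring, u16 qid)
{
	iowrite16(qid, vring->notify_addr);
}
```
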
/drivers/vhost/

vringh.c
    49  &vrh->vring.avail->idx);   in __vringh_get_head()
    54  return vrh->vring.num;   in __vringh_get_head()
    68  if (head >= vrh->vring.num) {   in __vringh_get_head()
    70  head, vrh->vring.num);   in __vringh_get_head()
   250  *descs = vrh->vring.desc;   in return_from_indirect()
   310  descs = vrh->vring.desc;   in __vringh_iov()
   673  vrh->vring.num = num;   in vringh_init_user()
   711  *head = vrh->vring.num;   in vringh_getdesc_user()
   917  vrh->vring.num = num;   in vringh_init_kern()
   918  vrh->vring.desc = desc;   in vringh_init_kern()
   [all …]

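vringh.c is the host-side accessor library for vrings that live in user, kernel, or IOTLB-translated memory; the hits above come from its shared head-fetching path and the user/kern init routines. A minimal sketch of the kernel-address flow, assuming the caller already owns the desc/avail/used memory; error handling is trimmed:

```c
#include <linux/kernel.h>
#include <linux/vringh.h>

static void example_drain_ring(struct vringh *vrh, u64 features,
			       unsigned int num,
			       struct vring_desc *desc,
			       struct vring_avail *avail,
			       struct vring_used *used)
{
	struct vringh_kiov riov, wiov;
	struct kvec rkvec[8], wkvec[8];
	char buf[64];
	u16 head;

	/* Bind the vringh to a ring located in kernel memory. */
	if (vringh_init_kern(vrh, features, num, false, desc, avail, used))
		return;

	vringh_kiov_init(&riov, rkvec, ARRAY_SIZE(rkvec));
	vringh_kiov_init(&wiov, wkvec, ARRAY_SIZE(wkvec));

	/* Returns 1 when a descriptor chain was fetched, 0 when the ring is empty. */
	while (vringh_getdesc_kern(vrh, &riov, &wiov, &head, GFP_KERNEL) == 1) {
		/* Copy the driver-readable part of the chain out of the ring. */
		ssize_t len = vringh_iov_pull_kern(&riov, buf, sizeof(buf));

		if (len < 0)
			break;

		/* Push the chain onto the used ring; 0 bytes were written back. */
		vringh_complete_kern(vrh, head, 0);
	}
}
```
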
/drivers/vdpa/alibaba/

eni_vdpa.c
    41  struct eni_vring *vring;   member
   104  int irq = eni_vdpa->vring[idx].irq;   in eni_vdpa_get_vq_irq()
   122  &eni_vdpa->vring[i]);   in eni_vdpa_free_irq()
   141  struct eni_vring *vring = arg;   in eni_vdpa_vq_handler() local
   143  if (vring->cb.callback)   in eni_vdpa_vq_handler()
   144  return vring->cb.callback(vring->cb.private);   in eni_vdpa_vq_handler()
   184  &eni_vdpa->vring[i]);   in eni_vdpa_request_irq()
   190  eni_vdpa->vring[i].irq = irq;   in eni_vdpa_request_irq()
   293  eni_vdpa->vring[qid].cb = *cb;   in eni_vdpa_set_vq_cb()
   503  sizeof(*eni_vdpa->vring),   in eni_vdpa_probe()
   [all …]

/drivers/vdpa/vdpa_sim/

vdpa_sim_net.c
    74  vringh_complete_iotlb(&vq->vring, vq->head, len);   in vdpasim_net_complete()
    80  if (vringh_need_notify_iotlb(&vq->vring) > 0)   in vdpasim_net_complete()
    81  vringh_notify(&vq->vring);   in vdpasim_net_complete()
   115  read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->in_iov,   in vdpasim_handle_ctrl_mac()
   144  err = vringh_getdesc_iotlb(&cvq->vring, &cvq->in_iov,   in vdpasim_handle_cvq()
   174  write = vringh_iov_push_iotlb(&cvq->vring, &cvq->out_iov,   in vdpasim_handle_cvq()
   176  vringh_complete_iotlb(&cvq->vring, cvq->head, write);   in vdpasim_handle_cvq()
   220  err = vringh_getdesc_iotlb(&txq->vring, &txq->out_iov, NULL,   in vdpasim_net_work()
   229  read = vringh_iov_pull_iotlb(&txq->vring, &txq->out_iov,   in vdpasim_net_work()
   240  err = vringh_getdesc_iotlb(&rxq->vring, NULL, &rxq->in_iov,   in vdpasim_net_work()
   [all …]

vdpa_sim_blk.c
   123  ret = vringh_getdesc_iotlb(&vq->vring, &vq->out_iov, &vq->in_iov,   in vdpasim_blk_handle_req()
   146  bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov, &hdr,   in vdpasim_blk_handle_req()
   179  bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov,   in vdpasim_blk_handle_req()
   202  bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov,   in vdpasim_blk_handle_req()
   215  bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov,   in vdpasim_blk_handle_req()
   245  bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov, &range,   in vdpasim_blk_handle_req()
   307  bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov, &status, 1);   in vdpasim_blk_handle_req()
   319  vringh_complete_iotlb(&vq->vring, vq->head, pushed);   in vdpasim_blk_handle_req()
   349  if (vringh_need_notify_iotlb(&vq->vring) > 0)   in vdpasim_blk_work()
   350  vringh_notify(&vq->vring);   in vdpasim_blk_work()

vdpa_sim.c
    82  static void vdpasim_vq_notify(struct vringh *vring)   in vdpasim_vq_notify() argument
    85  container_of(vring, struct vdpasim_virtqueue, vring);   in vdpasim_vq_notify()
    96  uint16_t last_avail_idx = vq->vring.last_avail_idx;   in vdpasim_queue_ready()
   108  vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num,   in vdpasim_queue_ready()
   112  vq->vring.last_avail_idx = last_avail_idx;   in vdpasim_queue_ready()
   123  vq->vring.last_used_idx = last_avail_idx;   in vdpasim_queue_ready()
   124  vq->vring.notify = vdpasim_vq_notify;   in vdpasim_queue_ready()
   139  vq->vring.notify = NULL;   in vdpasim_vq_reset()
   378  struct vringh *vrh = &vq->vring;   in vdpasim_set_vq_state()
   392  struct vringh *vrh = &vq->vring;   in vdpasim_get_vq_state()
   [all …]

vdpa_sim.h
    23  struct vringh vring;   member

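vdpa_sim drives its queues entirely through the *_iotlb vringh variants: vringh_init_iotlb() when a queue becomes ready, then getdesc/pull/push/complete plus the need_notify/notify pair in the datapath. A condensed sketch of one TX-style pass; struct example_vq stands in for vdpasim_virtqueue and carries only the fields used here:

```c
#include <linux/types.h>
#include <linux/vringh.h>

/* Hypothetical stand-in for vdpasim_virtqueue. */
struct example_vq {
	struct vringh vring;
	struct vringh_kiov out_iov;	/* driver-readable buffers */
	struct vringh_kiov in_iov;	/* driver-writable buffers */
	u16 head;
};

static void example_consume_tx(struct example_vq *txq)
{
	char pkt[256];
	ssize_t read;
	int err;

	for (;;) {
		/* 1 == got a chain, 0 == ring empty, < 0 == error. */
		err = vringh_getdesc_iotlb(&txq->vring, &txq->out_iov, NULL,
					   &txq->head, GFP_ATOMIC);
		if (err != 1)
			break;

		/* Buffer addresses are guest addresses, translated via the IOTLB. */
		read = vringh_iov_pull_iotlb(&txq->vring, &txq->out_iov,
					     pkt, sizeof(pkt));
		if (read < 0)
			break;

		/* Return the chain to the used ring; nothing was written back. */
		vringh_complete_iotlb(&txq->vring, txq->head, 0);

		/* Only interrupt the driver if event suppression allows it. */
		if (vringh_need_notify_iotlb(&txq->vring) > 0)
			vringh_notify(&txq->vring);
	}
}
```

The RX direction mirrors this with the in_iov side and vringh_iov_push_iotlb(), as the vdpa_sim_net.c and vdpa_sim_blk.c hits show.
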
/drivers/remoteproc/

remoteproc_virtio.c
   119  if (id >= ARRAY_SIZE(rvdev->vring))   in rp_find_vq()
   131  rvring = &rvdev->vring[id];   in rp_find_vq()
   161  rsc->vring[id].da = mem->da;   in rp_find_vq()
   299  cfg = &rsc->vring[rsc->num_of_vrings];   in rproc_virtio_get()
   317  cfg = &rsc->vring[rsc->num_of_vrings];   in rproc_virtio_set()
   570  rproc_free_vring(&rvdev->vring[i]);   in rproc_virtio_probe()
   582  for (id = 0; id < ARRAY_SIZE(rvdev->vring); id++) {   in rproc_virtio_remove()
   583  rvring = &rvdev->vring[id];   in rproc_virtio_remove()

remoteproc_core.c
   350  size, rsc->vring[i].da,   in rproc_alloc_vring()
   382  rsc->vring[i].notifyid = notifyid;   in rproc_alloc_vring()
   391  struct fw_rsc_vdev_vring *vring = &rsc->vring[i];   in rproc_parse_vring() local
   395  i, vring->da, vring->num, vring->align);   in rproc_parse_vring()
   398  if (!vring->num || !vring->align) {   in rproc_parse_vring()
   400  vring->num, vring->align);   in rproc_parse_vring()
   404  rvring->num = vring->num;   in rproc_parse_vring()
   405  rvring->align = vring->align;   in rproc_parse_vring()
   414  int idx = rvring - rvring->rvdev->vring;   in rproc_free_vring()
   432  rsc->vring[idx].da = 0;   in rproc_free_vring()
   [all …]

remoteproc_debugfs.c
   341  seq_printf(seq, " Device Address 0x%x\n", v->vring[j].da);   in rproc_rsc_table_show()
   342  seq_printf(seq, " Alignment %d\n", v->vring[j].align);   in rproc_rsc_table_show()
   343  seq_printf(seq, " Number of buffers %d\n", v->vring[j].num);   in rproc_rsc_table_show()
   344  seq_printf(seq, " Notify ID %d\n", v->vring[j].notifyid);   in rproc_rsc_table_show()
   346  v->vring[j].pa);   in rproc_rsc_table_show()

ingenic_rproc.c
   156  u32 vring;   in vpu_interrupt() local
   158  vring = readl(vpu->aux_base + REG_AUX_MSG);   in vpu_interrupt()
   163  return rproc_vq_interrupt(rproc, vring);   in vpu_interrupt()

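On the remoteproc side a vring is described by the firmware resource table as a struct fw_rsc_vdev_vring entry (da, align, num, notifyid, pa, the exact fields remoteproc_debugfs.c prints), and rproc_parse_vring() rejects entries with a zero num or align. A sketch of that validation step; the helper name and message wording are illustrative, not the driver's:

```c
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/remoteproc.h>	/* struct fw_rsc_vdev, struct fw_rsc_vdev_vring */

static int example_check_vring(struct device *dev,
			       struct fw_rsc_vdev *rsc, int i)
{
	struct fw_rsc_vdev_vring *vring = &rsc->vring[i];

	dev_dbg(dev, "vring%d: da 0x%x, num %d, align %d\n",
		i, vring->da, vring->num, vring->align);

	/* A ring with no buffers or no alignment cannot be set up. */
	if (!vring->num || !vring->align) {
		dev_err(dev, "invalid vring: num %d, align %d\n",
			vring->num, vring->align);
		return -EINVAL;
	}

	return 0;
}
```
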
/drivers/vdpa/mlx5/net/

mlx5_vnet.c
  2412  vringh_complete_iotlb(&cvq->vring, cvq->head, write);   in mlx5_cvq_kick_handler()
  2416  if (vringh_need_notify_iotlb(&cvq->vring))   in mlx5_cvq_kick_handler()
  2417  vringh_notify(&cvq->vring);   in mlx5_cvq_kick_handler()
  2489  cvq->vring.vring.num = num;   in mlx5_vdpa_set_vq_num()
  2508  static void mlx5_cvq_notify(struct vringh *vring)   in mlx5_cvq_notify() argument
  2510  struct mlx5_control_vq *cvq = container_of(vring, struct mlx5_control_vq, vring);   in mlx5_cvq_notify()
  2526  cvq->vring.notify = mlx5_cvq_notify;   in set_cvq_ready()
  2582  mvdev->cvq.vring.last_avail_idx = state->split.avail_index;   in mlx5_vdpa_set_vq_state()
  3164  u16 idx = cvq->vring.last_avail_idx;   in setup_cvq_vring()
  3167  cvq->vring.vring.num, false,   in setup_cvq_vring()
  [all …]

/drivers/vdpa/mlx5/core/

mlx5_vdpa.h
    57  struct vringh vring;   member

resources.c
   237  vringh_set_iotlb(&mvdev->cvq.vring, mvdev->cvq.iotlb, &mvdev->cvq.iommu_lock);   in init_ctrl_vq()

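The mlx5 control virtqueue reuses the same vringh IOTLB machinery: init_ctrl_vq() binds the CVQ's IOTLB and its lock to the vringh with vringh_set_iotlb(), and mlx5_vnet.c later initialises the ring against guest addresses in setup_cvq_vring(). A rough sketch of that wiring; struct example_cvq and the setup helper are hypothetical:

```c
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/vhost_iotlb.h>
#include <linux/vringh.h>

/* Hypothetical stand-in for the control-VQ state in the hits. */
struct example_cvq {
	struct vringh vring;
	struct vhost_iotlb *iotlb;
	spinlock_t iotlb_lock;
};

static int example_setup_cvq(struct example_cvq *cvq, u64 features,
			     unsigned int num, u64 desc_gpa,
			     u64 avail_gpa, u64 used_gpa)
{
	cvq->iotlb = vhost_iotlb_alloc(0, 0);
	if (!cvq->iotlb)
		return -ENOMEM;

	spin_lock_init(&cvq->iotlb_lock);
	/* All ring accesses will now be translated through this IOTLB. */
	vringh_set_iotlb(&cvq->vring, cvq->iotlb, &cvq->iotlb_lock);

	/* The desc/avail/used pointers carry guest addresses, not kernel ones. */
	return vringh_init_iotlb(&cvq->vring, features, num, false,
				 (struct vring_desc *)(uintptr_t)desc_gpa,
				 (struct vring_avail *)(uintptr_t)avail_gpa,
				 (struct vring_used *)(uintptr_t)used_gpa);
}
```
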