Lines Matching refs:vdpasim
(each entry: source line number, matching source text, enclosing function; a trailing "argument" or "local" marks a reference that is a function parameter or a local variable declaration)
42 static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa) in vdpa_to_sim()
44 return container_of(vdpa, struct vdpasim, vdpa); in vdpa_to_sim()
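The two references above are enough to reconstruct the whole helper: it is the standard container_of() pattern for walking from the embedded struct vdpa_device back to the wrapping simulator object. A minimal sketch, assuming only that struct vdpasim (defined in the simulator's private header) embeds its vdpa_device in a member named vdpa, as the container_of() call on line 44 implies:

    #include <linux/vdpa.h>

    /* Map the generic vdpa_device back onto the simulator instance that
     * contains it; valid because the device is embedded, not referenced. */
    static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
    {
            return container_of(vdpa, struct vdpasim, vdpa);
    }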
58 static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx) in vdpasim_queue_ready() argument
60 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_queue_ready()
63 vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num, true, in vdpasim_queue_ready()
74 static void vdpasim_vq_reset(struct vdpasim *vdpasim, in vdpasim_vq_reset() argument
83 vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features, in vdpasim_vq_reset()
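Both helpers end in vringh_init_iotlb(), which in the exported vringh API also takes the guest addresses of the descriptor, available and used rings; the listing truncates those arguments. A hedged sketch of the queue_ready path, with the ring addresses assumed to be cached in per-queue fields named desc_addr/driver_addr/device_addr (hypothetical names, not shown above); vdpasim_vq_reset() makes the symmetric call with dev_attr.supported_features and cleared ring pointers.

    #include <linux/vringh.h>

    /* Sketch: bind the host-side vringh accessor to the guest-provided ring.
     * The last three arguments are assumptions standing in for whatever the
     * truncated call starting on line 63 actually passes. */
    static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
    {
            struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

            vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num, true,
                              (struct vring_desc *)(uintptr_t)vq->desc_addr,
                              (struct vring_avail *)(uintptr_t)vq->driver_addr,
                              (struct vring_used *)(uintptr_t)vq->device_addr);
    }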
89 static void vdpasim_do_reset(struct vdpasim *vdpasim) in vdpasim_do_reset() argument
93 spin_lock(&vdpasim->iommu_lock); in vdpasim_do_reset()
95 for (i = 0; i < vdpasim->dev_attr.nvqs; i++) { in vdpasim_do_reset()
96 vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]); in vdpasim_do_reset()
97 vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0], in vdpasim_do_reset()
98 &vdpasim->iommu_lock); in vdpasim_do_reset()
101 for (i = 0; i < vdpasim->dev_attr.nas; i++) { in vdpasim_do_reset()
102 vhost_iotlb_reset(&vdpasim->iommu[i]); in vdpasim_do_reset()
103 vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX, in vdpasim_do_reset()
105 vdpasim->iommu_pt[i] = true; in vdpasim_do_reset()
108 vdpasim->running = true; in vdpasim_do_reset()
109 spin_unlock(&vdpasim->iommu_lock); in vdpasim_do_reset()
111 vdpasim->features = 0; in vdpasim_do_reset()
112 vdpasim->status = 0; in vdpasim_do_reset()
113 ++vdpasim->generation; in vdpasim_do_reset()
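Lines 89-113 give the shape of the device-level reset: every virtqueue is re-initialised from dev_attr.supported_features and pointed back at address space 0, every IOTLB is cleared and replaced with an identity (passthrough) mapping, and the negotiated features, status and running state are rewound. A condensed sketch of that flow; the continuation of the vhost_iotlb_add_range() call on line 103 is not shown, so the permission argument below is assumed to be VHOST_MAP_RW.

    static void vdpasim_do_reset(struct vdpasim *vdpasim)
    {
            int i;

            spin_lock(&vdpasim->iommu_lock);

            /* Every vq falls back to the first address space, iommu[0]. */
            for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
                    vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);
                    vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
                                     &vdpasim->iommu_lock);
            }

            /* Each address space starts out as a 1:1 passthrough mapping. */
            for (i = 0; i < vdpasim->dev_attr.nas; i++) {
                    vhost_iotlb_reset(&vdpasim->iommu[i]);
                    vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX,
                                          0, VHOST_MAP_RW); /* perm assumed */
                    vdpasim->iommu_pt[i] = true;
            }

            vdpasim->running = true;
            spin_unlock(&vdpasim->iommu_lock);

            vdpasim->features = 0;
            vdpasim->status = 0;
            ++vdpasim->generation;
    }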
119 struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr, in vdpasim_create()
124 struct vdpasim *vdpasim; in vdpasim_create() local
153 vdpasim = vdpa_to_sim(vdpa); in vdpasim_create()
154 vdpasim->dev_attr = *dev_attr; in vdpasim_create()
155 INIT_WORK(&vdpasim->work, dev_attr->work_fn); in vdpasim_create()
156 spin_lock_init(&vdpasim->lock); in vdpasim_create()
157 spin_lock_init(&vdpasim->iommu_lock); in vdpasim_create()
159 dev = &vdpasim->vdpa.dev; in vdpasim_create()
163 vdpasim->vdpa.mdev = dev_attr->mgmt_dev; in vdpasim_create()
165 vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL); in vdpasim_create()
166 if (!vdpasim->config) in vdpasim_create()
169 vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue), in vdpasim_create()
171 if (!vdpasim->vqs) in vdpasim_create()
174 vdpasim->iommu = kmalloc_array(vdpasim->dev_attr.nas, in vdpasim_create()
175 sizeof(*vdpasim->iommu), GFP_KERNEL); in vdpasim_create()
176 if (!vdpasim->iommu) in vdpasim_create()
179 vdpasim->iommu_pt = kmalloc_array(vdpasim->dev_attr.nas, in vdpasim_create()
180 sizeof(*vdpasim->iommu_pt), GFP_KERNEL); in vdpasim_create()
181 if (!vdpasim->iommu_pt) in vdpasim_create()
184 for (i = 0; i < vdpasim->dev_attr.nas; i++) in vdpasim_create()
185 vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0); in vdpasim_create()
187 vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL); in vdpasim_create()
188 if (!vdpasim->buffer) in vdpasim_create()
192 vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0], in vdpasim_create()
193 &vdpasim->iommu_lock); in vdpasim_create()
195 vdpasim->vdpa.dma_dev = dev; in vdpasim_create()
197 return vdpasim; in vdpasim_create()
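The create path above is the mirror image of vdpasim_free() at the end of the listing: copy the device attributes, initialise the work item and the two locks, then allocate the shadow config space, the per-vq array, one IOTLB plus a passthrough flag per address space, and the data buffer, with every vq initially bound to address space 0. The sketch below covers only the visible body; the vdpa device allocation before line 153, the error labels, and the loop header near line 191 are placeholders, not the file's actual code.

    /* Inside vdpasim_create(), after the enclosing vdpa device has been
     * allocated and 'vdpa', 'vdpasim', 'dev' and 'i' set up (elided in the
     * listing).  Error unwinding is collapsed into one placeholder label. */
    vdpasim->dev_attr = *dev_attr;
    INIT_WORK(&vdpasim->work, dev_attr->work_fn);
    spin_lock_init(&vdpasim->lock);
    spin_lock_init(&vdpasim->iommu_lock);

    vdpasim->vdpa.mdev = dev_attr->mgmt_dev;

    vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
    if (!vdpasim->config)
            goto err_out;                           /* placeholder label */

    vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue),
                           GFP_KERNEL);
    if (!vdpasim->vqs)
            goto err_out;

    vdpasim->iommu = kmalloc_array(vdpasim->dev_attr.nas,
                                   sizeof(*vdpasim->iommu), GFP_KERNEL);
    if (!vdpasim->iommu)
            goto err_out;

    vdpasim->iommu_pt = kmalloc_array(vdpasim->dev_attr.nas,
                                      sizeof(*vdpasim->iommu_pt), GFP_KERNEL);
    if (!vdpasim->iommu_pt)
            goto err_out;

    for (i = 0; i < vdpasim->dev_attr.nas; i++)
            vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0);

    vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL);
    if (!vdpasim->buffer)
            goto err_out;

    /* Until vdpasim_set_group_asid() says otherwise, every vq translates
     * through address space 0 (loop header assumed; line 191 is elided). */
    for (i = 0; i < dev_attr->nvqs; i++)
            vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
                             &vdpasim->iommu_lock);

    vdpasim->vdpa.dma_dev = dev;

    return vdpasim;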
210 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_set_vq_address() local
211 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_set_vq_address()
222 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_set_vq_num() local
223 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_set_vq_num()
230 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_kick_vq() local
231 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_kick_vq()
233 if (!vdpasim->running && in vdpasim_kick_vq()
234 (vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK)) { in vdpasim_kick_vq()
235 vdpasim->pending_kick = true; in vdpasim_kick_vq()
240 schedule_work(&vdpasim->work); in vdpasim_kick_vq()
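The kick callback is where suspend shows up in the data path: while the simulator is not running but the driver has set DRIVER_OK, a kick is only recorded in pending_kick so vdpasim_resume() can replay it; otherwise the device's work item is scheduled to service the ring. A sketch with the prototype taken from the vdpa kick_vq callback; the early exit for a vq that is not ready (somewhere in the lines elided between 235 and 240) is an assumption.

    static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
    {
            struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
            struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

            /* Suspended but driver-OK: remember the kick for resume time. */
            if (!vdpasim->running &&
                (vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
                    vdpasim->pending_kick = true;
                    return;
            }

            if (vq->ready)          /* assumed; not visible in the listing */
                    schedule_work(&vdpasim->work);
    }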
246 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_set_vq_cb() local
247 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_set_vq_cb()
255 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_set_vq_ready() local
256 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_set_vq_ready()
259 spin_lock(&vdpasim->lock); in vdpasim_set_vq_ready()
263 vdpasim_queue_ready(vdpasim, idx); in vdpasim_set_vq_ready()
265 spin_unlock(&vdpasim->lock); in vdpasim_set_vq_ready()
270 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_get_vq_ready() local
271 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_get_vq_ready()
279 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_set_vq_state() local
280 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_set_vq_state()
283 spin_lock(&vdpasim->lock); in vdpasim_set_vq_state()
285 spin_unlock(&vdpasim->lock); in vdpasim_set_vq_state()
293 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_get_vq_state() local
294 struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; in vdpasim_get_vq_state()
305 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_get_vq_stats() local
307 if (vdpasim->dev_attr.get_stats) in vdpasim_get_vq_stats()
308 return vdpasim->dev_attr.get_stats(vdpasim, idx, in vdpasim_get_vq_stats()
329 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_get_device_features() local
331 return vdpasim->dev_attr.supported_features; in vdpasim_get_device_features()
336 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_set_driver_features() local
342 vdpasim->features = features & vdpasim->dev_attr.supported_features; in vdpasim_set_driver_features()
349 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_get_driver_features() local
351 return vdpasim->features; in vdpasim_get_driver_features()
367 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_get_device_id() local
369 return vdpasim->dev_attr.id; in vdpasim_get_device_id()
379 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_get_status() local
382 spin_lock(&vdpasim->lock); in vdpasim_get_status()
383 status = vdpasim->status; in vdpasim_get_status()
384 spin_unlock(&vdpasim->lock); in vdpasim_get_status()
391 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_set_status() local
393 spin_lock(&vdpasim->lock); in vdpasim_set_status()
394 vdpasim->status = status; in vdpasim_set_status()
395 spin_unlock(&vdpasim->lock); in vdpasim_set_status()
400 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_reset() local
402 spin_lock(&vdpasim->lock); in vdpasim_reset()
403 vdpasim->status = 0; in vdpasim_reset()
404 vdpasim_do_reset(vdpasim); in vdpasim_reset()
405 spin_unlock(&vdpasim->lock); in vdpasim_reset()
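get_status, set_status and reset form one pattern: the 8-bit virtio status is only read or written under vdpasim->lock, and clearing it additionally runs vdpasim_do_reset() so the virtqueues, IOTLBs and negotiated features are rewound in the same critical section. A sketch of the three callbacks as they appear in lines 379-405; the prototypes follow the corresponding vdpa_config_ops entries.

    static u8 vdpasim_get_status(struct vdpa_device *vdpa)
    {
            struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
            u8 status;

            spin_lock(&vdpasim->lock);
            status = vdpasim->status;
            spin_unlock(&vdpasim->lock);

            return status;
    }

    static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
    {
            struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

            spin_lock(&vdpasim->lock);
            vdpasim->status = status;
            spin_unlock(&vdpasim->lock);
    }

    static int vdpasim_reset(struct vdpa_device *vdpa)
    {
            struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

            spin_lock(&vdpasim->lock);
            vdpasim->status = 0;
            vdpasim_do_reset(vdpasim);
            spin_unlock(&vdpasim->lock);

            return 0;
    }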
412 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_suspend() local
414 spin_lock(&vdpasim->lock); in vdpasim_suspend()
415 vdpasim->running = false; in vdpasim_suspend()
416 spin_unlock(&vdpasim->lock); in vdpasim_suspend()
423 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_resume() local
426 spin_lock(&vdpasim->lock); in vdpasim_resume()
427 vdpasim->running = true; in vdpasim_resume()
429 if (vdpasim->pending_kick) { in vdpasim_resume()
431 for (i = 0; i < vdpasim->dev_attr.nvqs; ++i) in vdpasim_resume()
434 vdpasim->pending_kick = false; in vdpasim_resume()
437 spin_unlock(&vdpasim->lock); in vdpasim_resume()
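Resume is the counterpart of the suspended-kick handling in vdpasim_kick_vq(): running is switched back on under the lock and, if a kick arrived while suspended, the queues are kicked again before pending_kick is cleared. The body of the replay loop that starts on line 431 is elided; the call below is an assumption about what it does.

    static int vdpasim_resume(struct vdpa_device *vdpa)
    {
            struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
            int i;

            spin_lock(&vdpasim->lock);
            vdpasim->running = true;

            if (vdpasim->pending_kick) {
                    /* Replay the kicks that arrived while suspended. */
                    for (i = 0; i < vdpasim->dev_attr.nvqs; ++i)
                            vdpasim_kick_vq(vdpa, i);       /* assumed */

                    vdpasim->pending_kick = false;
            }

            spin_unlock(&vdpasim->lock);

            return 0;
    }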
444 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_get_config_size() local
446 return vdpasim->dev_attr.config_size; in vdpasim_get_config_size()
452 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_get_config() local
454 if (offset + len > vdpasim->dev_attr.config_size) in vdpasim_get_config()
457 if (vdpasim->dev_attr.get_config) in vdpasim_get_config()
458 vdpasim->dev_attr.get_config(vdpasim, vdpasim->config); in vdpasim_get_config()
460 memcpy(buf, vdpasim->config + offset, len); in vdpasim_get_config()
466 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_set_config() local
468 if (offset + len > vdpasim->dev_attr.config_size) in vdpasim_set_config()
471 memcpy(vdpasim->config + offset, buf, len); in vdpasim_set_config()
473 if (vdpasim->dev_attr.set_config) in vdpasim_set_config()
474 vdpasim->dev_attr.set_config(vdpasim, vdpasim->config); in vdpasim_set_config()
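The config-space pair is almost fully visible: both directions reject accesses that run past dev_attr.config_size, reads give the device class a chance to refresh the shadow config through its optional get_config hook before copying out, and writes copy in first and then notify the optional set_config hook. Sketch; the prototypes follow the vdpa get_config/set_config callbacks, and the early returns on the elided lines are assumed.

    static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
                                   void *buf, unsigned int len)
    {
            struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

            if (offset + len > vdpasim->dev_attr.config_size)
                    return;

            if (vdpasim->dev_attr.get_config)
                    vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);

            memcpy(buf, vdpasim->config + offset, len);
    }

    static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
                                   const void *buf, unsigned int len)
    {
            struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

            if (offset + len > vdpasim->dev_attr.config_size)
                    return;

            memcpy(vdpasim->config + offset, buf, len);

            if (vdpasim->dev_attr.set_config)
                    vdpasim->dev_attr.set_config(vdpasim, vdpasim->config);
    }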
479 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_get_generation() local
481 return vdpasim->generation; in vdpasim_get_generation()
497 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_set_group_asid() local
501 if (group > vdpasim->dev_attr.ngroups) in vdpasim_set_group_asid()
504 if (asid >= vdpasim->dev_attr.nas) in vdpasim_set_group_asid()
507 iommu = &vdpasim->iommu[asid]; in vdpasim_set_group_asid()
509 spin_lock(&vdpasim->lock); in vdpasim_set_group_asid()
511 for (i = 0; i < vdpasim->dev_attr.nvqs; i++) in vdpasim_set_group_asid()
513 vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu, in vdpasim_set_group_asid()
514 &vdpasim->iommu_lock); in vdpasim_set_group_asid()
516 spin_unlock(&vdpasim->lock); in vdpasim_set_group_asid()
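set_group_asid is what makes the multiple address spaces usable: after bounds-checking the group against dev_attr.ngroups and the ASID against dev_attr.nas, every virtqueue belonging to that group has its vringh re-pointed at the chosen IOTLB. The membership test between lines 511 and 513 is elided, so the sketch routes it through a get_vq_group() callback (an assumption here), and the -EINVAL returns on the elided lines are likewise assumed.

    static int vdpasim_set_group_asid(struct vdpa_device *vdpa, unsigned int group,
                                      unsigned int asid)
    {
            struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
            struct vhost_iotlb *iommu;
            int i;

            if (group > vdpasim->dev_attr.ngroups)
                    return -EINVAL;

            if (asid >= vdpasim->dev_attr.nas)
                    return -EINVAL;

            iommu = &vdpasim->iommu[asid];

            spin_lock(&vdpasim->lock);

            for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
                    if (vdpasim_get_vq_group(vdpa, i) == group) /* assumed check */
                            vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu,
                                             &vdpasim->iommu_lock);

            spin_unlock(&vdpasim->lock);

            return 0;
    }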
524 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_set_map() local
530 if (asid >= vdpasim->dev_attr.nas) in vdpasim_set_map()
533 spin_lock(&vdpasim->iommu_lock); in vdpasim_set_map()
535 iommu = &vdpasim->iommu[asid]; in vdpasim_set_map()
537 vdpasim->iommu_pt[asid] = false; in vdpasim_set_map()
546 spin_unlock(&vdpasim->iommu_lock); in vdpasim_set_map()
551 spin_unlock(&vdpasim->iommu_lock); in vdpasim_set_map()
559 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_dma_map() local
562 if (asid >= vdpasim->dev_attr.nas) in vdpasim_dma_map()
565 spin_lock(&vdpasim->iommu_lock); in vdpasim_dma_map()
566 if (vdpasim->iommu_pt[asid]) { in vdpasim_dma_map()
567 vhost_iotlb_reset(&vdpasim->iommu[asid]); in vdpasim_dma_map()
568 vdpasim->iommu_pt[asid] = false; in vdpasim_dma_map()
570 ret = vhost_iotlb_add_range_ctx(&vdpasim->iommu[asid], iova, in vdpasim_dma_map()
572 spin_unlock(&vdpasim->iommu_lock); in vdpasim_dma_map()
580 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_dma_unmap() local
582 if (asid >= vdpasim->dev_attr.nas) in vdpasim_dma_unmap()
585 if (vdpasim->iommu_pt[asid]) { in vdpasim_dma_unmap()
586 vhost_iotlb_reset(&vdpasim->iommu[asid]); in vdpasim_dma_unmap()
587 vdpasim->iommu_pt[asid] = false; in vdpasim_dma_unmap()
590 spin_lock(&vdpasim->iommu_lock); in vdpasim_dma_unmap()
591 vhost_iotlb_del_range(&vdpasim->iommu[asid], iova, iova + size - 1); in vdpasim_dma_unmap()
592 spin_unlock(&vdpasim->iommu_lock); in vdpasim_dma_unmap()
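The per-ASID DMA callbacks share one rule: the first explicit map or unmap on an address space drops the 1:1 passthrough mapping that vdpasim_do_reset() installed (iommu_pt goes false), and from then on ranges are added to or removed from that ASID's vhost IOTLB under iommu_lock. A sketch of the map side; the prototype on line 559 and the vhost_iotlb_add_range_ctx() call on line 570 are truncated, so the pa/perm/opaque parameters are taken from the standard vdpa dma_map callback and the -EINVAL return is assumed.

    static int vdpasim_dma_map(struct vdpa_device *vdpa, unsigned int asid,
                               u64 iova, u64 size, u64 pa, u32 perm,
                               void *opaque)
    {
            struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
            int ret;

            if (asid >= vdpasim->dev_attr.nas)
                    return -EINVAL;

            spin_lock(&vdpasim->iommu_lock);

            /* First explicit mapping on this ASID: drop the passthrough
             * identity mapping before installing real translations. */
            if (vdpasim->iommu_pt[asid]) {
                    vhost_iotlb_reset(&vdpasim->iommu[asid]);
                    vdpasim->iommu_pt[asid] = false;
            }

            ret = vhost_iotlb_add_range_ctx(&vdpasim->iommu[asid], iova,
                                            iova + size - 1, pa, perm, opaque);
            spin_unlock(&vdpasim->iommu_lock);

            return ret;
    }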
599 struct vdpasim *vdpasim = vdpa_to_sim(vdpa); in vdpasim_free() local
602 cancel_work_sync(&vdpasim->work); in vdpasim_free()
604 for (i = 0; i < vdpasim->dev_attr.nvqs; i++) { in vdpasim_free()
605 vringh_kiov_cleanup(&vdpasim->vqs[i].out_iov); in vdpasim_free()
606 vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov); in vdpasim_free()
609 kvfree(vdpasim->buffer); in vdpasim_free()
610 for (i = 0; i < vdpasim->dev_attr.nas; i++) in vdpasim_free()
611 vhost_iotlb_reset(&vdpasim->iommu[i]); in vdpasim_free()
612 kfree(vdpasim->iommu); in vdpasim_free()
613 kfree(vdpasim->iommu_pt); in vdpasim_free()
614 kfree(vdpasim->vqs); in vdpasim_free()
615 kfree(vdpasim->config); in vdpasim_free()
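vdpasim_free() unwinds vdpasim_create() in reverse: cancel the work item first so nothing is still walking the rings, release the per-vq vringh kiovs, then the data buffer, then reset the per-ASID IOTLBs before freeing the IOTLB array, the passthrough flags, the vq array and the shadow config. A sketch of that order; anything after line 615, if the function continues, is not shown in the listing.

    static void vdpasim_free(struct vdpa_device *vdpa)
    {
            struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
            int i;

            /* Stop the service work before tearing the rings down. */
            cancel_work_sync(&vdpasim->work);

            for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
                    vringh_kiov_cleanup(&vdpasim->vqs[i].out_iov);
                    vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov);
            }

            kvfree(vdpasim->buffer);

            for (i = 0; i < vdpasim->dev_attr.nas; i++)
                    vhost_iotlb_reset(&vdpasim->iommu[i]);

            kfree(vdpasim->iommu);
            kfree(vdpasim->iommu_pt);
            kfree(vdpasim->vqs);
            kfree(vdpasim->config);
    }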