Lines matching refs:worker — a cross-reference of the worker identifier in the Linux kernel's drivers/vhost/vhost.c. Each entry gives the source line number, the matching source line, and the enclosing function; "argument" marks a function parameter named worker and "local" a local-variable declaration. Reconstructed sketches of the surrounding functions follow each group of matches for context.

236 static void vhost_worker_queue(struct vhost_worker *worker,  in vhost_worker_queue()  argument
244 llist_add(&work->node, &worker->work_list); in vhost_worker_queue()
245 vhost_task_wake(worker->vtsk); in vhost_worker_queue()
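
Lines 236-245 are the core of work submission: vhost_worker_queue() pushes a vhost_work onto the worker's lockless llist and wakes the worker's vhost_task. A minimal sketch of the surrounding function; the VHOST_WORK_QUEUED guard is not visible in the matches above and is assumed from the upstream vhost code:

static void vhost_worker_queue(struct vhost_worker *worker,
                               struct vhost_work *work)
{
    /* Only queue the work once; test_and_set_bit() implies the barrier. */
    if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
        llist_add(&work->node, &worker->work_list);   /* line 244 */
        vhost_task_wake(worker->vtsk);                /* line 245 */
    }
}
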
251 struct vhost_worker *worker; in vhost_vq_work_queue() local
255 worker = rcu_dereference(vq->worker); in vhost_vq_work_queue()
256 if (worker) { in vhost_vq_work_queue()
258 vhost_worker_queue(worker, work); in vhost_vq_work_queue()
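
Lines 251-258: vhost_vq_work_queue() resolves the per-virtqueue worker pointer under RCU before queueing, so workers can be attached, swapped or torn down concurrently. Sketch; the bool return convention (whether the work was queued) is assumed:

bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
{
    struct vhost_worker *worker;
    bool queued = false;

    rcu_read_lock();
    worker = rcu_dereference(vq->worker);        /* line 255 */
    if (worker) {
        queued = true;
        vhost_worker_queue(worker, work);        /* line 258 */
    }
    rcu_read_unlock();

    return queued;
}
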
272 static void __vhost_worker_flush(struct vhost_worker *worker) in __vhost_worker_flush() argument
276 if (!worker->attachment_cnt || worker->killed) in __vhost_worker_flush()
282 vhost_worker_queue(worker, &flush.work); in __vhost_worker_flush()
287 mutex_unlock(&worker->mutex); in __vhost_worker_flush()
289 mutex_lock(&worker->mutex); in __vhost_worker_flush()
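
Lines 272-289 show the flush helper: a flush work carrying a completion is queued behind everything already on the list, and the caller waits for it to run, dropping worker->mutex across the wait so a killed worker can still take the mutex for cleanup. Sketch; struct vhost_flush_struct and the completion handling are assumed from upstream:

struct vhost_flush_struct {
    struct vhost_work work;
    struct completion wait_event;
};

static void vhost_flush_work(struct vhost_work *work)
{
    struct vhost_flush_struct *s;

    s = container_of(work, struct vhost_flush_struct, work);
    complete(&s->wait_event);
}

/* Called with worker->mutex held. */
static void __vhost_worker_flush(struct vhost_worker *worker)
{
    struct vhost_flush_struct flush;

    /* Nothing to wait for on an unattached or killed worker. */
    if (!worker->attachment_cnt || worker->killed)      /* line 276 */
        return;

    init_completion(&flush.wait_event);
    vhost_work_init(&flush.work, vhost_flush_work);

    vhost_worker_queue(worker, &flush.work);            /* line 282 */
    /* Drop the mutex so a killed worker can grab it for forced cleanup. */
    mutex_unlock(&worker->mutex);                       /* line 287 */
    wait_for_completion(&flush.wait_event);
    mutex_lock(&worker->mutex);                         /* line 289 */
}
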
292 static void vhost_worker_flush(struct vhost_worker *worker) in vhost_worker_flush() argument
294 mutex_lock(&worker->mutex); in vhost_worker_flush()
295 __vhost_worker_flush(worker); in vhost_worker_flush()
296 mutex_unlock(&worker->mutex); in vhost_worker_flush()
301 struct vhost_worker *worker; in vhost_dev_flush() local
304 xa_for_each(&dev->worker_xa, i, worker) in vhost_dev_flush()
305 vhost_worker_flush(worker); in vhost_dev_flush()
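
Lines 292-305: vhost_worker_flush() is the locked wrapper around the helper above, and vhost_dev_flush() walks every worker owned by the device through the dev->worker_xa xarray. Sketch:

static void vhost_worker_flush(struct vhost_worker *worker)
{
    mutex_lock(&worker->mutex);
    __vhost_worker_flush(worker);              /* line 295 */
    mutex_unlock(&worker->mutex);
}

void vhost_dev_flush(struct vhost_dev *dev)
{
    struct vhost_worker *worker;
    unsigned long i;

    xa_for_each(&dev->worker_xa, i, worker)    /* line 304 */
        vhost_worker_flush(worker);
}
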
312 struct vhost_worker *worker; in vhost_vq_has_work() local
316 worker = rcu_dereference(vq->worker); in vhost_vq_has_work()
317 if (worker && !llist_empty(&worker->work_list)) in vhost_vq_has_work()
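
Lines 312-317: vhost_vq_has_work() is the lockless check used by polling paths; it looks the worker up under RCU and tests whether its work list is non-empty. Sketch:

bool vhost_vq_has_work(struct vhost_virtqueue *vq)
{
    struct vhost_worker *worker;
    bool has_work = false;

    rcu_read_lock();
    worker = rcu_dereference(vq->worker);               /* line 316 */
    if (worker && !llist_empty(&worker->work_list))     /* line 317 */
        has_work = true;
    rcu_read_unlock();

    return has_work;
}
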
386 rcu_assign_pointer(vq->worker, NULL); in vhost_vq_reset()
393 struct vhost_worker *worker = data; in vhost_run_work_list() local
397 node = llist_del_all(&worker->work_list); in vhost_run_work_list()
406 kcov_remote_start_common(worker->kcov_handle); in vhost_run_work_list()
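
Line 386 shows vhost_vq_reset() clearing the per-vq worker pointer; lines 393-406 are vhost_run_work_list(), the body the vhost_task runs: it atomically takes the whole work list with llist_del_all() and executes each item inside a kcov remote section so coverage is attributed to the handler. A simplified sketch; the list reversal, flag clearing and cond_resched() are assumed from upstream:

static bool vhost_run_work_list(void *data)
{
    struct vhost_worker *worker = data;          /* line 393 */
    struct vhost_work *work, *work_next;
    struct llist_node *node;

    node = llist_del_all(&worker->work_list);    /* line 397 */
    if (node) {
        __set_current_state(TASK_RUNNING);

        /* llist is LIFO; restore submission order before running. */
        node = llist_reverse_order(node);
        llist_for_each_entry_safe(work, work_next, node, node) {
            clear_bit(VHOST_WORK_QUEUED, &work->flags);
            kcov_remote_start_common(worker->kcov_handle);    /* line 406 */
            work->fn(work);
            kcov_remote_stop();
            cond_resched();
        }
    }

    return !!node;
}

Returning whether any work was done lets the vhost_task core decide whether to sleep before calling the function again.
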
418 struct vhost_worker *worker = data; in vhost_worker_killed() local
419 struct vhost_dev *dev = worker->dev; in vhost_worker_killed()
423 mutex_lock(&worker->mutex); in vhost_worker_killed()
424 worker->killed = true; in vhost_worker_killed()
430 if (worker == in vhost_worker_killed()
431 rcu_dereference_check(vq->worker, in vhost_worker_killed()
433 rcu_assign_pointer(vq->worker, NULL); in vhost_worker_killed()
439 worker->attachment_cnt -= attach_cnt; in vhost_worker_killed()
446 vhost_run_work_list(worker); in vhost_worker_killed()
447 mutex_unlock(&worker->mutex); in vhost_worker_killed()
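
Lines 418-447: vhost_worker_killed() is the kill callback the vhost_task infrastructure invokes when the owning process is killed. The worker is marked killed, detached from every virtqueue that still points at it, its attachment count is dropped by the number of detaches, and any work that raced in is drained with vhost_run_work_list(). Sketch; the per-vq locking details and synchronize_rcu() are assumed from upstream:

static void vhost_worker_killed(void *data)
{
    struct vhost_worker *worker = data;          /* line 418 */
    struct vhost_dev *dev = worker->dev;         /* line 419 */
    struct vhost_virtqueue *vq;
    int i, attach_cnt = 0;

    mutex_lock(&worker->mutex);                  /* line 423 */
    worker->killed = true;                       /* line 424 */

    for (i = 0; i < dev->nvqs; i++) {
        vq = dev->vqs[i];

        mutex_lock(&vq->mutex);
        if (worker ==
            rcu_dereference_check(vq->worker,    /* lines 430-431 */
                                  lockdep_is_held(&vq->mutex))) {
            rcu_assign_pointer(vq->worker, NULL);    /* line 433 */
            attach_cnt++;
        }
        mutex_unlock(&vq->mutex);
    }

    worker->attachment_cnt -= attach_cnt;        /* line 439 */
    if (attach_cnt)
        synchronize_rcu();
    /* Run anything that was queued before the detach became visible. */
    vhost_run_work_list(worker);                 /* line 446 */
    mutex_unlock(&worker->mutex);                /* line 447 */
}
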
622 struct vhost_worker *worker) in vhost_worker_destroy() argument
624 if (!worker) in vhost_worker_destroy()
627 WARN_ON(!llist_empty(&worker->work_list)); in vhost_worker_destroy()
628 xa_erase(&dev->worker_xa, worker->id); in vhost_worker_destroy()
629 vhost_task_stop(worker->vtsk); in vhost_worker_destroy()
630 kfree(worker); in vhost_worker_destroy()
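
Lines 622-630: vhost_worker_destroy() removes the worker from the device's xarray, stops its vhost_task (reaping the kernel thread) and frees the structure; the WARN_ON documents that the work list must already be empty by then. Sketch:

static void vhost_worker_destroy(struct vhost_dev *dev,
                                 struct vhost_worker *worker)
{
    if (!worker)
        return;

    WARN_ON(!llist_empty(&worker->work_list));    /* line 627 */
    xa_erase(&dev->worker_xa, worker->id);        /* line 628 */
    vhost_task_stop(worker->vtsk);                /* line 629 */
    kfree(worker);                                /* line 630 */
}
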
635 struct vhost_worker *worker; in vhost_workers_free() local
642 rcu_assign_pointer(dev->vqs[i]->worker, NULL); in vhost_workers_free()
647 xa_for_each(&dev->worker_xa, i, worker) in vhost_workers_free()
648 vhost_worker_destroy(dev, worker); in vhost_workers_free()
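
Lines 635-648: vhost_workers_free() runs at device cleanup. Every virtqueue's worker pointer is cleared first, then every worker still present in dev->worker_xa (the default worker plus any userspace-created workers that were never freed) is destroyed. Sketch; the use_worker check and the final xa_destroy() are assumed from upstream:

static void vhost_workers_free(struct vhost_dev *dev)
{
    struct vhost_worker *worker;
    unsigned long i;

    if (!dev->use_worker)
        return;

    for (i = 0; i < dev->nvqs; i++)
        rcu_assign_pointer(dev->vqs[i]->worker, NULL);    /* line 642 */

    /* Free the default worker and any leftover userspace workers. */
    xa_for_each(&dev->worker_xa, i, worker)               /* line 647 */
        vhost_worker_destroy(dev, worker);                /* line 648 */
    xa_destroy(&dev->worker_xa);
}
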
654 struct vhost_worker *worker; in vhost_worker_create() local
660 worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT); in vhost_worker_create()
661 if (!worker) in vhost_worker_create()
664 worker->dev = dev; in vhost_worker_create()
668 worker, name); in vhost_worker_create()
672 mutex_init(&worker->mutex); in vhost_worker_create()
673 init_llist_head(&worker->work_list); in vhost_worker_create()
674 worker->kcov_handle = kcov_common_handle(); in vhost_worker_create()
675 worker->vtsk = vtsk; in vhost_worker_create()
679 ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL); in vhost_worker_create()
682 worker->id = id; in vhost_worker_create()
684 return worker; in vhost_worker_create()
689 kfree(worker); in vhost_worker_create()
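
Lines 654-689: vhost_worker_create() allocates the worker, creates a vhost_task whose body is vhost_run_work_list() and whose kill handler is vhost_worker_killed(), initializes the mutex, work list and kcov handle, starts the task, and stores the worker in dev->worker_xa, recording the allocated id. Sketch; the thread-name format and the exact error handling are assumed from upstream and abbreviated:

static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
{
    struct vhost_worker *worker;
    struct vhost_task *vtsk;
    char name[TASK_COMM_LEN];
    int ret;
    u32 id;

    worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);    /* line 660 */
    if (!worker)
        return NULL;

    worker->dev = dev;                                         /* line 664 */
    snprintf(name, sizeof(name), "vhost-%d", current->pid);

    vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
                             worker, name);                    /* line 668 */
    if (!vtsk)    /* error handling simplified in this sketch */
        goto free_worker;

    mutex_init(&worker->mutex);                                /* line 672 */
    init_llist_head(&worker->work_list);
    worker->kcov_handle = kcov_common_handle();
    worker->vtsk = vtsk;

    vhost_task_start(vtsk);

    ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);  /* line 679 */
    if (ret < 0)
        goto stop_worker;
    worker->id = id;                                           /* line 682 */

    return worker;

stop_worker:
    vhost_task_stop(vtsk);
free_worker:
    kfree(worker);                                             /* line 689 */
    return NULL;
}
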
695 struct vhost_worker *worker) in __vhost_vq_attach_worker() argument
699 mutex_lock(&worker->mutex); in __vhost_vq_attach_worker()
700 if (worker->killed) { in __vhost_vq_attach_worker()
701 mutex_unlock(&worker->mutex); in __vhost_vq_attach_worker()
707 old_worker = rcu_dereference_check(vq->worker, in __vhost_vq_attach_worker()
709 rcu_assign_pointer(vq->worker, worker); in __vhost_vq_attach_worker()
710 worker->attachment_cnt++; in __vhost_vq_attach_worker()
714 mutex_unlock(&worker->mutex); in __vhost_vq_attach_worker()
718 mutex_unlock(&worker->mutex); in __vhost_vq_attach_worker()
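
Lines 695-718: __vhost_vq_attach_worker() swaps a virtqueue's worker pointer under worker->mutex, refusing a worker that has already been killed, and bumps the new worker's attachment_cnt. Sketch; upstream additionally synchronizes RCU against, flushes, and drops the attachment count of the old worker, which is condensed into a comment here:

static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
                                     struct vhost_worker *worker)
{
    struct vhost_worker *old_worker;

    mutex_lock(&worker->mutex);                        /* line 699 */
    if (worker->killed) {                              /* line 700 */
        mutex_unlock(&worker->mutex);                  /* line 701 */
        return;
    }

    mutex_lock(&vq->mutex);

    old_worker = rcu_dereference_check(vq->worker,     /* line 707 */
                                       lockdep_is_held(&vq->mutex));
    rcu_assign_pointer(vq->worker, worker);            /* line 709 */
    worker->attachment_cnt++;                          /* line 710 */

    mutex_unlock(&vq->mutex);
    mutex_unlock(&worker->mutex);                      /* lines 714/718 */

    /*
     * Upstream then handles old_worker, if any: wait for RCU readers so
     * the pointer change is visible, flush the old worker, and drop its
     * attachment_cnt under its mutex. Omitted from this sketch.
     */
    (void)old_worker;
}
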
768 struct vhost_worker *worker; in vhost_vq_attach_worker() local
773 worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT); in vhost_vq_attach_worker()
774 if (!worker || worker->id != info->worker_id) in vhost_vq_attach_worker()
777 __vhost_vq_attach_worker(vq, worker); in vhost_vq_attach_worker()
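
Lines 768-777: vhost_vq_attach_worker() is the VHOST_ATTACH_VRING_WORKER path; it looks the worker up by the userspace-supplied id in dev->worker_xa and attaches it with the helper above. Sketch; the struct vhost_vring_worker argument and error codes are assumed from the uapi:

static int vhost_vq_attach_worker(struct vhost_virtqueue *vq,
                                  struct vhost_vring_worker *info)
{
    unsigned long index = info->worker_id;
    struct vhost_dev *dev = vq->dev;
    struct vhost_worker *worker;

    if (!dev->use_worker)
        return -EINVAL;

    /* xa_find() returns the entry at or after index, hence the id check. */
    worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT);  /* line 773 */
    if (!worker || worker->id != info->worker_id)                     /* line 774 */
        return -ENODEV;

    __vhost_vq_attach_worker(vq, worker);                             /* line 777 */
    return 0;
}
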
785 struct vhost_worker *worker; in vhost_new_worker() local
787 worker = vhost_worker_create(dev); in vhost_new_worker()
788 if (!worker) in vhost_new_worker()
791 info->worker_id = worker->id; in vhost_new_worker()
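
Lines 785-791: vhost_new_worker() backs VHOST_NEW_WORKER: it creates a worker and reports the allocated id back to userspace through struct vhost_worker_state. Sketch:

static int vhost_new_worker(struct vhost_dev *dev,
                            struct vhost_worker_state *info)
{
    struct vhost_worker *worker;

    worker = vhost_worker_create(dev);    /* line 787 */
    if (!worker)
        return -ENOMEM;

    info->worker_id = worker->id;         /* line 791 */
    return 0;
}
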
800 struct vhost_worker *worker; in vhost_free_worker() local
802 worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT); in vhost_free_worker()
803 if (!worker || worker->id != info->worker_id) in vhost_free_worker()
806 mutex_lock(&worker->mutex); in vhost_free_worker()
807 if (worker->attachment_cnt || worker->killed) { in vhost_free_worker()
808 mutex_unlock(&worker->mutex); in vhost_free_worker()
816 __vhost_worker_flush(worker); in vhost_free_worker()
817 mutex_unlock(&worker->mutex); in vhost_free_worker()
819 vhost_worker_destroy(dev, worker); in vhost_free_worker()
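
Lines 800-819: vhost_free_worker() backs VHOST_FREE_WORKER. The worker is looked up by id, the request is rejected while the worker is still attached to a virtqueue or has been killed, and a final __vhost_worker_flush() under worker->mutex catches a flush that raced with the last detach before the worker is destroyed. Sketch; error codes assumed:

static int vhost_free_worker(struct vhost_dev *dev,
                             struct vhost_worker_state *info)
{
    unsigned long index = info->worker_id;
    struct vhost_worker *worker;

    worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT);  /* line 802 */
    if (!worker || worker->id != info->worker_id)                     /* line 803 */
        return -ENODEV;

    mutex_lock(&worker->mutex);                                       /* line 806 */
    if (worker->attachment_cnt || worker->killed) {                   /* line 807 */
        mutex_unlock(&worker->mutex);
        return -EBUSY;
    }
    /* Flush anything that snuck in before attachment_cnt hit zero. */
    __vhost_worker_flush(worker);                                     /* line 816 */
    mutex_unlock(&worker->mutex);

    vhost_worker_destroy(dev, worker);                                /* line 819 */
    return 0;
}
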
850 struct vhost_worker *worker; in vhost_worker_ioctl() local
898 worker = rcu_dereference_check(vq->worker, in vhost_worker_ioctl()
900 if (!worker) { in vhost_worker_ioctl()
906 ring_worker.worker_id = worker->id; in vhost_worker_ioctl()
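
Lines 850-906: the vhost_worker_ioctl() matches are the VHOST_GET_VRING_WORKER read-back, where the vring's current worker pointer is dereferenced under dev->mutex (hence rcu_dereference_check() with lockdep_is_held()) and its id is copied into the vhost_vring_worker reply. The worker ioctls are easiest to see from userspace; the sketch below is a hypothetical usage flow with the uapi names from <linux/vhost.h>, using /dev/vhost-scsi as an example device node (only drivers that wire up vhost_worker_ioctl() accept these calls) and with error checking omitted:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

int main(void)
{
    struct vhost_worker_state state = { 0 };
    struct vhost_vring_worker ring = { .index = 0 };
    int fd = open("/dev/vhost-scsi", O_RDWR);

    if (fd < 0)
        return 1;

    ioctl(fd, VHOST_SET_OWNER);                  /* creates the default worker */
    ioctl(fd, VHOST_NEW_WORKER, &state);         /* kernel fills state.worker_id */

    ring.worker_id = state.worker_id;
    ioctl(fd, VHOST_ATTACH_VRING_WORKER, &ring); /* vring 0 now uses the new worker */
    ioctl(fd, VHOST_GET_VRING_WORKER, &ring);    /* reads the id back */
    printf("vring 0 served by worker %u\n", ring.worker_id);

    /* Would fail with EBUSY here: the worker is still attached to vring 0. */
    ioctl(fd, VHOST_FREE_WORKER, &state);

    close(fd);
    return 0;
}
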
923 struct vhost_worker *worker; in vhost_dev_set_owner() local
945 worker = vhost_worker_create(dev); in vhost_dev_set_owner()
946 if (!worker) { in vhost_dev_set_owner()
952 __vhost_vq_attach_worker(dev->vqs[i], worker); in vhost_dev_set_owner()
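
Lines 923-952: the vhost_dev_set_owner() matches show where the default worker comes from: VHOST_SET_OWNER creates one worker and attaches it to every virtqueue, and that is what the per-vq worker pointer refers to until userspace attaches workers of its own. A condensed sketch of just that portion; the helper name below is hypothetical (upstream this logic sits inline in vhost_dev_set_owner()) and the surrounding owner/mm setup is omitted:

/* Hypothetical split-out of the worker setup done by vhost_dev_set_owner(). */
static int vhost_dev_set_owner_workers(struct vhost_dev *dev)
{
    struct vhost_worker *worker;
    int i;

    if (!dev->use_worker)
        return 0;

    worker = vhost_worker_create(dev);    /* line 945 */
    if (!worker)
        return -ENOMEM;                   /* line 946: upstream jumps to an error label */

    for (i = 0; i < dev->nvqs; i++)
        __vhost_vq_attach_worker(dev->vqs[i], worker);    /* line 952 */

    return 0;
}
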