Lines matching refs:pvr_dev in drivers/gpu/drm/imagination/pvr_queue.c

24 static int get_xfer_ctx_state_size(struct pvr_device *pvr_dev) in get_xfer_ctx_state_size() argument
28 if (PVR_HAS_FEATURE(pvr_dev, xe_memory_hierarchy)) { in get_xfer_ctx_state_size()
33 err = PVR_FEATURE_VALUE(pvr_dev, num_isp_ipp_pipes, &num_isp_store_registers); in get_xfer_ctx_state_size()
43 static int get_frag_ctx_state_size(struct pvr_device *pvr_dev) in get_frag_ctx_state_size() argument
48 if (PVR_HAS_FEATURE(pvr_dev, xe_memory_hierarchy)) { in get_frag_ctx_state_size()
49 err = PVR_FEATURE_VALUE(pvr_dev, num_raster_pipes, &num_isp_store_registers); in get_frag_ctx_state_size()
53 if (PVR_HAS_FEATURE(pvr_dev, gpu_multicore_support)) { in get_frag_ctx_state_size()
56 err = PVR_FEATURE_VALUE(pvr_dev, xpu_max_slaves, &xpu_max_slaves); in get_frag_ctx_state_size()
63 err = PVR_FEATURE_VALUE(pvr_dev, num_isp_ipp_pipes, &num_isp_store_registers); in get_frag_ctx_state_size()
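
Both state-size helpers above build the firmware context-state size from GPU feature queries. A minimal sketch of that query pattern, following the fragment helper's matches; the local's type and the error handling are assumptions, not the driver's exact code:

	u64 num_isp_store_registers;
	int err;

	if (PVR_HAS_FEATURE(pvr_dev, xe_memory_hierarchy)) {
		/* XE memory hierarchy parts: count follows the raster pipes. */
		err = PVR_FEATURE_VALUE(pvr_dev, num_raster_pipes,
					&num_isp_store_registers);
	} else {
		/* Older parts: count follows the ISP IPP pipes. */
		err = PVR_FEATURE_VALUE(pvr_dev, num_isp_ipp_pipes,
					&num_isp_store_registers);
	}
	if (err)
		return err;

On multicore parts the fragment helper additionally folds xpu_max_slaves into the count, as the match at line 56 shows.
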
73 static int get_ctx_state_size(struct pvr_device *pvr_dev, enum drm_pvr_job_type type) in get_ctx_state_size() argument
79 return get_frag_ctx_state_size(pvr_dev); in get_ctx_state_size()
83 return get_xfer_ctx_state_size(pvr_dev); in get_ctx_state_size()
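
get_ctx_state_size() then dispatches on the job type. Only the fragment and transfer cases appear in the matches, so the sketch below elides the rest; the DRM_PVR_JOB_TYPE_* spellings and the switch shape are assumptions:

static int get_ctx_state_size(struct pvr_device *pvr_dev, enum drm_pvr_job_type type)
{
	switch (type) {
	case DRM_PVR_JOB_TYPE_FRAGMENT:
		return get_frag_ctx_state_size(pvr_dev);
	case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
		return get_xfer_ctx_state_size(pvr_dev);
	default:
		/* Geometry/compute cases and error handling elided. */
		return -EINVAL;
	}
}
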
443 struct pvr_device *pvr_dev = queue->ctx->pvr_dev; in pvr_queue_get_job_kccb_fence() local
453 kccb_fence = pvr_kccb_reserve_slot(pvr_dev, job->kccb_fence); in pvr_queue_get_job_kccb_fence()
562 struct pvr_device *pvr_dev = queue->ctx->pvr_dev; in pvr_queue_update_active_state_locked() local
564 lockdep_assert_held(&pvr_dev->queues.lock); in pvr_queue_update_active_state_locked()
574 list_move_tail(&queue->node, &pvr_dev->queues.idle); in pvr_queue_update_active_state_locked()
576 list_move_tail(&queue->node, &pvr_dev->queues.active); in pvr_queue_update_active_state_locked()
594 struct pvr_device *pvr_dev = queue->ctx->pvr_dev; in pvr_queue_update_active_state() local
596 mutex_lock(&pvr_dev->queues.lock); in pvr_queue_update_active_state()
598 mutex_unlock(&pvr_dev->queues.lock); in pvr_queue_update_active_state()
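
The matches at lines 562-598 show the usual locked-helper-plus-wrapper split for moving a queue between the per-device idle and active lists. A sketch of that pattern; queue_is_idle() is a hypothetical stand-in for the driver's real in-flight-job test:

static void pvr_queue_update_active_state_locked(struct pvr_queue *queue)
{
	struct pvr_device *pvr_dev = queue->ctx->pvr_dev;

	lockdep_assert_held(&pvr_dev->queues.lock);

	if (queue_is_idle(queue))	/* hypothetical idleness test */
		list_move_tail(&queue->node, &pvr_dev->queues.idle);
	else
		list_move_tail(&queue->node, &pvr_dev->queues.active);
}

static void pvr_queue_update_active_state(struct pvr_queue *queue)
{
	struct pvr_device *pvr_dev = queue->ctx->pvr_dev;

	mutex_lock(&pvr_dev->queues.lock);
	pvr_queue_update_active_state_locked(queue);
	mutex_unlock(&pvr_dev->queues.lock);
}
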
689 struct pvr_device *pvr_dev = job->pvr_dev; in pvr_queue_run_job() local
735 pvr_cccb_send_kccb_combined_kick(pvr_dev, in pvr_queue_run_job()
748 pvr_cccb_send_kccb_kick(pvr_dev, &queue->cccb, in pvr_queue_run_job()
803 struct pvr_device *pvr_dev = queue->ctx->pvr_dev; in pvr_queue_timedout_job() local
819 mutex_lock(&pvr_dev->queues.lock); in pvr_queue_timedout_job()
821 mutex_unlock(&pvr_dev->queues.lock); in pvr_queue_timedout_job()
835 mutex_lock(&pvr_dev->queues.lock); in pvr_queue_timedout_job()
837 list_move_tail(&queue->node, &pvr_dev->queues.idle); in pvr_queue_timedout_job()
840 list_move_tail(&queue->node, &pvr_dev->queues.active); in pvr_queue_timedout_job()
843 mutex_unlock(&pvr_dev->queues.lock); in pvr_queue_timedout_job()
987 lockdep_assert_held(&queue->ctx->pvr_dev->queues.lock); in pvr_queue_process()
1054 return pvr_fw_structure_cleanup(queue->ctx->pvr_dev, in pvr_queue_cleanup_fw_context()
1212 struct pvr_device *pvr_dev = ctx->pvr_dev; in pvr_queue_create() local
1239 ctx_state_size = get_ctx_state_size(pvr_dev, type); in pvr_queue_create()
1257 err = pvr_cccb_init(pvr_dev, &queue->cccb, props[type].cccb_size, props[type].name); in pvr_queue_create()
1261 err = pvr_fw_object_create(pvr_dev, ctx_state_size, in pvr_queue_create()
1275 cpu_map = pvr_fw_object_create_and_map(pvr_dev, sizeof(*queue->timeline_ufo.value), in pvr_queue_create()
1287 pvr_dev->sched_wq, 1, 64 * 1024, 1, in pvr_queue_create()
1289 pvr_dev->sched_wq, NULL, "pvr-queue", in pvr_queue_create()
1290 pvr_dev->base.dev); in pvr_queue_create()
1300 mutex_lock(&pvr_dev->queues.lock); in pvr_queue_create()
1301 list_add_tail(&queue->node, &pvr_dev->queues.idle); in pvr_queue_create()
1302 mutex_unlock(&pvr_dev->queues.lock); in pvr_queue_create()
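
Read in order, the pvr_queue_create() matches trace the creation sequence: size the firmware context state, initialize the CCCB, create the context-state and timeline-UFO firmware objects, set up the DRM GPU scheduler against pvr_dev->sched_wq, and only then publish the queue. The final step, as the matches at lines 1300-1302 show:

	mutex_lock(&pvr_dev->queues.lock);
	list_add_tail(&queue->node, &pvr_dev->queues.idle);
	mutex_unlock(&pvr_dev->queues.lock);

Publishing last means the reset paths below never see a half-constructed queue on either list.
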
1325 void pvr_queue_device_pre_reset(struct pvr_device *pvr_dev) in pvr_queue_device_pre_reset() argument
1329 mutex_lock(&pvr_dev->queues.lock); in pvr_queue_device_pre_reset()
1330 list_for_each_entry(queue, &pvr_dev->queues.idle, node) in pvr_queue_device_pre_reset()
1332 list_for_each_entry(queue, &pvr_dev->queues.active, node) in pvr_queue_device_pre_reset()
1334 mutex_unlock(&pvr_dev->queues.lock); in pvr_queue_device_pre_reset()
1337 void pvr_queue_device_post_reset(struct pvr_device *pvr_dev) in pvr_queue_device_post_reset() argument
1341 mutex_lock(&pvr_dev->queues.lock); in pvr_queue_device_post_reset()
1342 list_for_each_entry(queue, &pvr_dev->queues.active, node) in pvr_queue_device_post_reset()
1344 list_for_each_entry(queue, &pvr_dev->queues.idle, node) in pvr_queue_device_post_reset()
1346 mutex_unlock(&pvr_dev->queues.lock); in pvr_queue_device_post_reset()
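
The reset hooks walk both queue lists under queues.lock and apply a per-queue callback; pre-reset visits idle then active, post-reset visits active then idle. A sketch of the pre-reset side; pvr_queue_reset_prepare() is a hypothetical name for the per-queue call the matches elide:

void pvr_queue_device_pre_reset(struct pvr_device *pvr_dev)
{
	struct pvr_queue *queue;

	mutex_lock(&pvr_dev->queues.lock);
	list_for_each_entry(queue, &pvr_dev->queues.idle, node)
		pvr_queue_reset_prepare(queue);		/* hypothetical helper */
	list_for_each_entry(queue, &pvr_dev->queues.active, node)
		pvr_queue_reset_prepare(queue);		/* hypothetical helper */
	mutex_unlock(&pvr_dev->queues.lock);
}

pvr_queue_device_post_reset() mirrors this with the restart-side call, taking the active list first as the matches at lines 1342-1344 show.
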
1377 mutex_lock(&queue->ctx->pvr_dev->queues.lock); in pvr_queue_destroy()
1379 mutex_unlock(&queue->ctx->pvr_dev->queues.lock); in pvr_queue_destroy()
1406 int pvr_queue_device_init(struct pvr_device *pvr_dev) in pvr_queue_device_init() argument
1410 INIT_LIST_HEAD(&pvr_dev->queues.active); in pvr_queue_device_init()
1411 INIT_LIST_HEAD(&pvr_dev->queues.idle); in pvr_queue_device_init()
1412 err = drmm_mutex_init(from_pvr_device(pvr_dev), &pvr_dev->queues.lock); in pvr_queue_device_init()
1416 pvr_dev->sched_wq = alloc_workqueue("powervr-sched", WQ_UNBOUND, 0); in pvr_queue_device_init()
1417 if (!pvr_dev->sched_wq) in pvr_queue_device_init()
1429 void pvr_queue_device_fini(struct pvr_device *pvr_dev) in pvr_queue_device_fini() argument
1431 destroy_workqueue(pvr_dev->sched_wq); in pvr_queue_device_fini()
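
For completeness, the device-level init/fini pair the last matches come from, reassembled as a sketch; the error returns are assumptions, the calls themselves are the ones listed above:

int pvr_queue_device_init(struct pvr_device *pvr_dev)
{
	int err;

	INIT_LIST_HEAD(&pvr_dev->queues.active);
	INIT_LIST_HEAD(&pvr_dev->queues.idle);

	/* drmm-managed: the mutex is torn down with the drm_device. */
	err = drmm_mutex_init(from_pvr_device(pvr_dev), &pvr_dev->queues.lock);
	if (err)
		return err;

	pvr_dev->sched_wq = alloc_workqueue("powervr-sched", WQ_UNBOUND, 0);
	if (!pvr_dev->sched_wq)
		return -ENOMEM;

	return 0;
}

void pvr_queue_device_fini(struct pvr_device *pvr_dev)
{
	destroy_workqueue(pvr_dev->sched_wq);
}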