Lines matching refs: pvr_dev (cross-reference hits in the PowerVR CCB helpers; the functions listed below appear to come from drivers/gpu/drm/imagination/pvr_ccb.c)

48 pvr_ccb_init(struct pvr_device *pvr_dev, struct pvr_ccb *pvr_ccb,  in pvr_ccb_init()  argument
58 err = drmm_mutex_init(from_pvr_device(pvr_dev), &pvr_ccb->lock); in pvr_ccb_init()
66 pvr_ccb->ctrl = pvr_fw_object_create_and_map(pvr_dev, sizeof(*pvr_ccb->ctrl), in pvr_ccb_init()
72 pvr_ccb->ccb = pvr_fw_object_create_and_map(pvr_dev, ccb_size, in pvr_ccb_init()
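The pvr_ccb_init() hits above show the setup order for a circular command buffer: initialise the CCB lock, allocate and map a firmware object for the control structure, then allocate and map the buffer itself. A minimal userspace-style sketch of that ordering, assuming hypothetical stand-ins: alloc_and_map() plays the role of pvr_fw_object_create_and_map(), a pthread mutex the role of the drmm-managed mutex, and the ccb_ctrl fields are simplified placeholders, not the real rogue_fwif layout.

#include <stdlib.h>
#include <pthread.h>

/* Simplified stand-ins for the firmware-visible control block and buffer. */
struct ccb_ctrl { unsigned int read_offset, write_offset; };

struct ccb {
	pthread_mutex_t lock;	/* drmm-managed mutex in the driver */
	struct ccb_ctrl *ctrl;	/* mapped firmware object in the driver */
	void *buf;		/* the command ring itself */
	size_t buf_size;
};

/* Hypothetical helper; the driver calls pvr_fw_object_create_and_map(),
 * which also creates the firmware-side mapping. */
static void *alloc_and_map(size_t size) { return calloc(1, size); }

static int ccb_init(struct ccb *ccb, size_t buf_size)
{
	if (pthread_mutex_init(&ccb->lock, NULL))
		return -1;

	/* Control structure first, then the ring, mirroring pvr_ccb_init(). */
	ccb->ctrl = alloc_and_map(sizeof(*ccb->ctrl));
	if (!ccb->ctrl)
		goto err_destroy_lock;

	ccb->buf = alloc_and_map(buf_size);
	if (!ccb->buf)
		goto err_free_ctrl;

	ccb->buf_size = buf_size;
	return 0;

err_free_ctrl:
	free(ccb->ctrl);
err_destroy_lock:
	pthread_mutex_destroy(&ccb->lock);
	return -1;
}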
136 process_fwccb_command(struct pvr_device *pvr_dev, struct rogue_fwif_fwccb_cmd *cmd) in process_fwccb_command() argument
140 pvr_power_reset(pvr_dev, false); in process_fwccb_command()
144 pvr_free_list_process_reconstruct_req(pvr_dev, in process_fwccb_command()
149 pvr_free_list_process_grow_req(pvr_dev, &cmd->cmd_data.cmd_free_list_gs); in process_fwccb_command()
153 drm_info(from_pvr_device(pvr_dev), "Received unknown FWCCB command %x\n", in process_fwccb_command()
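process_fwccb_command() evidently dispatches on the firmware command type: a power reset, a freelist reconstruction request, a freelist grow request, or an unknown command that is only logged. A hedged sketch of that dispatch shape; the enum values and stub handlers are hypothetical, the driver uses the ROGUE_FWIF_FWCCB_CMD_* codes and drm_info() for the unknown case.

#include <stdio.h>

/* Hypothetical command codes standing in for ROGUE_FWIF_FWCCB_CMD_*. */
enum fwccb_cmd_type {
	FWCCB_CMD_POWER_RESET,
	FWCCB_CMD_FREELIST_RECONSTRUCT,
	FWCCB_CMD_FREELIST_GROW,
};

struct fwccb_cmd { enum fwccb_cmd_type type; /* plus per-command payload */ };

/* Stubs standing in for pvr_power_reset() and the freelist helpers. */
static void power_reset(void) { }
static void freelist_reconstruct(void) { }
static void freelist_grow(void) { }

static void process_fwccb_cmd(const struct fwccb_cmd *cmd)
{
	switch (cmd->type) {
	case FWCCB_CMD_POWER_RESET:
		power_reset();
		break;
	case FWCCB_CMD_FREELIST_RECONSTRUCT:
		freelist_reconstruct();
		break;
	case FWCCB_CMD_FREELIST_GROW:
		freelist_grow();
		break;
	default:
		/* The driver logs unknown commands via drm_info(). */
		fprintf(stderr, "unknown FWCCB command %d\n", cmd->type);
		break;
	}
}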
163 void pvr_fwccb_process(struct pvr_device *pvr_dev) in pvr_fwccb_process() argument
165 struct rogue_fwif_fwccb_cmd *fwccb = pvr_dev->fwccb.ccb; in pvr_fwccb_process()
166 struct rogue_fwif_ccb_ctl *ctrl = pvr_dev->fwccb.ctrl; in pvr_fwccb_process()
169 mutex_lock(&pvr_dev->fwccb.lock); in pvr_fwccb_process()
177 mutex_unlock(&pvr_dev->fwccb.lock); in pvr_fwccb_process()
179 process_fwccb_command(pvr_dev, &cmd); in pvr_fwccb_process()
181 mutex_lock(&pvr_dev->fwccb.lock); in pvr_fwccb_process()
184 mutex_unlock(&pvr_dev->fwccb.lock); in pvr_fwccb_process()
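In pvr_fwccb_process() the FWCCB lock is taken, dropped around process_fwccb_command(), and re-taken afterwards, which suggests each command is copied out of the ring before it is handled so the handler can run (and possibly sleep) without holding the CCB lock. A simplified model of that copy-then-process loop, with a plain mutex and array in place of the firmware-shared ring; the loop condition and offset update are assumptions based on the visible lock pattern.

#include <pthread.h>
#include <string.h>

struct fwccb_cmd { int type; char payload[32]; };

struct fwccb {
	pthread_mutex_t lock;
	struct fwccb_cmd *ring;		/* written by the firmware in the driver */
	unsigned int slot_count;
	unsigned int read_offset;	/* next command to consume */
	unsigned int write_offset;	/* updated by the producer */
};

static void handle_cmd(const struct fwccb_cmd *cmd) { (void)cmd; /* may sleep */ }

static void fwccb_process(struct fwccb *ccb)
{
	pthread_mutex_lock(&ccb->lock);

	while (ccb->read_offset != ccb->write_offset) {
		struct fwccb_cmd cmd;

		/* Copy the command out so the lock can be dropped while it
		 * is processed, as the unlock/lock pair around
		 * process_fwccb_command() suggests. */
		memcpy(&cmd, &ccb->ring[ccb->read_offset], sizeof(cmd));

		pthread_mutex_unlock(&ccb->lock);
		handle_cmd(&cmd);
		pthread_mutex_lock(&ccb->lock);

		ccb->read_offset = (ccb->read_offset + 1) % ccb->slot_count;
	}

	pthread_mutex_unlock(&ccb->lock);
}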
194 static u32 pvr_kccb_capacity(struct pvr_device *pvr_dev) in pvr_kccb_capacity() argument
201 return pvr_dev->kccb.slot_count - 1; in pvr_kccb_capacity()
214 pvr_kccb_used_slot_count_locked(struct pvr_device *pvr_dev) in pvr_kccb_used_slot_count_locked() argument
216 struct pvr_ccb *pvr_ccb = &pvr_dev->kccb.ccb; in pvr_kccb_used_slot_count_locked()
227 used_count = wr_offset + pvr_dev->kccb.slot_count - rd_offset; in pvr_kccb_used_slot_count_locked()
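pvr_kccb_capacity() and pvr_kccb_used_slot_count_locked() read like standard ring-buffer bookkeeping: one slot is given up so a full ring can be told apart from an empty one, and the used count is the write/read offset difference with wrap-around handled by adding slot_count. A standalone sketch of that arithmetic, with plain integers (kccb_model, a hypothetical name) standing in for the offsets kept in the CCB control structure.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for pvr_dev->kccb.slot_count and the read/write
 * offsets held in the firmware-shared control structure. */
struct kccb_model {
	uint32_t slot_count;
	uint32_t rd_offset;	/* firmware consumes from here */
	uint32_t wr_offset;	/* kernel produces at here */
};

/* One slot is sacrificed so "full" and "empty" are distinguishable. */
static uint32_t kccb_capacity(const struct kccb_model *k)
{
	return k->slot_count - 1;
}

/* Used slots, accounting for wrap-around of the write offset. */
static uint32_t kccb_used_slots(const struct kccb_model *k)
{
	if (k->wr_offset >= k->rd_offset)
		return k->wr_offset - k->rd_offset;
	return k->wr_offset + k->slot_count - k->rd_offset;
}

int main(void)
{
	struct kccb_model k = { .slot_count = 16, .rd_offset = 14, .wr_offset = 3 };

	printf("capacity=%u used=%u\n", kccb_capacity(&k), kccb_used_slots(&k));
	return 0;
}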
240 pvr_kccb_send_cmd_reserved_powered(struct pvr_device *pvr_dev, in pvr_kccb_send_cmd_reserved_powered() argument
244 struct pvr_ccb *pvr_ccb = &pvr_dev->kccb.ccb; in pvr_kccb_send_cmd_reserved_powered()
250 WARN_ON(pvr_dev->lost); in pvr_kccb_send_cmd_reserved_powered()
254 if (WARN_ON(!pvr_dev->kccb.reserved_count)) in pvr_kccb_send_cmd_reserved_powered()
268 WRITE_ONCE(pvr_dev->kccb.rtn[old_write_offset], in pvr_kccb_send_cmd_reserved_powered()
273 pvr_dev->kccb.reserved_count--; in pvr_kccb_send_cmd_reserved_powered()
276 pvr_fw_mts_schedule(pvr_dev, in pvr_kccb_send_cmd_reserved_powered()
291 static bool pvr_kccb_try_reserve_slot(struct pvr_device *pvr_dev) in pvr_kccb_try_reserve_slot() argument
296 mutex_lock(&pvr_dev->kccb.ccb.lock); in pvr_kccb_try_reserve_slot()
298 used_count = pvr_kccb_used_slot_count_locked(pvr_dev); in pvr_kccb_try_reserve_slot()
299 if (pvr_dev->kccb.reserved_count < pvr_kccb_capacity(pvr_dev) - used_count) { in pvr_kccb_try_reserve_slot()
300 pvr_dev->kccb.reserved_count++; in pvr_kccb_try_reserve_slot()
304 mutex_unlock(&pvr_dev->kccb.ccb.lock); in pvr_kccb_try_reserve_slot()
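pvr_kccb_try_reserve_slot() appears to treat reservations as a counter guarded by the KCCB's CCB lock: a reservation is granted only while reserved_count stays below the capacity minus the slots already in use. A minimal single-threaded sketch of that check; in the driver the comparison is made with pvr_dev->kccb.ccb.lock held.

#include <stdbool.h>
#include <stdint.h>

struct kccb_state {
	uint32_t slot_count;	/* total ring slots */
	uint32_t used_count;	/* slots currently queued to the firmware */
	uint32_t reserved_count;/* slots promised to future commands */
};

/* One slot is unusable so a full ring is distinguishable from an empty one. */
static uint32_t capacity(const struct kccb_state *k)
{
	return k->slot_count - 1;
}

/* Grant a reservation only if a slot is still guaranteed to be free once
 * every existing reservation has been consumed. */
static bool try_reserve_slot(struct kccb_state *k)
{
	if (k->reserved_count < capacity(k) - k->used_count) {
		k->reserved_count++;
		return true;
	}
	return false;
}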
318 static int pvr_kccb_reserve_slot_sync(struct pvr_device *pvr_dev) in pvr_kccb_reserve_slot_sync() argument
326 reserved = pvr_kccb_try_reserve_slot(pvr_dev); in pvr_kccb_reserve_slot_sync()
350 pvr_kccb_send_cmd_powered(struct pvr_device *pvr_dev, struct rogue_fwif_kccb_cmd *cmd, in pvr_kccb_send_cmd_powered() argument
355 err = pvr_kccb_reserve_slot_sync(pvr_dev); in pvr_kccb_send_cmd_powered()
359 pvr_kccb_send_cmd_reserved_powered(pvr_dev, cmd, kccb_slot); in pvr_kccb_send_cmd_powered()
374 pvr_kccb_send_cmd(struct pvr_device *pvr_dev, struct rogue_fwif_kccb_cmd *cmd, in pvr_kccb_send_cmd() argument
379 err = pvr_power_get(pvr_dev); in pvr_kccb_send_cmd()
383 err = pvr_kccb_send_cmd_powered(pvr_dev, cmd, kccb_slot); in pvr_kccb_send_cmd()
385 pvr_power_put(pvr_dev); in pvr_kccb_send_cmd()
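pvr_kccb_send_cmd() brackets the powered submission in a power reference: take a reference, submit via the powered path, drop the reference, and propagate any error from either step. A short sketch of that wrapper; power_get(), power_put() and send_cmd_powered() are hypothetical stubs standing in for pvr_power_get(), pvr_power_put() and pvr_kccb_send_cmd_powered().

/* Hypothetical stand-ins for the power helpers and powered submission path. */
static int power_get(void) { return 0; }
static void power_put(void) { }
static int send_cmd_powered(const void *cmd, unsigned int *slot_out)
{
	(void)cmd; *slot_out = 0; return 0;
}

/* Hold a power reference for the duration of the submission, mirroring the
 * pvr_power_get()/pvr_power_put() pair in pvr_kccb_send_cmd(). */
static int send_cmd(const void *cmd, unsigned int *slot_out)
{
	int err = power_get();

	if (err)
		return err;

	err = send_cmd_powered(cmd, slot_out);

	power_put();

	return err;
}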
402 pvr_kccb_wait_for_completion(struct pvr_device *pvr_dev, u32 slot_nr, in pvr_kccb_wait_for_completion() argument
405 int ret = wait_event_timeout(pvr_dev->kccb.rtn_q, READ_ONCE(pvr_dev->kccb.rtn[slot_nr]) & in pvr_kccb_wait_for_completion()
409 *rtn_out = READ_ONCE(pvr_dev->kccb.rtn[slot_nr]); in pvr_kccb_wait_for_completion()
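pvr_kccb_wait_for_completion() waits on kccb.rtn_q until the return word for the slot has the expected flag set, with a timeout, and then hands the final return flags back through rtn_out. A plain polling model of that contract; the real driver sleeps with wait_event_timeout() and reads the shared return array with READ_ONCE(), and both the RTN_DONE bit and the retry budget below are hypothetical.

#include <stdbool.h>
#include <stdint.h>

#define RTN_DONE	0x1u	/* hypothetical completion flag */

/* Poll the per-slot return word until the flag appears or the budget runs
 * out; the driver sleeps on the kccb.rtn_q waitqueue instead of spinning. */
static bool wait_for_slot(const volatile uint32_t *rtn, uint32_t slot_nr,
			  uint32_t *rtn_out, unsigned int max_polls)
{
	for (unsigned int i = 0; i < max_polls; i++) {
		uint32_t val = rtn[slot_nr];

		if (val & RTN_DONE) {
			*rtn_out = val;	/* hand back the final return flags */
			return true;
		}
	}

	return false;	/* timed out, like wait_event_timeout() returning 0 */
}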
423 pvr_kccb_is_idle(struct pvr_device *pvr_dev) in pvr_kccb_is_idle() argument
425 struct rogue_fwif_ccb_ctl *ctrl = pvr_dev->kccb.ccb.ctrl; in pvr_kccb_is_idle()
428 mutex_lock(&pvr_dev->kccb.ccb.lock); in pvr_kccb_is_idle()
432 mutex_unlock(&pvr_dev->kccb.ccb.lock); in pvr_kccb_is_idle()
471 void pvr_kccb_wake_up_waiters(struct pvr_device *pvr_dev) in pvr_kccb_wake_up_waiters() argument
477 wake_up_all(&pvr_dev->kccb.rtn_q); in pvr_kccb_wake_up_waiters()
480 mutex_lock(&pvr_dev->kccb.ccb.lock); in pvr_kccb_wake_up_waiters()
481 used_count = pvr_kccb_used_slot_count_locked(pvr_dev); in pvr_kccb_wake_up_waiters()
483 if (WARN_ON(used_count + pvr_dev->kccb.reserved_count > pvr_kccb_capacity(pvr_dev))) in pvr_kccb_wake_up_waiters()
486 available_count = pvr_kccb_capacity(pvr_dev) - used_count - pvr_dev->kccb.reserved_count; in pvr_kccb_wake_up_waiters()
487 list_for_each_entry_safe(fence, tmp_fence, &pvr_dev->kccb.waiters, node) { in pvr_kccb_wake_up_waiters()
492 pvr_dev->kccb.reserved_count++; in pvr_kccb_wake_up_waiters()
499 mutex_unlock(&pvr_dev->kccb.ccb.lock); in pvr_kccb_wake_up_waiters()
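pvr_kccb_wake_up_waiters() first wakes everyone sleeping on the return-slot waitqueue, then, under the CCB lock, recomputes the free capacity (capacity minus used minus reserved, with a WARN if the accounting is inconsistent) and converts queued waiters into reservations while capacity lasts. A simplified model of that hand-out loop, with a flag array standing in for signalling the dma_fence of each waiter on the kccb.waiters list.

#include <stdbool.h>
#include <stdint.h>

struct kccb_acct {
	uint32_t slot_count;
	uint32_t used_count;
	uint32_t reserved_count;
};

/* Grant freed capacity to queued waiters in order; each granted waiter
 * becomes a reservation. `signalled` is a hypothetical stand-in for
 * signalling the waiter's fence. Returns how many waiters were granted. */
static unsigned int hand_out_slots(struct kccb_acct *k,
				   bool *signalled, unsigned int nr_waiters)
{
	uint32_t cap = k->slot_count - 1;
	uint32_t available;
	unsigned int granted = 0;

	if (k->used_count + k->reserved_count > cap)
		return 0;	/* inconsistent accounting; the driver WARNs here */

	available = cap - k->used_count - k->reserved_count;

	for (unsigned int i = 0; i < nr_waiters && available; i++) {
		if (signalled[i])
			continue;	/* already granted earlier */
		signalled[i] = true;	/* the driver signals the waiter's fence */
		k->reserved_count++;
		available--;
		granted++;
	}

	return granted;
}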
506 void pvr_kccb_fini(struct pvr_device *pvr_dev) in pvr_kccb_fini() argument
508 pvr_ccb_fini(&pvr_dev->kccb.ccb); in pvr_kccb_fini()
509 WARN_ON(!list_empty(&pvr_dev->kccb.waiters)); in pvr_kccb_fini()
510 WARN_ON(pvr_dev->kccb.reserved_count); in pvr_kccb_fini()
522 pvr_kccb_init(struct pvr_device *pvr_dev) in pvr_kccb_init() argument
524 pvr_dev->kccb.slot_count = 1 << ROGUE_FWIF_KCCB_NUMCMDS_LOG2_DEFAULT; in pvr_kccb_init()
525 INIT_LIST_HEAD(&pvr_dev->kccb.waiters); in pvr_kccb_init()
526 pvr_dev->kccb.fence_ctx.id = dma_fence_context_alloc(1); in pvr_kccb_init()
527 spin_lock_init(&pvr_dev->kccb.fence_ctx.lock); in pvr_kccb_init()
529 return pvr_ccb_init(pvr_dev, &pvr_dev->kccb.ccb, in pvr_kccb_init()
588 pvr_kccb_reserve_slot(struct pvr_device *pvr_dev, struct dma_fence *f) in pvr_kccb_reserve_slot() argument
594 mutex_lock(&pvr_dev->kccb.ccb.lock); in pvr_kccb_reserve_slot()
596 used_count = pvr_kccb_used_slot_count_locked(pvr_dev); in pvr_kccb_reserve_slot()
597 if (pvr_dev->kccb.reserved_count >= pvr_kccb_capacity(pvr_dev) - used_count) { in pvr_kccb_reserve_slot()
599 &pvr_dev->kccb.fence_ctx.lock, in pvr_kccb_reserve_slot()
600 pvr_dev->kccb.fence_ctx.id, in pvr_kccb_reserve_slot()
601 atomic_inc_return(&pvr_dev->kccb.fence_ctx.seqno)); in pvr_kccb_reserve_slot()
603 list_add_tail(&fence->node, &pvr_dev->kccb.waiters); in pvr_kccb_reserve_slot()
606 pvr_dev->kccb.reserved_count++; in pvr_kccb_reserve_slot()
609 mutex_unlock(&pvr_dev->kccb.ccb.lock); in pvr_kccb_reserve_slot()
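pvr_kccb_reserve_slot() combines the two paths: under the CCB lock, either the reservation counter is bumped immediately, or, when the ring is saturated, a dma_fence is initialised from the KCCB fence context and queued on the waiters list for pvr_kccb_wake_up_waiters() to signal later. A hedged sketch of that decision, modelling the fence with a plain "ticket" structure; the structure, its fields and the unbounded waiter array are all illustrative assumptions.

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

/* Hypothetical ticket standing in for the driver's dma_fence waiter. */
struct slot_ticket {
	uint64_t seqno;
	bool signalled;
};

struct kccb_resv {
	uint32_t slot_count;
	uint32_t used_count;
	uint32_t reserved_count;
	uint64_t next_seqno;		/* models the fence context seqno */
	struct slot_ticket *waiters;	/* FIFO of pending reservations */
	unsigned int nr_waiters;
};

/* Reserve a slot now if possible; otherwise queue a ticket the caller can
 * wait on, mirroring the fence handed back by pvr_kccb_reserve_slot().
 * No bounds checking on the waiter array; illustration only. */
static struct slot_ticket *reserve_slot(struct kccb_resv *k)
{
	uint32_t cap = k->slot_count - 1;

	if (k->reserved_count >= cap - k->used_count) {
		struct slot_ticket *t = &k->waiters[k->nr_waiters++];

		t->seqno = ++k->next_seqno;
		t->signalled = false;
		return t;	/* caller waits until this is signalled */
	}

	k->reserved_count++;
	return NULL;		/* reserved immediately */
}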
623 void pvr_kccb_release_slot(struct pvr_device *pvr_dev) in pvr_kccb_release_slot() argument
625 mutex_lock(&pvr_dev->kccb.ccb.lock); in pvr_kccb_release_slot()
626 if (!WARN_ON(!pvr_dev->kccb.reserved_count)) in pvr_kccb_release_slot()
627 pvr_dev->kccb.reserved_count--; in pvr_kccb_release_slot()
628 mutex_unlock(&pvr_dev->kccb.ccb.lock); in pvr_kccb_release_slot()
640 pvr_fwccb_init(struct pvr_device *pvr_dev) in pvr_fwccb_init() argument
642 return pvr_ccb_init(pvr_dev, &pvr_dev->fwccb, in pvr_fwccb_init()