Searched refs:cmdq (Results 1 – 25 of 61) sorted by relevance

/drivers/infiniband/hw/erdma/
erdma_cmdq.c
90 struct erdma_cmdq *cmdq = &dev->cmdq; in erdma_cmdq_sq_init() local
125 struct erdma_cmdq *cmdq = &dev->cmdq; in erdma_cmdq_cq_init() local
159 struct erdma_cmdq *cmdq = &dev->cmdq; in erdma_cmdq_eq_init() local
181 struct erdma_cmdq *cmdq = &dev->cmdq; in erdma_cmdq_init() local
210 cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr); in erdma_cmdq_init()
216 cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr); in erdma_cmdq_init()
230 struct erdma_cmdq *cmdq = &dev->cmdq; in erdma_cmdq_destroy() local
237 cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr); in erdma_cmdq_destroy()
267 wqe = get_queue_entry(cmdq->sq.qbuf, cmdq->sq.pi, cmdq->sq.depth, in push_cmdq_sqe()
271 cmdq->sq.pi += cmdq->sq.wqebb_cnt; in push_cmdq_sqe()
[all …]
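
The push_cmdq_sqe() hits above show the usual producer-index scheme: cmdq->sq.pi only ever grows, the slot address is derived from pi modulo the queue depth, and pi is then advanced by the footprint of the work request (wqebb_cnt). A minimal standalone sketch of that indexing follows; get_queue_entry(), SQE_SIZE and the struct layout are invented for illustration, not taken from the erdma driver.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define SQE_SIZE 64 /* illustrative entry size */

    struct demo_sq {
        void     *qbuf;      /* queue buffer, depth * SQE_SIZE bytes */
        uint16_t  pi;        /* monotonically increasing producer index */
        uint16_t  depth;     /* number of entries, power of two */
        uint16_t  wqebb_cnt; /* entries consumed by one work request */
    };

    /* Map a monotonically increasing index onto a slot in the ring. */
    static void *get_queue_entry(void *qbuf, uint16_t idx, uint16_t depth)
    {
        return (char *)qbuf + (size_t)(idx & (depth - 1)) * SQE_SIZE;
    }

    static void *push_sqe(struct demo_sq *sq)
    {
        void *wqe = get_queue_entry(sq->qbuf, sq->pi, sq->depth);

        sq->pi += sq->wqebb_cnt; /* advance producer by the WQE footprint */
        return wqe;
    }

    int main(void)
    {
        struct demo_sq sq = { .depth = 8, .wqebb_cnt = 1 };

        sq.qbuf = calloc(sq.depth, SQE_SIZE);
        for (int i = 0; i < 10; i++)
            printf("request %d -> slot %td\n", i,
                   ((char *)push_sqe(&sq) - (char *)sq.qbuf) / SQE_SIZE);
        free(sq.qbuf);
        return 0;
    }
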
erdma.h
200 struct erdma_cmdq cmdq; member
267 int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
269 void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq);
erdma_verbs.c
129 err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0, &resp1, in create_qp_cmd()
188 return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL, in regmr_cmd()
251 return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL, in create_cq_cmd()
468 erdma_post_cmd_wait(&qp->dev->cmdq, &req, sizeof(req), NULL, NULL, in erdma_flush_worker()
1272 ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL, in erdma_dereg_mr()
1298 err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL, in erdma_destroy_cq()
1346 err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL, in erdma_destroy_qp()
1481 ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL, in free_db_resources()
1845 ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0, in erdma_query_qp()
2079 err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL, in erdma_query_hw_stats()
[all …]
/drivers/mailbox/
mtk-cmdq-mailbox.c
69 struct cmdq *cmdq; member
97 struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox); in cmdq_get_shift_pa() argument
141 static void cmdq_init(struct cmdq *cmdq) in cmdq_init() argument
224 struct cmdq *cmdq = task->cmdq; in cmdq_task_handle_error() local
289 struct cmdq *cmdq = dev; in cmdq_irq_handler() local
312 struct cmdq *cmdq = dev_get_drvdata(dev); in cmdq_runtime_resume() local
325 struct cmdq *cmdq = dev_get_drvdata(dev); in cmdq_runtime_suspend() local
334 struct cmdq *cmdq = dev_get_drvdata(dev); in cmdq_suspend() local
397 task->cmdq = cmdq; in cmdq_mbox_send_data()
629 struct cmdq *cmdq; in cmdq_probe() local
[all …]
/drivers/gpu/drm/nouveau/nvkm/falcon/
cmdq.c
28 u32 head = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->head_reg); in nvkm_falcon_cmdq_has_room()
29 u32 tail = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->tail_reg); in nvkm_falcon_cmdq_has_room()
35 free = cmdq->offset + cmdq->size - head; in nvkm_falcon_cmdq_has_room()
67 cmdq->position = cmdq->offset; in nvkm_falcon_cmdq_rewind()
84 cmdq->position = nvkm_falcon_rd32(falcon, cmdq->head_reg); in nvkm_falcon_cmdq_open()
95 nvkm_falcon_wr32(cmdq->qmgr->falcon, cmdq->head_reg, cmdq->position); in nvkm_falcon_cmdq_close()
180 cmdq->head_reg = func->cmdq.head + index * func->cmdq.stride; in nvkm_falcon_cmdq_init()
181 cmdq->tail_reg = func->cmdq.tail + index * func->cmdq.stride; in nvkm_falcon_cmdq_init()
187 index, cmdq->offset, cmdq->size); in nvkm_falcon_cmdq_init()
194 if (cmdq) { in nvkm_falcon_cmdq_del()
[all …]
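
nvkm_falcon_cmdq_has_room() reads the queue head and tail back from falcon registers and computes the free space from them; the "free = cmdq->offset + cmdq->size - head" hit above is the "space up to the end of the window" term. Below is a generic head/tail free-space check for a ring occupying [offset, offset + size); it sketches the idea only, and the wrap-around branch is an assumption, not the nouveau code.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct demo_cmdq {
        uint32_t offset; /* start of the queue window */
        uint32_t size;   /* size of the queue window in bytes */
        uint32_t head;   /* producer position (from head_reg) */
        uint32_t tail;   /* consumer position (from tail_reg) */
    };

    /*
     * Free space in a ring delimited by hardware head/tail pointers:
     * when the head is at or past the tail, the free region is the space
     * up to the end of the window plus the space before the tail;
     * otherwise it is just the gap between head and tail.
     */
    static bool cmdq_has_room(const struct demo_cmdq *q, uint32_t need)
    {
        uint32_t free;

        if (q->head >= q->tail)
            free = (q->offset + q->size - q->head) + (q->tail - q->offset);
        else
            free = q->tail - q->head;

        return free > need; /* leave slack so head never catches tail */
    }

    int main(void)
    {
        struct demo_cmdq q = { .offset = 0x1000, .size = 0x100,
                               .head = 0x10c0, .tail = 0x1040 };

        printf("room for 64 bytes: %d\n", cmdq_has_room(&q, 64));
        return 0;
    }
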
/drivers/crypto/cavium/nitrox/
nitrox_lib.c
29 cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes; in nitrox_cmdq_init()
30 cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize, in nitrox_cmdq_init()
36 cmdq->dma = PTR_ALIGN(cmdq->unalign_dma, align_bytes); in nitrox_cmdq_init()
37 cmdq->base = cmdq->unalign_base + (cmdq->dma - cmdq->unalign_dma); in nitrox_cmdq_init()
64 if (!cmdq) in nitrox_cmdq_cleanup()
74 cmdq->unalign_base, cmdq->unalign_dma); in nitrox_cmdq_cleanup()
82 cmdq->dma = 0; in nitrox_cmdq_cleanup()
83 cmdq->qsize = 0; in nitrox_cmdq_cleanup()
106 cmdq = kzalloc_node(sizeof(*cmdq), GFP_KERNEL, ndev->node); in nitrox_alloc_aqm_queues()
107 if (!cmdq) { in nitrox_alloc_aqm_queues()
[all …]
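
nitrox_cmdq_init() over-allocates the command queue by align_bytes, aligns the DMA address with PTR_ALIGN(), and shifts the CPU pointer by the same delta so both views start at the aligned first instruction. The same arithmetic is shown below with malloc() standing in for dma_alloc_coherent(); PTR_ALIGN_UP and the variable names are illustrative.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Round x up to the next multiple of a (a must be a power of two). */
    #define PTR_ALIGN_UP(x, a) \
        (((uintptr_t)(x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

    int main(void)
    {
        size_t qlen = 64, instr_size = 64, align_bytes = 4096;
        size_t qsize = qlen * instr_size + align_bytes; /* slack for alignment */

        /* Stand-in for dma_alloc_coherent(): one unaligned CPU/DMA pair. */
        void *unalign_base = malloc(qsize);
        uintptr_t unalign_dma = (uintptr_t)unalign_base; /* pretend bus address */

        /*
         * Align the bus address, then move the CPU pointer by the same
         * delta so both views point at the same (aligned) first entry.
         */
        uintptr_t dma = PTR_ALIGN_UP(unalign_dma, align_bytes);
        void *base = (char *)unalign_base + (dma - unalign_dma);

        printf("unaligned %p, aligned base %p, delta %zu bytes\n",
               unalign_base, base, (size_t)(dma - unalign_dma));

        free(unalign_base); /* always free the original allocation */
        return 0;
    }
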
nitrox_reqmgr.c
296 idx = cmdq->write_idx; in post_se_instr()
298 ent = cmdq->base + (idx * cmdq->instr_size); in post_se_instr()
302 response_list_add(sr, cmdq); in post_se_instr()
342 post_se_instr(sr, cmdq); in post_backlog_cmds()
351 struct nitrox_cmdq *cmdq = sr->cmdq; in nitrox_enqueue_request() local
355 post_backlog_cmds(cmdq); in nitrox_enqueue_request()
364 backlog_list_add(sr, cmdq); in nitrox_enqueue_request()
367 post_se_instr(sr, cmdq); in nitrox_enqueue_request()
507 struct nitrox_cmdq *cmdq; in backlog_qflush_work() local
510 post_backlog_cmds(cmdq); in backlog_qflush_work()
[all …]
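
nitrox_enqueue_request() first retries anything already on the backlog and, only if the hardware ring is still full, parks the new request on the backlog list for backlog_qflush_work() to post later. A compact sketch of that post-or-backlog flow; the ring and backlog representation below is invented for the example.

    #include <stdbool.h>
    #include <stdio.h>

    #define RING_DEPTH 4

    struct demo_ring {
        int inflight;                 /* entries currently in the HW ring */
        int backlog[16], backlog_len; /* software backlog of pending reqs */
    };

    static bool ring_full(const struct demo_ring *r)
    {
        return r->inflight >= RING_DEPTH;
    }

    static void post_instr(struct demo_ring *r, int req)
    {
        r->inflight++;
        printf("posted req %d (inflight=%d)\n", req, r->inflight);
    }

    /* Retry everything that was backlogged while the ring was full. */
    static void post_backlog_cmds(struct demo_ring *r)
    {
        int kept = 0;

        for (int i = 0; i < r->backlog_len; i++) {
            if (!ring_full(r))
                post_instr(r, r->backlog[i]);
            else
                r->backlog[kept++] = r->backlog[i];
        }
        r->backlog_len = kept;
    }

    static void enqueue_request(struct demo_ring *r, int req)
    {
        post_backlog_cmds(r);                   /* drain older work first */
        if (ring_full(r)) {
            r->backlog[r->backlog_len++] = req; /* park it for later */
            printf("backlogged req %d\n", req);
            return;
        }
        post_instr(r, req);
    }

    int main(void)
    {
        struct demo_ring r = { 0 };

        for (int req = 0; req < 6; req++)
            enqueue_request(&r, req);
        r.inflight = 0;        /* pretend completions drained the ring */
        post_backlog_cmds(&r); /* what backlog_qflush_work() would do */
        return 0;
    }
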
nitrox_isr.c
32 struct nitrox_cmdq *cmdq = qvec->cmdq; in nps_pkt_slc_isr() local
34 slc_cnts.value = readq(cmdq->compl_cnt_csr_addr); in nps_pkt_slc_isr()
337 qvec->cmdq = &ndev->pkt_inq[qvec->ring]; in nitrox_register_interrupts()
/drivers/net/ethernet/brocade/bna/
bfa_msgq.c
56 cmdq->flags = 0; in cmdq_sm_stopped_entry()
57 cmdq->token = 0; in cmdq_sm_stopped_entry()
58 cmdq->offset = 0; in cmdq_sm_stopped_entry()
195 if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->dbell_mb, in bfa_msgq_cmdq_dbell()
218 BFA_MSGQ_INDX_ADD(cmdq->producer_index, 1, cmdq->depth); in __cmd_copy()
268 cmdq->token = 0; in bfa_msgq_cmdq_copy_req()
290 cmdq->token++; in bfa_msgq_cmdq_copy_rsp()
294 if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->copy_mb, in bfa_msgq_cmdq_copy_rsp()
305 cmdq->msgq = msgq; in bfa_msgq_cmdq_attach()
515 bfa_dma_be_addr_set(msgq_cfg->cmdq.addr, msgq->cmdq.addr.pa); in bfa_msgq_init()
[all …]
/drivers/accel/ivpu/
ivpu_job.c
84 cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL); in ivpu_cmdq_alloc()
85 if (!cmdq) in ivpu_cmdq_alloc()
96 return cmdq; in ivpu_cmdq_alloc()
99 kfree(cmdq); in ivpu_cmdq_alloc()
167 cmdq->entry_count = ivpu_cmdq_get_entry_count(cmdq); in ivpu_cmdq_create()
220 cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem)); in ivpu_register_db()
224 cmdq->db_id, cmdq->id, file_priv->ctx.id, cmdq->priority); in ivpu_register_db()
314 if (cmdq->is_legacy && cmdq->priority == priority) in ivpu_cmdq_acquire_legacy()
683 cmdq->jobq->header.head = cmdq->jobq->header.tail; in ivpu_job_submit()
920 if (cmdq) in ivpu_cmdq_create_ioctl()
[all …]
ivpu_mmu.c
373 struct ivpu_mmu_queue *q = &mmu->cmdq; in ivpu_mmu_cmdq_alloc()
465 struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq; in ivpu_mmu_cmdq_wait_for_cons() local
473 cmdq->cons = cmdq->prod; in ivpu_mmu_cmdq_wait_for_cons()
492 struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq; in ivpu_mmu_cmdq_cmd_write() local
493 u64 *queue_buffer = cmdq->base; in ivpu_mmu_cmdq_cmd_write()
496 if (ivpu_mmu_queue_is_full(cmdq)) { in ivpu_mmu_cmdq_cmd_write()
503 cmdq->prod = (cmdq->prod + 1) & IVPU_MMU_Q_WRAP_MASK; in ivpu_mmu_cmdq_cmd_write()
512 struct ivpu_mmu_queue *q = &vdev->mmu->cmdq; in ivpu_mmu_cmdq_sync()
570 memset(mmu->cmdq.base, 0, IVPU_MMU_CMDQ_SIZE); in ivpu_mmu_reset()
573 mmu->cmdq.prod = 0; in ivpu_mmu_reset()
[all …]
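
ivpu_mmu_cmdq_cmd_write() advances the producer with (prod + 1) & IVPU_MMU_Q_WRAP_MASK, i.e. the index carries an extra wrap bit above the slot bits. With that encoding (the convention SMMU-style queues use), prod == cons means empty and "same slot, opposite wrap bit" means full, so no slot has to be sacrificed. A small sketch of the scheme; the constants and helpers are illustrative, not the ivpu definitions.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define Q_COUNT     8U                    /* entries, power of two */
    #define Q_IDX_MASK  (Q_COUNT - 1)         /* low bits: slot index */
    #define Q_WRAP_MASK ((Q_COUNT << 1) - 1)  /* index bits + one wrap bit */

    struct demo_q {
        uint32_t prod, cons; /* both carry index + wrap bit */
    };

    static bool q_empty(const struct demo_q *q)
    {
        return q->prod == q->cons;
    }

    static bool q_full(const struct demo_q *q)
    {
        /* Same slot index but opposite wrap bit: producer lapped consumer. */
        return (q->prod & Q_IDX_MASK) == (q->cons & Q_IDX_MASK) &&
               (q->prod & Q_COUNT) != (q->cons & Q_COUNT);
    }

    static int q_push(struct demo_q *q)
    {
        if (q_full(q))
            return -1;
        q->prod = (q->prod + 1) & Q_WRAP_MASK; /* advance, wrap bit included */
        return 0;
    }

    int main(void)
    {
        struct demo_q q = { 0 };
        unsigned int pushed = 0;

        while (q_push(&q) == 0)
            pushed++;
        printf("full after %u pushes, empty=%d\n", pushed, q_empty(&q));
        return 0;
    }
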
/drivers/net/ethernet/huawei/hinic/
hinic_hw_cmdq.c
78 #define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \ argument
364 cmdq->wrapped = !cmdq->wrapped; in cmdq_sync_cmd_direct_resp()
443 cmdq->wrapped = !cmdq->wrapped; in cmdq_set_arm_bit()
510 struct hinic_cmdq *cmdq = &cmdqs->cmdq[HINIC_CMDQ_SYNC]; in hinic_set_arm_bit() local
643 struct hinic_cmdq *cmdq = &cmdqs->cmdq[cmdq_type]; in cmdq_ceq_handler() local
743 cmdq->wq = wq; in init_cmdq()
745 cmdq->wrapped = 1; in init_cmdq()
749 cmdq->done = vzalloc(array_size(sizeof(*cmdq->done), wq->q_depth)); in init_cmdq()
750 if (!cmdq->done) in init_cmdq()
753 cmdq->errcode = vzalloc(array_size(sizeof(*cmdq->errcode), in init_cmdq()
[all …]
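
init_cmdq() starts with cmdq->wrapped = 1, and cmdq_sync_cmd_direct_resp()/cmdq_set_arm_bit() flip it whenever the producer index wraps. In this kind of owner/phase-bit scheme the current value is stamped into each WQE so the consumer can tell freshly written entries from stale ones left over from the previous lap. A toy version of the idea follows; the structures and field names are made up for the example rather than copied from the hinic driver.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DEPTH 4

    struct demo_wqe {
        uint8_t  wrapped; /* phase bit written by the producer */
        uint32_t data;
    };

    struct demo_cmdq {
        struct demo_wqe ring[DEPTH];
        uint32_t prod_idx;
        uint8_t  wrapped; /* phase to stamp into newly produced WQEs */
    };

    static void cmdq_post(struct demo_cmdq *q, uint32_t data)
    {
        struct demo_wqe *wqe = &q->ring[q->prod_idx];

        wqe->data = data;
        wqe->wrapped = q->wrapped;    /* stamp the current phase */

        if (++q->prod_idx == DEPTH) { /* wrapped around: flip the phase */
            q->prod_idx = 0;
            q->wrapped = !q->wrapped;
        }
    }

    /* Consumer side: an entry is valid only if its phase matches. */
    static bool wqe_is_fresh(const struct demo_wqe *wqe, uint8_t expect)
    {
        return wqe->wrapped == expect;
    }

    int main(void)
    {
        struct demo_cmdq q = { .wrapped = 1 };

        for (uint32_t i = 0; i < 6; i++)
            cmdq_post(&q, i);
        printf("slot 0 fresh for phase 0: %d\n",
               wqe_is_fresh(&q.ring[0], 0));
        return 0;
    }
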
hinic_hw_io.c
533 enum hinic_cmdq_type cmdq, type; in hinic_io_init() local
565 for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) { in hinic_io_init()
573 func_to_io->cmdq_db_area[cmdq] = db_area; in hinic_io_init()
600 for (type = HINIC_CMDQ_SYNC; type < cmdq; type++) in hinic_io_init()
619 enum hinic_cmdq_type cmdq; in hinic_io_free() local
628 for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) in hinic_io_free()
629 return_db_area(func_to_io, func_to_io->cmdq_db_area[cmdq]); in hinic_io_free()
/drivers/infiniband/hw/bnxt_re/
qplib_rcfw.c
118 cmdq = &rcfw->cmdq; in bnxt_re_is_fw_stalled()
151 cmdq = &rcfw->cmdq; in __wait_for_resp()
194 struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq; in __block_for_resp() local
238 struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq; in __send_message_no_waiter() local
296 cmdq = &rcfw->cmdq; in __send_message()
389 struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq; in __poll_for_resp() local
423 cmdq = &rcfw->cmdq; in __send_message_basic_sanity()
934 cmdq = &rcfw->cmdq; in bnxt_qplib_alloc_rcfw_channel()
1010 cmdq = &rcfw->cmdq; in bnxt_qplib_disable_rcfw_channel()
1137 cmdq = &rcfw->cmdq; in bnxt_qplib_start_rcfw()
[all …]
/drivers/net/ethernet/hisilicon/hns3/hns3_common/
hclge_comm_cmd.c
550 spin_lock_bh(&cmdq->csq.lock); in hclge_comm_cmd_uninit()
551 spin_lock(&cmdq->crq.lock); in hclge_comm_cmd_uninit()
553 spin_unlock(&cmdq->crq.lock); in hclge_comm_cmd_uninit()
570 cmdq->csq.pdev = pdev; in hclge_comm_cmd_queue_init()
571 cmdq->crq.pdev = pdev; in hclge_comm_cmd_queue_init()
620 spin_lock(&cmdq->crq.lock); in hclge_comm_cmd_init()
622 cmdq->csq.next_to_clean = 0; in hclge_comm_cmd_init()
623 cmdq->csq.next_to_use = 0; in hclge_comm_cmd_init()
624 cmdq->crq.next_to_clean = 0; in hclge_comm_cmd_init()
625 cmdq->crq.next_to_use = 0; in hclge_comm_cmd_init()
[all …]
/drivers/iommu/arm/arm-smmu-v3/
arm-smmu-v3.c
374 struct arm_smmu_cmdq *cmdq = NULL; in arm_smmu_get_cmdq() local
379 return cmdq ?: &smmu->cmdq; in arm_smmu_get_cmdq()
385 if (cmdq == &smmu->cmdq) in arm_smmu_cmdq_needs_busy_polling()
513 if (atomic_read(&cmdq->lock) == 1) in arm_smmu_cmdq_shared_tryunlock()
596 ptr = &cmdq->valid_map[swidx]; in __arm_smmu_cmdq_poll_set_valid_map()
650 WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg)); in arm_smmu_cmdq_poll_until_not_full()
673 struct arm_smmu_cmdq *cmdq, in __arm_smmu_cmdq_poll_until_msi() argument
747 struct arm_smmu_cmdq *cmdq, in arm_smmu_cmdq_poll_until_sync() argument
846 arm_smmu_cmdq_shared_lock(cmdq); in arm_smmu_cmdq_issue_cmdlist()
3762 atomic_set(&cmdq->lock, 0); in arm_smmu_cmdq_init()
[all …]
tegra241-cmdqv.c
146 struct arm_smmu_cmdq cmdq; member
412 if (!arm_smmu_cmdq_supports_cmd(&vcmdq->cmdq, ent)) in tegra241_cmdqv_get_cmdq()
414 return &vcmdq->cmdq; in tegra241_cmdqv_get_cmdq()
618 struct arm_smmu_cmdq *cmdq = &vcmdq->cmdq; in tegra241_vcmdq_alloc_smmu_cmdq() local
619 struct arm_smmu_queue *q = &cmdq->q; in tegra241_vcmdq_alloc_smmu_cmdq()
643 cmdq->supports_cmd = tegra241_guest_vcmdq_supports_cmd; in tegra241_vcmdq_alloc_smmu_cmdq()
645 return arm_smmu_cmdq_init(smmu, cmdq); in tegra241_vcmdq_alloc_smmu_cmdq()
1084 vcmdq->cmdq.q.q_base & VCMDQ_ADDR, in tegra241_vcmdq_hw_init_user()
1085 1UL << (vcmdq->cmdq.q.q_base & VCMDQ_LOG2SIZE)); in tegra241_vcmdq_hw_init_user()
1180 vcmdq->cmdq.q.q_base = base_addr_pa & VCMDQ_ADDR; in tegra241_vintf_alloc_lvcmdq_user()
[all …]
arm-smmu-v3.h
640 static inline bool arm_smmu_cmdq_supports_cmd(struct arm_smmu_cmdq *cmdq, in arm_smmu_cmdq_supports_cmd() argument
643 return cmdq->supports_cmd ? cmdq->supports_cmd(ent) : true; in arm_smmu_cmdq_supports_cmd()
648 struct arm_smmu_cmdq *cmdq; member
780 struct arm_smmu_cmdq cmdq; member
978 struct arm_smmu_cmdq *cmdq);
984 struct arm_smmu_cmdq *cmdq);
1011 struct arm_smmu_cmdq *cmdq, u64 *cmds, int n,
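
arm_smmu_cmdq_supports_cmd() treats supports_cmd as an optional per-queue hook: a queue that leaves it NULL is assumed to accept every command, while tegra241-cmdqv.c plugs in tegra241_guest_vcmdq_supports_cmd so its VCMDQ can decline specific opcodes, with arm_smmu_get_cmdq() falling back to &smmu->cmdq when no dedicated queue is returned. The same pattern in isolation, with stand-in types rather than the SMMU structures:

    #include <stdbool.h>
    #include <stdio.h>

    struct demo_cmd { int opcode; };

    struct demo_cmdq {
        /* Optional hook: NULL means "this queue accepts every command". */
        bool (*supports_cmd)(struct demo_cmd *cmd);
    };

    static bool cmdq_supports_cmd(struct demo_cmdq *q, struct demo_cmd *cmd)
    {
        return q->supports_cmd ? q->supports_cmd(cmd) : true;
    }

    /* A restricted queue that only accepts opcode 0. */
    static bool only_opcode0(struct demo_cmd *cmd)
    {
        return cmd->opcode == 0;
    }

    int main(void)
    {
        struct demo_cmdq plain = { 0 };
        struct demo_cmdq restricted = { .supports_cmd = only_opcode0 };
        struct demo_cmd cmd = { .opcode = 1 };

        printf("plain accepts: %d, restricted accepts: %d\n",
               cmdq_supports_cmd(&plain, &cmd),
               cmdq_supports_cmd(&restricted, &cmd));
        return 0;
    }
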
/drivers/gpu/drm/nouveau/nvkm/engine/sec2/
base.c
45 struct nvkm_falcon_cmdq *cmdq = sec2->cmdq; in nvkm_sec2_fini() local
56 ret = nvkm_falcon_cmdq_send(cmdq, &cmd, nvkm_sec2_finimsg, sec2, in nvkm_sec2_fini()
68 nvkm_falcon_cmdq_fini(cmdq); in nvkm_sec2_fini()
119 nvkm_falcon_cmdq_del(&sec2->cmdq); in nvkm_sec2_dtor()
159 (ret = nvkm_falcon_cmdq_new(sec2->qmgr, "cmdq", &sec2->cmdq)) || in nvkm_sec2_new_()
ga102.c
50 nvkm_falcon_cmdq_init(sec2->cmdq, msg.queue_info[i].index, in ga102_sec2_initmsg()
104 return nvkm_falcon_cmdq_send(sec2->cmdq, &cmd.cmd.hdr, in ga102_sec2_acr_bootstrap_falcon()
137 .cmdq = { 0xc00, 0xc04, 8 },
gp102.c
71 return nvkm_falcon_cmdq_send(sec2->cmdq, &cmd.cmd.hdr, in gp102_sec2_acr_bootstrap_falcon()
142 nvkm_falcon_cmdq_init(sec2->cmdq, in gp102_sec2_initmsg()
214 .cmdq = { 0xa00, 0xa04, 8 },
/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/
rpc.c
369 msg->sequence = gsp->cmdq.seq++; in r535_gsp_cmdq_push()
377 wptr = *gsp->cmdq.wptr; in r535_gsp_cmdq_push()
380 free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1; in r535_gsp_cmdq_push()
381 if (free >= gsp->cmdq.cnt) in r535_gsp_cmdq_push()
382 free -= gsp->cmdq.cnt; in r535_gsp_cmdq_push()
401 if (wptr == gsp->cmdq.cnt) in r535_gsp_cmdq_push()
410 (*gsp->cmdq.wptr) = wptr; in r535_gsp_cmdq_push()
541 mutex_lock(&gsp->cmdq.mutex); in r535_gsp_rpc_poll()
543 mutex_unlock(&gsp->cmdq.mutex); in r535_gsp_rpc_poll()
636 mutex_lock(&gsp->cmdq.mutex); in r535_gsp_rpc_push()
[all …]
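
r535_gsp_cmdq_push() measures free space straight from the shared read/write pointers: free = rptr + cnt - wptr - 1, folded back into range when it overflows cnt. The minus one keeps one slot permanently unused so that a full queue never looks empty. A standalone illustration of that arithmetic with generic names:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Free entries in a ring of cnt slots tracked by a producer (wptr) and
     * a consumer (rptr) that both wrap at cnt.  One slot stays unused so
     * wptr == rptr unambiguously means "empty".
     */
    static uint32_t ring_free(uint32_t rptr, uint32_t wptr, uint32_t cnt)
    {
        uint32_t free = rptr + cnt - wptr - 1;

        if (free >= cnt) /* fold the result back into [0, cnt) */
            free -= cnt;
        return free;
    }

    int main(void)
    {
        uint32_t cnt = 8;

        printf("empty:      %u free\n", ring_free(0, 0, cnt)); /* 7 */
        printf("one queued: %u free\n", ring_free(0, 1, cnt)); /* 6 */
        printf("full:       %u free\n", ring_free(3, 2, cnt)); /* 0 */
        return 0;
    }
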
gsp.c
1136 } *cmdq, *msgq; in r535_gsp_shared_init() local
1155 gsp->shm.msgq.ptr = (u8 *)gsp->shm.cmdq.ptr + gsp->shm.cmdq.size; in r535_gsp_shared_init()
1160 cmdq = gsp->shm.cmdq.ptr; in r535_gsp_shared_init()
1161 cmdq->tx.version = 0; in r535_gsp_shared_init()
1162 cmdq->tx.size = gsp->shm.cmdq.size; in r535_gsp_shared_init()
1165 cmdq->tx.msgCount = (cmdq->tx.size - cmdq->tx.entryOff) / cmdq->tx.msgSize; in r535_gsp_shared_init()
1166 cmdq->tx.writePtr = 0; in r535_gsp_shared_init()
1167 cmdq->tx.flags = 1; in r535_gsp_shared_init()
1168 cmdq->tx.rxHdrOff = offsetof(typeof(*cmdq), rx.readPtr); in r535_gsp_shared_init()
1172 gsp->cmdq.cnt = cmdq->tx.msgCount; in r535_gsp_shared_init()
[all …]
/drivers/atm/
fore200e.c
554 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; in fore200e_pca_prom_read()
559 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); in fore200e_pca_prom_read()
1223 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; in fore200e_activate_vcin()
1667 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; in fore200e_getstats()
1713 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1753 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; in fore200e_set_oc3()
2237 &cmdq->status, in fore200e_init_cmd_queue()
2250 cmdq->host_entry[ i ].status = in fore200e_init_cmd_queue()
2252 cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ]; in fore200e_init_cmd_queue()
2254 *cmdq->host_entry[ i ].status = STATUS_FREE; in fore200e_init_cmd_queue()
[all …]
/drivers/infiniband/hw/hns/
hns_roce_trace.h
175 DECLARE_EVENT_CLASS(cmdq,
201 DEFINE_EVENT(cmdq, hns_cmdq_req,
205 DEFINE_EVENT(cmdq, hns_cmdq_resp,
