/drivers/gpu/drm/imagination/
pvr_context.c
  204  if (IS_ERR(ctx->queues.geometry)) {  in pvr_context_create_queues()
  205  err = PTR_ERR(ctx->queues.geometry);  in pvr_context_create_queues()
  206  ctx->queues.geometry = NULL;  in pvr_context_create_queues()
  212  if (IS_ERR(ctx->queues.fragment)) {  in pvr_context_create_queues()
  214  ctx->queues.fragment = NULL;  in pvr_context_create_queues()
  222  if (IS_ERR(ctx->queues.compute)) {  in pvr_context_create_queues()
  223  err = PTR_ERR(ctx->queues.compute);  in pvr_context_create_queues()
  224  ctx->queues.compute = NULL;  in pvr_context_create_queues()
  232  if (IS_ERR(ctx->queues.transfer)) {  in pvr_context_create_queues()
  234  ctx->queues.transfer = NULL;  in pvr_context_create_queues()
  [all …]
|
pvr_queue.c
  533  job->ctx->queues.fragment);  in pvr_queue_prepare_job()
  606  mutex_lock(&pvr_dev->queues.lock);  in pvr_queue_update_active_state()
  608  mutex_unlock(&pvr_dev->queues.lock);  in pvr_queue_update_active_state()
  829  mutex_lock(&pvr_dev->queues.lock);  in pvr_queue_timedout_job()
  831  mutex_unlock(&pvr_dev->queues.lock);  in pvr_queue_timedout_job()
  845  mutex_lock(&pvr_dev->queues.lock);  in pvr_queue_timedout_job()
  853  mutex_unlock(&pvr_dev->queues.lock);  in pvr_queue_timedout_job()
  1321  mutex_lock(&pvr_dev->queues.lock);  in pvr_queue_create()
  1323  mutex_unlock(&pvr_dev->queues.lock);  in pvr_queue_create()
  1350  mutex_lock(&pvr_dev->queues.lock);  in pvr_queue_device_pre_reset()
  [all …]
|
pvr_context.h
  87  } queues;  member
  98  return ctx->type == DRM_PVR_CTX_TYPE_RENDER ? ctx->queues.geometry : NULL;  in pvr_context_get_queue_for_job()
  100  return ctx->type == DRM_PVR_CTX_TYPE_RENDER ? ctx->queues.fragment : NULL;  in pvr_context_get_queue_for_job()
  102  return ctx->type == DRM_PVR_CTX_TYPE_COMPUTE ? ctx->queues.compute : NULL;  in pvr_context_get_queue_for_job()
  104  return ctx->type == DRM_PVR_CTX_TYPE_TRANSFER_FRAG ? ctx->queues.transfer : NULL;  in pvr_context_get_queue_for_job()
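
The pvr_context hits above follow the kernel's error-pointer convention: a failed queue creation returns an errno encoded with ERR_PTR(), the caller unpacks it with PTR_ERR() and then clears the field to NULL so later teardown does not mistake it for a live queue. Below is a minimal userspace sketch of that idiom; the err.h-style helpers are simplified stand-ins and queue_create() is a hypothetical constructor, not the driver's API.

    /*
     * Minimal, standalone sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() idiom.
     * The real helpers live in <linux/err.h>; these stand-ins and the
     * queue_create() constructor are illustrative only.
     */
    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long err) { return (void *)(intptr_t)err; }
    static inline long PTR_ERR(const void *ptr) { return (long)(intptr_t)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
    }

    struct queue { int id; };

    /* Hypothetical constructor: returns a queue or an encoded errno. */
    static struct queue *queue_create(int force_failure)
    {
        struct queue *q;

        if (force_failure)
            return ERR_PTR(-ENOMEM);
        q = calloc(1, sizeof(*q));
        return q ? q : ERR_PTR(-ENOMEM);
    }

    int main(void)
    {
        struct queue *geometry = queue_create(1);
        int err = 0;

        if (IS_ERR(geometry)) {
            err = (int)PTR_ERR(geometry); /* keep the errno for the caller */
            geometry = NULL;              /* and neutralise the pointer */
        }
        printf("geometry=%p err=%d\n", (void *)geometry, err);
        free(geometry);                   /* free(NULL) is a no-op */
        return 0;
    }

pvr_context_create_queues() applies the same check-and-NULL sequence to each of the geometry, fragment, compute and transfer queues in turn.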
|
/drivers/nvme/target/
loop.c
  30  struct nvme_loop_queue *queues;  member
  73  return queue - queue->ctrl->queues;  in nvme_loop_queue_idx()
  198  iod->queue = &ctrl->queues[queue_idx];  in nvme_loop_init_iod()
  293  kfree(ctrl->queues);  in nvme_loop_free_ctrl()
  331  ctrl->queues[i].ctrl = ctrl;  in nvme_loop_init_io_queues()
  334  &ctrl->queues[i].nvme_cq);  in nvme_loop_init_io_queues()
  368  ctrl->queues[0].ctrl = ctrl;  in nvme_loop_configure_admin_queue()
  371  &ctrl->queues[0].nvme_cq);  in nvme_loop_configure_admin_queue()
  594  ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),  in nvme_loop_create_ctrl()
  596  if (!ctrl->queues)  in nvme_loop_create_ctrl()
  [all …]
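
Two things are visible in the loop.c hits: the queue array is sized nr_io_queues + 1 (index 0 is the admin queue) and a queue's index is recovered by subtracting the array base, as nvme_loop_queue_idx() does at line 73. A hedged userspace sketch of that layout follows; the struct names are made up for the example.

    /* Sketch of the "array of queues, index by pointer subtraction"
     * pattern seen in loop.c (and in rdma.c further down). */
    #include <stdio.h>
    #include <stdlib.h>

    struct ctrl;

    struct loop_queue {
        struct ctrl *ctrl;
    };

    struct ctrl {
        struct loop_queue *queues; /* [0] = admin queue, [1..n] = I/O queues */
    };

    static size_t queue_idx(const struct loop_queue *queue)
    {
        /* Same trick as nvme_loop_queue_idx(): element minus array base. */
        return queue - queue->ctrl->queues;
    }

    int main(void)
    {
        struct ctrl ctrl;
        unsigned int nr_io_queues = 4;

        /* One extra slot for the admin queue, mirroring the kcalloc() hit. */
        ctrl.queues = calloc(nr_io_queues + 1, sizeof(*ctrl.queues));
        if (!ctrl.queues)
            return 1;

        for (unsigned int i = 0; i <= nr_io_queues; i++)
            ctrl.queues[i].ctrl = &ctrl;

        printf("index of queue 3: %zu\n", queue_idx(&ctrl.queues[3]));
        free(ctrl.queues);
        return 0;
    }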
|
/drivers/net/wireless/silabs/wfx/
queue.c
  244  queues[num_queues] = &wvif->tx_queue[i];  in wfx_tx_queues_get_skb()
  248  swap(queues[j - 1], queues[j]);  in wfx_tx_queues_get_skb()
  256  skb = skb_dequeue(&queues[i]->offchan);  in wfx_tx_queues_get_skb()
  264  atomic_inc(&queues[i]->pending_frames);  in wfx_tx_queues_get_skb()
  265  trace_queues_stats(wdev, queues[i]);  in wfx_tx_queues_get_skb()
  278  skb = skb_dequeue(&queues[i]->cab);  in wfx_tx_queues_get_skb()
  287  atomic_inc(&queues[i]->pending_frames);  in wfx_tx_queues_get_skb()
  288  trace_queues_stats(wdev, queues[i]);  in wfx_tx_queues_get_skb()
  297  skb = skb_dequeue(&queues[i]->normal);  in wfx_tx_queues_get_skb()
  299  atomic_inc(&queues[i]->pending_frames);  in wfx_tx_queues_get_skb()
  [all …]
|
/drivers/scsi/aacraid/
comminit.c
  373  struct aac_entry * queues;  in aac_comm_init() local
  399  queues += HOST_NORM_CMD_ENTRIES;  in aac_comm_init()
  406  queues += HOST_HIGH_CMD_ENTRIES;  in aac_comm_init()
  413  queues += ADAP_NORM_CMD_ENTRIES;  in aac_comm_init()
  420  queues += ADAP_HIGH_CMD_ENTRIES;  in aac_comm_init()
  426  queues += HOST_NORM_RESP_ENTRIES;  in aac_comm_init()
  636  if (dev->queues == NULL) {  in aac_init_adapter()
  642  kfree(dev->queues);  in aac_init_adapter()
  643  dev->queues = NULL;  in aac_init_adapter()
  650  kfree(dev->queues);  in aac_init_adapter()
  [all …]
|
commsup.c
  363  q = &dev->queues->queue[qid];  in aac_get_entry()
  572  if (!dev->queues)  in aac_fib_send()
  912  q = &dev->queues->queue[AdapNormRespQueue];  in aac_fib_adapter_complete()
  1550  kfree(aac->queues);  in _aac_reset_adapter()
  1551  aac->queues = NULL;  in _aac_reset_adapter()
  2036  t_lock = dev->queues->queue[HostNormCmdQueue].lock;  in aac_process_events()
  2051  t_lock = dev->queues->queue[HostNormCmdQueue].lock;  in aac_process_events()
  2149  t_lock = dev->queues->queue[HostNormCmdQueue].lock;  in aac_process_events()
  2336  } else if (!dev->queues)  in aac_command_thread()
  2346  if (ret || !dev->queues)  in aac_command_thread()
  [all …]
|
/drivers/media/platform/nxp/imx8-isi/
imx8-isi-m2m.c
  59  } queues;  member
  86  return &ctx->queues.out;  in mxc_isi_m2m_ctx_qdata()
  88  return &ctx->queues.cap;  in mxc_isi_m2m_ctx_qdata()
  153  ctx->queues.out.info,  in mxc_isi_m2m_device_run()
  154  &ctx->queues.out.format);  in mxc_isi_m2m_device_run()
  156  ctx->queues.cap.info,  in mxc_isi_m2m_device_run()
  157  &ctx->queues.cap.format);  in mxc_isi_m2m_device_run()
  470  ctx->queues.out.format = *pix;  in mxc_isi_m2m_s_fmt_vid()
  471  ctx->queues.out.info = info;  in mxc_isi_m2m_s_fmt_vid()
  478  ctx->queues.cap.format = *pix;  in mxc_isi_m2m_s_fmt_vid()
  [all …]
|
/drivers/target/
target_core_tmr.c
  118  flush_work(&dev->queues[i].sq.work);  in core_tmr_abort_task()
  120  spin_lock_irqsave(&dev->queues[i].lock, flags);  in core_tmr_abort_task()
  121  list_for_each_entry_safe(se_cmd, next, &dev->queues[i].state_list,  in core_tmr_abort_task()
  148  spin_unlock_irqrestore(&dev->queues[i].lock, flags);  in core_tmr_abort_task()
  163  spin_unlock_irqrestore(&dev->queues[i].lock, flags);  in core_tmr_abort_task()
  301  flush_work(&dev->queues[i].sq.work);  in core_tmr_drain_state_list()
  303  spin_lock_irqsave(&dev->queues[i].lock, flags);  in core_tmr_drain_state_list()
  304  list_for_each_entry_safe(cmd, next, &dev->queues[i].state_list,  in core_tmr_drain_state_list()
  333  spin_unlock_irqrestore(&dev->queues[i].lock, flags);  in core_tmr_drain_state_list()
|
/drivers/vdpa/alibaba/
eni_vdpa.c
  45  int queues;  member
  118  for (i = 0; i < eni_vdpa->queues; i++) {  in eni_vdpa_free_irq()
  164  int queues = eni_vdpa->queues;  in eni_vdpa_request_irq() local
  165  int vectors = queues + 1;  in eni_vdpa_request_irq()
  177  for (i = 0; i < queues; i++) {  in eni_vdpa_request_irq()
  195  irq = pci_irq_vector(pdev, queues);  in eni_vdpa_request_irq()
  202  vp_legacy_config_vector(ldev, queues);  in eni_vdpa_request_irq()
  500  eni_vdpa->queues = eni_vdpa_get_num_queues(eni_vdpa);  in eni_vdpa_probe()
  502  eni_vdpa->vring = devm_kcalloc(&pdev->dev, eni_vdpa->queues,  in eni_vdpa_probe()
  511  for (i = 0; i < eni_vdpa->queues; i++) {  in eni_vdpa_probe()
  [all …]
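
The eni_vdpa_request_irq() hits size the interrupt request as queues + 1: one vector per virtqueue plus a final vector, at index queues, for config-change interrupts, which is what the pci_irq_vector(pdev, queues) hit at line 195 reads back. A small sketch of that indexing, with fabricated vector numbers purely for illustration:

    /* Sketch of the "one vector per queue, plus one for config" layout. */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        int queues = 4;
        int vectors = queues + 1;      /* matches the line-165 hit above */
        int *irq = calloc(vectors, sizeof(*irq));

        if (!irq)
            return 1;

        for (int i = 0; i < queues; i++)
            irq[i] = 32 + i;           /* per-queue data vectors */
        irq[queues] = 32 + queues;     /* config-change vector at index `queues` */

        printf("config irq lives at vector index %d (irq %d)\n",
               queues, irq[queues]);
        free(irq);
        return 0;
    }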
|
/drivers/infiniband/hw/mana/
qp.c
  461  u32 qidr = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].id;  in mana_table_store_ud_qp()
  482  u32 qidr = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].id;  in mana_table_remove_ud_qp()
  554  qp->rc_qp.queues[i].id = INVALID_QUEUE_ID;  in mana_ib_create_rc_qp()
  559  &qp->rc_qp.queues[i]);  in mana_ib_create_rc_qp()
  579  resp.queue_id[j] = qp->rc_qp.queues[i].id;  in mana_ib_create_rc_qp()
  599  mana_ib_destroy_queue(mdev, &qp->rc_qp.queues[i]);  in mana_ib_create_rc_qp()
  649  &qp->ud_qp.queues[i]);  in mana_ib_create_ud_qp()
  680  qp->ud_qp.queues[i].kmem->id = qp->ud_qp.queues[i].id;  in mana_ib_create_ud_qp()
  697  mana_ib_destroy_queue(mdev, &qp->ud_qp.queues[i]);  in mana_ib_create_ud_qp()
  863  mana_ib_destroy_queue(mdev, &qp->rc_qp.queues[i]);  in mana_ib_destroy_rc_qp()
  [all …]
|
/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/
action_ste_pool.c
  308  for (i = 0; i < ctx->queues; i++) {  in hws_action_ste_pool_cleanup()
  329  size_t queues = ctx->queues;  in mlx5hws_action_ste_pool_init() local
  332  pool = kcalloc(queues, sizeof(*pool), GFP_KERNEL);  in mlx5hws_action_ste_pool_init()
  336  for (i = 0; i < queues; i++) {  in mlx5hws_action_ste_pool_init()
  362  size_t queues = ctx->queues;  in mlx5hws_action_ste_pool_uninit() local
  367  for (i = 0; i < queues; i++)  in mlx5hws_action_ste_pool_uninit()
|
/drivers/net/ethernet/netronome/nfp/
nfp_net_debugfs.c
  131  struct dentry *queues, *tx, *rx, *xdp;  in nfp_net_debugfs_vnic_add() local
  145  queues = debugfs_create_dir("queue", nn->debugfs_dir);  in nfp_net_debugfs_vnic_add()
  147  rx = debugfs_create_dir("rx", queues);  in nfp_net_debugfs_vnic_add()
  148  tx = debugfs_create_dir("tx", queues);  in nfp_net_debugfs_vnic_add()
  149  xdp = debugfs_create_dir("xdp", queues);  in nfp_net_debugfs_vnic_add()
|
/drivers/gpu/drm/xe/
xe_pxp.c
  403  INIT_LIST_HEAD(&pxp->queues.list);  in xe_pxp_init()
  404  spin_lock_init(&pxp->queues.lock);  in xe_pxp_init()
  518  spin_lock_irq(&pxp->queues.lock);  in __exec_queue_add()
  519  list_add_tail(&q->pxp.link, &pxp->queues.list);  in __exec_queue_add()
  520  spin_unlock_irq(&pxp->queues.lock);  in __exec_queue_add()
  700  spin_lock_irq(&pxp->queues.lock);  in __pxp_exec_queue_remove()
  710  spin_unlock_irq(&pxp->queues.lock);  in __pxp_exec_queue_remove()
  735  spin_lock_irq(&pxp->queues.lock);  in pxp_invalidate_queues()
  737  list_for_each_entry_safe(q, tmp, &pxp->queues.list, pxp.link) {  in pxp_invalidate_queues()
  744  spin_unlock_irq(&pxp->queues.lock);  in pxp_invalidate_queues()
|
/drivers/vdpa/virtio_pci/
vp_vdpa.c
  42  int queues;  member
  116  for (i = 0; i < vp_vdpa->queues; i++) {  in vp_vdpa_free_irq()
  162  int queues = vp_vdpa->queues;  in vp_vdpa_request_irq() local
  166  for (i = 0; i < queues; i++) {  in vp_vdpa_request_irq()
  181  for (i = 0; i < queues; i++) {  in vp_vdpa_request_irq()
  524  vp_vdpa->queues = vp_modern_get_num_queues(mdev);  in vp_vdpa_dev_add()
  548  vp_vdpa->vring = devm_kcalloc(&pdev->dev, vp_vdpa->queues,  in vp_vdpa_dev_add()
  557  for (i = 0; i < vp_vdpa->queues; i++) {  in vp_vdpa_dev_add()
  571  ret = _vdpa_register_device(&vp_vdpa->vdpa, vp_vdpa->queues);  in vp_vdpa_dev_add()
|
/drivers/staging/media/ipu3/
ipu3.c
  77  &imgu_pipe->queues[i].dmap);  in imgu_dummybufs_cleanup()
  98  &imgu_pipe->queues[i].dmap, size)) {  in imgu_dummybufs_preallocate()
  138  &imgu_pipe->queues[i].dmap,  in imgu_dummybufs_init()
  145  imgu_css_buf_init(&imgu_pipe->queues[i].dummybufs[k], i,  in imgu_dummybufs_init()
  146  imgu_pipe->queues[i].dmap.daddr);  in imgu_dummybufs_init()
  163  if (WARN_ON(!imgu_pipe->queues[queue].dmap.vaddr))  in imgu_dummybufs_get()
  168  if (imgu_css_buf_state(&imgu_pipe->queues[queue].dummybufs[i]) !=  in imgu_dummybufs_get()
  175  imgu_css_buf_init(&imgu_pipe->queues[queue].dummybufs[i], queue,  in imgu_dummybufs_get()
  176  imgu_pipe->queues[queue].dmap.daddr);  in imgu_dummybufs_get()
  178  return &imgu_pipe->queues[queue].dummybufs[i];  in imgu_dummybufs_get()
  [all …]
|
/drivers/net/xen-netback/
interface.c
  230  queue = &vif->queues[index];  in xenvif_start_xmit()
  287  queue = &vif->queues[index];  in xenvif_get_stats()
  311  queue = &vif->queues[queue_index];  in xenvif_up()
  327  queue = &vif->queues[queue_index];  in xenvif_down()
  440  void *vif_stats = &vif->queues[queue_index].stats;  in xenvif_get_ethtool_stats()
  519  vif->queues = NULL;  in xenvif_alloc()
  811  queue = &vif->queues[queue_index];  in xenvif_disconnect_data()
  845  struct xenvif_queue *queues = vif->queues;  in xenvif_free() local
  853  xenvif_deinit_queue(&queues[queue_index]);  in xenvif_free()
  854  vfree(queues);  in xenvif_free()
|
xenbus.c
  182  &vif->queues[i],  in xenvif_debugfs_addif()
  271  xenvif_deinit_queue(&vif->queues[queue_index]);  in backend_disconnect()
  273  vfree(vif->queues);  in backend_disconnect()
  274  vif->queues = NULL;  in backend_disconnect()
  534  struct xenvif_queue *queue = &vif->queues[queue_index];  in xen_net_rate_changed()
  764  be->vif->queues = vzalloc(array_size(requested_num_queues,  in connect()
  766  if (!be->vif->queues) {  in connect()
  776  queue = &be->vif->queues[queue_index];  in connect()
  840  xenvif_deinit_queue(&be->vif->queues[queue_index]);  in connect()
  841  vfree(be->vif->queues);  in connect()
  [all …]
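
Both xen-netback files show the same lifecycle for the vif->queues array: connect() allocates it with vzalloc(array_size(...)), and teardown walks the queues calling xenvif_deinit_queue() before freeing the array and clearing the pointer. A userspace sketch of that ordering follows, with stand-in types and a stubbed deinit helper rather than the driver's real ones.

    /* Sketch of the allocate / per-queue deinit / free / NULL ordering. */
    #include <stdio.h>
    #include <stdlib.h>

    struct xenvif_queue { int id; };

    struct xenvif {
        struct xenvif_queue *queues;
        unsigned int num_queues;
    };

    static void xenvif_deinit_queue(struct xenvif_queue *queue)
    {
        printf("deinit queue %d\n", queue->id); /* stub for per-queue cleanup */
    }

    int main(void)
    {
        struct xenvif vif;
        unsigned int requested_num_queues = 4;

        /* vzalloc(array_size(...)) in connect() becomes calloc() here. */
        vif.queues = calloc(requested_num_queues, sizeof(*vif.queues));
        if (!vif.queues)
            return 1;
        vif.num_queues = requested_num_queues;
        for (unsigned int i = 0; i < vif.num_queues; i++)
            vif.queues[i].id = i;

        /* Teardown: per-queue cleanup first, then free, then clear. */
        for (unsigned int i = 0; i < vif.num_queues; i++)
            xenvif_deinit_queue(&vif.queues[i]);
        free(vif.queues);   /* vfree() in the driver */
        vif.queues = NULL;  /* so later paths see "no queues" */
        return 0;
    }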
|
/drivers/net/
xen-netfront.c
  372  queue = &np->queues[i];  in xennet_open()
  881  if (!info->queues)  in xennet_destroy_queues()
  892  kfree(info->queues);  in xennet_destroy_queues()
  893  info->queues = NULL;  in xennet_destroy_queues()
  1726  np->queues = NULL;  in xennet_create_dev()
  1876  if (info->queues)  in netfront_resume()
  2217  if (!info->queues)  in xennet_create_queues()
  2305  if (info->queues)  in talk_to_netback()
  2314  kfree(info->queues);  in talk_to_netback()
  2315  info->queues = NULL;  in talk_to_netback()
  [all …]
|
/drivers/net/ethernet/cadence/
macb_main.c
  1081  queue_index = queue - bp->queues;  in macb_tx_error_task()
  2644  bp->queues[0].tx_head = 0;  in macb_init_rings()
  2645  bp->queues[0].tx_tail = 0;  in macb_init_rings()
  4295  queue = &bp->queues[q];  in macb_init()
  4802  bp->queues[0].bp = bp;  in at91ether_init()
  5489  devm_free_irq(dev, bp->queues[0].irq, bp->queues);  in macb_suspend()
  5496  bp->queues[0].irq, err);  in macb_suspend()
  5508  bp->queues[0].irq, err);  in macb_suspend()
  5579  queue_readl(bp->queues, ISR);  in macb_resume()
  5583  devm_free_irq(dev, bp->queues[0].irq, bp->queues);  in macb_resume()
  [all …]
|
/drivers/gpu/drm/panthor/
panthor_sched.c
  1026  group->queues[i]->doorbell_id = -1;  in group_unbind_locked()
  1207  if (group->queues[i])  in csg_slot_sync_queues_state_locked()
  1277  if (group->queues[i])  in csg_slot_sync_state_locked()
  1309  if (group->queues[i]) {  in csg_slot_prog_locked()
  2628  queue_start(group->queues[i]);  in panthor_group_start()
  3408  queue = group->queues[i];  in add_group_kbo_sizes()
  3493  group_args->queues.count *  in panthor_group_create()
  3514  if (IS_ERR(group->queues[i])) {  in panthor_group_create()
  3515  ret = PTR_ERR(group->queues[i]);  in panthor_group_create()
  3516  group->queues[i] = NULL;  in panthor_group_create()
  [all …]
|
/drivers/gpu/drm/msm/adreno/
a6xx_hfi.c
  129  struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];  in a6xx_hfi_wait_for_ack()
  192  struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];  in a6xx_hfi_send_msg()
  920  for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {  in a6xx_hfi_stop()
  921  struct a6xx_hfi_queue *queue = &gmu->queues[i];  in a6xx_hfi_stop()
  977  table_size += (ARRAY_SIZE(gmu->queues) *  in a6xx_hfi_init()
  985  table->num_queues = ARRAY_SIZE(gmu->queues);  in a6xx_hfi_init()
  986  table->active_queues = ARRAY_SIZE(gmu->queues);  in a6xx_hfi_init()
  990  a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset,  in a6xx_hfi_init()
  995  a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,  in a6xx_hfi_init()
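
The a6xx HFI hits keep the queues in a fixed-size array and derive every count from ARRAY_SIZE(gmu->queues) rather than a separate length field. A standalone sketch of that macro with a two-entry command/response table; the entry layout here is invented for the example.

    /* Sketch of sizing a fixed queue table with ARRAY_SIZE(). */
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct hfi_queue { const char *name; };

    int main(void)
    {
        struct hfi_queue queues[] = {
            { "command" },   /* stands in for HFI_COMMAND_QUEUE */
            { "response" },  /* stands in for HFI_RESPONSE_QUEUE */
        };

        printf("num_queues = %zu\n", ARRAY_SIZE(queues));
        for (size_t i = 0; i < ARRAY_SIZE(queues); i++)
            printf("queue %zu: %s\n", i, queues[i].name);
        return 0;
    }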
|
/drivers/net/wireless/realtek/rtw88/
mac.h
  42  void rtw_mac_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop);
  48  rtw_mac_flush_queues(rtwdev, BIT(rtwdev->hw->queues) - 1, drop);  in rtw_mac_flush_all_queues()
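
rtw_mac_flush_all_queues() builds an "every queue" bitmask as BIT(rtwdev->hw->queues) - 1, i.e. a mask with one bit set per hardware queue. A tiny sketch of that arithmetic, redefining BIT() for a standalone build; the queue count is an assumed example value.

    /* Sketch of the BIT(n) - 1 all-queues mask. */
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    int main(void)
    {
        unsigned int hw_queues = 4;                     /* example: four TX queues */
        unsigned int all_queues = BIT(hw_queues) - 1;   /* 0b1111 = 0xf */

        printf("queue mask: 0x%x\n", all_queues);
        return 0;
    }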
|
/drivers/nvme/host/
rdma.c
  105  struct nvme_rdma_queue *queues;  member
  161  return queue - queue->ctrl->queues;  in nvme_rdma_queue_idx()
  576  queue = &ctrl->queues[idx];  in nvme_rdma_alloc_queue()
  777  nvme_rdma_free_queue(&ctrl->queues[0]);  in nvme_rdma_destroy_admin_queue()
  790  ctrl->device = ctrl->queues[0].device;  in nvme_rdma_configure_admin_queue()
  848  nvme_rdma_stop_queue(&ctrl->queues[0]);  in nvme_rdma_configure_admin_queue()
  860  nvme_rdma_free_queue(&ctrl->queues[0]);  in nvme_rdma_configure_admin_queue()
  981  kfree(ctrl->queues);  in nvme_rdma_free_ctrl()
  2292  ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),  in nvme_rdma_alloc_ctrl()
  2294  if (!ctrl->queues)  in nvme_rdma_alloc_ctrl()
  [all …]
|
/drivers/net/ethernet/intel/idpf/
Kconfig
  22  This option enables support for legacy single Rx/Tx queues w/no
  23  completion and fill queues. Only enable if you have hardware which
|