Cross-references for the identifier nr_queues in linux-6.3-rc2:

/linux-6.3-rc2/drivers/crypto/cavium/cpt/
cptvf_main.c
     42  if (cptvf->nr_queues) {                              in init_worker_threads()
     44  cptvf->nr_queues);                                   in init_worker_threads()
     71  cptvf->nr_queues);                                   in cleanup_worker_threads()
    104  u32 nr_queues)                                       in alloc_pending_queues() argument
    110  pqinfo->nr_queues = nr_queues;                       in alloc_pending_queues()
    141  if (!nr_queues)                                      in init_pending_queues()
    147  nr_queues);                                          in init_pending_queues()
    320  nr_queues = min_t(u32, nr_queues, max_dev_queues);   in cptvf_sw_init()
    321  cptvf->nr_queues = nr_queues;                        in cptvf_sw_init()
    326  nr_queues);                                          in cptvf_sw_init()
    [all …]

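The cptvf_sw_init() hits at lines 320-321 show the usual clamp-and-store idiom: the requested queue count is bounded by the device maximum before being recorded in the per-VF state. A minimal userspace sketch of that idiom, where MAX_DEV_QUEUES and min_u32() are stand-ins for the driver's limit and the kernel's min_t() macro:

```c
#include <stdint.h>
#include <stdio.h>

#define MAX_DEV_QUEUES 8u	/* hypothetical device limit */

/* Stand-in for the kernel's min_t(u32, ...). */
static uint32_t min_u32(uint32_t a, uint32_t b)
{
	return a < b ? a : b;
}

struct vf_state {
	uint32_t nr_queues;
};

static void sw_init(struct vf_state *vf, uint32_t requested)
{
	/* Never configure more queues than the device supports. */
	uint32_t nr_queues = min_u32(requested, MAX_DEV_QUEUES);

	vf->nr_queues = nr_queues;
	printf("configured %u queue(s)\n", vf->nr_queues);
}

int main(void)
{
	struct vf_state vf;

	sw_init(&vf, 32);	/* request gets clamped to MAX_DEV_QUEUES */
	return 0;
}
```
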
cptvf.h
     85  u32 nr_queues; /* Number of queues supported */      member
     91  for (i = 0, q = &qinfo->queue[i]; i < qinfo->nr_queues; i++, \
    110  u32 nr_queues;                                       member

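The hit at line 91 is the body of a for-each-queue helper macro (the trailing backslash is a macro continuation). A hedged reconstruction of the shape such a macro typically takes; the struct layout and the macro name here are illustrative, not the driver's exact definitions:

```c
#include <stdint.h>
#include <stdio.h>

struct queue { int id; };

struct queue_info {
	uint32_t nr_queues;	/* number of queues supported */
	struct queue queue[4];
};

/* Walk every queue in a queue_info; mirrors the loop shape at cptvf.h:91.
 * Illustrative name, not the driver's actual macro. */
#define for_each_queue(qinfo, q, i)                                    \
	for ((i) = 0, (q) = &(qinfo)->queue[0];                        \
	     (i) < (qinfo)->nr_queues;                                 \
	     (i)++, (q) = &(qinfo)->queue[(i)])

int main(void)
{
	struct queue_info qinfo = { .nr_queues = 4,
				    .queue = { {0}, {1}, {2}, {3} } };
	struct queue *q;
	uint32_t i;

	for_each_queue(&qinfo, q, i)
		printf("queue %d\n", q->id);
	return 0;
}
```
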
cptvf_reqmanager.c
    233  if (unlikely(qno >= cptvf->nr_queues)) {             in send_cpt_command()
    235  qno, cptvf->nr_queues);                              in send_cpt_command()
    545  if (unlikely(qno > cptvf->nr_queues)) {              in vq_post_process()

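Both functions validate a queue number against nr_queues before indexing per-queue state; send_cpt_command() rejects qno >= nr_queues, while vq_post_process() uses a strict >. A sketch of the stricter >= form, with unlikely() modeled on the GCC/Clang branch-prediction builtin the kernel uses:

```c
#include <stdint.h>
#include <stdio.h>

#define unlikely(x) __builtin_expect(!!(x), 0)	/* GCC/Clang hint */

struct vf_state { uint32_t nr_queues; };

/* Reject out-of-range queue numbers before touching per-queue state.
 * Valid indices are 0 .. nr_queues - 1, so >= is the safe comparison. */
static int check_queue(const struct vf_state *vf, uint32_t qno)
{
	if (unlikely(qno >= vf->nr_queues)) {
		fprintf(stderr, "invalid queue %u (have %u)\n",
			qno, vf->nr_queues);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct vf_state vf = { .nr_queues = 4 };

	printf("qno 3 -> %d\n", check_queue(&vf, 3));	/* accepted */
	printf("qno 4 -> %d\n", check_queue(&vf, 4));	/* rejected */
	return 0;
}
```
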
/linux-6.3-rc2/drivers/crypto/cavium/nitrox/
nitrox_sriov.c
     58  int nr_queues = 0;                                   in vf_mode_to_nr_queues() local
     62  nr_queues = MAX_PF_QUEUES;                           in vf_mode_to_nr_queues()
     65  nr_queues = 8;                                       in vf_mode_to_nr_queues()
     68  nr_queues = 4;                                       in vf_mode_to_nr_queues()
     71  nr_queues = 2;                                       in vf_mode_to_nr_queues()
     74  nr_queues = 1;                                       in vf_mode_to_nr_queues()
     78  return nr_queues;                                    in vf_mode_to_nr_queues()

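vf_mode_to_nr_queues() maps the SR-IOV configuration to a per-VF queue budget: the more VFs share the device, the fewer queues each one gets, dropping from MAX_PF_QUEUES down to a single queue. A sketch of that mapping; the enum constants below are illustrative, not the driver's actual mode names:

```c
#include <stdio.h>

#define MAX_PF_QUEUES 64	/* hypothetical PF queue budget */

/* Illustrative VF modes; the nitrox driver's enum names may differ. */
enum vf_mode { VF_MODE_PF, VF_MODE_A, VF_MODE_B, VF_MODE_C, VF_MODE_D };

/* Fewer queues per VF as more VFs share the device, as at
 * nitrox_sriov.c:58-78. */
static int vf_mode_to_nr_queues(enum vf_mode mode)
{
	int nr_queues = 0;

	switch (mode) {
	case VF_MODE_PF:
		nr_queues = MAX_PF_QUEUES;
		break;
	case VF_MODE_A:
		nr_queues = 8;
		break;
	case VF_MODE_B:
		nr_queues = 4;
		break;
	case VF_MODE_C:
		nr_queues = 2;
		break;
	case VF_MODE_D:
		nr_queues = 1;
		break;
	}
	return nr_queues;
}

int main(void)
{
	printf("PF mode: %d queues\n", vf_mode_to_nr_queues(VF_MODE_PF));
	printf("densest VF mode: %d queue\n", vf_mode_to_nr_queues(VF_MODE_D));
	return 0;
}
```
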
nitrox_lib.c
     91  for (i = 0; i < ndev->nr_queues; i++) {              in nitrox_free_aqm_queues()
    102  for (i = 0; i < ndev->nr_queues; i++) {              in nitrox_alloc_aqm_queues()
    142  for (i = 0; i < ndev->nr_queues; i++) {              in nitrox_free_pktin_queues()
    155  ndev->pkt_inq = kcalloc_node(ndev->nr_queues,        in nitrox_alloc_pktin_queues()
    161  for (i = 0; i < ndev->nr_queues; i++) {              in nitrox_alloc_pktin_queues()

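nitrox_alloc_pktin_queues() sizes a per-queue array with kcalloc_node() and then initializes each of the nr_queues entries, with the matching free routine walking the same range. A userspace sketch of the allocate-then-iterate pattern with error unwinding, using calloc()/malloc() in place of the kernel allocators:

```c
#include <stdio.h>
#include <stdlib.h>

struct pktin_queue { void *ring; };

struct nitrox_dev {
	int nr_queues;
	struct pktin_queue *pkt_inq;
};

/* Allocate one descriptor ring per queue; on failure, free what was
 * already set up so the device is left in a clean state. */
static int alloc_pktin_queues(struct nitrox_dev *ndev)
{
	int i;

	ndev->pkt_inq = calloc(ndev->nr_queues, sizeof(*ndev->pkt_inq));
	if (!ndev->pkt_inq)
		return -1;

	for (i = 0; i < ndev->nr_queues; i++) {
		ndev->pkt_inq[i].ring = malloc(4096);
		if (!ndev->pkt_inq[i].ring)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)
		free(ndev->pkt_inq[i].ring);
	free(ndev->pkt_inq);
	ndev->pkt_inq = NULL;
	return -1;
}

static void free_pktin_queues(struct nitrox_dev *ndev)
{
	for (int i = 0; i < ndev->nr_queues; i++)
		free(ndev->pkt_inq[i].ring);
	free(ndev->pkt_inq);
}

int main(void)
{
	struct nitrox_dev ndev = { .nr_queues = 4 };

	if (alloc_pktin_queues(&ndev) == 0) {
		printf("allocated %d packet-input queues\n", ndev.nr_queues);
		free_pktin_queues(&ndev);
	}
	return 0;
}
```
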
nitrox_dev.h
    160  int nr_queues;                                       member
    250  u16 nr_queues;                                       member

nitrox_mbx.c
     68  vfdev->nr_queues = vfdev->msg.data;                  in pf2vf_send_response()
     76  vfdev->nr_queues = 0;                                in pf2vf_send_response()

nitrox_hal.c
    124  for (i = 0; i < ndev->nr_queues; i++) {              in nitrox_config_pkt_input_rings()
    240  for (i = 0; i < ndev->nr_queues; i++)                in nitrox_config_pkt_solicit_ports()
    356  for (ring = 0; ring < ndev->nr_queues; ring++) {     in nitrox_config_aqm_rings()

/linux-6.3-rc2/drivers/ufs/core/
ufs-mcq.c
    167  hba->nr_queues[HCTX_TYPE_DEFAULT] = rw_queues;       in ufshcd_mcq_config_nr_queues()
    168  rem -= hba->nr_queues[HCTX_TYPE_DEFAULT];            in ufshcd_mcq_config_nr_queues()
    174  hba->nr_queues[HCTX_TYPE_POLL] = poll_queues;        in ufshcd_mcq_config_nr_queues()
    175  rem -= hba->nr_queues[HCTX_TYPE_POLL];               in ufshcd_mcq_config_nr_queues()
    179  hba->nr_queues[HCTX_TYPE_READ] = read_queues;        in ufshcd_mcq_config_nr_queues()
    180  rem -= hba->nr_queues[HCTX_TYPE_READ];               in ufshcd_mcq_config_nr_queues()
    183  if (!hba->nr_queues[HCTX_TYPE_DEFAULT])              in ufshcd_mcq_config_nr_queues()
    184  hba->nr_queues[HCTX_TYPE_DEFAULT] = min3(rem, rw_queues,   in ufshcd_mcq_config_nr_queues()
    188  host->nr_hw_queues += hba->nr_queues[i];             in ufshcd_mcq_config_nr_queues()
    358  if (i < hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL])   in ufshcd_mcq_make_queues_operational()

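ufshcd_mcq_config_nr_queues() carves the controller's hardware-queue budget into the blk-mq queue types, subtracting each grant from a running remainder and summing the result into nr_hw_queues. A simplified, self-contained sketch of that bookkeeping; the fallback's third min3() argument is truncated in the listing, so budget below is a stand-in:

```c
#include <stdio.h>

enum hctx_type { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL,
		 HCTX_MAX_TYPES };

static unsigned int min3u(unsigned int a, unsigned int b, unsigned int c)
{
	unsigned int m = a < b ? a : b;
	return m < c ? m : c;
}

/* Split a hardware budget across the blk-mq queue types, in the same
 * DEFAULT/POLL/READ order as ufshcd_mcq_config_nr_queues(); the policy
 * details here are simplified. */
static unsigned int config_nr_queues(unsigned int budget,
				     unsigned int rw_queues,
				     unsigned int read_queues,
				     unsigned int poll_queues,
				     unsigned int nr_queues[HCTX_MAX_TYPES])
{
	unsigned int rem = budget, total = 0;

	nr_queues[HCTX_TYPE_DEFAULT] = rw_queues < rem ? rw_queues : rem;
	rem -= nr_queues[HCTX_TYPE_DEFAULT];

	nr_queues[HCTX_TYPE_POLL] = poll_queues < rem ? poll_queues : rem;
	rem -= nr_queues[HCTX_TYPE_POLL];

	nr_queues[HCTX_TYPE_READ] = read_queues < rem ? read_queues : rem;
	rem -= nr_queues[HCTX_TYPE_READ];

	/* Fall back to whatever is left if no default queues were granted. */
	if (!nr_queues[HCTX_TYPE_DEFAULT])
		nr_queues[HCTX_TYPE_DEFAULT] = min3u(rem, rw_queues, budget);

	for (int i = 0; i < HCTX_MAX_TYPES; i++)
		total += nr_queues[i];
	return total;	/* becomes the host's nr_hw_queues */
}

int main(void)
{
	unsigned int nr_queues[HCTX_MAX_TYPES];
	unsigned int total = config_nr_queues(8, 4, 2, 2, nr_queues);

	printf("default=%u read=%u poll=%u total=%u\n",
	       nr_queues[HCTX_TYPE_DEFAULT], nr_queues[HCTX_TYPE_READ],
	       nr_queues[HCTX_TYPE_POLL], total);
	return 0;
}
```
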
/linux-6.3-rc2/block/
blk-mq-pci.c
     32  for (queue = 0; queue < qmap->nr_queues; queue++) {  in blk_mq_pci_map_queues()
     44  WARN_ON_ONCE(qmap->nr_queues > 1);                   in blk_mq_pci_map_queues()

blk-mq-cpumap.c
     24  masks = group_cpus_evenly(qmap->nr_queues);          in blk_mq_map_queues()
     31  for (queue = 0; queue < qmap->nr_queues; queue++) {  in blk_mq_map_queues()

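blk_mq_map_queues() asks group_cpus_evenly() for nr_queues CPU groups and then records, for every CPU, the queue its group maps to. A self-contained sketch of an even CPU-to-queue assignment, using a plain modulo spread instead of the kernel's topology-aware grouping:

```c
#include <stdio.h>

#define NR_CPUS 8

struct queue_map {
	unsigned int nr_queues;
	unsigned int mq_map[NR_CPUS];	/* cpu -> queue index */
};

/* Spread CPUs across queues as evenly as possible; the kernel's
 * group_cpus_evenly() does this with NUMA/SMT topology awareness. */
static void map_queues(struct queue_map *qmap)
{
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		qmap->mq_map[cpu] = cpu % qmap->nr_queues;
}

int main(void)
{
	struct queue_map qmap = { .nr_queues = 3 };

	map_queues(&qmap);
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%u -> queue %u\n", cpu, qmap.mq_map[cpu]);
	return 0;
}
```
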
blk-mq-rdma.c
     30  for (queue = 0; queue < map->nr_queues; queue++) {   in blk_mq_rdma_map_queues()

blk-mq-virtio.c
     33  for (queue = 0; queue < qmap->nr_queues; queue++) {  in blk_mq_virtio_map_queues()

/linux-6.3-rc2/tools/perf/util/
auxtrace.c
    206  if (nr_queues > max_nr_queues)                       in auxtrace_alloc_queue_array()
    213  for (i = 0; i < nr_queues; i++) {                    in auxtrace_alloc_queue_array()
    233  unsigned int nr_queues = queues->nr_queues;          in auxtrace_queues__grow() local
    237  if (!nr_queues)                                      in auxtrace_queues__grow()
    240  while (nr_queues && nr_queues < new_nr_queues)       in auxtrace_queues__grow()
    241  nr_queues <<= 1;                                     in auxtrace_queues__grow()
    243  if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)   in auxtrace_queues__grow()
    259  queues->nr_queues = nr_queues;                       in auxtrace_queues__grow()
    294  if (idx >= queues->nr_queues) {                      in auxtrace_queues__queue_buffer()
    467  queues->nr_queues = 0;                               in auxtrace_queues__free()
    [all …]

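auxtrace_queues__grow() doubles nr_queues until it covers the request, then detects overflow by checking whether the result wrapped below either the old size or the request. The growth and overflow logic as a standalone helper:

```c
#include <stdio.h>

/* Grow a queue count to at least new_nr by doubling from cur, as
 * auxtrace_queues__grow() does. Returns 0 on overflow (or if cur is 0,
 * where the real code falls back to a minimum size). */
static unsigned int grow_nr_queues(unsigned int cur, unsigned int new_nr)
{
	unsigned int nr_queues = cur;

	while (nr_queues && nr_queues < new_nr)
		nr_queues <<= 1;

	/* If doubling wrapped around, nr_queues ends up smaller than
	 * either the old size or the request; treat that as failure. */
	if (nr_queues < cur || nr_queues < new_nr)
		return 0;

	return nr_queues;
}

int main(void)
{
	printf("%u\n", grow_nr_queues(4, 21));		/* -> 32 */
	printf("%u\n", grow_nr_queues(1u << 31, 3u << 30)); /* overflow -> 0 */
	return 0;
}
```
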
s390-cpumsf.c
    203  if (!sf->use_logfile || sf->queues.nr_queues <= sample->cpu)   in s390_cpumcf_dumpctr()
    825  for (i = 0; i < sf->queues.nr_queues; i++) {         in s390_cpumsf_setup_queues()
   1019  for (i = 0; i < queues->nr_queues; i++) {            in s390_cpumsf_free_queues()

intel-bts.c
    211  for (i = 0; i < bts->queues.nr_queues; i++) {        in intel_bts_setup_queues()
    542  for (i = 0; i < queues->nr_queues; i++) {            in intel_bts_process_tid_exit()
    713  for (i = 0; i < queues->nr_queues; i++) {            in intel_bts_free_events()

/linux-6.3-rc2/drivers/block/rnbd/
rnbd-clt.c
   1171  set->map[HCTX_TYPE_DEFAULT].nr_queues = num_online_cpus();   in rnbd_rdma_map_queues()
   1173  set->map[HCTX_TYPE_READ].nr_queues = num_online_cpus();      in rnbd_rdma_map_queues()
   1180  set->map[HCTX_TYPE_POLL].nr_queues = sess->nr_poll_queues;   in rnbd_rdma_map_queues()
   1182  set->map[HCTX_TYPE_READ].nr_queues;                  in rnbd_rdma_map_queues()
   1186  set->map[HCTX_TYPE_DEFAULT].nr_queues,               in rnbd_rdma_map_queues()
   1187  set->map[HCTX_TYPE_READ].nr_queues,                  in rnbd_rdma_map_queues()
   1188  set->map[HCTX_TYPE_POLL].nr_queues);                 in rnbd_rdma_map_queues()
   1192  set->map[HCTX_TYPE_DEFAULT].nr_queues,               in rnbd_rdma_map_queues()
   1193  set->map[HCTX_TYPE_READ].nr_queues);                 in rnbd_rdma_map_queues()

/linux-6.3-rc2/drivers/block/null_blk/
main.c
   1517  if (nullb->nr_queues != 1)                           in nullb_to_queue()
   1518  index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);   in nullb_to_queue()
   1589  map->nr_queues = submit_queues;                      in null_map_queues()
   1592  map->nr_queues = 0;                                  in null_map_queues()
   1595  map->nr_queues = poll_queues;                        in null_map_queues()
   1599  qoff += map->nr_queues;                              in null_map_queues()
   1718  for (i = 0; i < nullb->nr_queues; i++)               in cleanup_queues()
   1729  nullb->nr_queues--;                                  in null_exit_hctx()
   1755  nullb->nr_queues++;                                  in null_init_hctx()
   1886  nullb->nr_queues++;                                  in init_driver_queues()

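nullb_to_queue() picks a queue by dividing the CPU id by ceil(nr_cpu_ids / nr_queues), so consecutive CPUs share a queue and the queues cover the CPU space in contiguous blocks. The arithmetic from line 1518, extracted into a standalone helper:

```c
#include <stdio.h>

/* Map a CPU id to a queue index by splitting nr_cpu_ids CPUs into
 * nr_queues contiguous blocks of size ceil(nr_cpu_ids / nr_queues),
 * the expression at main.c:1518. */
static unsigned int cpu_to_queue(unsigned int cpu, unsigned int nr_cpu_ids,
				 unsigned int nr_queues)
{
	if (nr_queues == 1)
		return 0;
	return cpu / ((nr_cpu_ids + nr_queues - 1) / nr_queues);
}

int main(void)
{
	/* 8 CPUs over 3 queues: block size ceil(8/3) = 3,
	 * so CPUs 0-2 -> queue 0, 3-5 -> queue 1, 6-7 -> queue 2. */
	for (unsigned int cpu = 0; cpu < 8; cpu++)
		printf("cpu%u -> queue %u\n", cpu, cpu_to_queue(cpu, 8, 3));
	return 0;
}
```
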
null_blk.h
    135  unsigned int nr_queues;                              member

/linux-6.3-rc2/drivers/s390/cio/
qdio_setup.c
    100  static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)   in __qdio_allocate_qs() argument
    105  for (i = 0; i < nr_queues; i++) {                    in __qdio_allocate_qs()

/linux-6.3-rc2/drivers/nvme/host/
rdma.c
    903  int ret, nr_queues;                                  in nvme_rdma_configure_io_queues() local
    920  nr_queues = min(ctrl->tag_set.nr_hw_queues + 1, ctrl->ctrl.queue_count);   in nvme_rdma_configure_io_queues()
    921  ret = nvme_rdma_start_io_queues(ctrl, 1, nr_queues); in nvme_rdma_configure_io_queues()
    945  ret = nvme_rdma_start_io_queues(ctrl, nr_queues,     in nvme_rdma_configure_io_queues()
   2150  set->map[HCTX_TYPE_DEFAULT].nr_queues =              in nvme_rdma_map_queues()
   2153  set->map[HCTX_TYPE_READ].nr_queues =                 in nvme_rdma_map_queues()
   2159  set->map[HCTX_TYPE_DEFAULT].nr_queues =              in nvme_rdma_map_queues()
   2162  set->map[HCTX_TYPE_READ].nr_queues =                 in nvme_rdma_map_queues()
   2173  set->map[HCTX_TYPE_POLL].nr_queues =                 in nvme_rdma_map_queues()

tcp.c
   1865  int ret, nr_queues;                                  in nvme_tcp_configure_io_queues() local
   1885  nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count);   in nvme_tcp_configure_io_queues()
   1886  ret = nvme_tcp_start_io_queues(ctrl, 1, nr_queues);  in nvme_tcp_configure_io_queues()
   1910  ret = nvme_tcp_start_io_queues(ctrl, nr_queues,      in nvme_tcp_configure_io_queues()
   2435  set->map[HCTX_TYPE_DEFAULT].nr_queues =              in nvme_tcp_map_queues()
   2438  set->map[HCTX_TYPE_READ].nr_queues =                 in nvme_tcp_map_queues()
   2444  set->map[HCTX_TYPE_DEFAULT].nr_queues =              in nvme_tcp_map_queues()
   2447  set->map[HCTX_TYPE_READ].nr_queues =                 in nvme_tcp_map_queues()
   2456  set->map[HCTX_TYPE_POLL].nr_queues =                 in nvme_tcp_map_queues()

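Both nvme_rdma_configure_io_queues() and nvme_tcp_configure_io_queues() start I/O queues in two phases: first only those the current tag set can address (the + 1 accounts for queue 0 being the admin queue), and the remainder after the tag set has been updated. A sketch of the sizing arithmetic at rdma.c:920 and tcp.c:1885:

```c
#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int queue_count = 9;	/* admin queue + 8 I/O queues */
	unsigned int nr_hw_queues = 4;	/* what the current tag set covers */

	/* Start only the I/O queues the existing tag set can reach; the
	 * + 1 accounts for queue 0 being the admin queue. */
	unsigned int nr_queues = min_u(nr_hw_queues + 1, queue_count);

	printf("start queues 1..%u now, %u..%u after tag-set update\n",
	       nr_queues - 1, nr_queues, queue_count - 1);
	return 0;
}
```
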
pci.c
    450  map->nr_queues = dev->io_queues[i];                  in nvme_pci_map_queues()
    451  if (!map->nr_queues) {                               in nvme_pci_map_queues()
    465  qoff += map->nr_queues;                              in nvme_pci_map_queues()
    466  offset += map->nr_queues;                            in nvme_pci_map_queues()
   2418  int nr_queues = dev->online_queues - 1, sent = 0;    in __nvme_delete_io_queues() local
   2423  while (nr_queues > 0) {                              in __nvme_delete_io_queues()
   2424  if (nvme_delete_queue(&dev->queues[nr_queues], opcode))   in __nvme_delete_io_queues()
   2426  nr_queues--;                                         in __nvme_delete_io_queues()
   2430  struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent];   in __nvme_delete_io_queues()
   2438  if (nr_queues)                                       in __nvme_delete_io_queues()

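__nvme_delete_io_queues() walks the I/O queues from the highest id down to 1, queue 0 being the admin queue, and tracks how many delete commands went out. A simplified sketch of that countdown; the real code issues the deletions asynchronously and then waits for their completions:

```c
#include <stdio.h>
#include <stdbool.h>

/* Pretend to issue a delete for one queue; returns false on failure. */
static bool delete_queue(int qid)
{
	printf("deleting I/O queue %d\n", qid);
	return true;
}

/* Delete all I/O queues, highest id first; queue 0 (the admin queue)
 * is excluded, hence the count starts at online_queues - 1. */
static void delete_io_queues(int online_queues)
{
	int nr_queues = online_queues - 1, sent = 0;

	while (nr_queues > 0) {
		if (!delete_queue(nr_queues))
			break;
		nr_queues--;
		sent++;
	}
	printf("%d deletions sent, %d queues not reached\n", sent, nr_queues);
}

int main(void)
{
	delete_io_queues(5);	/* deletes queues 4..1 */
	return 0;
}
```
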
/linux-6.3-rc2/drivers/char/
virtio_console.c
   1841  u32 i, j, nr_ports, nr_queues;                       in init_vqs() local
   1845  nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2;   in init_vqs()
   1847  vqs = kmalloc_array(nr_queues, sizeof(struct virtqueue *), GFP_KERNEL);   in init_vqs()
   1848  io_callbacks = kmalloc_array(nr_queues, sizeof(vq_callback_t *),   in init_vqs()
   1850  io_names = kmalloc_array(nr_queues, sizeof(char *), GFP_KERNEL);   in init_vqs()
   1888  err = virtio_find_vqs(portdev->vdev, nr_queues, vqs, in init_vqs()

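init_vqs() derives nr_queues from the port count: every port needs an RX/TX virtqueue pair, and multiport devices add one more pair for the control channel, hence (nr_ports + 1) * 2. The sizing rule from line 1845 as a helper:

```c
#include <stdio.h>
#include <stdbool.h>

/* Each port uses an in/out virtqueue pair; multiport adds a control
 * pair. Single-port devices use just one pair, as at
 * virtio_console.c:1845. */
static unsigned int console_nr_queues(unsigned int nr_ports, bool multiport)
{
	return multiport ? (nr_ports + 1) * 2 : 2;
}

int main(void)
{
	printf("1 port, no multiport: %u vqs\n", console_nr_queues(1, false));
	printf("3 ports, multiport:   %u vqs\n", console_nr_queues(3, true));
	return 0;
}
```
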
/linux-6.3-rc2/drivers/block/
ublk_drv.c
   1403  int nr_queues = ub->dev_info.nr_hw_queues;           in ublk_deinit_queues() local
   1409  for (i = 0; i < nr_queues; i++)                      in ublk_deinit_queues()
   1416  int nr_queues = ub->dev_info.nr_hw_queues;           in ublk_init_queues() local
   1422  ub->__queues = kcalloc(nr_queues, ubq_size, GFP_KERNEL);   in ublk_init_queues()
   1426  for (i = 0; i < nr_queues; i++) {                    in ublk_init_queues()

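ublk_init_queues() allocates all queues with one kcalloc() of nr_queues elements of ubq_size bytes each, which suggests every queue carries a variable-sized tail and so must be sized at runtime rather than with a plain sizeof. A sketch of that layout under the assumption that ubq_size covers the queue struct plus a per-request array; the struct below is illustrative, not ublk's actual definition:

```c
#include <stdio.h>
#include <stdlib.h>

struct io_slot { int flags; };

/* One queue plus its trailing per-request array, sized together so a
 * single kcalloc-style allocation covers all queues. */
struct sim_queue {
	int q_id;
	int q_depth;
	struct io_slot ios[];	/* flexible array member */
};

int main(void)
{
	int nr_queues = 4, depth = 16;
	size_t ubq_size = sizeof(struct sim_queue) +
			  depth * sizeof(struct io_slot);
	char *queues = calloc(nr_queues, ubq_size);

	if (!queues)
		return 1;

	for (int i = 0; i < nr_queues; i++) {
		/* Queue i lives at byte offset i * ubq_size. */
		struct sim_queue *q =
			(struct sim_queue *)(queues + i * ubq_size);

		q->q_id = i;
		q->q_depth = depth;
	}
	printf("initialized %d queues of %zu bytes each\n",
	       nr_queues, ubq_size);
	free(queues);
	return 0;
}
```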