/linux-6.3-rc2/drivers/net/ethernet/mellanox/mlx5/core/

wq.h
  107  return wq->cur_sz == wq->sz;  in mlx5_wq_cyc_is_full()
  112  return wq->sz - wq->cur_sz;  in mlx5_wq_cyc_missing()
  139  *wq->db = cpu_to_be32(wq->wqe_ctr);  in mlx5_wq_cyc_update_db_record()
  149  return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr);  in mlx5_wq_cyc_get_head()
  154  return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr - wq->cur_sz);  in mlx5_wq_cyc_get_tail()
  197  return mlx5_cqwq_ctr2ix(wq, wq->cc);  in mlx5_cqwq_get_ci()
  217  return mlx5_cqwq_get_ctr_wrap_cnt(wq, wq->cc);  in mlx5_cqwq_get_wrap_cnt()
  227  *wq->db = cpu_to_be32(wq->cc & 0xffffff);  in mlx5_cqwq_update_db_record()
  270  return wq->cur_sz == wq->fbc.sz_m1;  in mlx5_wq_ll_is_full()
  280  return wq->fbc.sz_m1 - wq->cur_sz;  in mlx5_wq_ll_missing()
  [all …]
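These helpers treat the cyclic work queue as a power-of-two ring driven by free-running counters: mlx5_wq_cyc_ctr2ix() masks wqe_ctr down to a slot index, the tail is simply the counter minus the current fill level, and the doorbell record publishes the raw counter. Below is a minimal userspace sketch of that bookkeeping; struct cyc_wq and ctr2ix() are invented for illustration, not the driver's types.

/* Userspace sketch (not driver code): a free-running 16-bit counter is
 * masked down to a ring index, and head, tail and free space all derive
 * from the counter plus the fill level. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct cyc_wq {
	uint16_t wqe_ctr; /* free-running producer counter */
	uint16_t cur_sz;  /* entries currently outstanding */
	uint16_t sz;      /* ring size, must be a power of two */
};

/* Analogue of mlx5_wq_cyc_ctr2ix(): mask instead of modulo. */
static uint16_t ctr2ix(const struct cyc_wq *wq, uint16_t ctr)
{
	return ctr & (wq->sz - 1);
}

int main(void)
{
	struct cyc_wq wq = { .wqe_ctr = 0xfffe, .cur_sz = 3, .sz = 8 };

	/* head: slot for the next WQE; tail: oldest outstanding WQE */
	printf("head %u tail %u missing %u\n",
	       (unsigned)ctr2ix(&wq, wq.wqe_ctr),
	       (unsigned)ctr2ix(&wq, (uint16_t)(wq.wqe_ctr - wq.cur_sz)),
	       (unsigned)(wq.sz - wq.cur_sz));
	assert(wq.cur_sz != wq.sz); /* the "is_full" test above */
	return 0;
}

Because only the masked value ever indexes the ring, the counter is free to wrap; the doorbell record at line 139 simply publishes the raw counter.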
wq.c
  52  wq->db = wq_ctrl->db.db;  in mlx5_wq_cyc_create()
  62  wq->sz = mlx5_wq_cyc_get_size(wq);  in mlx5_wq_cyc_create()
  88  mlx5_wq_cyc_get_size(wq), wq->cur_sz, ix, len);  in mlx5_wq_cyc_wqe_dump()
  94  wq->wqe_ctr = 0;  in mlx5_wq_cyc_reset()
  95  wq->cur_sz = 0;  in mlx5_wq_cyc_reset()
  138  &wq->sq.fbc);  in mlx5_wq_qp_create()
  174  wq->db = wq_ctrl->db.db;  in mlx5_cqwq_create()
  225  wq->db = wq_ctrl->db.db;  in mlx5_wq_ll_create()
  249  wq->head = 0;  in mlx5_wq_ll_reset()
  250  wq->wqe_ctr = 0;  in mlx5_wq_ll_reset()
  [all …]
/linux-6.3-rc2/drivers/scsi/fnic/

vnic_wq.c
  21  if (!wq->ctrl)  in vnic_wq_get_ctrl()
  67  wq->to_use = wq->to_clean = wq->bufs[0];  in vnic_wq_alloc_bufs()
  77  vdev = wq->vdev;  in vnic_wq_free()
  86  wq->ctrl = NULL;  in vnic_wq_free()
  96  wq->vdev = vdev;  in vnic_wq_alloc()
  99  if (!wq->ctrl) {  in vnic_wq_alloc()
  125  wq->index = 0;  in vnic_wq_devcmd2_alloc()
  126  wq->vdev = vdev;  in vnic_wq_devcmd2_alloc()
  159  wq->to_use = wq->to_clean =  in vnic_wq_init_start()
  173  iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);  in vnic_wq_init()
  [all …]
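vnic_wq_alloc_bufs() links the per-descriptor buffers into a ring and starts both cursors, to_use (next buffer to post) and to_clean (next completion to reap), at bufs[0]. A stand-alone sketch of that shape, with an invented struct wq_buf rather than the driver's:

/* Sketch (not driver code) of the circular buffer ring: buffers are
 * linked into a ring, and the post pointer and the completion pointer
 * both start at the first buffer. */
#include <stdio.h>
#include <stdlib.h>

struct wq_buf {
	struct wq_buf *next;
	unsigned int index;
};

int main(void)
{
	enum { COUNT = 4 };
	struct wq_buf *bufs = calloc(COUNT, sizeof(*bufs));
	if (!bufs)
		return 1;

	for (unsigned int i = 0; i < COUNT; i++) {
		bufs[i].index = i;
		bufs[i].next = &bufs[(i + 1) % COUNT]; /* close the ring */
	}

	struct wq_buf *to_use = &bufs[0], *to_clean = &bufs[0];

	/* Walking to_use all the way around ends up back at to_clean. */
	for (unsigned int i = 0; i < COUNT; i++)
		to_use = to_use->next;
	printf("wrapped back: %s\n", to_use == to_clean ? "yes" : "no");
	free(bufs);
	return 0;
}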
vnic_wq_copy.h
  31  return wq->ring.desc_count - 1 - wq->ring.desc_avail;  in vnic_wq_copy_desc_in_use()
  43  ((wq->to_use_index + 1) == wq->ring.desc_count) ?  in vnic_wq_copy_post()
  44  (wq->to_use_index = 0) : (wq->to_use_index++);  in vnic_wq_copy_post()
  45  wq->ring.desc_avail--;  in vnic_wq_copy_post()
  54  iowrite32(wq->to_use_index, &wq->ctrl->posted_index);  in vnic_wq_copy_post()
  64  cnt = wq->ring.desc_count - wq->to_clean_index + index + 1;  in vnic_wq_copy_desc_process()
  66  wq->to_clean_index = ((index + 1) % wq->ring.desc_count);  in vnic_wq_copy_desc_process()
  82  (*q_service)(wq, &wq_desc[wq->to_clean_index]);  in vnic_wq_copy_service()
  91  ((wq->to_clean_index + 1) == wq->ring.desc_count) ?  in vnic_wq_copy_service()
  92  (wq->to_clean_index = 0) : (wq->to_clean_index++);  in vnic_wq_copy_service()
  [all …]
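vnic_wq_copy_post() advances to_use_index with a compare-and-reset rather than a mask, so the ring size need not be a power of two, and vnic_wq_copy_desc_process() counts completions even when the hardware's completed index has wrapped past the clean cursor. A small sketch of that count; completed() and hw_index are names invented here:

/* Sketch of the completion-count arithmetic at line 64: given the last
 * index the hardware completed, count how many descriptors finished,
 * handling the wrapped case. */
#include <stdio.h>

static unsigned int completed(unsigned int desc_count,
			      unsigned int to_clean, unsigned int hw_index)
{
	if (hw_index >= to_clean)		/* no wrap */
		return hw_index - to_clean + 1;
	/* wrapped: tail of the ring plus the head up to hw_index */
	return desc_count - to_clean + hw_index + 1;
}

int main(void)
{
	printf("%u\n", completed(8, 2, 5)); /* 4: indices 2,3,4,5 */
	printf("%u\n", completed(8, 6, 1)); /* 4: indices 6,7,0,1 */
	return 0;
}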
vnic_wq_copy.c
  15  iowrite32(1, &wq->ctrl->enable);  in vnic_wq_copy_enable()
  33  wq->index, ioread32(&wq->ctrl->fetch_index),  in vnic_wq_copy_disable()
  48  wq->to_use_index = wq->to_clean_index = 0;  in vnic_wq_copy_clean()
  61  vdev = wq->vdev;  in vnic_wq_copy_free()
  63  wq->ctrl = NULL;  in vnic_wq_copy_free()
  70  wq->index = index;  in vnic_wq_copy_alloc()
  71  wq->vdev = vdev;  in vnic_wq_copy_alloc()
  72  wq->to_use_index = wq->to_clean_index = 0;  in vnic_wq_copy_alloc()
  74  if (!wq->ctrl) {  in vnic_wq_copy_alloc()
  79  vnic_wq_copy_disable(wq);  in vnic_wq_copy_alloc()
  [all …]
vnic_wq.h
  89  return wq->ring.desc_avail;  in vnic_wq_desc_avail()
  95  return wq->ring.desc_count - wq->ring.desc_avail - 1;  in vnic_wq_desc_used()
  100  return wq->to_use->desc;  in vnic_wq_next_desc()
  107  struct vnic_wq_buf *buf = wq->to_use;  in vnic_wq_post()
  124  wq->to_use = buf;  in vnic_wq_post()
  126  wq->ring.desc_avail--;  in vnic_wq_post()
  137  buf = wq->to_clean;  in vnic_wq_service()
  142  wq->ring.desc_avail++;  in vnic_wq_service()
  144  wq->to_clean = buf->next;  in vnic_wq_service()
  149  buf = wq->to_clean;  in vnic_wq_service()
  [all …]
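vnic_wq_desc_used() reports desc_count - desc_avail - 1 because one descriptor slot is deliberately never handed out; otherwise a completely full ring would be indistinguishable from an empty one. A tiny sketch of the invariant (the values are illustrative):

/* Why "used = count - avail - 1": one slot is always held back so that
 * full and empty remain distinguishable.  Illustrative, not driver code. */
#include <assert.h>

int main(void)
{
	unsigned int desc_count = 16;
	unsigned int desc_avail = desc_count - 1; /* empty ring */

	assert(desc_count - desc_avail - 1 == 0);  /* used == 0 */

	desc_avail = 0;                            /* "full" ring */
	assert(desc_count - desc_avail - 1 == 15); /* one slot held back */
	return 0;
}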
/linux-6.3-rc2/drivers/net/ethernet/cisco/enic/

vnic_wq.c
  26  if (!wq->bufs[i])  in vnic_wq_alloc_bufs()
  51  wq->to_use = wq->to_clean = wq->bufs[0];  in vnic_wq_alloc_bufs()
  61  vdev = wq->vdev;  in vnic_wq_free()
  72  wq->ctrl = NULL;  in vnic_wq_free()
  81  wq->vdev = vdev;  in vnic_wq_alloc()
  84  if (!wq->ctrl) {  in vnic_wq_alloc()
  109  wq->index = 0;  in enic_wq_devcmd2_alloc()
  110  wq->vdev = vdev;  in enic_wq_devcmd2_alloc()
  113  if (!wq->ctrl)  in enic_wq_devcmd2_alloc()
  139  wq->to_use = wq->to_clean =  in enic_wq_init_start()
  [all …]
vnic_wq.h
  86  struct vnic_wq wq;  member
  93  return wq->ring.desc_avail;  in vnic_wq_desc_avail()
  99  return wq->ring.desc_count - wq->ring.desc_avail - 1;  in vnic_wq_desc_used()
  104  return wq->to_use->desc;  in vnic_wq_next_desc()
  115  iowrite32(wq->to_use->index, &wq->ctrl->posted_index);  in vnic_wq_doorbell()
  124  struct vnic_wq_buf *buf = wq->to_use;  in vnic_wq_post()
  136  wq->to_use = buf;  in vnic_wq_post()
  149  buf = wq->to_clean;  in vnic_wq_service()
  154  wq->ring.desc_avail++;  in vnic_wq_service()
  156  wq->to_clean = buf->next;  in vnic_wq_service()
  [all …]
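Unlike the fnic variant, enic splits posting from notification: vnic_wq_post() only advances the software state, and vnic_wq_doorbell() publishes the final posted_index once for a whole burst. A sketch of that batching; mmio_write32() is a stand-in invented for this sketch, not a kernel API:

/* Post several descriptors, then notify the NIC with one write. */
#include <stdint.h>
#include <stdio.h>

static void mmio_write32(uint32_t val, const char *reg)
{
	printf("iowrite32(%u, %s)\n", val, reg); /* pretend MMIO */
}

int main(void)
{
	uint32_t to_use_index = 0;
	unsigned int desc_count = 64;

	/* Stage a burst of 3 descriptors without ringing the doorbell... */
	for (int i = 0; i < 3; i++)
		to_use_index = (to_use_index + 1) % desc_count;

	/* ...then publish the final index once: one MMIO, three WQEs. */
	mmio_write32(to_use_index, "ctrl->posted_index");
	return 0;
}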
/linux-6.3-rc2/drivers/scsi/snic/

vnic_wq.c
  16  if (!wq->ctrl)  in vnic_wq_get_ctrl()
  45  buf = wq->bufs[i];  in vnic_wq_alloc_bufs()
  62  wq->to_use = wq->to_clean = wq->bufs[0];  in vnic_wq_alloc_bufs()
  72  vdev = wq->vdev;  in svnic_wq_free()
  81  wq->ctrl = NULL;  in svnic_wq_free()
  90  wq->index = 0;  in vnic_wq_devcmd2_alloc()
  91  wq->vdev = vdev;  in vnic_wq_devcmd2_alloc()
  114  wq->index = index;  in svnic_wq_alloc()
  115  wq->vdev = vdev;  in svnic_wq_alloc()
  158  wq->to_use = wq->to_clean =  in vnic_wq_init_start()
  [all …]
vnic_wq.h
  74  return wq->ring.desc_avail;  in svnic_wq_desc_avail()
  80  return wq->ring.desc_count - wq->ring.desc_avail - 1;  in svnic_wq_desc_used()
  85  return wq->to_use->desc;  in svnic_wq_next_desc()
  92  struct vnic_wq_buf *buf = wq->to_use;  in svnic_wq_post()
  109  wq->to_use = buf;  in svnic_wq_post()
  111  wq->ring.desc_avail--;  in svnic_wq_post()
  122  buf = wq->to_clean;  in svnic_wq_service()
  127  wq->ring.desc_avail++;  in svnic_wq_service()
  129  wq->to_clean = buf->next;  in svnic_wq_service()
  134  buf = wq->to_clean;  in svnic_wq_service()
  [all …]
/linux-6.3-rc2/drivers/net/ethernet/huawei/hinic/

hinic_hw_wq.c
  34  #define WQ_SIZE(wq) ((wq)->q_depth * (wq)->wqebb_size)  argument
  69  ((wq)->shadow_block_vaddr[WQE_PAGE_NUM(wq, idx)])
  71  #define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask)  argument
  450  num_q_pages = ALIGN(WQ_SIZE(wq), wq->wq_page_size) / wq->wq_page_size;  in alloc_wq_pages()
  539  err = wqs_next_block(wqs, &wq->page_idx, &wq->block_idx);  in hinic_wq_allocate()
  552  wq->block_vaddr = WQ_BASE_VADDR(wqs, wq);  in hinic_wq_allocate()
  553  wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq);  in hinic_wq_allocate()
  554  wq->block_paddr = WQ_BASE_PADDR(wqs, wq);  in hinic_wq_allocate()
  570  wqs_return_block(wqs, wq->page_idx, wq->block_idx);  in hinic_wq_allocate()
  581  free_wq_pages(wq, wqs->hwif, wq->num_q_pages);  in hinic_wq_free()
  [all …]
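alloc_wq_pages() computes how many WQ pages back the queue by rounding the byte size (queue depth times WQEBB size, per the WQ_SIZE macro) up to a whole page. A userspace sketch of that arithmetic with illustrative numbers; ALIGN here mirrors the kernel macro for power-of-two alignment:

/* Page-count math from line 450: round the queue's byte size up to a
 * whole number of WQ pages. */
#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int q_depth = 4096, wqebb_size = 64;  /* illustrative */
	unsigned int wq_page_size = 4096;
	unsigned int wq_size = q_depth * wqebb_size;   /* WQ_SIZE(wq) */
	unsigned int num_q_pages = ALIGN(wq_size, wq_page_size) / wq_page_size;

	printf("%u bytes -> %u pages\n", wq_size, num_q_pages); /* 64 pages */
	return 0;
}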
/linux-6.3-rc2/drivers/dma/idxd/

device.c
  117  num_descs = wq_dedicated(wq) ? wq->size : wq->threshold;  in idxd_wq_alloc_resources()
  150  desc->wq = wq;  in idxd_wq_alloc_resources()
  159  dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);  in idxd_wq_alloc_resources()
  174  dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);  in idxd_wq_free_resources()
  845  wq->wqcfg->wq_size = wq->size;  in idxd_wq_config_write()
  848  wq->wqcfg->wq_thresh = wq->threshold;  in idxd_wq_config_write()
  872  wq->wqcfg->priority = wq->priority;  in idxd_wq_config_write()
  1049  wq->size = wq->wqcfg->wq_size;  in idxd_wq_load_config()
  1050  wq->threshold = wq->wqcfg->wq_thresh;  in idxd_wq_load_config()
  1058  wq->priority = wq->wqcfg->priority;  in idxd_wq_load_config()
  [all …]
cdev.c
  34  struct idxd_wq *wq;  member
  45  struct idxd_wq *wq = idxd_cdev->wq;  in idxd_cdev_dev_release() local
  82  idxd = wq->idxd;  in idxd_cdev_open()
  93  if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) {  in idxd_cdev_open()
  98  ctx->wq = wq;  in idxd_cdev_open()
  129  idxd_wq_get(wq);  in idxd_cdev_open()
  142  struct idxd_wq *wq = ctx->wq;  in idxd_cdev_release() local
  168  idxd_wq_put(wq);  in idxd_cdev_release()
  192  struct idxd_wq *wq = ctx->wq;  in idxd_cdev_mmap() local
  218  struct idxd_wq *wq = ctx->wq;  in idxd_cdev_poll() local
  [all …]
dma.c
  20  return idxd_chan->wq;  in to_idxd_wq()
  139  idxd_wq_get(wq);  in idxd_dma_alloc_chan_resources()
  141  idxd_wq_refcount(wq));  in idxd_dma_alloc_chan_resources()
  150  idxd_wq_put(wq);  in idxd_dma_free_chan_resources()
  152  idxd_wq_refcount(wq));  in idxd_dma_free_chan_resources()
  278  idxd_chan->wq = wq;  in idxd_register_dma_channel()
  292  kfree(wq->idxd_chan);  in idxd_unregister_dma_channel()
  293  wq->idxd_chan = NULL;  in idxd_unregister_dma_channel()
  329  drv_disable_wq(wq);  in idxd_dmaengine_drv_probe()
  341  __idxd_wq_quiesce(wq);  in idxd_dmaengine_drv_remove()
  [all …]
idxd.h
  148  struct idxd_wq *wq;  member
  175  struct idxd_wq *wq;  member
  338  struct idxd_wq *wq;  member
  350  #define wq_confdev(wq) &wq->idxd_dev.conf_dev  argument
  451  if (wq->type == IDXD_WQT_KERNEL && strcmp(wq->name, "dmaengine") == 0)  in is_idxd_wq_dmaengine()
  488  return (is_idxd_wq_kernel(wq) && device_pasid_enabled(wq->idxd)) ||  in wq_pasid_enabled()
  489  (is_idxd_wq_user(wq) && device_user_pasid_enabled(wq->idxd));  in wq_pasid_enabled()
  533  return wq->portal + ofs;  in idxd_wq_portal_addr()
  538  wq->client_count++;  in idxd_wq_get()
  543  wq->client_count--;  in idxd_wq_put()
  [all …]
submit.c
  14  struct idxd_device *idxd = wq->idxd;  in __get_desc()
  16  desc = wq->descs[idx];  in __get_desc()
  30  struct idxd_device *idxd = wq->idxd;  in idxd_alloc_desc()
  38  sbq = &wq->sbq;  in idxd_alloc_desc()
  44  return __get_desc(wq, idx, cpu);  in idxd_alloc_desc()
  62  return __get_desc(wq, idx, cpu);  in idxd_alloc_desc()
  183  portal = idxd_wq_portal_addr(wq);  in idxd_submit_desc()
  197  ie = &wq->ie;  in idxd_submit_desc()
  202  if (wq_dedicated(wq)) {  in idxd_submit_desc()
  207  percpu_ref_put(&wq->wq_active);  in idxd_submit_desc()
  [all …]
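idxd_alloc_desc() draws a free slot index from the work queue's sbitmap (wq->sbq), and __get_desc() turns that index into a preallocated descriptor from wq->descs[]. The stand-alone sketch below models the same allocate-by-index pattern with a plain bitmap; it is illustrative only and does not use the kernel's sbitmap API:

/* A bitmap hands out a free slot index; the index selects one of a
 * fixed pool of preallocated descriptors. */
#include <stdint.h>
#include <stdio.h>

struct desc { int id; };

static struct desc descs[32];
static uint32_t bitmap; /* bit set = slot busy */

static struct desc *alloc_desc(void)
{
	for (int idx = 0; idx < 32; idx++) {
		if (!(bitmap & (1u << idx))) {
			bitmap |= 1u << idx;
			descs[idx].id = idx;
			return &descs[idx]; /* __get_desc() analogue */
		}
	}
	return NULL; /* queue exhausted */
}

static void free_desc(struct desc *d)
{
	bitmap &= ~(1u << (d - descs));
}

int main(void)
{
	struct desc *d = alloc_desc();

	if (d) {
		printf("got slot %d\n", d->id);
		free_desc(d);
	}
	return 0;
}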
/linux-6.3-rc2/fs/autofs/

waitq.c
  30  while (wq) {  in autofs_catatonic_mode()
  33  kfree(wq->name.name - wq->offset);  in autofs_catatonic_mode()
  37  wq = nwq;  in autofs_catatonic_mode()
  93  wq->name.len, wq->name.name, type);  in autofs_notify_daemon()
  110  memcpy(mp->name, wq->name.name, wq->name.len);  in autofs_notify_daemon()
  123  memcpy(ep->name, wq->name.name, wq->name.len);  in autofs_notify_daemon()
  143  memcpy(packet->name, wq->name.name, wq->name.len);  in autofs_notify_daemon()
  183  for (wq = sbi->queues; wq; wq = wq->next) {  in autofs_find_wait()
  190  return wq;  in autofs_find_wait()
  215  if (wq) {  in validate_request()
  [all …]
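The kfree(wq->name.name - wq->offset) at line 33 works because wq->name.name points at the last path component inside a larger allocation, and the saved offset recovers the allocation's base. A userspace sketch of the idiom; the path and variable names are invented:

/* Store a pointer into the middle of an allocation plus the offset
 * back to its base, then free via base = name - offset. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *path = "/mnt/auto/share";
	char *base = malloc(strlen(path) + 1);
	if (!base)
		return 1;
	strcpy(base, path);

	char *name = strrchr(base, '/') + 1;  /* "share" */
	size_t offset = (size_t)(name - base); /* what autofs stashes */

	printf("name=%s offset=%zu\n", name, offset);
	free(name - offset);                  /* frees the original base */
	return 0;
}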
/linux-6.3-rc2/fs/btrfs/

async-thread.c
  52  return wq->fs_info;  in btrfs_workqueue_owner()
  71  return atomic_read(&wq->pending) > wq->thresh * 2;  in btrfs_workqueue_normal_congested()
  148  wq->count++;  in thresh_exec_hook()
  149  wq->count %= (wq->thresh / 4);  in thresh_exec_hook()
  150  if (!wq->count)  in thresh_exec_hook()
  172  workqueue_set_max_active(wq->normal_wq, wq->current_active);  in thresh_exec_hook()
  264  struct btrfs_workqueue *wq = work->wq;  in btrfs_work_helper() local
  312  work->wq = wq;  in btrfs_queue_work()
  325  if (!wq)  in btrfs_destroy_workqueue()
  329  kfree(wq);  in btrfs_destroy_workqueue()
  [all …]
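btrfs's thresholded workqueue declares itself congested once pending work exceeds twice its threshold, and thresh_exec_hook() re-evaluates the effective concurrency only every thresh / 4 executions to keep the adjustment cheap. A sketch of that hysteresis with invented values, and a printf standing in for workqueue_set_max_active():

/* Congestion test plus the "adjust only every thresh/4 calls" counter. */
#include <stdbool.h>
#include <stdio.h>

struct thr_wq {
	int pending;  /* queued, not yet finished */
	int thresh;
	int count;    /* executions since last adjustment */
};

static bool congested(const struct thr_wq *wq)
{
	return wq->pending > wq->thresh * 2;
}

static void exec_hook(struct thr_wq *wq)
{
	wq->count++;
	wq->count %= wq->thresh / 4;
	if (!wq->count)
		printf("re-evaluate max_active (pending=%d)\n", wq->pending);
}

int main(void)
{
	struct thr_wq wq = { .pending = 70, .thresh = 32 };

	for (int i = 0; i < 20; i++)
		exec_hook(&wq);                  /* fires twice: 8th, 16th */
	printf("congested: %s\n", congested(&wq) ? "yes" : "no");
	return 0;
}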
/linux-6.3-rc2/Documentation/translations/zh_CN/core-api/

workqueue.rst
  40  In the original wq implementation, a multi-threaded (MT) wq had one worker thread per CPU, while a single-threaded
  41  (ST) wq had one worker thread for the whole system. An MT wq had to keep as many workers…
  46  …limitation existed in both ST and MT wq, only less severely in MT. Every wq maintained its own separate
  47  worker pool. An MT wq could provide only one execution context per CPU, while an ST wq one for the whole…
  122  …work items need to be queued on that wq; a rescuer worker is kept on the wq so that under memory pres…
  156  A freezable wq takes part in the freeze phase of system suspend operations. Work items on the wq are…
  188  …execute 16 work items of the wq at the same time.
  195  The number of active work items of a wq is usually regulated by the wq's users; more specifically, by the…
  259  Now, let's assume w1 and w2 get queued on a different wq, q1, and this wq q1…
  281  they should be queued on separate wqs, each of which has ``WQ_MEM_RECLAIM``.
  [all …]
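The fragment at line 281 states the rule that dependent work items in the memory-reclaim path must not share a workqueue, so that each has its own rescuer thread. A kernel-module-style sketch of following that rule; the ex_wq1/ex_wq2 names and the handlers are invented, while alloc_workqueue(), WQ_MEM_RECLAIM, queue_work(), and flush_work() are the documented API:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *ex_wq1, *ex_wq2;
static struct work_struct w1, w2;

static void w2_fn(struct work_struct *work)
{
	/* e.g. write back one page */
}

static void w1_fn(struct work_struct *work)
{
	/* w1 waits on w2, so the two must not share a rescuer thread */
	queue_work(ex_wq2, &w2);
	flush_work(&w2);
}

static int __init example_init(void)
{
	/* One WQ_MEM_RECLAIM workqueue per dependency level. */
	ex_wq1 = alloc_workqueue("ex_wq1", WQ_MEM_RECLAIM, 1);
	ex_wq2 = alloc_workqueue("ex_wq2", WQ_MEM_RECLAIM, 1);
	if (!ex_wq1 || !ex_wq2)
		goto err;

	INIT_WORK(&w1, w1_fn);
	INIT_WORK(&w2, w2_fn);
	queue_work(ex_wq1, &w1);
	return 0;
err:
	if (ex_wq1)
		destroy_workqueue(ex_wq1);
	if (ex_wq2)
		destroy_workqueue(ex_wq2);
	return -ENOMEM;
}

static void __exit example_exit(void)
{
	destroy_workqueue(ex_wq1);
	destroy_workqueue(ex_wq2);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");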
/linux-6.3-rc2/include/linux/

swait.h
  123  return !list_empty(&wq->task_list);  in swait_active()
  144  return swait_active(wq);  in swq_has_sleeper()
  178  finish_swait(&wq, &__wait); \
  182  #define __swait_event(wq, condition) \  argument
  186  #define swait_event_exclusive(wq, condition) \  argument
  190  __swait_event(wq, condition); \
  227  __ret = __swait_event_interruptible_timeout(wq, \
  232  #define __swait_event_idle(wq, condition) \  argument
  247  #define swait_event_idle_exclusive(wq, condition) \  argument
  251  __swait_event_idle(wq, condition); \
  [all …]
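Simple waitqueues pair a sleeper in swait_event_exclusive() with a waker that sets the condition and calls swake_up_one(); swq_has_sleeper() (line 144) lets the waker skip the wakeup when nobody is sleeping. A kernel-style sketch; the ex_ names and the flag are invented, the swait calls are the real API:

#include <linux/swait.h>

static DECLARE_SWAIT_QUEUE_HEAD(ex_swait);
static bool ex_done;

static void waiter(void)
{
	/* Sleeps unless/until ex_done is observed true. */
	swait_event_exclusive(ex_swait, READ_ONCE(ex_done));
}

static void waker(void)
{
	WRITE_ONCE(ex_done, true);
	if (swq_has_sleeper(&ex_swait)) /* cheap check, as at line 144 */
		swake_up_one(&ex_swait);
}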
/linux-6.3-rc2/kernel/

workqueue.c
  1676  dwork->wq = wq;  in __queue_delayed_work()
  1780  rwork->wq = wq;  in queue_rcu_work()
  2134  struct workqueue_struct *wq = pwq->wq;  in send_mayday() local
  3605  if (wq->lock_name != wq->name)  in wq_free_lockdep()
  3819  struct workqueue_struct *wq = pwq->wq;  in pwq_unbound_release_workfn() local
  3863  struct workqueue_struct *wq = pwq->wq;  in pwq_adjust_max_active() local
  3917  pwq->wq = wq;  in init_pwq()
  3929  struct workqueue_struct *wq = pwq->wq;  in link_pwq() local
  4127  ctx->wq = wq;  in apply_wqattrs_prepare()
  4812  copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq));  in print_worker_info()
  [all …]
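__queue_delayed_work() and queue_rcu_work() back the two deferred-queueing flavours: run a work item after a timer, or only after an RCU grace period. A kernel-style usage sketch; the workqueue argument, handler, and item names are invented, the calls themselves are the stock API:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void ex_fn(struct work_struct *work)
{
	/* runs later, on the chosen workqueue */
}

static DECLARE_DELAYED_WORK(ex_dwork, ex_fn);
static struct rcu_work ex_rwork;

static void example(struct workqueue_struct *wq)
{
	/* Run ex_fn on wq roughly one second from now. */
	queue_delayed_work(wq, &ex_dwork, msecs_to_jiffies(1000));

	/* Run ex_fn on wq only after a full RCU grace period. */
	INIT_RCU_WORK(&ex_rwork, ex_fn);
	queue_rcu_work(wq, &ex_rwork);
}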
/linux-6.3-rc2/io_uring/

io-wq.c
  188  struct io_wq *wq = wqe->wq;  in io_worker_cancel_cb() local
  212  struct io_wq *wq = wqe->wq;  in io_worker_exit() local
  330  wq = wqe->wq;  in create_worker_cb()
  353  struct io_wq *wq = wqe->wq;  in io_queue_worker_create() local
  450  struct io_wq *wq = wqe->wq;  in io_wait_on_hash() local
  543  struct io_wq *wq = wqe->wq;  in io_worker_handle_work() local
  618  struct io_wq *wq = wqe->wq;  in io_wqe_worker() local
  874  struct io_wq *wq = wqe->wq;  in io_run_cancel() local
  1152  if (!wq)  in io_wq_create()
  1191  wqe->wq = wq;  in io_wq_create()
  [all …]
/linux-6.3-rc2/drivers/infiniband/hw/cxgb4/

t4.h
  492  return wq->rq.size - 1 - wq->rq.in_use;  in t4_rq_avail()
  498  if (++wq->rq.pidx == wq->rq.size)  in t4_rq_produce()
  508  if (++wq->rq.cidx == wq->rq.size)  in t4_rq_consume()
  534  return wq->sq.size - 1 - wq->sq.in_use;  in t4_sq_avail()
  540  if (++wq->sq.pidx == wq->sq.size)  in t4_sq_produce()
  549  if (wq->sq.cidx == wq->sq.flush_cidx)  in t4_sq_consume()
  552  if (++wq->sq.cidx == wq->sq.size)  in t4_sq_consume()
  622  writel(QID_V(wq->sq.qid) | PIDX_V(inc), wq->db);  in t4_ring_sq_db()
  647  writel(QID_V(wq->rq.qid) | PIDX_V(inc), wq->db);  in t4_ring_rq_db()
  664  wq->rq.queue[wq->rq.size].status.db_off = 1;  in t4_disable_wq_db()
  [all …]
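The T4 rings wrap by compare rather than mask, so their sizes need not be powers of two, and occupancy is tracked separately in in_use. A userspace sketch of the produce/consume bookkeeping with an invented struct t4_ring:

/* Producer/consumer indices wrap by compare; in_use tracks occupancy,
 * and (size - 1 - in_use) is the avail() calculation above. */
#include <assert.h>
#include <stdio.h>

struct t4_ring {
	unsigned int size, in_use, pidx, cidx;
};

static void produce(struct t4_ring *q)
{
	assert(q->in_use < q->size - 1); /* t4_rq_avail() > 0 */
	q->in_use++;
	if (++q->pidx == q->size)
		q->pidx = 0;
}

static void consume(struct t4_ring *q)
{
	assert(q->in_use > 0);
	q->in_use--;
	if (++q->cidx == q->size)
		q->cidx = 0;
}

int main(void)
{
	struct t4_ring q = { .size = 5 }; /* non-power-of-two still works */

	for (int i = 0; i < 4; i++)
		produce(&q);
	for (int i = 0; i < 4; i++)
		consume(&q);
	printf("pidx=%u cidx=%u in_use=%u\n", q.pidx, q.cidx, q.in_use);
	return 0;
}

The doorbell writes at lines 622 and 647 additionally encode how many new entries exist (PIDX_V(inc)), so a burst of posts needs only one MMIO.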
qp.c
  163  wq->rq.memsize, wq->rq.queue,  in destroy_qp()
  226  wq->sq.sw_sq = kcalloc(wq->sq.size, sizeof(*wq->sq.sw_sq),  in create_qp()
  387  wq->sq.qid, wq->rq.qid, wq->db,  in create_qp()
  388  wq->sq.bar2_va, wq->rq.bar2_va);  in create_qp()
  2330  qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,  in c4iw_create_qp()
  2478  struct t4_srq *wq = &srq->wq;  in free_srq_queue() local
  2503  wq->memsize, wq->queue,  in free_srq_queue()
  2515  struct t4_srq *wq = &srq->wq;  in alloc_srq_queue() local
  2528  wq->sw_rq = kcalloc(wq->size, sizeof(*wq->sw_rq),  in alloc_srq_queue()
  2539  wq->rqt_size = wq->size;  in alloc_srq_queue()
  [all …]
/linux-6.3-rc2/drivers/infiniband/hw/mana/

wq.c
  15  struct mana_ib_wq *wq;  in mana_ib_create_wq() local
  29  wq = kzalloc(sizeof(*wq), GFP_KERNEL);  in mana_ib_create_wq()
  30  if (!wq)  in mana_ib_create_wq()
  44  wq->umem = umem;  in mana_ib_create_wq()
  45  wq->wqe = init_attr->max_wr;  in mana_ib_create_wq()
  49  err = mana_ib_gd_create_dma_region(mdev, wq->umem, &wq->gdma_region);  in mana_ib_create_wq()
  59  err, wq->gdma_region);  in mana_ib_create_wq()
  63  return &wq->ibwq;  in mana_ib_create_wq()
  69  kfree(wq);  in mana_ib_create_wq()
  90  ib_umem_release(wq->umem);  in mana_ib_destroy_wq()
  [all …]
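mana_ib_create_wq() shows the usual unwind shape: allocate the wq object, pin the user memory, create the DMA region, and on failure free only what was already acquired (the kfree(wq) at line 69 sits on the error path). A stand-alone sketch of that goto-unwind pattern; all types and helpers here are placeholders, not the mana driver's:

/* Each acquired resource gets an unwind label; failures branch to free
 * only what was already set up. */
#include <stdio.h>
#include <stdlib.h>

struct wq_obj { void *umem; };

static void *umem_get(void) { return malloc(64); }  /* stand-in */
static void umem_release(void *u) { free(u); }

static struct wq_obj *create_wq(void)
{
	struct wq_obj *wq = calloc(1, sizeof(*wq));
	if (!wq)
		return NULL;

	wq->umem = umem_get();
	if (!wq->umem)
		goto err_free_wq;

	return wq;

err_free_wq:
	free(wq);
	return NULL;
}

int main(void)
{
	struct wq_obj *wq = create_wq();

	printf("%s\n", wq ? "created" : "failed");
	if (wq) {
		umem_release(wq->umem);
		free(wq);
	}
	return 0;
}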