| /drivers/infiniband/hw/mlx5/ |
| wr.c |
|   43  return cur + nreq >= wq->max_post;  in mlx5r_wq_overflow() |
| 1030  qp->sq.head += nreq;  in mlx5r_ring_db() |
| 1066  int nreq;  in mlx5_ib_post_send() local |
| 1082  for (nreq = 0; wr; nreq++, wr = wr->next) {  in mlx5_ib_post_send() |
| 1099  nreq);  in mlx5_ib_post_send() |
| 1193  if (likely(nreq))  in mlx5_ib_post_send() |
| 1194  mlx5r_ring_db(qp, nreq, ctrl);  in mlx5_ib_post_send() |
| 1216  int nreq;  in mlx5_ib_post_recv() local |
| 1233  for (nreq = 0; wr; nreq++, wr = wr->next) {  in mlx5_ib_post_recv() |
| 1270  if (likely(nreq)) {  in mlx5_ib_post_recv() |
| [all …] |
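The wr.c hits above show the posting pattern that recurs throughout this listing: nreq counts the work requests taken from the caller's chained list, each request is checked against queue overflow before its WQE is built, and the doorbell is rung exactly once after the loop. Below is a minimal, self-contained sketch of that shape; every name in it (struct wq, struct wr, post_send, ring_doorbell) is a hypothetical stand-in rather than the mlx5 API, and the real drivers take the send-queue lock around the whole sequence.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct wr { struct wr *next; };                 /* chained work request */
struct wq { unsigned head, tail, max_post; };   /* producer/consumer counters */

static bool wq_overflow(const struct wq *wq, int nreq)
{
        unsigned cur = wq->head - wq->tail;     /* WQEs already outstanding */
        return cur + nreq >= wq->max_post;      /* the check visible at wr.c:43 and qp.c:3300 */
}

static void ring_doorbell(struct wq *wq, int nreq)
{
        wq->head += nreq;                       /* publish all new WQEs at once */
        printf("doorbell: %d new WQEs, head=%u\n", nreq, wq->head);
}

static int post_send(struct wq *wq, struct wr *wr)
{
        int nreq;

        for (nreq = 0; wr; nreq++, wr = wr->next) {
                if (wq_overflow(wq, nreq))
                        return -1;              /* the drivers return -ENOMEM here */
                /* ... build one WQE for this wr ... */
        }
        if (nreq)                               /* only touch hardware if something was posted */
                ring_doorbell(wq, nreq);
        return 0;
}

int main(void)
{
        struct wq wq = { .head = 0, .tail = 0, .max_post = 4 };
        struct wr w[3] = { { &w[1] }, { &w[2] }, { NULL } };

        return post_send(&wq, &w[0]);
}

Batching the doorbell behind nreq is the point of the idiom: a chain of requests costs one MMIO write instead of one per request, and a failed chain rings nothing at all.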
|
| wr.h |
|  94  int mlx5r_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq); |
|  97  int *size, void **cur_edge, int nreq, __be32 general_id, |
| 101  u64 wr_id, int nreq, u8 fence, u32 mlx5_opcode); |
| 102  void mlx5r_ring_db(struct mlx5_ib_qp *qp, unsigned int nreq, |
|
| srq.c |
| 419  int nreq;  in mlx5_ib_post_srq_recv() local |
| 430  for (nreq = 0; wr; nreq++, wr = wr->next) {  in mlx5_ib_post_srq_recv() |
| 462  if (likely(nreq)) {  in mlx5_ib_post_srq_recv() |
| 463  srq->wqe_ctr += nreq;  in mlx5_ib_post_srq_recv() |
|
| /drivers/infiniband/hw/mthca/ |
| mthca_qp.c |
| 1634  int nreq;  in mthca_tavor_post_send() local |
| 1655  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in mthca_tavor_post_send() |
| 1830  int nreq;  in mthca_tavor_post_receive() local |
| 1890  if (!nreq)  in mthca_tavor_post_receive() |
| 1897  ++nreq;  in mthca_tavor_post_receive() |
| 1899  nreq = 0;  in mthca_tavor_post_receive() |
| 1938  int nreq;  in mthca_arbel_post_send() local |
| 1959  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in mthca_arbel_post_send() |
| 1961  nreq = 0;  in mthca_arbel_post_send() |
| 2169  int nreq;  in mthca_arbel_post_receive() local |
| [all …] |
|
| mthca_srq.c |
| 493  int nreq;  in mthca_tavor_post_srq_recv() local |
| 502  for (nreq = 0; wr; wr = wr->next) {  in mthca_tavor_post_srq_recv() |
| 543  ++nreq;  in mthca_tavor_post_srq_recv() |
| 544  if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {  in mthca_tavor_post_srq_recv() |
| 545  nreq = 0;  in mthca_tavor_post_srq_recv() |
| 561  if (likely(nreq)) {  in mthca_tavor_post_srq_recv() |
| 568  mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq,  in mthca_tavor_post_srq_recv() |
| 586  int nreq;  in mthca_arbel_post_srq_recv() local |
| 592  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in mthca_arbel_post_srq_recv() |
| 627  if (likely(nreq)) {  in mthca_arbel_post_srq_recv() |
| [all …] |
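Both mthca SRQ paths count posted receives in nreq, but the Tavor variant must also flush a doorbell every MTHCA_TAVOR_MAX_WQES_PER_RECV_DB entries because the doorbell word only carries a small count (mthca_srq.c:543-568 above). A toy sketch of that batching, with invented names and a made-up batch size:

#include <stdio.h>

#define MAX_WQES_PER_DB 256   /* stand-in for MTHCA_TAVOR_MAX_WQES_PER_RECV_DB */

static void write_doorbell(int count)
{
        printf("doorbell covering %d WQEs\n", count);
}

static void post_recv_chain(int total_wrs)
{
        int nreq = 0;

        for (int i = 0; i < total_wrs; i++) {
                /* ... link one receive WQE into the ring ... */
                ++nreq;
                if (nreq == MAX_WQES_PER_DB) {
                        write_doorbell(nreq);
                        nreq = 0;               /* start counting the next batch */
                }
        }
        if (nreq)                               /* flush the partial final batch */
                write_doorbell(nreq);
}

int main(void)
{
        post_recv_chain(600);                   /* 256 + 256 + 88 -> three doorbell writes */
        return 0;
}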
|
| /drivers/infiniband/hw/mlx4/ |
| srq.c |
| 315  int nreq;  in mlx4_ib_post_srq_recv() local |
| 326  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in mlx4_ib_post_srq_recv() |
| 358  if (likely(nreq)) {  in mlx4_ib_post_srq_recv() |
| 359  srq->wqe_ctr += nreq;  in mlx4_ib_post_srq_recv() |
|
| qp.c |
| 3300  return cur + nreq >= wq->max_post;  in mlx4_wq_overflow() |
| 3538  int nreq;  in _mlx4_ib_post_send() local |
| 3574  nreq = 0;  in _mlx4_ib_post_send() |
| 3580  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in _mlx4_ib_post_send() |
| 3823  if (likely(nreq)) {  in _mlx4_ib_post_send() |
| 3824  qp->sq.head += nreq;  in _mlx4_ib_post_send() |
| 3858  int nreq;  in _mlx4_ib_post_recv() local |
| 3871  nreq = 0;  in _mlx4_ib_post_recv() |
| 3877  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in _mlx4_ib_post_recv() |
| 3922  if (likely(nreq)) {  in _mlx4_ib_post_recv() |
| [all …] |
|
| /drivers/usb/gadget/function/ |
| uvc_video.c |
| 504  unsigned int nreq;  in uvc_video_prep_requests() local |
| 521  nreq = DIV_ROUND_UP(video->interval, interval_duration);  in uvc_video_prep_requests() |
| 523  header_size = nreq * UVCG_REQUEST_HEADER_LEN;  in uvc_video_prep_requests() |
| 525  req_size = DIV_ROUND_UP(video->imagesize + header_size, nreq);  in uvc_video_prep_requests() |
| 542  video->uvc_num_requests = nreq + UVCG_REQ_MAX_ZERO_COUNT;  in uvc_video_prep_requests() |
| 543  video->reqs_per_frame = nreq;  in uvc_video_prep_requests() |
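uvc_video_prep_requests() sizes the isochronous transfer: nreq is how many USB requests fit into one frame interval, and each request must then carry its share of the image plus one UVC payload header. A worked example of the same arithmetic, with purely illustrative numbers (the real interval, image size, and header length come from the negotiated format and UVCG_REQUEST_HEADER_LEN):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int interval = 333333;         /* frame interval, illustrative units */
        unsigned int interval_duration = 1000;  /* time covered by one request, same units */
        unsigned int imagesize = 614400;        /* bytes per frame, e.g. 640x480 YUYV */
        unsigned int header_len = 12;           /* stand-in for UVCG_REQUEST_HEADER_LEN */

        unsigned int nreq = DIV_ROUND_UP(interval, interval_duration);        /* 334 */
        unsigned int header_size = nreq * header_len;                         /* 4008 */
        unsigned int req_size = DIV_ROUND_UP(imagesize + header_size, nreq);  /* 1852 */

        printf("nreq=%u header_size=%u req_size=%u\n", nreq, header_size, req_size);
        return 0;
}

The uvc_num_requests assignment at line 542 adds UVCG_REQ_MAX_ZERO_COUNT on top of this per-frame count, presumably as headroom for zero-length requests.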
|
| /drivers/crypto/inside-secure/ |
| safexcel.c |
|  824  int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;  in safexcel_dequeue() local |
|  864  nreq++;  in safexcel_dequeue() |
|  875  if (!nreq)  in safexcel_dequeue() |
|  880  priv->ring[ring].requests += nreq;  in safexcel_dequeue() |
| 1016  int ret, i, nreq, ndesc, tot_descs, handled = 0;  in safexcel_handle_result_descriptor() local |
| 1022  nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);  in safexcel_handle_result_descriptor() |
| 1023  nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;  in safexcel_handle_result_descriptor() |
| 1024  nreq &= EIP197_xDR_PROC_xD_PKT_MASK;  in safexcel_handle_result_descriptor() |
| 1025  if (!nreq)  in safexcel_handle_result_descriptor() |
| 1028  for (i = 0; i < nreq; i++) {  in safexcel_handle_result_descriptor() |
| [all …] |
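In safexcel_handle_result_descriptor() the number of completed packets is a bitfield inside the ring's PROC_COUNT register, so it is shifted and masked out of the raw readl() value before looping over that many results. A standalone sketch of the same extract-then-iterate pattern, using an invented register layout rather than the real EIP197 one:

#include <stdint.h>
#include <stdio.h>

#define PROC_PKT_OFFSET 24      /* illustrative, not the EIP197 field position */
#define PROC_PKT_MASK   0x7f

static void handle_results(uint32_t proc_count_reg)
{
        unsigned int nreq = (proc_count_reg >> PROC_PKT_OFFSET) & PROC_PKT_MASK;

        if (!nreq)
                return;                         /* nothing completed, nothing to do */

        for (unsigned int i = 0; i < nreq; i++) {
                /* ... pop one result descriptor and complete its request ... */
                printf("completed request %u of %u\n", i + 1, nreq);
        }
}

int main(void)
{
        handle_results((uint32_t)3 << PROC_PKT_OFFSET);  /* pretend hardware reported 3 packets */
        return 0;
}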
|
| /drivers/crypto/intel/qat/qat_common/ |
| qat_algs.c |
| 1070  struct skcipher_request *nreq = skcipher_request_ctx(req);  in qat_alg_skcipher_xts_encrypt() local |
| 1076  memcpy(nreq, req, sizeof(*req));  in qat_alg_skcipher_xts_encrypt() |
| 1077  skcipher_request_set_tfm(nreq, ctx->ftfm);  in qat_alg_skcipher_xts_encrypt() |
| 1078  return crypto_skcipher_encrypt(nreq);  in qat_alg_skcipher_xts_encrypt() |
| 1138  struct skcipher_request *nreq = skcipher_request_ctx(req);  in qat_alg_skcipher_xts_decrypt() local |
| 1144  memcpy(nreq, req, sizeof(*req));  in qat_alg_skcipher_xts_decrypt() |
| 1145  skcipher_request_set_tfm(nreq, ctx->ftfm);  in qat_alg_skcipher_xts_decrypt() |
| 1146  return crypto_skcipher_decrypt(nreq);  in qat_alg_skcipher_xts_decrypt() |
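The qat_algs.c hits show the software-fallback idiom on the XTS path: the base skcipher_request is copied into space reserved in the request context, retargeted at the fallback tfm, and handed to the generic crypto API. A hedged reconstruction of that path follows; the context structure is assumed (only the ftfm member visible above is used, and the real driver fetches it from the tfm), and it relies on the request context having been sized at init time to hold a full fallback request.

#include <crypto/skcipher.h>
#include <linux/string.h>

/* Assumed shape of the driver's tfm context; only ftfm matters here. */
struct skcipher_ctx_sketch {
        struct crypto_skcipher *ftfm;           /* software fallback transform */
};

static int xts_encrypt_fallback_sketch(struct skcipher_request *req,
                                        struct skcipher_ctx_sketch *ctx)
{
        /* The fallback request lives in the per-request context area. */
        struct skcipher_request *nreq = skcipher_request_ctx(req);

        /* Copy the caller's request verbatim: src/dst, IV, length, callback. */
        memcpy(nreq, req, sizeof(*req));
        /* Point the copy at the software implementation instead of the HW tfm. */
        skcipher_request_set_tfm(nreq, ctx->ftfm);
        return crypto_skcipher_encrypt(nreq);
}

The kpp hits in qat_asym_algs.c below follow the same idiom with kpp_request_ctx(), kpp_request_set_tfm() and crypto_kpp_generate_public_key()/crypto_kpp_compute_shared_secret().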
|
| qat_asym_algs.c |
| 388  struct kpp_request *nreq = kpp_request_ctx(req);  in qat_dh_generate_public_key() local |
| 393  memcpy(nreq, req, sizeof(*req));  in qat_dh_generate_public_key() |
| 394  kpp_request_set_tfm(nreq, ctx->ftfm);  in qat_dh_generate_public_key() |
| 395  return crypto_kpp_generate_public_key(nreq);  in qat_dh_generate_public_key() |
| 403  struct kpp_request *nreq = kpp_request_ctx(req);  in qat_dh_compute_shared_secret() local |
| 408  memcpy(nreq, req, sizeof(*req));  in qat_dh_compute_shared_secret() |
| 409  kpp_request_set_tfm(nreq, ctx->ftfm);  in qat_dh_compute_shared_secret() |
| 410  return crypto_kpp_compute_shared_secret(nreq);  in qat_dh_compute_shared_secret() |
|
| /drivers/dma/ |
| bcm-sba-raid.c |
| 289  struct sba_request *nreq;  in sba_free_chained_requests() local |
| 295  list_for_each_entry(nreq, &req->next, next)  in sba_free_chained_requests() |
| 296  _sba_free_request(sba, nreq);  in sba_free_chained_requests() |
| 412  struct sba_request *nreq, *first = req->first;  in sba_process_received_request() local |
| 434  list_for_each_entry(nreq, &first->next, next)  in sba_process_received_request() |
| 435  _sba_free_request(sba, nreq);  in sba_process_received_request() |
| 520  struct sba_request *req, *nreq;  in sba_tx_submit() local |
| 532  list_for_each_entry(nreq, &req->next, next)  in sba_tx_submit() |
| 533  _sba_pending_request(sba, nreq);  in sba_tx_submit() |
|
| arm-dma350.c |
| 196  int nreq;  member |
| 561  dmac->nreq = FIELD_GET(DMA_CFG_NUM_TRIGGER_IN, reg);  in d350_probe() |
| 563  dev_dbg(dev, "DMA-350 r%dp%d with %d channels, %d requests\n", r, p, dmac->nchan, dmac->nreq);  in d350_probe() |
|
| /drivers/infiniband/hw/hns/ |
| hns_roce_hw_v2.c |
| 701  u32 nreq;  in hns_roce_v2_post_send() local |
| 709  nreq = 0;  in hns_roce_v2_post_send() |
| 715  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in hns_roce_v2_post_send() |
| 752  if (likely(nreq)) {  in hns_roce_v2_post_send() |
| 753  qp->sq.head += nreq;  in hns_roce_v2_post_send() |
| 835  nreq = 0;  in hns_roce_v2_post_recv() |
| 840  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in hns_roce_v2_post_recv() |
| 862  if (likely(nreq)) {  in hns_roce_v2_post_recv() |
| 972  u32 nreq;  in hns_roce_v2_post_srq_recv() local |
| 977  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in hns_roce_v2_post_srq_recv() |
| [all …] |
|
| hns_roce_qp.c |
| 1553  bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,  in hns_roce_wq_overflow() argument |
| 1560  if (likely(cur + nreq < hr_wq->wqe_cnt))  in hns_roce_wq_overflow() |
| 1568  return cur + nreq >= hr_wq->wqe_cnt;  in hns_roce_wq_overflow() |
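hns_roce_wq_overflow() applies the same test already seen at wr.c:43 and qp.c:3300: head and tail are free-running unsigned counters, so head - tail is the number of outstanding WQEs even after the counters wrap, and posting nreq more must keep that below the queue size. A small demonstration of why the unsigned arithmetic holds up across wraparound (values are illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool wq_overflow(unsigned int head, unsigned int tail,
                        unsigned int nreq, unsigned int wqe_cnt)
{
        unsigned int cur = head - tail;         /* well-defined modulo 2^32 */

        return cur + nreq >= wqe_cnt;
}

int main(void)
{
        /* Near the 32-bit wrap point: head has wrapped, tail has not. */
        unsigned int head = 5, tail = 0xfffffffeU, wqe_cnt = 64;

        printf("outstanding = %u\n", head - tail);                /* 7 */
        printf("post 10 more -> overflow? %d\n",
               wq_overflow(head, tail, 10, wqe_cnt));             /* 0: 17 < 64 */
        printf("post 60 more -> overflow? %d\n",
               wq_overflow(head, tail, 60, wqe_cnt));             /* 1: 67 >= 64 */
        return 0;
}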
|
| hns_roce_device.h |
| 1262  bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq, |
|
| /drivers/crypto/rockchip/ |
| rk3288_crypto.h |
| 223  unsigned long nreq;  member |
|
| rk3288_crypto.c |
| 203  dd->nreq);  in rk_crypto_debugfs_show() |
|
| rk3288_crypto_ahash.c |
| 284  rkc->nreq++;  in rk_hash_run() |
|
| rk3288_crypto_skcipher.c |
| 322  rkc->nreq++;  in rk_cipher_run() |
|
| /drivers/net/ethernet/mellanox/mlx4/ |
| main.c |
| 2958  int nreq = min3(dev->caps.num_ports *  in mlx4_enable_msi_x() local |
| 2964  nreq = min_t(int, nreq, msi_x);  in mlx4_enable_msi_x() |
| 2966  entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL);  in mlx4_enable_msi_x() |
| 2970  for (i = 0; i < nreq; ++i)  in mlx4_enable_msi_x() |
| 2973  nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,  in mlx4_enable_msi_x() |
| 2974  nreq);  in mlx4_enable_msi_x() |
| 2976  if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {  in mlx4_enable_msi_x() |
| 2981  dev->caps.num_comp_vectors = nreq - 1;  in mlx4_enable_msi_x() |
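mlx4_enable_msi_x() sizes its MSI-X request from the device topology (the min3() operands are truncated above), clamps it against msi_x, then accepts however many vectors pci_enable_msix_range() actually grants: one is kept for the async event queue and the remaining nreq - 1 become completion vectors. A toy sketch of that accept-what-you-get logic, with request_vectors() standing in for the PCI API:

#include <stdio.h>

/* Stand-in for pci_enable_msix_range(): returns the number of vectors
 * granted (between minvec and maxvec) or a negative error.  Pretend the
 * platform can only hand out 5. */
static int request_vectors(int minvec, int maxvec)
{
        int available = 5;

        if (available < minvec)
                return -1;
        return available < maxvec ? available : maxvec;
}

int main(void)
{
        int nreq = 16;                          /* what the driver would like */

        nreq = request_vectors(2, nreq);        /* may be granted fewer */
        if (nreq < 0) {
                puts("MSI-X unavailable, fall back to legacy interrupts");
                return 0;
        }
        printf("async EQ vector: 1, completion vectors: %d\n", nreq - 1);
        return 0;
}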
|
| /drivers/nvme/host/ |
| apple.c |
| 790  struct nvme_request *nreq = nvme_req(req);  in apple_nvme_init_request() local |
| 793  nreq->ctrl = &anv->ctrl;  in apple_nvme_init_request() |
| 794  nreq->cmd = &iod->cmd;  in apple_nvme_init_request() |
|
| /drivers/usb/isp1760/ |
| isp1760-udc.c |
| 841  struct isp1760_request *req, *nreq;  in isp1760_ep_disable() local |
| 868  list_for_each_entry_safe(req, nreq, &req_list, queue) {  in isp1760_ep_disable() |
|
| /drivers/infiniband/sw/rdmavt/ |
| qp.c |
| 2149  unsigned nreq = 0;  in rvt_post_send() local |
| 2176  nreq++;  in rvt_post_send() |
| 2180  if (nreq) {  in rvt_post_send() |
| 2185  if (nreq == 1 && call_send)  in rvt_post_send() |
|
| /drivers/infiniband/hw/hfi1/ |
| tid_rdma.c |
| 5199  struct tid_rdma_request *req, *nreq;  in make_tid_rdma_ack() local |
| 5273  nreq = ack_to_tid_req(&qp->s_ack_queue[next]);  in make_tid_rdma_ack() |
| 5274  if (!nreq->comp_seg || nreq->ack_seg == nreq->comp_seg)  in make_tid_rdma_ack() |
|