Searched refs:nreq (Results 1 – 25 of 32) sorted by relevance

/linux/net/ipv4/
inet_connection_sock.c
980 if (!nreq) { in inet_reqsk_clone()
1010 rcu_assign_pointer(tcp_sk(nreq->sk)->fastopen_rsk, nreq); in inet_reqsk_clone()
1012 return nreq; in inet_reqsk_clone()
1100 if (!nreq) in reqsk_timer_handler()
1112 req = nreq; in reqsk_timer_handler()
1160 if (!nreq) in reqsk_timer_handler()
1174 reqsk_put(nreq); in reqsk_timer_handler()
1182 if (nreq) { in reqsk_timer_handler()
1187 __reqsk_free(nreq); in reqsk_timer_handler()
1444 if (!nreq) { in inet_csk_complete_hashdance()
[all …]
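
The net/ipv4 hits are the TCP request-socket migration path: nreq is a clone of a pending connection request, created so the request can be handed to another listener; on success the caller swaps req for nreq (line 1112) and drops the old reference (line 1174). A minimal userspace sketch of that clone-swap-put shape, with simplified stand-in types:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct reqsk {
        int refcnt;
        int listener_id;
    };

    static struct reqsk *reqsk_clone(struct reqsk *req, int new_listener)
    {
        struct reqsk *nreq = malloc(sizeof(*nreq));

        if (!nreq)
            return NULL;              /* clone failed: keep using req */
        memcpy(nreq, req, sizeof(*req));
        nreq->refcnt = 1;
        nreq->listener_id = new_listener;
        return nreq;
    }

    static void reqsk_put(struct reqsk *req)
    {
        if (--req->refcnt == 0)
            free(req);
    }

    int main(void)
    {
        struct reqsk *req = malloc(sizeof(*req));
        struct reqsk *nreq;

        if (!req)
            return 1;
        req->refcnt = 1;
        req->listener_id = 1;

        nreq = reqsk_clone(req, 2);   /* migrate to listener 2 */
        if (nreq) {
            reqsk_put(req);           /* drop the original ... */
            req = nreq;               /* ... and continue with the clone */
        }
        printf("request now owned by listener %d\n", req->listener_id);
        reqsk_put(req);
        return 0;
    }
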
/linux/drivers/infiniband/hw/mlx5/
wr.c
43 return cur + nreq >= wq->max_post; in mlx5r_wq_overflow()
1030 qp->sq.head += nreq; in mlx5r_ring_db()
1066 int nreq; in mlx5_ib_post_send() local
1082 for (nreq = 0; wr; nreq++, wr = wr->next) { in mlx5_ib_post_send()
1099 nreq); in mlx5_ib_post_send()
1193 if (likely(nreq)) in mlx5_ib_post_send()
1194 mlx5r_ring_db(qp, nreq, ctrl); in mlx5_ib_post_send()
1216 int nreq; in mlx5_ib_post_recv() local
1233 for (nreq = 0; wr; nreq++, wr = wr->next) { in mlx5_ib_post_recv()
1270 if (likely(nreq)) { in mlx5_ib_post_recv()
[all …]
wr.h
94 int mlx5r_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq);
97 int *size, void **cur_edge, int nreq, __be32 general_id,
101 u64 wr_id, int nreq, u8 fence, u32 mlx5_opcode);
102 void mlx5r_ring_db(struct mlx5_ib_qp *qp, unsigned int nreq,
srq.c
419 int nreq; in mlx5_ib_post_srq_recv() local
430 for (nreq = 0; wr; nreq++, wr = wr->next) { in mlx5_ib_post_srq_recv()
462 if (likely(nreq)) { in mlx5_ib_post_srq_recv()
463 srq->wqe_ctr += nreq; in mlx5_ib_post_srq_recv()
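
The mlx5 hits show the verbs batching idiom that most of the InfiniBand results below repeat: nreq counts the work requests accepted in one post call, mlx5r_wq_overflow() (line 43) rejects the batch once the ring would overfill, and the doorbell is rung once for all nreq entries (lines 1030 and 1194). A compilable userspace sketch of that shape; all types and names here are hypothetical stand-ins, not the driver's:

    #include <stdio.h>

    struct wq {
        unsigned int head;      /* producer index, monotonically increasing */
        unsigned int tail;      /* consumer index */
        unsigned int max_post;  /* ring capacity */
    };

    struct wr {
        int id;
        struct wr *next;        /* work requests arrive as a linked list */
    };

    /* Same shape as mlx5r_wq_overflow(): entries already in flight plus
     * the nreq accepted so far must not fill the ring. */
    static int wq_overflow(const struct wq *wq, int nreq)
    {
        unsigned int cur = wq->head - wq->tail;  /* unsigned math wraps safely */

        return cur + nreq >= wq->max_post;
    }

    static int post_send(struct wq *wq, struct wr *wr, struct wr **bad_wr)
    {
        int nreq;

        for (nreq = 0; wr; nreq++, wr = wr->next) {
            if (wq_overflow(wq, nreq)) {
                *bad_wr = wr;   /* report the first request that didn't fit */
                break;
            }
            /* ... build one WQE for *wr here ... */
        }

        if (nreq) {
            wq->head += nreq;   /* publish all accepted entries ... */
            printf("ring doorbell for %d request(s)\n", nreq);
        }
        return nreq;
    }

    int main(void)
    {
        struct wq wq = { .head = 0, .tail = 0, .max_post = 2 };
        struct wr c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct wr *bad = NULL;
        int posted = post_send(&wq, &a, &bad);

        printf("posted %d, first rejected id %d\n", posted, bad ? bad->id : -1);
        return 0;
    }
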
/linux/crypto/
echainiv.c
45 SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull); in echainiv_encrypt()
47 skcipher_request_set_sync_tfm(nreq, ctx->sknull); in echainiv_encrypt()
48 skcipher_request_set_callback(nreq, req->base.flags, in echainiv_encrypt()
50 skcipher_request_set_crypt(nreq, req->src, req->dst, in echainiv_encrypt()
54 err = crypto_skcipher_encrypt(nreq); in echainiv_encrypt()
seqiv.c
68 SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull); in seqiv_aead_encrypt()
70 skcipher_request_set_sync_tfm(nreq, ctx->sknull); in seqiv_aead_encrypt()
71 skcipher_request_set_callback(nreq, req->base.flags, in seqiv_aead_encrypt()
73 skcipher_request_set_crypt(nreq, req->src, req->dst, in seqiv_aead_encrypt()
77 err = crypto_skcipher_encrypt(nreq); in seqiv_aead_encrypt()
gcm.c
962 SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null); in crypto_rfc4543_copy_src_to_dst()
964 skcipher_request_set_sync_tfm(nreq, ctx->null); in crypto_rfc4543_copy_src_to_dst()
965 skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL); in crypto_rfc4543_copy_src_to_dst()
966 skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL); in crypto_rfc4543_copy_src_to_dst()
968 return crypto_skcipher_encrypt(nreq); in crypto_rfc4543_copy_src_to_dst()
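
All three crypto hits use the same trick: nreq is a request against a null skcipher (ctx->sknull / ctx->null), built on the stack and "encrypted" purely to copy req->src to req->dst, as the gcm.c function name crypto_rfc4543_copy_src_to_dst suggests. A loose userspace analogue of configure-on-stack-then-dispatch, with invented types (the kernel API is scatterlist-based and quite different):

    #include <stdio.h>
    #include <string.h>

    struct tfm {
        void (*crypt)(void *dst, const void *src, size_t len);
    };

    /* "cipher_null": ciphertext equals plaintext, so encrypt == copy. */
    static void null_crypt(void *dst, const void *src, size_t len)
    {
        memcpy(dst, src, len);
    }

    static struct tfm null_tfm = { .crypt = null_crypt };

    struct req {
        struct tfm *tfm;
        const void *src;
        void *dst;
        size_t len;
    };

    int main(void)
    {
        char src[] = "associated data + payload";
        char dst[sizeof(src)];
        struct req nreq = {          /* request lives on the stack */
            .tfm = &null_tfm, .src = src, .dst = dst, .len = sizeof(src),
        };

        nreq.tfm->crypt(nreq.dst, nreq.src, nreq.len);
        printf("copied: %s\n", dst);
        return 0;
    }
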
/linux/drivers/infiniband/hw/mthca/
mthca_qp.c
1634 int nreq; in mthca_tavor_post_send() local
1655 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mthca_tavor_post_send()
1830 int nreq; in mthca_tavor_post_receive() local
1890 if (!nreq) in mthca_tavor_post_receive()
1897 ++nreq; in mthca_tavor_post_receive()
1899 nreq = 0; in mthca_tavor_post_receive()
1938 int nreq; in mthca_arbel_post_send() local
1959 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mthca_arbel_post_send()
1961 nreq = 0; in mthca_arbel_post_send()
2169 int nreq; in mthca_arbel_post_receive() local
[all …]
mthca_srq.c
493 int nreq; in mthca_tavor_post_srq_recv() local
502 for (nreq = 0; wr; wr = wr->next) { in mthca_tavor_post_srq_recv()
543 ++nreq; in mthca_tavor_post_srq_recv()
544 if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { in mthca_tavor_post_srq_recv()
545 nreq = 0; in mthca_tavor_post_srq_recv()
561 if (likely(nreq)) { in mthca_tavor_post_srq_recv()
568 mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq, in mthca_tavor_post_srq_recv()
586 int nreq; in mthca_arbel_post_srq_recv() local
592 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mthca_arbel_post_srq_recv()
627 if (likely(nreq)) { in mthca_arbel_post_srq_recv()
[all …]
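
mthca_tavor_post_srq_recv() differs from the usual pattern in one respect: it resets nreq and rings the doorbell mid-loop whenever a batch reaches MTHCA_TAVOR_MAX_WQES_PER_RECV_DB (lines 544-545), presumably because one Tavor doorbell can only cover a bounded number of WQEs. A sketch of that flush-every-N loop; the constant and names are hypothetical:

    #include <stdio.h>

    #define MAX_WQES_PER_DB 256    /* hypothetical per-doorbell cap */

    static void ring_db(int first, int count)
    {
        printf("doorbell: %d WQE(s) starting at index %d\n", count, first);
    }

    int main(void)
    {
        int total = 600, posted = 0, nreq = 0, first = 0;

        for (int i = 0; i < total; i++) {
            /* ... write one receive WQE ... */
            posted++;
            if (++nreq == MAX_WQES_PER_DB) {
                ring_db(first, nreq);    /* flush a full batch */
                first = posted;
                nreq = 0;
            }
        }
        if (nreq)                        /* flush the remainder */
            ring_db(first, nreq);
        return 0;
    }
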
/linux/drivers/usb/gadget/function/
uvc_queue.c
48 unsigned int nreq; in uvc_queue_setup() local
64 nreq = DIV_ROUND_UP(DIV_ROUND_UP(sizes[0], 2), req_size); in uvc_queue_setup()
65 nreq = clamp(nreq, 4U, 64U); in uvc_queue_setup()
66 video->uvc_num_requests = nreq; in uvc_queue_setup()
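
The uvc_queue_setup() hit is pure sizing arithmetic: allocate enough USB requests to cover half of one buffer (sizes[0]), then clamp the count to between 4 and 64. Stand-alone, with sample numbers that are illustrative only:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static unsigned int clamp_uint(unsigned int v, unsigned int lo, unsigned int hi)
    {
        return v < lo ? lo : (v > hi ? hi : v);
    }

    int main(void)
    {
        unsigned int buf_size = 614400;  /* e.g. one 640x480 YUYV frame */
        unsigned int req_size = 3072;    /* hypothetical per-request payload */
        unsigned int nreq;

        /* Requests needed for half a buffer: 307200 / 3072 = 100 ... */
        nreq = DIV_ROUND_UP(DIV_ROUND_UP(buf_size, 2), req_size);
        /* ... bounded to the working range [4, 64]. */
        nreq = clamp_uint(nreq, 4U, 64U);

        printf("uvc_num_requests = %u\n", nreq);  /* prints 64 */
        return 0;
    }
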
/linux/drivers/infiniband/hw/mlx4/
srq.c
315 int nreq; in mlx4_ib_post_srq_recv() local
326 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mlx4_ib_post_srq_recv()
358 if (likely(nreq)) { in mlx4_ib_post_srq_recv()
359 srq->wqe_ctr += nreq; in mlx4_ib_post_srq_recv()
qp.c
3292 return cur + nreq >= wq->max_post; in mlx4_wq_overflow()
3530 int nreq; in _mlx4_ib_post_send() local
3566 nreq = 0; in _mlx4_ib_post_send()
3572 for (nreq = 0; wr; ++nreq, wr = wr->next) { in _mlx4_ib_post_send()
3815 if (likely(nreq)) { in _mlx4_ib_post_send()
3816 qp->sq.head += nreq; in _mlx4_ib_post_send()
3850 int nreq; in _mlx4_ib_post_recv() local
3863 nreq = 0; in _mlx4_ib_post_recv()
3869 for (nreq = 0; wr; ++nreq, wr = wr->next) { in _mlx4_ib_post_recv()
3914 if (likely(nreq)) { in _mlx4_ib_post_recv()
[all …]
/linux/fs/nfs/
pnfs_nfs.c
412 unsigned int nreq = 0; in pnfs_bucket_alloc_ds_commits() local
424 nreq++; in pnfs_bucket_alloc_ds_commits()
428 return nreq; in pnfs_bucket_alloc_ds_commits()
433 return nreq; in pnfs_bucket_alloc_ds_commits()
468 unsigned int nreq = 0; in pnfs_generic_commit_pagelist() local
479 nreq++; in pnfs_generic_commit_pagelist()
482 nreq += pnfs_alloc_ds_commits_list(&list, fl_cinfo, cinfo); in pnfs_generic_commit_pagelist()
483 if (nreq == 0) in pnfs_generic_commit_pagelist()
/linux/drivers/crypto/inside-secure/
safexcel.c
824 int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results; in safexcel_dequeue() local
864 nreq++; in safexcel_dequeue()
875 if (!nreq) in safexcel_dequeue()
880 priv->ring[ring].requests += nreq; in safexcel_dequeue()
1016 int ret, i, nreq, ndesc, tot_descs, handled = 0; in safexcel_handle_result_descriptor() local
1022 nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT); in safexcel_handle_result_descriptor()
1023 nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET; in safexcel_handle_result_descriptor()
1024 nreq &= EIP197_xDR_PROC_xD_PKT_MASK; in safexcel_handle_result_descriptor()
1025 if (!nreq) in safexcel_handle_result_descriptor()
1028 for (i = 0; i < nreq; i++) { in safexcel_handle_result_descriptor()
[all …]
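
In safexcel_handle_result_descriptor(), nreq is read from a hardware "processed count" register and isolated with a shift and mask (lines 1023-1024) before the driver pops that many completed requests. The extraction itself, with made-up field offsets:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical layout of a processed-count register field. */
    #define PROC_PKT_OFFSET 24
    #define PROC_PKT_MASK   0x7f

    int main(void)
    {
        uint32_t reg = 0x05abcdef;     /* pretend this came from readl() */
        uint32_t nreq = reg;

        nreq >>= PROC_PKT_OFFSET;      /* move the field down to bit 0 */
        nreq &= PROC_PKT_MASK;         /* mask off neighbouring fields */

        printf("completed requests: %u\n", nreq);  /* 0x05 -> 5 */
        return 0;
    }
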
/linux/drivers/crypto/intel/qat/qat_common/
qat_algs.c
1070 struct skcipher_request *nreq = skcipher_request_ctx(req); in qat_alg_skcipher_xts_encrypt() local
1076 memcpy(nreq, req, sizeof(*req)); in qat_alg_skcipher_xts_encrypt()
1077 skcipher_request_set_tfm(nreq, ctx->ftfm); in qat_alg_skcipher_xts_encrypt()
1078 return crypto_skcipher_encrypt(nreq); in qat_alg_skcipher_xts_encrypt()
1138 struct skcipher_request *nreq = skcipher_request_ctx(req); in qat_alg_skcipher_xts_decrypt() local
1144 memcpy(nreq, req, sizeof(*req)); in qat_alg_skcipher_xts_decrypt()
1145 skcipher_request_set_tfm(nreq, ctx->ftfm); in qat_alg_skcipher_xts_decrypt()
1146 return crypto_skcipher_decrypt(nreq); in qat_alg_skcipher_xts_decrypt()
qat_asym_algs.c
388 struct kpp_request *nreq = kpp_request_ctx(req); in qat_dh_generate_public_key() local
393 memcpy(nreq, req, sizeof(*req)); in qat_dh_generate_public_key()
394 kpp_request_set_tfm(nreq, ctx->ftfm); in qat_dh_generate_public_key()
395 return crypto_kpp_generate_public_key(nreq); in qat_dh_generate_public_key()
403 struct kpp_request *nreq = kpp_request_ctx(req); in qat_dh_compute_shared_secret() local
408 memcpy(nreq, req, sizeof(*req)); in qat_dh_compute_shared_secret()
409 kpp_request_set_tfm(nreq, ctx->ftfm); in qat_dh_compute_shared_secret()
410 return crypto_kpp_compute_shared_secret(nreq); in qat_dh_compute_shared_secret()
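
Both qat files use nreq for the fallback idiom: when a request is better served elsewhere, the driver copies the original request wholesale, retargets it at what appears to be a fallback transform (ctx->ftfm), and resubmits. A sketch of copy-retarget-resubmit with invented types and an invented "hardware minimum" condition:

    #include <stdio.h>
    #include <string.h>

    struct tfm {
        const char *name;
        int (*encrypt)(void *req);
    };

    struct request {
        struct tfm *tfm;
        unsigned int len;
    };

    static int sw_encrypt(void *req)
    {
        printf("handled by software fallback\n");
        return 0;
    }

    static struct tfm fallback = { "sw-fallback", sw_encrypt };

    static int encrypt(struct request *req)
    {
        if (req->len < 16) {                  /* hypothetical HW limit */
            struct request nreq;

            memcpy(&nreq, req, sizeof(*req)); /* inherit everything ... */
            nreq.tfm = &fallback;             /* ... but swap the transform */
            return nreq.tfm->encrypt(&nreq);
        }
        printf("handled by hardware\n");
        return 0;
    }

    int main(void)
    {
        struct request small = { NULL, 8 }, big = { NULL, 4096 };

        encrypt(&small);
        encrypt(&big);
        return 0;
    }
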
/linux/drivers/dma/
bcm-sba-raid.c
289 struct sba_request *nreq; in sba_free_chained_requests() local
295 list_for_each_entry(nreq, &req->next, next) in sba_free_chained_requests()
296 _sba_free_request(sba, nreq); in sba_free_chained_requests()
412 struct sba_request *nreq, *first = req->first; in sba_process_received_request() local
434 list_for_each_entry(nreq, &first->next, next) in sba_process_received_request()
435 _sba_free_request(sba, nreq); in sba_process_received_request()
520 struct sba_request *req, *nreq; in sba_tx_submit() local
532 list_for_each_entry(nreq, &req->next, next) in sba_tx_submit()
533 _sba_pending_request(sba, nreq); in sba_tx_submit()
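
In bcm-sba-raid.c, nreq is an iteration cursor: one logical operation may be built from a head request plus a chain of sub-requests, and teardown walks the chain freeing each one. A simplified sketch (the driver uses list_head lists; a plain next pointer stands in here):

    #include <stdio.h>
    #include <stdlib.h>

    struct sba_req {
        int id;
        struct sba_req *next;    /* chain of sub-requests, if any */
    };

    static void free_one(struct sba_req *req)
    {
        printf("freeing request %d\n", req->id);
        free(req);
    }

    static void free_chained(struct sba_req *req)
    {
        struct sba_req *nreq = req->next;

        while (nreq) {           /* release every chained sub-request ... */
            struct sba_req *next = nreq->next;

            free_one(nreq);
            nreq = next;
        }
        free_one(req);           /* ... then the head itself */
    }

    int main(void)
    {
        struct sba_req *a = malloc(sizeof(*a));
        struct sba_req *b = malloc(sizeof(*b));

        if (!a || !b)
            return 1;
        a->id = 1; a->next = b;
        b->id = 2; b->next = NULL;
        free_chained(a);
        return 0;
    }
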
/linux/fs/nilfs2/
btree.c
1745 union nilfs_bmap_ptr_req *nreq, in nilfs_btree_prepare_convert_and_insert() argument
1772 if (nreq != NULL) { in nilfs_btree_prepare_convert_and_insert()
1773 nreq->bpr_ptr = dreq->bpr_ptr + 1; in nilfs_btree_prepare_convert_and_insert()
1774 ret = nilfs_bmap_prepare_alloc_ptr(btree, nreq, dat); in nilfs_btree_prepare_convert_and_insert()
1791 nilfs_bmap_abort_alloc_ptr(btree, nreq, dat); in nilfs_btree_prepare_convert_and_insert()
1805 union nilfs_bmap_ptr_req *nreq, in nilfs_btree_commit_convert_and_insert() argument
1823 if (nreq != NULL) { in nilfs_btree_commit_convert_and_insert()
1825 nilfs_bmap_commit_alloc_ptr(btree, nreq, dat); in nilfs_btree_commit_convert_and_insert()
1841 tmpptr = nreq->bpr_ptr; in nilfs_btree_commit_convert_and_insert()
1886 union nilfs_bmap_ptr_req dreq, nreq, *di, *ni; in nilfs_btree_convert_and_insert() local
[all …]
/linux/drivers/crypto/rockchip/
rk3288_crypto.h
223 unsigned long nreq; member
rk3288_crypto.c
203 dd->nreq); in rk_crypto_debugfs_show()
rk3288_crypto_ahash.c
282 rkc->nreq++; in rk_hash_run()
/linux/drivers/infiniband/hw/hns/
hns_roce_hw_v2.c
702 u32 nreq; in hns_roce_v2_post_send() local
710 nreq = 0; in hns_roce_v2_post_send()
716 for (nreq = 0; wr; ++nreq, wr = wr->next) { in hns_roce_v2_post_send()
751 if (likely(nreq)) { in hns_roce_v2_post_send()
752 qp->sq.head += nreq; in hns_roce_v2_post_send()
831 nreq = 0; in hns_roce_v2_post_recv()
836 for (nreq = 0; wr; ++nreq, wr = wr->next) { in hns_roce_v2_post_recv()
858 if (likely(nreq)) { in hns_roce_v2_post_recv()
968 u32 nreq; in hns_roce_v2_post_srq_recv() local
973 for (nreq = 0; wr; ++nreq, wr = wr->next) { in hns_roce_v2_post_srq_recv()
[all …]
hns_roce_qp.c
1525 bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq, in hns_roce_wq_overflow() argument
1532 if (likely(cur + nreq < hr_wq->wqe_cnt)) in hns_roce_wq_overflow()
1540 return cur + nreq >= hr_wq->wqe_cnt; in hns_roce_wq_overflow()
/linux/drivers/net/ethernet/mellanox/mlx4/
main.c
2964 int nreq = min3(dev->caps.num_ports * in mlx4_enable_msi_x() local
2970 nreq = min_t(int, nreq, msi_x); in mlx4_enable_msi_x()
2972 entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL); in mlx4_enable_msi_x()
2976 for (i = 0; i < nreq; ++i) in mlx4_enable_msi_x()
2979 nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2, in mlx4_enable_msi_x()
2980 nreq); in mlx4_enable_msi_x()
2982 if (nreq < 0 || nreq < MLX4_EQ_ASYNC) { in mlx4_enable_msi_x()
2987 dev->caps.num_comp_vectors = nreq - 1; in mlx4_enable_msi_x()
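
In mlx4_enable_msi_x(), nreq is the number of MSI-X vectors to ask for: clamped by min3() and by the msi_x parameter, then passed to pci_enable_msix_range(), which may grant fewer; one granted vector is reserved for async events, so num_comp_vectors becomes nreq - 1 (line 2987). The clamp-request-subtract shape, with illustrative numbers:

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        /* Hypothetical capabilities and limits. */
        int per_port_need = 2 * 8;   /* ports * vectors per port */
        int num_eqs = 32, max_msix = 64, msi_x_param = 16;

        /* min3()-style clamp: never ask for more than any limit allows. */
        int nreq = MIN(MIN(per_port_need, num_eqs), max_msix);
        nreq = MIN(nreq, msi_x_param);

        /* pci_enable_msix_range() may grant fewer; assume all here. */
        int granted = nreq;

        /* One vector serves async events; the rest serve completions. */
        printf("requested %d, completion vectors %d\n", granted, granted - 1);
        return 0;
    }
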
/linux/drivers/nvme/host/
apple.c
789 struct nvme_request *nreq = nvme_req(req); in apple_nvme_init_request() local
792 nreq->ctrl = &anv->ctrl; in apple_nvme_init_request()
793 nreq->cmd = &iod->cmd; in apple_nvme_init_request()
