/linux-6.3-rc2/net/dns_resolver/
dns_query.c
     79  struct key *rkey;    in dns_query() (local)
    131  if (IS_ERR(rkey)) {    in dns_query()
    132  ret = PTR_ERR(rkey);    in dns_query()
    136  down_read(&rkey->sem);    in dns_query()
    138  rkey->perm |= KEY_USR_VIEW;    in dns_query()
    140  ret = key_validate(rkey);    in dns_query()
    149  upayload = user_key_payload_locked(rkey);    in dns_query()
    160  *_expiry = rkey->expiry;    in dns_query()
    164  up_read(&rkey->sem);    in dns_query()
    166  key_invalidate(rkey);    in dns_query()
    [all …]
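The dns_query() hits above follow the usual keyrings read pattern: resolve the key, take its semaphore, validate it, and copy the user payload out while the lock is held. Below is a minimal sketch of that pattern, not the kernel's actual function; the helper name read_dns_key_payload() is made up and error handling is abbreviated (the real code also grants KEY_USR_VIEW and invalidates the key when its payload fails to parse).

/* Hedged sketch of the keyring read pattern shown above. */
#include <linux/key.h>
#include <linux/string.h>
#include <keys/user-type.h>

static long read_dns_key_payload(struct key *rkey, char *buf, size_t buflen)
{
        const struct user_key_payload *upayload;
        long ret;

        down_read(&rkey->sem);

        ret = key_validate(rkey);       /* rejects revoked or expired keys */
        if (ret < 0)
                goto out;

        upayload = user_key_payload_locked(rkey);
        ret = upayload->datalen;
        if (buf && buflen >= upayload->datalen)
                memcpy(buf, upayload->data, upayload->datalen);

out:
        up_read(&rkey->sem);
        return ret;
}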
/linux-6.3-rc2/drivers/infiniband/sw/rxe/
rxe_mw.c
     31  mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1);    in rxe_alloc_mw()
    138  u32 key = wqe->wr.wr.mw.rkey & 0xff;    in rxe_do_bind_mw()
    140  mw->rkey = (mw->rkey & ~0xff) | key;    in rxe_do_bind_mw()
    179  if (unlikely(mw->rkey != mw_rkey)) {    in rxe_bind_mw()
    251  int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)    in rxe_invalidate_mw() (argument)
    257  mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);    in rxe_invalidate_mw()
    263  if (rkey != mw->rkey) {    in rxe_invalidate_mw()
    283  struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey)    in rxe_lookup_mw() (argument)
    288  int index = rkey >> 8;    in rxe_lookup_mw()
    294  if (unlikely((mw->rkey != rkey) || rxe_mw_pd(mw) != pd ||    in rxe_lookup_mw()
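The hits above imply the rxe rkey layout: the object's pool index lives in the upper 24 bits and a variable 8-bit key sits in the low byte, which a memory-window bind may replace. A small sketch of those bit manipulations, with helper names invented here for illustration:

#include <linux/types.h>

/* pool index in bits 31:8, variable key in bits 7:0 (cf. rxe_alloc_mw(), line 31) */
static inline u32 rxe_demo_compose_rkey(u32 pool_index, u8 var_key)
{
        return (pool_index << 8) | var_key;
}

/* a bind replaces only the low byte (cf. rxe_do_bind_mw(), line 140) */
static inline u32 rxe_demo_rebind_rkey(u32 old_rkey, u8 new_key)
{
        return (old_rkey & ~0xffu) | new_key;
}

/* lookups recover the pool index by shifting the key byte away (cf. line 257) */
static inline u32 rxe_demo_rkey_index(u32 rkey)
{
        return rkey >> 8;
}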
rxe_hdr.h
    528  __be32 rkey;    (struct member)
    550  return be32_to_cpu(reth->rkey);    in __reth_rkey()
    557  reth->rkey = cpu_to_be32(rkey);    in __reth_set_rkey()
    595  rxe_opcode[pkt->opcode].offset[RXE_RETH], rkey);    in reth_set_rkey()
    661  __be32 rkey;    (struct member)
    684  return be32_to_cpu(atmeth->rkey);    in __atmeth_rkey()
    691  atmeth->rkey = cpu_to_be32(rkey);    in __atmeth_set_rkey()
    919  __be32 rkey;    (struct member)
    926  return be32_to_cpu(ieth->rkey);    in __ieth_rkey()
    933  ieth->rkey = cpu_to_be32(rkey);    in __ieth_set_rkey()
    [all …]
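The three __be32 rkey members above belong to the on-wire headers that carry a remote key: the RETH for RDMA read/write requests, the AtomicETH for atomics, and the IETH for SEND with Invalidate. A hedged sketch of two of them follows; the field widths track the InfiniBand spec, but the struct bodies are reproduced from memory rather than copied from rxe_hdr.h, so treat them as illustrative.

#include <linux/types.h>

struct demo_reth {              /* RDMA extended transport header */
        __be64 va;              /* remote virtual address to access */
        __be32 rkey;            /* remote key authorizing the access */
        __be32 len;             /* DMA length of the transfer */
} __packed;

struct demo_ieth {              /* invalidate extended transport header */
        __be32 rkey;            /* rkey the responder should invalidate */
} __packed;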
rxe_resp.c
    437  qp->resp.rkey = 0;    in qp_resp_from_reth()
    439  qp->resp.rkey = reth_rkey(pkt);    in qp_resp_from_reth()
    460  u32 rkey;    in check_rkey() (local)
    500  rkey = qp->resp.rkey;    in check_rkey()
    504  if (rkey_is_mw(rkey)) {    in check_rkey()
    638  res->read.rkey = qp->resp.rkey;    in rxe_prepare_res()
    853  if (rkey_is_mw(rkey)) {    in rxe_recheck_mr()
    859  if (mw->rkey != rkey || mw->state != RXE_MW_STATE_VALID ||    in rxe_recheck_mr()
    875  if (mr->rkey != rkey || mr->state != RXE_MR_STATE_VALID) {    in rxe_recheck_mr()
    992  if (rkey_is_mw(rkey))    in invalidate_rkey()
    [all …]
rxe_verbs.h
    163  u32 rkey;    (struct member)
    197  u32 rkey;    (struct member)
    286  static inline int rkey_is_mw(u32 rkey)    in rkey_is_mw() (argument)
    288  u32 index = rkey >> 8;    in rkey_is_mw()
    300  u32 rkey;    (struct member)
    333  u32 rkey;    (struct member)
rxe_mr.c
     55  u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;    in rxe_mr_init() (local)
     63  mr->rkey = mr->ibmr.rkey = rkey;    in rxe_mr_init()
    633  (type == RXE_LOOKUP_REMOTE && mr->rkey != key) ||    in lookup_mr()
    656  if (mr->rkey ? (key != mr->rkey) : (key != mr->lkey)) {    in rxe_invalidate_mr()
    658  key, (mr->rkey ? mr->rkey : mr->lkey));    in rxe_invalidate_mr()
    718  mr->rkey = (access & IB_ACCESS_REMOTE) ? key : 0;    in rxe_reg_fast_mr()
rxe_req.c
    432  reth_set_rkey(pkt, ibwr->wr.flush.rkey);    in init_req_packet()
    434  reth_set_rkey(pkt, ibwr->wr.rdma.rkey);    in init_req_packet()
    457  atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);    in init_req_packet()
    589  u32 rkey;    in rxe_do_local_ops() (local)
    594  rkey = wqe->wr.ex.invalidate_rkey;    in rxe_do_local_ops()
    595  if (rkey_is_mw(rkey))    in rxe_do_local_ops()
    596  ret = rxe_invalidate_mw(qp, rkey);    in rxe_do_local_ops()
    598  ret = rxe_invalidate_mr(qp, rkey);    in rxe_do_local_ops()
/linux-6.3-rc2/drivers/infiniband/ulp/iser/
iser_memory.c
    135  reg->rkey = device->pd->unsafe_global_rkey;    in iser_reg_dma()
    137  reg->rkey = 0;    in iser_reg_dma()
    142  " length=0x%x\n", reg->sge.lkey, reg->rkey,    in iser_reg_dma()
    241  inv_wr->ex.invalidate_rkey = mr->rkey;    in iser_inv_rkey()
    270  ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));    in iser_reg_sig_mr()
    287  wr->key = mr->rkey;    in iser_reg_sig_mr()
    294  sig_reg->rkey = mr->rkey;    in iser_reg_sig_mr()
    299  sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,    in iser_reg_sig_mr()
    319  ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));    in iser_fast_reg_mr()
    334  wr->key = mr->rkey;    in iser_fast_reg_mr()
    [all …]
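Lines 270 and 319 show the key-rolling step that fast registration relies on: before an MR is registered again, its 8-bit variant key is advanced so that an rkey the target may still hold from the previous registration no longer matches. ib_inc_rkey() and ib_update_fast_reg_key() are the stock include/rdma/ib_verbs.h helpers; the wrapper below is only a sketch and its name is invented.

#include <rdma/ib_verbs.h>

static void demo_roll_fastreg_key(struct ib_mr *mr)
{
        /* bump only the low byte of the rkey; the index bits stay intact */
        ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
}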
iser_initiator.c
     70  hdr->read_stag = cpu_to_be32(mem_reg->rkey);    in iser_prepare_read_cmd()
     74  task->itt, mem_reg->rkey,    in iser_prepare_read_cmd()
    117  hdr->write_stag = cpu_to_be32(mem_reg->rkey);    in iser_prepare_write_cmd()
    122  task->itt, mem_reg->rkey,    in iser_prepare_write_cmd()
    577  if (unlikely((!desc->sig_protected && rkey != desc->rsc.mr->rkey) ||    in iser_inv_desc()
    578  (desc->sig_protected && rkey != desc->rsc.sig_mr->rkey))) {    in iser_inv_desc()
    579  iser_err("Bogus remote invalidation for rkey %#x\n", rkey);    in iser_inv_desc()
    593  u32 rkey = wc->ex.invalidate_rkey;    in iser_check_remote_inv() (local)
    596  iser_conn, rkey);    in iser_check_remote_inv()
    611  if (unlikely(iser_inv_desc(desc, rkey)))    in iser_check_remote_inv()
    [all …]
/linux-6.3-rc2/fs/nfs/
nfs4idmap.c
    283  struct key *rkey = ERR_PTR(-EAGAIN);    in nfs_idmap_request_key() (local)
    292  if (IS_ERR(rkey)) {    in nfs_idmap_request_key()
    298  if (!IS_ERR(rkey))    in nfs_idmap_request_key()
    302  return rkey;    in nfs_idmap_request_key()
    310  struct key *rkey;    in nfs_idmap_get_key() (local)
    318  if (IS_ERR(rkey)) {    in nfs_idmap_get_key()
    319  ret = PTR_ERR(rkey);    in nfs_idmap_get_key()
    324  rkey->perm |= KEY_USR_VIEW;    in nfs_idmap_get_key()
    326  ret = key_validate(rkey);    in nfs_idmap_get_key()
    330  payload = user_key_payload_rcu(rkey);    in nfs_idmap_get_key()
    [all …]
/linux-6.3-rc2/drivers/infiniband/core/
rw.c
    126  u64 remote_addr, u32 rkey, enum dma_data_direction dir)    in rdma_rw_init_mr_wrs() (argument)
    162  reg->wr.rkey = rkey;    in rdma_rw_init_mr_wrs()
    197  u64 remote_addr, u32 rkey, enum dma_data_direction dir)    in rdma_rw_init_map_wrs() (argument)
    223  rdma_wr->rkey = rkey;    in rdma_rw_init_map_wrs()
    271  rdma_wr->rkey = rkey;    in rdma_rw_init_single_wr()
    294  u64 remote_addr, u32 rkey, enum dma_data_direction dir)    in rdma_rw_ctx_init() (argument)
    328  sg_offset, remote_addr, rkey, dir);    in rdma_rw_ctx_init()
    331  remote_addr, rkey, dir);    in rdma_rw_ctx_init()
    334  remote_addr, rkey, dir);    in rdma_rw_ctx_init()
    368  u64 remote_addr, u32 rkey, enum dma_data_direction dir)    in rdma_rw_ctx_signature_init() (argument)
    [all …]
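The rkey argument threaded through these functions is the peer's remote key for the buffer being read or written. A hedged usage sketch of the rdma_rw_ctx API follows; the argument order is as I recall it from include/rdma/rw.h, completion handling is omitted, and issue_rdma_read() and its cqe argument are placeholders.

#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>
#include <rdma/rw.h>

static int issue_rdma_read(struct ib_qp *qp, u32 port_num,
                           struct scatterlist *sg, u32 sg_cnt,
                           u64 remote_addr, u32 rkey, struct ib_cqe *cqe)
{
        struct rdma_rw_ctx ctx;
        int ret;

        /* maps the local sg list and builds either plain RDMA WRs or an
         * MR registration + RDMA chain, keyed by the peer's rkey */
        ret = rdma_rw_ctx_init(&ctx, qp, port_num, sg, sg_cnt, 0,
                               remote_addr, rkey, DMA_FROM_DEVICE);
        if (ret < 0)
                return ret;

        /* post the prepared WR chain; the ctx is normally torn down with
         * rdma_rw_ctx_destroy() from the completion handler */
        return rdma_rw_ctx_post(&ctx, qp, port_num, cqe, NULL);
}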
uverbs_std_types_mr.c
    152  &mr->rkey, sizeof(mr->rkey));    in UVERBS_HANDLER()
    169  &mr->rkey, sizeof(mr->rkey));    in UVERBS_HANDLER()
    265  &mr->rkey, sizeof(mr->rkey));    in UVERBS_HANDLER()
/linux-6.3-rc2/include/uapi/rdma/
rdma_user_rxe.h
     88  __u32 rkey;    (struct member)
     94  __u32 rkey;    (struct member)
    101  __u32 rkey;    (struct member)
    118  __u32 rkey;    (struct member)
vmw_pvrdma-abi.h
    251  __u32 rkey;    (struct member)
    258  __u32 rkey;    (struct member)
    264  __u32 rkey;    (struct member)
    277  __u32 rkey;    (struct member)
/linux-6.3-rc2/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_mr.c
     95  mr->ibmr.rkey = resp->rkey;    in pvrdma_get_dma_mr()
    183  mr->ibmr.rkey = resp->rkey;    in pvrdma_reg_user_mr()
    255  mr->ibmr.rkey = resp->rkey;    in pvrdma_alloc_mr()
/linux-6.3-rc2/fs/reiserfs/
dir.c
     69  const struct reiserfs_key *rkey;    in reiserfs_readdir_inode() (local)
    242  rkey = get_rkey(&path_to_entry, inode->i_sb);    in reiserfs_readdir_inode()
    243  if (!comp_le_keys(rkey, &MIN_KEY)) {    in reiserfs_readdir_inode()
    253  if (COMP_SHORT_KEYS(rkey, &pos_key)) {    in reiserfs_readdir_inode()
    259  le_key_k_offset(KEY_FORMAT_3_5, rkey));    in reiserfs_readdir_inode()
/linux-6.3-rc2/drivers/infiniband/sw/siw/
siw_qp_tx.c
    140  c_tx->pkt.rreq.source_stag = htonl(wqe->sqe.rkey);    in siw_qp_prepare_tx()
    185  c_tx->pkt.send_inv.inval_stag = cpu_to_be32(wqe->sqe.rkey);    in siw_qp_prepare_tx()
    197  c_tx->pkt.rwrite.sink_stag = htonl(wqe->sqe.rkey);    in siw_qp_prepare_tx()
    211  c_tx->pkt.rresp.sink_stag = cpu_to_be32(wqe->sqe.rkey);    in siw_qp_prepare_tx()
    946  siw_dbg_pd(pd, "STag 0x%08x\n", sqe->rkey);    in siw_fastreg_mr()
    949  pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);    in siw_fastreg_mr()
    953  if (unlikely(base_mr->rkey >> 8 != sqe->rkey >> 8)) {    in siw_fastreg_mr()
    958  mem = siw_mem_id2obj(sdev, sqe->rkey >> 8);    in siw_fastreg_mr()
    975  mem->stag = sqe->rkey;    in siw_fastreg_mr()
    978  siw_dbg_mem(mem, "STag 0x%08x now valid\n", sqe->rkey);    in siw_fastreg_mr()
    [all …]
/linux-6.3-rc2/arch/arm64/crypto/
sm4-ce-glue.c
     29  asmlinkage void sm4_ce_crypt_block(const u32 *rkey, u8 *dst, const u8 *src);
     30  asmlinkage void sm4_ce_crypt(const u32 *rkey, u8 *dst, const u8 *src,
     32  asmlinkage void sm4_ce_cbc_enc(const u32 *rkey, u8 *dst, const u8 *src,
     34  asmlinkage void sm4_ce_cbc_dec(const u32 *rkey, u8 *dst, const u8 *src,
     36  asmlinkage void sm4_ce_cbc_cts_enc(const u32 *rkey, u8 *dst, const u8 *src,
     38  asmlinkage void sm4_ce_cbc_cts_dec(const u32 *rkey, u8 *dst, const u8 *src,
     40  asmlinkage void sm4_ce_cfb_enc(const u32 *rkey, u8 *dst, const u8 *src,
     42  asmlinkage void sm4_ce_cfb_dec(const u32 *rkey, u8 *dst, const u8 *src,
     44  asmlinkage void sm4_ce_ctr_enc(const u32 *rkey, u8 *dst, const u8 *src,
    114  static int sm4_ecb_do_crypt(struct skcipher_request *req, const u32 *rkey)    in sm4_ecb_do_crypt() (argument)
    [all …]
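Note that in this file rkey is the SM4 round-key schedule produced by key expansion, not an RDMA remote key. A hedged sketch of how such a Crypto-Extensions helper is typically driven (the NEON unit has to be claimed around the call); the trailing parameter of the truncated prototype and the wrapper name are assumptions, not copied from the glue code.

#include <linux/linkage.h>
#include <asm/neon.h>
#include <crypto/sm4.h>

/* assumed completion of the truncated prototype at line 30 */
asmlinkage void sm4_ce_crypt(const u32 *rkey, u8 *dst, const u8 *src,
                             unsigned int nblocks);

static void demo_sm4_ce_encrypt(const struct sm4_ctx *ctx, u8 *dst,
                                const u8 *src, unsigned int nblocks)
{
        kernel_neon_begin();            /* claim the FP/SIMD unit */
        sm4_ce_crypt(ctx->rkey_enc, dst, src, nblocks);
        kernel_neon_end();
}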
/linux-6.3-rc2/drivers/infiniband/sw/rdmavt/
mr.c
    260  mr->ibmr.rkey = mr->mr.lkey;    in __rvt_alloc_mr()
    630  ibmr->rkey = key;    in rvt_fast_reg_mr()
    647  int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey)    in rvt_invalidate_rkey() (argument)
    653  if (rkey == 0)    in rvt_invalidate_rkey()
    658  rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]);    in rvt_invalidate_rkey()
    659  if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))    in rvt_invalidate_rkey()
    827  u32 len, u64 vaddr, u32 rkey, int acc)    in rvt_rkey_ok() (argument)
    840  if (rkey == 0) {    in rvt_rkey_ok()
    861  mr = rcu_dereference(rkt->table[rkey >> rkt->shift]);    in rvt_rkey_ok()
    869  mr->lkey != rkey || qp->ibqp.pd != mr->pd))    in rvt_rkey_ok()
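These hits describe rdmavt's rkey resolution: the high bits of the rkey index a per-device key table, the candidate entry is accepted only if its stored key and protection domain match, and rkey == 0 is special-cased for the reserved-key (direct physical) path. A simplified sketch with stand-in types; the real ones are struct rvt_lkey_table and struct rvt_mregion.

#include <linux/types.h>

struct demo_mregion {
        u32 lkey;                       /* key stored at registration time */
        void *pd;                       /* owning protection domain */
};

struct demo_key_table {
        struct demo_mregion **table;    /* indexed by the rkey's high bits */
        u32 shift;                      /* 32 - lkey_table_size */
};

static struct demo_mregion *demo_rkey_lookup(struct demo_key_table *rkt,
                                             void *qp_pd, u32 rkey)
{
        struct demo_mregion *mr;

        if (rkey == 0)
                return NULL;            /* caller handles the reserved-key path */

        mr = rkt->table[rkey >> rkt->shift];
        if (!mr || mr->lkey != rkey || mr->pd != qp_pd)
                return NULL;            /* stale key or wrong PD: reject */

        return mr;
}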
/linux-6.3-rc2/drivers/infiniband/ulp/rtrs/
README
     51  then pass it to the block layer. A new rkey is generated and registered for the
     53  The new rkey is sent back to the client along with the IO result.
    144  using the IMM field, Server invalidate rkey associated to the memory chunks
    149  inflight IO and for the error code. The new rkey is sent back using
    150  SEND_WITH_IMM WR, client When it recived new rkey message, it validates
    151  the message and finished IO after update rkey for the rbuffer, then post
    186  Server invalidate rkey associated to the memory chunks first, when it finishes,
    192  outstanding inflight IO and the error code. The new rkey is sent back using
    193  SEND_WITH_IMM WR, client When it recived new rkey message, it validates
    194  the message and finished IO after update rkey for the rbuffer, then post
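The excerpts describe the per-IO rkey rotation RTRS performs: the server invalidates and re-registers the memory chunk, returns the fresh rkey together with the IO result in a SEND_WITH_IMM message, and the client updates its cached rkey for that remote buffer before completing the request. Below is a client-side sketch of that last step; the message layout and every name here are invented for illustration and do not match the real rtrs wire format.

#include <linux/types.h>

struct demo_rkey_rsp {                  /* hypothetical response message */
        u16 buf_id;                     /* which server chunk was re-registered */
        u32 rkey;                       /* the freshly registered remote key */
};

struct demo_rbuf {
        u64 addr;                       /* remote address of the chunk */
        u32 rkey;                       /* key used for the next RDMA to it */
};

/* wire-endianness handling omitted for brevity */
static void demo_handle_rkey_rsp(struct demo_rbuf *rbufs, size_t nr_bufs,
                                 const struct demo_rkey_rsp *msg)
{
        if (msg->buf_id >= nr_bufs)
                return;                 /* validate before trusting the server */

        /* remember the rotated rkey; subsequent requests to this chunk use it */
        rbufs[msg->buf_id].rkey = msg->rkey;
}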
rtrs-srv.c
    205  u32 rkey = 0;    in rdma_write_sg() (local)
    237  if (rkey == 0)    in rdma_write_sg()
    238  rkey = wr->rkey;    in rdma_write_sg()
    241  WARN_ON_ONCE(rkey != wr->rkey);    in rdma_write_sg()
    274  inv_wr.ex.invalidate_rkey = rkey;    in rdma_write_sg()
    287  rwr.key = srv_mr->mr->rkey;    in rdma_write_sg()
    293  msg->rkey = cpu_to_le32(srv_mr->mr->rkey);    in rdma_write_sg()
    408  rwr.key = srv_mr->mr->rkey;    in send_io_resp_imm()
    414  msg->rkey = cpu_to_le32(srv_mr->mr->rkey);    in send_io_resp_imm()
    841  rwr[mri].key = mr->rkey;    in process_info_req()
    [all …]
/linux-6.3-rc2/include/rdma/
rw.h
     47  u64 remote_addr, u32 rkey, enum dma_data_direction dir);
     55  struct ib_sig_attrs *sig_attrs, u64 remote_addr, u32 rkey,
/linux-6.3-rc2/drivers/infiniband/hw/qib/
qib_rc.c
    350  ohdr->u.rc.reth.rkey =    in qib_make_rc_req()
    393  ohdr->u.rc.reth.rkey =    in qib_make_rc_req()
    436  wqe->atomic_wr.rkey);    in qib_make_rc_req()
    555  ohdr->u.rc.reth.rkey =    in qib_make_rc_req()
    556  cpu_to_be32(wqe->rdma_wr.rkey);    in qib_make_rc_req()
   1608  u32 rkey = be32_to_cpu(reth->rkey);    in qib_rc_rcv_error() (local)
   1906  u32 rkey = be32_to_cpu(reth->rkey);    in qib_rc_rcv() (local)
   1964  u32 rkey = be32_to_cpu(reth->rkey);    in qib_rc_rcv() (local)
   2013  u32 rkey;    in qib_rc_rcv() (local)
   2036  rkey = be32_to_cpu(ateth->rkey);    in qib_rc_rcv()
    [all …]
qib_uc.c
    133  ohdr->u.rc.reth.rkey =    in qib_make_uc_req()
    134  cpu_to_be32(wqe->rdma_wr.rkey);    in qib_make_uc_req()
    421  u32 rkey = be32_to_cpu(reth->rkey);    in qib_uc_rcv() (local)
    427  vaddr, rkey, IB_ACCESS_REMOTE_WRITE);    in qib_uc_rcv()
/linux-6.3-rc2/drivers/infiniband/hw/hns/
hns_roce_mr.c
    210  mr->ibmr.rkey = mr->ibmr.lkey = mr->key;    in hns_roce_get_dma_mr()
    251  mr->ibmr.rkey = mr->ibmr.lkey = mr->key;    in hns_roce_reg_user_mr()
    390  mr->ibmr.rkey = mr->ibmr.lkey = mr->key;    in hns_roce_alloc_mr()
    466  key_to_hw_index(mw->rkey) &    in hns_roce_mw_free()
    472  key_to_hw_index(mw->rkey));    in hns_roce_mw_free()
    476  (int)key_to_hw_index(mw->rkey));    in hns_roce_mw_free()
    485  unsigned long mtpt_idx = key_to_hw_index(mw->rkey);    in hns_roce_mw_enable()
    544  mw->rkey = hw_index_to_key(id);    in hns_roce_alloc_mw()
    546  ibmw->rkey = mw->rkey;    in hns_roce_alloc_mw()