| /drivers/mailbox/ |
| ast2700-mailbox.c |
     26  u8 msg_size;  [member]
     31  u8 msg_size;  [member]
     51  int num_words = mb->msg_size / sizeof(u32);  [in ast2700_mbox_irq()]
     69  data_reg = mb->rx_regs + IPCR_DATA + mb->msg_size * n;  [in ast2700_mbox_irq()]
     88  void __iomem *data_reg = mb->tx_regs + IPCR_DATA + mb->msg_size * idx;  [in ast2700_mbox_send_data()]
     90  int num_words = mb->msg_size / sizeof(u32);  [in ast2700_mbox_send_data()]
    177  mb->mbox.chans[i].con_priv = devm_kcalloc(dev, dev_data->msg_size,  [in ast2700_mbox_probe()]
    193  mb->msg_size = dev_data->msg_size;  [in ast2700_mbox_probe()]
    215  .msg_size = 0x20,
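The matches above show the access pattern: a fixed-size message (msg_size bytes, 0x20 in the chip data) is copied as 32-bit words to or from a per-channel slot at IPCR_DATA + msg_size * index. A minimal user-space sketch of that arithmetic follows; the IPCR_DATA value, the read direction, and the function name are assumptions taken from the snippets, not the driver's register map.

```c
#include <stdint.h>
#include <stddef.h>

#define IPCR_DATA 0x10u   /* hypothetical offset of the data area */

/* Copy one channel's fixed-size message out of a register block, word by word. */
void mbox_read_channel(const uint32_t *regs, size_t msg_size,
                       unsigned int chan, uint32_t *out)
{
	size_t num_words = msg_size / sizeof(uint32_t);
	const uint32_t *slot = regs + (IPCR_DATA + msg_size * chan) / sizeof(uint32_t);
	size_t i;

	for (i = 0; i < num_words; i++)
		out[i] = slot[i];   /* the driver uses readl() on an __iomem pointer here */
}
```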
|
| /drivers/crypto/intel/qat/qat_common/ |
| qat_crypto.c |
    138  int msg_size;  [in qat_crypto_create_instances(), local]
    203  msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;  [in qat_crypto_create_instances()]
    206  msg_size, key, NULL, 0, &inst->sym_tx);  [in qat_crypto_create_instances()]
    210  msg_size = msg_size >> 1;  [in qat_crypto_create_instances()]
    213  msg_size, key, NULL, 0, &inst->pke_tx);  [in qat_crypto_create_instances()]
    217  msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;  [in qat_crypto_create_instances()]
    220  msg_size, key, qat_alg_callback, 0,  [in qat_crypto_create_instances()]
    227  msg_size, key, qat_alg_asym_callback, 0,  [in qat_crypto_create_instances()]
|
| adf_transport.c |
     29  static int adf_verify_ring_size(u32 msg_size, u32 msg_num)  [in adf_verify_ring_size(), argument]
     34  if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))  [in adf_verify_ring_size()]
     93  ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {  [in adf_send_message()]
     99  ADF_MSG_SIZE_TO_BYTES(ring->msg_size));  [in adf_send_message()]
    102  ADF_MSG_SIZE_TO_BYTES(ring->msg_size),  [in adf_send_message()]
    123  ADF_MSG_SIZE_TO_BYTES(ring->msg_size),  [in adf_handle_response()]
    219  u32 msg_size, const char *ring_name,  [in adf_create_ring(), argument]
    236  if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {  [in adf_create_ring()]
    241  ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {  [in adf_create_ring()]
    271  ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);  [in adf_create_ring()]
    [all …]
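These lines show how adf_create_ring() validates ring geometry: the message size must not exceed the hardware maximum, the ring must hold at least two in-flight messages, and msg_size * msg_num must land exactly on a supported ring byte size. A minimal sketch of that check follows; the size table, limits, and names below are illustrative stand-ins, not the QAT hardware values.

```c
#include <stdint.h>
#include <stdbool.h>

#define RING_SIZE_BYTES(i)  (1024u << (i))   /* hypothetical supported sizes: 1 KiB, 2 KiB, ... */
#define NUM_RING_SIZES      8
#define MAX_MSG_SIZE_BYTES  128u              /* hypothetical per-message upper bound */

/* Return true only if msg_size * msg_num exactly fills one supported ring size. */
bool ring_geometry_ok(uint32_t msg_size, uint32_t msg_num)
{
	int i;

	if (msg_size > MAX_MSG_SIZE_BYTES)
		return false;
	if (msg_num < 2)        /* a ring that cannot hold two messages is useless */
		return false;

	for (i = 0; i < NUM_RING_SIZES; i++)
		if (msg_size * msg_num == RING_SIZE_BYTES(i))
			return true;

	return false;
}
```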
|
| adf_pfvf_pf_proto.c |
     87  u8 msg_size;  [in adf_pf2vf_blkmsg_get_data(), local]
    103  msg_size = ADF_PFVF_BLKMSG_HEADER_SIZE + blkmsg[ADF_PFVF_BLKMSG_LEN_BYTE];  [in adf_pf2vf_blkmsg_get_data()]
    105  if (unlikely(msg_size >= max_size)) {  [in adf_pf2vf_blkmsg_get_data()]
    107  msg_size, type);  [in adf_pf2vf_blkmsg_get_data()]
    112  if (unlikely(byte >= msg_size)) {  [in adf_pf2vf_blkmsg_get_data()]
    114  byte, msg_size);  [in adf_pf2vf_blkmsg_get_data()]
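Here the PF-to-VF block message size is derived from a length byte carried in the message itself, then checked against both the buffer capacity and the requested byte index before anything is read. A short sketch of those bounds checks, with assumed header constants:

```c
#include <stdint.h>

#define BLKMSG_HEADER_SIZE 2u   /* hypothetical: type byte + length byte */
#define BLKMSG_LEN_BYTE    1u   /* hypothetical position of the length byte */

/* Fetch one byte of a block message, rejecting malformed lengths and indices. */
int blkmsg_get_byte(const uint8_t *blkmsg, uint8_t max_size,
                    uint8_t byte_index, uint8_t *out)
{
	uint8_t msg_size = BLKMSG_HEADER_SIZE + blkmsg[BLKMSG_LEN_BYTE];

	if (msg_size >= max_size)     /* length byte claims more than the buffer holds */
		return -1;
	if (byte_index >= msg_size)   /* caller asked for a byte past the message end */
		return -1;

	*out = blkmsg[byte_index];
	return 0;
}
```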
|
| adf_transport_debug.c |
     15  ADF_MSG_SIZE_TO_BYTES(ring->msg_size))
     31  (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);  [in adf_ring_start()]
     45  return ring->base_addr + (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * val);  [in adf_ring_next()]
     74  ADF_MSG_SIZE_TO_BYTES(ring->msg_size));  [in adf_ring_show()]
     79  v, ADF_MSG_SIZE_TO_BYTES(ring->msg_size), false);  [in adf_ring_show()]
|
| qat_compression.c |
    121  int msg_size;  [in qat_compression_create_instances(), local]
    166  msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;  [in qat_compression_create_instances()]
    169  msg_size, key, NULL, 0, &inst->dc_tx);  [in qat_compression_create_instances()]
    173  msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;  [in qat_compression_create_instances()]
    176  msg_size, key, qat_comp_alg_callback, 0,  [in qat_compression_create_instances()]
|
| /drivers/net/ethernet/marvell/octeontx2/af/ |
| mbox.c |
     30  mdev->msg_size = 0;  [in __otx2_mbox_reset()]
     33  tx_hdr->msg_size = 0;  [in __otx2_mbox_reset()]
     36  rx_hdr->msg_size = 0;  [in __otx2_mbox_reset()]
    327  mdev->msg_size);  [in otx2_mbox_msg_send_data()]
    332  tx_hdr->msg_size = mdev->msg_size;  [in otx2_mbox_msg_send_data()]
    337  mdev->msg_size = 0;  [in otx2_mbox_msg_send_data()]
    352  trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size,  [in otx2_mbox_msg_send_data()]
    414  if ((mdev->msg_size + size) > mbox->tx_size - msgs_offset)  [in otx2_mbox_alloc_msg_rsp()]
    419  if (mdev->msg_size == 0)  [in otx2_mbox_alloc_msg_rsp()]
    429  mdev->msg_size += size;  [in otx2_mbox_alloc_msg_rsp()]
    [all …]
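The pattern in otx2_mbox_alloc_msg_rsp() is message aggregation: each allocation is appended into a fixed shared TX region, mdev->msg_size accumulates the queued bytes, and the total is published in the TX header when the batch is sent, then reset to zero. A simplified sketch, with struct fields reduced to what the arithmetic needs (they are stand-ins for the real mbox objects):

```c
#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct mbox_dev {
	uint8_t  *tx_base;     /* start of the shared TX region        */
	size_t    tx_size;     /* total bytes available in that region */
	size_t    msgs_offset; /* bytes reserved for the region header */
	size_t    msg_size;    /* bytes of messages queued so far      */
	uint16_t  num_msgs;
};

/* Reserve 'size' bytes for the next message, or return NULL if the region is full. */
void *mbox_alloc_msg(struct mbox_dev *mdev, size_t size)
{
	void *msg;

	if (mdev->msg_size + size > mdev->tx_size - mdev->msgs_offset)
		return NULL;                       /* no room left for this message */

	msg = mdev->tx_base + mdev->msgs_offset + mdev->msg_size;
	memset(msg, 0, size);
	mdev->msg_size += size;                    /* published in the TX header on send */
	mdev->num_msgs++;
	return msg;
}
```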
|
| rvu_trace.h |
     39  TP_PROTO(const struct pci_dev *pdev, u16 num_msgs, u64 msg_size,
     41  TP_ARGS(pdev, num_msgs, msg_size, id, pcifunc),
     44  __field(u64, msg_size)
     50  __entry->msg_size = msg_size;
     55  __get_str(dev), __entry->num_msgs, __entry->msg_size,
|
| /drivers/net/can/peak_canfd/ |
| peak_canfd.c |
    450  int msg_size = le16_to_cpu(msg->size);  [in peak_canfd_handle_msg(), local]
    453  if (!msg_size || !msg_type) {  [in peak_canfd_handle_msg()]
    479  return msg_size;  [in peak_canfd_handle_msg()]
    487  int i, msg_size = 0;  [in peak_canfd_handle_msgs_list(), local]
    493  if (msg_size <= 0)  [in peak_canfd_handle_msgs_list()]
    496  msg_ptr += ALIGN(msg_size, 4);  [in peak_canfd_handle_msgs_list()]
    499  if (msg_size < 0)  [in peak_canfd_handle_msgs_list()]
    500  return msg_size;  [in peak_canfd_handle_msgs_list()]
    648  u16 msg_size, msg_flags;  [in peak_canfd_start_xmit(), local]
    657  msg_size = ALIGN(sizeof(*msg) + cf->len, 4);  [in peak_canfd_start_xmit()]
    [all …]
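These lines walk a packed list of variable-size firmware messages: each message declares its own little-endian 16-bit size, the per-message handler returns the bytes it consumed (or a value <= 0 to stop), and the cursor advances by that size rounded up to 4 bytes, matching ALIGN(msg_size, 4). A sketch of the walk, assuming a little-endian host and a simplified message layout:

```c
#include <stdint.h>
#include <stddef.h>

#define ALIGN4(x) (((x) + 3u) & ~3u)

struct fw_msg {
	uint16_t size;    /* little-endian on the wire; LE host assumed here */
	uint16_t type;
	uint8_t  data[];
};

/* Returns bytes consumed, 0 for "end of list", <0 on error. */
int handle_one_msg(const struct fw_msg *msg)
{
	if (!msg->size || !msg->type)
		return 0;
	/* ... dispatch on msg->type ... */
	return msg->size;
}

/* Walk the buffer, advancing by each message's size rounded up to 4 bytes. */
int handle_msg_list(const uint8_t *buf, size_t len)
{
	const uint8_t *msg_ptr = buf;
	const uint8_t *end = buf + len;
	int msg_size;

	while (msg_ptr + sizeof(struct fw_msg) <= end) {
		msg_size = handle_one_msg((const struct fw_msg *)msg_ptr);
		if (msg_size <= 0)
			return msg_size;
		msg_ptr += ALIGN4(msg_size);
	}
	return 0;
}
```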
|
| /drivers/crypto/marvell/octeontx2/ |
| otx2_cptvf_mbox.c |
     34  u64 msg_size;  [in otx2_cpt_sync_mbox_bbuf(), local]
     40  msg_size = hdr->msg_size;  [in otx2_cpt_sync_mbox_bbuf()]
     42  if (msg_size > mbox->rx_size - msgs_offset)  [in otx2_cpt_sync_mbox_bbuf()]
     43  msg_size = mbox->rx_size - msgs_offset;  [in otx2_cpt_sync_mbox_bbuf()]
     47  hw_mbase + mbox->rx_start, msg_size + msgs_offset);  [in otx2_cpt_sync_mbox_bbuf()]
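The point of otx2_cpt_sync_mbox_bbuf() is that the size advertised in the hardware mailbox header is not trusted: it is clamped to what the local bounce buffer can hold before the copy. A short sketch of that clamp, with parameter names simplified from the real structures:

```c
#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Copy header-declared mailbox contents into a bounce buffer, never past its end. */
void sync_mbox_bounce(uint8_t *bbuf, size_t rx_size, size_t msgs_offset,
                      const uint8_t *hw_base, uint64_t hdr_msg_size)
{
	uint64_t msg_size = hdr_msg_size;

	if (msg_size > rx_size - msgs_offset)
		msg_size = rx_size - msgs_offset;   /* cap an oversized header value */

	memcpy(bbuf, hw_base, msg_size + msgs_offset);
}
```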
|
| /drivers/net/ethernet/broadcom/bnxt/ |
| bnxt_sriov.c |
   1004  __le16 encap_resp_cpr, u32 msg_size)  [in bnxt_hwrm_fwd_resp(), argument]
   1009  if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) {  [in bnxt_hwrm_fwd_resp()]
   1011  msg_size);  [in bnxt_hwrm_fwd_resp()]
   1020  req->encap_resp_len = cpu_to_le16(msg_size);  [in bnxt_hwrm_fwd_resp()]
   1023  memcpy(req->encap_resp, encap_resp, msg_size);  [in bnxt_hwrm_fwd_resp()]
   1033  u32 msg_size)  [in bnxt_hwrm_fwd_err_resp(), argument]
   1038  if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))  [in bnxt_hwrm_fwd_err_resp()]
   1056  u32 msg_size)  [in bnxt_hwrm_exec_fwd_resp(), argument]
   1061  if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))  [in bnxt_hwrm_exec_fwd_resp()]
   1096  return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);  [in bnxt_vf_configure_mac()]
    [all …]
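bnxt_hwrm_fwd_resp() guards the forward path: the size of the response being relayed to a VF is checked (BNXT_FWD_RESP_SIZE_ERR) before it is memcpy'd into the fixed-size encap buffer, and the same length is recorded in the request. A sketch of that guard; the 128-byte capacity is an illustrative assumption, not the HWRM value:

```c
#include <stdint.h>
#include <string.h>

#define ENCAP_RESP_MAX 128u   /* hypothetical capacity of the encap buffer */

struct fwd_resp_req {
	uint16_t encap_resp_len;             /* little-endian in the real ABI */
	uint8_t  encap_resp[ENCAP_RESP_MAX];
};

/* Reject oversized responses before copying them into the fixed-size request. */
int fwd_resp(struct fwd_resp_req *req, const void *resp, uint32_t msg_size)
{
	if (msg_size > ENCAP_RESP_MAX)
		return -1;

	req->encap_resp_len = (uint16_t)msg_size;
	memcpy(req->encap_resp, resp, msg_size);
	return 0;
}
```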
|
| /drivers/gpu/drm/amd/display/modules/hdcp/ |
| hdcp_log.h |
     96  #define HDCP_DDC_READ_TRACE(hdcp, msg_name, msg, msg_size) do { \  [argument]
     97  mod_hdcp_dump_binary_message(msg, msg_size, hdcp->buf, \
    102  #define HDCP_DDC_WRITE_TRACE(hdcp, msg_name, msg, msg_size) do { \  [argument]
    103  mod_hdcp_dump_binary_message(msg, msg_size, hdcp->buf, \
|
| hdcp_log.c |
     29  void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,  [argument]
     36  uint32_t line_count = msg_size / bytes_per_line,  [in mod_hdcp_dump_binary_message()]
     37  trailing_bytes = msg_size % bytes_per_line;  [in mod_hdcp_dump_binary_message()]
     44  for (i = 0; i < msg_size; i++) {  [in mod_hdcp_dump_binary_message()]
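The arithmetic here is a fixed-width hex dump: the division gives the number of full lines and the remainder gives the trailing partial line. A standalone sketch that prints to stdout instead of the driver's log buffer (the function name and output target differ from mod_hdcp_dump_binary_message()):

```c
#include <stdint.h>
#include <stdio.h>

/* Render a binary message as 16-byte hex lines. */
void dump_binary_message(const uint8_t *msg, uint32_t msg_size)
{
	const uint32_t bytes_per_line = 16;
	uint32_t line_count = msg_size / bytes_per_line;      /* full lines   */
	uint32_t trailing_bytes = msg_size % bytes_per_line;  /* partial line */
	uint32_t i;

	(void)line_count;       /* the driver sizes its text buffer from these */
	(void)trailing_bytes;

	for (i = 0; i < msg_size; i++) {
		printf("%02x ", msg[i]);
		if ((i + 1) % bytes_per_line == 0 || i + 1 == msg_size)
			printf("\n");
	}
}

int main(void)
{
	uint8_t cert[20] = { 0 };
	dump_binary_message(cert, sizeof(cert));
	return 0;
}
```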
|
| hdcp_psp.c |
     41  in->process.msg1_desc.msg_size = 0;  [in hdcp2_message_init()]
     43  in->process.msg2_desc.msg_size = 0;  [in hdcp2_message_init()]
     45  in->process.msg3_desc.msg_size = 0;  [in hdcp2_message_init()]
    628  msg_in->process.msg1_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_CERT;  [in mod_hdcp_hdcp2_validate_ake_cert()]
    688  msg_in->process.msg1_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_H_PRIME;  [in mod_hdcp_hdcp2_validate_h_prime()]
    695  msg_in->process.msg2_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__AKE_SEND_PAIRING_INFO;  [in mod_hdcp_hdcp2_validate_h_prime()]
    767  msg_in->process.msg1_desc.msg_size = TA_HDCP_HDCP2_MSG_ID_MAX_SIZE__LC_SEND_L_PRIME;  [in mod_hdcp_hdcp2_validate_l_prime()]
    815  msg_out->prepare.msg1_desc.msg_size =  [in mod_hdcp_hdcp2_prepare_eks()]
    879  msg_in->process.msg1_desc.msg_size = sizeof(hdcp->auth.msg.hdcp2.rx_id_list);  [in mod_hdcp_hdcp2_validate_rx_id_list()]
    983  hdcp->auth.msg.hdcp2.stream_manage_size = msg_out->prepare.msg1_desc.msg_size;  [in mod_hdcp_hdcp2_prepare_stream_management()]
    [all …]
|
| /drivers/media/platform/mediatek/vcodec/encoder/ |
| venc_vpu_if.c |
    191  size_t msg_size = is_ext ?  [in vpu_enc_set_param(), local]
    241  if (vpu_enc_send_msg(vpu, &out, msg_size)) {  [in vpu_enc_set_param()]
    258  size_t msg_size = is_ext ?  [in vpu_enc_encode_32bits(), local]
    291  if (vpu_enc_send_msg(vpu, &out, msg_size)) {  [in vpu_enc_encode_32bits()]
    306  size_t msg_size = sizeof(struct venc_ap_ipi_msg_enc_ext_34);  [in vpu_enc_encode_34bits(), local]
    337  if (vpu_enc_send_msg(vpu, &out, msg_size)) {  [in vpu_enc_encode_34bits()]
|
| /drivers/tty/ |
| rpmsg_tty.c |
     81  int msg_max_size, msg_size;  [in rpmsg_tty_write(), local]
     90  msg_size = min_t(unsigned int, len, msg_max_size);  [in rpmsg_tty_write()]
     96  ret = rpmsg_trysend(rpdev->ept, (void *)buf, msg_size);  [in rpmsg_tty_write()]
    102  return msg_size;  [in rpmsg_tty_write()]
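rpmsg_tty_write() clamps each TTY write to the rpmsg endpoint message limit and returns only the bytes it actually sent, so the TTY core calls back with the remainder. A sketch of that flow, with the MTU value and the send stub standing in for rpmsg_trysend():

```c
#include <stddef.h>

#define EPT_MTU 496u   /* hypothetical endpoint message size limit */

/* Placeholder for rpmsg_trysend(); returns 0 on success. */
int ept_trysend(const void *buf, size_t len) { (void)buf; (void)len; return 0; }

/* Send at most one MTU-sized chunk and report how many bytes were consumed. */
int tty_write_chunk(const unsigned char *buf, size_t len)
{
	size_t msg_size = len < EPT_MTU ? len : EPT_MTU;   /* min(len, MTU) */
	int ret = ept_trysend(buf, msg_size);

	if (ret)
		return ret;            /* e.g. no TX buffer currently available */
	return (int)msg_size;          /* bytes consumed from the caller's data */
}
```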
|
| /drivers/firmware/arm_scmi/transports/ |
| optee.c |
    281  static int invoke_process_msg_channel(struct scmi_optee_channel *channel, size_t msg_size)  [argument]
    296  param[1].u.memref.size = msg_size;  [in invoke_process_msg_channel()]
    333  const size_t msg_size = SCMI_SHMEM_MAX_PAYLOAD_SIZE;  [in setup_dynamic_shmem(), local]
    336  channel->tee_shm = tee_shm_alloc_kernel_buf(scmi_optee_private->tee_ctx, msg_size);  [in setup_dynamic_shmem()]
    343  memset(shbuf, 0, msg_size);  [in setup_dynamic_shmem()]
    345  channel->rx_len = msg_size;  [in setup_dynamic_shmem()]
|
| /drivers/accel/amdxdna/ |
| amdxdna_mailbox.c |
    280  u32 msg_size, rest;  [in mailbox_get_msg(), local]
    322  msg_size = sizeof(header) + header.total_size;  [in mailbox_get_msg()]
    324  if (msg_size > ringbuf_size - head || msg_size > tail - head) {  [in mailbox_get_msg()]
    326  msg_size, tail, head);  [in mailbox_get_msg()]
    337  mailbox_set_headptr(mb_chann, head + msg_size);  [in mailbox_get_msg()]
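mailbox_get_msg() frames messages in a ring buffer: the total size is the fixed header plus header.total_size payload bytes, and it must fit both in the ring space remaining after the head pointer and in the head-to-tail window before the head is advanced. A simplified sketch; the header layout and the non-wrapping head/tail model are assumptions:

```c
#include <stdint.h>
#include <stdbool.h>

struct msg_header {
	uint32_t total_size;   /* payload bytes following the header */
	uint32_t opcode;
};

/* Validate one message and advance the head pointer past it. */
bool pop_message(uint32_t ringbuf_size, uint32_t *head, uint32_t tail,
                 const struct msg_header *header)
{
	uint32_t msg_size = sizeof(*header) + header->total_size;

	if (msg_size > ringbuf_size - *head || msg_size > tail - *head)
		return false;          /* corrupt or truncated message */

	/* ... copy out header->total_size payload bytes here ... */
	*head += msg_size;             /* consume the message */
	return true;
}
```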
|
| /drivers/misc/sgi-xp/ |
| xpc_uv.c |
    266  size_t msg_size)  [in xpc_send_gru_msg(), argument]
    272  ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size);  [in xpc_send_gru_msg()]
    614  DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);  [in xpc_send_activate_IRQ_uv()]
    649  msg_size);  [in xpc_send_activate_IRQ_uv()]
    662  size_t msg_size, int msg_type)  [in xpc_send_activate_IRQ_part_uv(), argument]
    666  ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);  [in xpc_send_activate_IRQ_part_uv()]
    673  void *msg, size_t msg_size, int msg_type)  [in xpc_send_activate_IRQ_ch_uv(), argument]
   1457  size_t msg_size;  [in xpc_send_payload_uv(), local]
   1462  if (msg_size > ch->entry_size)  [in xpc_send_payload_uv()]
   1496  msg->hdr.size = msg_size;  [in xpc_send_payload_uv()]
    [all …]
|
| xpnet.c |
    368  u16 msg_size = sizeof(struct xpnet_message);  [in xpnet_send(), local]
    378  msg_size += embedded_bytes - 1;  [in xpnet_send()]
    397  msg_size, xpnet_send_completed, queued_msg);  [in xpnet_send()]
|
| /drivers/net/ethernet/brocade/bna/ |
| bfa_msgq.h |
     38  (_cmd)->msg_size = (_msg_size); \
     50  size_t msg_size;  [member]
|
| /drivers/s390/char/ |
| sclp_rw.c |
     90  int msg_size;  [in sclp_initialize_mto(), local]
     93  msg_size = sizeof(struct msg_buf) + max_len;  [in sclp_initialize_mto()]
     97  if ((MAX_SCCB_ROOM - sccb->length) < msg_size)  [in sclp_initialize_mto()]
|
| /drivers/net/ethernet/intel/idpf/ |
| idpf_virtchnl.h |
    116  u16 msg_size, u8 *msg, u16 cookie);
    161  u8 *send_msg, u16 msg_size,
|
| /drivers/greybus/ |
| operation.c |
   1032  size_t msg_size;  [in gb_connection_recv(), local]
   1049  msg_size = le16_to_cpu(header.size);  [in gb_connection_recv()]
   1050  if (size < msg_size) {  [in gb_connection_recv()]
   1055  header.type, size, msg_size);  [in gb_connection_recv()]
   1061  msg_size);  [in gb_connection_recv()]
   1064  msg_size);  [in gb_connection_recv()]
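gb_connection_recv() checks the self-describing size before parsing: the operation header carries its own little-endian length, and a receive buffer shorter than that is discarded. A minimal sketch of the check, with the header reduced to the fields it needs and le16_to_cpu() assumed to be a no-op on a little-endian host:

```c
#include <stdint.h>
#include <stddef.h>

struct op_header {
	uint16_t size;   /* total message size claimed by the sender */
	uint8_t  type;
};

/* Drop short messages whose buffer is smaller than the size they claim. */
int recv_check(const struct op_header *header, size_t size_received)
{
	size_t msg_size = header->size;   /* le16_to_cpu() in the driver */

	if (size_received < msg_size)
		return -1;                /* short message: discard it */

	/* safe to hand msg_size bytes to the operation layer */
	return 0;
}
```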
|
| /drivers/net/ethernet/netronome/nfp/crypto/ |
| ipsec.c |
    137  int i, msg_size, ret;  [in nfp_net_ipsec_cfg(), local]
    143  msg_size = ARRAY_SIZE(msg->raw);  [in nfp_net_ipsec_cfg()]
    144  for (i = 0; i < msg_size; i++)  [in nfp_net_ipsec_cfg()]
    154  for (i = 0; i < msg_size; i++)  [in nfp_net_ipsec_cfg()]
|