/drivers/media/mc/
  mc-request.c
      76  kfree(req);  in media_request_release()
     274  return req;  in media_request_get_by_fd()
     296  req = kzalloc(sizeof(*req), GFP_KERNEL);  in media_request_alloc()
     297  if (!req)  in media_request_alloc()
     325  snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",  in media_request_alloc()
     340  kfree(req);  in media_request_alloc()
     349  struct media_request *req = obj->req;  in media_request_object_release() local
     390  obj->req = NULL;  in media_request_object_init()
     415  obj->req = req;  in media_request_object_bind()
     434  struct media_request *req = obj->req;  in media_request_object_unbind() local
    [all …]
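(The hits above sketch the request lifecycle in mc-request.c: zeroed allocation in media_request_alloc(), a debug tag stamped with snprintf(), kfree() on both the error path and the final release, and objects binding to and unbinding from the request through obj->req. A minimal userspace sketch of that shape, with hypothetical names throughout — this is not the kernel API:)

```c
#include <stdio.h>
#include <stdlib.h>

struct request {
	int refcount;               /* freed when this drops to zero */
	char debug_str[32];         /* "<owner>:<fd>" style tag, as in line 325 */
};

struct object {
	struct request *req;        /* NULL while unbound, as in line 390 */
};

static struct request *request_alloc(unsigned owner, int fd)
{
	struct request *req = calloc(1, sizeof(*req)); /* kzalloc analogue */

	if (!req)
		return NULL;
	req->refcount = 1;
	snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d", owner, fd);
	return req;
}

static void request_release(struct request *req)
{
	if (req && --req->refcount == 0)
		free(req);                             /* kfree analogue, line 76 */
}

static void object_bind(struct object *obj, struct request *req)
{
	obj->req = req;                                /* line 415 */
	req->refcount++;
}

static void object_unbind(struct object *obj)
{
	struct request *req = obj->req;                /* line 434 */

	obj->req = NULL;
	request_release(req);
}

int main(void)
{
	struct request *req = request_alloc(3, 7);
	struct object obj = { .req = NULL };

	object_bind(&obj, req);
	object_unbind(&obj);
	request_release(req);
	printf("lifecycle ok\n");
	return 0;
}
```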
|
/drivers/s390/scsi/
  zfcp_fsf.c
      96  mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);  in zfcp_fsf_req_free()
      97  mempool_free(req, req->pool);  in zfcp_fsf_req_free()
     103  kfree(req);  in zfcp_fsf_req_free()
     464  req->handler(req);  in zfcp_fsf_req_complete()
     810  req = kmalloc(sizeof(*req), GFP_ATOMIC);  in zfcp_fsf_alloc()
     815  memset(req, 0, sizeof(*req));  in zfcp_fsf_alloc()
     817  return req;  in zfcp_fsf_alloc()
     868  req->qtcb->prefix.req_id = req->req_id;  in zfcp_fsf_req_create()
     872  req->qtcb->header.req_handle = req->req_id;  in zfcp_fsf_req_create()
     879  return req;  in zfcp_fsf_req_create()
    [all …]
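(zfcp frees a request back to wherever it came from: mempool_free() when req->pool is set, plain kfree() otherwise. A compilable toy of that origin-aware free, with a single-slot stand-in for the mempool — all names hypothetical:)

```c
#include <stdlib.h>
#include <string.h>

/* Toy stand-in for a mempool: a single preallocated slot. */
struct pool {
	void *slot;
	int in_use;
};

struct fsf_req {
	struct pool *pool;        /* non-NULL if taken from the pool */
	/* ... payload ... */
};

static struct fsf_req *fsf_req_alloc(struct pool *p)
{
	struct fsf_req *req;

	if (p && !p->in_use) {            /* emergency pool available */
		p->in_use = 1;
		req = p->slot;
	} else {                          /* fall back to plain allocation */
		req = malloc(sizeof(*req));
		if (!req)
			return NULL;
		p = NULL;
	}
	memset(req, 0, sizeof(*req));     /* mirrors line 815 */
	req->pool = p;                    /* remember the origin */
	return req;
}

static void fsf_req_free(struct fsf_req *req)
{
	if (req->pool)
		req->pool->in_use = 0;    /* mempool_free analogue, line 97 */
	else
		free(req);                /* kfree analogue, line 103 */
}

int main(void)
{
	static struct fsf_req emergency;           /* the pool's one slot */
	struct pool p = { .slot = &emergency };
	struct fsf_req *a = fsf_req_alloc(&p);     /* from the pool */
	struct fsf_req *b = fsf_req_alloc(&p);     /* pool busy: heap */

	if (b)
		fsf_req_free(b);
	fsf_req_free(a);
	return 0;
}
```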
|
/drivers/nvme/target/
  admin-cmd.c
     172  nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));  in nvmet_execute_get_log_page_noop()
     301  if (!req->ns->bdev || bdev_nonrot(req->ns->bdev)) {  in nvmet_execute_get_log_page_rmi()
     644  req->cmd->get_log_page.lid, req->sq->qid);  in nvmet_execute_get_log_page()
     827  nvmet_ns_changed(req->ns->subsys, req->ns->nsid);  in nvmet_execute_identify_ns()
     836  cpu_to_le64(req->ns->size >> req->ns->blksize_shift);  in nvmet_execute_identify_ns()
     837  switch (req->port->ana_state[req->ns->anagrpid]) {  in nvmet_execute_identify_ns()
    1087  if (req->ns->bdev && !bdev_nonrot(req->ns->bdev))  in nvmet_execute_id_cs_indep()
    1094  if (req->ns->bdev && !bdev_write_cache(req->ns->bdev))  in nvmet_execute_id_cs_indep()
    1159  req->cmd->identify.cns, req->sq->qid);  in nvmet_execute_identify()
    1233  nvmet_set_result(req, req->sq->ctrl->kato);  in nvmet_set_feat_kato()
    [all …]
|
  io-cmd-file.c
     108  if (req->f.bvec != req->inline_bvec) {  in nvmet_file_io_done()
     112  mempool_free(req->f.bvec, req->ns->bvec_pool);  in nvmet_file_io_done()
     134  pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;  in nvmet_file_execute_io()
     135  if (unlikely(pos + req->transfer_len > req->ns->size)) {  in nvmet_file_execute_io()
     136  nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));  in nvmet_file_execute_io()
     141  for_each_sg(req->sg, sg, req->sg_cnt, i) {  in nvmet_file_execute_io()
     222  if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))  in nvmet_file_execute_rw()
     234  req->f.bvec = req->inline_bvec;  in nvmet_file_execute_rw()
     238  req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);  in nvmet_file_execute_rw()
     262  nvmet_req_complete(req, nvmet_file_flush(req));  in nvmet_file_flush_work()
    [all …]
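(The io-cmd-file.c hits show the inline-vs-pool bvec choice: small transfers reuse req->inline_bvec, larger ones take a mempool allocation, and the completion path frees only what was actually pool-allocated. A userspace sketch of the same decision, with stub types and a hypothetical threshold:)

```c
#include <stdlib.h>

#define INLINE_VECS 8   /* hypothetical inline capacity */

struct io_req {
	struct iovec_stub { void *base; size_t len; } inline_bvec[INLINE_VECS];
	struct iovec_stub *bvec;
	int nr_vecs;
};

static int io_req_setup(struct io_req *req, int nr_vecs)
{
	req->nr_vecs = nr_vecs;
	if (nr_vecs <= INLINE_VECS) {
		req->bvec = req->inline_bvec;  /* line 234: no allocation */
	} else {
		/* mempool_alloc analogue, line 238 */
		req->bvec = calloc(nr_vecs, sizeof(*req->bvec));
		if (!req->bvec)
			return -1;
	}
	return 0;
}

static void io_req_done(struct io_req *req)
{
	if (req->bvec != req->inline_bvec)     /* line 108 */
		free(req->bvec);               /* mempool_free analogue, line 112 */
}

int main(void)
{
	struct io_req small, big;

	if (io_req_setup(&small, 4) || io_req_setup(&big, 32))
		return 1;
	io_req_done(&small);    /* no-op: inline storage */
	io_req_done(&big);      /* returns the allocation */
	return 0;
}
```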
|
  fabrics-cmd-auth.c
      50  if (req->sq->qid)  in nvmet_auth_negotiate()
     244  req->error_loc =  in nvmet_execute_auth_send()
     250  req->error_loc =  in nvmet_execute_auth_send()
     256  req->error_loc =  in nvmet_execute_auth_send()
     263  req->error_loc =  in nvmet_execute_auth_send()
     384  req->sq->dhchap_status, req->sq->dhchap_step);  in nvmet_execute_auth_send()
     441  __func__, ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,  in nvmet_auth_challenge()
     499  req->error_loc =  in nvmet_execute_auth_receive()
     533  ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);  in nvmet_execute_auth_receive()
     561  ctrl->cntlid, req->sq->qid, req->sq->dhchap_status);  in nvmet_execute_auth_receive()
    [all …]
|
  zns.c
     119  nvmet_ns_changed(req->ns->subsys, req->ns->nsid);  in nvmet_execute_identify_ns_zns()
     156  sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);  in nvmet_bdev_validate_zone_mgmt_recv()
     194  req->error_loc =  in nvmet_bdev_validate_zone_mgmt_recv()
     282  .req = req,  in nvmet_bdev_zone_zmgmt_recv_work()
     404  .req = req,  in nvmet_bdev_zone_mgmt_emulate_all()
     475  sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->zms.slba);  in nvmet_bdev_zmgmt_send_work()
     535  sector_t sect = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);  in nvmet_bdev_execute_zone_append()
     545  if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))  in nvmet_bdev_execute_zone_append()
     574  bio_init(bio, req->ns->bdev, req->inline_bvec,  in nvmet_bdev_execute_zone_append()
     577  bio = bio_alloc(req->ns->bdev, req->sg_cnt, opf, GFP_KERNEL);  in nvmet_bdev_execute_zone_append()
    [all …]
|
  io-cmd-bdev.c
     168  req->error_slba = le64_to_cpu(req->cmd->rw.slba);  in blk_to_nvme_status()
     171  req->error_slba =  in blk_to_nvme_status()
     184  nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));  in nvmet_bio_done()
     278  sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);  in nvmet_bdev_execute_rw()
     282  bio_init(bio, req->ns->bdev, req->inline_bvec,  in nvmet_bdev_execute_rw()
     297  for_each_sg(req->sg, sg, req->sg_cnt, i) {  in nvmet_bdev_execute_rw()
     347  bio_init(bio, req->ns->bdev, req->inline_bvec,  in nvmet_bdev_execute_flush()
     414  if (!nvmet_check_data_len_lte(req, nvmet_dsm_len(req)))  in nvmet_bdev_execute_dsm()
     452  nvmet_req_complete(req, errno_to_nvme_status(req, ret));  in nvmet_bdev_execute_write_zeroes()
     462  if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns))  in nvmet_bdev_parse_io_cmd()
    [all …]
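(nvmet_lba_to_sect() appears in several hits here and in zns.c. Its arithmetic — presumably a left shift by blksize_shift minus the 512-byte sector shift — can be checked standalone; a sketch, assuming that formula:)

```c
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#define SECTOR_SHIFT 9  /* Linux block-layer sectors are fixed at 512 bytes */

/*
 * Convert a namespace-relative LBA to a 512-byte block-layer sector:
 * the namespace block size is 1 << blksize_shift bytes, so each LBA
 * covers 1 << (blksize_shift - SECTOR_SHIFT) sectors.
 */
static uint64_t lba_to_sect(uint64_t slba, unsigned blksize_shift)
{
	return slba << (blksize_shift - SECTOR_SHIFT);
}

int main(void)
{
	/* 4 KiB blocks: LBA 10 starts at sector 80. */
	assert(lba_to_sect(10, 12) == 80);
	/* 512-byte blocks: LBA and sector coincide. */
	assert(lba_to_sect(10, 9) == 10);
	printf("ok\n");
	return 0;
}
```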
|
  fabrics-cmd.c
      18  if (req->cmd->prop_set.attrib & 1) {  in nvmet_execute_prop_set()
      19  req->error_loc =  in nvmet_execute_prop_set()
      30  req->error_loc =  in nvmet_execute_prop_set()
      35  nvmet_req_complete(req, status);  in nvmet_execute_prop_set()
      77  req->error_loc =  in nvmet_execute_prop_get()
      80  req->error_loc =  in nvmet_execute_prop_get()
     241  req->sq->ctrl = NULL;  in nvmet_install_queue()
     273  .port = req->port,  in nvmet_execute_admin_connect()
     274  .sq = req->sq,  in nvmet_execute_admin_connect()
     275  .ops = req->ops,  in nvmet_execute_admin_connect()
    [all …]
|
  passthru.c
     143  if (req->port->inline_data_size)  in nvmet_passthru_override_id_ctrl()
     217  struct request *rq = req->p.rq;  in nvmet_passthru_execute_cmd_work()
     267  if (req->sg_cnt > BIO_MAX_VECS)  in nvmet_passthru_map_sg()
     271  bio = &req->p.inline_bio;  in nvmet_passthru_map_sg()
     280  for_each_sg(req->sg, sg, req->sg_cnt, i) {  in nvmet_passthru_map_sg()
     292  nvmet_req_bio_put(req, bio);  in nvmet_passthru_map_sg()
     333  if (req->sg_cnt) {  in nvmet_passthru_execute_cmd()
     347  if (req->p.use_workqueue ||  in nvmet_passthru_execute_cmd()
     350  req->p.rq = rq;  in nvmet_passthru_execute_cmd()
     354  rq->end_io_data = req;  in nvmet_passthru_execute_cmd()
    [all …]
|
/drivers/block/drbd/
  drbd_req.c
      29  if (!req)  in drbd_req_new()
      31  memset(req, 0, sizeof(*req));  in drbd_req_new()
      55  return req;  in drbd_req_new()
     108  s, (unsigned long long)req->i.sector, req->i.size);  in drbd_req_destroy()
     248  bio_end_io_acct(req->master_bio, req->start_jif);  in drbd_req_complete()
     830  if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {  in __req_mod()
    1048  req->i.sector, req->i.size)) {  in do_remote_read()
    1144  req->i.sector, req->i.size >> 9, flags);  in drbd_process_discard_or_zeroes_req()
    1226  req->private_bio->bi_private = req;  in drbd_request_prepare()
    1355  if (!do_remote_read(req) && !req->private_bio)  in drbd_send_and_submit()
    [all …]
|
/drivers/staging/greybus/
  audio_apbridgea.c
      24  return gb_hd_output(connection->hd, &req, sizeof(req),  in gb_audio_apbridgea_set_config()
      45  return gb_hd_output(connection->hd, &req, sizeof(req),  in gb_audio_apbridgea_register_cport()
      62  ret = gb_hd_output(connection->hd, &req, sizeof(req),  in gb_audio_apbridgea_unregister_cport()
      80  return gb_hd_output(connection->hd, &req, sizeof(req),  in gb_audio_apbridgea_set_tx_data_size()
      93  return gb_hd_output(connection->hd, &req, sizeof(req),  in gb_audio_apbridgea_prepare_tx()
     107  return gb_hd_output(connection->hd, &req, sizeof(req),  in gb_audio_apbridgea_start_tx()
     119  return gb_hd_output(connection->hd, &req, sizeof(req),  in gb_audio_apbridgea_stop_tx()
     132  return gb_hd_output(connection->hd, &req, sizeof(req),  in gb_audio_apbridgea_shutdown_tx()
     146  return gb_hd_output(connection->hd, &req, sizeof(req),  in gb_audio_apbridgea_set_rx_data_size()
     159  return gb_hd_output(connection->hd, &req, sizeof(req),  in gb_audio_apbridgea_prepare_rx()
    [all …]
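(Every hit here is the same call shape: fill a fixed-size request struct on the stack and pass &req with sizeof(req) to gb_hd_output(). A self-contained sketch of that pattern; the opcode, field layout, and the hd_output() stand-in are invented for illustration:)

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy transport: in the kernel this slot is filled by gb_hd_output(). */
static int hd_output(const void *buf, size_t len)
{
	printf("sending %zu-byte request\n", len);
	return 0;
}

struct set_config_request {       /* hypothetical packed wire format */
	uint8_t  type;
	uint16_t i2s_port;
	uint32_t format;
	uint32_t rate;
} __attribute__((packed));

static int set_config(uint16_t port, uint32_t format, uint32_t rate)
{
	struct set_config_request req;

	memset(&req, 0, sizeof(req));
	req.type = 0x01;              /* hypothetical opcode */
	req.i2s_port = port;
	req.format = format;
	req.rate = rate;

	/* Every hit above follows this shape: &req, sizeof(req). */
	return hd_output(&req, sizeof(req));
}

int main(void)
{
	return set_config(0, 2, 48000);
}
```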
|
  audio_gb.c
      58  &req, sizeof(req), &resp, sizeof(resp));  in gb_audio_gb_get_control()
      79  &req, sizeof(req), NULL, 0);  in gb_audio_gb_set_control()
      91  &req, sizeof(req), NULL, 0);  in gb_audio_gb_enable_widget()
     103  &req, sizeof(req), NULL, 0);  in gb_audio_gb_disable_widget()
     144  &req, sizeof(req), NULL, 0);  in gb_audio_gb_set_pcm()
     157  &req, sizeof(req), NULL, 0);  in gb_audio_gb_set_tx_data_size()
     169  &req, sizeof(req), NULL, 0);  in gb_audio_gb_activate_tx()
     181  &req, sizeof(req), NULL, 0);  in gb_audio_gb_deactivate_tx()
     194  &req, sizeof(req), NULL, 0);  in gb_audio_gb_set_rx_data_size()
     206  &req, sizeof(req), NULL, 0);  in gb_audio_gb_activate_rx()
    [all …]
|
/drivers/net/ethernet/marvell/octeontx2/af/
  mcs_rvu_if.c
      84  mcs_set_lmac_mode(mcs, req->lmac_id, req->mode);  in rvu_mbox_handler_mcs_set_lmac_mode()
     233  mcs_reset_port(mcs, req->port_id, req->reset);  in rvu_mbox_handler_mcs_port_reset()
     254  mcs_clear_stats(mcs, req->type, req->id, req->dir);  in rvu_mbox_handler_mcs_clear_stats()
     334  mcs_get_sc_stats(mcs, rsp, req->id, req->dir);  in rvu_mbox_handler_mcs_get_sc_stats()
     358  mcs_get_sa_stats(mcs, rsp, req->id, req->dir);  in rvu_mbox_handler_mcs_get_sa_stats()
     491  mcs_ena_dis_flowid_entry(mcs, req->flow_id, req->dir, req->ena);  in rvu_mbox_handler_mcs_flowid_ena_entry()
     505  mcs_pn_table_write(mcs, req->pn_id, req->next_pn, req->dir);  in rvu_mbox_handler_mcs_pn_table_write()
     569  req->sa_index[i], req->dir);  in rvu_mbox_handler_mcs_sa_plcy_write()
     583  mcs_rx_sc_cam_write(mcs, req->sci, req->secy_id, req->sc_id);  in rvu_mbox_handler_mcs_rx_sc_cam_write()
     598  req->secy_id, req->dir);  in rvu_mbox_handler_mcs_secy_plcy_write()
    [all …]
|
/drivers/infiniband/hw/hfi1/
  user_sdma.c
     603  req->tids[req->tididx]) {  in compute_data_length()
     616  len = min(req->data_len - req->sent, (u32)req->info.fragsize);  in compute_data_length()
     721  tx->req = req;  in user_sdma_send_pkts()
     738  iovec = &req->iovs[req->iov_idx];  in user_sdma_send_pkts()
     978  tidval = req->tids[req->tididx];  in set_txreq_header()
     991  !req->tids[req->tididx]) {  in set_txreq_header()
     994  tidval = req->tids[req->tididx];  in set_txreq_header()
    1080  tidval = req->tids[req->tididx];  in set_txreq_header_ahg()
    1094  !req->tids[req->tididx])  in set_txreq_header_ahg()
    1096  tidval = req->tids[req->tididx];  in set_txreq_header_ahg()
    [all …]
|
/drivers/peci/
  request.c
     206  req = kzalloc(sizeof(*req), GFP_KERNEL);  in peci_request_alloc()
     207  if (!req)  in peci_request_alloc()
     214  return req;  in peci_request_alloc()
     224  kfree(req);  in peci_request_free()
     234  if (!req)  in peci_xfer_get_dib()
     245  return req;  in peci_xfer_get_dib()
     255  if (!req)  in peci_xfer_get_temp()
     277  if (!req)  in __pkg_cfg_read()
     308  if (!req)  in __pci_cfg_local_read()
     336  if (!req)  in __ep_pci_cfg_read()
    [all …]
|
/drivers/clk/sunxi/
  clk-sunxi.c
     137  req->n = freq_mhz * (req->m + 1) / ((req->k + 1) * parent_freq_mhz)  in sun6i_a31_get_pll1_factors()
     145  req->n = (req->n + 1) / 2 - 1;  in sun6i_a31_get_pll1_factors()
     146  req->m = (req->m + 1) / 2 - 1;  in sun6i_a31_get_pll1_factors()
     255  req->rate = req->parent_rate;  in sun5i_a13_get_ahb_factors()
     296  if (req->parent_rate && req->rate > req->parent_rate)  in sun6i_get_ahb1_factors()
     297  req->rate = req->parent_rate;  in sun6i_get_ahb1_factors()
     329  req->rate = req->parent_rate;  in sun6i_ahb1_recalc()
     333  req->rate /= req->m + 1;  in sun6i_ahb1_recalc()
     336  req->rate >>= req->p;  in sun6i_ahb1_recalc()
     351  req->rate = req->parent_rate;  in sun4i_get_apb1_factors()
    [all …]
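(The sun6i_ahb1_recalc() hits spell out the rate math: start from the parent rate, divide by m + 1, then shift right by p. A sketch of just those three lines; the surrounding pre-divider handling visible elsewhere in the file is omitted:)

```c
#include <stdio.h>

struct factors_request {
	unsigned long rate;
	unsigned long parent_rate;
	unsigned char m;   /* pre-divider: divides by m + 1 */
	unsigned char p;   /* post-divider: divides by 1 << p */
};

/* Mirrors the sun6i_ahb1_recalc() hits: divide, then shift. */
static void ahb1_recalc(struct factors_request *req)
{
	req->rate = req->parent_rate;  /* line 329 */
	req->rate /= req->m + 1;       /* line 333 */
	req->rate >>= req->p;          /* line 336 */
}

int main(void)
{
	struct factors_request req = {
		.parent_rate = 600000000, .m = 2, .p = 1,
	};

	ahb1_recalc(&req);
	printf("%lu\n", req.rate);     /* 600 MHz / 3 / 2 = 100 MHz */
	return 0;
}
```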
|
/drivers/crypto/inside-secure/
  safexcel_hash.c
      72  return req->len - req->processed;  in safexcel_queued_len()
     129  if (!req->finish && req->xcbcmac)  in safexcel_context_control()
     167  req->hmac_zlen || (req->processed != req->block_sz)) {  in safexcel_context_control()
     844  req->cache[req->block_sz-8] = (req->block_sz << 3) &  in safexcel_ahash_final()
     846  req->cache[req->block_sz-7] = (req->block_sz >> 5);  in safexcel_ahash_final()
     849  req->cache[req->block_sz-2] = (req->block_sz >> 5);  in safexcel_ahash_final()
     850  req->cache[req->block_sz-1] = (req->block_sz << 3) &  in safexcel_ahash_final()
     938  memset(req, 0, sizeof(*req));  in safexcel_sha1_init()
    1015  memset(req, 0, sizeof(*req));  in safexcel_hmac_sha1_init()
    1241  memset(req, 0, sizeof(*req));  in safexcel_sha256_init()
    [all …]
|
/drivers/macintosh/
  via-pmu.c
     718  vb = (req->reply[1] << 8) | req->reply[2];  in done_battery_state_ohare()
     730  pcharge = (req->reply[6] << 8) + req->reply[7];  in done_battery_state_ohare()
     802  max = (req->reply[4] << 8) | req->reply[5];  in done_battery_state_smart()
     989  req->data[i] = req->data[i+1];  in pmu_send_request()
    1017  req->data[i] = req->data[i+1];  in pmu_send_request()
    1030  req->data[i+2] = req->data[i];  in pmu_send_request()
    1031  req->data[3] = req->nbytes - 2;  in pmu_send_request()
    1512  reply_ptr = req->reply + req->reply_len;  in pmu_sr_intr()
    1741  (req.reply[2] << 8) + req.reply[3];  in pmu_get_time()
    2559  (*req->done)(req);  in pmu_polled_request()
    [all …]
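(The battery-state hits decode big-endian 16-bit values from the PMU reply buffer by hand, e.g. (req->reply[1] << 8) | req->reply[2]. A standalone helper for the same arithmetic; the reply bytes below are made up:)

```c
#include <stdio.h>
#include <stdint.h>

/* Big-endian 16-bit read at a byte offset, as in lines 718/730/802. */
static unsigned be16_at(const uint8_t *reply, int off)
{
	return ((unsigned)reply[off] << 8) | reply[off + 1];
}

int main(void)
{
	const uint8_t reply[8] = { 0x00, 0x2b, 0x67, 0x00,
				   0x10, 0x00, 0x03, 0xe8 };

	printf("vb = %u\n", be16_at(reply, 1));       /* line 718 style */
	printf("pcharge = %u\n", be16_at(reply, 6));  /* line 730 style */
	return 0;
}
```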
|
  via-macii.c
     218  req.sent = 0;  in macii_queue_poll()
     219  req.complete = 0;  in macii_queue_poll()
     227  last_req = &req;  in macii_queue_poll()
     252  if (req->nbytes < 2 || req->data[0] != ADB_PACKET || req->nbytes > 15) {  in macii_write()
     258  req->sent = 0;  in macii_write()
     266  last_req = req;  in macii_write()
     269  last_req = req;  in macii_write()
     447  req->sent = 1;  in macii_interrupt()
     473  (*req->done)(req);  in macii_interrupt()
     480  (*req->done)(req);  in macii_interrupt()
    [all …]
|
/drivers/s390/cio/
  ccwreq.c
      43  struct ccw_request *req = &cdev->private->req;  in ccwreq_next_path() local
      49  req->retries = req->maxretries;  in ccwreq_next_path()
      50  req->mask = lpm_adjust(req->mask >> 1, req->lpm);  in ccwreq_next_path()
      60  struct ccw_request *req = &cdev->private->req;  in ccwreq_stop() local
      69  req->callback(cdev, req->data, rc);  in ccwreq_stop()
      77  struct ccw_request *req = &cdev->private->req;  in ccwreq_do() local
     122  struct ccw_request *req = &cdev->private->req;  in ccw_request_start() local
     128  req->mask = req->lpm;  in ccw_request_start()
     130  req->retries = req->maxretries;  in ccw_request_start()
     131  req->mask = lpm_adjust(req->mask, req->lpm);  in ccw_request_start()
    [all …]
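(The mask manipulation in these hits suggests lpm_adjust() walks a one-hot path bit rightward until it overlaps the logical path mask, with req->mask >> 1 advancing to the next path on retry. A plausible reconstruction under that assumption — not a copy of the kernel helper:)

```c
#include <stdio.h>
#include <stdint.h>

/* Walk the candidate path bit right until it hits an enabled path. */
static uint16_t lpm_adjust(uint16_t mask, uint16_t lpm)
{
	while (mask && !(mask & lpm))
		mask >>= 1;
	return mask;
}

int main(void)
{
	uint16_t lpm = 0x55;               /* paths enabled on the device */
	uint16_t mask = 0x80;              /* start from the highest path */

	mask = lpm_adjust(mask, lpm);      /* line 131: first usable path */
	printf("first: 0x%02x\n", mask);   /* 0x80 misses, 0x40 hits */
	mask = lpm_adjust(mask >> 1, lpm); /* line 50: next path on retry */
	printf("next:  0x%02x\n", mask);   /* 0x20 misses, 0x10 hits */
	return 0;
}
```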
|
/drivers/infiniband/hw/bnxt_re/
  qplib_tlv.h
      53  return req->opcode;  in __get_cmdq_base_opcode()
      62  req->opcode = val;  in __set_cmdq_base_opcode()
      70  return req->cookie;  in __get_cmdq_base_cookie()
      79  req->cookie = val;  in __set_cmdq_base_cookie()
      87  return req->resp_addr;  in __get_cmdq_base_resp_addr()
      96  req->resp_addr = val;  in __set_cmdq_base_resp_addr()
     113  req->resp_size = val;  in __set_cmdq_base_resp_size()
     121  return req->cmd_size;  in __get_cmdq_base_cmd_size()
     130  req->cmd_size = val;  in __set_cmdq_base_cmd_size()
     138  return req->flags;  in __get_cmdq_base_flags()
    [all …]
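(qplib_tlv.h wraps every header field of the base command in a get/set pair, presumably so TLV-wrapped commands can redirect the accessors to an embedded header without touching callers. A trimmed sketch of the accessor pattern with a stand-in struct; field types are guesses:)

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the firmware command header the accessors wrap. */
struct cmdq_base {
	uint8_t  opcode;
	uint16_t cookie;
	uint8_t  cmd_size;
	uint16_t flags;
	uint64_t resp_addr;
};

/* Two representative get/set pairs in the style of the hits. */
static uint8_t get_cmdq_base_opcode(struct cmdq_base *req)
{
	return req->opcode;                      /* line 53 style */
}

static void set_cmdq_base_opcode(struct cmdq_base *req, uint8_t val)
{
	req->opcode = val;                       /* line 62 style */
}

static uint16_t get_cmdq_base_cookie(struct cmdq_base *req)
{
	return req->cookie;                      /* line 70 style */
}

static void set_cmdq_base_cookie(struct cmdq_base *req, uint16_t val)
{
	req->cookie = val;                       /* line 79 style */
}

int main(void)
{
	struct cmdq_base req = { 0 };

	set_cmdq_base_opcode(&req, 0x11);
	set_cmdq_base_cookie(&req, 42);
	printf("opcode=%#x cookie=%u\n",
	       get_cmdq_base_opcode(&req), get_cmdq_base_cookie(&req));
	return 0;
}
```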
|
/drivers/usb/gadget/udc/
  renesas_usbf.c
     612  req->req.actual, req->req.length);  in usbf_ep0_pio_in()
     662  req->req.actual, req->req.length);  in usbf_ep0_pio_out()
     957  req->req.actual, req->req.length);  in usbf_epn_dma_in()
    1019  req->req.actual, req->req.length);  in usbf_epn_pio_out()
    1265  req->req.dma + req->req.actual,  in usbf_epn_dma_out()
    1291  req->req.dma + req->req.actual,  in usbf_epn_dma_out()
    1328  req->req.actual, req->req.length);  in usbf_epn_dma_out()
    1391  req->req.dma + req->req.actual,  in usbf_epn_dma_out()
    1551  req->req.actual, req->req.length, req->req.status);  in usbf_ep_req_done()
    2001  req->req.length, req->req.zero, req->req.short_not_ok);  in usbf_ep_queue()
    [all …]
|
  aspeed_udc.c
     287  req, req->req.actual, req->req.length,  in ast_udc_done()
     535  last = req->req.length - req->req.actual;  in ast_udc_epn_kick()
     558  last = req->req.length - req->req.actual;  in ast_udc_epn_kick_desc()
     564  req->req.actual, req->req.length,  in ast_udc_epn_kick_desc()
     589  last = req->req.length - req->req.actual;  in ast_udc_ep0_queue()
     599  "dma", req->req.dma + req->req.actual,  in ast_udc_ep0_queue()
     601  req->req.actual, req->req.length,  in ast_udc_ep0_queue()
     613  "dma", req->req.dma + req->req.actual,  in ast_udc_ep0_queue()
     802  req->req.actual, req->req.length);  in ast_udc_ep0_out()
     814  req->req.actual, req->req.length);  in ast_udc_ep0_out()
    [all …]
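(Both UDC drivers keep transfer bookkeeping in the req->req.length / req->req.actual pair: remaining bytes are always length minus actual, and the DMA address advances by actual. A toy drain loop over that invariant, with a stub request type and an arbitrary packet size:)

```c
#include <stdio.h>

/* The pair every hit reads: bytes requested vs. bytes done so far. */
struct usb_request_stub {
	unsigned length;   /* total transfer size requested */
	unsigned actual;   /* bytes transferred so far */
};

/* Mirrors the "last = req->req.length - req->req.actual" hits. */
static unsigned bytes_remaining(const struct usb_request_stub *req)
{
	return req->length - req->actual;
}

int main(void)
{
	struct usb_request_stub req = { .length = 512, .actual = 0 };
	const unsigned max_packet = 64;    /* hypothetical wMaxPacketSize */

	/* Drain the request one max-packet chunk at a time. */
	while (bytes_remaining(&req)) {
		unsigned chunk = bytes_remaining(&req);

		if (chunk > max_packet)
			chunk = max_packet;
		req.actual += chunk;       /* "hardware" moved chunk bytes */
		printf("sent %u, %u left\n", chunk, bytes_remaining(&req));
	}
	return 0;
}
```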
|
/drivers/crypto/marvell/cesa/
  cipher.c
      42  struct skcipher_request *req)  in mv_cesa_skcipher_req_iter_init() argument
      63  if (req->dst != req->src) {  in mv_cesa_skcipher_dma_cleanup()
      81  mv_cesa_skcipher_dma_cleanup(req);  in mv_cesa_skcipher_cleanup()
     139  if (sreq->offset < req->cryptlen)  in mv_cesa_skcipher_std_process()
     328  if (req->src != req->dst) {  in mv_cesa_skcipher_dma_req_init()
     397  if (req->dst != req->src)  in mv_cesa_skcipher_dma_req_init()
     403  req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);  in mv_cesa_skcipher_dma_req_init()
     435  creq->src_nents = sg_nents_for_len(req->src, req->cryptlen);  in mv_cesa_skcipher_req_init()
     440  creq->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);  in mv_cesa_skcipher_req_init()
     464  if (!req->cryptlen)  in mv_cesa_skcipher_queue_req()
    [all …]
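(The line-403 hit encodes the in-place/out-of-place distinction: when dst == src the single buffer is both read and written by the engine, so it must be mapped bidirectionally; otherwise src is only ever read. A sketch of that choice with stub types — not the kernel DMA API:)

```c
#include <stdio.h>

enum dma_dir { DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_BIDIRECTIONAL };

struct skcipher_req_stub {
	void *src;
	void *dst;
};

/*
 * Mirrors line 403: in-place requests (dst == src) map the one buffer
 * bidirectionally; out-of-place requests map src to-device only.
 */
static enum dma_dir src_dma_dir(const struct skcipher_req_stub *req)
{
	return req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
}

int main(void)
{
	char in[16], out[16];
	struct skcipher_req_stub out_of_place = { .src = in, .dst = out };
	struct skcipher_req_stub in_place = { .src = in, .dst = in };

	printf("out-of-place: %s\n",
	       src_dma_dir(&out_of_place) == DMA_TO_DEVICE
	       ? "TO_DEVICE" : "BIDIRECTIONAL");
	printf("in-place:     %s\n",
	       src_dma_dir(&in_place) == DMA_BIDIRECTIONAL
	       ? "BIDIRECTIONAL" : "TO_DEVICE");
	return 0;
}
```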
|
/drivers/accel/ivpu/
  ivpu_jsm_msg.c
     102  req.payload.register_db.db_idx = db_id;  in ivpu_jsm_register_db()
     103  req.payload.register_db.jobq_base = jobq_base;  in ivpu_jsm_register_db()
     104  req.payload.register_db.jobq_size = jobq_size;  in ivpu_jsm_register_db()
     105  req.payload.register_db.host_ssid = ctx_id;  in ivpu_jsm_register_db()
     121  req.payload.unregister_db.db_idx = db_id;  in ivpu_jsm_unregister_db()
     163  req.payload.engine_reset.engine_idx = engine;  in ivpu_jsm_reset_engine()
     276  req.payload.pwr_d0i3_enter.send_response = 1;  in ivpu_jsm_pwr_d0i3_enter()
     294  req.payload.hws_create_cmdq.process_id = pid;  in ivpu_jsm_hws_create_cmdq()
     297  req.payload.hws_create_cmdq.cmdq_id = cmdq_id;  in ivpu_jsm_hws_create_cmdq()
     333  req.payload.hws_register_db.db_id = db_id;  in ivpu_jsm_hws_register_db()
    [all …]
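(The ivpu hits all fill opcode-specific fields under req.payload before sending — a tagged-union message style. A self-contained sketch modeled on the field names in the hits; the layout, opcodes, and send_msg() stand-in are hypothetical:)

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical message layout: one header, a union of per-opcode
 * payloads, always sent at full size. */
enum jsm_opcode { JSM_REGISTER_DB = 1, JSM_ENGINE_RESET = 2 };

struct jsm_msg {
	uint32_t opcode;
	union {
		struct {
			uint32_t db_idx;
			uint64_t jobq_base;
			uint32_t jobq_size;
			uint32_t host_ssid;
		} register_db;                 /* lines 102-105 */
		struct {
			uint32_t engine_idx;
		} engine_reset;                /* line 163 */
	} payload;
};

static int send_msg(const struct jsm_msg *req)
{
	printf("opcode %u\n", (unsigned)req->opcode);
	return 0;
}

int main(void)
{
	struct jsm_msg req;

	memset(&req, 0, sizeof(req));
	req.opcode = JSM_REGISTER_DB;
	req.payload.register_db.db_idx = 3;    /* line 102 style */
	req.payload.register_db.jobq_base = 0x1000;
	req.payload.register_db.jobq_size = 256;
	req.payload.register_db.host_ssid = 1;
	return send_msg(&req);
}
```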
|