/linux/drivers/ufs/core/

ufshcd-priv.h
    107  if (hba->vops && hba->vops->exit)  in ufshcd_vops_exit()
    108  return hba->vops->exit(hba);  in ufshcd_vops_exit()
    197  hba->vops->fixup_dev_quirks(hba);  in ufshcd_vops_fixup_dev_quirks()
    203  if (hba->vops && hba->vops->suspend)  in ufshcd_vops_suspend()
    211  if (hba->vops && hba->vops->resume)  in ufshcd_vops_resume()
    212  return hba->vops->resume(hba, op);  in ufshcd_vops_resume()
    220  hba->vops->dbg_register_dump(hba);  in ufshcd_vops_dbg_register_dump()
    226  return hba->vops->device_reset(hba);  in ufshcd_vops_device_reset()
    242  hba->vops->reinit_notify(hba);  in ufshcd_vops_reinit_notify()
    272  if (hba->vops && hba->vops->config_esi)  in ufshcd_mcq_vops_config_esi()
    [all …]

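Nearly every hit in ufshcd-priv.h has the same shape: a thin wrapper that calls an optional variant-ops hook only if the platform driver installed one. A minimal userspace sketch of that guard-then-dispatch pattern, using simplified stand-in types rather than the kernel's `struct ufs_hba` definitions:

```c
#include <stdio.h>

/* Simplified stand-ins for struct ufs_hba and its variant-ops table. */
struct hba;

struct hba_variant_ops {
	int  (*init)(struct hba *hba);      /* optional hook */
	void (*exit)(struct hba *hba);      /* optional hook */
};

struct hba {
	const struct hba_variant_ops *vops; /* may be NULL for generic hosts */
};

/* Guard-then-dispatch: call the hook only if the variant driver set it. */
static inline int hba_vops_init(struct hba *hba)
{
	if (hba->vops && hba->vops->init)
		return hba->vops->init(hba);
	return 0;                           /* a missing hook is not an error */
}

static inline void hba_vops_exit(struct hba *hba)
{
	if (hba->vops && hba->vops->exit)
		hba->vops->exit(hba);
}

static int demo_init(struct hba *hba)
{
	(void)hba;
	puts("variant init");
	return 0;
}

int main(void)
{
	const struct hba_variant_ops demo_ops = { .init = demo_init };
	struct hba with_ops = { .vops = &demo_ops };
	struct hba generic  = { .vops = NULL };

	hba_vops_init(&with_ops);           /* dispatches to demo_init() */
	hba_vops_init(&generic);            /* silently a no-op */
	hba_vops_exit(&with_ops);           /* .exit unset: also a no-op */
	return 0;
}
```

Keeping the NULL checks inside the wrappers lets core code call them unconditionally, so generic hosts with no variant ops need no special-casing at the call sites.
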
ufshcd.c
    640  hba->saved_err, hba->saved_uic_err);  in ufshcd_print_host_state()
    656  hba->eh_flags, hba->req_abort_count);  in ufshcd_print_host_state()
    658  hba->ufs_version, hba->capabilities, hba->caps);  in ufshcd_print_host_state()
    2421  hba->reserved_slot = hba->nutrs - 1;  in ufshcd_hba_capabilities()
    3935  hba->lrb = devm_kcalloc(hba->dev,  in ufshcd_memory_alloc()
    6403  (!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||  in ufshcd_err_handling_should_stop()
    6525  hba->is_powered, hba->shutting_down, hba->saved_err,  in ufshcd_err_handler()
    6827  hba->saved_err |= hba->errors;  in ufshcd_check_errors()
    8667  devm_kfree(hba->dev, hba->lrb);  in ufshcd_release_sdb_queue()
    9640  hba->rpm_lvl : hba->spm_lvl;  in __ufshcd_wl_suspend()
    [all …]

ufs-mcq.c
    147  if (!hba->vops || !hba->vops->get_hba_mac) {  in ufshcd_mcq_decide_queue_depth()
    158  mac = hba->vops->get_hba_mac(hba);  in ufshcd_mcq_decide_queue_depth()
    373  ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQD, i),  in ufshcd_mcq_make_queues_operational()
    376  ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_SQIS, i),  in ufshcd_mcq_make_queues_operational()
    386  ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQD, i),  in ufshcd_mcq_make_queues_operational()
    389  ufsmcq_writelx(hba, ufshcd_mcq_opr_offset(hba, OPR_CQIS, i),  in ufshcd_mcq_make_queues_operational()
    402  if (i < hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL])  in ufshcd_mcq_make_queues_operational()
    422  ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2,  in ufshcd_mcq_enable_esi()
    467  hba->uhq = devm_kzalloc(hba->dev,  in ufshcd_mcq_init()
    470  if (!hba->uhq) {  in ufshcd_mcq_init()
    [all …]

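The `ufshcd_mcq_make_queues_operational()` hits program one copy of each operation register group (SQ doorbell, SQ interrupt status, CQ doorbell, CQ interrupt status) per hardware queue. Assuming the usual base-plus-stride layout behind a helper like `ufshcd_mcq_opr_offset()` — the offsets and strides below are invented for illustration — the offset arithmetic looks like this:

```c
#include <stdint.h>
#include <stdio.h>

/* Per-operation register group: each hardware queue gets one copy,
 * laid out every 'stride' bytes starting at 'offset'. The values used
 * here are made up; real ones come from the controller's config space. */
struct opr_region {
	uint32_t offset;	/* offset of queue 0's copy */
	uint32_t stride;	/* distance between consecutive queues */
};

enum { OPR_SQD, OPR_SQIS, OPR_CQD, OPR_CQIS, OPR_MAX };

static uint32_t opr_offset(const struct opr_region opr[], int which, int queue)
{
	return opr[which].offset + opr[which].stride * (uint32_t)queue;
}

int main(void)
{
	const struct opr_region opr[OPR_MAX] = {
		[OPR_SQD]  = { .offset = 0x1000, .stride = 0x100 },
		[OPR_SQIS] = { .offset = 0x1040, .stride = 0x100 },
		[OPR_CQD]  = { .offset = 0x1080, .stride = 0x100 },
		[OPR_CQIS] = { .offset = 0x10c0, .stride = 0x100 },
	};

	for (int i = 0; i < 4; i++)	/* one register set per hardware queue */
		printf("queue %d: SQ doorbell @0x%x, CQ int status @0x%x\n",
		       i, (unsigned)opr_offset(opr, OPR_SQD, i),
		       (unsigned)opr_offset(opr, OPR_CQIS, i));
	return 0;
}
```
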
ufs-debugfs.c
    64  *val = hba->ee_usr_mask;  in ee_usr_mask_get()
    69  __acquires(&hba->host_sem)  in ufs_debugfs_get_user_access()
    71  down(&hba->host_sem);  in ufs_debugfs_get_user_access()
    73  up(&hba->host_sem);  in ufs_debugfs_get_user_access()
    76  ufshcd_rpm_get_sync(hba);  in ufs_debugfs_get_user_access()
    81  __releases(&hba->host_sem)  in ufs_debugfs_put_user_access()
    84  up(&hba->host_sem);  in ufs_debugfs_put_user_access()
    114  ee_ctrl_mask = hba->ee_drv_mask | (hba->ee_usr_mask & ~status);  in ufs_debugfs_exception_event()
    135  if (!hba->ee_usr_mask || pm_runtime_suspended(hba->dev) ||  in ufs_debugfs_restart_ee()
    149  p = &hba->saved_err;  in ufs_saved_err_show()
    [all …]

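The ufs-debugfs.c hits bracket every user-triggered access: take `host_sem`, back out (releasing it) if the host cannot be touched, otherwise pin the device with a runtime-PM reference; the put side undoes this in reverse. A userspace analogue of that bracket, with a mutex and a counter standing in for the semaphore and runtime PM (all names here are illustrative; build with -pthread):

```c
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_hba {
	pthread_mutex_t host_sem;   /* stands in for hba->host_sem */
	bool is_powered;            /* stands in for "host is usable" checks */
	int rpm_refs;               /* stands in for the runtime-PM refcount */
};

/* Acquire side: lock, validate, take a PM reference -- or back out. */
static int get_user_access(struct fake_hba *hba)
{
	pthread_mutex_lock(&hba->host_sem);
	if (!hba->is_powered) {
		pthread_mutex_unlock(&hba->host_sem);
		return -EBUSY;
	}
	hba->rpm_refs++;            /* ufshcd_rpm_get_sync() analogue */
	return 0;
}

/* Release side: undo in the opposite order. */
static void put_user_access(struct fake_hba *hba)
{
	hba->rpm_refs--;            /* ufshcd_rpm_put_sync() analogue */
	pthread_mutex_unlock(&hba->host_sem);
}

int main(void)
{
	struct fake_hba hba = {
		.host_sem = PTHREAD_MUTEX_INITIALIZER,
		.is_powered = true,
	};

	if (!get_user_access(&hba)) {
		printf("access granted, rpm_refs=%d\n", hba.rpm_refs);
		put_user_access(&hba);
	}
	return 0;
}
```
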
ufshcd-crypto.c
    27  ufshcd_hold(hba);  in ufshcd_program_key()
    29  if (hba->vops && hba->vops->program_key) {  in ufshcd_program_key()
    30  err = hba->vops->program_key(hba, cfg, slot);  in ufshcd_program_key()
    47  ufshcd_release(hba);  in ufshcd_program_key()
    55  struct ufs_hba *hba =  in ufshcd_crypto_keyslot_program() (local)
    102  struct ufs_hba *hba =  in ufshcd_crypto_keyslot_evict() (local)
    180  hba->crypto_cap_array =  in ufshcd_hba_init_crypto_capabilities()
    181  devm_kcalloc(hba->dev, hba->crypto_capabilities.num_crypto_cap,  in ufshcd_hba_init_crypto_capabilities()
    190  hba->dev, &hba->crypto_profile,  in ufshcd_hba_init_crypto_capabilities()
    198  hba->crypto_profile.dev = hba->dev;  in ufshcd_hba_init_crypto_capabilities()
    [all …]

ufs-hwmon.c
    14  struct ufs_hba *hba;  (member)
    54  struct ufs_hba *hba = data->hba;  in ufs_hwmon_read() (local)
    57  down(&hba->host_sem);  in ufs_hwmon_read()
    60  up(&hba->host_sem);  in ufs_hwmon_read()
    64  ufshcd_rpm_get_sync(hba);  in ufs_hwmon_read()
    91  up(&hba->host_sem);  in ufs_hwmon_read()
    100  struct ufs_hba *hba = data->hba;  in ufs_hwmon_write() (local)
    109  down(&hba->host_sem);  in ufs_hwmon_write()
    112  up(&hba->host_sem);  in ufs_hwmon_write()
    125  up(&hba->host_sem);  in ufs_hwmon_write()
    [all …]

ufs-sysfs.c
    106  hba->rpm_lvl = value;  in ufs_sysfs_pm_lvl_store()
    108  hba->spm_lvl = value;  in ufs_sysfs_pm_lvl_store()
    205  up(&hba->host_sem);  in ufshcd_read_hci_reg()
    210  ufshcd_hold(hba);  in ufshcd_read_hci_reg()
    212  ufshcd_release(hba);  in ufshcd_read_hci_reg()
    215  up(&hba->host_sem);  in ufshcd_read_hci_reg()
    262  up(&hba->host_sem);  in auto_hibern8_store()
    281  if (!ufshcd_is_wb_allowed(hba) || (ufshcd_is_clkscaling_supported(hba)  in wb_on_store()
    307  up(&hba->host_sem);  in wb_on_store()
    379  up(&hba->host_sem);  in enable_wb_buf_flush_store()
    [all …]

ufshcd-crypto.h
    40  static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba,  in ufshcd_crypto_fill_prdt() (argument)
    46  if (crypt_ctx && hba->vops && hba->vops->fill_crypto_prdt)  in ufshcd_crypto_fill_prdt()
    47  return hba->vops->fill_crypto_prdt(hba, crypt_ctx,  in ufshcd_crypto_fill_prdt()
    56  if (!(hba->quirks & UFSHCD_QUIRK_KEYS_IN_PRDT))  in ufshcd_crypto_clear_prdt()
    64  ufshcd_sg_entry_size(hba) * scsi_sg_count(lrbp->cmd));  in ufshcd_crypto_clear_prdt()
    67  bool ufshcd_crypto_enable(struct ufs_hba *hba);
    69  int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba);
    71  void ufshcd_init_crypto(struct ufs_hba *hba);
    84  static inline int ufshcd_crypto_fill_prdt(struct ufs_hba *hba,  in ufshcd_crypto_fill_prdt() (argument)
    93  static inline bool ufshcd_crypto_enable(struct ufs_hba *hba)  in ufshcd_crypto_enable() (argument)
    [all …]

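The duplicate definitions of `ufshcd_crypto_fill_prdt()` at lines 40 and 84 are the usual header idiom: real static-inline helpers inside the crypto config guard, empty stubs outside it, so call sites never need `#ifdef`s of their own. A compact illustration of that idiom — the config symbol and helper names below are placeholders, not the kernel's:

```c
/* crypto_helpers.h -- "real helper or empty stub" header idiom, sketched. */
#ifndef CRYPTO_HELPERS_H
#define CRYPTO_HELPERS_H

struct hba;				/* opaque to callers of this header */

#ifdef CONFIG_EXAMPLE_CRYPTO		/* placeholder config symbol */

/* Real implementation lives in a .c file built only with crypto support. */
int hba_program_key(struct hba *hba, int slot);

static inline int hba_fill_crypto_prdt(struct hba *hba, int slot)
{
	return hba_program_key(hba, slot);
}

#else  /* !CONFIG_EXAMPLE_CRYPTO */

/* Stubs keep every call site compiling when crypto support is disabled. */
static inline int hba_fill_crypto_prdt(struct hba *hba, int slot)
{
	(void)hba;
	(void)slot;
	return 0;
}

#endif /* CONFIG_EXAMPLE_CRYPTO */
#endif /* CRYPTO_HELPERS_H */
```
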
ufs_bsg.c
    43  dev_err(hba->dev, "Illegal desc size\n");  in ufs_bsg_alloc_desc_buffer()
    48  dev_err(hba->dev, "Illegal desc size\n");  in ufs_bsg_alloc_desc_buffer()
    79  if (hba->ufs_version < ufshci_version(4, 0) || !hba->dev_info.b_advanced_rpmb_en)  in ufs_bsg_exec_advanced_rpmb_req()
    147  ufshcd_rpm_get_sync(hba);  in ufs_bsg_request()
    173  ret = ufshcd_send_uic_cmd(hba, &uc);  in ufs_bsg_request()
    194  ufshcd_rpm_put_sync(hba);  in ufs_bsg_request()
    215  if (!hba->bsg_queue)  in ufs_bsg_remove()
    218  bsg_remove_queue(hba->bsg_queue);  in ufs_bsg_remove()
    237  int ufs_bsg_probe(struct ufs_hba *hba)  in ufs_bsg_probe() (argument)
    240  struct Scsi_Host *shost = hba->host;  in ufs_bsg_probe()
    [all …]

/linux/drivers/ufs/host/

ufs-mediatek.c
    149  ufshcd_dme_get(hba,  in ufs_mtk_cfg_unipro_cg()
    260  hba->ahit = 0;  in ufs_mtk_hce_enable_notify()
    939  host->mcq_intr_info[i].hba = hba;  in ufs_mtk_init_mcq_irq()
    979  host->hba = hba;  in ufs_mtk_init()
    1215  if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)  in ufs_mtk_setup_clk_gating()
    1317  ufshcd_mcq_config_mac(hba, hba->nutrs);  in ufs_mtk_link_set_hpm()
    1374  if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)  in ufs_mtk_dev_vreg_set_lpm()
    1382  if (lpm && hba->vreg_info.vcc && hba->vreg_info.vcc->enabled) {  in ufs_mtk_dev_vreg_set_lpm()
    1546  if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&  in ufs_mtk_fixup_dev_quirks()
    1702  hba->mcq_base = hba->mmio_base + MCQ_QUEUE_OFFSET(hba->mcq_capabilities);  in ufs_mtk_mcq_config_resource()
    [all …]

ufs-hisi.c
    35  err |= ufshcd_dme_get(hba,  in ufs_hisi_check_hibern8()
    52  err |= ufshcd_dme_get(hba,  in ufs_hisi_check_hibern8()
    222  dev_info(hba->dev,  in ufs_hisi_link_startup_pre_change()
    372  dev_err(hba->dev,  in ufs_hisi_pwr_change_notify()
    383  dev_err(hba->dev,  in ufs_hisi_pwr_change_notify()
    478  host->hba = hba;  in ufs_hisi_init_common()
    512  ufs_hisi_clk_init(hba);  in ufs_hi3660_init()
    514  ufs_hisi_soc_init(hba);  in ufs_hi3660_init()
    531  ufs_hisi_clk_init(hba);  in ufs_hi3670_init()
    533  ufs_hisi_soc_init(hba);  in ufs_hi3670_init()
    [all …]

ufshcd-pci.c
    168  (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))  in ufs_intel_lkf_pwr_change_notify()
    275  intel_cache_ltr(hba);  in intel_ltr_set()
    297  intel_cache_ltr(hba);  in intel_add_debugfs()
    374  intel_add_debugfs(hba);  in ufs_intel_common_init()
    381  intel_ltr_hide(hba->dev);  in ufs_intel_common_exit()
    458  hba->mcq_base = hba->mmio_base + ufshcd_mcq_queue_cfg_addr(hba);  in ufs_qemu_mcq_config_resource()
    479  opr = &hba->mcq_opr[i];  in ufs_qemu_op_runtime_config()
    547  ufshcd_set_link_off(hba);  in ufshcd_pci_restore()
    564  ufshcd_remove(hba);  in ufshcd_pci_remove()
    565  ufshcd_dealloc_host(hba);  in ufshcd_pci_remove()
    [all …]

ufs-qcom.c
    117  struct ufs_hba *hba = host->hba;  in ufs_qcom_ice_init() (local)
    248  err = ufshcd_dme_get(hba,  in ufs_qcom_check_hibern8()
    264  err = ufshcd_dme_get(hba,  in ufs_qcom_check_hibern8()
    302  ufshcd_disable_irq(hba);  in ufs_qcom_host_reset()
    328  ufshcd_enable_irq(hba);  in ufs_qcom_host_reset()
    1056  host->hba = hba;  in ufs_qcom_init()
    1111  ufs_qcom_set_caps(hba);  in ufs_qcom_init()
    1598  res = &hba->res[i];  in ufs_qcom_mcq_config_resource()
    1742  nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];  in ufs_qcom_config_esi()
    1770  devm_free_irq(hba->dev, desc->irq, hba);  in ufs_qcom_config_esi()
    [all …]

ufs-exynos.c
    224  struct ufs_hba *hba = ufs->hba;  in exynosauto_ufs_post_hce_enable() (local)
    238  struct ufs_hba *hba = ufs->hba;  in exynosauto_ufs_pre_link() (local)
    293  struct ufs_hba *hba = ufs->hba;  in exynosauto_ufs_pre_pwr_change() (local)
    306  struct ufs_hba *hba = ufs->hba;  in exynosauto_ufs_post_pwr_change() (local)
    321  struct ufs_hba *hba = ufs->hba;  in exynos7_ufs_pre_link() (local)
    351  struct ufs_hba *hba = ufs->hba;  in exynos7_ufs_post_link() (local)
    381  struct ufs_hba *hba = ufs->hba;  in exynos7_ufs_post_pwr_change() (local)
    427  struct ufs_hba *hba = ufs->hba;  in exynos_ufs_get_clk_info() (local)
    494  struct ufs_hba *hba = ufs->hba;  in exynos_ufs_set_pwm_clk_div() (local)
    503  struct ufs_hba *hba = ufs->hba;  in exynos_ufs_calc_pwm_clk_div() (local)
    [all …]

cdns-pltfrm.c
    47  ufshcd_dme_get(hba, UIC_ARG_MIB(T_PROTOCOLID),  in cdns_ufs_get_l4_attr()
    49  ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTFLAGS),  in cdns_ufs_get_l4_attr()
    139  ufshcd_readl(hba, CDNS_UFS_REG_HCLKDIV);  in cdns_ufs_set_hclkdiv()
    157  return cdns_ufs_set_hclkdiv(hba);  in cdns_ufs_hce_enable_notify()
    170  cdns_ufs_get_l4_attr(hba);  in cdns_ufs_hibern8_notify()
    172  cdns_ufs_set_l4_attr(hba);  in cdns_ufs_hibern8_notify()
    195  ufshcd_disable_host_tx_lcc(hba);  in cdns_ufs_link_startup_notify()
    201  hba->ahit = 0;  in cdns_ufs_link_startup_notify()
    216  struct device *dev = hba->dev;  in cdns_ufs_init()
    222  ufshcd_set_variant(hba, host);  in cdns_ufs_init()
    [all …]

ufs-sprd.c
    134  struct device *dev = hba->dev;  in ufs_sprd_common_init()
    149  host->hba = hba;  in ufs_sprd_common_init()
    150  ufshcd_set_variant(hba, host);  in ufs_sprd_common_init()
    230  ufs_sprd_n6_host_reset(hba);  in ufs_sprd_n6_key_acc_enable()
    248  hba->caps &= ~UFSHCD_CAP_CRYPTO;  in ufs_sprd_n6_key_acc_enable()
    256  ret = ufs_sprd_common_init(hba);  in ufs_sprd_n6_init()
    267  ufs_sprd_n6_key_acc_enable(hba);  in ufs_sprd_n6_init()
    334  ufs_sprd_n6_host_reset(hba);  in sprd_ufs_n6_hce_enable_notify()
    341  err = ufs_sprd_n6_phy_init(hba);  in sprd_ufs_n6_hce_enable_notify()
    347  ufs_sprd_get_unipro_ver(hba);  in sprd_ufs_n6_hce_enable_notify()
    [all …]

/linux/drivers/scsi/

hptiop.c
    349  hba->ops->post_req(hba, &(hba->u.mvfrey.internal_req));  in iop_send_sync_request_mvfrey()
    384  hba->ops->disable_intr(hba);  in iop_send_sync_msg()
    385  hba->ops->post_msg(hba, msg);  in iop_send_sync_msg()
    389  hba->ops->iop_intr(hba);  in iop_send_sync_msg()
    396  hba->ops->enable_intr(hba);  in iop_send_sync_msg()
    571  hba->ops->enable_intr(hba);  in hptiop_initialize_iop()
    773  free_req(hba, &hba->reqs[tag]);  in hptiop_finish_scsi_req()
    1050  hba->ops->post_req(hba, _req);  in hptiop_queuecommand_lck()
    1487  hba->ops->unmap_pci_bar(hba);  in hptiop_probe()
    1515  hba->ops->disable_intr(hba);  in hptiop_shutdown()
    [all …]

stex.c
    411  struct req_msg *req = hba->dma_mem + hba->req_head * hba->rq_size;  in stex_alloc_req()
    414  hba->req_head %= hba->rq_count+1;  in stex_alloc_req()
    548  addr = hba->dma_handle + hba->req_head * hba->rq_size;  in stex_ss_send_cmd()
    553  hba->req_head %= hba->rq_count+1;  in stex_ss_send_cmd()
    700  req = hba->alloc_rq(hba);  in stex_queuecommand_lck()
    719  if (!hba->map_sg(hba, req, &hba->ccb[tag])) {  in stex_queuecommand_lck()
    724  hba->send(hba, req, tag);  in stex_queuecommand_lck()
    1074  status_phys = hba->dma_handle + (hba->rq_count+1) * hba->rq_size;  in stex_common_handshake()
    1263  hba->wait_ccb = &hba->ccb[tag];  in stex_abort()
    1869  req = hba->alloc_rq(hba);  in stex_hba_stop()
    [all …]

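The stex.c hits show its request ring carved out of a single coherent DMA buffer: slot i sits at `dma_mem + i * rq_size`, `req_head` wraps modulo `rq_count + 1`, and the status area begins right after the last slot (`(rq_count + 1) * rq_size`). A host-memory sketch of that layout and index arithmetic, with arbitrary sizes:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct ring {
	uint8_t *mem;		/* stands in for the coherent DMA buffer   */
	size_t   rq_size;	/* bytes per request slot                  */
	unsigned rq_count;	/* ring holds rq_count + 1 slots           */
	unsigned req_head;	/* next slot to hand out                   */
};

/* Hand out the next request slot, then advance the head with wrap-around. */
static void *ring_alloc_req(struct ring *r)
{
	void *slot = r->mem + (size_t)r->req_head * r->rq_size;

	r->req_head = (r->req_head + 1) % (r->rq_count + 1);
	return slot;
}

/* The status area begins immediately after the last request slot. */
static size_t ring_status_offset(const struct ring *r)
{
	return (size_t)(r->rq_count + 1) * r->rq_size;
}

int main(void)
{
	struct ring r = { .rq_size = 64, .rq_count = 3 };

	r.mem = calloc(r.rq_count + 2, r.rq_size);	/* ring + demo status area */
	if (!r.mem)
		return 1;

	for (int i = 0; i < 6; i++)	/* wraps after four allocations */
		printf("req %d -> offset %zu\n", i,
		       (size_t)((uint8_t *)ring_alloc_req(&r) - r.mem));
	printf("status area at offset %zu\n", ring_status_offset(&r));

	free(r.mem);
	return 0;
}
```
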
/linux/drivers/scsi/bnx2i/

bnx2i_init.c
    101  hba->pci_did);  in bnx2i_identify_device()
    120  hba = tmp_hba;  in get_adapter_list_head()
    126  return hba;  in get_adapter_list_head()
    143  return hba;  in bnx2i_find_hba_for_cnic()
    285  hba->cnic = cnic;  in bnx2i_init_one()
    288  hba->age++;  in bnx2i_init_one()
    323  if (!hba) {  in bnx2i_ulp_init()
    347  if (!hba) {  in bnx2i_ulp_exit()
    357  hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);  in bnx2i_ulp_exit()
    378  if (!hba)  in bnx2i_get_stats()
    [all …]

bnx2i_iscsi.c
    398  bnx2i_ep->hba = hba;  in bnx2i_alloc_ep()
    543  hba->mp_bd_tbl, hba->mp_bd_dma);  in bnx2i_setup_mp_bdt()
    571  hba->mp_bd_tbl, hba->mp_bd_dma);  in bnx2i_free_mp_bdt()
    805  hba->pci_did = hba->pcidev->device;  in bnx2i_alloc_hba()
    806  hba->pci_vid = hba->pcidev->vendor;  in bnx2i_alloc_hba()
    863  hba->num_ccell = hba->max_sqes / 2;  in bnx2i_alloc_hba()
    1371  bnx2i_conn->hba = hba;  in bnx2i_conn_create()
    1441  if (bnx2i_ep->hba != hba) {  in bnx2i_conn_bind()
    1531  struct bnx2i_hba *hba = bnx2i_ep->hba;  in bnx2i_ep_get_param() (local)
    1676  if (hba && hba->cnic)  in bnx2i_check_route()
    [all …]

/linux/include/ufs/

ufshcd.h
    101  #define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \  (argument)
    103  #define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \  (argument)
    105  #define ufshcd_is_link_broken(hba) ((hba)->uic_link_state == \  (argument)
    108  #define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \  (argument)
    110  #define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \  (argument)
    112  #define ufshcd_set_link_broken(hba) ((hba)->uic_link_state = \  (argument)
    1337  BUG_ON(!hba);  in ufshcd_set_variant()
    1466  if (hba->vops && hba->vops->init)  in ufshcd_vops_init()
    1467  return hba->vops->init(hba);  in ufshcd_vops_init()
    1474  if (hba->vops && hba->vops->phy_initialization)  in ufshcd_vops_phy_initialization()
    [all …]

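The line 101–112 hits are matched predicate/setter macro pairs over `hba->uic_link_state`; the constants they compare against are hidden behind the wrapped `\` continuations. A sketch of that shape, with placeholder state names standing in for those constants:

```c
#include <stdio.h>

/* Illustration only: LINK_OFF/ACTIVE/HIBERN8/BROKEN are placeholder names
 * for the constants cut off by the wrapped lines in the hits above. */
enum link_state { LINK_OFF, LINK_ACTIVE, LINK_HIBERN8, LINK_BROKEN };

struct hba { enum link_state uic_link_state; };

/* Predicates test the current link state... */
#define hba_is_link_active(h)   ((h)->uic_link_state == LINK_ACTIVE)
#define hba_is_link_hibern8(h)  ((h)->uic_link_state == LINK_HIBERN8)
#define hba_is_link_broken(h)   ((h)->uic_link_state == LINK_BROKEN)

/* ...and each has a matching setter that assigns it. */
#define hba_set_link_active(h)  ((h)->uic_link_state = LINK_ACTIVE)
#define hba_set_link_hibern8(h) ((h)->uic_link_state = LINK_HIBERN8)
#define hba_set_link_broken(h)  ((h)->uic_link_state = LINK_BROKEN)

int main(void)
{
	struct hba hba = { .uic_link_state = LINK_OFF };

	hba_set_link_active(&hba);
	printf("active=%d hibern8=%d broken=%d\n",
	       hba_is_link_active(&hba), hba_is_link_hibern8(&hba),
	       hba_is_link_broken(&hba));
	return 0;
}
```
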
/linux/drivers/parisc/

lba_pci.c
    113  return container_of(hba, struct lba_device, hba);  in LBA_DEV()
    739  ldev->hba.io_space.name,  in lba_fixup_bus()
    740  ldev->hba.io_space.start, ldev->hba.io_space.end,  in lba_fixup_bus()
    741  ldev->hba.io_space.flags);  in lba_fixup_bus()
    743  ldev->hba.lmmio_space.name,  in lba_fixup_bus()
    744  ldev->hba.lmmio_space.start, ldev->hba.lmmio_space.end,  in lba_fixup_bus()
    1141  r = &lba_dev->hba.io_space;  in lba_pat_resources()
    1183  r = &(lba_dev->hba.bus_num);  in lba_legacy_resources()
    1323  r = &(lba_dev->hba.io_space);  in lba_legacy_resources()
    1358  d->hba.base_addr,  in lba_hw_init()
    [all …]

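The `LBA_DEV()` hit is the standard `container_of()` recovery: generic PCI-HBA bookkeeping is embedded as the `hba` member of the driver-private `struct lba_device`, and the macro walks back from a pointer to that member to the containing object. A self-contained illustration; the struct contents are reduced to roughly what the hits mention:

```c
#include <stddef.h>
#include <stdio.h>

/* Userspace equivalent of the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pci_hba_data {		/* generic per-HBA bookkeeping (simplified) */
	unsigned long base_addr;
};

struct lba_device {		/* driver-private object embedding the generic part */
	int id;
	struct pci_hba_data hba;
};

/* Recover the containing lba_device from the embedded hba member. */
static struct lba_device *LBA_DEV(struct pci_hba_data *hba)
{
	return container_of(hba, struct lba_device, hba);
}

int main(void)
{
	struct lba_device dev = { .id = 7, .hba = { .base_addr = 0xf000 } };
	struct pci_hba_data *hba = &dev.hba;	/* what generic code passes around */

	printf("recovered lba_device id = %d\n", LBA_DEV(hba)->id);
	return 0;
}
```
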
/linux/drivers/target/

target_core_hba.c
    111  struct se_hba *hba;  in core_alloc_hba() (local)
    114  hba = kzalloc(sizeof(*hba), GFP_KERNEL);  in core_alloc_hba()
    115  if (!hba) {  in core_alloc_hba()
    127  if (!hba->backend) {  in core_alloc_hba()
    132  ret = hba->backend->ops->attach_hba(hba, plugin_dep_id);  in core_alloc_hba()
    144  return hba;  in core_alloc_hba()
    148  hba->backend = NULL;  in core_alloc_hba()
    150  kfree(hba);  in core_alloc_hba()
    159  hba->backend->ops->detach_hba(hba);  in core_delete_hba()
    170  hba->backend = NULL;  in core_delete_hba()
    [all …]

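core_alloc_hba() follows the alloc → find backend → attach → unwind-on-error shape visible in the hits, and core_delete_hba() mirrors the attach with a detach before freeing. A userspace sketch of that control flow, with the backend registry and its ops mocked out (all names below are illustrative):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct hba;

struct backend_ops {
	int  (*attach_hba)(struct hba *hba, int dep_id);
	void (*detach_hba)(struct hba *hba);
};

struct backend {
	const char *name;
	const struct backend_ops *ops;
};

struct hba {
	const struct backend *backend;
};

/* Mock registry lookup (real code searches a list of registered backends). */
static const struct backend *find_backend(const char *name);

static struct hba *alloc_hba(const char *backend_name, int dep_id)
{
	struct hba *hba = calloc(1, sizeof(*hba));

	if (!hba)
		return NULL;

	hba->backend = find_backend(backend_name);
	if (!hba->backend)
		goto out_free;

	if (hba->backend->ops->attach_hba(hba, dep_id))
		goto out_drop_backend;

	return hba;

out_drop_backend:
	hba->backend = NULL;	/* undo in reverse order of construction */
out_free:
	free(hba);
	return NULL;
}

static void delete_hba(struct hba *hba)
{
	hba->backend->ops->detach_hba(hba);	/* mirrors attach_hba() */
	hba->backend = NULL;
	free(hba);
}

/* --- mock backend so the sketch runs --- */
static int  mock_attach(struct hba *hba, int dep_id) { (void)hba; (void)dep_id; return 0; }
static void mock_detach(struct hba *hba) { (void)hba; }
static const struct backend_ops mock_ops = { mock_attach, mock_detach };
static const struct backend mock_backend = { "mock", &mock_ops };

static const struct backend *find_backend(const char *name)
{
	return strcmp(name, "mock") ? NULL : &mock_backend;
}

int main(void)
{
	struct hba *hba = alloc_hba("mock", 0);

	printf("alloc %s\n", hba ? "succeeded" : "failed");
	if (hba)
		delete_hba(hba);
	return 0;
}
```
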
/linux/drivers/scsi/bnx2fc/

bnx2fc_fcoe.c
    193  struct bnx2fc_hba *hba = interface->hba;  in bnx2fc_cleanup() (local)
    284  hba = interface->hba;  in bnx2fc_xmit()
    804  hba = interface->hba;  in bnx2fc_net_config()
    899  if (interface->hba == hba &&  in bnx2fc_indicate_netevent()
    913  if (interface->hba != hba)  in bnx2fc_indicate_netevent()
    1373  hba->max_xid = (hba->max_tasks - 1);  in bnx2fc_hba_create()
    1452  interface->hba = hba;  in bnx2fc_interface_create()
    1837  if (interface->hba == hba) {  in bnx2fc_ulp_start()
    1966  if (interface->hba == hba)  in bnx2fc_ulp_stop()
    2185  hba = interface->hba;  in __bnx2fc_enable()
    [all …]

bnx2fc_hwi.c
    47  if (hba->cnic && hba->cnic->submit_kwqes)  in bnx2fc_send_stat_req()
    141  if (hba->cnic && hba->cnic->submit_kwqes)  in bnx2fc_send_fw_fcoe_init_msg()
    160  if (hba->cnic && hba->cnic->submit_kwqes)  in bnx2fc_send_fw_fcoe_destroy_msg()
    177  struct bnx2fc_hba *hba = interface->hba;  in bnx2fc_send_session_ofld_req() (local)
    344  if (hba->cnic && hba->cnic->submit_kwqes)  in bnx2fc_send_session_ofld_req()
    362  struct bnx2fc_hba *hba = interface->hba;  in bnx2fc_send_session_enable_req() (local)
    431  struct bnx2fc_hba *hba = interface->hba;  in bnx2fc_send_session_disable_req() (local)
    542  hba = unsol_els->hba;  in bnx2fc_unsol_els_work()
    615  unsol_els->hba = interface->hba;  in bnx2fc_process_l2_frame_compl()
    1171  if (hba != interface->hba) {  in bnx2fc_process_ofld_cmpl()
    [all …]
|