/drivers/md/persistent-data/

dm-space-map.h
     81  sm->destroy(sm);                          in dm_sm_destroy()
     86  return sm->extend(sm, extra_blocks);      in dm_sm_extend()
     91  return sm->get_nr_blocks(sm, count);      in dm_sm_get_nr_blocks()
     96  return sm->get_nr_free(sm, count);        in dm_sm_get_nr_free()
    102  return sm->get_count(sm, b, result);      in dm_sm_get_count()
    114  return sm->set_count(sm, b, count);       in dm_sm_set_count()
    119  return sm->commit(sm);                    in dm_sm_commit()
    124  return sm->inc_blocks(sm, b, e);          in dm_sm_inc_blocks()
    134  return sm->dec_blocks(sm, b, e);          in dm_sm_dec_blocks()
    144  return sm->new_block(sm, b);              in dm_sm_new_block()
    [all …]
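
The dm-space-map.h hits show the space-map interface pattern: `struct dm_space_map` carries its operations as function pointers, and every `dm_sm_*()` helper is a one-line forward to the corresponding pointer. A minimal sketch of that shape, with a simplified struct layout that is only illustrative (the real interface has more operations and extra fields):

```c
typedef unsigned long long dm_block_t;

/* Illustrative subset of the interface; the operations travel with the object. */
struct dm_space_map {
	void (*destroy)(struct dm_space_map *sm);
	int  (*extend)(struct dm_space_map *sm, dm_block_t extra_blocks);
	int  (*get_nr_free)(struct dm_space_map *sm, dm_block_t *count);
	int  (*new_block)(struct dm_space_map *sm, dm_block_t *b);
	int  (*commit)(struct dm_space_map *sm);
};

/* The dm_sm_*() helpers do nothing but dispatch through those pointers. */
static inline void dm_sm_destroy(struct dm_space_map *sm)
{
	sm->destroy(sm);
}

static inline int dm_sm_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
{
	return sm->extend(sm, extra_blocks);
}

static inline int dm_sm_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
{
	return sm->get_nr_free(sm, count);
}

static inline int dm_sm_new_block(struct dm_space_map *sm, dm_block_t *b)
{
	return sm->new_block(sm, b);
}
```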
|
dm-space-map-metadata.c
    278  struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);   in sm_metadata_destroy()
    285  struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);   in sm_metadata_get_nr_blocks()
    294  struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);   in sm_metadata_get_nr_free()
    307  struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);   in sm_metadata_get_count()
    347  struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);   in sm_metadata_count_is_more_than_one()
    729  memcpy(sm, &bootstrap_ops, sizeof(*sm));                               in sm_metadata_extend()
    765  memcpy(sm, &ops, sizeof(*sm));                                         in sm_metadata_extend()
    779  memcpy(&smm->sm, &ops, sizeof(smm->sm));                               in dm_sm_metadata_init()
    781  return &smm->sm;                                                       in dm_sm_metadata_init()
    798  memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));                     in dm_sm_metadata_create()
    [all …]
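
Both backends (`sm_metadata` here and `sm_disk` below) embed the interface struct and use `container_of()` to get from the interface pointer back to the containing implementation; the `memcpy(&smm->sm, &ops, ...)` hits are how the function-pointer table is installed. A small self-contained illustration of the embed-and-recover idiom, using the textbook `container_of()` definition and simplified struct fields:

```c
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dm_space_map {
	int (*get_nr_blocks)(struct dm_space_map *sm, unsigned long long *count);
};

/* Private backend with the generic interface embedded as member "sm". */
struct sm_metadata {
	struct dm_space_map sm;
	unsigned long long nr_blocks;
};

static int sm_metadata_get_nr_blocks(struct dm_space_map *sm,
				     unsigned long long *count)
{
	/* Recover the containing sm_metadata from the embedded member. */
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	*count = smm->nr_blocks;
	return 0;
}

/* The ops table is copied into the embedded member, as in the hit at 779. */
static const struct dm_space_map ops = {
	.get_nr_blocks = sm_metadata_get_nr_blocks,
};

int main(void)
{
	struct sm_metadata smm = { .nr_blocks = 128 };
	unsigned long long n;

	memcpy(&smm.sm, &ops, sizeof(smm.sm));
	smm.sm.get_nr_blocks(&smm.sm, &n);
	printf("%llu blocks\n", n);
	return 0;
}
```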
|
dm-space-map-disk.c
     37  struct sm_disk *smd = container_of(sm, struct sm_disk, sm);   in sm_disk_destroy()
     44  struct sm_disk *smd = container_of(sm, struct sm_disk, sm);   in sm_disk_extend()
     51  struct sm_disk *smd = container_of(sm, struct sm_disk, sm);   in sm_disk_get_nr_blocks()
     60  struct sm_disk *smd = container_of(sm, struct sm_disk, sm);   in sm_disk_get_nr_free()
     70  struct sm_disk *smd = container_of(sm, struct sm_disk, sm);   in sm_disk_get_count()
     95  struct sm_disk *smd = container_of(sm, struct sm_disk, sm);   in sm_disk_set_count()
    108  struct sm_disk *smd = container_of(sm, struct sm_disk, sm);   in sm_disk_inc_blocks()
    121  struct sm_disk *smd = container_of(sm, struct sm_disk, sm);   in sm_disk_dec_blocks()
    134  struct sm_disk *smd = container_of(sm, struct sm_disk, sm);   in sm_disk_new_block()
    229  memcpy(&smd->sm, &ops, sizeof(smd->sm));                      in dm_sm_disk_create()
    [all …]
|
dm-transaction-manager.c
     96  struct dm_space_map *sm;          member
    195  tm->sm = sm;                      in dm_tm_create()
    239  r = dm_sm_commit(tm->sm);         in dm_tm_pre_commit()
    383  dm_sm_inc_block(tm->sm, b);       in dm_tm_inc()
    405  dm_sm_dec_block(tm->sm, b);       in dm_tm_dec()
    490  *sm = dm_sm_metadata_init();      in dm_tm_create_internal()
    491  if (IS_ERR(*sm))                  in dm_tm_create_internal()
    492  return PTR_ERR(*sm);              in dm_tm_create_internal()
    494  *tm = dm_tm_create(bm, *sm);      in dm_tm_create_internal()
    496  dm_sm_destroy(*sm);               in dm_tm_create_internal()
    [all …]
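
The dm_tm_create_internal() hits show the construction order: build the metadata space map first, build the transaction manager on top of it (which keeps the pointer, `tm->sm = sm`), and destroy the space map again if the second step fails. A hedged stand-alone sketch of that ordering; the types and constructors below are stubs, not the dm-persistent-data API:

```c
#include <errno.h>
#include <stdlib.h>

/* Stand-ins for the dm structures; not the real API. */
struct sm_sketch { int unused; };
struct tm_sketch { struct sm_sketch *sm; };

static struct sm_sketch *sm_init_sketch(void)
{
	return calloc(1, sizeof(struct sm_sketch));
}

static struct tm_sketch *tm_create_sketch(struct sm_sketch *sm)
{
	struct tm_sketch *tm = calloc(1, sizeof(*tm));

	if (tm)
		tm->sm = sm;		/* the tm keeps a reference, cf. "tm->sm = sm" */
	return tm;
}

/* Mirrors the dm_tm_create_internal() ordering: create the space map, create
 * the transaction manager on top of it, and unwind the space map if that
 * second step fails. */
static int tm_create_internal_sketch(struct tm_sketch **tm, struct sm_sketch **sm)
{
	*sm = sm_init_sketch();
	if (!*sm)
		return -ENOMEM;

	*tm = tm_create_sketch(*sm);
	if (!*tm) {
		free(*sm);		/* cf. dm_sm_destroy(*sm) on the failure path */
		*sm = NULL;
		return -ENOMEM;
	}
	return 0;
}
```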
|
/drivers/mfd/

sm501.c
   1338  sm = kzalloc(sizeof(*sm), GFP_KERNEL);                              in sm501_plat_probe()
   1339  if (!sm) {                                                          in sm501_plat_probe()
   1355  if (!sm->io_res || !sm->mem_res) {                                  in sm501_plat_probe()
   1361  sm->regs_claim = request_mem_region(sm->io_res->start,              in sm501_plat_probe()
   1371  sm->regs = ioremap(sm->io_res->start, resource_size(sm->io_res));   in sm501_plat_probe()
   1389  kfree(sm);                                                          in sm501_plat_probe()
   1456  if (sm->platdata && sm->platdata->init) {                           in sm501_plat_resume()
   1457  sm501_init_regs(sm, sm->platdata->init);                            in sm501_plat_resume()
   1521  sm = kzalloc(sizeof(*sm), GFP_KERNEL);                              in sm501_pci_probe()
   1522  if (!sm) {                                                          in sm501_pci_probe()
    [all …]
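
The sm501_plat_probe() hits follow the usual probe shape: allocate driver state with kzalloc(), claim the register window with request_mem_region(), map it with ioremap(), and free everything again on the error path. A condensed, kernel-flavoured sketch of that ordering (the struct, name string, and error codes are simplified; this is not the driver's actual probe):

```c
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/slab.h>

struct sm501_devdata_sketch {
	struct resource *io_res;
	struct resource *regs_claim;
	void __iomem *regs;
};

static int sm501_probe_sketch(struct resource *io_res)
{
	struct sm501_devdata_sketch *sm;
	int ret;

	sm = kzalloc(sizeof(*sm), GFP_KERNEL);
	if (!sm)
		return -ENOMEM;

	sm->io_res = io_res;
	sm->regs_claim = request_mem_region(sm->io_res->start,
					    resource_size(sm->io_res), "sm501");
	if (!sm->regs_claim) {
		ret = -EBUSY;
		goto err_free;
	}

	sm->regs = ioremap(sm->io_res->start, resource_size(sm->io_res));
	if (!sm->regs) {
		ret = -EIO;
		goto err_release;
	}
	return 0;

err_release:
	release_mem_region(sm->io_res->start, resource_size(sm->io_res));
err_free:
	kfree(sm);	/* mirrors the kfree(sm) on the probe error path */
	return ret;
}
```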
|
/drivers/net/fddi/skfp/

smt.c
    510  smc->sba.sm = sm ;                                         in smt_received_pack()
    552  sm->smt_version, &sm->smt_source);                         in smt_received_pack()
    558  ((sm->smt_len & 3) && (sm->smt_class != SMT_ECF))) {       in smt_received_pack()
    633  if (sm->smt_tid == smc->sm.pend[SMT_TID_NIF]) {            in smt_received_pack()
    670  smc->sm.pend[SMT_TID_NIF], sm->smt_tid);                   in smt_received_pack()
    702  if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF]) {            in smt_received_pack()
    705  else if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF_UNA]) {   in smt_received_pack()
    708  else if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF_DNA]) {   in smt_received_pack()
    720  if (sm->smt_len && !sm_to_para(smc,sm,SMT_P_ECHODATA)) {   in smt_received_pack()
    730  sm->smt_dest = sm->smt_source ;                            in smt_received_pack()
    [all …]
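
smt_received_pack() validates the received SMT frame header, matches its transaction id against the station's pending ids (`smc->sm.pend[...]`), and for echo frames reuses the frame as the reply by overwriting the destination with the source. A toy illustration of the tid match and address swap; the header layout and constants here are invented stand-ins for the skfp definitions:

```c
#include <stdio.h>

/* Reduced stand-ins for the skfp SMT header; the real struct smt_header
 * carries full FDDI addresses, version, class, type and length fields. */
struct sketch_addr { unsigned char a[6]; };

struct sketch_smt_header {
	struct sketch_addr smt_dest;
	struct sketch_addr smt_source;
	unsigned int smt_class;		/* frame class, e.g. NIF or ECF */
	unsigned int smt_tid;		/* transaction id */
};

static void sketch_handle_ecf(struct sketch_smt_header *sm,
			      unsigned int pend_ecf_tid)
{
	if (sm->smt_tid == pend_ecf_tid) {
		printf("echo response matches pending tid %u\n", sm->smt_tid);
		return;
	}

	/* Echo request: answer by sending the frame back to its source. */
	sm->smt_dest = sm->smt_source;
	printf("echo request %u reflected to sender\n", sm->smt_tid);
}

int main(void)
{
	struct sketch_smt_header frame = { .smt_tid = 42 };

	sketch_handle_ecf(&frame, 7);	/* not ours: reflect it */
	sketch_handle_ecf(&frame, 42);	/* matches a pending request */
	return 0;
}
```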
|
ess.c
    148  DB_ESSN(2, "fc %x ft %x", sm->smt_class, sm->smt_type);       in ess_raf_received_pack()
    149  DB_ESSN(2, "ver %x tran %x", sm->smt_version, sm->smt_tid);   in ess_raf_received_pack()
    167  if (sm->smt_type == SMT_REQUEST) {                            in ess_raf_received_pack()
    186  smc->ess.alloc_trans_id = sm->smt_tid ;                       in ess_raf_received_pack()
    194  sm->smt_dest = smt_sba_da ;                                   in ess_raf_received_pack()
    277  if (sm->smt_type != SMT_REQUEST) {                            in ess_raf_received_pack()
    311  &sm->smt_source);                                             in ess_raf_received_pack()
    336  if (sm->smt_type != SMT_REQUEST) {                            in ess_raf_received_pack()
    342  &sm->smt_source);                                             in ess_raf_received_pack()
    493  chg->smt.smt_tid = sm->smt_tid ;                              in ess_send_response()
    [all …]
|
pmf.c
    275  struct smt_header *sm ;                         in smt_pmf_received_pack()   local
    281  dump_smt(smc,sm,"PMF Received") ;               in smt_pmf_received_pack()
    289  if (sm->smt_class == SMT_PMF_GET ||             in smt_pmf_received_pack()
    296  dump_smt(smc,sm,"PMF Reply") ;                  in smt_pmf_received_pack()
    504  if (memcmp((char *) &sm->smt_sid,               in smt_authorize()
   1449  smc->sm.please_reconnect = 1 ;                  in smt_set_para()
   1579  sm->smt_class,sm->smt_type,sm->smt_version) ;   in dump_smt()
   1582  printf(" LEN %x\n",sm->smt_len) ;               in dump_smt()
   1584  len = sm->smt_len ;                             in dump_smt()
   1600  smt_swap_para(sm,smtlen,0) ;                    in dump_smt()
    [all …]
|
/drivers/scsi/isci/

phy.c
   1063  struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);   in sci_phy_starting_initial_substate_enter()
   1071  struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);   in sci_phy_starting_await_sas_power_substate_enter()
   1079  struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);   in sci_phy_starting_await_sas_power_substate_exit()
   1087  struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);   in sci_phy_starting_await_sata_power_substate_enter()
   1095  struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);   in sci_phy_starting_await_sata_power_substate_exit()
   1103  struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);   in sci_phy_starting_await_sata_phy_substate_enter()
   1110  struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);   in sci_phy_starting_await_sata_phy_substate_exit()
   1117  struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);   in sci_phy_starting_await_sata_speed_substate_enter()
   1124  struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);   in sci_phy_starting_await_sata_speed_substate_exit()
   1131  struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);   in sci_phy_starting_await_sig_fis_uf_substate_enter()
    [all …]
|
remote_device.c
    341  struct sci_base_state_machine *sm = &idev->sm;                            in sci_remote_device_stop()   local
    398  struct sci_base_state_machine *sm = &idev->sm;                            in sci_remote_device_frame_handler()   local
    493  struct sci_base_state_machine *sm = &idev->sm;                            in is_remote_device_ready()   local
    527  struct sci_base_state_machine *sm = &idev->sm;                            in sci_remote_device_event_handler()   local
    613  struct sci_base_state_machine *sm = &idev->sm;                            in sci_remote_device_start_io()   local
    749  struct sci_base_state_machine *sm = &idev->sm;                            in sci_remote_device_complete_io()   local
    831  struct sci_base_state_machine *sm = &idev->sm;                            in sci_remote_device_start_task()   local
    951  struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);    in sci_remote_device_initial_state_enter()
    972  struct sci_base_state_machine *sm = &idev->sm;                            in sci_remote_device_destruct()   local
   1015  struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);    in sci_remote_device_stopped_state_enter()
    [all …]
|
remote_node_context.c
    270  struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);           in sci_remote_node_context_initial_state_enter()
    288  struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);   in sci_remote_node_context_posting_state_enter()
    295  struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);           in sci_remote_node_context_invalidating_state_enter()
    304  struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);           in sci_remote_node_context_resuming_state_enter()
    325  struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);           in sci_remote_node_context_ready_state_enter()
    347  struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);           in sci_remote_node_context_tx_suspended_state_enter()
    354  struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);           in sci_remote_node_context_tx_rx_suspended_state_enter()
    373  struct sci_base_state_machine *sm)                                                  in sci_remote_node_context_await_suspend_state_exit()   argument
    376  = container_of(sm, typeof(*rnc), sm);                                               in sci_remote_node_context_await_suspend_state_exit()
    569  = sci_rnc->sm.current_state_id;                                                     in sci_remote_node_context_suspend()
    [all …]
|
host.c
    159  handler(sm);                                                       in sci_init_sm()
    167  handler = sm->state_table[sm->current_state_id].exit_state;        in sci_change_state()
    169  handler(sm);                                                       in sci_change_state()
    171  sm->previous_state_id = sm->current_state_id;                      in sci_change_state()
    174  handler = sm->state_table[sm->current_state_id].enter_state;       in sci_change_state()
    176  handler(sm);                                                       in sci_change_state()
   1310  struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);    in sci_controller_initial_state_enter()
   1317  struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);    in sci_controller_starting_state_exit()
   1444  struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);    in sci_controller_ready_state_enter()
   1597  struct sci_base_state_machine *sm = &ihost->sm;                    in controller_timeout()   local
    [all …]
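
The host.c hits at 167-176 are the core of the isci state machine: each state has optional enter/exit handlers in a table, and sci_change_state() runs the old state's exit handler, records the previous state, switches states, then runs the new state's enter handler (the container_of() hits elsewhere are those handlers recovering their owning object). A compact stand-alone sketch of that mechanism with simplified names:

```c
#include <stdio.h>

enum sketch_state { STATE_INITIAL, STATE_READY, STATE_MAX };

struct sketch_sm;
typedef void (*sketch_handler_t)(struct sketch_sm *sm);

struct sketch_state_desc {
	sketch_handler_t enter_state;
	sketch_handler_t exit_state;
};

struct sketch_sm {
	const struct sketch_state_desc *state_table;
	enum sketch_state current_state_id;
	enum sketch_state previous_state_id;
};

/* Same sequence as the sci_change_state() hits: exit old state, remember it,
 * switch, enter new state; handlers are optional. */
static void sketch_change_state(struct sketch_sm *sm, enum sketch_state next)
{
	sketch_handler_t handler;

	handler = sm->state_table[sm->current_state_id].exit_state;
	if (handler)
		handler(sm);

	sm->previous_state_id = sm->current_state_id;
	sm->current_state_id = next;

	handler = sm->state_table[sm->current_state_id].enter_state;
	if (handler)
		handler(sm);
}

static void ready_enter(struct sketch_sm *sm)
{
	printf("ready (came from state %d)\n", sm->previous_state_id);
}

static const struct sketch_state_desc sketch_table[STATE_MAX] = {
	[STATE_READY] = { .enter_state = ready_enter },
};

int main(void)
{
	struct sketch_sm sm = { sketch_table, STATE_INITIAL, STATE_INITIAL };

	sketch_change_state(&sm, STATE_READY);
	return 0;
}
```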
|
port.c
    298  struct sci_base_state_machine *sm = &iport->sm;                    in port_state_machine_change()   local
    708  struct sci_base_state_machine *sm = &iport->sm;                    in sci_port_general_link_up_handler()   local
    945  struct isci_port *iport = container_of(sm, typeof(*iport), sm);    in sci_port_ready_substate_waiting_enter()
    961  struct isci_port *iport = container_of(sm, typeof(*iport), sm);    in scic_sds_port_ready_substate_waiting_exit()
    968  struct isci_port *iport = container_of(sm, typeof(*iport), sm);    in sci_port_ready_substate_operational_enter()
   1028  struct isci_port *iport = container_of(sm, typeof(*iport), sm);    in sci_port_ready_substate_operational_exit()
   1047  struct isci_port *iport = container_of(sm, typeof(*iport), sm);    in sci_port_ready_substate_configuring_enter()
   1491  struct isci_port *iport = container_of(sm, typeof(*iport), sm);    in sci_port_stopped_state_enter()
   1504  struct isci_port *iport = container_of(sm, typeof(*iport), sm);    in sci_port_stopped_state_exit()
   1512  struct isci_port *iport = container_of(sm, typeof(*iport), sm);    in sci_port_ready_state_enter()
    [all …]
|
request.c
    806  state = ireq->sm.current_state_id;                                  in sci_request_start()
    854  state = ireq->sm.current_state_id;                                  in sci_io_request_terminate()
    915  state = ireq->sm.current_state_id;                                  in sci_request_complete()
    936  state = ireq->sm.current_state_id;                                  in sci_io_request_event_handler()
   1706  state = ireq->sm.current_state_id;                                  in sci_io_request_frame_handler()
   2953  struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);    in sci_request_started_state_enter()
   2986  sci_change_state(sm, state);                                        in sci_request_started_state_enter()
   2991  struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);    in sci_request_completed_state_enter()
   3004  struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);    in sci_request_aborting_state_enter()
   3012  struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);    in sci_stp_request_started_non_data_await_h2d_completion_enter()
    [all …]
|
/drivers/gpu/drm/nouveau/nvkm/engine/gr/

ctxgm200.c
     52  u8 sm, i;                                                in gm200_grctx_generate_smid_config()   local
     54  for (sm = 0; sm < gr->sm_nr; sm++) {                     in gm200_grctx_generate_smid_config()
     55  const u8 gpc = gr->sm[sm].gpc;                           in gm200_grctx_generate_smid_config()
     56  const u8 tpc = gr->sm[sm].tpc;                           in gm200_grctx_generate_smid_config()
     57  dist[sm / 4] |= ((gpc << 4) | tpc) << ((sm % 4) * 8);    in gm200_grctx_generate_smid_config()
     58  gpcs[gpc] |= sm << (tpc * 8);                            in gm200_grctx_generate_smid_config()
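
The smid_config generators walk the per-SM table and pack one byte per SM into 32-bit registers: four SMs per `dist` word, each byte holding `(gpc << 4) | tpc`, plus a per-GPC word that records the SM index byte-indexed by TPC (the gp100 variant below additionally spills high TPCs into extra words via `tpc / 4`). A stand-alone sketch of the gm200-style packing with made-up GPC/TPC assignments:

```c
#include <stdint.h>
#include <stdio.h>

struct sm_loc { uint8_t gpc, tpc; };

int main(void)
{
	/* Invented floor-swept layout: five SMs spread over three GPCs. */
	const struct sm_loc sm_tbl[] = {
		{ 0, 0 }, { 0, 1 }, { 1, 0 }, { 1, 1 }, { 2, 0 },
	};
	const unsigned sm_nr = sizeof(sm_tbl) / sizeof(sm_tbl[0]);
	uint32_t dist[8] = { 0 }, gpcs[4] = { 0 };

	for (unsigned sm = 0; sm < sm_nr; sm++) {
		const uint8_t gpc = sm_tbl[sm].gpc;
		const uint8_t tpc = sm_tbl[sm].tpc;

		/* byte (sm % 4) of word (sm / 4) holds (gpc << 4) | tpc */
		dist[sm / 4] |= (uint32_t)((gpc << 4) | tpc) << ((sm % 4) * 8);
		/* byte tpc of gpcs[gpc] holds the SM index */
		gpcs[gpc] |= (uint32_t)sm << (tpc * 8);
	}

	for (unsigned i = 0; i <= (sm_nr - 1) / 4; i++)
		printf("dist[%u] = 0x%08x\n", i, dist[i]);
	for (unsigned i = 0; i < 3; i++)
		printf("gpcs[%u] = 0x%08x\n", i, gpcs[i]);
	return 0;
}
```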
|
ctxgp100.c
    111  u8 sm, i;                                                              in gp100_grctx_generate_smid_config()   local
    113  for (sm = 0; sm < gr->sm_nr; sm++) {                                   in gp100_grctx_generate_smid_config()
    114  const u8 gpc = gr->sm[sm].gpc;                                         in gp100_grctx_generate_smid_config()
    115  const u8 tpc = gr->sm[sm].tpc;                                         in gp100_grctx_generate_smid_config()
    116  dist[sm / 4] |= ((gpc << 4) | tpc) << ((sm % 4) * 8);                  in gp100_grctx_generate_smid_config()
    117  gpcs[gpc + (gr->func->gpc_nr * (tpc / 4))] |= sm << ((tpc % 4) * 8);   in gp100_grctx_generate_smid_config()
|
tu102.c
     39  int sm;                                                                     in tu102_gr_init_fs()   local
     44  for (sm = 0; sm < gr->sm_nr; sm++) {                                        in tu102_gr_init_fs()
     45  int tpc = gv100_gr_nonpes_aware_tpc(gr, gr->sm[sm].gpc, gr->sm[sm].tpc);    in tu102_gr_init_fs()
     47  nvkm_wr32(device, GPC_UNIT(gr->sm[sm].gpc, 0x0c10 + tpc * 4), sm);          in tu102_gr_init_fs()
|
ctxgf100.c
   1078  int i, j, sm = 0;                                          in gf100_grctx_generate_r4060a8()   local
   1083  if (sm < gr->sm_nr)                                        in gf100_grctx_generate_r4060a8()
   1084  data |= gr->sm[sm++].gpc << (j * 8);                       in gf100_grctx_generate_r4060a8()
   1296  nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x698), sm);          in gf100_grctx_generate_sm_id()
   1297  nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x4e8), sm);          in gf100_grctx_generate_sm_id()
   1298  nvkm_wr32(device, GPC_UNIT(gpc, 0x0c10 + tpc * 4), sm);    in gf100_grctx_generate_sm_id()
   1299  nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x088), sm);          in gf100_grctx_generate_sm_id()
   1306  int sm;                                                    in gf100_grctx_generate_floorsweep()   local
   1308  for (sm = 0; sm < gr->sm_nr; sm++) {                       in gf100_grctx_generate_floorsweep()
   1309  func->sm_id(gr, gr->sm[sm].gpc, gr->sm[sm].tpc, sm);       in gf100_grctx_generate_floorsweep()
    [all …]
|
gv100.c
     28  gv100_gr_trap_sm(struct gf100_gr *gr, int gpc, int tpc, int sm)            in gv100_gr_trap_sm()   argument
     32  u32 werr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x730 + (sm * 0x80)));     in gv100_gr_trap_sm()
     33  u32 gerr = nvkm_rd32(device, TPC_UNIT(gpc, tpc, 0x734 + (sm * 0x80)));     in gv100_gr_trap_sm()
     42  gpc, tpc, sm, gerr, glob, werr, warp ? warp->name : "");                   in gv100_gr_trap_sm()
     44  nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x730 + sm * 0x80), 0x00000000);      in gv100_gr_trap_sm()
     45  nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x734 + sm * 0x80), gerr);            in gv100_gr_trap_sm()
     67  int sm;                                                                    in gv100_gr_init_shader_exceptions()   local
     68  for (sm = 0; sm < 0x100; sm += 0x80) {                                     in gv100_gr_init_shader_exceptions()
     70  nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x72c + sm), 0x00000004);             in gv100_gr_init_shader_exceptions()
    268  gr->sm[gtpc].gpc = gpc_table[gtpc];                                        in gv100_gr_oneinit_sm_id()
    [all …]
|
/drivers/scsi/csiostor/

csio_scsi.h
    264  csio_post_event(&ioreq->sm, CSIO_SCSIE_COMPLETED);   in csio_scsi_completed()
    265  if (csio_list_deleted(&ioreq->sm.sm_list))           in csio_scsi_completed()
    266  list_add_tail(&ioreq->sm.sm_list, cbfn_q);           in csio_scsi_completed()
    272  csio_post_event(&ioreq->sm, CSIO_SCSIE_ABORTED);     in csio_scsi_aborted()
    273  list_add_tail(&ioreq->sm.sm_list, cbfn_q);           in csio_scsi_aborted()
    279  csio_post_event(&ioreq->sm, CSIO_SCSIE_CLOSED);      in csio_scsi_closed()
    280  list_add_tail(&ioreq->sm.sm_list, cbfn_q);           in csio_scsi_closed()
    298  csio_post_event(&ioreq->sm, CSIO_SCSIE_START_IO);    in csio_scsi_start_io()
    311  csio_post_event(&ioreq->sm, CSIO_SCSIE_START_TM);    in csio_scsi_start_tm()
    324  csio_post_event(&ioreq->sm, CSIO_SCSIE_ABORT);       in csio_scsi_abort()
    [all …]
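
The csio_scsi helpers drive each I/O request's embedded state machine by posting an event to it and then parking the request on the caller's callback queue via the state machine's list hook. A hedged, kernel-flavoured sketch of that shape; the struct layout, event values and the post_event() dispatch are simplified stand-ins for csio_post_event() and the csio sm types, not the driver's definitions:

```c
#include <linux/list.h>

enum sketch_scsi_evt { SKETCH_SCSIE_COMPLETED, SKETCH_SCSIE_ABORTED };

struct sketch_sm {
	struct list_head sm_list;	/* queueing hook, like the csio sm_list */
	void (*handler)(struct sketch_sm *sm, enum sketch_scsi_evt evt);
};

struct sketch_ioreq {
	struct sketch_sm sm;		/* embedded state machine */
};

static void sketch_post_event(struct sketch_sm *sm, enum sketch_scsi_evt evt)
{
	sm->handler(sm, evt);		/* run the current state handler */
}

static void sketch_scsi_completed(struct sketch_ioreq *ioreq,
				  struct list_head *cbfn_q)
{
	sketch_post_event(&ioreq->sm, SKETCH_SCSIE_COMPLETED);

	/* Queue the request for its completion callback only if it is not
	 * already on a list (assumes the node is re-initialised on removal). */
	if (list_empty(&ioreq->sm.sm_list))
		list_add_tail(&ioreq->sm.sm_list, cbfn_q);
}
```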
|
csio_rnode.c
    127  list_for_each(tmp, &rnhead->sm.sm_list) {    in csio_rn_lookup()
    151  list_for_each(tmp, &rnhead->sm.sm_list) {    in csio_rn_lookup_wwpn()
    175  list_for_each(tmp, &rnhead->sm.sm_list) {    in csio_rnode_lookup_portid()
    200  list_for_each(tmp, &rnhead->sm.sm_list) {    in csio_rn_dup_flowid()
    616  csio_set_state(&rn->sm, csio_rns_ready);     in csio_rns_uninit()
    697  csio_set_state(&rn->sm, csio_rns_uninit);    in csio_rns_ready()
    741  csio_set_state(&rn->sm, csio_rns_ready);     in csio_rns_offline()
    764  csio_set_state(&rn->sm, csio_rns_uninit);    in csio_rns_offline()
    800  csio_set_state(&rn->sm, csio_rns_ready);     in csio_rns_disappeared()
    887  csio_post_event(&rn->sm, evt);               in csio_rnode_fwevt_handler()
    [all …]
|
/drivers/char/tpm/

tpm_crb.c
     94  u32 sm;                                       member
    198  if (!tpm_crb_has_idle(priv->sm))              in __crb_go_idle()
    245  if (!tpm_crb_has_idle(priv->sm))              in __crb_cmd_ready()
    459  priv->sm == ACPI_TPM2_MEMORY_MAPPED ||        in crb_send()
    463  if (priv->sm == ACPI_TPM2_START_METHOD ||     in crb_send()
    490  if ((priv->sm == ACPI_TPM2_START_METHOD ||    in crb_cancel()
    782  u32 sm;                                       in crb_acpi_add()   local
    793  sm = buf->start_method;                       in crb_acpi_add()
    794  if (sm == ACPI_TPM2_MEMORY_MAPPED) {          in crb_acpi_add()
    818  if (sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {       in crb_acpi_add()
    [all …]
|
/drivers/firmware/arm_scmi/vendors/imx/

Makefile
      2  obj-$(CONFIG_IMX_SCMI_BBM_EXT) += imx-sm-bbm.o
      3  obj-$(CONFIG_IMX_SCMI_CPU_EXT) += imx-sm-cpu.o
      4  obj-$(CONFIG_IMX_SCMI_LMM_EXT) += imx-sm-lmm.o
      5  obj-$(CONFIG_IMX_SCMI_MISC_EXT) += imx-sm-misc.o
|
/drivers/net/ethernet/brocade/bna/

bfa_cs.h
     28  t sm; /* state machine function */ \
     34  n ## _sm_to_state(struct n ## _sm_table_s *smt, t sm) \
     38  while (smt[i].sm && smt[i].sm != sm) \
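
The bfa_cs.h lines come from a macro that, for each state machine `n`, defines a table type pairing a state-handler function pointer with a numeric state id, and an `n_sm_to_state()` helper that linearly scans the table for the current handler. A hand-expanded sketch of what that macro would produce for one hypothetical state machine; the `state` field and the return statement are inferred, only the loop and signature appear in the hits above:

```c
#include <stdio.h>

struct ioc_sketch;				/* the object the handlers act on */
typedef void (*ioc_sm_t)(struct ioc_sketch *ioc, int event);

struct ioc_sm_table_s {
	ioc_sm_t sm;		/* state machine function */
	int state;		/* state id reported for that function */
};

static void ioc_sm_uninit(struct ioc_sketch *ioc, int event) { (void)ioc; (void)event; }
static void ioc_sm_ready(struct ioc_sketch *ioc, int event)  { (void)ioc; (void)event; }

/* Walk the table until the handler pointer matches, then report its state. */
static int ioc_sm_to_state(struct ioc_sm_table_s *smt, ioc_sm_t sm)
{
	int i = 0;

	while (smt[i].sm && smt[i].sm != sm)
		i++;
	return smt[i].state;
}

static struct ioc_sm_table_s ioc_sm_table[] = {
	{ ioc_sm_uninit, 1 },
	{ ioc_sm_ready,  2 },
	{ NULL,          0 },	/* sentinel: unknown function maps to 0 */
};

int main(void)
{
	printf("ready maps to state %d\n",
	       ioc_sm_to_state(ioc_sm_table, ioc_sm_ready));
	return 0;
}
```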
|
/drivers/power/supply/

wm8350_power.c
     52  u16 sm, ov, co, chrg;                                         in wm8350_get_supplies()   local
     55  sm = wm8350_reg_read(wm8350, WM8350_STATE_MACHINE_STATUS);    in wm8350_get_supplies()
     61  sm = (sm & WM8350_USB_SM_MASK) >> WM8350_USB_SM_SHIFT;        in wm8350_get_supplies()
     68  if (((sm == WM8350_USB_SM_100_SLV) ||                         in wm8350_get_supplies()
     69  (sm == WM8350_USB_SM_500_SLV) ||                              in wm8350_get_supplies()
     70  (sm == WM8350_USB_SM_STDBY_SLV))                              in wm8350_get_supplies()
     73  else if (((sm == WM8350_USB_SM_100_SLV) ||                    in wm8350_get_supplies()
     74  (sm == WM8350_USB_SM_500_SLV) ||                              in wm8350_get_supplies()
     75  (sm == WM8350_USB_SM_STDBY_SLV))                              in wm8350_get_supplies()
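
wm8350_get_supplies() reads WM8350_STATE_MACHINE_STATUS, isolates the USB state-machine field with WM8350_USB_SM_MASK and WM8350_USB_SM_SHIFT, and then tests the result against the slave-mode states to decide which supply is active. A small sketch of that mask, shift and compare pattern; the register layout and values below are invented for illustration and are not the real WM8350 definitions:

```c
#include <stdint.h>
#include <stdio.h>

/* Invented field layout: bits [10:8] hold the USB state-machine state. */
#define SKETCH_USB_SM_MASK	0x0700
#define SKETCH_USB_SM_SHIFT	8

#define SKETCH_USB_SM_100_SLV	1
#define SKETCH_USB_SM_500_SLV	5
#define SKETCH_USB_SM_STDBY_SLV	7

static int usb_is_slave_supply(uint16_t status_reg)
{
	uint16_t sm = (status_reg & SKETCH_USB_SM_MASK) >> SKETCH_USB_SM_SHIFT;

	return sm == SKETCH_USB_SM_100_SLV ||
	       sm == SKETCH_USB_SM_500_SLV ||
	       sm == SKETCH_USB_SM_STDBY_SLV;
}

int main(void)
{
	printf("0x0500 -> %d\n", usb_is_slave_supply(0x0500));	/* 500 mA slave */
	printf("0x0200 -> %d\n", usb_is_slave_supply(0x0200));	/* not a slave state */
	return 0;
}
```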
|