/linux/arch/x86/kvm/

emulate.c
   252  ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);  in writeback_registers()
   928  rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,  in __do_insn_fetch_bytes()
  3000  ctxt->_eip = ctxt->eip;  in emulator_task_switch()
  3277  if (ctxt->ops->set_cr(ctxt, cr_num, ctxt->src.val))  in em_cr_write()
  3445  ctxt->_eip = ctxt->eip;  in em_hypercall()
  3526  ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);  in em_smsw()
  3532  ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)  in em_lmsw()
  3571  ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,  in em_out()
  4740  ctxt->_eip = ctxt->eip;  in x86_decode_insn()
  5304  ctxt->ops->halt(ctxt);  in x86_emulate_insn()
  [all …]
|
kvm_emulate.h
   379  #define KVM_EMULATOR_BUG_ON(cond, ctxt) \  argument
   384  ctxt->ops->vm_bugged(ctxt); \
   526  if (!(ctxt->regs_valid & (1 << nr))) {  in reg_read()
   527  ctxt->regs_valid |= 1 << nr;  in reg_read()
   528  ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);  in reg_read()
   530  return ctxt->_regs[nr];  in reg_read()
   541  ctxt->regs_valid |= 1 << nr;  in reg_write()
   542  ctxt->regs_dirty |= 1 << nr;  in reg_write()
   543  return &ctxt->_regs[nr];  in reg_write()
   548  reg_read(ctxt, nr);  in reg_rmw()
  [all …]
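The reg_read()/reg_write() hits above show the emulator's lazy GPR cache: a register is fetched through ctxt->ops->read_gpr() only on first use (tracked in regs_valid), writes set a bit in regs_dirty, and writeback_registers() in emulate.c flushes only the dirty ones. A minimal standalone sketch of the same technique; the struct layout and ops table are simplified stand-ins, not KVM's exact definitions:

```c
#include <stdint.h>

#define NR_REGS 16

struct emu_ops {
	uint64_t (*read_gpr)(void *vcpu, unsigned nr);
	void     (*write_gpr)(void *vcpu, unsigned nr, uint64_t val);
};

struct emu_ctxt {
	const struct emu_ops *ops;
	void     *vcpu;
	uint32_t  regs_valid;	/* bit n set: _regs[n] holds a fetched value */
	uint32_t  regs_dirty;	/* bit n set: _regs[n] must be written back  */
	uint64_t  _regs[NR_REGS];
};

static uint64_t reg_read(struct emu_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1u << nr))) {	/* first access: fill cache */
		ctxt->regs_valid |= 1u << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt->vcpu, nr);
	}
	return ctxt->_regs[nr];
}

static uint64_t *reg_write(struct emu_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1u << nr;
	ctxt->regs_dirty |= 1u << nr;	/* remember to flush this one later */
	return &ctxt->_regs[nr];
}

static void writeback_registers(struct emu_ctxt *ctxt)
{
	for (unsigned nr = 0; nr < NR_REGS; nr++)
		if (ctxt->regs_dirty & (1u << nr))
			ctxt->ops->write_gpr(ctxt->vcpu, nr, ctxt->_regs[nr]);
	ctxt->regs_dirty = 0;
}
```

The payoff is that an emulated instruction touching two registers costs two vmcs/ops accesses at most, instead of snapshotting the whole register file on every entry.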
|
/linux/arch/arm64/kvm/hyp/include/hyp/

sysreg-sr.h
    26  if (ctxt_has_s1poe(ctxt))  in __sysreg_save_common_state()
    41  vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);  in ctxt_to_vcpu()
    60  vcpu = ctxt_to_vcpu(ctxt);  in ctxt_has_s1pie()
    71  vcpu = ctxt_to_vcpu(ctxt);  in ctxt_has_tcrx()
    82  vcpu = ctxt_to_vcpu(ctxt);  in ctxt_has_s1poe()
    93  if (ctxt_has_tcrx(ctxt)) {  in __sysreg_save_el1_state()
   101  if (ctxt_has_s1poe(ctxt))  in __sysreg_save_el1_state()
   116  if (ctxt_has_mte(ctxt)) {  in __sysreg_save_el1_state()
   145  if (ctxt_has_s1poe(ctxt))  in __sysreg_restore_common_state()
   178  if (ctxt_has_tcrx(ctxt)) {  in __sysreg_restore_el1_state()
  [all …]
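ctxt_to_vcpu() (line 41) recovers the enclosing struct kvm_vcpu from an embedded kvm_cpu_context with container_of(), which is what lets the ctxt_has_*() feature checks start from nothing but the context pointer. A generic illustration of the idiom, using the classic simplified container_of definition rather than the kernel's type-checked one:

```c
#include <stddef.h>

/* simplified version of the kernel macro: subtract the member offset
 * to walk back from an embedded member to its enclosing object */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct cpu_context { unsigned long sysregs[8]; };

struct vcpu {
	int id;
	struct cpu_context ctxt;	/* embedded by value, not a pointer */
};

static struct vcpu *ctxt_to_vcpu(struct cpu_context *ctxt)
{
	return container_of(ctxt, struct vcpu, ctxt);
}
```

This only works because the context is embedded in the vcpu by value; if it were a separately allocated pointer, the offset arithmetic would be meaningless.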
|
/linux/net/sunrpc/xprtrdma/

svc_rdma_recvfrom.c
   126  ctxt = kzalloc_node(sizeof(*ctxt), GFP_KERNEL, node);  in svc_rdma_recv_ctxt_alloc()
   127  if (!ctxt)  in svc_rdma_recv_ctxt_alloc()
   144  ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;  in svc_rdma_recv_ctxt_alloc()
   145  ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;  in svc_rdma_recv_ctxt_alloc()
   153  return ctxt;  in svc_rdma_recv_ctxt_alloc()
   222  release_pages(ctxt->rc_pages, ctxt->rc_page_count);  in svc_rdma_recv_ctxt_put()
   247  if (ctxt)  in svc_rdma_release_ctxt()
   265  if (!ctxt)  in svc_rdma_refresh_recvs()
   314  if (!ctxt)  in svc_rdma_post_recvs()
   942  if (ctxt)  in svc_rdma_recvfrom()
  [all …]
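Lines 126-145 show a common shape: the receive context is allocated NUMA-locally with kzalloc_node(), and the RDMA work-request fields are pointed at storage embedded in the same context, so a single allocation covers the context, its completion entry, and its scatter/gather element. A hedged userspace sketch of that wiring; the member names mimic the rc_* fields, but the types are invented stand-ins for the ib_* structures:

```c
#include <stdlib.h>

struct recv_ctxt {
	struct cqe_like { void (*done)(void); } rc_cqe;
	struct sge_like { void *addr; unsigned int len; } rc_recv_sge;
	struct recv_wr_like {
		struct cqe_like *wr_cqe;
		struct sge_like *sg_list;
		int num_sge;
	} rc_recv_wr;
};

static struct recv_ctxt *recv_ctxt_alloc(void)
{
	struct recv_ctxt *ctxt = calloc(1, sizeof(*ctxt));

	if (!ctxt)
		return NULL;
	/* the work request references storage inside the context itself,
	 * so no second allocation and no separate lifetime to manage */
	ctxt->rc_recv_wr.wr_cqe  = &ctxt->rc_cqe;
	ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
	ctxt->rc_recv_wr.num_sge = 1;
	return ctxt;
}
```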
|
svc_rdma_sendto.c
   127  if (!ctxt)  in svc_rdma_send_ctxt_alloc()
   141  ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;  in svc_rdma_send_ctxt_alloc()
   142  ctxt->sc_send_wr.sg_list = ctxt->sc_sges;  in svc_rdma_send_ctxt_alloc()
   146  xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,  in svc_rdma_send_ctxt_alloc()
   205  xdr_init_encode(&ctxt->sc_stream, &ctxt->sc_hdrbuf,  in svc_rdma_send_ctxt_get()
   212  ctxt->sc_wr_chain = &ctxt->sc_send_wr;  in svc_rdma_send_ctxt_get()
   219  if (!ctxt)  in svc_rdma_send_ctxt_get()
   233  release_pages(ctxt->sc_pages, ctxt->sc_page_count);  in svc_rdma_send_ctxt_release()
   256  svc_rdma_send_ctxt_release(ctxt->sc_rdma, ctxt);  in svc_rdma_send_ctxt_put_async()
   578  ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;  in svc_rdma_page_dma_map()
  [all …]
|
svc_rdma_rw.c
    68  ctxt = kmalloc_node(struct_size(ctxt, rw_first_sgl, first_sgl_nents),  in svc_rdma_get_rw_ctxt()
    70  if (!ctxt)  in svc_rdma_get_rw_ctxt()
    77  ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;  in svc_rdma_get_rw_ctxt()
    82  return ctxt;  in svc_rdma_get_rw_ctxt()
    85  kfree(ctxt);  in svc_rdma_get_rw_ctxt()
    94  sg_free_table_chained(&ctxt->rw_sg_table, ctxt->rw_first_sgl_nents);  in __svc_rdma_put_rw_ctxt()
   116  kfree(ctxt);  in svc_rdma_destroy_rw_ctxts()
   139  ctxt->rw_sg_table.sgl, ctxt->rw_nents,  in svc_rdma_rw_ctx_init()
   428  ctxt->rw_nents = 1;  in svc_rdma_vec_to_sg()
   493  if (!ctxt)  in svc_rdma_build_writes()
  [all …]
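Line 68 sizes the allocation with struct_size(), the overflow-checked sizeof-plus-flexible-array helper from <linux/overflow.h>, and line 77 points the scatter table at the inline rw_first_sgl so the common case needs no second allocation. A simplified sketch of the pattern outside the kernel; the macro here is a plain stand-in without the kernel's overflow checking:

```c
#include <stdlib.h>

struct sg_entry { void *addr; unsigned long len; };

struct rw_ctxt {
	struct sg_entry *sgl;		/* points at first_sgl when it fits */
	unsigned int     nents;
	struct sg_entry  first_sgl[];	/* inline tail, flexible array member */
};

/* userspace stand-in for the kernel's overflow-checked struct_size() */
#define STRUCT_SIZE(p, member, n) \
	(sizeof(*(p)) + sizeof((p)->member[0]) * (n))

static struct rw_ctxt *get_rw_ctxt(unsigned int first_sgl_nents)
{
	struct rw_ctxt *ctxt;

	ctxt = calloc(1, STRUCT_SIZE(ctxt, first_sgl, first_sgl_nents));
	if (!ctxt)
		return NULL;
	ctxt->sgl = ctxt->first_sgl;	/* common case: use the inline table */
	ctxt->nents = first_sgl_nents;
	return ctxt;
}
```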
|
/linux/drivers/net/ethernet/intel/ice/

ice_vsi_vlan_lib.c
    97  ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);  in ice_vsi_manage_vlan_insertion()
   144  ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);  in ice_vsi_manage_vlan_stripping()
   242  ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);  in __ice_vsi_set_inner_port_vlan()
   295  ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);  in ice_vsi_clear_inner_port_vlan()
   339  ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);  in ice_cfg_vlan_pruning()
   481  ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);  in ice_vsi_ena_outer_stripping()
   532  ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);  in ice_vsi_dis_outer_stripping()
   587  ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);  in ice_vsi_ena_outer_insertion()
   639  ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);  in ice_vsi_dis_outer_insertion()
   697  ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);  in __ice_vsi_set_outer_port_vlan()
  [all …]
|
/linux/kernel/printk/

nbcon.c
   255  if (ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio)  in nbcon_context_try_acquire_direct()
   604  ctxt->seq = nbcon_seq_read(ctxt->console);  in nbcon_context_try_acquire()
   774  struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);  in nbcon_can_proceed()  local
   837  struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);  in nbcon_write_context_set_buf()  local
   861  struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);  in nbcon_enter_unsafe()  local
   885  struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);  in nbcon_exit_unsafe()  local
   914  struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);  in nbcon_reacquire_nobuf()  local
   942  struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);  in nbcon_emit_next_record()  local
  1089  struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt);  in nbcon_emit_one()  local
  1174  struct nbcon_context *ctxt = &ACCESS_PRIVATE(&wctxt, ctxt);  in nbcon_kthread_func()  local
  [all …]
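The repeated `&ACCESS_PRIVATE(wctxt, ctxt)` hits recover the nbcon-internal context that is embedded in the public write context handed to console drivers; __private hides the member from sparse so only code that goes through ACCESS_PRIVATE() may touch it. A sketch of the idea; the real definitions in <linux/compiler_types.h> differ in detail (notably the __force annotation in the checker branch):

```c
/* Under sparse (__CHECKER__), __private marks a member so that direct
 * dereference warns; ACCESS_PRIVATE() casts the annotation away for
 * code that is allowed in. Without sparse, both are no-ops. */
#ifdef __CHECKER__
# define __private __attribute__((noderef))
# define ACCESS_PRIVATE(p, member) \
	(*((typeof((p)->member) *)&(p)->member))
#else
# define __private
# define ACCESS_PRIVATE(p, member) ((p)->member)
#endif

struct nbcon_context_like { int prio; };

struct nbcon_write_context_like {
	/* console drivers get this struct but must not poke at ctxt */
	struct nbcon_context_like __private ctxt;
};

/* nbcon core code unwraps it exactly like the lines above */
static struct nbcon_context_like *
wctxt_to_ctxt(struct nbcon_write_context_like *wctxt)
{
	return &ACCESS_PRIVATE(wctxt, ctxt);
}
```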
|
/linux/arch/x86/power/

cpu.c
    89  store_idt(&ctxt->idt);  in __save_processor_state()
   100  store_tr(ctxt->tr);  in __save_processor_state()
   123  ctxt->cr0 = read_cr0();  in __save_processor_state()
   124  ctxt->cr2 = read_cr2();  in __save_processor_state()
   129  msr_save_context(ctxt);  in __save_processor_state()
   206  if (ctxt->cr4)  in __restore_processor_state()
   211  __write_cr4(ctxt->cr4);  in __restore_processor_state()
   213  write_cr3(ctxt->cr3);  in __restore_processor_state()
   214  write_cr2(ctxt->cr2);  in __restore_processor_state()
   215  write_cr0(ctxt->cr0);  in __restore_processor_state()
  [all …]
|
/linux/drivers/net/wireless/intel/iwlwifi/mvm/

phy-ctxt.c
    74  ctxt->color));  in iwl_mvm_phy_ctxt_cmd_hdr()
   280  ctxt->ref);  in iwl_mvm_phy_ctxt_add()
   284  ctxt->width = chandef->width;  in iwl_mvm_phy_ctxt_add()
   294  ctxt->ref++;  in iwl_mvm_phy_ctxt_add()
   310  WARN_ON(!ctxt->ref);  in iwl_mvm_phy_ctxt_ref()
   311  ctxt->ref++;  in iwl_mvm_phy_ctxt_ref()
   328  if (WARN_ON_ONCE(!ctxt->ref))  in iwl_mvm_phy_ctxt_changed()
   356  ctxt->width = chandef->width;  in iwl_mvm_phy_ctxt_changed()
   369  if (WARN_ON_ONCE(!ctxt))  in iwl_mvm_phy_ctxt_unref()
   372  ctxt->ref--;  in iwl_mvm_phy_ctxt_unref()
  [all …]
|
/linux/fs/bcachefs/

move.c
    84  struct moving_context *ctxt = io->write.ctxt;  in move_free()  local
   102  struct moving_context *ctxt = io->write.ctxt;  in move_write_done()  local
   147  struct moving_context *ctxt = io->write.ctxt;  in move_read_endio()  local
   179  move_ctxt_wait_event(ctxt, list_empty(&ctxt->reads));  in bch2_moving_ctxt_flush_all()
   200  memset(ctxt, 0, sizeof(*ctxt));  in bch2_moving_ctxt_exit()
   210  memset(ctxt, 0, sizeof(*ctxt));  in bch2_moving_ctxt_init()
   216  ctxt->wp = wp;  in bch2_moving_ctxt_init()
   261  if (ctxt->stats)  in bch2_move_extent()
   290  io->write.ctxt = ctxt;  in bch2_move_extent()
   320  if (ctxt->rate)  in bch2_move_extent()
  [all …]
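bch2_moving_ctxt_flush_all() (line 179) waits for `list_empty(&ctxt->reads)`: in-flight reads are tracked on the context, and completion handlers like move_read_endio() remove themselves and wake the waiter. A condensed pthread sketch of that wait-for-quiesce shape; all names are illustrative, and a counter stands in for the reads list:

```c
#include <pthread.h>

struct moving_ctxt {
	pthread_mutex_t lock;
	pthread_cond_t  wait;
	int             reads_in_flight;	/* stand-in for the reads list */
};

/* completion path: called when one read finishes */
static void move_read_done(struct moving_ctxt *ctxt)
{
	pthread_mutex_lock(&ctxt->lock);
	if (--ctxt->reads_in_flight == 0)
		pthread_cond_broadcast(&ctxt->wait);	/* wake the flusher */
	pthread_mutex_unlock(&ctxt->lock);
}

/* flush path: block until "no reads outstanding" holds */
static void moving_ctxt_flush_all(struct moving_ctxt *ctxt)
{
	pthread_mutex_lock(&ctxt->lock);
	while (ctxt->reads_in_flight > 0)
		pthread_cond_wait(&ctxt->wait, &ctxt->lock);
	pthread_mutex_unlock(&ctxt->lock);
}
```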
|
movinggc.c
   138  bch2_trans_unlock_long(ctxt->trans);  in move_buckets_wait()
   153  struct btree_trans *trans = ctxt->trans;  in bch2_copygc_get_buckets()
   211  struct btree_trans *trans = ctxt->trans;  in bch2_copygc()
   323  struct moving_context ctxt;  in bch2_copygc_thread()  local
   350  bch2_trans_unlock_long(ctxt.trans);  in bch2_copygc_thread()
   354  move_buckets_wait(&ctxt, buckets, true);  in bch2_copygc_thread()
   360  move_buckets_wait(&ctxt, buckets, true);  in bch2_copygc_thread()
   371  move_buckets_wait(&ctxt, buckets, true);  in bch2_copygc_thread()
   392  move_buckets_wait(&ctxt, buckets, true);  in bch2_copygc_thread()
   398  move_buckets_wait(&ctxt, buckets, true);  in bch2_copygc_thread()
  [all …]
|
rebalance.c
   200  struct btree_trans *trans = ctxt->trans;  in do_rebalance_extent()
   209  ctxt->stats = &r->work_stats;  in do_rebalance_extent()
   236  bch2_move_ctxt_wait_for_io(ctxt);  in do_rebalance_extent()
   276  struct btree_trans *trans = ctxt->trans;  in do_rebalance_scan()
   281  ctxt->stats = &r->scan_stats;  in do_rebalance_scan()
   324  struct btree_trans *trans = ctxt->trans;  in do_rebalance()
   340  while (!bch2_move_ratelimit(ctxt)) {  in do_rebalance()
   342  bch2_moving_ctxt_flush_all(ctxt);  in do_rebalance()
   379  bch2_moving_ctxt_flush_all(ctxt);  in do_rebalance()
   393  struct moving_context ctxt;  in bch2_rebalance_thread()  local
  [all …]
|
/linux/arch/x86/coco/sev/

core.c
   322  ctxt->fi.cr2 = ctxt->regs->ip;  in __vc_decode_user_insn()
   332  if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, insn_bytes))  in __vc_decode_user_insn()
   350  ctxt->fi.cr2 = ctxt->regs->ip;  in __vc_decode_kern_insn()
   583  ctxt->fi.error_code = 0;  in vc_ioio_check()
   593  ctxt->regs->orig_ax = ctxt->fi.error_code;  in vc_forward_exception()
  1591  ctxt->regs->orig_ax = ctxt->fi.error_code;  in vc_early_forward_exception()
  1601  offset = insn_get_modrm_rm_off(&ctxt->insn, ctxt->regs);  in vc_insn_get_rm()
  1619  ref = insn_get_addr_ref(&ctxt->insn, ctxt->regs);  in vc_do_mmio()
  1696  ctxt->regs->si += off;  in vc_handle_mmio_movs()
  1697  ctxt->regs->di += off;  in vc_handle_mmio_movs()
  [all …]
|
shared.c
   212  memset(ctxt, 0, sizeof(*ctxt));  in vc_init_em_ctxt()
   213  ctxt->regs = regs;  in vc_init_em_ctxt()
   216  ret = vc_decode_insn(ctxt);  in vc_init_em_ctxt()
   223  ctxt->regs->ip += ctxt->insn.length;  in vc_finish_insn()
   242  ctxt->fi.vector = v;  in verify_exception_info()
   338  struct es_em_ctxt ctxt;  in svsm_perform_ghcb_protocol()  local
   365  vc_forward_exception(&ctxt);  in svsm_perform_ghcb_protocol()
   375  struct es_em_ctxt *ctxt,  in sev_es_ghcb_hv_call()  argument
   791  ctxt->fi.cr2 = address;  in vc_insn_string_check()
   874  struct insn *insn = &ctxt->insn;  in vc_ioio_exitinfo()
  [all …]
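Lines 212-223 trace the #VC emulation lifecycle: vc_init_em_ctxt() zeroes the context, attaches the saved registers, and decodes the faulting instruction, then vc_finish_insn() completes emulation by advancing the saved IP past the decoded length. A minimal sketch of that init/decode/finish flow, with a stub in place of the real x86 instruction decoder:

```c
#include <stdint.h>
#include <string.h>

struct regs { uint64_t ip; };
struct insn { uint8_t length; };

struct em_ctxt {
	struct regs *regs;
	struct insn  insn;
};

/* stand-in for the real instruction decoder */
static int decode_insn(struct em_ctxt *ctxt)
{
	ctxt->insn.length = 2;	/* pretend: a 2-byte instruction */
	return 0;
}

static int init_em_ctxt(struct em_ctxt *ctxt, struct regs *regs)
{
	memset(ctxt, 0, sizeof(*ctxt));	/* start from a clean context */
	ctxt->regs = regs;
	return decode_insn(ctxt);
}

static void finish_insn(struct em_ctxt *ctxt)
{
	/* emulation done: resume the guest after the handled instruction */
	ctxt->regs->ip += ctxt->insn.length;
}
```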
|
/linux/arch/arm64/kvm/hyp/nvhe/

sysreg-sr.c
    21  void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt)  in __sysreg_save_state_nvhe()  argument
    23  __sysreg_save_el1_state(ctxt);  in __sysreg_save_state_nvhe()
    24  __sysreg_save_common_state(ctxt);  in __sysreg_save_state_nvhe()
    25  __sysreg_save_user_state(ctxt);  in __sysreg_save_state_nvhe()
    26  __sysreg_save_el2_return_state(ctxt);  in __sysreg_save_state_nvhe()
    29  void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)  in __sysreg_restore_state_nvhe()  argument
    31  __sysreg_restore_el1_state(ctxt);  in __sysreg_restore_state_nvhe()
    32  __sysreg_restore_common_state(ctxt);  in __sysreg_restore_state_nvhe()
    33  __sysreg_restore_user_state(ctxt);  in __sysreg_restore_state_nvhe()
    34  __sysreg_restore_el2_return_state(ctxt);  in __sysreg_restore_state_nvhe()
|
ffa.c
   100  cpu_reg(ctxt, 0) = res->a0;  in ffa_set_retval()
   101  cpu_reg(ctxt, 1) = res->a1;  in ffa_set_retval()
   102  cpu_reg(ctxt, 2) = res->a2;  in ffa_set_retval()
   103  cpu_reg(ctxt, 3) = res->a3;  in ffa_set_retval()
   192  struct kvm_cpu_context *ctxt)  in do_ffa_rxtx_map()  argument
   196  DECLARE_REG(u32, npages, ctxt, 3);  in do_ffa_rxtx_map()
   273  DECLARE_REG(u32, id, ctxt, 1);  in do_ffa_rxtx_unmap()
   433  DECLARE_REG(u32, len, ctxt, 1);  in __do_ffa_mem_xfer()
   529  DECLARE_REG(u32, flags, ctxt, 3);  in do_ffa_mem_reclaim()
   640  DECLARE_REG(u32, id, ctxt, 1);  in do_ffa_features()
  [all …]
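The FF-A handlers marshal SMCCC arguments straight out of the saved guest GPR file: DECLARE_REG() declares a typed local loaded from a numbered register, and cpu_reg() writes results back before returning to the guest. A simplified standalone sketch; the macro bodies and types here are assumptions modeled on the hyp code, not copied from it:

```c
#include <stdint.h>

struct cpu_context { uint64_t regs[31]; };	/* saved x0..x30 */

/* read/write a saved general-purpose register by number */
#define cpu_reg(ctxt, r)  ((ctxt)->regs[(r)])

/* declare a typed local initialized from saved register <reg> */
#define DECLARE_REG(type, name, ctxt, reg) \
	type name = (type)cpu_reg(ctxt, (reg))

struct arm_smccc_res { uint64_t a0, a1, a2, a3; };

static void ffa_set_retval(struct cpu_context *ctxt,
			   const struct arm_smccc_res *res)
{
	cpu_reg(ctxt, 0) = res->a0;
	cpu_reg(ctxt, 1) = res->a1;
	cpu_reg(ctxt, 2) = res->a2;
	cpu_reg(ctxt, 3) = res->a3;
}

static void do_ffa_rxtx_unmap(struct cpu_context *ctxt)
{
	DECLARE_REG(uint32_t, id, ctxt, 1);	/* endpoint ID from x1 */

	(void)id;	/* ... validate and handle the call ... */
}
```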
|
/linux/arch/arm64/kvm/hyp/vhe/

sysreg-sr.c
    28  void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)  in sysreg_save_host_state_vhe()  argument
    30  __sysreg_save_common_state(ctxt);  in sysreg_save_host_state_vhe()
    34  void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)  in sysreg_save_guest_state_vhe()  argument
    36  __sysreg_save_common_state(ctxt);  in sysreg_save_guest_state_vhe()
    37  __sysreg_save_el2_return_state(ctxt);  in sysreg_save_guest_state_vhe()
    41  void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)  in sysreg_restore_host_state_vhe()  argument
    43  __sysreg_restore_common_state(ctxt);  in sysreg_restore_host_state_vhe()
    49  __sysreg_restore_common_state(ctxt);  in sysreg_restore_guest_state_vhe()
    50  __sysreg_restore_el2_return_state(ctxt);  in sysreg_restore_guest_state_vhe()
    67  struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;  in __vcpu_load_switch_sysregs()
  [all …]
|
/linux/arch/arm64/include/asm/

kvm_asm.h
   306  .macro get_vcpu_ptr vcpu, ctxt
   307  get_host_ctxt \ctxt, \vcpu
   308  ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
   311  .macro get_loaded_vcpu vcpu, ctxt
   313  ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
   316  .macro set_loaded_vcpu vcpu, ctxt, tmp
   345  .macro save_callee_saved_regs ctxt
   355  .macro restore_callee_saved_regs ctxt
   366  .macro save_sp_el0 ctxt, tmp
   368  str \tmp, [\ctxt, #CPU_SP_EL0_OFFSET]
  [all …]
|
/linux/drivers/infiniband/hw/hfi1/

trace_ctxts.h
    25  __field(unsigned int, ctxt)
    37  __entry->ctxt = uctxt->ctxt;
    50  __entry->ctxt,
    69  TP_ARGS(dd, ctxt, subctxt, cinfo),
    71  __field(unsigned int, ctxt)
    80  __entry->ctxt = ctxt;
    90  __entry->ctxt,
   102  TP_PROTO(unsigned int ctxt),
   103  TP_ARGS(ctxt),
   104  TP_STRUCT__entry(__field(unsigned int, ctxt)),
  [all …]
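These hits are the standard TRACE_EVENT() recipe: __field() reserves space in the ring-buffer record, TP_fast_assign() copies values at the trace site (`__entry->ctxt = uctxt->ctxt`), and TP_printk() only formats when the buffer is read. A sketch of a comparable event definition; the event name and fields are invented, and the usual TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE boilerplate a real trace header needs is elided:

```c
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sample

#if !defined(_TRACE_SAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SAMPLE_H

#include <linux/tracepoint.h>

TRACE_EVENT(sample_ctxt_open,
	TP_PROTO(unsigned int ctxt, u16 subctxt),
	TP_ARGS(ctxt, subctxt),
	TP_STRUCT__entry(
		__field(unsigned int, ctxt)	/* space reserved in the record */
		__field(u16, subctxt)
	),
	TP_fast_assign(
		__entry->ctxt = ctxt;		/* copied at the trace site */
		__entry->subctxt = subctxt;
	),
	/* formatted lazily, only when the trace buffer is read */
	TP_printk("ctxt %u subctxt %u", __entry->ctxt, __entry->subctxt)
);

#endif /* _TRACE_SAMPLE_H */

#include <trace/define_trace.h>
```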
|
trace_rx.h
    29  __field(u32, ctxt)
    38  __entry->ctxt = packet->rcd->ctxt;
    48  __entry->ctxt,
    62  __field(u32, ctxt)
    67  __entry->ctxt = rcd->ctxt;
    73  __entry->ctxt,
    80  TP_PROTO(unsigned int ctxt, u16 subctxt, const char *type,
    82  TP_ARGS(ctxt, subctxt, type, start, end),
    84  __field(unsigned int, ctxt)
    91  __entry->ctxt = ctxt;
  [all …]
|
trace_tx.h
   177  __entry->ctxt = ctxt;
   182  __entry->ctxt,
   197  __entry->ctxt = ctxt;
   221  __entry->ctxt = ctxt;
   228  __entry->ctxt,
   263  __entry->ctxt = ctxt;
   291  __entry->ctxt = ctxt;
   499  __entry->ctxt = ctxt;
   562  __entry->ctxt = ctxt;
   604  __entry->ctxt = ctxt;
  [all …]
|
/linux/arch/x86/xen/

smp_pv.c
   227  struct vcpu_guest_context *ctxt;  in cpu_initialize_context()  local
   234  ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);  in cpu_initialize_context()
   235  if (ctxt == NULL) {  in cpu_initialize_context()
   248  ctxt->flags = VGCF_IN_KERNEL;  in cpu_initialize_context()
   250  ctxt->user_regs.ds = __USER_DS;  in cpu_initialize_context()
   251  ctxt->user_regs.es = __USER_DS;  in cpu_initialize_context()
   264  ctxt->gdt_frames[0] = gdt_mfn;  in cpu_initialize_context()
   272  ctxt->kernel_ss = __KERNEL_DS;  in cpu_initialize_context()
   276  ctxt->event_callback_eip =  in cpu_initialize_context()
   278  ctxt->failsafe_callback_eip =  in cpu_initialize_context()
  [all …]
|
pmu.c
    29  #define field_offset(ctxt, field) ((void *)((uintptr_t)ctxt + \  argument
    30  (uintptr_t)ctxt->field))
   201  struct xen_pmu_intel_ctxt *ctxt;  in xen_intel_pmu_emulate()  local
   215  reg = &ctxt->global_ovf_ctrl;  in xen_intel_pmu_emulate()
   218  reg = &ctxt->global_status;  in xen_intel_pmu_emulate()
   221  reg = &ctxt->global_ctrl;  in xen_intel_pmu_emulate()
   224  reg = &ctxt->fixed_ctrl;  in xen_intel_pmu_emulate()
   264  struct xen_pmu_amd_ctxt *ctxt;  in xen_amd_pmu_emulate()  local
   276  ctxt = &xenpmu_data->pmu.c.amd;  in xen_amd_pmu_emulate()
   351  struct xen_pmu_amd_ctxt *ctxt;  in xen_amd_read_pmc()  local
  [all …]
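field_offset() (lines 29-30) resolves a byte offset stored inside the shared PMU context into a pointer: the Xen ABI passes variable-length members as offsets from the context base, so the layout stays valid no matter which address space maps it. A standalone illustration of that self-relative-offset idiom with an invented struct:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* resolve a byte offset stored in 'field' relative to the context base */
#define field_offset(ctxt, field) \
	((void *)((uintptr_t)(ctxt) + (uintptr_t)(ctxt)->field))

struct pmu_ctxt {
	uint64_t counters;	/* byte offset of the counter array below */
	uint64_t data[4];
};

int main(void)
{
	struct pmu_ctxt c = { .counters = offsetof(struct pmu_ctxt, data) };
	uint64_t *regs = field_offset(&c, counters);

	regs[0] = 42;	/* writes c.data[0] through the resolved pointer */
	printf("%llu\n", (unsigned long long)c.data[0]);
	return 0;
}
```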
|
/linux/fs/nilfs2/

btnode.c
   212  __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;  in nilfs_btnode_prepare_change_key()
   218  obh = ctxt->bh;  in nilfs_btnode_prepare_change_key()
   219  ctxt->newbh = NULL;  in nilfs_btnode_prepare_change_key()
   258  ctxt->newbh = nbh;  in nilfs_btnode_prepare_change_key()
   286  struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh;  in nilfs_btnode_commit_change_key()
   287  __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;  in nilfs_btnode_commit_change_key()
   314  ctxt->bh = nbh;  in nilfs_btnode_commit_change_key()
   335  struct nilfs_btnode_chkey_ctxt *ctxt)  in nilfs_btnode_abort_change_key()  argument
   337  struct buffer_head *nbh = ctxt->newbh;  in nilfs_btnode_abort_change_key()
   338  __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;  in nilfs_btnode_abort_change_key()
  [all …]
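The btnode change-key operation is split into prepare (stage the new buffer, may fail), commit (publish, must not fail), and abort (undo the staging), with nilfs_btnode_chkey_ctxt carrying oldkey/newkey/bh/newbh between the phases. A generic sketch of that prepare/commit/abort shape, not nilfs's actual buffer handling:

```c
#include <stdlib.h>

struct chkey_ctxt {
	unsigned long oldkey, newkey;
	void *bh;	/* current buffer */
	void *newbh;	/* staged replacement, set by prepare */
};

/* phase 1: acquire everything that can fail, touch nothing visible */
static int change_key_prepare(struct chkey_ctxt *ctxt)
{
	ctxt->newbh = malloc(4096);
	return ctxt->newbh ? 0 : -1;
}

/* phase 2: publish; from here on nothing is allowed to fail */
static void change_key_commit(struct chkey_ctxt *ctxt)
{
	free(ctxt->bh);
	ctxt->bh = ctxt->newbh;
	ctxt->newbh = NULL;
}

/* phase 3: undo whatever prepare staged, leaving the old state intact */
static void change_key_abort(struct chkey_ctxt *ctxt)
{
	free(ctxt->newbh);
	ctxt->newbh = NULL;
}
```

The value of the split is that the caller can prepare several operations, and only if all of them succeed commit the whole batch; any failure aborts the staged work without ever exposing a half-changed state.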
|