/linux-6.3-rc2/tools/virtio/ringtest/

virtio_ring_0_9.c
     41  struct guest {   (struct)
     52  } guest;   (argument)
     78  guest.avail_idx = 0;   in alloc_ring()
     83  guest.free_head = 0;   in alloc_ring()
    107  if (!guest.num_free)   in add_inbuf()
    113  head = guest.free_head;   in add_inbuf()
    115  guest.num_free--;   in add_inbuf()
    192  guest.num_free++;   in get_buf()
    193  guest.last_used_idx++;   in get_buf()
    234  guest.avail_idx,   in kick_available()
    [all …]

ring.c
     59  struct guest {   (struct)
     65  } guest;   (argument)
     92  guest.avail_idx = 0;   in alloc_ring()
     94  guest.last_used_idx = 0;   in alloc_ring()
    103  guest.num_free = ring_size;   in alloc_ring()
    116  if (!guest.num_free)   in add_inbuf()
    119  guest.num_free--;   in add_inbuf()
    159  guest.num_free++;   in get_buf()
    160  guest.last_used_idx++;   in get_buf()
    195  guest.avail_idx,   in kick_available()
    [all …]
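The hits above trace the guest-side bookkeeping in the ringtest microbenchmarks: add_inbuf() bails out when guest.num_free is zero, consumes guest.free_head and decrements num_free, while get_buf() returns the slot and advances last_used_idx. A minimal, self-contained sketch of that pattern follows; the struct layout, ring size and free-list policy here are illustrative assumptions, not the actual tool code.

    #include <stdbool.h>
    #include <stdio.h>

    #define RING_SIZE 256            /* assumed ring size, for illustration only */

    /* Illustrative stand-in for the tool's guest-side state. */
    static struct {
        unsigned int avail_idx;      /* next available index to publish */
        unsigned int last_used_idx;  /* last used entry we consumed */
        unsigned int num_free;       /* free descriptors left */
        unsigned int free_head;      /* head of the free descriptor list */
    } guest = { .num_free = RING_SIZE };

    /* Roughly mirrors the add_inbuf() bookkeeping visible in the hits. */
    static bool add_inbuf(void)
    {
        unsigned int head;

        if (!guest.num_free)         /* ring full: nothing to hand out */
            return false;
        head = guest.free_head;
        guest.free_head = (head + 1) % RING_SIZE;   /* assumed free-list policy */
        guest.num_free--;
        guest.avail_idx++;           /* publish one more available entry */
        return true;
    }

    /* Roughly mirrors the get_buf() side: reclaim a completed descriptor. */
    static void get_buf(void)
    {
        guest.num_free++;
        guest.last_used_idx++;
    }

    int main(void)
    {
        while (add_inbuf())
            ;                        /* fill the ring */
        get_buf();                   /* host "used" one buffer; reclaim it */
        printf("free=%u avail=%u used=%u\n",
               guest.num_free, guest.avail_idx, guest.last_used_idx);
        return 0;
    }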
/linux-6.3-rc2/drivers/misc/cxl/

of.c
     88  afu->guest->handle = addr;   in read_phys_addr()
     91  afu->guest->p2n_phys += addr;   in read_phys_addr()
     92  afu->guest->p2n_size = size;   in read_phys_addr()
    191  afu->irqs_max = afu->guest->max_ints;   in cxl_of_read_afu_properties()
    271  afu->guest->p2n_phys, afu->guest->p2n_size);   in cxl_of_read_afu_properties()
    330  kfree(adapter->guest->irq_avail);   in read_adapter_irq_config()
    331  adapter->guest->irq_avail = NULL;   in read_adapter_irq_config()
    402  if (adapter->guest->status == NULL)   in cxl_of_read_adapter_properties()
    408  adapter->guest->vendor = val;   in cxl_of_read_adapter_properties()
    412  adapter->guest->device = val;   in cxl_of_read_adapter_properties()
    [all …]

guest.c
    670  kfree(afu->guest);   in guest_release_afu()
    813  if (!(afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size))) {   in guest_map_slice_regs()
    997  afu->guest->parent = afu;   in cxl_guest_init_afu()
    998  afu->guest->handle_err = true;   in cxl_guest_init_afu()
   1019  kfree(afu->guest);   in cxl_guest_init_afu()
   1031  afu->guest->handle_err = false;   in cxl_guest_remove_afu()
   1054  if (adapter->guest) {   in free_adapter()
   1062  kfree(adapter->guest->status);   in free_adapter()
   1063  kfree(adapter->guest);   in free_adapter()
   1118  adapter->guest->pdev = pdev;   in cxl_guest_init_adapter()
    [all …]
/linux-6.3-rc2/arch/mips/include/asm/

cpu-features.h
    649  #define cpu_guest_has_conf1 (cpu_data[0].guest.conf & (1 << 1))
    652  #define cpu_guest_has_conf2 (cpu_data[0].guest.conf & (1 << 2))
    655  #define cpu_guest_has_conf3 (cpu_data[0].guest.conf & (1 << 3))
    658  #define cpu_guest_has_conf4 (cpu_data[0].guest.conf & (1 << 4))
    661  #define cpu_guest_has_conf5 (cpu_data[0].guest.conf & (1 << 5))
    664  #define cpu_guest_has_conf6 (cpu_data[0].guest.conf & (1 << 6))
    667  #define cpu_guest_has_conf7 (cpu_data[0].guest.conf & (1 << 7))
    670  #define cpu_guest_has_fpu (cpu_data[0].guest.options & MIPS_CPU_FPU)
    688  #define cpu_guest_has_htw (cpu_data[0].guest.options & MIPS_CPU_HTW)
    694  #define cpu_guest_has_mvh (cpu_data[0].guest.options & MIPS_CPU_MVH)
    [all …]
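These macros all follow one pattern: a guest capability is a single bit test against cpu_data[0].guest.conf or cpu_data[0].guest.options. A stripped-down sketch of the same idiom against a local structure; the struct, option-bit names and probed values below are made up for illustration, and only the bit-test shape comes from the hits above.

    #include <stdio.h>

    /* Illustrative option bits, standing in for MIPS_CPU_FPU and friends. */
    #define OPT_FPU (1u << 0)
    #define OPT_HTW (1u << 1)

    /* Illustrative per-CPU guest capability words. */
    struct guest_caps {
        unsigned long conf;     /* which ConfigN registers the guest exposes */
        unsigned long options;  /* optional architectural features */
    };

    static struct guest_caps cpu0_guest;

    /* Same shape as cpu_guest_has_conf1 / cpu_guest_has_fpu in the header. */
    #define guest_has_conf1 (cpu0_guest.conf & (1u << 1))
    #define guest_has_fpu   (cpu0_guest.options & OPT_FPU)

    int main(void)
    {
        cpu0_guest.conf = 1u << 1;      /* pretend probe: Config1 present */
        cpu0_guest.options = OPT_FPU;   /* pretend probe: FPU present */

        printf("conf1: %s, fpu: %s\n",
               guest_has_conf1 ? "yes" : "no",
               guest_has_fpu ? "yes" : "no");
        return 0;
    }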
/linux-6.3-rc2/Documentation/virt/kvm/x86/

running-nested-guests.rst
      7  A nested guest is the ability to run a guest inside another guest (it
      9  example is a KVM guest that in turn runs on a KVM guest (the rest of
     33  - L1 – level-1 guest; a VM running on L0; also called the "guest
     36  - L2 – level-2 guest; a VM running on L1, this is the "nested guest"
     46  (guest hypervisor), L3 (nested guest).
    144  Starting a nested guest (x86)
    187  Migrating an L1 guest, with a *live* nested guest in it, to another
    191  On AMD systems, once an L1 guest has started an L2 guest, the L1 guest
    194  or save-and-load an L1 guest while an L2 guest is running will result in
    205  - Migrating a nested guest (L2) to another L1 guest on the *same* bare
    [all …]

mmu.rst
      8  for presenting a standard x86 mmu to the guest, while translating guest
     47  gfn  guest frame number
     76  - when guest paging is disabled, we translate guest physical addresses to
     78  - when guest paging is enabled, we translate guest virtual addresses, to
     80  - when the guest launches a guest of its own, we translate nested guest
     81  virtual addresses, to nested guest physical addresses, to guest physical
    278  guest's cr3. This is expensive, so we keep all guest page tables write
    284  protection from the guest page, and allowing the guest to modify it freely.
    288  random guest data.
    301  - a true guest fault (the guest translation won't allow the access) (*)
    [all …]

amd-memory-encryption.rst
    112  __u32 policy; /* guest's policy */
    154  measurement. Since the guest owner knows the initial contents of the guest at
    178  issued to make the guest ready for the execution.
    186  SEV-enabled guest.
    195  __u32 handle; /* guest handle */
    196  __u32 policy; /* guest policy */
    200  SEV guest state:
    282  used by the guest owner with the KVM_SEV_LAUNCH_MEASURE.
    304  outgoing guest encryption context.
    392  __u32 policy; /* guest's policy */
    [all …]

cpuid.rst
      9  A guest running on a kvm host, can check some of its features using
     12  a guest.
     65  KVM_FEATURE_PV_UNHALT               7   guest checks this feature bit
     69  KVM_FEATURE_PV_TLB_FLUSH            9   guest checks this feature bit
     77  KVM_FEATURE_PV_SEND_IPI            11   guest checks this feature bit
     85  KVM_FEATURE_PV_SCHED_YIELD         13   guest checks this feature bit
     89  KVM_FEATURE_ASYNC_PF_INT           14   guest checks this feature bit
     95  KVM_FEATURE_MSI_EXT_DEST_ID        15   guest checks this feature bit
     99  KVM_FEATURE_HC_MAP_GPA_RANGE       16   guest checks this feature bit before
    106  KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24   host will warn if no guest-side
    [all …]

hypercalls.rst
     54  :Purpose: Trigger guest exit so that the host can check for pending
     83  The guest can map this shared page to access its supervisor register
     93  A vcpu of a paravirtualized guest that is busywaiting in guest
    107  :Purpose: Hypercall used to synchronize host and guest clocks.
    111  a0: guest physical address where host copies
    130  * tsc: guest TSC value used to calculate sec/nsec pair
    133  The hypercall lets a guest compute a precise timestamp across
    134  host and guest. The guest can use the returned TSC value to
    152  The hypercall lets a guest send multicast IPIs, with at most 128
    179  a0: the guest physical address of the start page
    [all …]

msr.rst
     40  guest has to check version before and after grabbing
     87  guest has to check version before and after grabbing
    127  coordinated between the guest and the hypervisor. Availability
    139  | | | guest vcpu has been paused by |
    287  a sequence counter. In other words, guest has to check
    321  EOI by clearing the bit in guest memory - this location will
    335  guest and clear the least significant bit in the memory area
    336  in the window between guest testing it to detect
    337  whether it can skip EOI apic write and between guest
    387  When a guest is started, bit 0 will be 0 if the guest has encrypted
    [all …]
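cpuid.rst above lists KVM paravirtual feature bits (e.g. KVM_FEATURE_PV_UNHALT is bit 7, KVM_FEATURE_PV_SCHED_YIELD bit 13) that a guest can test with the CPUID instruction. A rough userspace sketch of such a check follows; the bit positions come from the listing, but the feature leaf number 0x40000001 and reading the bits from EAX are assumptions from memory rather than quoted text, so treat this as illustrative only (x86 only).

    #include <stdio.h>

    /* Bit positions quoted in cpuid.rst above. */
    #define KVM_FEATURE_PV_UNHALT        7
    #define KVM_FEATURE_PV_SCHED_YIELD  13

    /* Plain CPUID wrapper; avoids __get_cpuid()'s max-leaf check, which
     * does not know about hypervisor leaves. */
    static void cpuid(unsigned int leaf, unsigned int *eax, unsigned int *ebx,
                      unsigned int *ecx, unsigned int *edx)
    {
        __asm__ volatile("cpuid"
                         : "=a"(*eax), "=b"(*ebx), "=c"(*ecx), "=d"(*edx)
                         : "a"(leaf), "c"(0));
    }

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* A real guest would first verify the hypervisor signature at leaf
         * 0x40000000 before trusting this leaf; skipped here for brevity.
         * 0x40000001 is assumed to be the KVM feature leaf, with the
         * KVM_FEATURE_* bits reported in EAX. */
        cpuid(0x40000001, &eax, &ebx, &ecx, &edx);

        printf("PV_UNHALT:      %s\n",
               (eax >> KVM_FEATURE_PV_UNHALT) & 1 ? "yes" : "no");
        printf("PV_SCHED_YIELD: %s\n",
               (eax >> KVM_FEATURE_PV_SCHED_YIELD) & 1 ? "yes" : "no");
        return 0;
    }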
/linux-6.3-rc2/Documentation/x86/

tdx.rst
      7  Intel's Trust Domain Extensions (TDX) protect confidential guest VMs from
     10  mode sits between the host and the guest and manages the guest/host
     13  Since the host cannot directly access guest registers or memory, much
     16  guest kernel. A #VE is handled entirely inside the guest kernel, but some
     20  guest to the hypervisor or the TDX module.
     64  indicates a bug in the guest. The guest may try to handle the #GP with a
     79  return values (in guest EAX/EBX/ECX/EDX) are configurable by the
    101  shared between guest and hypervisor and does not receive full TDX
    106  entries. This helps ensure that a guest does not place sensitive
    179  guest #VE handler then emulates the MMIO instruction inside the guest and
    [all …]
/linux-6.3-rc2/tools/perf/Documentation/

guest-files.txt
      4  Guest OS /proc/kallsyms file copy. perf reads it to get guest
      5  kernel symbols. Users copy it out from guest OS.
      8  Guest OS /proc/modules file copy. perf reads it to get guest
      9  kernel module information. Users copy it out from guest OS.
     14  --guest-code::
     15  Indicate that guest code can be found in the hypervisor process,

perf-kvm.txt
      6  perf-kvm - Tool to trace/measure kvm guest os
     11  'perf kvm' [--host] [--guest] [--guestmount=<path>
     28  default behavior of perf kvm as --guest, so if neither --host nor --guest
     33  Default('')        -> perf.data.guest
     35  --guest            -> perf.data.guest
     36  --host --guest     -> perf.data.kvm
     37  --host --no-guest  -> perf.data.host
     78  --guest::
     79  Collect guest side performance profile.
     81  :GMEXAMPLECMD: kvm --host --guest
     [all …]
/linux-6.3-rc2/Documentation/ABI/testing/

sysfs-hypervisor-xen
      6  Type of guest:
      7  "Xen": standard guest type on arm
      8  "HVM": fully virtualized guest (x86)
      9  "PV": paravirtualized guest (x86)
     10  "PVH": fully virtualized guest without legacy emulation (x86)
     22  "self"  The guest can profile itself
     23  "hv"    The guest can profile itself and, if it is
     25  "all"   The guest can profile itself, the hypervisor
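The ABI fragment above describes a sysfs attribute reporting the Xen guest type ("HVM", "PV", "PVH", ...). A small sketch of reading such an attribute from userspace; the exact path /sys/hypervisor/guest_type is an assumption here, not quoted in the listing, so consult the ABI file itself for the authoritative location.

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* Assumed attribute path; see the note above. */
        const char *path = "/sys/hypervisor/guest_type";
        char type[64] = "";
        FILE *f = fopen(path, "r");

        if (!f) {
            perror(path);            /* most likely not running under Xen */
            return 1;
        }
        if (fgets(type, sizeof(type), f))
            type[strcspn(type, "\n")] = '\0';
        fclose(f);

        printf("Xen guest type: %s\n", type);
        return 0;
    }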
/linux-6.3-rc2/Documentation/virt/hyperv/

vmbus.rst
      5  VMbus is a software construct provided by Hyper-V to guest VMs. It
      7  devices that Hyper-V presents to guest VMs. The control path is
      8  used to offer synthetic devices to the guest VM and, in some cases,
     10  channels for communicating between the device driver in the guest VM
     12  signaling primitives to allow Hyper-V and the guest to interrupt
     65  guest, and the "out" ring buffer is for messages from the guest to
     67  viewed by the guest side. The ring buffers are memory that is
    128  between the guest and the Hyper-V host, the actual data to be
    147  of guest memory can be targeted.
    151  single logical area of guest memory to be targeted.
    [all …]

overview.rst
      6  enlightened guest on Microsoft's Hyper-V hypervisor. Hyper-V
     25  returns control to the guest. This behavior is generally invisible
     46  between Hyper-V and the guest, along with various signaling
     70  * Linux tells Hyper-V the guest physical address (GPA) of the
     73  GPAs, which usually do not need to be contiguous in the guest
     87  range of 4 Kbytes. Since the Linux guest page size on x86/x64 is
     98  uses the usual approach of allocating guest memory and telling
    108  and the guest page originally allocated by Linux becomes visible
    135  A Linux guest CPU may be taken offline using the normal Linux
    169  via flags in synthetic MSRs that Hyper-V provides to the guest,
    [all …]
/linux-6.3-rc2/Documentation/virt/kvm/s390/

s390-pv.rst
     10  access VM state like guest memory or guest registers. Instead, the
     15  Each guest starts in non-protected mode and then may make a request to
     16  transition into protected mode. On transition, KVM registers the guest
     20  The Ultravisor will secure and decrypt the guest's boot memory
     22  starts/stops and injected interrupts while the guest is running.
     24  As access to the guest's state, such as the SIE state description, is
     29  reduce exposed guest state.
     50  access to the guest memory.
     84  instruction text, in order not to leak guest instruction text.
     92  this instruction to be moved to the guest via the two data areas
     [all …]
/linux-6.3-rc2/Documentation/s390/

vfio-ap.rst
    133  use by the KVM guest.
    205  by a KVM guest's SIE state description to grant the guest access to a matrix
    271  used by a guest
    475  guest. This facility can be made available to the guest only if it is
    489  Note: If the APFT facility is turned off (apft=off) for the guest, the guest
    961  guest.
    979  the mdev is in use by a KVM guest. If the guest is being emulated by QEMU,
    988  the guest named 'my-guest':
    990  virsh detach-device my-guest ~/config/my-guest-hostdev.xml
   1032  the guest named 'my-guest':
    [all …]
/linux-6.3-rc2/Documentation/arm64/

perf.rst
     34  For the guest this attribute will exclude EL1. Please note that EL2 is
     35  never counted within a guest.
     48  guest/host transitions.
     50  For the guest this attribute has no effect. Please note that EL2 is
     51  never counted within a guest.
     57  These attributes exclude the KVM host and guest, respectively.
     62  The KVM guest may run at EL0 (userspace) and EL1 (kernel).
     79  non-VHE guest however please note that EL2 is never counted within a guest.
     87  enabling/disabling the counters and entering/exiting the guest. We are
     89  entry/exit when counting guest events by filtering out EL2 for
     [all …]
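The arm64 perf notes above discuss the exclude_* attribute bits and how they map onto exception levels for host and guest. As a concrete companion, a minimal perf_event_open() call that counts CPU cycles while excluding guest execution; the event choice, busy loop and direct syscall wrapper are generic perf usage rather than anything specific to that document.

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
        struct perf_event_attr attr;
        long long count;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.disabled = 1;
        attr.exclude_guest = 1;   /* do not count while a KVM guest is running */
        /* attr.exclude_host = 1; would instead restrict counting to guest time */

        fd = perf_event_open(&attr, 0 /* this thread */, -1, -1, 0);
        if (fd < 0) {
            perror("perf_event_open");
            return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        for (volatile int i = 0; i < 1000000; i++)
            ;                     /* something to count */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == sizeof(count))
            printf("cycles (guest excluded): %lld\n", count);
        close(fd);
        return 0;
    }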
/linux-6.3-rc2/arch/x86/xen/

Kconfig
      7  bool "Xen guest support"
     19  bool "Xen PV guest support"
     28  Support running as a Xen PV guest.
     60  bool "Xen PVHVM guest support"
     64  Support running as a Xen PVHVM guest.
     80  bool "Xen PVH guest support"
     85  Support for running as a Xen PVH guest.
     94  Support running as a Xen Dom0 guest.
/linux-6.3-rc2/tools/virtio/virtio-trace/

README
      4  Trace agent is a user tool for sending trace data of a guest to a Host in low
     48  For example, if a guest use three CPUs, the names are
     83  example, if a guest use three CPUs, chardev names should be trace-path-cpu0,
     86  3) Boot the guest
     87  You can find some chardev in /dev/virtio-ports/ in the guest.
     93  0) Build trace agent in a guest
     96  1) Enable ftrace in the guest
    100  2) Run trace agent in the guest
    104  option, trace data are output via stdout in the guest.
    113  A host injects read start order to the guest via virtio-serial.
    [all …]
/linux-6.3-rc2/Documentation/virt/kvm/

vcpu-requests.rst
     50  a guest mode exit. However, a VCPU thread may not be in guest mode at the
     55  1) Send an IPI. This forces a guest mode exit.
     67  guest is running in guest mode or not, as well as some specific
     76  The VCPU thread is outside guest mode.
     80  The VCPU thread is in guest mode.
     89  The VCPU thread is outside guest mode, but it wants the sender of
    112  KVM's common MMU notifier may need to flush all of a guest's TLB
    137  guarantee the to-be-kicked vCPU has fully exited guest mode.
    195  IPIs will only trigger guest mode exits for VCPU threads that are in guest
    203  - enable interrupts atomically when entering the guest.
    [all …]
/linux-6.3-rc2/Documentation/ABI/stable/

sysfs-hypervisor-xen
     33  Space separated list of supported guest system types. Each type
     40  <major>: major guest interface version
     41  <minor>: minor guest interface version
     43  "x86_32": 32 bit x86 guest without PAE
     44  "x86_32p": 32 bit x86 guest with PAE
     45  "x86_64": 64 bit x86 guest
     46  "armv7l": 32 bit arm guest
     47  "aarch64": 64 bit arm guest
     64  Features the Xen hypervisor supports for the guest as defined
     96  UUID of the guest as known to the Xen hypervisor.
/linux-6.3-rc2/tools/testing/vsock/

README
      3  These tests exercise net/vmw_vsock/ host<->guest sockets for VMware, KVM, and
     16  3. Install the kernel and tests inside the guest.
     17  4. Boot the guest and ensure that the AF_VSOCK transport is enabled.
     21  # host=server, guest=client
     25  (guest)# $TEST_BINARY --mode=client \
     30  # host=client, guest=server
     31  (guest)# $TEST_BINARY --mode=server \
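The README above runs its tests over AF_VSOCK sockets between host and guest. As a rough companion illustration, a minimal guest-side client that connects to a server listening on the host; the port value is an arbitrary example, and addressing the host via VMADDR_CID_HOST is standard vsock usage rather than something taken from the README.

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/vm_sockets.h>

    int main(void)
    {
        /* Example port only; the tests pick their own control port. */
        const unsigned int port = 1234;
        struct sockaddr_vm addr;
        int fd;

        fd = socket(AF_VSOCK, SOCK_STREAM, 0);
        if (fd < 0) {
            perror("socket(AF_VSOCK)");
            return 1;
        }

        memset(&addr, 0, sizeof(addr));
        addr.svm_family = AF_VSOCK;
        addr.svm_cid = VMADDR_CID_HOST;   /* CID 2: talk to the host */
        addr.svm_port = port;

        if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
            perror("connect");
            close(fd);
            return 1;
        }

        write(fd, "hello\n", 6);
        close(fd);
        return 0;
    }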