/hypervisor/common/

ptdev.c
     76  entry = n;   in find_ptirq_entry()
     87  entry = n;   in find_ptirq_entry()
     94  return entry;   in find_ptirq_entry()
    137  entry = NULL;   in ptirq_dequeue_softirq()
    142  return entry;   in ptirq_dequeue_softirq()
    155  entry->vm = vm;   in ptirq_alloc_entry()
    168  return entry;   in ptirq_alloc_entry()
    225  if ((entry->intr_type == PTDEV_INTR_INTX) || !is_pi_capable(entry->vm)) {   in ptirq_activate_entry()
    241  key = ptirq_hash_key(entry->vm, &(entry->virt_sid));   in ptirq_activate_entry()
    274  if ((entry->vm == vm) && is_entry_active(entry)) {   in ptdev_release_all_entries()
    [all …]
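
The ptdev.c hits trace the lookup/alloc/activate path for passthrough-interrupt remapping entries: find_ptirq_entry() resolves a (VM, virtual source id) pair to an entry, and ptirq_activate_entry() files the entry under a hash key (line 241). A minimal sketch of that style of keyed lookup over bucket chains; the hash, struct layout, and names below are illustrative, not ACRN's definitions.

```c
#include <stdint.h>
#include <stddef.h>

#define PTIRQ_HASH_BITS  6U
#define PTIRQ_BUCKETS    (1U << PTIRQ_HASH_BITS)

struct vm;                                /* opaque for this sketch */
struct sid { uint32_t value; };           /* stand-in for a source id */

struct remap_entry {
    const struct vm *vm;
    struct sid virt_sid;
    struct remap_entry *next;             /* bucket chain */
};

static struct remap_entry *buckets[PTIRQ_BUCKETS];

/* Fold the owning VM and the virtual source id into a small bucket index. */
static uint32_t hash_key(const struct vm *vm, const struct sid *sid)
{
    return (uint32_t)(((uintptr_t)vm >> 4) ^ sid->value) & (PTIRQ_BUCKETS - 1U);
}

/* Scan one bucket for a matching (vm, virt_sid) pair, as find_ptirq_entry()
 * does over its hash table. */
static struct remap_entry *find_entry(const struct vm *vm, const struct sid *sid)
{
    struct remap_entry *n;

    for (n = buckets[hash_key(vm, sid)]; n != NULL; n = n->next) {
        if ((n->vm == vm) && (n->virt_sid.value == sid->value)) {
            return n;
        }
    }
    return NULL;
}
```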
|
efi_mmap.c
     40  uint32_t entry = 0U;   in init_efi_mmap_entries() local
     45  hv_memdesc[entry] = *efi_memdesc;   in init_efi_mmap_entries()
     55  entry ++;   in init_efi_mmap_entries()
     58  hv_memdesc_nr = entry;   in init_efi_mmap_entries()
|
/hypervisor/arch/x86/guest/

vcpuid.c
     88  return entry;   in find_vcpuid_entry()
    127  cpuid_subleaf(leaf, subleaf, &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);   in init_vcpuid_entry()
    131  entry->ebx = entry->eax;   in init_vcpuid_entry()
    190  cpuid_subleaf(leaf, subleaf, &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);   in init_vcpuid_entry()
    330  entry.eax = (entry.eax & ~0x1F) | (vcbm_len - 1U);   in set_vcpuid_vcat_10h_subleaf_res()
    338  entry.ebx = (uint32_t)vcat_pcbm_to_vcbm(vm, entry.ebx, res);   in set_vcpuid_vcat_10h_subleaf_res()
    383  cpuid_subleaf(CPUID_CACHE, *ecx, &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);   in guest_cpuid_04h()
    409  guest_cpuid_04h(vm, &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);   in set_vcpuid_cache()
    431  cpuid_subleaf(CPUID_EXTEND_FEATURE, 0U, &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);   in set_vcpuid_extfeat()
    499  cpuid_subleaf(CPUID_EXTEND_FEATURE, i, &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);   in set_vcpuid_extfeat()
    [all …]
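
Most vcpuid.c hits go through cpuid_subleaf(), which fills an entry's eax/ebx/ecx/edx from a host CPUID leaf/subleaf before the values are adjusted for the guest. A minimal sketch of that primitive, assuming GCC-style inline assembly; the helper and struct names are illustrative rather than ACRN's exact definitions.

```c
#include <stdint.h>

struct vcpuid_entry_regs {
    uint32_t eax, ebx, ecx, edx;
};

/* Execute CPUID with an explicit subleaf (ECX input) and capture all four
 * output registers; this is the conventional shape of a cpuid_subleaf() helper. */
static inline void cpuid_subleaf_sketch(uint32_t leaf, uint32_t subleaf,
        uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    __asm__ __volatile__ ("cpuid"
        : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
        : "a" (leaf), "c" (subleaf)
        : "memory");
}

/* Populate a virtual CPUID entry from the host values for (leaf, subleaf). */
static void init_entry_sketch(struct vcpuid_entry_regs *entry,
        uint32_t leaf, uint32_t subleaf)
{
    cpuid_subleaf_sketch(leaf, subleaf,
            &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
}
```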
|
assign.c
    173  entry->pmsi.data = entry->vmsi.data;   in ptirq_build_physical_msi()
    177  entry->pmsi.addr = entry->vmsi.addr;   in ptirq_build_physical_msi()
    184  entry->vmsi.addr.full, entry->vmsi.data.full,   in ptirq_build_physical_msi()
    185  entry->pmsi.addr.full, entry->pmsi.data.full);   in ptirq_build_physical_msi()
    341  return entry;   in add_msix_remapping()
    353  if ((entry != NULL) && (entry->vm == vm)) {   in remove_msix_remapping()
    422  return entry;   in add_intx_remapping()
    435  if ((entry != NULL) && (entry->vm != vm)) {   in remove_intx_remapping()
    459  entry->vm->vm_id, entry->virt_sid.intx_id.gsi);   in remove_intx_remapping()
    547  ptirq_handle_intx(entry->vm, entry);   in ptirq_softirq()
    [all …]
|
hyperv.c
    268  entry->ebx = 0U;   in hyperv_init_vcpuid_entry()
    269  entry->ecx = 0U;   in hyperv_init_vcpuid_entry()
    270  entry->edx = 0U;   in hyperv_init_vcpuid_entry()
    273  entry->eax = 0U;   in hyperv_init_vcpuid_entry()
    274  entry->ebx = 0U;   in hyperv_init_vcpuid_entry()
    275  entry->ecx = 0U;   in hyperv_init_vcpuid_entry()
    276  entry->edx = 0U;   in hyperv_init_vcpuid_entry()
    282  entry->ebx = 0U;   in hyperv_init_vcpuid_entry()
    283  entry->ecx = 0U;   in hyperv_init_vcpuid_entry()
    287  entry->eax = 0U;   in hyperv_init_vcpuid_entry()
    [all …]
|
ve820.c
     36  end = round_page_down(entry->baseaddr + entry->length);   in find_space_from_ve820()
     81  entry = &service_vm_e820[i];   in filter_mem_from_service_vm_e820()
     82  entry_start = entry->baseaddr;   in filter_mem_from_service_vm_e820()
     83  entry_end = entry->baseaddr + entry->length;   in filter_mem_from_service_vm_e820()
    114  entry->baseaddr = end_pa;   in filter_mem_from_service_vm_e820()
    125  entry->length = new_entry.length;   in filter_mem_from_service_vm_e820()
    126  entry->type = new_entry.type;   in filter_mem_from_service_vm_e820()
    232  entry->baseaddr = gpa;   in add_ram_entry()
    233  entry->length = length;   in add_ram_entry()
    234  entry->type = E820_TYPE_RAM;   in add_ram_entry()
    [all …]
|
guest_memory.c
     62  uint64_t entry = 0U;   in local_gva2gpa_common() local
     94  entry = *(base64 + index);   in local_gva2gpa_common()
    116  ((entry & PAGE_NX) != 0U)) {   in local_gva2gpa_common()
    134  addr = entry;   in local_gva2gpa_common()
    154  entry >>= shift;   in local_gva2gpa_common()
    156  entry <<= (shift + 12U);   in local_gva2gpa_common()
    157  entry >>= 12U;   in local_gva2gpa_common()
    175  uint64_t entry;   in local_gva2gpa_pae() local
    184  entry = base[index];   in local_gva2gpa_pae()
    187  if ((entry & PAGE_PRESENT) != 0U) {   in local_gva2gpa_pae()
    [all …]
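
The guest_memory.c lines are the per-level step of a software guest page walk: read the entry at the current level, check present/NX, then mask it down to the physical address it points to. Lines 154–157 do the masking with a shift trick; a sketch of just that step, under the standard x86-64 assumption that the address field occupies bits 51:shift of the entry.

```c
#include <stdint.h>

/* Strip the attribute bits below 'shift' and the NX/ignored bits (63:52) from
 * a paging-structure entry, leaving only the physical address it maps to.
 * 'shift' is 12 for a 4 KiB mapping, 21 for 2 MiB, 30 for 1 GiB. */
static uint64_t entry_to_paddr(uint64_t entry, uint32_t shift)
{
    entry >>= shift;            /* drop attribute bits below the address field */
    entry <<= (shift + 12U);    /* shift back up, pushing bits 63:52 off the top */
    entry >>= 12U;              /* realign: address bits 51:shift, rest cleared */
    return entry;
}
```

In the real walk this masking is preceded by the present and NX checks seen at lines 116 and 187.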
|
vm.c
    334  const struct e820_entry *entry;   in prepare_prelaunched_vm_memmap() local
    342  if (entry->length == 0UL) {   in prepare_prelaunched_vm_memmap()
    355  if ((entry->type == E820_TYPE_RESERVED) && (entry->baseaddr > MEM_1M)) {   in prepare_prelaunched_vm_memmap()
    359  base_gpa = entry->baseaddr;   in prepare_prelaunched_vm_memmap()
    493  const struct e820_entry *entry;   in prepare_service_vm_memmap() local
    501  entry = p_e820 + i;   in prepare_service_vm_memmap()
    503  pr_dbg("BaseAddress: 0x%016lx length: 0x%016lx\n", entry->baseaddr, entry->length);   in prepare_service_vm_memmap()
    512  entry = p_e820 + i;   in prepare_service_vm_memmap()
    514  ept_modify_mr(vm, pml4_page, entry->baseaddr, entry->length, EPT_WB, EPT_MT_MASK);   in prepare_service_vm_memmap()
    645  entry = p_e820 + i;   in prepare_vm_identical_memmap()
    [all …]
|
optee.c
    185  void handle_x86_tee_int(struct ptirq_remapping_info *entry, uint16_t pcpu_id)   in handle_x86_tee_int() argument
    190  if (is_ree_vm(entry->vm) && is_tee_vm(curr_vcpu->vm)) {   in handle_x86_tee_int()
    198  tee_vcpu = vcpu_from_pid(get_companion_vm(entry->vm), pcpu_id);   in handle_x86_tee_int()
    200  } else if (is_tee_vm(entry->vm) && is_ree_vm(curr_vcpu->vm)) {   in handle_x86_tee_int()
    208  tee_vcpu = vcpu_from_pid(entry->vm, pcpu_id);   in handle_x86_tee_int()
|
ept.c
    154  static inline void ept_nop_tweak_exe_right(uint64_t *entry __attribute__((unused))) {}   in ept_nop_tweak_exe_right() argument
    155  static inline void ept_nop_recover_exe_right(uint64_t *entry __attribute__((unused))) {}   in ept_nop_recover_exe_right() argument
    158  static inline void ept_tweak_exe_right(uint64_t *entry)   in ept_tweak_exe_right() argument
    160  *entry &= ~EPT_EXE;   in ept_tweak_exe_right()
    166  static inline void ept_recover_exe_right(uint64_t *entry)   in ept_recover_exe_right() argument
    168  *entry |= EPT_EXE;   in ept_recover_exe_right()
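
ept.c defines a tweak/recover pair that clears and restores the execute right in an EPT entry, plus no-op variants with the same signature so callers can invoke either set unconditionally. A sketch of how such a pair can be selected through an ops table; the struct below is illustrative, not ACRN's page-table ops layout.

```c
#include <stdint.h>

#define EPT_EXE  (1UL << 2U)   /* execute-access bit of an EPT entry (architectural) */

/* Matching signatures let the caller always invoke the callbacks, whether or
 * not the execute-right adjustment is actually wanted. */
struct pgtable_exe_ops {
    void (*tweak_exe_right)(uint64_t *entry);
    void (*recover_exe_right)(uint64_t *entry);
};

static void tweak_exe(uint64_t *entry)   { *entry &= ~EPT_EXE; }  /* drop execute */
static void recover_exe(uint64_t *entry) { *entry |= EPT_EXE; }   /* restore execute */
static void nop_exe(uint64_t *entry)     { (void)entry; }         /* leave entry alone */

static const struct pgtable_exe_ops adjusted_ops = { tweak_exe, recover_exe };
static const struct pgtable_exe_ops plain_ops    = { nop_exe,   nop_exe };
```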
|
/hypervisor/debug/

trace.c
     55  entry->tsc = cpu_ticks();   in trace_put()
     56  entry->id = evid;   in trace_put()
     58  entry->cpu = (uint8_t)cpu_id;   in trace_put()
     59  (void)sbuf_put(sbuf, (uint8_t *)entry, sizeof(*entry));   in trace_put()
     64  struct trace_entry entry;   in TRACE_2L() local
     71  entry.payload.fields_64.e = e;   in TRACE_2L()
     72  entry.payload.fields_64.f = f;   in TRACE_2L()
     78  struct trace_entry entry;   in TRACE_4I() local
     94  struct trace_entry entry;   in TRACE_6C() local
    116  struct trace_entry entry;   in TRACE_16STR() local
    [all …]
|
shell.c
   1215  if (is_entry_active(entry)) {   in get_entry_info()
   1216  if (entry->intr_type == PTDEV_INTR_MSI) {   in get_entry_info()
   1218  *dest = entry->pmsi.addr.bits.dest_field;   in get_entry_info()
   1226  bdf->value = entry->phys_sid.msi_id.bdf;   in get_entry_info()
   1227  vbdf->value = entry->virt_sid.msi_id.bdf;   in get_entry_info()
   1244  *pgsi = entry->phys_sid.intx_id.gsi;   in get_entry_info()
   1245  *vgsi = entry->virt_sid.intx_id.gsi;   in get_entry_info()
   1249  *irq = entry->allocated_pirq;   in get_entry_info()
   1267  struct ptirq_remapping_info *entry;   in get_ptdev_info() local
   1285  entry = &ptirq_entries[idx];   in get_ptdev_info()
    [all …]
|
/hypervisor/arch/x86/seed/

seed_sbl.c
     67  struct seed_entry *entry = NULL;   in parse_seed_sbl() local
     82  entry = (struct seed_entry *)((uint8_t *)seed_hob + sizeof(struct seed_list_hob));   in parse_seed_sbl()
     85  if (entry != NULL) {   in parse_seed_sbl()
     87  if ((SEED_ENTRY_TYPE_SVNSEED == entry->type) &&   in parse_seed_sbl()
     88  (SEED_ENTRY_USAGE_DSEED == entry->usage)) {   in parse_seed_sbl()
     93  if ((entry->index != dseed_index) ||   in parse_seed_sbl()
     94  (entry->index >= BOOTLOADER_SEED_MAX_ENTRIES)) {   in parse_seed_sbl()
    100  (void *)&entry->seed[0U], sizeof(struct seed_info));   in parse_seed_sbl()
    104  (void)memset((void *)&entry->seed[0U], 0U, sizeof(struct seed_info));   in parse_seed_sbl()
    107  entry = (struct seed_entry *)((uint8_t *)entry + entry->seed_entry_size);   in parse_seed_sbl()
|
/hypervisor/arch/x86/

rtcm.c
     58  struct rtct_entry *entry;   in parse_rtct() local
     64  foreach_rtct_entry(acpi_rtct_tbl, entry) {   in parse_rtct()
     65  if (entry->type == RTCT_V2_COMPATIBILITY) {   in parse_rtct()
     74  foreach_rtct_entry(acpi_rtct_tbl, entry) {   in parse_rtct()
     75  if (entry->type == RTCT_ENTRY_TYPE_SOFTWARE_SRAM) {   in parse_rtct()
     76  ssram = (struct rtct_entry_data_ssram *)entry->data;   in parse_rtct()
     82  } else if (entry->type == RTCT_ENTRY_TYPE_RTCM_BINARY) {   in parse_rtct()
     89  foreach_rtct_entry(acpi_rtct_tbl, entry) {   in parse_rtct()
     90  if (entry->type == RTCT_V2_SSRAM) {   in parse_rtct()
     91  ssram_v2 = (struct rtct_entry_data_ssram_v2 *)entry->data;   in parse_rtct()
    [all …]
|
e820.c
     47  struct e820_entry *entry, *new_entry;   in e820_alloc_memory() local
     50  entry = &hv_e820[i];   in e820_alloc_memory()
     53  start = round_page_up(entry->baseaddr);   in e820_alloc_memory()
     54  end = round_page_down(entry->baseaddr + entry->length);   in e820_alloc_memory()
     57  if ((entry->type == E820_TYPE_RAM) && (length >= size) && ((start + size) <= max_addr)) {   in e820_alloc_memory()
     62  entry->type = E820_TYPE_RESERVED;   in e820_alloc_memory()
     79  new_entry->length = (entry->baseaddr + entry->length) - new_entry->baseaddr;   in e820_alloc_memory()
     81  entry->length -= new_entry->length;   in e820_alloc_memory()
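
e820_alloc_memory() scans for a RAM entry large enough below max_addr, then carves the allocation out, re-typing it and shrinking the original entry (lines 79 and 81). A sketch of that split, under the assumption that the allocation is taken from the top of the chosen entry; names and table bookkeeping are simplified.

```c
#include <stdint.h>

#define E820_TYPE_RAM       1U
#define E820_TYPE_RESERVED  2U

struct e820_entry {
    uint64_t baseaddr;
    uint64_t length;
    uint32_t type;
};

/* Split a RAM entry so that its top 'size' bytes become a new RESERVED entry
 * (the allocation) and the original entry shrinks to what is left underneath,
 * mirroring the arithmetic at lines 79/81.  Returns the allocated base. */
static uint64_t carve_top_of_entry(struct e820_entry *entry,
        struct e820_entry *new_entry, uint64_t size)
{
    uint64_t alloc_base = (entry->baseaddr + entry->length) - size;

    new_entry->baseaddr = alloc_base;
    new_entry->length   = (entry->baseaddr + entry->length) - new_entry->baseaddr;
    new_entry->type     = E820_TYPE_RESERVED;

    entry->length -= new_entry->length;   /* original entry now ends at alloc_base */
    return alloc_base;
}
```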
|
mmu.c
     83  static inline void ppt_clflush_pagewalk(const void* entry __attribute__((unused)))   in ppt_clflush_pagewalk() argument
     88  static inline void ppt_nop_tweak_exe_right(uint64_t *entry __attribute__((unused))) {}   in ppt_nop_tweak_exe_right() argument
     89  static inline void ppt_nop_recover_exe_right(uint64_t *entry __attribute__((unused))) {}   in ppt_nop_recover_exe_right() argument
    257  const struct abi_mmap *entry;   in init_paging() local
    270  entry = p_mmap + i;   in init_paging()
    271  if (entry->type == MMAP_TYPE_RAM) {   in init_paging()
    272  uint64_t end = entry->baseaddr + entry->length;   in init_paging()
    276  high64_min_ram = min(entry->baseaddr, high64_min_ram);   in init_paging()
|
idt.S
     25  .macro interrupt_descriptor entry, dpl=0 ist=0
     29  .quad entry
     32  .macro trap_descriptor entry, dpl=0, ist=0
     36  .quad entry
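
The idt.S macros emit one 16-byte descriptor per vector, and the `.quad entry` lines store the raw handler address, so the architectural gate packing has to be applied before the IDT is loaded. For reference, a C view of the 64-bit interrupt/trap gate layout defined by the architecture; the field names are descriptive, not taken from ACRN.

```c
#include <stdint.h>

/* 64-bit IDT gate: 16 bytes, with the handler offset split across three fields. */
union idt_gate {
    struct {
        uint64_t offset_15_0  : 16;   /* handler address bits 15:0 */
        uint64_t selector     : 16;   /* code segment selector */
        uint64_t ist          : 3;    /* interrupt stack table index */
        uint64_t reserved_0   : 5;
        uint64_t type         : 4;    /* 0xE = interrupt gate, 0xF = trap gate */
        uint64_t zero         : 1;
        uint64_t dpl          : 2;    /* descriptor privilege level */
        uint64_t present      : 1;
        uint64_t offset_31_16 : 16;   /* handler address bits 31:16 */
        uint64_t offset_63_32 : 32;   /* handler address bits 63:32 */
        uint64_t reserved_1   : 32;
    } bits;
    uint64_t value[2];
};
```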
|
/hypervisor/boot/

acpi_base.c
     90  const struct e820_entry *entry = get_e820_entry();   in init_acpi() local
     94  if (entry[i].type == E820_TYPE_ACPI_RECLAIM) {   in init_acpi()
     95  rsdp = found_rsdp((char *)hpa2hva(entry[i].baseaddr), entry[i].length);   in init_acpi()
    181  struct acpi_subtable_header *entry;   in local_parse_madt() local
    188  for (iterator = first; (iterator) < (end); iterator += entry->length) {   in local_parse_madt()
    189  entry = (struct acpi_subtable_header *)iterator;   in local_parse_madt()
    190  if (entry->length < sizeof(struct acpi_subtable_header)) {   in local_parse_madt()
    194  if (entry->type == ACPI_MADT_TYPE_LOCAL_APIC) {   in local_parse_madt()
    224  uint64_t entry, end;   in parse_madt_ioapic() local
    231  for (entry = (uint64_t)(madt + 1); entry < end; entry += ioapic->header.length) {   in parse_madt_ioapic()
    [all …]
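
local_parse_madt() walks the MADT's variable-length subtables by advancing a cursor by each header's length and bailing out if a length is shorter than the header itself (line 190). A self-contained sketch of that iteration pattern; the subtable header mirrors the standard ACPI {type, length} prefix, and the counting function is illustrative.

```c
#include <stdint.h>
#include <stddef.h>

struct acpi_subtable_header {
    uint8_t type;
    uint8_t length;
};

#define ACPI_MADT_TYPE_LOCAL_APIC  0U

/* Walk [first, end) as a sequence of variable-length subtables.  Each subtable
 * starts with a {type, length} header; a length smaller than the header itself
 * is malformed and ends the walk, just like the check at line 190. */
static uint16_t count_local_apics(uint64_t first, uint64_t end)
{
    struct acpi_subtable_header *entry;
    uint64_t iterator;
    uint16_t count = 0U;

    for (iterator = first; iterator < end; iterator += entry->length) {
        entry = (struct acpi_subtable_header *)(uintptr_t)iterator;
        if (entry->length < sizeof(struct acpi_subtable_header)) {
            break;
        }
        if (entry->type == ACPI_MADT_TYPE_LOCAL_APIC) {
            count++;
        }
    }
    return count;
}
```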
|
reloc.c
     67  struct Elf64_Rel *entry = NULL;   in relocate() local
    106  entry = (struct Elf64_Rel *)rela_start;   in relocate()
    107  if ((elf64_r_type(entry->r_info)) == R_X86_64_RELATIVE) {   in relocate()
    108  addr = (uint64_t *)(delta + entry->r_offset);   in relocate()
    121  if ((entry->r_offset > trampoline_end) && (entry->r_offset > primary_entry_end)) {   in relocate()
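
relocate() walks the hypervisor's own relocation entries after the image has moved by delta bytes: each R_X86_64_RELATIVE record names a slot (r_offset) whose stored pointer must be adjusted by the same delta. A condensed sketch of that loop, omitting the trampoline/primary-entry exclusions at line 121; the types are simplified and the Rel/Rela distinction is glossed over.

```c
#include <stdint.h>

struct elf64_rel_sketch {
    uint64_t r_offset;   /* link-time address of the slot to patch */
    uint64_t r_info;     /* relocation type and symbol index */
};

#define R_X86_64_RELATIVE    8UL
#define ELF64_R_TYPE(info)   ((info) & 0xffffffffUL)

/* Apply RELATIVE relocations after the image moved by 'delta' bytes: the slot
 * itself now lives at (r_offset + delta), and the pointer it holds must also
 * be shifted by delta.  A sketch, not ACRN's full relocate(). */
static void apply_relative_relocs(struct elf64_rel_sketch *start,
        struct elf64_rel_sketch *end, uint64_t delta)
{
    struct elf64_rel_sketch *entry;

    for (entry = start; entry < end; entry++) {
        if (ELF64_R_TYPE(entry->r_info) == R_X86_64_RELATIVE) {
            uint64_t *addr = (uint64_t *)(uintptr_t)(delta + entry->r_offset);
            *addr += delta;
        }
    }
}
```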
|
/hypervisor/include/lib/

list.h
     74  static inline void list_del(const struct list_head *entry)   in list_del() argument
     76  list_del_node(entry->prev, entry->next);   in list_del()
     79  static inline void list_del_init(struct list_head *entry)   in list_del_init() argument
     81  list_del_node(entry->prev, entry->next);   in list_del_init()
     82  INIT_LIST_HEAD(entry);   in list_del_init()
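
list.h's list_del() unlinks a node by joining its neighbours, and list_del_init() additionally resets the removed node so it can later be treated as an empty list head. A self-contained sketch of the pointer surgery behind the hits above, using the usual intrusive doubly linked list; the const qualifier on the real list_del() parameter is dropped here for simplicity.

```c
struct list_head {
    struct list_head *prev, *next;
};

#define INIT_LIST_HEAD(h)  do { (h)->prev = (h); (h)->next = (h); } while (0)

/* Join 'prev' and 'next' directly, dropping whatever sat between them. */
static inline void list_del_node(struct list_head *prev, struct list_head *next)
{
    next->prev = prev;
    prev->next = next;
}

static inline void list_del(struct list_head *entry)
{
    list_del_node(entry->prev, entry->next);
}

/* Same unlink, but the removed node is re-initialised to point at itself,
 * so a later list_empty(entry) check sees it as an empty list. */
static inline void list_del_init(struct list_head *entry)
{
    list_del_node(entry->prev, entry->next);
    INIT_LIST_HEAD(entry);
}
```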
|
/hypervisor/include/common/

ptdev.h
    121  typedef void (*ptirq_arch_release_fn_t)(const struct ptirq_remapping_info *entry);
    149  static inline bool is_entry_active(const struct ptirq_remapping_info *entry)   in is_entry_active() argument
    151  return entry->active;   in is_entry_active()
    257  void ptirq_release_entry(struct ptirq_remapping_info *entry);
    271  int32_t ptirq_activate_entry(struct ptirq_remapping_info *entry, uint32_t phys_irq);
    278  void ptirq_deactivate_entry(struct ptirq_remapping_info *entry);
|
/hypervisor/dm/vpci/

vmsix.c
     99  struct msix_table_entry *entry;   in rw_vmsix_table() local
    113  entry = &vdev->msix.table_entries[index];   in rw_vmsix_table()
    118  (void *)entry + entry_offset, (size_t)mmio->size);   in rw_vmsix_table()
    120  (void)memcpy_s((void *)entry + entry_offset, (size_t)mmio->size,   in rw_vmsix_table()
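
rw_vmsix_table() converts an MMIO offset into a table index plus a byte offset inside one MSI-X table entry, then copies between the access and that slice of the entry (lines 118/120). A sketch of the index/offset arithmetic, assuming the specification's 16-byte entry layout (message address, message data, vector control); the function and field names are illustrative.

```c
#include <stdint.h>
#include <string.h>

/* One MSI-X table entry is 16 bytes by specification. */
#define MSIX_TABLE_ENTRY_SIZE  16U

struct msix_table_entry {
    uint64_t addr;          /* message address (low + high dwords) */
    uint32_t data;          /* message data */
    uint32_t vector_ctl;    /* bit 0 = mask */
};

/* Resolve an MMIO offset within the MSI-X table to (entry, offset-in-entry)
 * and copy 'size' bytes out of that entry, mirroring the read path. */
static void read_vmsix_slice(const struct msix_table_entry *table,
        uint64_t mmio_off, void *dst, size_t size)
{
    uint32_t index        = (uint32_t)(mmio_off / MSIX_TABLE_ENTRY_SIZE);
    uint32_t entry_offset = (uint32_t)(mmio_off % MSIX_TABLE_ENTRY_SIZE);

    (void)memcpy(dst, (const uint8_t *)&table[index] + entry_offset, size);
}
```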
|
vmcs9900.c
     25  struct msix_table_entry *entry = &vdev->msix.table_entries[0];   in trigger_vmcs9900_msix() local
     27  ret = vlapic_inject_msi(vm, entry->addr, entry->data);   in trigger_vmcs9900_msix()
     31  vdev->bdf.bits.b, vdev->bdf.bits.d, vdev->bdf.bits.f, entry->addr, entry->data);   in trigger_vmcs9900_msix()
|
/hypervisor/bsp/ld/

link_ram.ld.in
     20  .entry :
     22  KEEP(*(entry)) ;
|
/hypervisor/include/arch/x86/asm/guest/

optee.h
     25  void handle_x86_tee_int(struct ptirq_remapping_info *entry, uint16_t pcpu_id);
|