Searched refs:size (Results 1 – 25 of 67) sorted by relevance

/hypervisor/include/arch/x86/asm/
page.h 55 #define PML4_PAGE_NUM(size) 1UL argument
64 #define PDPT_PAGE_NUM(size) (((size) + PML4E_SIZE - 1UL) >> PML4E_SHIFT) argument
72 #define PD_PAGE_NUM(size) (((size) + PDPTE_SIZE - 1UL) >> PDPTE_SHIFT) argument
80 #define PT_PAGE_NUM(size) (((size) + PDE_SIZE - 1UL) >> PDE_SHIFT) argument
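The PML4/PDPT/PD/PT page-count macros above are round-up divisions: a region of size bytes needs one next-level page-table page per entry of the level above it. A minimal sketch of the same arithmetic, assuming an LP64 target and illustrative shift constants (ACRN defines the real ones in its paging headers):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative 4-level paging constants; the hypervisor's paging headers are authoritative. */
    #define PML4E_SHIFT  39U                      /* one PML4 entry covers 512 GiB */
    #define PML4E_SIZE   (1UL << PML4E_SHIFT)
    #define PDPTE_SHIFT  30U                      /* one PDPT entry covers 1 GiB */
    #define PDPTE_SIZE   (1UL << PDPTE_SHIFT)

    /* Round up: how many next-level pages are needed to cover 'size' bytes. */
    #define PDPT_PAGE_NUM(size)  (((size) + PML4E_SIZE - 1UL) >> PML4E_SHIFT)
    #define PD_PAGE_NUM(size)    (((size) + PDPTE_SIZE - 1UL) >> PDPTE_SHIFT)

    int main(void)
    {
        uint64_t space = 6UL << 30;   /* a 6 GiB physical address space */

        printf("PDPT pages: %lu, PD pages: %lu\n",
               (unsigned long)PDPT_PAGE_NUM(space),    /* 1 */
               (unsigned long)PD_PAGE_NUM(space));     /* 6 */
        return 0;
    }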
rtct.h 45 uint16_t size; member
61 uint32_t size; member
69 uint32_t size; member
77 uint32_t size; member
mmu.h 150 void set_paging_supervisor(uint64_t base, uint64_t size);
151 void set_paging_x(uint64_t base, uint64_t size);
152 void set_paging_nx(uint64_t base, uint64_t size);
182 void flush_tlb_range(uint64_t addr, uint64_t size);
186 void flush_cache_range(const volatile void *p, uint64_t size);
sgx.h 35 uint64_t size; /* EPC section size in byte, must be page aligned */ member
42 uint64_t size; /* EPC reource size in byte, must be page aligned */ member
/hypervisor/include/arch/x86/asm/lib/
atomic.h 35 #define build_atomic_inc(name, size, type) \ argument
38 asm volatile(BUS_LOCK "inc" size " %0" \
46 #define build_atomic_dec(name, size, type) \ argument
49 asm volatile(BUS_LOCK "dec" size " %0" \
57 #define build_atomic_swap(name, size, type) \ argument
60 asm volatile(BUS_LOCK "xchg" size " %1,%0" \
87 #define build_atomic_cmpxchg(name, size, type) \ argument
91 asm volatile(BUS_LOCK "cmpxchg" size " %2,%1" \
100 #define build_atomic_xadd(name, size, type) \ argument
103 asm volatile(BUS_LOCK "xadd" size " %0,%1" \
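The build_atomic_* macros above stamp out one lock-prefixed inline-assembly helper per operand width, with the instruction suffix passed in through the size string. A minimal sketch of how such a builder expands, assuming x86 and GCC-style inline asm; the helper name my_atomic_inc32 and the constraint lists are illustrative, not copied from atomic.h:

    #include <stdint.h>

    #define BUS_LOCK "lock ; "

    /* Hypothetical builder: generates an inline function that increments *ptr
     * atomically, using the suffix in 'size' ("l" = 32-bit, "q" = 64-bit). */
    #define build_atomic_inc(name, size, type)                 \
        static inline void name(type *ptr)                     \
        {                                                      \
            asm volatile(BUS_LOCK "inc" size " %0"             \
                         : "+m" (*ptr)                         \
                         :                                     \
                         : "cc", "memory");                    \
        }

    build_atomic_inc(my_atomic_inc32, "l", uint32_t)

    int main(void)
    {
        uint32_t counter = 0U;
        my_atomic_inc32(&counter);   /* counter becomes 1, updated under a bus lock */
        return (counter == 1U) ? 0 : 1;
    }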
/hypervisor/include/arch/x86/asm/guest/
ept.h 11 typedef void (*pge_handler)(uint64_t *pgentry, uint64_t size);
34 bool ept_is_valid_mr(struct acrn_vm *vm, uint64_t mr_base_gpa, uint64_t size);
63 uint64_t local_gpa2hpa(struct acrn_vm *vm, uint64_t gpa, uint32_t *size);
86 uint64_t gpa, uint64_t size, uint64_t prot_orig);
101 uint64_t size, uint64_t prot_set, uint64_t prot_clr);
114 uint64_t size);
123 void ept_flush_leaf_page(uint64_t *pge, uint64_t size);
guest_memory.h 63 int32_t copy_from_gpa(struct acrn_vm *vm, void *h_ptr, uint64_t gpa, uint32_t size);
82 int32_t copy_to_gpa(struct acrn_vm *vm, void *h_ptr, uint64_t gpa, uint32_t size);
97 uint32_t size, uint32_t *err_code, uint64_t *fault_addr);
112 uint32_t size, uint32_t *err_code, uint64_t *fault_addr);
/hypervisor/arch/x86/
sgx.c 23 static int32_t get_epc_section(uint32_t sec_id, uint64_t* base, uint64_t* size) in get_epc_section() argument
33 *size = (((uint64_t)edx & CPUID_SGX_EPC_HIGH_MASK) << 32U) | in get_epc_section()
35 if (*size != 0UL) { in get_epc_section()
37 pepc_sections[sec_id].size = *size; in get_epc_section()
62 uint64_t vm_request_size = vm_config->epc.size; in partition_epc()
73 vm_request_size = vm_config->epc.size; in partition_epc()
84 vm_epc_maps[mid][vm_id].size = alloc_size; in partition_epc()
86 vm_epc_maps[mid][vm_id].gpa = vm_config->epc.base + vm_config->epc.size - vm_request_size; in partition_epc()
125 return ((init_sgx_ret == 0) && (vm_epc_maps[0][vm_id].size != 0U)); in is_vsgx_supported()
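get_epc_section() above assembles a 64-bit EPC section size from the ECX/EDX halves returned by the SGX CPUID leaf. A sketch of that combination, assuming the usual CPUID 0x12 sub-leaf layout; the mask values here are assumptions, the authoritative definitions live in ACRN's CPUID headers and the SDM:

    #include <stdint.h>

    /* Assumed masks for CPUID leaf 0x12 EPC sub-leaves. */
    #define CPUID_SGX_EPC_LOW_MASK   0xFFFFF000U   /* ECX[31:12]: size bits 31:12 */
    #define CPUID_SGX_EPC_HIGH_MASK  0x000FFFFFU   /* EDX[19:0] : size bits 51:32 */

    /* Combine the two registers into a byte count, mirroring the expression
     * quoted from get_epc_section() above. */
    static uint64_t epc_section_size(uint32_t ecx, uint32_t edx)
    {
        return (((uint64_t)edx & CPUID_SGX_EPC_HIGH_MASK) << 32U) |
               ((uint64_t)ecx & CPUID_SGX_EPC_LOW_MASK);
    }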
rtcm.c 28 e = (struct rtct_entry *)((uint64_t)e + e->size))
78 ssram_top_hpa = max(ssram_top_hpa, ssram->base + ssram->size); in parse_rtct()
81 ssram->cache_level, ssram->base, ssram->size); in parse_rtct()
84 ASSERT((rtcm_binary->address != 0UL && rtcm_binary->size != 0U), in parse_rtct()
93 ssram_top_hpa = max(ssram_top_hpa, ssram_v2->base + ssram_v2->size); in parse_rtct()
96 ssram_v2->cache_level, ssram_v2->base, ssram_v2->size); in parse_rtct()
99 ASSERT((rtcm_binary->address != 0UL && rtcm_binary->size != 0U), in parse_rtct()
142 set_paging_x((uint64_t)hpa2hva(rtcm_binary->address), rtcm_binary->size); in init_software_sram()
156 flush_tlb_range((uint64_t)hpa2hva(rtcm_binary->address), rtcm_binary->size); in init_software_sram()
166 set_paging_nx((uint64_t)hpa2hva(rtcm_binary->address), rtcm_binary->size); in init_software_sram()
[all …]
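rtcm.c walks the RTCT as a packed sequence of variable-length entries, advancing by each entry's own size field (line 28 above). A self-contained sketch of that walk over a raw table buffer; the entry layout and the bounds checks here are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative variable-length table entry: 'size' covers the whole entry. */
    struct rtct_entry {
        uint16_t size;
        uint16_t format;
        uint32_t type;
        /* payload follows */
    };

    static void walk_rtct(const void *table, uint32_t table_len)
    {
        const char *cur = (const char *)table;
        const char *end = cur + table_len;

        while ((cur + sizeof(struct rtct_entry)) <= end) {
            const struct rtct_entry *e = (const struct rtct_entry *)cur;

            if ((e->size == 0U) || ((cur + e->size) > end)) {
                break;  /* malformed entry: avoid an endless or out-of-bounds walk */
            }
            printf("entry type %u, %u bytes\n", e->type, (unsigned)e->size);
            cur += e->size;  /* advance by the entry's own length, as rtcm.c does */
        }
    }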
mmu.c 199 void set_paging_supervisor(uint64_t base, uint64_t size) in set_paging_supervisor() argument
203 uint64_t region_end = base + size; in set_paging_supervisor()
213 void set_paging_nx(uint64_t base, uint64_t size) in set_paging_nx() argument
215 uint64_t region_end = base + size; in set_paging_nx()
223 void set_paging_x(uint64_t base, uint64_t size) in set_paging_x() argument
225 uint64_t region_end = base + size; in set_paging_x()
334 void flush_tlb_range(uint64_t addr, uint64_t size) in flush_tlb_range() argument
338 for (linear_addr = addr; linear_addr < (addr + size); linear_addr += PAGE_SIZE) { in flush_tlb_range()
353 void flush_cache_range(const volatile void *p, uint64_t size) in flush_cache_range() argument
357 for (i = 0UL; i < size; i += CACHE_LINE_SIZE) { in flush_cache_range()
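flush_tlb_range() and flush_cache_range() step through [base, base + size) in page-sized and cache-line-sized strides. A sketch of the same stepping pattern, assuming x86 GCC-style inline asm and illustrative constants; invlpg only executes at ring 0, so this shows the shape rather than user-space-runnable code:

    #include <stdint.h>

    #define PAGE_SIZE        4096UL
    #define CACHE_LINE_SIZE  64UL

    /* Invalidate one TLB entry per page in the range. */
    static void flush_tlb_range_sketch(uint64_t addr, uint64_t size)
    {
        uint64_t linear_addr;

        for (linear_addr = addr; linear_addr < (addr + size); linear_addr += PAGE_SIZE) {
            asm volatile("invlpg (%0)" : : "r" (linear_addr) : "memory");
        }
    }

    /* Write back and invalidate one cache line per step in the range. */
    static void flush_cache_range_sketch(const volatile void *p, uint64_t size)
    {
        uint64_t i;

        for (i = 0UL; i < size; i += CACHE_LINE_SIZE) {
            asm volatile("clflush (%0)" : : "r" ((const volatile char *)p + i) : "memory");
        }
    }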
/hypervisor/dm/
mmio_dev.c 25 mem_aligned_check(res->size, PAGE_SIZE)) { in assign_mmio_dev()
28 res->size, EPT_RWX | (res->mem_type & EPT_MT_MASK)); in assign_mmio_dev()
31 __FUNCTION__, i, res->user_vm_pa, res->host_pa, res->size); in assign_mmio_dev()
49 if (ept_is_valid_mr(vm, gpa, res->size)) { in deassign_mmio_dev()
51 mem_aligned_check(res->size, PAGE_SIZE)) { in deassign_mmio_dev()
52 ept_del_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, gpa, res->size); in deassign_mmio_dev()
55 __FUNCTION__, i, res->user_vm_pa, res->host_pa, res->size); in deassign_mmio_dev()
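assign_mmio_dev() and deassign_mmio_dev() only touch the EPT when the resource's address and size are page aligned. A one-screen sketch of that power-of-two alignment test; the helper body is an assumption based on the name mem_aligned_check:

    #include <stdbool.h>
    #include <stdint.h>

    /* True when 'value' is a multiple of 'req_align'; req_align must be a power of two. */
    static inline bool mem_aligned_check(uint64_t value, uint64_t req_align)
    {
        return ((value & (req_align - 1UL)) == 0UL);
    }

    /* e.g. mem_aligned_check(res_size, 4096UL) gates the ept_add_mr() call quoted above. */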
/hypervisor/arch/x86/guest/
guest_memory.c 274 uint32_t size, uint32_t fix_pg_size, bool cp_from_vm) in local_copy_gpa() argument
292 len = (size > (pg_size - offset_in_pg)) ? (pg_size - offset_in_pg) : size; in local_copy_gpa()
314 uint32_t size = size_arg; in copy_gpa() local
317 while (size > 0U) { in copy_gpa()
325 size -= len; in copy_gpa()
343 uint32_t size = size_arg; in copy_gva() local
345 while ((size > 0U) && (ret == 0)) { in copy_gva()
352 size -= len; in copy_gva()
376 ret = copy_gpa(vm, h_ptr, gpa, size, 1); in copy_from_gpa()
395 ret = copy_gpa(vm, h_ptr, gpa, size, 0); in copy_to_gpa()
[all …]
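copy_gpa() copies in chunks so that no single access crosses a guest page boundary; local_copy_gpa() clamps each chunk to what remains in the current page (line 292 above). A host-side sketch of that clamping loop; the gpa2hva_stub translation and the function name are placeholders:

    #include <stdint.h>
    #include <string.h>

    #define PAGE_SIZE 4096U

    /* Placeholder for the hypervisor's GPA -> HVA translation (identity map, illustration only). */
    static void *gpa2hva_stub(uint64_t gpa)
    {
        return (void *)(uintptr_t)gpa;
    }

    static void copy_from_gpa_sketch(void *h_ptr, uint64_t gpa, uint32_t size)
    {
        char *dst = (char *)h_ptr;

        while (size > 0U) {
            uint32_t offset_in_pg = (uint32_t)(gpa & (PAGE_SIZE - 1U));
            /* Clamp the chunk so it never crosses the current guest page. */
            uint32_t len = (size > (PAGE_SIZE - offset_in_pg)) ? (PAGE_SIZE - offset_in_pg) : size;

            (void)memcpy(dst, gpa2hva_stub(gpa), len);

            gpa  += len;
            dst  += len;
            size -= len;
        }
    }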
ept.c 246 uint64_t local_gpa2hpa(struct acrn_vm *vm, uint64_t gpa, uint32_t *size) in local_gpa2hpa() argument
266 if ((size != NULL) && (hpa != INVALID_HPA)) { in local_gpa2hpa()
267 *size = (uint32_t)pg_size; in local_gpa2hpa()
318 uint64_t hpa, uint64_t gpa, uint64_t size, uint64_t prot_orig) in ept_add_mr() argument
323 __func__, vm->vm_id, hpa, gpa, size, prot); in ept_add_mr()
327 pgtable_add_map(pml4_page, hpa, gpa, size, prot, &vm->arch_vm.ept_pgtable); in ept_add_mr()
335 uint64_t gpa, uint64_t size, in ept_modify_mr() argument
353 void ept_del_mr(struct acrn_vm *vm, uint64_t *pml4_page, uint64_t gpa, uint64_t size) in ept_del_mr() argument
369 void ept_flush_leaf_page(uint64_t *pge, uint64_t size) in ept_flush_leaf_page() argument
375 base_hpa = (*pge & (~(size - 1UL))); in ept_flush_leaf_page()
[all …]
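In ept_flush_leaf_page() (line 375 above), masking the leaf entry with ~(size - 1) strips the flag bits and recovers the size-aligned base address of the mapping. A short worked check with an illustrative 2 MiB leaf entry:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint64_t size = 2UL << 20;                /* a 2 MiB leaf mapping */
        uint64_t pge  = 0x40200000UL | 0x67UL;    /* frame address plus some low flag bits */

        /* Masking with ~(size - 1) removes the flags and any sub-mapping offset. */
        uint64_t base_hpa = pge & ~(size - 1UL);

        assert(base_hpa == 0x40200000UL);
        return 0;
    }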
vmtrr.c 140 uint64_t size, uint8_t type) in update_ept() argument
163 ept_modify_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, start, size, attr, EPT_MT_MASK); in update_ept()
169 uint64_t start, size; in update_ept_mem_type() local
185 size = get_subrange_size_of_fixed_mtrr(i); in update_ept_mem_type()
190 size += get_subrange_size_of_fixed_mtrr(i); in update_ept_mem_type()
192 update_ept(vm, start, size, type); in update_ept_mem_type()
195 size = get_subrange_size_of_fixed_mtrr(i); in update_ept_mem_type()
199 update_ept(vm, start, size, type); in update_ept_mem_type()
instr_emul.c 719 uint8_t size; in vie_update_rflags() local
726 size = 8U; in vie_update_rflags()
775 uint8_t size; in emulate_mov() local
883 uint8_t size; in emulate_movx() local
1217 uint8_t size; in emulate_test() local
1281 uint8_t size; in emulate_and() local
1364 uint8_t size; in emulate_or() local
1450 uint8_t size; in emulate_cmp() local
1515 size = 1U; in emulate_cmp()
1537 uint8_t size; in emulate_sub() local
[all …]
vmx_io.c 49 uint64_t mask = 0xFFFFFFFFUL >> (32UL - (8UL * pio_req->size)); in emulate_pio_complete()
78 pio_req->size = vm_exit_io_instruction_size(exit_qual) + 1UL; in pio_instr_vmexit_handler()
81 mask = 0xFFFFFFFFU >> (32U - (8U * pio_req->size)); in pio_instr_vmexit_handler()
91 (uint32_t)pio_req->size, in pio_instr_vmexit_handler()
155 mmio_req->size = (uint64_t)ret; in ept_violation_vmexit_handler()
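The mask expression in emulate_pio_complete() and pio_instr_vmexit_handler() selects just the low 'size' bytes of a port I/O value. A worked sketch of the values it produces for 1-, 2- and 4-byte accesses:

    #include <stdint.h>
    #include <assert.h>

    /* Mask covering the low 'size' bytes of a port I/O value (size in {1, 2, 4}). */
    static uint32_t pio_size_mask(uint32_t size)
    {
        return 0xFFFFFFFFU >> (32U - (8U * size));
    }

    int main(void)
    {
        assert(pio_size_mask(1U) == 0x000000FFU);
        assert(pio_size_mask(2U) == 0x0000FFFFU);
        assert(pio_size_mask(4U) == 0xFFFFFFFFU);
        return 0;
    }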
trusty.c 57 uint64_t size, uint64_t gpa_rebased) in create_secure_world_ept() argument
65 ept_del_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, gpa_orig, size); in create_secure_world_ept()
71 ept_add_mr(vm, (uint64_t *)vm->arch_vm.sworld_eptp, hpa, gpa_rebased, size, EPT_RWX | EPT_WB); in create_secure_world_ept()
76 vm->sworld_control.sworld_memory.length = size; in create_secure_world_ept()
83 uint64_t size = vm->sworld_control.sworld_memory.length; in destroy_secure_world() local
89 (void)memset(hpa2hva(hpa), 0U, (size_t)size); in destroy_secure_world()
93 ept_del_mr(vm, vm->arch_vm.sworld_eptp, gpa_user_vm, size); in destroy_secure_world()
97 ept_add_mr(vm, vm->arch_vm.nworld_eptp, hpa, gpa_user_vm, size, EPT_RWX | EPT_WB); in destroy_secure_world()
322 uint32_t size, in init_secure_world_env() argument
330 TRUSTY_EPT_REBASE_GPA + size; in init_secure_world_env()
[all …]
/hypervisor/debug/
shell.c 911 size -= len; in dump_vcpu_reg()
924 size -= len; in dump_vcpu_reg()
932 size -= len; in dump_vcpu_reg()
1163 size -= len; in get_cpu_interrupt_info()
1171 size -= len; in get_cpu_interrupt_info()
1281 size -= len; in get_ptdev_info()
1339 size -= len; in get_vioapic_info()
1348 size -= len; in get_vioapic_info()
1372 size -= len; in get_vioapic_info()
1423 size -= len; in get_ioapic_info()
[all …]
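The repeated "size -= len" hits in shell.c come from routines that append formatted text into a fixed buffer, shrinking the remaining space after every write. A generic sketch of that accumulation pattern, with snprintf standing in for the hypervisor's own formatter and made-up register values:

    #include <stdio.h>
    #include <stddef.h>

    static void dump_example(char *buf, size_t size)
    {
        int len;

        len = snprintf(buf, size, "RIP: 0x%016lx\n", 0xfffff000UL);
        if ((len < 0) || ((size_t)len >= size)) {
            return;          /* error or buffer exhausted */
        }
        buf  += len;
        size -= (size_t)len; /* the "size -= len" step seen throughout shell.c */

        (void)snprintf(buf, size, "RSP: 0x%016lx\n", 0xffffe000UL);
    }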
/hypervisor/boot/include/
multiboot_std.h 82 uint32_t size; member
211 uint32_t size; member
216 uint32_t size; member
222 uint32_t size; member
230 uint32_t size; member
238 uint32_t size; member
244 uint32_t size; member
250 uint32_t size; member
/hypervisor/dm/vpci/
vmsix.c 104 if ((mmio->size <= 8U) && mem_aligned_check(mmio->address, mmio->size)) { in rw_vmsix_table()
108 if ((mmio->size == 4U) || (mmio->size == 8U)) { in rw_vmsix_table()
117 (void)memcpy_s(&mmio->value, (size_t)mmio->size, in rw_vmsix_table()
118 (void *)entry + entry_offset, (size_t)mmio->size); in rw_vmsix_table()
120 (void)memcpy_s((void *)entry + entry_offset, (size_t)mmio->size, in rw_vmsix_table()
121 &mmio->value, (size_t)mmio->size); in rw_vmsix_table()
131 mmio->value = mmio_read(hva, mmio->size); in rw_vmsix_table()
133 mmio_write(hva, mmio->size, mmio->value); in rw_vmsix_table()
ivshmem.c 113 addr += mem_regions[i].size; in init_ivshmem_shared_memory()
262 if ((mmio->size == 4U) && ((offset & 0x3U) == 0U) && in ivshmem_mmio_handler()
395 (vbar->base_gpa + vbar->size), vdev, false); in ivshmem_vbar_map()
399 (vbar->base_gpa + vbar->size), vdev, false); in ivshmem_vbar_map()
473 uint64_t addr, mask, size = 0UL; in init_ivshmem_bar() local
485 size = region->size; in init_ivshmem_bar()
492 size = VMSIX_ENTRY_TABLE_PBA_BAR_SIZE; in init_ivshmem_bar()
494 size = IVSHMEM_MMIO_BAR_SIZE; in init_ivshmem_bar()
496 if (size != 0UL) { in init_ivshmem_bar()
497 vbar->size = size; in init_ivshmem_bar()
[all …]
vmcs9900.c 68 vbar->base_gpa, vbar->base_gpa + vbar->size, vdev, false); in map_vmcs9900_vbar()
69 ept_del_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, vbar->base_gpa, vbar->size); in map_vmcs9900_vbar()
73 (vbar->base_gpa + vbar->size), vdev, false); in map_vmcs9900_vbar()
74 ept_del_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, vbar->base_gpa, vbar->size); in map_vmcs9900_vbar()
91 unregister_mmio_emulation_handler(vm, vbar->base_gpa, vbar->base_gpa + vbar->size); in unmap_vmcs9900_vbar()
129 mmio_vbar->size = 0x1000U; in init_vmcs9900()
131 mmio_vbar->mask = (uint32_t) (~(mmio_vbar->size - 1UL)); in init_vmcs9900()
135 msix_vbar->size = 0x1000U; in init_vmcs9900()
137 msix_vbar->mask = (uint32_t) (~(msix_vbar->size - 1UL)); in init_vmcs9900()
pci_pt.c 235 vbar->size); in vdev_pt_unmap_mem_vbar()
257 vbar->size, in vdev_pt_map_mem_vbar()
279 allow_guest_pio_access(vm, (uint16_t)vbar->base_gpa, (uint32_t)(vbar->size)); in vdev_pt_allow_io_vbar()
297 deny_guest_pio_access(vm, (uint16_t)(vbar->base_gpa), (uint32_t)(vbar->size)); in vdev_pt_deny_io_vbar()
459 vbar->size = (uint64_t)size32 & mask; in init_bars()
476 vbar->size |= ((uint64_t)size32 << 32U); in init_bars()
477 vbar->size = vbar->size & ~(vbar->size - 1UL); in init_bars()
478 vbar->size = round_page_up(vbar->size); in init_bars()
498 vbar->size = vbar->size & ~(vbar->size - 1UL); in init_bars()
500 vbar->size = round_page_up(vbar->size); in init_bars()
[all …]
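init_bars() (lines 459-500 above) decodes a BAR's size the standard PCI way: keep the writable bits read back after the all-ones probe, isolate the lowest set bit, and round the result up to a page before exposing it to the guest. A worked sketch of that arithmetic on a sample 64-bit memory BAR; the sizing probe is assumed to have already happened, and round_page_up is a stand-in for ACRN's helper:

    #include <stdint.h>
    #include <assert.h>

    #define PAGE_MASK 0xFFFUL

    static uint64_t round_page_up(uint64_t x)
    {
        return (x + PAGE_MASK) & ~PAGE_MASK;
    }

    int main(void)
    {
        /* Values read back from the BAR pair after writing all-ones (example only). */
        uint32_t size32_lo = 0xFFFFC000U;   /* low dword, type/flag bits already masked off */
        uint32_t size32_hi = 0xFFFFFFFFU;   /* high dword of a 64-bit BAR */

        uint64_t size = (uint64_t)size32_lo | ((uint64_t)size32_hi << 32U);

        /* Keep only the lowest writable bit: that is the BAR's natural size. */
        size = size & ~(size - 1UL);
        size = round_page_up(size);         /* expose at least one page to the guest */

        assert(size == 0x4000UL);           /* a 16 KiB BAR */
        return 0;
    }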
/hypervisor/boot/guest/
vboot_info.c 33 vm->sw.ramdisk_info.size = mod->size; in init_vm_ramdisk_info()
36 dev_dbg(DBG_LEVEL_BOOT, "ramdisk mod start=0x%x, size=0x%x", (uint64_t)mod->start, mod->size); in init_vm_ramdisk_info()
46 vm->sw.acpi_info.size = ACPI_MODULE_SIZE; in init_vm_acpi_info()
58 (uint64_t)mod->start, mod->size); in init_vm_kernel_info()
61 if ((mod->start != NULL) && (mod->size != 0U)) { in init_vm_kernel_info()
63 vm->sw.kernel_info.kernel_size = mod->size; in init_vm_kernel_info()
121 …vm->sw.bootargs_info.size = strnlen_s((const char *)vm->sw.bootargs_info.src_addr, (MAX_BOOTARGS_S… in init_vm_bootargs_info()
152 pr_err("Unsupported module: start at HPA 0, size 0x%x .", mod->size); in get_mod_by_tag()
197 if ((mod != NULL) && (mod->size == ACPI_MODULE_SIZE)) { in init_vm_sw_load()
/hypervisor/boot/multiboot/
multiboot2.c 22 abi->mmap_entries = (mb2_tag_mmap->size - 16U) / sizeof(struct multiboot2_mmap_entry); in mb2_mmap_to_abi()
42 abi->mods[mbi_mod_idx].size = mb2_tag_mods->mod_end - mb2_tag_mods->mod_start; in mb2_mods_to_abi()
68 abi->uefi_info.memmap_size = mb2_tag_efimmap->size - 16U; in mb2_efimmap_to_abi()
124 if (mb2_tag->size == 0U) { in multiboot2_to_acrn_bi()
136 + ((mb2_tag->size + (MULTIBOOT2_INFO_ALIGN - 1U)) & ~(MULTIBOOT2_INFO_ALIGN - 1U))); in multiboot2_to_acrn_bi()
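multiboot2_to_acrn_bi() (line 136 above) advances through the MBI tag list by rounding each tag's size up to the 8-byte tag alignment. A small sketch of that advance; the struct follows the Multiboot2 tag header layout and next_tag is a hypothetical helper:

    #include <stdint.h>

    #define MULTIBOOT2_INFO_ALIGN  8U

    struct multiboot2_tag {
        uint32_t type;
        uint32_t size;   /* size of this tag, not including padding */
    };

    /* Advance to the next tag: tags are padded so each starts on an 8-byte boundary. */
    static const struct multiboot2_tag *next_tag(const struct multiboot2_tag *tag)
    {
        uint32_t padded = (tag->size + (MULTIBOOT2_INFO_ALIGN - 1U)) & ~(MULTIBOOT2_INFO_ALIGN - 1U);

        return (const struct multiboot2_tag *)((const uint8_t *)tag + padded);
    }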
