| /hypervisor/lib/crypto/mbedtls/ |
| sha256.c |
    77:   *(b + i + 3U) = (uint8_t) n;    in put_unint32_be()
    122:  …*(w + i) = sigma1(*(w + i - (2U))) + *(w + i - (7U)) + sigma0(*(w + i - (15U))) + *(w + i - (16U));    in decomposition()
    197:  uint32_t i;    in mbedtls_internal_sha256_process() local
    199:  for (i = 0U; i < 8U; i++) {    in mbedtls_internal_sha256_process()
    200:  a[i] = ctx->state[i];    in mbedtls_internal_sha256_process()
    203:  for (i = 0U; i < 16U; i++) {    in mbedtls_internal_sha256_process()
    204:  w[i] = get_uint32_be(data, 4 * i);    in mbedtls_internal_sha256_process()
    207:  for (i = 0U; i < 16U; i += 8U) {    in mbedtls_internal_sha256_process()
    218:  for (i = 16U; i < 64U; i += 8U) {    in mbedtls_internal_sha256_process()
    244:  for (i = 0U; i < 8U; i++) {    in mbedtls_internal_sha256_process()
    [all …]
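
The hits at lines 204 and 122 are the big-endian word loads and the SHA-256 message-schedule expansion. Below is a minimal sketch of that step, assuming the textbook FIPS 180-4 definitions of sigma0/sigma1; the helper names mirror the ones above, but the bodies are not copied from the ACRN/mbedtls sources.

    #include <stdint.h>
    #include <stddef.h>

    /* Rotate a 32-bit word right by n bits (0 < n < 32). */
    static inline uint32_t rotr32(uint32_t x, uint32_t n)
    {
        return (x >> n) | (x << (32U - n));
    }

    /* FIPS 180-4 "small sigma" functions used by the message schedule. */
    static inline uint32_t sigma0(uint32_t x)
    {
        return rotr32(x, 7U) ^ rotr32(x, 18U) ^ (x >> 3U);
    }

    static inline uint32_t sigma1(uint32_t x)
    {
        return rotr32(x, 17U) ^ rotr32(x, 19U) ^ (x >> 10U);
    }

    /* Load a big-endian 32-bit word starting at byte offset 'off'. */
    static inline uint32_t get_uint32_be(const uint8_t *b, size_t off)
    {
        return ((uint32_t)b[off] << 24) | ((uint32_t)b[off + 1U] << 16) |
               ((uint32_t)b[off + 2U] << 8) | (uint32_t)b[off + 3U];
    }

    /* Expand one 64-byte block into the 64-entry message schedule w[0..63]. */
    static void sha256_schedule(const uint8_t data[64], uint32_t w[64])
    {
        uint32_t i;

        for (i = 0U; i < 16U; i++) {
            w[i] = get_uint32_be(data, 4U * i);
        }
        for (i = 16U; i < 64U; i++) {
            w[i] = sigma1(w[i - 2U]) + w[i - 7U] + sigma0(w[i - 15U]) + w[i - 16U];
        }
    }

The compression rounds themselves are omitted; the i += 8U strides at lines 207 and 218 suggest an 8-way unrolled round loop in the real file.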
|
| /hypervisor/arch/x86/seed/ |
| seed.c |
    45:   uint32_t i = SEED_ARG_NUM - 1U;    in parse_seed_arg() local
    51:   for (i = 0U; seed_arg[i].str != NULL; i++) {    in parse_seed_arg()
    73:   return i;    in parse_seed_arg()
    96:   uint32_t i;    in fill_seed_arg() local
    98:   for (i = 0U; seed_arg[i].str != NULL; i++) {    in fill_seed_arg()
    139:  uint32_t i;    in derive_virtual_seed() local
    145:  for (i = 0U; i < g_phy_seed.num_seeds; i++) {    in derive_virtual_seed()
    158:  seed_list[i].cse_svn = g_phy_seed.seed_list[i].cse_svn;    in derive_virtual_seed()
    170:  for (i = 1U; i < g_phy_seed.num_seeds; i++) {    in get_max_svn_index()
    171:  if (g_phy_seed.seed_list[i].cse_svn > g_phy_seed.seed_list[i - 1U].cse_svn) {    in get_max_svn_index()
    [all …]
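
The get_max_svn_index() hits compare the cse_svn of adjacent entries, which only works if the physical seed list is ordered by SVN. The sketch below tracks a running maximum instead, which gives the same index without that ordering assumption; the struct layout and the INVALID_SEED_INDEX sentinel are illustrative, not taken from the ACRN headers.

    #include <stdint.h>

    #define INVALID_SEED_INDEX 0xFFFFFFFFU   /* placeholder sentinel */

    /* Minimal stand-ins for the physical-seed bookkeeping referenced above. */
    struct seed_entry {
        uint8_t cse_svn;      /* security version number of this seed */
        uint8_t seed[64];
    };

    struct phy_seed {
        uint32_t num_seeds;
        struct seed_entry seed_list[4];
    };

    /* Return the index of the entry with the highest cse_svn, or the sentinel
     * when the list is empty. */
    static uint32_t get_max_svn_index(const struct phy_seed *p)
    {
        uint32_t i, max_idx = 0U;

        if (p->num_seeds == 0U) {
            return INVALID_SEED_INDEX;
        }
        for (i = 1U; i < p->num_seeds; i++) {
            if (p->seed_list[i].cse_svn > p->seed_list[max_idx].cse_svn) {
                max_idx = i;
            }
        }
        return max_idx;
    }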
|
| seed_abl.c |
    46:   uint32_t i;    in parse_seed_abl() local
    63:   for (i = 1U; i < abl_seed->num_seeds; i++) {    in parse_seed_abl()
    64:   if (abl_seed->seed_list[i].svn < abl_seed->seed_list[legacy_seed_index].svn) {    in parse_seed_abl()
    65:   legacy_seed_index = i;    in parse_seed_abl()
    75:   for (i = 0U; i < abl_seed->num_seeds; i++) {    in parse_seed_abl()
    76:   seed_list[i].cse_svn = abl_seed->seed_list[i].svn;    in parse_seed_abl()
    77:   (void)memcpy_s((void *)&seed_list[i].seed[0U], sizeof(seed_list[i].seed),    in parse_seed_abl()
    78:   (void *)&abl_seed->seed_list[i].seed[0U], sizeof(abl_seed->seed_list[i].seed));    in parse_seed_abl()
    80:   if (i == legacy_seed_index) {    in parse_seed_abl()
    84:   (void)memset((void *)&abl_seed->seed_list[i].seed[0U], 0U,    in parse_seed_abl()
    [all …]
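
parse_seed_abl() picks the lowest-SVN entry as the legacy seed, copies every bootloader-provided seed into the hypervisor-owned list, and then wipes the source copies. A rough sketch of that copy-and-scrub pattern, with plain memcpy/memset standing in for the memcpy_s/memset calls above and invented struct layouts:

    #include <stdint.h>
    #include <string.h>

    #define ABL_SEED_LEN 32U   /* illustrative seed length */

    struct abl_seed_info {
        uint8_t svn;
        uint8_t seed[ABL_SEED_LEN];
    };

    struct derived_seed {
        uint8_t cse_svn;
        uint8_t seed[ABL_SEED_LEN];
    };

    /* Copy every ABL seed into 'out' and scrub the source so the raw seeds do
     * not linger in bootloader memory.  Returns the index of the lowest-SVN
     * ("legacy") entry.  The real code has extra handling when
     * i == legacy_seed_index (line 80); that branch is omitted here. */
    static uint32_t copy_and_scrub_seeds(struct abl_seed_info *src, uint32_t num,
                                         struct derived_seed *out)
    {
        uint32_t i, legacy_seed_index = 0U;

        for (i = 1U; i < num; i++) {
            if (src[i].svn < src[legacy_seed_index].svn) {
                legacy_seed_index = i;
            }
        }

        for (i = 0U; i < num; i++) {
            out[i].cse_svn = src[i].svn;
            (void)memcpy(out[i].seed, src[i].seed, sizeof(out[i].seed));
            (void)memset(src[i].seed, 0, sizeof(src[i].seed));   /* scrub */
        }

        return legacy_seed_index;
    }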
|
| /hypervisor/arch/x86/ |
| rdt.c |
    32:   uint32_t i;    in get_rdt_res_ins() local
    36:   for (i = 0U; i < info->num_ins; i++) {    in get_rdt_res_ins()
    38:   ins = &info->ins_array[i];    in get_rdt_res_ins()
    47:   uint16_t i;    in setup_res_clos_msr() local
    60:   for (i = 0U; i < ins->num_clos_config; i++) {    in setup_res_clos_msr()
    79:   uint16_t i, j;    in setup_clos() local
    83:   for (i = 0U; i < RDT_NUM_RESOURCES; i++) {    in setup_clos()
    84:   info = &res_cap_info[i];    in setup_clos()
    109:  uint32_t i;    in is_rdt_type_capable() local
    114:  for (i = 0U; i < info->num_ins; i++) {    in is_rdt_type_capable()
    [all …]
|
| e820.c |
    44:   int32_t i;    in e820_alloc_memory() local
    49:   for (i = (int32_t)hv_e820_entries_nr - 1; i >= 0; i--) {    in e820_alloc_memory()
    114:  for (i = hv_e820_entries_nr - 1; i > index; i--) {    in insert_e820_entry()
    115:  hv_e820[i] = hv_e820[i-1];    in insert_e820_entry()
    128:  for (i = 0U; i < get_efi_mmap_entries_count(); i++) {    in init_e820_from_efi_mmap()
    209:  for (i = 0U; i < hv_e820_entries_nr; i++) {    in init_e820_from_mmap()
    211:  hv_e820[i].baseaddr = mmap[i].baseaddr;    in init_e820_from_mmap()
    212:  hv_e820[i].length = mmap[i].length;    in init_e820_from_mmap()
    213:  hv_e820[i].type = mmap[i].type;    in init_e820_from_mmap()
    216:  mmap[i].type, mmap[i].baseaddr, mmap[i].length);    in init_e820_from_mmap()
    [all …]
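
Lines 114-115 show insert_e820_entry() opening a slot by shifting the tail of the hv_e820 table up by one element. A small sketch of that insertion, assuming the caller has already checked that the table has room and picked the insertion index:

    #include <stdint.h>

    #define E820_MAX_ENTRIES 32U   /* illustrative capacity */

    struct e820_entry {
        uint64_t baseaddr;
        uint64_t length;
        uint32_t type;
    };

    static struct e820_entry hv_e820[E820_MAX_ENTRIES];
    static uint32_t hv_e820_entries_nr;

    /* Shift entries [index .. nr-1] up by one and store the new entry at
     * 'index', preserving whatever ordering the caller maintains. */
    static void insert_e820_entry(uint32_t index, const struct e820_entry *entry)
    {
        uint32_t i;

        hv_e820_entries_nr++;
        for (i = hv_e820_entries_nr - 1U; i > index; i--) {
            hv_e820[i] = hv_e820[i - 1U];
        }
        hv_e820[index] = *entry;
    }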
|
| irq.c |
    219:  uint32_t i;    in init_irq_descs_arch() local
    225:  for (i = 0U; i < CONFIG_MAX_VM_NUM; i++) {    in init_irq_descs_arch()
    235:  for (i = 0U; i < NR_IRQS; i++) {    in init_irq_descs_arch()
    237:  descs[i].arch_data = &irq_data[i];    in init_irq_descs_arch()
    240:  for (i = 0U; i <= NR_MAX_VECTOR; i++) {    in init_irq_descs_arch()
    241:  vector_to_irq[i] = IRQ_INVALID;    in init_irq_descs_arch()
    245:  for (i = 0U; i < NR_STATIC_MAPPINGS; i++) {    in init_irq_descs_arch()
    270:  uint32_t i;    in fixup_idt() local
    274:  for (i = 0U; i < HOST_IDT_ENTRIES; i++) {    in fixup_idt()
    276:  entry_hi_32 = idt_desc[i].rsvd;    in fixup_idt()
    [all …]
|
| cpu.c |
    65:   uint16_t i;    in init_percpu_lapic_id() local
    73:   for (i = 0U; i < phys_cpu_num; i++) {    in init_percpu_lapic_id()
    74:   per_cpu(lapic_id, i) = lapic_id_array[i];    in init_percpu_lapic_id()
    343:  uint16_t i;    in get_pcpu_id_from_lapic_id() local
    346:  for (i = 0U; i < phys_cpu_num; i++) {    in get_pcpu_id_from_lapic_id()
    348:  pcpu_id = i;    in get_pcpu_id_from_lapic_id()
    401:  uint16_t i;    in start_pcpus() local
    405:  i = ffs64(expected_start_mask);    in start_pcpus()
    406:  while (i != INVALID_BIT_INDEX) {    in start_pcpus()
    409:  if (pcpu_id == i) {    in start_pcpus()
    [all …]
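
start_pcpus() walks the set bits of a CPU mask with ffs64() until the INVALID_BIT_INDEX sentinel comes back. The sketch below shows that walk in isolation; ffs64() is re-implemented with a GCC builtin and the sentinel value is illustrative.

    #include <stdint.h>
    #include <stdio.h>

    #define INVALID_BIT_INDEX 0xFFFFU   /* sentinel, mirroring the name used above */

    /* Index of the lowest set bit, or the sentinel when the mask is zero. */
    static uint16_t ffs64(uint64_t value)
    {
        return (value == 0UL) ? INVALID_BIT_INDEX : (uint16_t)__builtin_ctzll(value);
    }

    /* Visit every pCPU selected in 'mask', lowest index first, clearing each
     * bit once it has been handled. */
    static void for_each_pcpu_in_mask(uint64_t mask)
    {
        uint16_t i = ffs64(mask);

        while (i != INVALID_BIT_INDEX) {
            printf("bring up pCPU %u\n", (unsigned int)i);   /* placeholder work */
            mask &= ~(1ULL << i);
            i = ffs64(mask);
        }
    }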
|
| /hypervisor/debug/ |
| profiling.c |
    71:    for (i = 0U; i < MAX_MSR_LIST_NUM; i++) {    in profiling_initialize_pmi()
    150:   for (i = 0U; i < MAX_MSR_LIST_NUM; i++) {    in profiling_enable_pmu()
    729:   for (i = 0U; i < pcpu_nums; i++) {    in profiling_start_pmu()
    737:   for (i = 0U; i < pcpu_nums; i++) {    in profiling_start_pmu()
    768:   for (i = 0U; i < pcpu_nums; i++) {    in profiling_stop_pmu()
    818:   for (i = 0U; i < pcpu_nums; i++) {    in profiling_msr_ops_all_cpus()
    854:   for (i = 0U; i < pcpu_nums; i++) {    in profiling_vm_list_info()
    1084:  for (i = 0U; i < pcpu_nums; i++) {    in profiling_configure_pmi()
    1102:  for (i = 0U; i < pcpu_nums; i++) {    in profiling_configure_pmi()
    1161:  for (i = 0U; i < pcpu_nums; i++) {    in profiling_configure_vmsw()
    [all …]
|
| dbg_cmd.c |
    30:   uint32_t i;    in handle_dbg_cmd() local
    33:   for (i = 0; i < ARRAY_SIZE(cmd_list); i++) {    in handle_dbg_cmd()
    34:   int32_t tmp = strnlen_s(cmd_list[i].str, MAX_CMD_LEN);    in handle_dbg_cmd()
    35:   int type = cmd_list[i].type;    in handle_dbg_cmd()
    41:   if (strncmp(cmd_list[i].str, cmd, tmp) != 0)    in handle_dbg_cmd()
    61:   return (i < ARRAY_SIZE(cmd_list));    in handle_dbg_cmd()
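
handle_dbg_cmd() prefix-matches the incoming string against a command table and reports success by testing the loop counter after the loop. A sketch of that dispatch shape, with an invented two-entry table and strnlen/strncmp in place of the strnlen_s call above:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_CMD_LEN 64U
    #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

    struct dbg_cmd {
        const char *str;   /* command prefix to match */
        int type;          /* handler selector */
    };

    /* Illustrative table; the real cmd_list lives in dbg_cmd.c. */
    static const struct dbg_cmd cmd_list[] = {
        { "uart=",    1 },
        { "noserial", 2 },
    };

    /* Returns true when 'cmd' starts with one of the known prefixes. */
    static bool handle_dbg_cmd(const char *cmd)
    {
        uint32_t i;

        for (i = 0U; i < ARRAY_SIZE(cmd_list); i++) {
            size_t len = strnlen(cmd_list[i].str, MAX_CMD_LEN);

            if (strncmp(cmd_list[i].str, cmd, len) != 0) {
                continue;   /* not this command, try the next entry */
            }
            printf("dispatch command type %d\n", cmd_list[i].type);   /* placeholder */
            break;
        }
        return (i < ARRAY_SIZE(cmd_list));
    }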
|
| dump.c |
    114:  uint32_t i;    in dump_guest_stack() local
    127:  for (i = 0U; i < (DUMP_STACK_SIZE >> 5U); i++) {    in dump_guest_stack()
    129:  (vcpu_get_gpreg(vcpu, CPU_REG_RSP)+(i*32U)),    in dump_guest_stack()
    130:  tmp[i*4], tmp[(i*4)+1],    in dump_guest_stack()
    131:  tmp[(i*4)+2], tmp[(i*4)+3]);    in dump_guest_stack()
    149:  uint32_t i = 0U;    in show_host_call_trace() local
    155:  for (i = 0U; i < (DUMP_STACK_SIZE >> 5U); i++) {    in show_host_call_trace()
    157:  (rsp + (i * 32U)), sp[i * 4U],    in show_host_call_trace()
    158:  sp[(i * 4U) + 1U], sp[(i * 4U) + 2U],    in show_host_call_trace()
    159:  sp[(i * 4U) + 3U]);    in show_host_call_trace()
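
Both dump loops print the stack 32 bytes per row: the address of the row followed by the four 64-bit words stored there. A standalone sketch of that formatting, with DUMP_STACK_SIZE chosen arbitrarily:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DUMP_STACK_SIZE 0x200U   /* bytes to dump; illustrative value */

    static void dump_stack_region(uint64_t rsp, const uint64_t *sp)
    {
        uint32_t i;

        /* (DUMP_STACK_SIZE >> 5U) rows of 32 bytes, i.e. four qwords per row. */
        for (i = 0U; i < (DUMP_STACK_SIZE >> 5U); i++) {
            printf("0x%016" PRIx64 ":  0x%016" PRIx64 "  0x%016" PRIx64
                   "  0x%016" PRIx64 "  0x%016" PRIx64 "\n",
                   rsp + (i * 32U),
                   sp[i * 4U], sp[(i * 4U) + 1U],
                   sp[(i * 4U) + 2U], sp[(i * 4U) + 3U]);
        }
    }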
|
| /hypervisor/boot/multiboot/ |
| multiboot.c |
    17:   uint32_t i;    in multiboot_to_acrn_bi() local
    37:   for (i = 0U; i < abi->mmap_entries; i++) {    in multiboot_to_acrn_bi()
    38:   abi->mmap_entry[i].baseaddr = (mmap + i)->baseaddr;    in multiboot_to_acrn_bi()
    39:   abi->mmap_entry[i].length = (mmap + i)->length;    in multiboot_to_acrn_bi()
    40:   abi->mmap_entry[i].type = (mmap + i)->type;    in multiboot_to_acrn_bi()
    52:   for (i = 0U; i < abi->mods_count; i++) {    in multiboot_to_acrn_bi()
    53:   abi->mods[i].start = hpa2hva_early((uint64_t)(mods + i)->mm_mod_start);    in multiboot_to_acrn_bi()
    54:   if ((mods + i)->mm_mod_end > (mods + i)->mm_mod_start) {    in multiboot_to_acrn_bi()
    55:   abi->mods[i].size = (mods + i)->mm_mod_end - (mods + i)->mm_mod_start;    in multiboot_to_acrn_bi()
    57:   (void)strncpy_s((void *)(abi->mods[i].string), MAX_MOD_STRING_SIZE,    in multiboot_to_acrn_bi()
    [all …]
|
| multiboot2.c |
    18:   uint32_t i;    in mb2_mmap_to_abi() local
    27:   for (i = 0U; i < abi->mmap_entries; i++) {    in mb2_mmap_to_abi()
    28:   abi->mmap_entry[i].baseaddr = (mb2_mmap + i)->addr;    in mb2_mmap_to_abi()
    29:   abi->mmap_entry[i].length = (mb2_mmap + i)->len;    in mb2_mmap_to_abi()
    30:   abi->mmap_entry[i].type = (mb2_mmap + i)->type;    in mb2_mmap_to_abi()
|
| /hypervisor/arch/x86/guest/ |
| vmtrr.c |
    53:   uint32_t i;    in get_index_of_fixed_mtrr() local
    55:   for (i = 0U; i < FIXED_RANGE_MTRR_NUM; i++) {    in get_index_of_fixed_mtrr()
    56:   if (fixed_mtrr_map[i].msr == msr) {    in get_index_of_fixed_mtrr()
    61:   return (i < FIXED_RANGE_MTRR_NUM) ? i : FIXED_MTRR_INVALID_INDEX;    in get_index_of_fixed_mtrr()
    98:   uint32_t i;    in init_vmtrr() local
    115:  for (i = 0U; i < FIXED_RANGE_MTRR_NUM; i++) {    in init_vmtrr()
    124:  vmtrr->fixed_range[i].value = msr_read(fixed_mtrr_map[i].msr);    in init_vmtrr()
    134:  vcpu->vm->vm_id, vcpu->vcpu_id, i,    in init_vmtrr()
    135:  vmtrr->fixed_range[i].value);    in init_vmtrr()
    170:  uint32_t i, j;    in update_ept_mem_type() local
    [all …]
|
| ve820.c |
    25:   int32_t i;    in find_space_from_ve820() local
    31:   for (i = (int32_t)(vm->e820_entry_num - 1U); i >= 0; i--) {    in find_space_from_ve820()
    57:   uint32_t i,j;    in sort_vm_e820() local
    61:   for (i = 0U; i < (vm->e820_entry_num - 1U); i++) {    in sort_vm_e820()
    74:   uint32_t i;    in filter_mem_from_service_vm_e820() local
    80:   for (i = 0U; i < entries_count; i++) {    in filter_mem_from_service_vm_e820()
    140:  uint32_t i;    in create_service_vm_e820() local
    155:  for (i = 0; i < vm_config->memory.region_num; i++) {    in create_service_vm_e820()
    163:  for (i = 0U; i < vm->e820_entry_num; i++) {    in create_service_vm_e820()
    217:  uint64_t i;    in calculate_memory_size() local
    [all …]
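
The i/j pair and the (e820_entry_num - 1U) bound in sort_vm_e820() point to a simple in-place exchange sort of the virtual E820 table. A sketch of sorting such a table by ascending base address (the comparison key is an assumption here):

    #include <stdint.h>

    struct e820_entry {
        uint64_t baseaddr;
        uint64_t length;
        uint32_t type;
    };

    /* Order the table by ascending base address; small fixed-size tables do
     * not need anything fancier than an exchange sort. */
    static void sort_e820_by_base(struct e820_entry *tbl, uint32_t num)
    {
        uint32_t i, j;

        if (num < 2U) {
            return;
        }
        for (i = 0U; i < (num - 1U); i++) {
            for (j = i + 1U; j < num; j++) {
                if (tbl[j].baseaddr < tbl[i].baseaddr) {
                    struct e820_entry tmp = tbl[i];

                    tbl[i] = tbl[j];
                    tbl[j] = tmp;
                }
            }
        }
    }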
|
| vm.c |
    339:  for (i = 0U; i < vm->e820_entry_num; i++) {    in prepare_prelaunched_vm_memmap()
    391:  for (i = 0U; i < MAX_MMIO_DEV_NUM; i++) {    in prepare_prelaunched_vm_memmap()
    452:  for (i = 0; i < pci_dev_num; i++) {    in deny_pdevs()
    466:  for (i = 0U; i < get_hv_owned_pdev_num(); i++) {    in deny_hv_owned_devices()
    500:  for (i = 0U; i < entries_count; i++) {    in prepare_service_vm_memmap()
    511:  for (i = 0U; i < entries_count; i++) {    in prepare_service_vm_memmap()
    523:  for (i = 0U; (i < MAX_EPC_SECTIONS) && (epc_secs[i].size != 0UL); i++) {    in prepare_service_vm_memmap()
    543:  for (i = 0U; i < MAX_MMIO_DEV_NUM; i++) {    in prepare_service_vm_memmap()
    604:  for (i = 0U; (i < MAX_EPC_SECTIONS) && (vm_epc_maps[i].size != 0UL); i++) {    in prepare_epc_vm_memmap()
    644:  for (i = 0U; i < entries_count; i++) {    in prepare_vm_identical_memmap()
    [all …]
|
| vept.c |
    78:   uint64_t i, j, k;    in free_sept_table() local
    81:   for (i = 0UL; i < PTRS_PER_PML4E; i++) {    in free_sept_table()
    115:  uint32_t i;    in find_vept_desc() local
    120:  for (i = 0L; i < CONFIG_MAX_GUEST_EPT_NUM; i++) {    in find_vept_desc()
    123:  desc = &vept_desc_bucket[i];    in find_vept_desc()
    156:  uint32_t i;    in get_vept_desc() local
    161:  for (i = 0L; i < CONFIG_MAX_GUEST_EPT_NUM; i++) {    in get_vept_desc()
    164:  desc = &vept_desc_bucket[i];    in get_vept_desc()
    170:  desc = &vept_desc_bucket[i];    in get_vept_desc()
    493:  uint32_t i;    in invept_vmexit_handler() local
    [all …]
|
| /hypervisor/dm/ |
| mmio_dev.c |
    18:   int32_t i, ret = 0;    in assign_mmio_dev() local
    21:   for (i = 0; i < MMIODEV_RES_NUM; i++) {    in assign_mmio_dev()
    22:   res = &mmiodev->res[i];    in assign_mmio_dev()
    31:   __FUNCTION__, i, res->user_vm_pa, res->host_pa, res->size);    in assign_mmio_dev()
    42:   int32_t i, ret = 0;    in deassign_mmio_dev() local
    46:   for (i = 0; i < MMIODEV_RES_NUM; i++) {    in deassign_mmio_dev()
    47:   res = &mmiodev->res[i];    in deassign_mmio_dev()
    55:   __FUNCTION__, i, res->user_vm_pa, res->host_pa, res->size);    in deassign_mmio_dev()
|
| vuart.c |
    168:  uint8_t i;    in find_vuart_by_port() local
    172:  for (i = 0U; i < MAX_VUART_NUM_PER_VM; i++) {    in find_vuart_by_port()
    751:  uint8_t i;    in is_vuart_intx() local
    754:  for (i = 0U; i < MAX_VUART_NUM_PER_VM; i++) {    in is_vuart_intx()
    755:  if ((vm->vuart[i].active) && (vm->vuart[i].irq == intx_gsi)) {    in is_vuart_intx()
    786:  uint8_t i;    in init_legacy_vuarts() local
    789:  for (i = 0U; i < MAX_VUART_NUM_PER_VM; i++) {    in init_legacy_vuarts()
    804:  if (i != 0U) {    in init_legacy_vuarts()
    805:  vuart_setup_connection(vm, &vu_config[i], i);    in init_legacy_vuarts()
    831:  uint8_t i;    in deinit_legacy_vuarts() local
    [all …]
|
| /hypervisor/boot/guest/ |
| elf_loader.c |
    44:   uint32_t i, mmap_length = 0U;    in prepare_multiboot_mmap() local
    48:   for (i = 0U; i < vm->e820_entry_num; i++) {    in prepare_multiboot_mmap()
    90:   uint32_t i;    in do_load_elf64() local
    96:   for (i = 0U; i < p_elf_header64->e_phnum; i++) {    in do_load_elf64()
    125:  for (i = 0U; i < p_elf_header64->e_shnum; i++) {    in do_load_elf64()
    156:  uint32_t i;    in do_load_elf32() local
    162:  for (i = 0U; i < p_elf_header32->e_phnum; i++) {    in do_load_elf32()
    191:  for (i = 0U; i < p_elf_header32->e_shnum; i++) {    in do_load_elf32()
    246:  uint16_t i, j;    in find_img_multiboot_header() local
    251:  for (i = 0U; i <= (((MEM_4K * 2U) / sizeof(uint32_t)) - 3U); i++) {    in find_img_multiboot_header()
    [all …]
|
| bzimage_loader.c |
    191:  uint16_t i, desc_idx = 0U;    in create_service_vm_efi_mmap_desc() local
    194:  for (i = 0U; i < (uint16_t)get_efi_mmap_entries_count(); i++) {    in create_service_vm_efi_mmap_desc()
    206:  || (hv_efi_mmap_desc[i].type == EFI_PALCODE)    in create_service_vm_efi_mmap_desc()
    209:  efi_mmap_desc[desc_idx] = hv_efi_mmap_desc[i];    in create_service_vm_efi_mmap_desc()
    214:  for (i = 0U; i < (uint16_t)vm->e820_entry_num; i++) {    in create_service_vm_efi_mmap_desc()
    221:  if (vm->e820_entries[i].type == E820_TYPE_RAM) {    in create_service_vm_efi_mmap_desc()
    231:  for (i = 0U; i < desc_idx; i++) {    in create_service_vm_efi_mmap_desc()
    233:  efi_mmap_desc[i].phys_addr, efi_mmap_desc[i].num_pages * PAGE_SIZE, efi_mmap_desc[i].type);    in create_service_vm_efi_mmap_desc()
    336:  uint32_t i;    in load_bzimage() local
    395:  for (i = 0U; i < NUM_GPRS; i++) {    in load_bzimage()
    [all …]
|
| /hypervisor/boot/ |
| boot.c |
    21:   uint32_t i;    in get_boot_mods_range() local
    25:   for (i = 0; i < abi->mods_count; i++) {    in get_boot_mods_range()
    26:   if (hva2hpa(abi->mods[i].start) < start) {    in get_boot_mods_range()
    27:   start = hva2hpa(abi->mods[i].start);    in get_boot_mods_range()
    29:   if (hva2hpa(abi->mods[i].start + abi->mods[i].size) > end) {    in get_boot_mods_range()
    30:   end = hva2hpa(abi->mods[i].start + abi->mods[i].size);    in get_boot_mods_range()
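
get_boot_mods_range() is a min/max scan over the boot modules, translating each module's start and end to physical addresses. A self-contained sketch with an identity hva2hpa() standing in for the real translation:

    #include <stdint.h>

    struct boot_module {
        void    *start;    /* HVA where the module sits */
        uint64_t size;
    };

    /* Identity mapping stands in for the real hva2hpa(). */
    static uint64_t hva2hpa(const void *hva)
    {
        return (uint64_t)(uintptr_t)hva;
    }

    /* Compute the physical range [*p_start, *p_end) covering every module. */
    static void get_boot_mods_range(const struct boot_module *mods, uint32_t count,
                                    uint64_t *p_start, uint64_t *p_end)
    {
        uint32_t i;
        uint64_t start = UINT64_MAX, end = 0UL;

        for (i = 0U; i < count; i++) {
            const uint8_t *mod_end = (const uint8_t *)mods[i].start + mods[i].size;

            if (hva2hpa(mods[i].start) < start) {
                start = hva2hpa(mods[i].start);
            }
            if (hva2hpa(mod_end) > end) {
                end = hva2hpa(mod_end);
            }
        }
        *p_start = start;
        *p_end = end;
    }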
|
| acpi_base.c |
    89:   uint32_t i;    in init_acpi() local
    93:   for (i = 0U; i < entries_count; i++) {    in init_acpi()
    94:   if (entry[i].type == E820_TYPE_ACPI_RECLAIM) {    in init_acpi()
    95:   rsdp = found_rsdp((char *)hpa2hva(entry[i].baseaddr), entry[i].length);    in init_acpi()
    132:  uint32_t i, count;    in get_acpi_tbl() local
    148:  for (i = 0U; i < count; i++) {    in get_acpi_tbl()
    149:  if (probe_table(xsdt->table_offset_entry[i], signature)) {    in get_acpi_tbl()
    150:  addr = xsdt->table_offset_entry[i];    in get_acpi_tbl()
    159:  for (i = 0U; i < count; i++) {    in get_acpi_tbl()
    160:  if (probe_table(rsdt->table_offset_entry[i], signature)) {    in get_acpi_tbl()
    [all …]
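
get_acpi_tbl() walks the XSDT (falling back to the RSDT) and probes each referenced table for a 4-byte signature. A sketch of the XSDT half, using the standard ACPI SDT header layout and an identity hpa2hva(); probe_table() here is a local helper, not the ACRN one:

    #include <stdint.h>
    #include <string.h>

    /* Standard ACPI System Description Table header. */
    struct acpi_table_header {
        char     signature[4];
        uint32_t length;
        uint8_t  revision;
        uint8_t  checksum;
        char     oem_id[6];
        char     oem_table_id[8];
        uint32_t oem_revision;
        uint32_t creator_id;
        uint32_t creator_revision;
    };

    struct acpi_table_xsdt {
        struct acpi_table_header header;
        uint64_t table_offset_entry[];   /* physical addresses of the tables */
    };

    /* Identity mapping stands in for the real hpa2hva(). */
    static void *hpa2hva(uint64_t hpa)
    {
        return (void *)(uintptr_t)hpa;
    }

    /* True when the table at 'address' carries the wanted 4-byte signature. */
    static int probe_table(uint64_t address, const char *signature)
    {
        const struct acpi_table_header *hdr = hpa2hva(address);

        return (strncmp(hdr->signature, signature, 4U) == 0) ? 1 : 0;
    }

    /* Scan the XSDT entries for a table such as "MCFG"; 0 means not found. */
    static uint64_t find_acpi_tbl(const struct acpi_table_xsdt *xsdt, const char *signature)
    {
        uint32_t i, count;
        uint64_t addr = 0UL;

        count = (xsdt->header.length - sizeof(struct acpi_table_header)) / sizeof(uint64_t);
        for (i = 0U; i < count; i++) {
            if (probe_table(xsdt->table_offset_entry[i], signature) != 0) {
                addr = xsdt->table_offset_entry[i];
                break;
            }
        }
        return addr;
    }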
|
| /hypervisor/dm/vpci/ |
| vmsix_on_msi.c |
    30:   uint32_t i;    in need_vmsix_on_msi_emulation() local
    32:   for(i = 0U; i < MAX_VMSIX_ON_MSI_PDEVS_NUM; i++) {    in need_vmsix_on_msi_emulation()
    77:   uint32_t i;    in init_vmsix_on_msi() local
    82:   for (i = 0U; i < vdev->nr_bars; i++) {    in init_vmsix_on_msi()
    87:   i++;    in init_vmsix_on_msi()
    90:   if (i < vdev->nr_bars) {    in init_vmsix_on_msi()
    96:   vdev->msix.table_bar = i;    in init_vmsix_on_msi()
    109:  vdev->vbars[i].size = 4096U;    in init_vmsix_on_msi()
    110:  vdev->vbars[i].base_hpa = 0x0UL;    in init_vmsix_on_msi()
    122:  vdev->vbars[i].base_gpa = vdev->pci_dev_config->vbar_base[i];    in init_vmsix_on_msi()
    [all …]
|
| ivshmem.c |
    107:  uint32_t i;    in init_ivshmem_shared_memory() local
    111:  for (i = 0U; i < ARRAY_SIZE(mem_regions); i++) {    in init_ivshmem_shared_memory()
    112:  mem_regions[i].hpa = addr;    in init_ivshmem_shared_memory()
    124:  for (i = 0U; i < num; i++) {    in find_shm_region()
    129:  return ((i < num) ? &mem_regions[i] : NULL);    in find_shm_region()
    203:  uint32_t i;    in create_ivshmem_device() local
    206:  for (i = 0U; i < IVSHMEM_DEV_NUM; i++) {    in create_ivshmem_device()
    645:  uint32_t i;    in create_ivshmem_vdev() local
    651:  for (i = 0U; i < vm_config->pci_dev_num; i++) {    in create_ivshmem_vdev()
    702:  uint32_t i;    in destroy_ivshmem_vdev() local
    [all …]
|
| /hypervisor/quirks/ |
| security_vm_fixup.c |
    22:   uint32_t i, entry_cnt = 0U;    in get_acpi_mod_entry() local
    28:   for (i = 0; i < entry_cnt; i++) {    in get_acpi_mod_entry()
    29:   header = acpi + xsdt->table_offset_entry[i] - VIRT_ACPI_DATA_ADDR;    in get_acpi_mod_entry()
    43:   uint8_t checksum, i;    in tpm2_fixup() local
    53:   for (i = 0U; i < MAX_MMIO_DEV_NUM; i++) {    in tpm2_fixup()
    54:   if (strncmp(config->mmiodevs[i].name, "tpm2", 4) == 0) {    in tpm2_fixup()
    55:   dev = &config->mmiodevs[i];    in tpm2_fixup()
    129:  uint64_t i;    in efi_search_guid() local
    133:  for (i = 0; i < tab->NumberOfTableEntries; i++) {    in efi_search_guid()
    134:  EFI_CONFIGURATION_TABLE *conf_tab = &tab->ConfigurationTable[i];    in efi_search_guid()
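
tpm2_fixup() declares a checksum byte because any patched ACPI table has to be re-checksummed: all bytes of the table must sum to zero modulo 256. A generic helper for that rule (not the ACRN implementation):

    #include <stddef.h>
    #include <stdint.h>

    /* Return the value that makes the byte sum of 'buf' wrap to zero.  The
     * table's checksum field must be zeroed before calling this. */
    static uint8_t calculate_checksum8(const void *buf, size_t len)
    {
        const uint8_t *p = (const uint8_t *)buf;
        uint8_t sum = 0U;
        size_t i;

        for (i = 0U; i < len; i++) {
            sum += p[i];
        }
        return (uint8_t)(0x100U - sum);
    }

Typical use after patching a table header 'hdr': set hdr->checksum = 0U, adjust the fields, then hdr->checksum = calculate_checksum8(hdr, hdr->length).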
|