Cross-reference of the identifier `idx` across the hypervisor sources, grouped by directory and file. The numbers on the left are source line numbers; `[all …]` marks hit lists truncated by the search tool.

/hypervisor/dm/vpci/
vdev.c:

```
/* in pci_vdev_update_vbar_base() */
110:  vbar = &vdev->vbars[idx];
111:  offset = pci_bar_offset(idx);
117:  vbar = &vdev->vbars[idx + 1U];
170:  vdev->vbars[idx].base_gpa = base;

/* in check_pt_dev_pio_bars() */
176:  uint32_t idx;
179:  for (idx = 0U; idx < vdev->nr_bars; idx++) {
180:  …if ((is_pci_io_bar(&vdev->vbars[idx])) && (vdev->vbars[idx].base_gpa != vdev->vbars[idx].base_hpa)…
185:  vdev->vbars[idx].base_hpa, vdev->vbars[idx].base_gpa);

/* in pci_vdev_write_vbar() */
198:  uint32_t update_idx = idx;
200:  vbar = &vdev->vbars[idx];
[all …]
```
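The `vbars[idx + 1U]` access above reflects the PCI rule that a 64-bit memory BAR spans two consecutive 32-bit BAR registers. Below is a minimal sketch of the low/high combine, using a hypothetical `struct vbar` rather than ACRN's real `struct pci_vbar`:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified BAR state: one raw 32-bit register value per
 * slot, as the guest programmed it (not ACRN's real structure). */
struct vbar {
	uint32_t reg;
};

/* A 64-bit memory BAR spans two consecutive slots: bits [31:0] of the base
 * live in slot idx (low flag bits masked off), bits [63:32] in slot idx + 1,
 * which is why pci_vdev_update_vbar_base() touches vbars[idx + 1U]. */
static uint64_t vbar_base64(const struct vbar *vbars, uint32_t idx)
{
	uint64_t base = (uint64_t)(vbars[idx].reg & 0xFFFFFFF0U);

	base |= ((uint64_t)vbars[idx + 1U].reg) << 32U;
	return base;
}

int main(void)
{
	struct vbar bars[2] = { { 0xC0000004U }, { 0x00000001U } };

	printf("base = 0x%llx\n", (unsigned long long)vbar_base64(bars, 0U));
	/* prints: base = 0x1c0000000 */
	return 0;
}
```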
|
pci_pt.c:

```
/* in init_bars() */
399:  uint32_t idx, bar_cnt;
413:  for (idx = 0U; idx < bar_cnt; idx++) {
415:  vbar = &vdev->sriov.vbars[idx];
416:  offset = sriov_bar_offset(vdev, idx);
418:  vbar = &vdev->vbars[idx];
419:  offset = pci_bar_offset(idx);
466:  idx++;
470:  offset = pci_bar_offset(idx);
481:  vbar = &vdev->sriov.vbars[idx];
483:  vbar = &vdev->vbars[idx];
[all …]
```
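`init_bars()` sizes each BAR register it walks at `pci_bar_offset(idx)`. The standard mechanism is the all-ones probe; here is a self-contained sketch against a simulated 4 KiB memory BAR (`fake_bar`, `cfg_read32()`, and `cfg_write32()` are stand-ins for real config-space access, not ACRN APIs):

```c
#include <stdint.h>
#include <stdio.h>

/* Simulated 32-bit memory BAR decoding 4 KiB: address bits [31:12] are
 * writable, bits [11:4] read back as zero, bits [3:0] are read-only flags. */
static uint32_t fake_bar = 0xFEB00000U;

static uint32_t cfg_read32(void) { return fake_bar; }
static void cfg_write32(uint32_t val)
{
	fake_bar = (val & 0xFFFFF000U) | (fake_bar & 0xFU);
}

/* The classic all-ones probe: save the BAR, write 0xFFFFFFFF, read back the
 * size mask, restore, then decode the size as the two's complement of the
 * mask with the low flag bits cleared. */
static uint32_t probe_bar_size(void)
{
	uint32_t orig = cfg_read32();
	uint32_t mask;

	cfg_write32(0xFFFFFFFFU);
	mask = cfg_read32() & 0xFFFFFFF0U;	/* drop type/prefetch flag bits */
	cfg_write32(orig);			/* restore the original base */

	return (~mask) + 1U;
}

int main(void)
{
	printf("BAR size = 0x%x\n", probe_bar_size());	/* prints 0x1000 (4 KiB) */
	return 0;
}
```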
|
vsriov.c:

```
/* in enable_vfs() */
158:  uint16_t idx;
202:  for (idx = 0U; idx < num_vfs; idx++) {
203:  vf_bdf.fields.bus = get_vf_bus(pf_vdev, fst_off, stride, idx);
204:  vf_bdf.fields.devfun = get_vf_devfun(pf_vdev, fst_off, stride, idx);
214:  create_vf(pf_vdev, vf_bdf, idx);

/* in disable_vfs() */
237:  uint16_t idx, num_vfs, stride, first;
252:  for (idx = 0U; idx < num_vfs; idx++) {
255:  bdf.fields.bus = get_vf_bus(pf_vdev, first, stride, idx);
256:  bdf.fields.devfun = get_vf_devfun(pf_vdev, first, stride, idx);
```
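Per the SR-IOV specification, each VF's routing ID (bus, device, and function packed into 16 bits) is derived from the PF's routing ID plus the capability's First VF Offset and VF Stride, with 16-bit wraparound; `get_vf_bus()`/`get_vf_devfun()` split that sum back into fields. A sketch with hypothetical offset/stride values:

```c
#include <stdint.h>
#include <stdio.h>

/* VF routing IDs per the SR-IOV spec: PF routing ID plus First VF Offset
 * plus idx * VF Stride, computed modulo 2^16. idx is zero-based here,
 * matching the loop in enable_vfs(). */
static uint16_t vf_rid(uint16_t pf_rid, uint16_t fst_off, uint16_t stride, uint16_t idx)
{
	return (uint16_t)(pf_rid + fst_off + stride * idx);
}

int main(void)
{
	uint16_t pf = (uint16_t)((3U << 8) | 0x00U);	/* PF at 03:00.0 */
	uint16_t rid = vf_rid(pf, 0x80U, 2U, 1U);	/* hypothetical offset/stride */

	/* bus = rid >> 8, device = bits [7:3], function = bits [2:0] */
	printf("VF at %02x:%02x.%x\n", rid >> 8, (rid >> 3) & 0x1FU, rid & 0x7U);
	/* prints: VF at 03:10.2 */
	return 0;
}
```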
|
vmcs9900.c:

```
/* in map_vmcs9900_vbar() */
60:  static void map_vmcs9900_vbar(struct pci_vdev *vdev, uint32_t idx)
64:  struct pci_vbar *vbar = &vdev->vbars[idx];
66:  if ((idx == MCS9900_MMIO_BAR) && (vbar->base_gpa != 0UL)) {
71:  } else if ((idx == MCS9900_MSIX_BAR) && (vbar->base_gpa != 0UL)) {

/* in unmap_vmcs9900_vbar() */
82:  static void unmap_vmcs9900_vbar(struct pci_vdev *vdev, uint32_t idx)
86:  struct pci_vbar *vbar = &vdev->vbars[idx];
88:  if ((idx == MCS9900_MMIO_BAR) && (vbar->base_gpa != 0UL)) {
```
|
ivshmem.c:

```
/* in ivshmem_vbar_unmap() */
343:  static void ivshmem_vbar_unmap(struct pci_vdev *vdev, uint32_t idx)
346:  struct pci_vbar *vbar = &vdev->vbars[idx];
348:  if ((idx == IVSHMEM_SHM_BAR) && (vbar->base_gpa != 0UL)) {
350:  } else if (((idx == IVSHMEM_MMIO_BAR) || (idx == IVSHMEM_MSIX_BAR)) && (vbar->base_gpa != 0UL)) {

/* in ivshmem_vbar_map() */
385:  static void ivshmem_vbar_map(struct pci_vdev *vdev, uint32_t idx)
388:  struct pci_vbar *vbar = &vdev->vbars[idx];
390:  if ((idx == IVSHMEM_SHM_BAR) && (vbar->base_hpa != INVALID_HPA) && (vbar->base_gpa != 0UL)) {
393:  } else if ((idx == IVSHMEM_MMIO_BAR) && (vbar->base_gpa != 0UL)) {
397:  } else if ((idx == IVSHMEM_MSIX_BAR) && (vbar->base_gpa != 0UL)) {
```
|
vpci.c:

```
/* in vpci_init_vdevs() */
769:  uint16_t idx;
775:  for (idx = 0U; idx < vm_config->pci_dev_num; idx++) {
778:  vdev = vpci_init_vdev(vpci, &vm_config->pci_devs[idx], NULL);
783:  ret = check_pt_dev_pio_bars(&vpci->pci_vdevs[idx]);

/* in vpci_assign_pcidev() */
802:  uint32_t idx;
835:  for (idx = 0U; idx < vdev->nr_bars; idx++) {
838:  vdev->vbars[idx] = vdev_in_service_vm->vbars[idx];
839:  if (has_msix_cap(vdev) && (idx == vdev->msix.table_bar)) {
840:  vdev->msix.mmio_hpa = vdev->vbars[idx].base_hpa;
841:  vdev->msix.mmio_size = vdev->vbars[idx].size;
[all …]
```
|
vpci_priv.h:

```
147:  void vdev_pt_write_vbar(struct pci_vdev *vdev, uint32_t idx, uint32_t val);
178:  void pci_vdev_write_vbar(struct pci_vdev *vdev, uint32_t idx, uint32_t val);
```
|
/hypervisor/arch/x86/
page.c:

```
/* in alloc_page() */
28:  uint64_t loop_idx, idx, bit;
33:  idx = loop_idx % pool->bitmap_size;
34:  if (*(pool->bitmap + idx) != ~0UL) {
35:  bit = ffz64(*(pool->bitmap + idx));
36:  bitmap_set_nolock(bit, pool->bitmap + idx);
37:  page = pool->start_page + ((idx << 6U) + bit);
39:  pool->last_hint_id = idx;

/* in free_page() */
65:  uint64_t idx, bit;
68:  idx = (page - pool->start_page) >> 6U;
70:  bitmap_clear_nolock(bit, pool->bitmap + idx);
```
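The allocator treats the pool bitmap as an array of 64-bit words: `idx << 6U` converts a word index into a page offset (64 pages per word), and `ffz64()` finds the first zero (free) bit. A runnable sketch of the same word-and-bit arithmetic, with locking and the `last_hint_id` start-hint omitted:

```c
#include <stdint.h>
#include <stdio.h>

#define POOL_PAGES	256U
#define BITMAP_WORDS	(POOL_PAGES / 64U)

/* One bit per page; a set bit means "allocated". */
static uint64_t bitmap[BITMAP_WORDS];

/* Allocate: find a word with a zero bit, claim the first one, and turn
 * (word, bit) into a page number exactly as alloc_page() does:
 * page = (idx << 6) + bit. */
static int64_t alloc_page_idx(void)
{
	for (uint64_t idx = 0U; idx < BITMAP_WORDS; idx++) {
		if (bitmap[idx] != ~0UL) {
			uint64_t bit = (uint64_t)__builtin_ctzll(~bitmap[idx]); /* ffz64 */
			bitmap[idx] |= (1UL << bit);
			return (int64_t)((idx << 6U) + bit);
		}
	}
	return -1;	/* pool exhausted */
}

/* Free inverts the mapping, as free_page() does: word = page >> 6,
 * bit = page & 63. */
static void free_page_idx(uint64_t page)
{
	bitmap[page >> 6U] &= ~(1UL << (page & 63U));
}

int main(void)
{
	int64_t a = alloc_page_idx();	/* 0 */
	int64_t b = alloc_page_idx();	/* 1 */

	free_page_idx((uint64_t)a);
	printf("%ld %ld %ld\n", a, b, alloc_page_idx());	/* prints: 0 1 0 */
	return 0;
}
```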
|
irq.c:

```
/* in init_irq_descs_arch() */
226:  uint32_t idx = i + NR_STATIC_MAPPINGS_1;
228:  ASSERT(irq_static_mappings[idx].irq == 0U, "");
229:  ASSERT(irq_static_mappings[idx].vector == 0U, "");
231:  irq_static_mappings[idx].irq = POSTED_INTR_IRQ + i;
232:  irq_static_mappings[idx].vector = POSTED_INTR_VECTOR + i;
```
|
/hypervisor/common/
thermal.c:

```
/* in thermal_softirq() */
16:  uint32_t idx;
18:  for (idx = 0; idx < CONFIG_MAX_VM_NUM; idx++) {
19:  vcpu = per_cpu(vcpu_array, pcpu_id)[idx];
```
|
ptdev.c:

```
/* in ptdev_release_all_entries() */
269:  uint16_t idx;
272:  for (idx = 0U; idx < CONFIG_MAX_PT_IRQ_ENTRIES; idx++) {
273:  entry = &ptirq_entries[idx];
```
|
hypercall.c:

```
/* in hcall_set_vm_memory_regions() */
712:  uint32_t idx;
719:  idx = 0U;
720:  while (idx < regions.mr_num) {
721:  if (copy_from_gpa(vm, &mr, regions.regions_gpa + idx * sizeof(mr), sizeof(mr)) != 0) {
730:  idx++;
```
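The loop copies one region descriptor per iteration from guest memory at `regions_gpa + idx * sizeof(mr)`, so the hypervisor never trusts `mr_num` to size a single bulk copy. A self-contained sketch of that pattern; `copy_from_guest()` and the `guest_mem` buffer are stand-ins for `copy_from_gpa()` and real guest physical memory:

```c
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct mem_region { uint64_t gpa; uint64_t size; };

/* Stand-in for copy_from_gpa(): "guest memory" is a local buffer indexed
 * by gpa, with an overflow-safe bounds check. Returns 0 on success. */
static uint8_t guest_mem[4096];
static int copy_from_guest(void *dst, uint64_t gpa, size_t len)
{
	if ((gpa >= sizeof(guest_mem)) || (len > sizeof(guest_mem) - gpa)) {
		return -1;
	}
	memcpy(dst, &guest_mem[gpa], len);
	return 0;
}

/* Walk the guest array element by element: each iteration copies exactly
 * one struct, so a huge mr_num cannot overrun a hypervisor-side buffer. */
static int process_regions(uint64_t regions_gpa, uint32_t mr_num)
{
	struct mem_region mr;

	for (uint32_t idx = 0U; idx < mr_num; idx++) {
		if (copy_from_guest(&mr, regions_gpa + idx * sizeof(mr), sizeof(mr)) != 0) {
			return -1;	/* bad guest pointer: abort the hypercall */
		}
		printf("region %u: gpa=0x%lx size=0x%lx\n", idx, mr.gpa, mr.size);
	}
	return 0;
}

int main(void)
{
	struct mem_region seed = { 0x1000UL, 0x2000UL };

	memcpy(guest_mem, &seed, sizeof(seed));
	return process_regions(0UL, 1U);
}
```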
|
/hypervisor/hw/
pci.c:

```
/* in pdev_need_bar_restore() */
241:  uint32_t idx, bar;
243:  for (idx = 0U; idx < pdev->nr_bars; idx++) {

/* in pdev_save_bar() */
265:  uint32_t idx;
267:  for (idx = 0U; idx < pdev->nr_bars; idx++) {
268:  get_pci_bar_resource(pdev->bdf, pci_bar_offset(idx), &pdev->bars[idx]);

/* in pdev_restore_bar() */
274:  uint32_t idx;
276:  for (idx = 0U; idx < pdev->nr_bars; idx++) {

/* in init_all_dev_config() */
583:  uint32_t idx, cnt = 0U;
587:  for (idx = 0U; idx < num_pci_pdev; idx++) {

/* in pci_enumerate_cap() */
777:  uint32_t len, idx;
[all …]
```
|
/hypervisor/arch/x86/guest/
nested.c:

```
/* in reset_vvmcs() */
713:  uint32_t idx;
717:  for (idx = 0U; idx < MAX_ACTIVE_VVMCS_NUM; idx++) {

/* in lookup_vvmcs() */
834:  uint32_t idx;
836:  for (idx = 0U; idx < MAX_ACTIVE_VVMCS_NUM; idx++) {

/* in get_or_replace_vvmcs_entry() */
856:  for (idx = 0U; idx < MAX_ACTIVE_VVMCS_NUM; idx++) {
866:  for (idx = 0U; idx < MAX_ACTIVE_VVMCS_NUM; idx++) {
878:  for (idx = 0U; idx < MAX_ACTIVE_VVMCS_NUM; idx++) {

/* in sync_vmcs02_to_vmcs12() */
988:  uint32_t idx;
990:  for (idx = 0; idx < MAX_SHADOW_VMCS_FIELDS; idx++) {

/* in sync_vmcs12_to_vmcs02() */
1038:  uint32_t idx;
[all …]
```
|
vm.c:

```
/* in deny_pci_bar_access() */
409:  uint32_t idx;
416:  for (idx = 0; idx < pdev->nr_bars; idx++) {
417:  vbar.bar_type.bits = pdev->bars[idx].phy_bar;
419:  base = pdev->bars[idx].phy_bar;
420:  size = pdev->bars[idx].size_mask;
422:  idx++;
423:  base |= (((uint64_t)pdev->bars[idx].phy_bar) << 32UL);
424:  size |= (((uint64_t)pdev->bars[idx].size_mask) << 32UL);
439:  pdev->bdf.bits.b, pdev->bdf.bits.d, pdev->bdf.bits.f, idx, base);
```
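Lines 419 to 424 show the idiom for walking a BAR array in which a 64-bit BAR consumes two slots: the loop body bumps `idx` itself to fold the upper dword into `base` and `size`, and the loop header then skips past it. A simplified, runnable sketch; the `struct saved_bar` fields mirror the `phy_bar`/`size_mask` usage above but are otherwise hypothetical, and I/O BARs are ignored:

```c
#include <stdint.h>
#include <stdio.h>

/* Saved BAR state: raw register value plus the size mask captured by an
 * all-ones probe. */
struct saved_bar { uint32_t phy_bar; uint32_t size_mask; };

/* Memory BAR with type field 10b (64-bit): low three bits are 100b. */
#define BAR_IS_MEM64(v)	(((v) & 0x7U) == 0x4U)

static void walk_bars(const struct saved_bar *bars, uint32_t nr_bars)
{
	for (uint32_t idx = 0U; idx < nr_bars; idx++) {
		uint64_t base = bars[idx].phy_bar & 0xFFFFFFF0U;
		uint64_t size = bars[idx].size_mask & 0xFFFFFFF0U;

		if (BAR_IS_MEM64(bars[idx].phy_bar)) {
			/* A 64-bit BAR occupies this slot and the next one:
			 * consume the upper dword now and skip it in the
			 * loop header. */
			idx++;
			base |= ((uint64_t)bars[idx].phy_bar) << 32U;
			size |= ((uint64_t)bars[idx].size_mask) << 32U;
		}
		size = (~size) + 1UL;	/* size mask -> decoded length */
		printf("BAR: base=0x%llx size=0x%llx\n",
		       (unsigned long long)base, (unsigned long long)size);
	}
}

int main(void)
{
	/* Hypothetical saved state for one 64-bit, 4 KiB BAR pair. */
	struct saved_bar bars[2] = {
		{ 0xDF000004U, 0xFFFFF000U },
		{ 0x00000001U, 0xFFFFFFFFU },
	};

	walk_bars(bars, 2U);	/* prints: BAR: base=0x1df000000 size=0x1000 */
	return 0;
}
```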
|
vlapic.c:

```
/* in apicv_basic_accept_intr() */
484:  uint32_t idx;
487:  idx = vector >> 5U;

/* in vlapic_get_lvt() */
641:  uint32_t idx;
643:  idx = lvt_off_to_idx(offset);
644:  return vlapic->lvt_last[idx];

/* in vlapic_get_deliverable_intr() */
1269:  uint32_t idx;
1275:  idx = vector >> 5U;

/* in apicv_set_intr_ready() */
2230:  uint32_t idx;
2234:  idx = vector >> 6U;

/* in veoi_vmexit_handler() */
2459:  uint32_t idx;
[all …]
```
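The two shift widths are not a typo: the architectural IRR/ISR are banks of 32-bit registers, so `vector >> 5U` selects a register, while the posted-interrupt request bitmap (PIR) is an array of 64-bit words, so `vector >> 6U` selects a word. A worked example:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t vector = 0xE5U;	/* example vector 229 */

	/* IRR/ISR: 32-bit registers, so register = vector >> 5,
	 * bit = vector & 0x1F (as in apicv_basic_accept_intr()). */
	printf("IRR: reg %u, bit %u\n", vector >> 5U, vector & 0x1FU);

	/* PIR: 64-bit words, so word = vector >> 6,
	 * bit = vector & 0x3F (as in apicv_set_intr_ready()). */
	printf("PIR: word %u, bit %u\n", vector >> 6U, vector & 0x3FU);
	/* prints: IRR: reg 7, bit 5   and   PIR: word 3, bit 37 */
	return 0;
}
```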
|
virtual_cr.c:

```
/* in cr_access_vmexit_handler() */
568:  uint32_t idx;
573:  idx = (uint32_t)vm_exit_cr_access_reg_idx(exit_qual);
575:  ASSERT((idx <= 15U), "index out of range");
576:  reg = vcpu_get_gpreg(vcpu, idx);
```
|
vmexit.c:

```
/* in xsetbv_vmexit_handler() */
394:  int32_t idx, ret = -1;	/* ret < 0 call vcpu_inject_gp(vcpu, 0U) */
399:  idx = vcpu->arch.cur_context;
404:  if ((idx < NR_WORLD) && (cpl == 0U)) {
```
|
instr_emul.c:

```
/* in instr_check_gva() */
2278:  uint64_t base, segbase, idx, gva, gpa;
2296:  idx = 0UL;
2298:  idx = vm_get_register(vcpu, vie->index_register);
2334:  gva = segbase + base + (uint64_t)vie->scale * idx + (uint64_t)vie->displacement;
```
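Line 2334 is the x86 effective-address formula: segment base + base register + scale × index register + displacement, as in `mov rax, [rbx + rsi*4 + 0x10]`. A worked example with hypothetical register values:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t segbase = 0x0UL;		/* flat segment in 64-bit mode */
	uint64_t base    = 0x7f0000001000UL;	/* e.g. rbx */
	uint64_t idx     = 3UL;			/* e.g. rsi */
	uint64_t scale   = 4UL;
	int64_t  disp    = 0x10;

	uint64_t gva = segbase + base + scale * idx + (uint64_t)disp;

	printf("gva = 0x%lx\n", gva);	/* prints: gva = 0x7f000000101c */
	return 0;
}
```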
|
/hypervisor/include/arch/x86/asm/lib/
bits.h:

```
/* in ffz64_ex() */
137:  uint64_t idx;
139:  for (idx = 0UL; (idx << 6U) < size; idx++) {
140:  if (addr[idx] != ~0UL) {
141:  ret = (idx << 6U) + ffz64(addr[idx]);
```
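`ffz64_ex()` scans a bitmap 64 bits at a time, skipping all-ones words; `(idx << 6U)` converts the word index back to a bit offset. The sketch below rebuilds both helpers on top of GCC/Clang's `__builtin_ctzll()`, which is a toolchain assumption; ACRN's `ffz64()` has its own implementation:

```c
#include <stdint.h>
#include <stdio.h>

/* "Find first zero" from count-trailing-zeros by inverting the word first;
 * the caller must ensure w != ~0UL. */
static uint64_t ffz64(uint64_t w)
{
	return (uint64_t)__builtin_ctzll(~w);
}

/* Bitmap-wide variant mirroring ffz64_ex(): size is in bits, and each
 * array element covers 64 of them. */
static uint64_t ffz64_ex(const uint64_t *addr, uint64_t size)
{
	for (uint64_t idx = 0UL; (idx << 6U) < size; idx++) {
		if (addr[idx] != ~0UL) {
			return (idx << 6U) + ffz64(addr[idx]);
		}
	}
	return size;	/* no zero bit found */
}

int main(void)
{
	uint64_t map[2] = { ~0UL, 0xFFUL };	/* first free bit is bit 72 */

	printf("%lu\n", ffz64_ex(map, 128UL));	/* prints: 72 */
	return 0;
}
```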
|
/hypervisor/dm/
io_req.c:

```
/* in hv_emulate_pio() */
587:  uint32_t idx;
602:  for (idx = 0U; idx < EMUL_PIO_IDX_MAX; idx++) {
603:  handler = &(vm->emul_pio[idx]);

/* in hv_emulate_mmio() */
651:  uint16_t idx;
666:  for (idx = 0U; idx <= vcpu->vm->nr_emul_mmio_regions; idx++) {

/* in find_match_mmio_node() */
819:  uint16_t idx;
822:  for (idx = 0U; idx < CONFIG_MAX_EMULATED_MMIO_REGIONS; idx++) {
823:  mmio_node = &(vm->emul_mmio[idx]);

/* in find_free_mmio_node() */
850:  uint16_t idx;
855:  if (vm->nr_emul_mmio_regions < idx) {
[all …]
```
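Dispatching an intercepted port I/O access is a linear scan over registered handler ranges, as in `hv_emulate_pio()`: the access hits a handler when the port falls inside `[base, base + len)`. A minimal sketch; the table entries and names here are hypothetical:

```c
#include <stdint.h>
#include <stdio.h>

struct pio_handler { uint16_t base; uint16_t len; const char *name; };

/* Linear scan over registered PIO ranges; a hit means the accessed port
 * falls inside [base, base + len). */
static const struct pio_handler *find_pio_handler(const struct pio_handler *tbl,
						  uint32_t n, uint16_t port)
{
	for (uint32_t idx = 0U; idx < n; idx++) {
		if ((port >= tbl[idx].base) && (port < tbl[idx].base + tbl[idx].len)) {
			return &tbl[idx];
		}
	}
	return NULL;	/* not emulated: forward or ignore */
}

int main(void)
{
	static const struct pio_handler tbl[] = {
		{ 0x3F8U, 8U, "uart" },		/* hypothetical entries */
		{ 0xCF8U, 8U, "pci-cfg" },
	};
	const struct pio_handler *h = find_pio_handler(tbl, 2U, 0x3FDU);

	printf("%s\n", (h != NULL) ? h->name : "none");	/* prints: uart */
	return 0;
}
```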
|
vuart.c:

```
/* in init_pci_vuart() */
868:  uint16_t idx = pci_cfg->vuart_idx;
872:  setup_vuart(vm, idx);
874:  vm_cfg->vuart[idx].type = VUART_PCI;
875:  vm_cfg->vuart[idx].t_vuart.vm_id = pci_cfg->t_vuart.vm_id;
876:  vm_cfg->vuart[idx].t_vuart.vuart_id = pci_cfg->t_vuart.vuart_id;
881:  vuart_setup_connection(vm, &vm_cfg->vuart[idx], idx);
```
|
/hypervisor/include/arch/x86/asm/guest/
vcpu.h:

```
/* foreach_vcpu() iterator macro */
143:  #define foreach_vcpu(idx, vm, vcpu) \
144:      for ((idx) = 0U, (vcpu) = &((vm)->hw.vcpu_array[(idx)]); \
145:           (idx) < (vm)->hw.created_vcpus; \
146:           (idx)++, (vcpu) = &((vm)->hw.vcpu_array[(idx)]))
```
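`foreach_vcpu()` binds both the index and the vCPU pointer on every iteration and stops at `created_vcpus`, not at the array capacity. A self-contained usage example with minimal stand-in types; the real `struct acrn_vcpu` and `struct acrn_vm` are far larger:

```c
#include <stdint.h>
#include <stdio.h>

/* Minimal stand-ins for the real types, just to exercise the macro. */
struct acrn_vcpu { uint16_t vcpu_id; };
struct vcpu_hw { uint16_t created_vcpus; struct acrn_vcpu vcpu_array[4]; };
struct acrn_vm { struct vcpu_hw hw; };

#define foreach_vcpu(idx, vm, vcpu) \
	for ((idx) = 0U, (vcpu) = &((vm)->hw.vcpu_array[(idx)]); \
	     (idx) < (vm)->hw.created_vcpus; \
	     (idx)++, (vcpu) = &((vm)->hw.vcpu_array[(idx)]))

int main(void)
{
	struct acrn_vm vm = { .hw = { .created_vcpus = 2U,
				      .vcpu_array = { {0U}, {1U} } } };
	uint16_t idx;
	struct acrn_vcpu *vcpu;

	/* Visits only the two created vCPUs, rebinding vcpu each step. */
	foreach_vcpu(idx, &vm, vcpu) {
		printf("vcpu %u at index %u\n", vcpu->vcpu_id, idx);
	}
	return 0;
}
```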
|
/hypervisor/debug/
shell.c:

```
/* in shell_list_vcpu() */
801:  uint16_t idx;
806:  for (idx = 0U; idx < CONFIG_MAX_VM_NUM; idx++) {
807:  vm = get_vm_from_vmid(idx);

/* in get_ptdev_info() */
1268:  uint16_t idx;
1284:  for (idx = 0U; idx < CONFIG_MAX_PT_IRQ_ENTRIES; idx++) {
1285:  entry = &ptirq_entries[idx];
```
|
/hypervisor/include/hw/
pci.h:

```
/* in pci_bar_offset() */
327:  static inline uint32_t pci_bar_offset(uint32_t idx)
329:  return PCIR_BARS + (idx << 2U);
```
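BAR registers are 32-bit fields starting at config-space offset 0x10 (`PCIR_BARS`), so `idx << 2U` is just `idx * 4`. A tiny check:

```c
#include <assert.h>
#include <stdint.h>

#define PCIR_BARS 0x10U	/* first BAR register in a type-0 config header */

static inline uint32_t pci_bar_offset(uint32_t idx)
{
	return PCIR_BARS + (idx << 2U);	/* each BAR register is 4 bytes */
}

int main(void)
{
	assert(pci_bar_offset(0U) == 0x10U);	/* BAR0 */
	assert(pci_bar_offset(5U) == 0x24U);	/* BAR5, last in a type-0 header */
	return 0;
}
```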
|