Lines matching refs: vpe (GICv3 ITS driver, drivers/irqchip/irq-gic-v3-its.c)

259 static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)  in vpe_to_cpuid_lock()  argument
261 raw_spin_lock_irqsave(&vpe->vpe_lock, *flags); in vpe_to_cpuid_lock()
262 return vpe->col_idx; in vpe_to_cpuid_lock()
265 static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags) in vpe_to_cpuid_unlock() argument
267 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); in vpe_to_cpuid_unlock()
276 cpu = vpe_to_cpuid_lock(map->vpe, flags); in irq_to_cpuid_lock()
293 vpe_to_cpuid_unlock(map->vpe, flags); in irq_to_cpuid_unlock()
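The pair above (lines 259-267) pins a vPE to its current CPU for the duration of an operation: taking the per-vPE raw spinlock keeps col_idx stable until the matching unlock, and irq_to_cpuid_lock()/unlock() (lines 276, 293) simply forward to it when the IRQ has a vPE mapping. A minimal sketch of the pairing, reconstructed from the listed lines (kernel context and <linux/spinlock.h> assumed):

static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
{
	raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
	return vpe->col_idx;	/* stable until vpe_to_cpuid_unlock() */
}

static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
}

Callers such as its_vpe_4_1_invall() (line 4114) and its_sgi_get_irqchip_state() (line 4254) bracket their redistributor accesses with exactly this pair.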
304 static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe) in valid_vpe() argument
306 if (valid_col(its->collections + vpe->col_idx)) in valid_vpe()
307 return vpe; in valid_vpe()
365 struct its_vpe *vpe; member
369 struct its_vpe *vpe; member
375 struct its_vpe *vpe; member
383 struct its_vpe *vpe; member
390 struct its_vpe *vpe; member
397 struct its_vpe *vpe; member
401 struct its_vpe *vpe; member
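Lines 365-401 are the struct its_vpe *vpe members of the per-command entries in the ITS command descriptor union. A hedged sketch of that shape; every field other than ->vpe is an assumption inferred from the builders below:

struct its_cmd_desc {
	union {
		struct {
			struct its_vpe *vpe;
		} its_vinvall_cmd;

		struct {
			struct its_vpe *vpe;
			struct its_collection *col;	/* see line 1280 */
			bool valid;			/* map vs. unmap */
		} its_vmapp_cmd;

		struct {
			struct its_vpe *vpe;
		} its_invdb_cmd;

		/* ... its_vmapti_cmd, its_vmovi_cmd, its_vmovp_cmd and
		 *     its_vsgi_cmd follow the same vpe-carrying pattern ... */
	};
};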
753 its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id); in its_build_vinvall_cmd()
757 return valid_vpe(its, desc->its_vinvall_cmd.vpe); in its_build_vinvall_cmd()
769 its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id); in its_build_vmapp_cmd()
774 alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count); in its_build_vmapp_cmd()
781 vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); in its_build_vmapp_cmd()
791 vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page)); in its_build_vmapp_cmd()
793 alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count); in its_build_vmapp_cmd()
805 its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi); in its_build_vmapp_cmd()
810 return valid_vpe(its, desc->its_vmapp_cmd.vpe); in its_build_vmapp_cmd()
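The vmapp_count arithmetic on lines 774 and 793 decides whether this VMAPP is the last unmap or the first map of the vPE, which in turn decides whether per-vPE ITS tables need tearing down or setting up. A sketch of the idiom (fragment of the builder body; <linux/atomic.h> assumed):

bool alloc;

if (!desc->its_vmapp_cmd.valid) {
	/* Unmap: atomic_dec_return() reaching 0 means last VMAPP. */
	alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
} else {
	/* Map: atomic_fetch_inc() returning 0 means first VMAPP. */
	alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
}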
820 db = desc->its_vmapti_cmd.vpe->vpe_db_lpi; in its_build_vmapti_cmd()
826 its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id); in its_build_vmapti_cmd()
833 return valid_vpe(its, desc->its_vmapti_cmd.vpe); in its_build_vmapti_cmd()
843 db = desc->its_vmovi_cmd.vpe->vpe_db_lpi; in its_build_vmovi_cmd()
849 its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id); in its_build_vmovi_cmd()
856 return valid_vpe(its, desc->its_vmovi_cmd.vpe); in its_build_vmovi_cmd()
869 its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id); in its_build_vmovp_cmd()
874 its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi); in its_build_vmovp_cmd()
879 return valid_vpe(its, desc->its_vmovp_cmd.vpe); in its_build_vmovp_cmd()
897 return valid_vpe(its, map->vpe); in its_build_vinv_cmd()
915 return valid_vpe(its, map->vpe); in its_build_vint_cmd()
933 return valid_vpe(its, map->vpe); in its_build_vclear_cmd()
944 its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id); in its_build_invdb_cmd()
948 return valid_vpe(its, desc->its_invdb_cmd.vpe); in its_build_invdb_cmd()
959 its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id); in its_build_vsgi_cmd()
968 return valid_vpe(its, desc->its_vsgi_cmd.vpe); in its_build_vsgi_cmd()
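Every vPE command builder from line 753 to line 968 ends the same way: encode the target's vpe_id, then return valid_vpe() so the queueing code knows which collection to synchronize against; valid_vpe() (lines 304-307) returns the vPE only if its collection is actually mapped, and NULL otherwise. The shared skeleton, sketched with a hypothetical VFOO command (GITS_CMD_VFOO and its_vfoo_cmd do not exist; they stand in for any of the real builders):

static struct its_vpe *its_build_vfoo_cmd(struct its_node *its,
					  struct its_cmd_block *cmd,
					  struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_VFOO);		/* hypothetical opcode */
	its_encode_vpeid(cmd, desc->its_vfoo_cmd.vpe->vpe_id);
	/* ... command-specific fields ... */

	return valid_vpe(its, desc->its_vfoo_cmd.vpe);	/* NULL if unmapped */
}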
1251 desc.its_vmapti_cmd.vpe = map->vpe; in its_send_vmapti()
1265 desc.its_vmovi_cmd.vpe = map->vpe; in its_send_vmovi()
1274 struct its_vpe *vpe, bool valid) in its_send_vmapp() argument
1278 desc.its_vmapp_cmd.vpe = vpe; in its_send_vmapp()
1280 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx]; in its_send_vmapp()
1285 static void its_send_vmovp(struct its_vpe *vpe) in its_send_vmovp() argument
1290 int col_id = vpe->col_idx; in its_send_vmovp()
1292 desc.its_vmovp_cmd.vpe = vpe; in its_send_vmovp()
1312 desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm); in its_send_vmovp()
1319 if (!require_its_list_vmovp(vpe->its_vm, its)) in its_send_vmovp()
1329 static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe) in its_send_vinvall() argument
1333 desc.its_vinvall_cmd.vpe = vpe; in its_send_vinvall()
1379 static void its_send_invdb(struct its_node *its, struct its_vpe *vpe) in its_send_invdb() argument
1383 desc.its_invdb_cmd.vpe = vpe; in its_send_invdb()
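The its_send_v*() helpers (lines 1251-1383) all follow the same fill-descriptor-then-issue shape; its_send_vmovp() is the outlier because, when an ITS list is in use, it must walk every ITS and skip the ones whose list map does not require a VMOVP (lines 1312-1319). The simple case, sketched (its_send_single_vcommand() assumed as the issue helper, per the surrounding driver):

static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
{
	struct its_cmd_desc desc;

	desc.its_vinvall_cmd.vpe = vpe;
	its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
}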
1444 val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id); in direct_lpi_inv()
1771 struct its_vpe *vpe = vm->vpes[i]; in its_map_vm() local
1772 struct irq_data *d = irq_get_irq_data(vpe->irq); in its_map_vm()
1775 vpe->col_idx = cpumask_first(cpu_online_mask); in its_map_vm()
1776 its_send_vmapp(its, vpe, true); in its_map_vm()
1777 its_send_vinvall(its, vpe); in its_map_vm()
1778 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); in its_map_vm()
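Lines 1771-1778 are the bring-up sequence its_map_vm() runs the first time a VM touches an ITS: pin each vPE to some online CPU, VMAPP it, VINVALL to discard any stale cached configuration, then record the effective affinity. Sketched (locking and error handling elided):

for (i = 0; i < vm->nr_vpes; i++) {
	struct its_vpe *vpe = vm->vpes[i];
	struct irq_data *d = irq_get_irq_data(vpe->irq);

	/* Arbitrary but valid initial target CPU. */
	vpe->col_idx = cpumask_first(cpu_online_mask);
	its_send_vmapp(its, vpe, true);
	its_send_vinvall(its, vpe);
	irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
}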
3669 static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) in its_vpe_db_proxy_unmap_locked() argument
3676 if (vpe->vpe_proxy_event == -1) in its_vpe_db_proxy_unmap_locked()
3679 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event); in its_vpe_db_proxy_unmap_locked()
3680 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL; in its_vpe_db_proxy_unmap_locked()
3690 vpe_proxy.next_victim = vpe->vpe_proxy_event; in its_vpe_db_proxy_unmap_locked()
3692 vpe->vpe_proxy_event = -1; in its_vpe_db_proxy_unmap_locked()
3695 static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) in its_vpe_db_proxy_unmap() argument
3705 its_vpe_db_proxy_unmap_locked(vpe); in its_vpe_db_proxy_unmap()
3710 static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe) in its_vpe_db_proxy_map_locked() argument
3717 if (vpe->vpe_proxy_event != -1) in its_vpe_db_proxy_map_locked()
3725 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe; in its_vpe_db_proxy_map_locked()
3726 vpe->vpe_proxy_event = vpe_proxy.next_victim; in its_vpe_db_proxy_map_locked()
3729 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx; in its_vpe_db_proxy_map_locked()
3730 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event); in its_vpe_db_proxy_map_locked()
3733 static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) in its_vpe_db_proxy_move() argument
3746 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); in its_vpe_db_proxy_move()
3754 its_vpe_db_proxy_map_locked(vpe); in its_vpe_db_proxy_move()
3757 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event); in its_vpe_db_proxy_move()
3758 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to; in its_vpe_db_proxy_move()
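Lines 3669-3758 implement the GICv4.0 doorbell proxy: when the redistributor cannot signal doorbells directly, each vPE's doorbell LPI is mapped as an ordinary event on a shared proxy device, with vpe_proxy_event == -1 meaning "not currently proxied" and next_victim implementing a simple eviction policy. The map side, sketched (the vpe_proxy lock is assumed held by callers, hence the _locked suffix):

static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
{
	/* Already mapped? */
	if (vpe->vpe_proxy_event != -1)
		return;

	/* ... evict vpe_proxy.next_victim if the slot is taken ... */

	vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
	vpe->vpe_proxy_event = vpe_proxy.next_victim;

	/* Route the doorbell LPI through the proxy device. */
	vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
	its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
}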
3767 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_vpe_set_affinity() local
3784 from = vpe_to_cpuid_lock(vpe, &flags); in its_vpe_set_affinity()
3788 vpe->col_idx = cpu; in its_vpe_set_affinity()
3798 its_send_vmovp(vpe); in its_vpe_set_affinity()
3799 its_vpe_db_proxy_move(vpe, from, cpu); in its_vpe_set_affinity()
3803 vpe_to_cpuid_unlock(vpe, flags); in its_vpe_set_affinity()
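its_vpe_set_affinity() (lines 3767-3803) migrates a vPE under its own lock: record the old CPU, update col_idx, issue VMOVP so the ITS retargets the vPE, then move the proxied doorbell to follow it. The ordering, sketched (target-CPU selection simplified to cpumask_first(); the real logic is more involved):

from = vpe_to_cpuid_lock(vpe, &flags);
cpu = cpumask_first(mask_val);			/* simplified selection */
if (from != cpu) {
	vpe->col_idx = cpu;
	its_send_vmovp(vpe);			/* retarget at the ITS */
	its_vpe_db_proxy_move(vpe, from, cpu);	/* doorbell follows */
}
vpe_to_cpuid_unlock(vpe, flags);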
3822 static void its_vpe_schedule(struct its_vpe *vpe) in its_vpe_schedule() argument
3828 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & in its_vpe_schedule()
3835 val = virt_to_phys(page_address(vpe->vpt_page)) & in its_vpe_schedule()
3849 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0; in its_vpe_schedule()
3854 static void its_vpe_deschedule(struct its_vpe *vpe) in its_vpe_deschedule() argument
3861 vpe->idai = !!(val & GICR_VPENDBASER_IDAI); in its_vpe_deschedule()
3862 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); in its_vpe_deschedule()
3865 static void its_vpe_invall(struct its_vpe *vpe) in its_vpe_invall() argument
3873 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) in its_vpe_invall()
3880 its_send_vinvall(its, vpe); in its_vpe_invall()
3887 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_vpe_set_vcpu_affinity() local
3892 its_vpe_schedule(vpe); in its_vpe_set_vcpu_affinity()
3896 its_vpe_deschedule(vpe); in its_vpe_set_vcpu_affinity()
3904 its_vpe_invall(vpe); in its_vpe_set_vcpu_affinity()
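its_vpe_set_vcpu_affinity() (lines 3887-3904) is the dispatcher the hypervisor uses to make a vPE resident, non-resident, or invalidated; the three listed calls are its arms. Sketched, with the cmd_type names taken from the GICv4 API (assumed from context, not shown in the listing):

switch (info->cmd_type) {
case SCHEDULE_VPE:
	its_vpe_schedule(vpe);		/* program VPROPBASER/VPENDBASER */
	return 0;
case DESCHEDULE_VPE:
	its_vpe_deschedule(vpe);	/* latch idai/pending_last on exit */
	return 0;
case INVALL_VPE:
	its_vpe_invall(vpe);		/* VINVALL on each ITS mapping the VM */
	return 0;
default:
	return -EINVAL;
}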
3912 static void its_vpe_send_cmd(struct its_vpe *vpe, in its_vpe_send_cmd() argument
3919 its_vpe_db_proxy_map_locked(vpe); in its_vpe_send_cmd()
3920 cmd(vpe_proxy.dev, vpe->vpe_proxy_event); in its_vpe_send_cmd()
3927 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_vpe_send_inv() local
3933 raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock); in its_vpe_send_inv()
3934 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; in its_vpe_send_inv()
3937 raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock); in its_vpe_send_inv()
3939 its_vpe_send_cmd(vpe, its_send_inv); in its_vpe_send_inv()
3966 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_vpe_set_irqchip_state() local
3974 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; in its_vpe_set_irqchip_state()
3976 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR); in its_vpe_set_irqchip_state()
3978 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); in its_vpe_set_irqchip_state()
3983 its_vpe_send_cmd(vpe, its_send_int); in its_vpe_set_irqchip_state()
3985 its_vpe_send_cmd(vpe, its_send_clear); in its_vpe_set_irqchip_state()
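Lines 3912-3985 show the two delivery paths for poking a vPE's doorbell: with direct LPI support the doorbell number is written straight to the redistributor's GICR_SETLPIR/GICR_CLRLPIR, otherwise the driver falls back to INT/CLEAR commands through the proxy device. The pending-state arm, sketched:

if (gic_rdists->has_direct_lpi) {
	void __iomem *rdbase;

	rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
	if (state)
		gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
	else
		gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
} else {
	if (state)
		its_vpe_send_cmd(vpe, its_send_int);	/* proxied INT */
	else
		its_vpe_send_cmd(vpe, its_send_clear);	/* proxied CLEAR */
}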
4026 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_vpe_4_1_send_inv() local
4036 its_send_invdb(its, vpe); in its_vpe_4_1_send_inv()
4051 static void its_vpe_4_1_schedule(struct its_vpe *vpe, in its_vpe_4_1_schedule() argument
4061 val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id); in its_vpe_4_1_schedule()
4066 static void its_vpe_4_1_deschedule(struct its_vpe *vpe, in its_vpe_4_1_deschedule() argument
4085 raw_spin_lock_irqsave(&vpe->vpe_lock, flags); in its_vpe_4_1_deschedule()
4089 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); in its_vpe_4_1_deschedule()
4090 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); in its_vpe_4_1_deschedule()
4099 vpe->pending_last = true; in its_vpe_4_1_deschedule()
4103 static void its_vpe_4_1_invall(struct its_vpe *vpe) in its_vpe_4_1_invall() argument
4111 val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id); in its_vpe_4_1_invall()
4114 cpu = vpe_to_cpuid_lock(vpe, &flags); in its_vpe_4_1_invall()
4121 vpe_to_cpuid_unlock(vpe, flags); in its_vpe_4_1_invall()
4126 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_vpe_4_1_set_vcpu_affinity() local
4131 its_vpe_4_1_schedule(vpe, info); in its_vpe_4_1_set_vcpu_affinity()
4135 its_vpe_4_1_deschedule(vpe, info); in its_vpe_4_1_set_vcpu_affinity()
4143 its_vpe_4_1_invall(vpe); in its_vpe_4_1_set_vcpu_affinity()
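The GICv4.1 variants (lines 4026-4143) drop the proxy entirely: doorbells are invalidated with an INVDB command, residency is programmed by writing the VPE ID straight into GICR_VPENDBASER, and invalidate-all goes through GICR_INVALLR. The bitfield composition on lines 4061/4111 is the usual FIELD_PREP idiom; the residency write, sketched (<linux/bitfield.h> and the rdist vlpi base assumed):

void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
u64 val;

val  = GICR_VPENDBASER_Valid;
val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);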
4162 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_configure_sgi() local
4165 desc.its_vsgi_cmd.vpe = vpe; in its_configure_sgi()
4167 desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority; in its_configure_sgi()
4168 desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled; in its_configure_sgi()
4169 desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group; in its_configure_sgi()
4182 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_sgi_mask_irq() local
4184 vpe->sgi_config[d->hwirq].enabled = false; in its_sgi_mask_irq()
4190 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_sgi_unmask_irq() local
4192 vpe->sgi_config[d->hwirq].enabled = true; in its_sgi_unmask_irq()
4217 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_sgi_set_irqchip_state() local
4221 val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id); in its_sgi_set_irqchip_state()
4234 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_sgi_get_irqchip_state() local
4254 cpu = vpe_to_cpuid_lock(vpe, &flags); in its_sgi_get_irqchip_state()
4257 writel_relaxed(vpe->vpe_id, base + GICR_VSGIR); in its_sgi_get_irqchip_state()
4274 vpe_to_cpuid_unlock(vpe, flags); in its_sgi_get_irqchip_state()
4286 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_sgi_set_vcpu_affinity() local
4291 vpe->sgi_config[d->hwirq].priority = info->priority; in its_sgi_set_vcpu_affinity()
4292 vpe->sgi_config[d->hwirq].group = info->group; in its_sgi_set_vcpu_affinity()
4315 struct its_vpe *vpe = args; in its_sgi_irq_domain_alloc() local
4322 vpe->sgi_config[i].priority = 0; in its_sgi_irq_domain_alloc()
4323 vpe->sgi_config[i].enabled = false; in its_sgi_irq_domain_alloc()
4324 vpe->sgi_config[i].group = false; in its_sgi_irq_domain_alloc()
4327 &its_sgi_irq_chip, vpe); in its_sgi_irq_domain_alloc()
4352 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_sgi_irq_domain_deactivate() local
4364 vpe->sgi_config[d->hwirq].enabled = false; in its_sgi_irq_domain_deactivate()
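The vSGI code (lines 4162-4364) keeps per-interrupt state in vpe->sgi_config[d->hwirq] (priority, group, enabled) and replays the whole entry to the ITS with a VSGI command whenever anything changes; mask/unmask are therefore just the enabled flag plus a reconfigure. The unmask side, sketched (its_configure_sgi()'s second argument is the 'clear' flag, per the driver):

static void its_sgi_unmask_irq(struct irq_data *d)
{
	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);

	vpe->sgi_config[d->hwirq].enabled = true;
	its_configure_sgi(d, false);	/* push config, no clear */
}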
4386 static int its_vpe_init(struct its_vpe *vpe) in its_vpe_init() argument
4409 raw_spin_lock_init(&vpe->vpe_lock); in its_vpe_init()
4410 vpe->vpe_id = vpe_id; in its_vpe_init()
4411 vpe->vpt_page = vpt_page; in its_vpe_init()
4413 atomic_set(&vpe->vmapp_count, 0); in its_vpe_init()
4415 vpe->vpe_proxy_event = -1; in its_vpe_init()
4420 static void its_vpe_teardown(struct its_vpe *vpe) in its_vpe_teardown() argument
4422 its_vpe_db_proxy_unmap(vpe); in its_vpe_teardown()
4423 its_vpe_id_free(vpe->vpe_id); in its_vpe_teardown()
4424 its_free_pending_table(vpe->vpt_page); in its_vpe_teardown()
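its_vpe_init() and its_vpe_teardown() (lines 4386-4424) are strict mirrors: init allocates a VPE ID and a pending table and initializes the lock, counter and proxy slot; teardown unhooks the proxy and frees the same resources in reverse. The teardown side, restated from the listing with comments:

static void its_vpe_teardown(struct its_vpe *vpe)
{
	its_vpe_db_proxy_unmap(vpe);		/* detach from the v4.0 proxy */
	its_vpe_id_free(vpe->vpe_id);		/* return the VPE ID */
	its_free_pending_table(vpe->vpt_page);	/* free the VPT */
}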
4439 struct its_vpe *vpe = irq_data_get_irq_chip_data(data); in its_vpe_irq_domain_free() local
4441 BUG_ON(vm != vpe->its_vm); in its_vpe_irq_domain_free()
4444 its_vpe_teardown(vpe); in its_vpe_irq_domain_free()
4516 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_vpe_irq_domain_activate() local
4528 vpe->col_idx = cpumask_first(cpu_online_mask); in its_vpe_irq_domain_activate()
4534 its_send_vmapp(its, vpe, true); in its_vpe_irq_domain_activate()
4535 its_send_vinvall(its, vpe); in its_vpe_irq_domain_activate()
4538 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); in its_vpe_irq_domain_activate()
4546 struct its_vpe *vpe = irq_data_get_irq_chip_data(d); in its_vpe_irq_domain_deactivate() local
4560 its_send_vmapp(its, vpe, false); in its_vpe_irq_domain_deactivate()
4568 if (find_4_1_its() && !atomic_read(&vpe->vmapp_count)) in its_vpe_irq_domain_deactivate()
4569 gic_flush_dcache_to_poc(page_address(vpe->vpt_page), in its_vpe_irq_domain_deactivate()
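Domain activate/deactivate (lines 4516-4569) bound the vPE's hardware lifetime: activate picks an online CPU and issues VMAPP followed by VINVALL on each v4 ITS, deactivate issues an invalid VMAPP on each of them, and on GICv4.1 the pending table is made coherent with the CPU caches once the last VMAPP is gone, since the redistributor may have dirtied it. The deactivate tail, sketched (its_nodes list and LPI_PENDBASE_SZ assumed from the surrounding driver):

list_for_each_entry(its, &its_nodes, entry) {
	if (!is_v4(its))
		continue;

	its_send_vmapp(its, vpe, false);	/* unmap on each v4 ITS */
}

/* Last VMAPP gone on a v4.1 system: flush the VPT to PoC. */
if (find_4_1_its() && !atomic_read(&vpe->vmapp_count))
	gic_flush_dcache_to_poc(page_address(vpe->vpt_page),
				LPI_PENDBASE_SZ);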