Lines matching refs: matrix_mdev

27 static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev);
50 struct ap_matrix_mdev *matrix_mdev, in vfio_ap_get_queue() argument
55 if (!test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm)) in vfio_ap_get_queue()
57 if (!test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm)) in vfio_ap_get_queue()
62 q->matrix_mdev = matrix_mdev; in vfio_ap_get_queue()
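
The vfio_ap_get_queue() hits above boil down to a membership test: the APQN's card number (APID) must be set in the mdev's adapter mask (APM) and its queue index (APQI) in the domain mask (AQM). The user-space sketch below models that check; the helpers (apqn_card, apqn_queue, mask_test_bit, matrix_has_apqn) and the msb-first byte layout are assumptions standing in for AP_QID_CARD, AP_QID_QUEUE and test_bit_inv(), not the driver's own code.

/*
 * User-space model of the check in vfio_ap_get_queue(). All names here are
 * illustrative stand-ins for AP_QID_CARD/AP_QID_QUEUE and test_bit_inv().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define AP_MASK_BYTES 32	/* 256 possible adapters / domains */

/* An APQN packs the card (APID) in bits 8-15 and the queue (APQI) in bits 0-7. */
static unsigned int apqn_card(uint16_t apqn)  { return (apqn >> 8) & 0xff; }
static unsigned int apqn_queue(uint16_t apqn) { return apqn & 0xff; }

/* Stand-in for test_bit_inv(): the AP masks are msb-first within each byte. */
static bool mask_test_bit(const uint8_t *mask, unsigned int nr)
{
	return mask[nr / 8] & (0x80 >> (nr % 8));
}

/* True when the mdev's matrix covers this APQN, as vfio_ap_get_queue() requires. */
static bool matrix_has_apqn(const uint8_t *apm, const uint8_t *aqm, uint16_t apqn)
{
	if (!mask_test_bit(apm, apqn_card(apqn)))
		return false;
	if (!mask_test_bit(aqm, apqn_queue(apqn)))
		return false;
	return true;
}

int main(void)
{
	uint8_t apm[AP_MASK_BYTES] = { 0x40 };	/* adapter 1 assigned */
	uint8_t aqm[AP_MASK_BYTES] = { 0x20 };	/* domain 2 assigned */

	printf("APQN 01.0002 covered: %d\n", matrix_has_apqn(apm, aqm, 0x0102));	/* 1 */
	printf("APQN 03.0002 covered: %d\n", matrix_has_apqn(apm, aqm, 0x0302));	/* 0 */
	return 0;
}
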
120 !WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) { in vfio_ap_free_aqic_resources()
121 kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc); in vfio_ap_free_aqic_resources()
124 if (q->saved_pfn && !WARN_ON(!q->matrix_mdev)) { in vfio_ap_free_aqic_resources()
125 vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev), in vfio_ap_free_aqic_resources()
182 q->matrix_mdev = NULL; in vfio_ap_irq_disable()
217 ret = vfio_pin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1, in vfio_ap_irq_enable()
227 kvm = q->matrix_mdev->kvm; in vfio_ap_irq_enable()
246 vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1); in vfio_ap_irq_enable()
287 struct ap_matrix_mdev *matrix_mdev; in handle_pqap() local
298 matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook, in handle_pqap()
302 if (!matrix_mdev->kvm) in handle_pqap()
305 q = vfio_ap_get_queue(matrix_mdev, apqn); in handle_pqap()
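
handle_pqap() recovers the owning ap_matrix_mdev from the pqap_hook pointer stashed in vcpu->kvm->arch.crypto via container_of(). Here is a self-contained illustration of that pattern; the struct layout (toy_matrix_mdev, pqap_hook) is invented for the demo and only mirrors the idea of a hook embedded in the per-mdev state.

#include <stddef.h>
#include <stdio.h>

/* Minimal container_of(), equivalent to the kernel macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Toy stand-ins: the hook is embedded in the per-mdev state. */
struct pqap_hook {
	int (*fn)(void);
};

struct toy_matrix_mdev {
	int id;
	struct pqap_hook pqap_hook;	/* what the KVM crypto state points at */
};

static int toy_handler(void)
{
	return 0;
}

int main(void)
{
	struct toy_matrix_mdev mdev = { .id = 7, .pqap_hook = { .fn = toy_handler } };
	struct pqap_hook *hook = &mdev.pqap_hook;	/* all the interception path has */

	/* Recover the enclosing mdev from the embedded hook, as handle_pqap() does. */
	struct toy_matrix_mdev *owner =
		container_of(hook, struct toy_matrix_mdev, pqap_hook);

	printf("owner id = %d\n", owner->id);	/* prints 7 */
	return 0;
}
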
335 struct ap_matrix_mdev *matrix_mdev; in vfio_ap_mdev_probe() local
341 matrix_mdev = kzalloc(sizeof(*matrix_mdev), GFP_KERNEL); in vfio_ap_mdev_probe()
342 if (!matrix_mdev) { in vfio_ap_mdev_probe()
346 vfio_init_group_dev(&matrix_mdev->vdev, &mdev->dev, in vfio_ap_mdev_probe()
349 matrix_mdev->mdev = mdev; in vfio_ap_mdev_probe()
350 vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix); in vfio_ap_mdev_probe()
351 matrix_mdev->pqap_hook = handle_pqap; in vfio_ap_mdev_probe()
353 list_add(&matrix_mdev->node, &matrix_dev->mdev_list); in vfio_ap_mdev_probe()
356 ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev); in vfio_ap_mdev_probe()
359 dev_set_drvdata(&mdev->dev, matrix_mdev); in vfio_ap_mdev_probe()
364 list_del(&matrix_mdev->node); in vfio_ap_mdev_probe()
366 vfio_uninit_group_dev(&matrix_mdev->vdev); in vfio_ap_mdev_probe()
367 kfree(matrix_mdev); in vfio_ap_mdev_probe()
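
The probe hits (lines 335-367) trace the usual allocate, init, link, register sequence, with the error path unwinding in reverse order (list_del, vfio_uninit_group_dev, kfree). The user-space sketch below models only that unwinding shape; toy_probe() and register_device() are placeholders, not the vfio API.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_mdev {
	int registered;
};

/* Placeholder for the registration step; fails when told to. */
static int register_device(struct toy_mdev *m, int should_fail)
{
	if (should_fail)
		return -EIO;
	m->registered = 1;
	return 0;
}

static int toy_probe(int should_fail)
{
	struct toy_mdev *m;
	int ret;

	m = calloc(1, sizeof(*m));	/* kzalloc() analogue */
	if (!m)
		return -ENOMEM;

	/* init and list linking would happen here */

	ret = register_device(m, should_fail);
	if (ret)
		goto err_free;		/* unwind in reverse order of setup */

	return 0;			/* on success the device lives until remove */

err_free:
	/* the driver also does list_del() and vfio_uninit_group_dev() here */
	free(m);
	return ret;
}

int main(void)
{
	printf("good path: %d\n", toy_probe(0));
	printf("bad path:  %d\n", toy_probe(1));
	return 0;
}
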
375 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev); in vfio_ap_mdev_remove() local
377 vfio_unregister_group_dev(&matrix_mdev->vdev); in vfio_ap_mdev_remove()
380 vfio_ap_mdev_reset_queues(matrix_mdev); in vfio_ap_mdev_remove()
381 list_del(&matrix_mdev->node); in vfio_ap_mdev_remove()
383 vfio_uninit_group_dev(&matrix_mdev->vdev); in vfio_ap_mdev_remove()
384 kfree(matrix_mdev); in vfio_ap_mdev_remove()
528 vfio_ap_mdev_verify_queues_reserved_for_apid(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_verify_queues_reserved_for_apid() argument
533 unsigned long nbits = matrix_mdev->matrix.aqm_max + 1; in vfio_ap_mdev_verify_queues_reserved_for_apid()
535 if (find_first_bit_inv(matrix_mdev->matrix.aqm, nbits) >= nbits) in vfio_ap_mdev_verify_queues_reserved_for_apid()
538 for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, nbits) { in vfio_ap_mdev_verify_queues_reserved_for_apid()
558 static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev) in vfio_ap_mdev_verify_no_sharing() argument
565 if (matrix_mdev == lstdev) in vfio_ap_mdev_verify_no_sharing()
575 if (!bitmap_and(apm, matrix_mdev->matrix.apm, in vfio_ap_mdev_verify_no_sharing()
579 if (!bitmap_and(aqm, matrix_mdev->matrix.aqm, in vfio_ap_mdev_verify_no_sharing()
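
vfio_ap_mdev_verify_no_sharing() walks matrix_dev->mdev_list and rejects a configuration whose APM and AQM both intersect another mdev's masks; a queue is shared only if some adapter and some domain are shared. Below is a user-space sketch of that intersection test, assuming byte-array masks and skipping the list walking and locking.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MASK_BYTES 32	/* 256 possible adapters / domains */

struct toy_matrix {
	uint8_t apm[MASK_BYTES];
	uint8_t aqm[MASK_BYTES];
};

/* True if any bit is set in both masks (the "bitmap_and() produced bits" test). */
static bool masks_intersect(const uint8_t *a, const uint8_t *b)
{
	for (size_t i = 0; i < MASK_BYTES; i++)
		if (a[i] & b[i])
			return true;
	return false;
}

/*
 * Model of the sharing check: a queue is shared only when the candidate's
 * APM *and* AQM both intersect those of another mdev.
 */
static bool shares_queue(const struct toy_matrix *cand, const struct toy_matrix *other)
{
	return masks_intersect(cand->apm, other->apm) &&
	       masks_intersect(cand->aqm, other->aqm);
}

int main(void)
{
	struct toy_matrix a = { .apm = { 0x80 }, .aqm = { 0x80 } };	/* 00.0000 */
	struct toy_matrix b = { .apm = { 0x80 }, .aqm = { 0x40 } };	/* 00.0001 */
	struct toy_matrix c = { .apm = { 0x80 }, .aqm = { 0x80 } };	/* 00.0000 again */

	printf("a vs b shares a queue: %d\n", shares_queue(&a, &b));	/* 0 */
	printf("a vs c shares a queue: %d\n", shares_queue(&a, &c));	/* 1 */
	return 0;
}
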
625 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in assign_adapter_store() local
630 if (matrix_mdev->kvm) { in assign_adapter_store()
639 if (apid > matrix_mdev->matrix.apm_max) { in assign_adapter_store()
649 ret = vfio_ap_mdev_verify_queues_reserved_for_apid(matrix_mdev, apid); in assign_adapter_store()
653 set_bit_inv(apid, matrix_mdev->matrix.apm); in assign_adapter_store()
655 ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev); in assign_adapter_store()
663 clear_bit_inv(apid, matrix_mdev->matrix.apm); in assign_adapter_store()
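
assign_adapter_store() (lines 625-663) follows a set-then-validate pattern: bounds-check the APID, verify the queues are reserved, set the APM bit, re-run the sharing check, and clear the bit again if that check fails. The unassign_adapter_store(), assign_domain_store() and related attributes below follow the same shape for their respective masks. Here is a compact user-space model of the rollback flow; verify_reserved() and verify_no_sharing() are stubs standing in for the driver's checks.

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define APM_BITS 256

static uint8_t apm[APM_BITS / 8];	/* the mdev's adapter mask */

static void set_apm_bit(unsigned int apid)   { apm[apid / 8] |=  (0x80 >> (apid % 8)); }
static void clear_apm_bit(unsigned int apid) { apm[apid / 8] &= ~(0x80 >> (apid % 8)); }

/* Stubs for the driver's validation helpers. */
static int verify_reserved(unsigned int apid)  { (void)apid; return 0; }
static int verify_no_sharing(bool conflict)    { return conflict ? -EADDRINUSE : 0; }

static int assign_adapter(unsigned long apid, unsigned long apid_max, bool conflict)
{
	int ret;

	if (apid > apid_max)
		return -ENODEV;

	ret = verify_reserved(apid);
	if (ret)
		return ret;

	set_apm_bit(apid);		/* optimistic assignment */

	ret = verify_no_sharing(conflict);
	if (ret) {
		clear_apm_bit(apid);	/* roll back on conflict */
		return ret;
	}
	return 0;
}

int main(void)
{
	printf("assign 5, no conflict: %d\n", assign_adapter(5, 255, false));
	printf("assign 6, conflict:    %d\n", assign_adapter(6, 255, true));
	return 0;
}
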
692 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in unassign_adapter_store() local
697 if (matrix_mdev->kvm) { in unassign_adapter_store()
706 if (apid > matrix_mdev->matrix.apm_max) { in unassign_adapter_store()
711 clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm); in unassign_adapter_store()
720 vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_verify_queues_reserved_for_apqi() argument
725 unsigned long nbits = matrix_mdev->matrix.apm_max + 1; in vfio_ap_mdev_verify_queues_reserved_for_apqi()
727 if (find_first_bit_inv(matrix_mdev->matrix.apm, nbits) >= nbits) in vfio_ap_mdev_verify_queues_reserved_for_apqi()
730 for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, nbits) { in vfio_ap_mdev_verify_queues_reserved_for_apqi()
775 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in assign_domain_store() local
776 unsigned long max_apqi = matrix_mdev->matrix.aqm_max; in assign_domain_store()
781 if (matrix_mdev->kvm) { in assign_domain_store()
794 ret = vfio_ap_mdev_verify_queues_reserved_for_apqi(matrix_mdev, apqi); in assign_domain_store()
798 set_bit_inv(apqi, matrix_mdev->matrix.aqm); in assign_domain_store()
800 ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev); in assign_domain_store()
808 clear_bit_inv(apqi, matrix_mdev->matrix.aqm); in assign_domain_store()
838 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in unassign_domain_store() local
843 if (matrix_mdev->kvm) { in unassign_domain_store()
852 if (apqi > matrix_mdev->matrix.aqm_max) { in unassign_domain_store()
857 clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm); in unassign_domain_store()
886 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in assign_control_domain_store() local
891 if (matrix_mdev->kvm) { in assign_control_domain_store()
900 if (id > matrix_mdev->matrix.adm_max) { in assign_control_domain_store()
910 set_bit_inv(id, matrix_mdev->matrix.adm); in assign_control_domain_store()
938 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in unassign_control_domain_store() local
939 unsigned long max_domid = matrix_mdev->matrix.adm_max; in unassign_control_domain_store()
944 if (matrix_mdev->kvm) { in unassign_control_domain_store()
957 clear_bit_inv(domid, matrix_mdev->matrix.adm); in unassign_control_domain_store()
973 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in control_domains_show() local
974 unsigned long max_domid = matrix_mdev->matrix.adm_max; in control_domains_show()
977 for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) { in control_domains_show()
991 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in matrix_show() local
997 unsigned long napm_bits = matrix_mdev->matrix.apm_max + 1; in matrix_show()
998 unsigned long naqm_bits = matrix_mdev->matrix.aqm_max + 1; in matrix_show()
1002 apid1 = find_first_bit_inv(matrix_mdev->matrix.apm, napm_bits); in matrix_show()
1003 apqi1 = find_first_bit_inv(matrix_mdev->matrix.aqm, naqm_bits); in matrix_show()
1008 for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) { in matrix_show()
1009 for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, in matrix_show()
1018 for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) { in matrix_show()
1024 for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, naqm_bits) { in matrix_show()
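
matrix_show() (lines 991-1024) prints each assigned queue by walking the cross product of the APM and AQM, falling back to bare APIDs or APQIs when only one of the two masks is populated. A small user-space model of that logic follows; the "xx.yyyy" formatting matches the sysfs convention, but the code itself is illustrative, not the driver's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MASK_BITS 256

static bool mask_test(const uint8_t *m, unsigned int nr)
{
	return m[nr / 8] & (0x80 >> (nr % 8));
}

static bool mask_empty(const uint8_t *m)
{
	for (unsigned int i = 0; i < MASK_BITS / 8; i++)
		if (m[i])
			return false;
	return true;
}

/* Print the matrix the way the sysfs attribute does: one "APID.APQI" per line. */
static void show_matrix(const uint8_t *apm, const uint8_t *aqm)
{
	bool have_apid = !mask_empty(apm);
	bool have_apqi = !mask_empty(aqm);

	if (have_apid && have_apqi) {
		/* Both masks populated: print every APQN in the cross product. */
		for (unsigned int apid = 0; apid < MASK_BITS; apid++) {
			if (!mask_test(apm, apid))
				continue;
			for (unsigned int apqi = 0; apqi < MASK_BITS; apqi++)
				if (mask_test(aqm, apqi))
					printf("%02x.%04x\n", apid, apqi);
		}
	} else if (have_apid) {
		/* Only adapters assigned: bare APIDs. */
		for (unsigned int apid = 0; apid < MASK_BITS; apid++)
			if (mask_test(apm, apid))
				printf("%02x.\n", apid);
	} else {
		/* Only domains assigned: bare APQIs. */
		for (unsigned int apqi = 0; apqi < MASK_BITS; apqi++)
			if (mask_test(aqm, apqi))
				printf(".%04x\n", apqi);
	}
}

int main(void)
{
	uint8_t apm[MASK_BITS / 8] = { 0xc0 };	/* adapters 0 and 1 */
	uint8_t aqm[MASK_BITS / 8] = { 0x20 };	/* domain 2 */

	show_matrix(apm, aqm);	/* prints 00.0002 and 01.0002 */
	return 0;
}
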
1075 static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_set_kvm() argument
1082 kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook; in vfio_ap_mdev_set_kvm()
1089 if (m != matrix_mdev && m->kvm == kvm) { in vfio_ap_mdev_set_kvm()
1097 matrix_mdev->kvm = kvm; in vfio_ap_mdev_set_kvm()
1099 matrix_mdev->matrix.apm, in vfio_ap_mdev_set_kvm()
1100 matrix_mdev->matrix.aqm, in vfio_ap_mdev_set_kvm()
1101 matrix_mdev->matrix.adm); in vfio_ap_mdev_set_kvm()
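
vfio_ap_mdev_set_kvm() walks matrix_dev->mdev_list and refuses to bind a KVM instance that another mdev already holds before storing the pointer, installing the pqap hook and pushing the masks to KVM. Below is a user-space model of just the duplicate check; struct toy_kvm, struct toy_mdev and the array-based "list" are simplifications, and the -EPERM return is an assumption about the error code, not a statement of the driver's behaviour.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins for struct kvm and the per-mdev state on matrix_dev->mdev_list. */
struct toy_kvm {
	int id;
};

struct toy_mdev {
	struct toy_kvm *kvm;	/* NULL until a guest is attached */
};

/*
 * Model of the duplicate check in vfio_ap_mdev_set_kvm(): refuse to bind a
 * KVM instance that some other mdev on the list already holds.
 */
static int toy_set_kvm(struct toy_mdev *self, struct toy_mdev *list, size_t n,
		       struct toy_kvm *kvm)
{
	for (size_t i = 0; i < n; i++) {
		if (&list[i] != self && list[i].kvm == kvm)
			return -EPERM;	/* already in use by another mdev */
	}
	self->kvm = kvm;
	/* the driver would now install the pqap hook and push the masks to KVM */
	return 0;
}

int main(void)
{
	struct toy_kvm guest = { .id = 1 };
	struct toy_mdev mdevs[2] = { { NULL }, { NULL } };

	printf("first bind:  %d\n", toy_set_kvm(&mdevs[0], mdevs, 2, &guest));	/* 0 */
	printf("second bind: %d\n", toy_set_kvm(&mdevs[1], mdevs, 2, &guest));	/* -EPERM */
	return 0;
}
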
1125 struct ap_matrix_mdev *matrix_mdev; in vfio_ap_mdev_iommu_notifier() local
1127 matrix_mdev = container_of(nb, struct ap_matrix_mdev, iommu_notifier); in vfio_ap_mdev_iommu_notifier()
1133 vfio_unpin_pages(mdev_dev(matrix_mdev->mdev), &g_pfn, 1); in vfio_ap_mdev_iommu_notifier()
1154 static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_unset_kvm() argument
1166 vfio_ap_mdev_reset_queues(matrix_mdev); in vfio_ap_mdev_unset_kvm()
1168 matrix_mdev->kvm = NULL; in vfio_ap_mdev_unset_kvm()
1179 struct ap_matrix_mdev *matrix_mdev; in vfio_ap_mdev_group_notifier() local
1184 matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier); in vfio_ap_mdev_group_notifier()
1187 vfio_ap_mdev_unset_kvm(matrix_mdev, matrix_mdev->kvm); in vfio_ap_mdev_group_notifier()
1188 else if (vfio_ap_mdev_set_kvm(matrix_mdev, data)) in vfio_ap_mdev_group_notifier()
1260 static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev) in vfio_ap_mdev_reset_queues() argument
1267 for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, in vfio_ap_mdev_reset_queues()
1268 matrix_mdev->matrix.apm_max + 1) { in vfio_ap_mdev_reset_queues()
1269 for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, in vfio_ap_mdev_reset_queues()
1270 matrix_mdev->matrix.aqm_max + 1) { in vfio_ap_mdev_reset_queues()
1288 struct ap_matrix_mdev *matrix_mdev = in vfio_ap_mdev_open_device() local
1293 matrix_mdev->group_notifier.notifier_call = vfio_ap_mdev_group_notifier; in vfio_ap_mdev_open_device()
1297 &events, &matrix_mdev->group_notifier); in vfio_ap_mdev_open_device()
1301 matrix_mdev->iommu_notifier.notifier_call = vfio_ap_mdev_iommu_notifier; in vfio_ap_mdev_open_device()
1304 &events, &matrix_mdev->iommu_notifier); in vfio_ap_mdev_open_device()
1311 &matrix_mdev->group_notifier); in vfio_ap_mdev_open_device()
1317 struct ap_matrix_mdev *matrix_mdev = in vfio_ap_mdev_close_device() local
1321 &matrix_mdev->iommu_notifier); in vfio_ap_mdev_close_device()
1323 &matrix_mdev->group_notifier); in vfio_ap_mdev_close_device()
1324 vfio_ap_mdev_unset_kvm(matrix_mdev, matrix_mdev->kvm); in vfio_ap_mdev_close_device()
1350 struct ap_matrix_mdev *matrix_mdev = in vfio_ap_mdev_ioctl() local
1360 ret = vfio_ap_mdev_reset_queues(matrix_mdev); in vfio_ap_mdev_ioctl()