Lines Matching refs:matrix_mdev
35 static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev);
100 static inline void get_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev) in get_update_locks_for_mdev() argument
103 if (matrix_mdev && matrix_mdev->kvm) in get_update_locks_for_mdev()
104 mutex_lock(&matrix_mdev->kvm->lock); in get_update_locks_for_mdev()
123 static inline void release_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev) in release_update_locks_for_mdev() argument
126 if (matrix_mdev && matrix_mdev->kvm) in release_update_locks_for_mdev()
127 mutex_unlock(&matrix_mdev->kvm->lock); in release_update_locks_for_mdev()
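
The two helpers above only touch the KVM lock when the mdev is actually bound to a guest, so the acquire and release paths must perform identical NULL checks. A minimal user-space model of that pairing (pthread mutex standing in for the kernel mutex, surrounding matrix_dev locks omitted; illustrative only, not the driver code):

#include <pthread.h>
#include <stddef.h>

struct kvm { pthread_mutex_t lock; };
struct ap_matrix_mdev { struct kvm *kvm; };

/* Lock is taken only when the mdev is bound to a guest, so the release
 * path must repeat exactly the same NULL checks. */
static void get_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
{
	if (matrix_mdev && matrix_mdev->kvm)
		pthread_mutex_lock(&matrix_mdev->kvm->lock);
}

static void release_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
{
	if (matrix_mdev && matrix_mdev->kvm)
		pthread_mutex_unlock(&matrix_mdev->kvm->lock);
}

int main(void)
{
	struct kvm kvm = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct ap_matrix_mdev mdev = { .kvm = &kvm };

	get_update_locks_for_mdev(&mdev);	/* takes kvm->lock */
	release_update_locks_for_mdev(&mdev);	/* releases it */

	mdev.kvm = NULL;
	get_update_locks_for_mdev(&mdev);	/* no-op: not bound to a guest */
	release_update_locks_for_mdev(&mdev);	/* matching no-op */
	return 0;
}
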
152 struct ap_matrix_mdev *matrix_mdev; in get_update_locks_by_apqn() local
156 list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { in get_update_locks_by_apqn()
157 if (test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm) && in get_update_locks_by_apqn()
158 test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm)) { in get_update_locks_by_apqn()
159 if (matrix_mdev->kvm) in get_update_locks_by_apqn()
160 mutex_lock(&matrix_mdev->kvm->lock); in get_update_locks_by_apqn()
164 return matrix_mdev; in get_update_locks_by_apqn()
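
get_update_locks_by_apqn walks the mdev list and matches the queue's card and domain indices against each mdev's APM/AQM masks. A small sketch of the APQN split it relies on, assuming the usual encoding (adapter number in the high byte, domain index in the low byte) for AP_QID_CARD/AP_QID_QUEUE:

#include <stdio.h>

/* Assumed APQN layout: adapter (card) in bits 8-15, domain (queue) in bits 0-7 */
#define AP_QID_CARD(qid)  (((qid) >> 8) & 0xff)
#define AP_QID_QUEUE(qid) ((qid) & 0xff)

int main(void)
{
	int apqn = 0x0305;	/* adapter 0x03, domain 0x05 */

	printf("card=%02x queue=%02x\n", AP_QID_CARD(apqn), AP_QID_QUEUE(apqn));
	return 0;
}
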
192 if (q->matrix_mdev && q->matrix_mdev->kvm) in get_update_locks_for_queue()
193 mutex_lock(&q->matrix_mdev->kvm->lock); in get_update_locks_for_queue()
207 struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_get_queue() argument
212 hash_for_each_possible(matrix_mdev->qtable.queues, q, mdev_qnode, in vfio_ap_mdev_get_queue()
274 !WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) { in vfio_ap_free_aqic_resources()
275 kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc); in vfio_ap_free_aqic_resources()
278 if (q->saved_iova && !WARN_ON(!q->matrix_mdev)) { in vfio_ap_free_aqic_resources()
279 vfio_unpin_pages(&q->matrix_mdev->vdev, q->saved_iova, 1); in vfio_ap_free_aqic_resources()
428 ret = vfio_pin_pages(&q->matrix_mdev->vdev, nib, 1, in vfio_ap_irq_enable()
442 kvm = q->matrix_mdev->kvm; in vfio_ap_irq_enable()
451 vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1); in vfio_ap_irq_enable()
461 vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1); in vfio_ap_irq_enable()
484 vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1); in vfio_ap_irq_enable()
577 struct ap_matrix_mdev *matrix_mdev; in handle_pqap() local
598 matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook, in handle_pqap()
602 if (!matrix_mdev->kvm) { in handle_pqap()
603 vfio_ap_le_guid_to_be_uuid(&matrix_mdev->mdev->uuid, uuid); in handle_pqap()
610 q = vfio_ap_mdev_get_queue(matrix_mdev, apqn); in handle_pqap()
641 static void vfio_ap_mdev_update_guest_apcb(struct ap_matrix_mdev *matrix_mdev) in vfio_ap_mdev_update_guest_apcb() argument
643 if (matrix_mdev->kvm) in vfio_ap_mdev_update_guest_apcb()
644 kvm_arch_crypto_set_masks(matrix_mdev->kvm, in vfio_ap_mdev_update_guest_apcb()
645 matrix_mdev->shadow_apcb.apm, in vfio_ap_mdev_update_guest_apcb()
646 matrix_mdev->shadow_apcb.aqm, in vfio_ap_mdev_update_guest_apcb()
647 matrix_mdev->shadow_apcb.adm); in vfio_ap_mdev_update_guest_apcb()
650 static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev) in vfio_ap_mdev_filter_cdoms() argument
654 bitmap_copy(prev_shadow_adm, matrix_mdev->shadow_apcb.adm, AP_DOMAINS); in vfio_ap_mdev_filter_cdoms()
655 bitmap_and(matrix_mdev->shadow_apcb.adm, matrix_mdev->matrix.adm, in vfio_ap_mdev_filter_cdoms()
658 return !bitmap_equal(prev_shadow_adm, matrix_mdev->shadow_apcb.adm, in vfio_ap_mdev_filter_cdoms()
699 static bool vfio_ap_mdev_filter_matrix(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_filter_matrix() argument
706 bitmap_copy(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, AP_DEVICES); in vfio_ap_mdev_filter_matrix()
707 bitmap_copy(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS); in vfio_ap_mdev_filter_matrix()
708 vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb); in vfio_ap_mdev_filter_matrix()
716 bitmap_and(matrix_mdev->shadow_apcb.apm, matrix_mdev->matrix.apm, in vfio_ap_mdev_filter_matrix()
718 bitmap_and(matrix_mdev->shadow_apcb.aqm, matrix_mdev->matrix.aqm, in vfio_ap_mdev_filter_matrix()
721 for_each_set_bit_inv(apid, matrix_mdev->shadow_apcb.apm, AP_DEVICES) { in vfio_ap_mdev_filter_matrix()
722 for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm, in vfio_ap_mdev_filter_matrix()
733 if (!_queue_passable(vfio_ap_mdev_get_queue(matrix_mdev, apqn))) { in vfio_ap_mdev_filter_matrix()
734 clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm); in vfio_ap_mdev_filter_matrix()
749 return !bitmap_equal(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, in vfio_ap_mdev_filter_matrix()
751 !bitmap_equal(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, in vfio_ap_mdev_filter_matrix()
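
Both filter helpers follow the same pattern visible in the fragments above: snapshot the current shadow mask, AND the administrator-assigned mask with what is actually usable, and report whether the shadow APCB changed so the caller knows to push an updated APCB to the guest. A stand-alone model of that pattern (plain 64-bit words instead of the kernel bitmap API, bit numbering simplified relative to the s390 inverted-bit helpers):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define AP_DOMAINS 256
#define WORDS (AP_DOMAINS / 64)

/* shadow = assigned & available; returns true when the shadow mask changed */
static bool filter_mask(unsigned long long shadow[WORDS],
			const unsigned long long assigned[WORDS],
			const unsigned long long available[WORDS])
{
	unsigned long long prev[WORDS];
	int i;

	memcpy(prev, shadow, sizeof(prev));
	for (i = 0; i < WORDS; i++)
		shadow[i] = assigned[i] & available[i];
	return memcmp(prev, shadow, sizeof(prev)) != 0;
}

int main(void)
{
	unsigned long long shadow[WORDS] = { 0 };
	unsigned long long assigned[WORDS] = { 0x5 };	/* domains 0 and 2 assigned */
	unsigned long long avail[WORDS] = { 0x1 };	/* only domain 0 present */

	printf("changed=%d shadow[0]=%llx\n",
	       filter_mask(shadow, assigned, avail), shadow[0]);
	return 0;
}
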
757 struct ap_matrix_mdev *matrix_mdev = in vfio_ap_mdev_init_dev() local
760 matrix_mdev->mdev = to_mdev_device(vdev->dev); in vfio_ap_mdev_init_dev()
761 vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix); in vfio_ap_mdev_init_dev()
762 matrix_mdev->pqap_hook = handle_pqap; in vfio_ap_mdev_init_dev()
763 vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb); in vfio_ap_mdev_init_dev()
764 hash_init(matrix_mdev->qtable.queues); in vfio_ap_mdev_init_dev()
771 struct ap_matrix_mdev *matrix_mdev; in vfio_ap_mdev_probe() local
774 matrix_mdev = vfio_alloc_device(ap_matrix_mdev, vdev, &mdev->dev, in vfio_ap_mdev_probe()
776 if (IS_ERR(matrix_mdev)) in vfio_ap_mdev_probe()
777 return PTR_ERR(matrix_mdev); in vfio_ap_mdev_probe()
779 ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev); in vfio_ap_mdev_probe()
782 matrix_mdev->req_trigger = NULL; in vfio_ap_mdev_probe()
783 dev_set_drvdata(&mdev->dev, matrix_mdev); in vfio_ap_mdev_probe()
785 list_add(&matrix_mdev->node, &matrix_dev->mdev_list); in vfio_ap_mdev_probe()
790 vfio_put_device(&matrix_mdev->vdev); in vfio_ap_mdev_probe()
794 static void vfio_ap_mdev_link_queue(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_link_queue() argument
797 if (!q || vfio_ap_mdev_get_queue(matrix_mdev, q->apqn)) in vfio_ap_mdev_link_queue()
800 q->matrix_mdev = matrix_mdev; in vfio_ap_mdev_link_queue()
801 hash_add(matrix_mdev->qtable.queues, &q->mdev_qnode, q->apqn); in vfio_ap_mdev_link_queue()
804 static void vfio_ap_mdev_link_apqn(struct ap_matrix_mdev *matrix_mdev, int apqn) in vfio_ap_mdev_link_apqn() argument
809 vfio_ap_mdev_link_queue(matrix_mdev, q); in vfio_ap_mdev_link_apqn()
819 q->matrix_mdev = NULL; in vfio_ap_unlink_mdev_fr_queue()
822 static void vfio_ap_mdev_unlink_fr_queues(struct ap_matrix_mdev *matrix_mdev) in vfio_ap_mdev_unlink_fr_queues() argument
827 for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) { in vfio_ap_mdev_unlink_fr_queues()
828 for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, in vfio_ap_mdev_unlink_fr_queues()
830 q = vfio_ap_mdev_get_queue(matrix_mdev, in vfio_ap_mdev_unlink_fr_queues()
833 q->matrix_mdev = NULL; in vfio_ap_mdev_unlink_fr_queues()
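
The link/unlink helpers keep a per-mdev lookup structure keyed by APQN plus a back-pointer from each queue to its mdev, and linking is skipped when the APQN is already present. A compact model using a fixed array in place of the kernel hashtable (illustrative only; names mirror the listing but the types are simplified):

#include <stddef.h>
#include <stdio.h>

struct ap_matrix_mdev;

struct vfio_ap_queue {
	int apqn;
	struct ap_matrix_mdev *matrix_mdev;	/* back-pointer, NULL when unlinked */
};

struct ap_matrix_mdev {
	struct vfio_ap_queue *queues[65536];	/* indexed by APQN; stands in for the hashtable */
};

static struct vfio_ap_queue *get_queue(struct ap_matrix_mdev *m, int apqn)
{
	return m->queues[apqn];
}

static void link_queue(struct ap_matrix_mdev *m, struct vfio_ap_queue *q)
{
	if (!q || get_queue(m, q->apqn))	/* already linked: nothing to do */
		return;
	q->matrix_mdev = m;
	m->queues[q->apqn] = q;
}

static void unlink_queue(struct ap_matrix_mdev *m, struct vfio_ap_queue *q)
{
	m->queues[q->apqn] = NULL;
	q->matrix_mdev = NULL;
}

int main(void)
{
	static struct ap_matrix_mdev m;
	struct vfio_ap_queue q = { .apqn = 0x0305 };

	link_queue(&m, &q);
	printf("linked=%d\n", get_queue(&m, 0x0305) == &q);
	unlink_queue(&m, &q);
	printf("linked=%d\n", get_queue(&m, 0x0305) == &q);
	return 0;
}
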
840 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev); in vfio_ap_mdev_remove() local
842 vfio_unregister_group_dev(&matrix_mdev->vdev); in vfio_ap_mdev_remove()
846 vfio_ap_mdev_reset_queues(matrix_mdev); in vfio_ap_mdev_remove()
847 vfio_ap_mdev_unlink_fr_queues(matrix_mdev); in vfio_ap_mdev_remove()
848 list_del(&matrix_mdev->node); in vfio_ap_mdev_remove()
851 vfio_put_device(&matrix_mdev->vdev); in vfio_ap_mdev_remove()
857 static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_log_sharing_err() argument
862 const struct device *dev = mdev_dev(matrix_mdev->mdev); in vfio_ap_mdev_log_sharing_err()
885 struct ap_matrix_mdev *matrix_mdev; in vfio_ap_mdev_verify_no_sharing() local
889 list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { in vfio_ap_mdev_verify_no_sharing()
894 if (mdev_apm == matrix_mdev->matrix.apm && in vfio_ap_mdev_verify_no_sharing()
895 mdev_aqm == matrix_mdev->matrix.aqm) in vfio_ap_mdev_verify_no_sharing()
905 if (!bitmap_and(apm, mdev_apm, matrix_mdev->matrix.apm, in vfio_ap_mdev_verify_no_sharing()
909 if (!bitmap_and(aqm, mdev_aqm, matrix_mdev->matrix.aqm, in vfio_ap_mdev_verify_no_sharing()
913 vfio_ap_mdev_log_sharing_err(matrix_mdev, apm, aqm); in vfio_ap_mdev_verify_no_sharing()
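
The sharing check above reports a conflict only when both the adapter masks and the domain masks of two mdevs intersect, i.e. when they would own at least one common APQN (identical matrices are skipped earlier). A minimal model of that predicate with single-word masks:

#include <stdbool.h>
#include <stdio.h>

/* Two matrices share an APQN iff both the adapter masks and the
 * domain masks have a common bit. */
static bool shares_apqn(unsigned long long apm_a, unsigned long long aqm_a,
			unsigned long long apm_b, unsigned long long aqm_b)
{
	return (apm_a & apm_b) && (aqm_a & aqm_b);
}

int main(void)
{
	/* A: adapter 1, domain 3; B: adapter 1, domain 4 -> no shared APQN */
	printf("%d\n", shares_apqn(1ULL << 1, 1ULL << 3, 1ULL << 1, 1ULL << 4));
	/* C: adapter 1, domain 3 -> shares APQN (1,3) with A */
	printf("%d\n", shares_apqn(1ULL << 1, 1ULL << 3, 1ULL << 1, 1ULL << 3));
	return 0;
}
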
936 static int vfio_ap_mdev_validate_masks(struct ap_matrix_mdev *matrix_mdev) in vfio_ap_mdev_validate_masks() argument
938 if (ap_apqn_in_matrix_owned_by_def_drv(matrix_mdev->matrix.apm, in vfio_ap_mdev_validate_masks()
939 matrix_mdev->matrix.aqm)) in vfio_ap_mdev_validate_masks()
942 return vfio_ap_mdev_verify_no_sharing(matrix_mdev->matrix.apm, in vfio_ap_mdev_validate_masks()
943 matrix_mdev->matrix.aqm); in vfio_ap_mdev_validate_masks()
946 static void vfio_ap_mdev_link_adapter(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_link_adapter() argument
951 for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) in vfio_ap_mdev_link_adapter()
952 vfio_ap_mdev_link_apqn(matrix_mdev, in vfio_ap_mdev_link_adapter()
956 static void collect_queues_to_reset(struct ap_matrix_mdev *matrix_mdev, in collect_queues_to_reset() argument
963 for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS) { in collect_queues_to_reset()
964 q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi)); in collect_queues_to_reset()
970 static void reset_queues_for_apid(struct ap_matrix_mdev *matrix_mdev, in reset_queues_for_apid() argument
976 collect_queues_to_reset(matrix_mdev, apid, &qlist); in reset_queues_for_apid()
980 static int reset_queues_for_apids(struct ap_matrix_mdev *matrix_mdev, in reset_queues_for_apids() argument
992 collect_queues_to_reset(matrix_mdev, apid, &qlist); in reset_queues_for_apids()
1038 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in assign_adapter_store() local
1041 get_update_locks_for_mdev(matrix_mdev); in assign_adapter_store()
1047 if (apid > matrix_mdev->matrix.apm_max) { in assign_adapter_store()
1052 if (test_bit_inv(apid, matrix_mdev->matrix.apm)) { in assign_adapter_store()
1057 set_bit_inv(apid, matrix_mdev->matrix.apm); in assign_adapter_store()
1059 ret = vfio_ap_mdev_validate_masks(matrix_mdev); in assign_adapter_store()
1061 clear_bit_inv(apid, matrix_mdev->matrix.apm); in assign_adapter_store()
1065 vfio_ap_mdev_link_adapter(matrix_mdev, apid); in assign_adapter_store()
1067 if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) { in assign_adapter_store()
1068 vfio_ap_mdev_update_guest_apcb(matrix_mdev); in assign_adapter_store()
1069 reset_queues_for_apids(matrix_mdev, apm_filtered); in assign_adapter_store()
1074 release_update_locks_for_mdev(matrix_mdev); in assign_adapter_store()
1082 *vfio_ap_unlink_apqn_fr_mdev(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_unlink_apqn_fr_mdev() argument
1087 q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi)); in vfio_ap_unlink_apqn_fr_mdev()
1104 static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_unlink_adapter() argument
1111 for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) { in vfio_ap_mdev_unlink_adapter()
1112 q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi); in vfio_ap_mdev_unlink_adapter()
1115 if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) && in vfio_ap_mdev_unlink_adapter()
1116 test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) in vfio_ap_mdev_unlink_adapter()
1122 static void vfio_ap_mdev_hot_unplug_adapters(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_hot_unplug_adapters() argument
1133 vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, &qlist); in vfio_ap_mdev_hot_unplug_adapters()
1135 if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm)) { in vfio_ap_mdev_hot_unplug_adapters()
1136 clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm); in vfio_ap_mdev_hot_unplug_adapters()
1143 vfio_ap_mdev_update_guest_apcb(matrix_mdev); in vfio_ap_mdev_hot_unplug_adapters()
1153 static void vfio_ap_mdev_hot_unplug_adapter(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_hot_unplug_adapter() argument
1160 vfio_ap_mdev_hot_unplug_adapters(matrix_mdev, apids); in vfio_ap_mdev_hot_unplug_adapter()
1184 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in unassign_adapter_store() local
1186 get_update_locks_for_mdev(matrix_mdev); in unassign_adapter_store()
1192 if (apid > matrix_mdev->matrix.apm_max) { in unassign_adapter_store()
1197 if (!test_bit_inv(apid, matrix_mdev->matrix.apm)) { in unassign_adapter_store()
1202 clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm); in unassign_adapter_store()
1203 vfio_ap_mdev_hot_unplug_adapter(matrix_mdev, apid); in unassign_adapter_store()
1206 release_update_locks_for_mdev(matrix_mdev); in unassign_adapter_store()
1211 static void vfio_ap_mdev_link_domain(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_link_domain() argument
1216 for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) in vfio_ap_mdev_link_domain()
1217 vfio_ap_mdev_link_apqn(matrix_mdev, in vfio_ap_mdev_link_domain()
1262 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in assign_domain_store() local
1265 get_update_locks_for_mdev(matrix_mdev); in assign_domain_store()
1271 if (apqi > matrix_mdev->matrix.aqm_max) { in assign_domain_store()
1276 if (test_bit_inv(apqi, matrix_mdev->matrix.aqm)) { in assign_domain_store()
1281 set_bit_inv(apqi, matrix_mdev->matrix.aqm); in assign_domain_store()
1283 ret = vfio_ap_mdev_validate_masks(matrix_mdev); in assign_domain_store()
1285 clear_bit_inv(apqi, matrix_mdev->matrix.aqm); in assign_domain_store()
1289 vfio_ap_mdev_link_domain(matrix_mdev, apqi); in assign_domain_store()
1291 if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) { in assign_domain_store()
1292 vfio_ap_mdev_update_guest_apcb(matrix_mdev); in assign_domain_store()
1293 reset_queues_for_apids(matrix_mdev, apm_filtered); in assign_domain_store()
1298 release_update_locks_for_mdev(matrix_mdev); in assign_domain_store()
1305 static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_unlink_domain() argument
1312 for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) { in vfio_ap_mdev_unlink_domain()
1313 q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi); in vfio_ap_mdev_unlink_domain()
1316 if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) && in vfio_ap_mdev_unlink_domain()
1317 test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) in vfio_ap_mdev_unlink_domain()
1323 static void vfio_ap_mdev_hot_unplug_domains(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_hot_unplug_domains() argument
1334 vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, &qlist); in vfio_ap_mdev_hot_unplug_domains()
1336 if (test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) { in vfio_ap_mdev_hot_unplug_domains()
1337 clear_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm); in vfio_ap_mdev_hot_unplug_domains()
1344 vfio_ap_mdev_update_guest_apcb(matrix_mdev); in vfio_ap_mdev_hot_unplug_domains()
1354 static void vfio_ap_mdev_hot_unplug_domain(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_hot_unplug_domain() argument
1361 vfio_ap_mdev_hot_unplug_domains(matrix_mdev, apqis); in vfio_ap_mdev_hot_unplug_domain()
1385 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in unassign_domain_store() local
1387 get_update_locks_for_mdev(matrix_mdev); in unassign_domain_store()
1393 if (apqi > matrix_mdev->matrix.aqm_max) { in unassign_domain_store()
1398 if (!test_bit_inv(apqi, matrix_mdev->matrix.aqm)) { in unassign_domain_store()
1403 clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm); in unassign_domain_store()
1404 vfio_ap_mdev_hot_unplug_domain(matrix_mdev, apqi); in unassign_domain_store()
1408 release_update_locks_for_mdev(matrix_mdev); in unassign_domain_store()
1433 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in assign_control_domain_store() local
1435 get_update_locks_for_mdev(matrix_mdev); in assign_control_domain_store()
1441 if (id > matrix_mdev->matrix.adm_max) { in assign_control_domain_store()
1446 if (test_bit_inv(id, matrix_mdev->matrix.adm)) { in assign_control_domain_store()
1456 set_bit_inv(id, matrix_mdev->matrix.adm); in assign_control_domain_store()
1457 if (vfio_ap_mdev_filter_cdoms(matrix_mdev)) in assign_control_domain_store()
1458 vfio_ap_mdev_update_guest_apcb(matrix_mdev); in assign_control_domain_store()
1462 release_update_locks_for_mdev(matrix_mdev); in assign_control_domain_store()
1487 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in unassign_control_domain_store() local
1489 get_update_locks_for_mdev(matrix_mdev); in unassign_control_domain_store()
1495 if (domid > matrix_mdev->matrix.adm_max) { in unassign_control_domain_store()
1500 if (!test_bit_inv(domid, matrix_mdev->matrix.adm)) { in unassign_control_domain_store()
1505 clear_bit_inv(domid, matrix_mdev->matrix.adm); in unassign_control_domain_store()
1507 if (test_bit_inv(domid, matrix_mdev->shadow_apcb.adm)) { in unassign_control_domain_store()
1508 clear_bit_inv(domid, matrix_mdev->shadow_apcb.adm); in unassign_control_domain_store()
1509 vfio_ap_mdev_update_guest_apcb(matrix_mdev); in unassign_control_domain_store()
1514 release_update_locks_for_mdev(matrix_mdev); in unassign_control_domain_store()
1527 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in control_domains_show() local
1528 unsigned long max_domid = matrix_mdev->matrix.adm_max; in control_domains_show()
1531 for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) { in control_domains_show()
1588 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in matrix_show() local
1591 nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->matrix, buf); in matrix_show()
1602 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in guest_matrix_show() local
1605 nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->shadow_apcb, buf); in guest_matrix_show()
1621 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in ap_config_show() local
1624 idx += write_ap_bitmap(matrix_mdev->matrix.apm, buf, idx, ','); in ap_config_show()
1625 idx += write_ap_bitmap(matrix_mdev->matrix.aqm, buf, idx, ','); in ap_config_show()
1626 idx += write_ap_bitmap(matrix_mdev->matrix.adm, buf, idx, '\n'); in ap_config_show()
1648 static int ap_matrix_overflow_check(struct ap_matrix_mdev *matrix_mdev) in ap_matrix_overflow_check() argument
1652 for_each_set_bit_inv(bit, matrix_mdev->matrix.apm, AP_DEVICES) { in ap_matrix_overflow_check()
1653 if (bit > matrix_mdev->matrix.apm_max) in ap_matrix_overflow_check()
1657 for_each_set_bit_inv(bit, matrix_mdev->matrix.aqm, AP_DOMAINS) { in ap_matrix_overflow_check()
1658 if (bit > matrix_mdev->matrix.aqm_max) in ap_matrix_overflow_check()
1662 for_each_set_bit_inv(bit, matrix_mdev->matrix.adm, AP_DOMAINS) { in ap_matrix_overflow_check()
1663 if (bit > matrix_mdev->matrix.adm_max) in ap_matrix_overflow_check()
1684 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in ap_config_store() local
1698 get_update_locks_for_mdev(matrix_mdev); in ap_config_store()
1701 ap_matrix_copy(&m_old, &matrix_mdev->matrix); in ap_config_store()
1715 ap_matrix_copy(&matrix_mdev->matrix, &m_new); in ap_config_store()
1718 rc = vfio_ap_mdev_validate_masks(matrix_mdev); in ap_config_store()
1720 ap_matrix_copy(&matrix_mdev->matrix, &m_old); in ap_config_store()
1723 rc = ap_matrix_overflow_check(matrix_mdev); in ap_config_store()
1725 ap_matrix_copy(&matrix_mdev->matrix, &m_old); in ap_config_store()
1731 ap_matrix_copy(&matrix_mdev->matrix, &m_old); in ap_config_store()
1734 vfio_ap_mdev_hot_unplug_adapters(matrix_mdev, m_removed.apm); in ap_config_store()
1735 vfio_ap_mdev_hot_unplug_domains(matrix_mdev, m_removed.aqm); in ap_config_store()
1738 ap_matrix_copy(&matrix_mdev->matrix, &m_new); in ap_config_store()
1742 vfio_ap_mdev_link_adapter(matrix_mdev, newbit); in ap_config_store()
1745 vfio_ap_mdev_link_domain(matrix_mdev, newbit); in ap_config_store()
1748 do_update = vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered); in ap_config_store()
1749 do_update |= vfio_ap_mdev_filter_cdoms(matrix_mdev); in ap_config_store()
1753 vfio_ap_mdev_update_guest_apcb(matrix_mdev); in ap_config_store()
1754 reset_queues_for_apids(matrix_mdev, apm_filtered); in ap_config_store()
1757 release_update_locks_for_mdev(matrix_mdev); in ap_config_store()
1797 static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_set_kvm() argument
1804 kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook; in vfio_ap_mdev_set_kvm()
1810 if (m != matrix_mdev && m->kvm == kvm) { in vfio_ap_mdev_set_kvm()
1817 matrix_mdev->kvm = kvm; in vfio_ap_mdev_set_kvm()
1818 vfio_ap_mdev_update_guest_apcb(matrix_mdev); in vfio_ap_mdev_set_kvm()
1826 static void unmap_iova(struct ap_matrix_mdev *matrix_mdev, u64 iova, u64 length) in unmap_iova() argument
1828 struct ap_queue_table *qtable = &matrix_mdev->qtable; in unmap_iova()
1841 struct ap_matrix_mdev *matrix_mdev = in vfio_ap_mdev_dma_unmap() local
1846 unmap_iova(matrix_mdev, iova, length); in vfio_ap_mdev_dma_unmap()
1857 static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev) in vfio_ap_mdev_unset_kvm() argument
1859 struct kvm *kvm = matrix_mdev->kvm; in vfio_ap_mdev_unset_kvm()
1869 vfio_ap_mdev_reset_queues(matrix_mdev); in vfio_ap_mdev_unset_kvm()
1871 matrix_mdev->kvm = NULL; in vfio_ap_mdev_unset_kvm()
1993 static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev) in vfio_ap_mdev_reset_queues() argument
1998 hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode) in vfio_ap_mdev_reset_queues()
2001 hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode) { in vfio_ap_mdev_reset_queues()
2031 struct ap_matrix_mdev *matrix_mdev = in vfio_ap_mdev_open_device() local
2037 return vfio_ap_mdev_set_kvm(matrix_mdev, vdev->kvm); in vfio_ap_mdev_open_device()
2042 struct ap_matrix_mdev *matrix_mdev = in vfio_ap_mdev_close_device() local
2045 vfio_ap_mdev_unset_kvm(matrix_mdev); in vfio_ap_mdev_close_device()
2051 struct ap_matrix_mdev *matrix_mdev; in vfio_ap_mdev_request() local
2053 matrix_mdev = container_of(vdev, struct ap_matrix_mdev, vdev); in vfio_ap_mdev_request()
2055 if (matrix_mdev->req_trigger) { in vfio_ap_mdev_request()
2061 eventfd_signal(matrix_mdev->req_trigger); in vfio_ap_mdev_request()
2135 static int vfio_ap_set_request_irq(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_set_request_irq() argument
2150 if (matrix_mdev->req_trigger) in vfio_ap_set_request_irq()
2151 eventfd_ctx_put(matrix_mdev->req_trigger); in vfio_ap_set_request_irq()
2152 matrix_mdev->req_trigger = NULL; in vfio_ap_set_request_irq()
2158 if (matrix_mdev->req_trigger) in vfio_ap_set_request_irq()
2159 eventfd_ctx_put(matrix_mdev->req_trigger); in vfio_ap_set_request_irq()
2161 matrix_mdev->req_trigger = req_trigger; in vfio_ap_set_request_irq()
2169 static int vfio_ap_set_irqs(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_set_irqs() argument
2183 return vfio_ap_set_request_irq(matrix_mdev, arg); in vfio_ap_set_irqs()
2195 struct ap_matrix_mdev *matrix_mdev = in vfio_ap_mdev_ioctl() local
2205 ret = vfio_ap_mdev_reset_queues(matrix_mdev); in vfio_ap_mdev_ioctl()
2211 ret = vfio_ap_set_irqs(matrix_mdev, arg); in vfio_ap_mdev_ioctl()
2224 struct ap_matrix_mdev *matrix_mdev; in vfio_ap_mdev_for_queue() local
2228 list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { in vfio_ap_mdev_for_queue()
2229 if (test_bit_inv(apid, matrix_mdev->matrix.apm) && in vfio_ap_mdev_for_queue()
2230 test_bit_inv(apqi, matrix_mdev->matrix.aqm)) in vfio_ap_mdev_for_queue()
2231 return matrix_mdev; in vfio_ap_mdev_for_queue()
2244 struct ap_matrix_mdev *matrix_mdev; in status_show() local
2249 matrix_mdev = vfio_ap_mdev_for_queue(q); in status_show()
2255 if (matrix_mdev) { in status_show()
2263 if (matrix_mdev->kvm && in status_show()
2264 test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) && in status_show()
2265 test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) in status_show()
2352 struct ap_matrix_mdev *matrix_mdev; in vfio_ap_mdev_probe_queue() local
2368 matrix_mdev = get_update_locks_by_apqn(q->apqn); in vfio_ap_mdev_probe_queue()
2370 if (matrix_mdev) { in vfio_ap_mdev_probe_queue()
2371 vfio_ap_mdev_link_queue(matrix_mdev, q); in vfio_ap_mdev_probe_queue()
2380 if (!bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) || in vfio_ap_mdev_probe_queue()
2381 !bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS)) in vfio_ap_mdev_probe_queue()
2384 if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) { in vfio_ap_mdev_probe_queue()
2385 vfio_ap_mdev_update_guest_apcb(matrix_mdev); in vfio_ap_mdev_probe_queue()
2386 reset_queues_for_apids(matrix_mdev, apm_filtered); in vfio_ap_mdev_probe_queue()
2392 release_update_locks_for_mdev(matrix_mdev); in vfio_ap_mdev_probe_queue()
2405 struct ap_matrix_mdev *matrix_mdev; in vfio_ap_mdev_remove_queue() local
2410 matrix_mdev = q->matrix_mdev; in vfio_ap_mdev_remove_queue()
2414 if (matrix_mdev) { in vfio_ap_mdev_remove_queue()
2416 if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) && in vfio_ap_mdev_remove_queue()
2417 test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) { in vfio_ap_mdev_remove_queue()
2423 clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm); in vfio_ap_mdev_remove_queue()
2424 vfio_ap_mdev_update_guest_apcb(matrix_mdev); in vfio_ap_mdev_remove_queue()
2425 reset_queues_for_apid(matrix_mdev, apid); in vfio_ap_mdev_remove_queue()
2442 if (matrix_mdev) in vfio_ap_mdev_remove_queue()
2447 release_update_locks_for_mdev(matrix_mdev); in vfio_ap_mdev_remove_queue()
2488 static void vfio_ap_mdev_hot_unplug_cfg(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_hot_unplug_cfg() argument
2496 do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.apm, in vfio_ap_mdev_hot_unplug_cfg()
2497 matrix_mdev->shadow_apcb.apm, in vfio_ap_mdev_hot_unplug_cfg()
2502 do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.aqm, in vfio_ap_mdev_hot_unplug_cfg()
2503 matrix_mdev->shadow_apcb.aqm, in vfio_ap_mdev_hot_unplug_cfg()
2508 do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.adm, in vfio_ap_mdev_hot_unplug_cfg()
2509 matrix_mdev->shadow_apcb.adm, in vfio_ap_mdev_hot_unplug_cfg()
2513 vfio_ap_mdev_update_guest_apcb(matrix_mdev); in vfio_ap_mdev_hot_unplug_cfg()
2533 struct ap_matrix_mdev *matrix_mdev; in vfio_ap_mdev_cfg_remove() local
2539 list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { in vfio_ap_mdev_cfg_remove()
2540 mutex_lock(&matrix_mdev->kvm->lock); in vfio_ap_mdev_cfg_remove()
2544 matrix_mdev->matrix.apm, in vfio_ap_mdev_cfg_remove()
2547 matrix_mdev->matrix.aqm, in vfio_ap_mdev_cfg_remove()
2550 matrix_mdev->matrix.adm, in vfio_ap_mdev_cfg_remove()
2554 vfio_ap_mdev_hot_unplug_cfg(matrix_mdev, aprem, aqrem, in vfio_ap_mdev_cfg_remove()
2558 mutex_unlock(&matrix_mdev->kvm->lock); in vfio_ap_mdev_cfg_remove()
2675 struct ap_matrix_mdev *matrix_mdev; in vfio_ap_mdev_cfg_add() local
2682 list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { in vfio_ap_mdev_cfg_add()
2683 bitmap_and(matrix_mdev->apm_add, in vfio_ap_mdev_cfg_add()
2684 matrix_mdev->matrix.apm, apm_add, AP_DEVICES); in vfio_ap_mdev_cfg_add()
2685 bitmap_and(matrix_mdev->aqm_add, in vfio_ap_mdev_cfg_add()
2686 matrix_mdev->matrix.aqm, aqm_add, AP_DOMAINS); in vfio_ap_mdev_cfg_add()
2687 bitmap_and(matrix_mdev->adm_add, in vfio_ap_mdev_cfg_add()
2688 matrix_mdev->matrix.adm, adm_add, AP_DEVICES); in vfio_ap_mdev_cfg_add()
2750 static void vfio_ap_mdev_hot_plug_cfg(struct ap_matrix_mdev *matrix_mdev) in vfio_ap_mdev_hot_plug_cfg() argument
2755 mutex_lock(&matrix_mdev->kvm->lock); in vfio_ap_mdev_hot_plug_cfg()
2758 filter_adapters = bitmap_intersects(matrix_mdev->matrix.apm, in vfio_ap_mdev_hot_plug_cfg()
2759 matrix_mdev->apm_add, AP_DEVICES); in vfio_ap_mdev_hot_plug_cfg()
2760 filter_domains = bitmap_intersects(matrix_mdev->matrix.aqm, in vfio_ap_mdev_hot_plug_cfg()
2761 matrix_mdev->aqm_add, AP_DOMAINS); in vfio_ap_mdev_hot_plug_cfg()
2762 filter_cdoms = bitmap_intersects(matrix_mdev->matrix.adm, in vfio_ap_mdev_hot_plug_cfg()
2763 matrix_mdev->adm_add, AP_DOMAINS); in vfio_ap_mdev_hot_plug_cfg()
2766 do_hotplug = vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered); in vfio_ap_mdev_hot_plug_cfg()
2769 do_hotplug |= vfio_ap_mdev_filter_cdoms(matrix_mdev); in vfio_ap_mdev_hot_plug_cfg()
2772 vfio_ap_mdev_update_guest_apcb(matrix_mdev); in vfio_ap_mdev_hot_plug_cfg()
2774 reset_queues_for_apids(matrix_mdev, apm_filtered); in vfio_ap_mdev_hot_plug_cfg()
2777 mutex_unlock(&matrix_mdev->kvm->lock); in vfio_ap_mdev_hot_plug_cfg()
2783 struct ap_matrix_mdev *matrix_mdev; in vfio_ap_on_scan_complete() local
2787 list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { in vfio_ap_on_scan_complete()
2788 if (bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) && in vfio_ap_on_scan_complete()
2789 bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS) && in vfio_ap_on_scan_complete()
2790 bitmap_empty(matrix_mdev->adm_add, AP_DOMAINS)) in vfio_ap_on_scan_complete()
2793 vfio_ap_mdev_hot_plug_cfg(matrix_mdev); in vfio_ap_on_scan_complete()
2794 bitmap_clear(matrix_mdev->apm_add, 0, AP_DEVICES); in vfio_ap_on_scan_complete()
2795 bitmap_clear(matrix_mdev->aqm_add, 0, AP_DOMAINS); in vfio_ap_on_scan_complete()
2796 bitmap_clear(matrix_mdev->adm_add, 0, AP_DOMAINS); in vfio_ap_on_scan_complete()
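
Taken together, the cfg_add and scan-complete fragments above suggest a gate: each mdev records the intersection of its assigned masks with the newly arrived devices in its *_add masks, and the hot-plug step runs only when those masks are non-empty, clearing them afterwards. A small model of that flow under those assumptions (single-word masks, adm handling omitted):

#include <stdbool.h>
#include <stdio.h>

struct mdev_masks {
	unsigned long long assigned_apm, assigned_aqm;
	unsigned long long apm_add, aqm_add;
};

/* cfg_add: remember which newly arrived adapters/domains this mdev cares about */
static void cfg_add(struct mdev_masks *m, unsigned long long apm_new,
		    unsigned long long aqm_new)
{
	m->apm_add = m->assigned_apm & apm_new;
	m->aqm_add = m->assigned_aqm & aqm_new;
}

/* scan complete: hot plug only when something relevant was added, then clear */
static bool scan_complete(struct mdev_masks *m)
{
	bool do_hotplug = m->apm_add || m->aqm_add;

	m->apm_add = 0;
	m->aqm_add = 0;
	return do_hotplug;
}

int main(void)
{
	struct mdev_masks m = { .assigned_apm = 0x6, .assigned_aqm = 0x1 };

	cfg_add(&m, 0x4, 0x0);	/* adapter 2 appeared, no new domains */
	printf("hotplug=%d\n", scan_complete(&m));
	return 0;
}
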