Lines Matching refs:matrix_mdev
35 static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev);
100 static inline void get_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev) in get_update_locks_for_mdev() argument
103 if (matrix_mdev && matrix_mdev->kvm) in get_update_locks_for_mdev()
104 mutex_lock(&matrix_mdev->kvm->lock); in get_update_locks_for_mdev()
123 static inline void release_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev) in release_update_locks_for_mdev() argument
126 if (matrix_mdev && matrix_mdev->kvm) in release_update_locks_for_mdev()
127 mutex_unlock(&matrix_mdev->kvm->lock); in release_update_locks_for_mdev()
152 struct ap_matrix_mdev *matrix_mdev; in get_update_locks_by_apqn() local
156 list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { in get_update_locks_by_apqn()
157 if (test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm) && in get_update_locks_by_apqn()
158 test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm)) { in get_update_locks_by_apqn()
159 if (matrix_mdev->kvm) in get_update_locks_by_apqn()
160 mutex_lock(&matrix_mdev->kvm->lock); in get_update_locks_by_apqn()
164 return matrix_mdev; in get_update_locks_by_apqn()
192 if (q->matrix_mdev && q->matrix_mdev->kvm) in get_update_locks_for_queue()
193 mutex_lock(&q->matrix_mdev->kvm->lock); in get_update_locks_for_queue()
207 struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_get_queue() argument
212 hash_for_each_possible(matrix_mdev->qtable.queues, q, mdev_qnode, in vfio_ap_mdev_get_queue()
274 !WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) { in vfio_ap_free_aqic_resources()
275 kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc); in vfio_ap_free_aqic_resources()
278 if (q->saved_iova && !WARN_ON(!q->matrix_mdev)) { in vfio_ap_free_aqic_resources()
279 vfio_unpin_pages(&q->matrix_mdev->vdev, q->saved_iova, 1); in vfio_ap_free_aqic_resources()
440 ret = vfio_pin_pages(&q->matrix_mdev->vdev, nib, 1, in vfio_ap_irq_enable()
454 kvm = q->matrix_mdev->kvm; in vfio_ap_irq_enable()
463 vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1); in vfio_ap_irq_enable()
473 vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1); in vfio_ap_irq_enable()
496 vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1); in vfio_ap_irq_enable()
589 struct ap_matrix_mdev *matrix_mdev; in handle_pqap() local
610 matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook, in handle_pqap()
614 if (!matrix_mdev->kvm) { in handle_pqap()
615 vfio_ap_le_guid_to_be_uuid(&matrix_mdev->mdev->uuid, uuid); in handle_pqap()
622 q = vfio_ap_mdev_get_queue(matrix_mdev, apqn); in handle_pqap()
653 static void signal_guest_ap_cfg_changed(struct ap_matrix_mdev *matrix_mdev) in signal_guest_ap_cfg_changed() argument
655 if (matrix_mdev->cfg_chg_trigger) in signal_guest_ap_cfg_changed()
656 eventfd_signal(matrix_mdev->cfg_chg_trigger); in signal_guest_ap_cfg_changed()
659 static void vfio_ap_mdev_update_guest_apcb(struct ap_matrix_mdev *matrix_mdev) in vfio_ap_mdev_update_guest_apcb() argument
661 if (matrix_mdev->kvm) { in vfio_ap_mdev_update_guest_apcb()
662 kvm_arch_crypto_set_masks(matrix_mdev->kvm, in vfio_ap_mdev_update_guest_apcb()
663 matrix_mdev->shadow_apcb.apm, in vfio_ap_mdev_update_guest_apcb()
664 matrix_mdev->shadow_apcb.aqm, in vfio_ap_mdev_update_guest_apcb()
665 matrix_mdev->shadow_apcb.adm); in vfio_ap_mdev_update_guest_apcb()
667 signal_guest_ap_cfg_changed(matrix_mdev); in vfio_ap_mdev_update_guest_apcb()
671 static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev) in vfio_ap_mdev_filter_cdoms() argument
675 bitmap_copy(prev_shadow_adm, matrix_mdev->shadow_apcb.adm, AP_DOMAINS); in vfio_ap_mdev_filter_cdoms()
676 bitmap_and(matrix_mdev->shadow_apcb.adm, matrix_mdev->matrix.adm, in vfio_ap_mdev_filter_cdoms()
679 return !bitmap_equal(prev_shadow_adm, matrix_mdev->shadow_apcb.adm, in vfio_ap_mdev_filter_cdoms()
720 static bool vfio_ap_mdev_filter_matrix(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_filter_matrix() argument
727 bitmap_copy(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, AP_DEVICES); in vfio_ap_mdev_filter_matrix()
728 bitmap_copy(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS); in vfio_ap_mdev_filter_matrix()
729 vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb); in vfio_ap_mdev_filter_matrix()
737 bitmap_and(matrix_mdev->shadow_apcb.apm, matrix_mdev->matrix.apm, in vfio_ap_mdev_filter_matrix()
739 bitmap_and(matrix_mdev->shadow_apcb.aqm, matrix_mdev->matrix.aqm, in vfio_ap_mdev_filter_matrix()
742 for_each_set_bit_inv(apid, matrix_mdev->shadow_apcb.apm, AP_DEVICES) { in vfio_ap_mdev_filter_matrix()
743 for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm, in vfio_ap_mdev_filter_matrix()
754 if (!_queue_passable(vfio_ap_mdev_get_queue(matrix_mdev, apqn))) { in vfio_ap_mdev_filter_matrix()
755 clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm); in vfio_ap_mdev_filter_matrix()
770 return !bitmap_equal(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, in vfio_ap_mdev_filter_matrix()
772 !bitmap_equal(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, in vfio_ap_mdev_filter_matrix()
778 struct ap_matrix_mdev *matrix_mdev = in vfio_ap_mdev_init_dev() local
781 matrix_mdev->mdev = to_mdev_device(vdev->dev); in vfio_ap_mdev_init_dev()
782 vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix); in vfio_ap_mdev_init_dev()
783 matrix_mdev->pqap_hook = handle_pqap; in vfio_ap_mdev_init_dev()
784 vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb); in vfio_ap_mdev_init_dev()
785 hash_init(matrix_mdev->qtable.queues); in vfio_ap_mdev_init_dev()
792 struct ap_matrix_mdev *matrix_mdev; in vfio_ap_mdev_probe() local
795 matrix_mdev = vfio_alloc_device(ap_matrix_mdev, vdev, &mdev->dev, in vfio_ap_mdev_probe()
797 if (IS_ERR(matrix_mdev)) in vfio_ap_mdev_probe()
798 return PTR_ERR(matrix_mdev); in vfio_ap_mdev_probe()
800 ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev); in vfio_ap_mdev_probe()
803 matrix_mdev->req_trigger = NULL; in vfio_ap_mdev_probe()
804 matrix_mdev->cfg_chg_trigger = NULL; in vfio_ap_mdev_probe()
805 dev_set_drvdata(&mdev->dev, matrix_mdev); in vfio_ap_mdev_probe()
807 list_add(&matrix_mdev->node, &matrix_dev->mdev_list); in vfio_ap_mdev_probe()
812 vfio_put_device(&matrix_mdev->vdev); in vfio_ap_mdev_probe()
816 static void vfio_ap_mdev_link_queue(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_link_queue() argument
819 if (!q || vfio_ap_mdev_get_queue(matrix_mdev, q->apqn)) in vfio_ap_mdev_link_queue()
822 q->matrix_mdev = matrix_mdev; in vfio_ap_mdev_link_queue()
823 hash_add(matrix_mdev->qtable.queues, &q->mdev_qnode, q->apqn); in vfio_ap_mdev_link_queue()
826 static void vfio_ap_mdev_link_apqn(struct ap_matrix_mdev *matrix_mdev, int apqn) in vfio_ap_mdev_link_apqn() argument
831 vfio_ap_mdev_link_queue(matrix_mdev, q); in vfio_ap_mdev_link_apqn()
841 q->matrix_mdev = NULL; in vfio_ap_unlink_mdev_fr_queue()
844 static void vfio_ap_mdev_unlink_fr_queues(struct ap_matrix_mdev *matrix_mdev) in vfio_ap_mdev_unlink_fr_queues() argument
849 for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) { in vfio_ap_mdev_unlink_fr_queues()
850 for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, in vfio_ap_mdev_unlink_fr_queues()
852 q = vfio_ap_mdev_get_queue(matrix_mdev, in vfio_ap_mdev_unlink_fr_queues()
855 q->matrix_mdev = NULL; in vfio_ap_mdev_unlink_fr_queues()
862 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev); in vfio_ap_mdev_remove() local
864 vfio_unregister_group_dev(&matrix_mdev->vdev); in vfio_ap_mdev_remove()
868 vfio_ap_mdev_reset_queues(matrix_mdev); in vfio_ap_mdev_remove()
869 vfio_ap_mdev_unlink_fr_queues(matrix_mdev); in vfio_ap_mdev_remove()
870 list_del(&matrix_mdev->node); in vfio_ap_mdev_remove()
873 vfio_put_device(&matrix_mdev->vdev); in vfio_ap_mdev_remove()
977 static int vfio_ap_mdev_validate_masks(struct ap_matrix_mdev *matrix_mdev) in vfio_ap_mdev_validate_masks() argument
979 if (ap_apqn_in_matrix_owned_by_def_drv(matrix_mdev->matrix.apm, in vfio_ap_mdev_validate_masks()
980 matrix_mdev->matrix.aqm)) in vfio_ap_mdev_validate_masks()
983 return vfio_ap_mdev_verify_no_sharing(matrix_mdev, in vfio_ap_mdev_validate_masks()
984 matrix_mdev->matrix.apm, in vfio_ap_mdev_validate_masks()
985 matrix_mdev->matrix.aqm); in vfio_ap_mdev_validate_masks()
988 static void vfio_ap_mdev_link_adapter(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_link_adapter() argument
993 for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) in vfio_ap_mdev_link_adapter()
994 vfio_ap_mdev_link_apqn(matrix_mdev, in vfio_ap_mdev_link_adapter()
998 static void collect_queues_to_reset(struct ap_matrix_mdev *matrix_mdev, in collect_queues_to_reset() argument
1005 for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS) { in collect_queues_to_reset()
1006 q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi)); in collect_queues_to_reset()
1012 static void reset_queues_for_apid(struct ap_matrix_mdev *matrix_mdev, in reset_queues_for_apid() argument
1018 collect_queues_to_reset(matrix_mdev, apid, &qlist); in reset_queues_for_apid()
1022 static int reset_queues_for_apids(struct ap_matrix_mdev *matrix_mdev, in reset_queues_for_apids() argument
1034 collect_queues_to_reset(matrix_mdev, apid, &qlist); in reset_queues_for_apids()
1080 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in assign_adapter_store() local
1083 get_update_locks_for_mdev(matrix_mdev); in assign_adapter_store()
1089 if (apid > matrix_mdev->matrix.apm_max) { in assign_adapter_store()
1094 if (test_bit_inv(apid, matrix_mdev->matrix.apm)) { in assign_adapter_store()
1099 set_bit_inv(apid, matrix_mdev->matrix.apm); in assign_adapter_store()
1101 ret = vfio_ap_mdev_validate_masks(matrix_mdev); in assign_adapter_store()
1103 clear_bit_inv(apid, matrix_mdev->matrix.apm); in assign_adapter_store()
1107 vfio_ap_mdev_link_adapter(matrix_mdev, apid); in assign_adapter_store()
1109 if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) { in assign_adapter_store()
1110 vfio_ap_mdev_update_guest_apcb(matrix_mdev); in assign_adapter_store()
1111 reset_queues_for_apids(matrix_mdev, apm_filtered); in assign_adapter_store()
1116 release_update_locks_for_mdev(matrix_mdev); in assign_adapter_store()
1124 *vfio_ap_unlink_apqn_fr_mdev(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_unlink_apqn_fr_mdev() argument
1129 q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi)); in vfio_ap_unlink_apqn_fr_mdev()
1146 static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_unlink_adapter() argument
1153 for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) { in vfio_ap_mdev_unlink_adapter()
1154 q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi); in vfio_ap_mdev_unlink_adapter()
1157 if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) && in vfio_ap_mdev_unlink_adapter()
1158 test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) in vfio_ap_mdev_unlink_adapter()
1164 static void vfio_ap_mdev_hot_unplug_adapters(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_hot_unplug_adapters() argument
1175 vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, &qlist); in vfio_ap_mdev_hot_unplug_adapters()
1177 if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm)) { in vfio_ap_mdev_hot_unplug_adapters()
1178 clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm); in vfio_ap_mdev_hot_unplug_adapters()
1185 vfio_ap_mdev_update_guest_apcb(matrix_mdev); in vfio_ap_mdev_hot_unplug_adapters()
1195 static void vfio_ap_mdev_hot_unplug_adapter(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_hot_unplug_adapter() argument
1202 vfio_ap_mdev_hot_unplug_adapters(matrix_mdev, apids); in vfio_ap_mdev_hot_unplug_adapter()
1226 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in unassign_adapter_store() local
1228 get_update_locks_for_mdev(matrix_mdev); in unassign_adapter_store()
1234 if (apid > matrix_mdev->matrix.apm_max) { in unassign_adapter_store()
1239 if (!test_bit_inv(apid, matrix_mdev->matrix.apm)) { in unassign_adapter_store()
1244 clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm); in unassign_adapter_store()
1245 vfio_ap_mdev_hot_unplug_adapter(matrix_mdev, apid); in unassign_adapter_store()
1248 release_update_locks_for_mdev(matrix_mdev); in unassign_adapter_store()
1253 static void vfio_ap_mdev_link_domain(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_link_domain() argument
1258 for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) in vfio_ap_mdev_link_domain()
1259 vfio_ap_mdev_link_apqn(matrix_mdev, in vfio_ap_mdev_link_domain()
1304 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in assign_domain_store() local
1307 get_update_locks_for_mdev(matrix_mdev); in assign_domain_store()
1313 if (apqi > matrix_mdev->matrix.aqm_max) { in assign_domain_store()
1318 if (test_bit_inv(apqi, matrix_mdev->matrix.aqm)) { in assign_domain_store()
1323 set_bit_inv(apqi, matrix_mdev->matrix.aqm); in assign_domain_store()
1325 ret = vfio_ap_mdev_validate_masks(matrix_mdev); in assign_domain_store()
1327 clear_bit_inv(apqi, matrix_mdev->matrix.aqm); in assign_domain_store()
1331 vfio_ap_mdev_link_domain(matrix_mdev, apqi); in assign_domain_store()
1333 if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) { in assign_domain_store()
1334 vfio_ap_mdev_update_guest_apcb(matrix_mdev); in assign_domain_store()
1335 reset_queues_for_apids(matrix_mdev, apm_filtered); in assign_domain_store()
1340 release_update_locks_for_mdev(matrix_mdev); in assign_domain_store()
1347 static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_unlink_domain() argument
1354 for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) { in vfio_ap_mdev_unlink_domain()
1355 q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi); in vfio_ap_mdev_unlink_domain()
1358 if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) && in vfio_ap_mdev_unlink_domain()
1359 test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) in vfio_ap_mdev_unlink_domain()
1365 static void vfio_ap_mdev_hot_unplug_domains(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_hot_unplug_domains() argument
1376 vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, &qlist); in vfio_ap_mdev_hot_unplug_domains()
1378 if (test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) { in vfio_ap_mdev_hot_unplug_domains()
1379 clear_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm); in vfio_ap_mdev_hot_unplug_domains()
1386 vfio_ap_mdev_update_guest_apcb(matrix_mdev); in vfio_ap_mdev_hot_unplug_domains()
1396 static void vfio_ap_mdev_hot_unplug_domain(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_hot_unplug_domain() argument
1403 vfio_ap_mdev_hot_unplug_domains(matrix_mdev, apqis); in vfio_ap_mdev_hot_unplug_domain()
1427 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in unassign_domain_store() local
1429 get_update_locks_for_mdev(matrix_mdev); in unassign_domain_store()
1435 if (apqi > matrix_mdev->matrix.aqm_max) { in unassign_domain_store()
1440 if (!test_bit_inv(apqi, matrix_mdev->matrix.aqm)) { in unassign_domain_store()
1445 clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm); in unassign_domain_store()
1446 vfio_ap_mdev_hot_unplug_domain(matrix_mdev, apqi); in unassign_domain_store()
1450 release_update_locks_for_mdev(matrix_mdev); in unassign_domain_store()
1475 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in assign_control_domain_store() local
1477 get_update_locks_for_mdev(matrix_mdev); in assign_control_domain_store()
1483 if (id > matrix_mdev->matrix.adm_max) { in assign_control_domain_store()
1488 if (test_bit_inv(id, matrix_mdev->matrix.adm)) { in assign_control_domain_store()
1498 set_bit_inv(id, matrix_mdev->matrix.adm); in assign_control_domain_store()
1499 if (vfio_ap_mdev_filter_cdoms(matrix_mdev)) in assign_control_domain_store()
1500 vfio_ap_mdev_update_guest_apcb(matrix_mdev); in assign_control_domain_store()
1504 release_update_locks_for_mdev(matrix_mdev); in assign_control_domain_store()
1529 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in unassign_control_domain_store() local
1531 get_update_locks_for_mdev(matrix_mdev); in unassign_control_domain_store()
1537 if (domid > matrix_mdev->matrix.adm_max) { in unassign_control_domain_store()
1542 if (!test_bit_inv(domid, matrix_mdev->matrix.adm)) { in unassign_control_domain_store()
1547 clear_bit_inv(domid, matrix_mdev->matrix.adm); in unassign_control_domain_store()
1549 if (test_bit_inv(domid, matrix_mdev->shadow_apcb.adm)) { in unassign_control_domain_store()
1550 clear_bit_inv(domid, matrix_mdev->shadow_apcb.adm); in unassign_control_domain_store()
1551 vfio_ap_mdev_update_guest_apcb(matrix_mdev); in unassign_control_domain_store()
1556 release_update_locks_for_mdev(matrix_mdev); in unassign_control_domain_store()
1566 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in control_domains_show() local
1567 unsigned long max_domid = matrix_mdev->matrix.adm_max; in control_domains_show()
1571 for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) in control_domains_show()
1612 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in matrix_show() local
1615 nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->matrix, buf); in matrix_show()
1626 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in guest_matrix_show() local
1629 nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->shadow_apcb, buf); in guest_matrix_show()
1645 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in ap_config_show() local
1648 idx += write_ap_bitmap(matrix_mdev->matrix.apm, buf, idx, ','); in ap_config_show()
1649 idx += write_ap_bitmap(matrix_mdev->matrix.aqm, buf, idx, ','); in ap_config_show()
1650 idx += write_ap_bitmap(matrix_mdev->matrix.adm, buf, idx, '\n'); in ap_config_show()
1672 static int ap_matrix_overflow_check(struct ap_matrix_mdev *matrix_mdev) in ap_matrix_overflow_check() argument
1676 for_each_set_bit_inv(bit, matrix_mdev->matrix.apm, AP_DEVICES) { in ap_matrix_overflow_check()
1677 if (bit > matrix_mdev->matrix.apm_max) in ap_matrix_overflow_check()
1681 for_each_set_bit_inv(bit, matrix_mdev->matrix.aqm, AP_DOMAINS) { in ap_matrix_overflow_check()
1682 if (bit > matrix_mdev->matrix.aqm_max) in ap_matrix_overflow_check()
1686 for_each_set_bit_inv(bit, matrix_mdev->matrix.adm, AP_DOMAINS) { in ap_matrix_overflow_check()
1687 if (bit > matrix_mdev->matrix.adm_max) in ap_matrix_overflow_check()
1708 struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev); in ap_config_store() local
1722 get_update_locks_for_mdev(matrix_mdev); in ap_config_store()
1725 ap_matrix_copy(&m_old, &matrix_mdev->matrix); in ap_config_store()
1739 ap_matrix_copy(&matrix_mdev->matrix, &m_new); in ap_config_store()
1742 rc = vfio_ap_mdev_validate_masks(matrix_mdev); in ap_config_store()
1744 ap_matrix_copy(&matrix_mdev->matrix, &m_old); in ap_config_store()
1747 rc = ap_matrix_overflow_check(matrix_mdev); in ap_config_store()
1749 ap_matrix_copy(&matrix_mdev->matrix, &m_old); in ap_config_store()
1755 ap_matrix_copy(&matrix_mdev->matrix, &m_old); in ap_config_store()
1758 vfio_ap_mdev_hot_unplug_adapters(matrix_mdev, m_removed.apm); in ap_config_store()
1759 vfio_ap_mdev_hot_unplug_domains(matrix_mdev, m_removed.aqm); in ap_config_store()
1762 ap_matrix_copy(&matrix_mdev->matrix, &m_new); in ap_config_store()
1766 vfio_ap_mdev_link_adapter(matrix_mdev, newbit); in ap_config_store()
1769 vfio_ap_mdev_link_domain(matrix_mdev, newbit); in ap_config_store()
1772 do_update = vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered); in ap_config_store()
1773 do_update |= vfio_ap_mdev_filter_cdoms(matrix_mdev); in ap_config_store()
1777 vfio_ap_mdev_update_guest_apcb(matrix_mdev); in ap_config_store()
1778 reset_queues_for_apids(matrix_mdev, apm_filtered); in ap_config_store()
1781 release_update_locks_for_mdev(matrix_mdev); in ap_config_store()
1821 static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_set_kvm() argument
1828 kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook; in vfio_ap_mdev_set_kvm()
1834 if (m != matrix_mdev && m->kvm == kvm) { in vfio_ap_mdev_set_kvm()
1841 matrix_mdev->kvm = kvm; in vfio_ap_mdev_set_kvm()
1842 vfio_ap_mdev_update_guest_apcb(matrix_mdev); in vfio_ap_mdev_set_kvm()
1850 static void unmap_iova(struct ap_matrix_mdev *matrix_mdev, u64 iova, u64 length) in unmap_iova() argument
1852 struct ap_queue_table *qtable = &matrix_mdev->qtable; in unmap_iova()
1865 struct ap_matrix_mdev *matrix_mdev = in vfio_ap_mdev_dma_unmap() local
1870 unmap_iova(matrix_mdev, iova, length); in vfio_ap_mdev_dma_unmap()
1881 static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev) in vfio_ap_mdev_unset_kvm() argument
1883 struct kvm *kvm = matrix_mdev->kvm; in vfio_ap_mdev_unset_kvm()
1893 vfio_ap_mdev_reset_queues(matrix_mdev); in vfio_ap_mdev_unset_kvm()
1895 matrix_mdev->kvm = NULL; in vfio_ap_mdev_unset_kvm()
2017 static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev) in vfio_ap_mdev_reset_queues() argument
2022 hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode) in vfio_ap_mdev_reset_queues()
2025 hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode) { in vfio_ap_mdev_reset_queues()
2055 struct ap_matrix_mdev *matrix_mdev = in vfio_ap_mdev_open_device() local
2061 return vfio_ap_mdev_set_kvm(matrix_mdev, vdev->kvm); in vfio_ap_mdev_open_device()
2066 struct ap_matrix_mdev *matrix_mdev = in vfio_ap_mdev_close_device() local
2069 vfio_ap_mdev_unset_kvm(matrix_mdev); in vfio_ap_mdev_close_device()
2075 struct ap_matrix_mdev *matrix_mdev; in vfio_ap_mdev_request() local
2077 matrix_mdev = container_of(vdev, struct ap_matrix_mdev, vdev); in vfio_ap_mdev_request()
2079 get_update_locks_for_mdev(matrix_mdev); in vfio_ap_mdev_request()
2081 if (matrix_mdev->kvm) { in vfio_ap_mdev_request()
2082 kvm_arch_crypto_clear_masks(matrix_mdev->kvm); in vfio_ap_mdev_request()
2083 signal_guest_ap_cfg_changed(matrix_mdev); in vfio_ap_mdev_request()
2086 if (matrix_mdev->req_trigger) { in vfio_ap_mdev_request()
2092 eventfd_signal(matrix_mdev->req_trigger); in vfio_ap_mdev_request()
2098 release_update_locks_for_mdev(matrix_mdev); in vfio_ap_mdev_request()
2172 static int vfio_ap_set_request_irq(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_set_request_irq() argument
2187 if (matrix_mdev->req_trigger) in vfio_ap_set_request_irq()
2188 eventfd_ctx_put(matrix_mdev->req_trigger); in vfio_ap_set_request_irq()
2189 matrix_mdev->req_trigger = NULL; in vfio_ap_set_request_irq()
2195 if (matrix_mdev->req_trigger) in vfio_ap_set_request_irq()
2196 eventfd_ctx_put(matrix_mdev->req_trigger); in vfio_ap_set_request_irq()
2198 matrix_mdev->req_trigger = req_trigger; in vfio_ap_set_request_irq()
2206 static int vfio_ap_set_cfg_change_irq(struct ap_matrix_mdev *matrix_mdev, unsigned long arg) in vfio_ap_set_cfg_change_irq() argument
2220 if (matrix_mdev->cfg_chg_trigger) in vfio_ap_set_cfg_change_irq()
2221 eventfd_ctx_put(matrix_mdev->cfg_chg_trigger); in vfio_ap_set_cfg_change_irq()
2222 matrix_mdev->cfg_chg_trigger = NULL; in vfio_ap_set_cfg_change_irq()
2228 if (matrix_mdev->cfg_chg_trigger) in vfio_ap_set_cfg_change_irq()
2229 eventfd_ctx_put(matrix_mdev->cfg_chg_trigger); in vfio_ap_set_cfg_change_irq()
2231 matrix_mdev->cfg_chg_trigger = cfg_chg_trigger; in vfio_ap_set_cfg_change_irq()
2239 static int vfio_ap_set_irqs(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_set_irqs() argument
2253 return vfio_ap_set_request_irq(matrix_mdev, arg); in vfio_ap_set_irqs()
2255 return vfio_ap_set_cfg_change_irq(matrix_mdev, arg); in vfio_ap_set_irqs()
2267 struct ap_matrix_mdev *matrix_mdev = in vfio_ap_mdev_ioctl() local
2277 ret = vfio_ap_mdev_reset_queues(matrix_mdev); in vfio_ap_mdev_ioctl()
2283 ret = vfio_ap_set_irqs(matrix_mdev, arg); in vfio_ap_mdev_ioctl()
2296 struct ap_matrix_mdev *matrix_mdev; in vfio_ap_mdev_for_queue() local
2300 list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { in vfio_ap_mdev_for_queue()
2301 if (test_bit_inv(apid, matrix_mdev->matrix.apm) && in vfio_ap_mdev_for_queue()
2302 test_bit_inv(apqi, matrix_mdev->matrix.aqm)) in vfio_ap_mdev_for_queue()
2303 return matrix_mdev; in vfio_ap_mdev_for_queue()
2316 struct ap_matrix_mdev *matrix_mdev; in status_show() local
2321 matrix_mdev = vfio_ap_mdev_for_queue(q); in status_show()
2327 if (matrix_mdev) { in status_show()
2335 if (matrix_mdev->kvm && in status_show()
2336 test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) && in status_show()
2337 test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) in status_show()
2421 struct ap_matrix_mdev *matrix_mdev; in vfio_ap_mdev_probe_queue() local
2437 matrix_mdev = get_update_locks_by_apqn(q->apqn); in vfio_ap_mdev_probe_queue()
2439 if (matrix_mdev) { in vfio_ap_mdev_probe_queue()
2440 vfio_ap_mdev_link_queue(matrix_mdev, q); in vfio_ap_mdev_probe_queue()
2449 if (!bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) || in vfio_ap_mdev_probe_queue()
2450 !bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS)) in vfio_ap_mdev_probe_queue()
2453 if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) { in vfio_ap_mdev_probe_queue()
2454 vfio_ap_mdev_update_guest_apcb(matrix_mdev); in vfio_ap_mdev_probe_queue()
2455 reset_queues_for_apids(matrix_mdev, apm_filtered); in vfio_ap_mdev_probe_queue()
2461 release_update_locks_for_mdev(matrix_mdev); in vfio_ap_mdev_probe_queue()
2474 struct ap_matrix_mdev *matrix_mdev; in vfio_ap_mdev_remove_queue() local
2479 matrix_mdev = q->matrix_mdev; in vfio_ap_mdev_remove_queue()
2483 if (matrix_mdev) { in vfio_ap_mdev_remove_queue()
2485 if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) && in vfio_ap_mdev_remove_queue()
2486 test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) { in vfio_ap_mdev_remove_queue()
2492 clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm); in vfio_ap_mdev_remove_queue()
2493 vfio_ap_mdev_update_guest_apcb(matrix_mdev); in vfio_ap_mdev_remove_queue()
2494 reset_queues_for_apid(matrix_mdev, apid); in vfio_ap_mdev_remove_queue()
2511 if (matrix_mdev) in vfio_ap_mdev_remove_queue()
2516 release_update_locks_for_mdev(matrix_mdev); in vfio_ap_mdev_remove_queue()
2557 static void vfio_ap_mdev_hot_unplug_cfg(struct ap_matrix_mdev *matrix_mdev, in vfio_ap_mdev_hot_unplug_cfg() argument
2565 do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.apm, in vfio_ap_mdev_hot_unplug_cfg()
2566 matrix_mdev->shadow_apcb.apm, in vfio_ap_mdev_hot_unplug_cfg()
2571 do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.aqm, in vfio_ap_mdev_hot_unplug_cfg()
2572 matrix_mdev->shadow_apcb.aqm, in vfio_ap_mdev_hot_unplug_cfg()
2577 do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.adm, in vfio_ap_mdev_hot_unplug_cfg()
2578 matrix_mdev->shadow_apcb.adm, in vfio_ap_mdev_hot_unplug_cfg()
2582 vfio_ap_mdev_update_guest_apcb(matrix_mdev); in vfio_ap_mdev_hot_unplug_cfg()
2602 struct ap_matrix_mdev *matrix_mdev; in vfio_ap_mdev_cfg_remove() local
2608 list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { in vfio_ap_mdev_cfg_remove()
2609 mutex_lock(&matrix_mdev->kvm->lock); in vfio_ap_mdev_cfg_remove()
2613 matrix_mdev->matrix.apm, in vfio_ap_mdev_cfg_remove()
2616 matrix_mdev->matrix.aqm, in vfio_ap_mdev_cfg_remove()
2619 matrix_mdev->matrix.adm, in vfio_ap_mdev_cfg_remove()
2623 vfio_ap_mdev_hot_unplug_cfg(matrix_mdev, aprem, aqrem, in vfio_ap_mdev_cfg_remove()
2627 mutex_unlock(&matrix_mdev->kvm->lock); in vfio_ap_mdev_cfg_remove()
2744 struct ap_matrix_mdev *matrix_mdev; in vfio_ap_mdev_cfg_add() local
2751 list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { in vfio_ap_mdev_cfg_add()
2752 bitmap_and(matrix_mdev->apm_add, in vfio_ap_mdev_cfg_add()
2753 matrix_mdev->matrix.apm, apm_add, AP_DEVICES); in vfio_ap_mdev_cfg_add()
2754 bitmap_and(matrix_mdev->aqm_add, in vfio_ap_mdev_cfg_add()
2755 matrix_mdev->matrix.aqm, aqm_add, AP_DOMAINS); in vfio_ap_mdev_cfg_add()
2756 bitmap_and(matrix_mdev->adm_add, in vfio_ap_mdev_cfg_add()
2757 matrix_mdev->matrix.adm, adm_add, AP_DEVICES); in vfio_ap_mdev_cfg_add()
2819 static void vfio_ap_mdev_hot_plug_cfg(struct ap_matrix_mdev *matrix_mdev) in vfio_ap_mdev_hot_plug_cfg() argument
2824 mutex_lock(&matrix_mdev->kvm->lock); in vfio_ap_mdev_hot_plug_cfg()
2827 filter_adapters = bitmap_intersects(matrix_mdev->matrix.apm, in vfio_ap_mdev_hot_plug_cfg()
2828 matrix_mdev->apm_add, AP_DEVICES); in vfio_ap_mdev_hot_plug_cfg()
2829 filter_domains = bitmap_intersects(matrix_mdev->matrix.aqm, in vfio_ap_mdev_hot_plug_cfg()
2830 matrix_mdev->aqm_add, AP_DOMAINS); in vfio_ap_mdev_hot_plug_cfg()
2831 filter_cdoms = bitmap_intersects(matrix_mdev->matrix.adm, in vfio_ap_mdev_hot_plug_cfg()
2832 matrix_mdev->adm_add, AP_DOMAINS); in vfio_ap_mdev_hot_plug_cfg()
2835 do_hotplug = vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered); in vfio_ap_mdev_hot_plug_cfg()
2838 do_hotplug |= vfio_ap_mdev_filter_cdoms(matrix_mdev); in vfio_ap_mdev_hot_plug_cfg()
2841 vfio_ap_mdev_update_guest_apcb(matrix_mdev); in vfio_ap_mdev_hot_plug_cfg()
2843 reset_queues_for_apids(matrix_mdev, apm_filtered); in vfio_ap_mdev_hot_plug_cfg()
2846 mutex_unlock(&matrix_mdev->kvm->lock); in vfio_ap_mdev_hot_plug_cfg()
2852 struct ap_matrix_mdev *matrix_mdev; in vfio_ap_on_scan_complete() local
2856 list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { in vfio_ap_on_scan_complete()
2857 if (bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) && in vfio_ap_on_scan_complete()
2858 bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS) && in vfio_ap_on_scan_complete()
2859 bitmap_empty(matrix_mdev->adm_add, AP_DOMAINS)) in vfio_ap_on_scan_complete()
2862 vfio_ap_mdev_hot_plug_cfg(matrix_mdev); in vfio_ap_on_scan_complete()
2863 bitmap_clear(matrix_mdev->apm_add, 0, AP_DEVICES); in vfio_ap_on_scan_complete()
2864 bitmap_clear(matrix_mdev->aqm_add, 0, AP_DOMAINS); in vfio_ap_on_scan_complete()
2865 bitmap_clear(matrix_mdev->adm_add, 0, AP_DOMAINS); in vfio_ap_on_scan_complete()
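
Most of the references above funnel through one question: does a given mediated device own the APQN (adapter/domain pair) an event refers to? That is what get_update_locks_by_apqn(), vfio_ap_mdev_for_queue() and the test_bit_inv() checks against matrix.apm/matrix.aqm are doing. Below is a minimal userspace C sketch of that ownership test, not the driver's code: the AP_MKQID/AP_QID_CARD/AP_QID_QUEUE helpers are restated here for illustration, while struct toy_matrix, set_id(), test_id() and owns_apqn() are made-up stand-ins for the kernel's structures and bit helpers, and the kernel's inverted (MSB-first) bit numbering used by test_bit_inv() is deliberately ignored for simplicity.

```c
/*
 * Minimal userspace sketch of the APQN ownership test seen in the listing
 * above (get_update_locks_by_apqn(), vfio_ap_mdev_for_queue()).  The helper
 * names below marked "toy" are illustrative stand-ins, not kernel APIs.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* An APQN packs the card (adapter) index into bits 8-15 and the queue
 * (domain) index into bits 0-7. */
#define AP_MKQID(card, queue)  ((((card) & 0xff) << 8) | ((queue) & 0xff))
#define AP_QID_CARD(qid)       (((qid) >> 8) & 0xff)
#define AP_QID_QUEUE(qid)      ((qid) & 0xff)

#define AP_DEVICES 256  /* number of possible adapters */
#define AP_DOMAINS 256  /* number of possible usage domains */

/* Simplified stand-in for struct ap_matrix: one bit per assigned adapter
 * in apm[], one bit per assigned domain in aqm[]. */
struct toy_matrix {
	uint64_t apm[AP_DEVICES / 64];
	uint64_t aqm[AP_DOMAINS / 64];
};

static void set_id(uint64_t *bm, unsigned int id)
{
	bm[id / 64] |= 1ULL << (id % 64);
}

static bool test_id(const uint64_t *bm, unsigned int id)
{
	return bm[id / 64] & (1ULL << (id % 64));
}

/* The rule visible in get_update_locks_by_apqn(): an mdev owns an APQN
 * iff both the adapter bit and the domain bit are set in its matrix. */
static bool owns_apqn(const struct toy_matrix *m, uint16_t apqn)
{
	return test_id(m->apm, AP_QID_CARD(apqn)) &&
	       test_id(m->aqm, AP_QID_QUEUE(apqn));
}

int main(void)
{
	struct toy_matrix m = { { 0 }, { 0 } };

	set_id(m.apm, 5);   /* assign adapter 0x05 */
	set_id(m.aqm, 12);  /* assign domain  0x0c */

	uint16_t owned = AP_MKQID(5, 12);
	uint16_t other = AP_MKQID(6, 12);

	printf("APQN %04x owned: %d\n", owned, owns_apqn(&m, owned)); /* 1 */
	printf("APQN %04x owned: %d\n", other, owns_apqn(&m, other)); /* 0 */
	return 0;
}
```

On top of this ownership test, the listing also shows that each mdev keeps its linked queues in a hash table keyed by APQN (hash_add(matrix_mdev->qtable.queues, &q->mdev_qnode, q->apqn)), so vfio_ap_mdev_get_queue() can resolve an owned APQN to its vfio_ap_queue with hash_for_each_possible() instead of scanning all queues.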