/*
 * Copyright (c) 2011 NetApp, Inc.
 * Copyright (c) 2018-2022 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <asm/guest/vm.h>
#include <ptdev.h>
#include <asm/guest/assign.h>
#include <vpci.h>
#include <asm/vtd.h>
#include "vpci_priv.h"


/**
 * @pre vdev != NULL
 * @pre vdev->pdev != NULL
 */
static inline void enable_disable_msi(const struct pci_vdev *vdev, bool enable)
{
	union pci_bdf pbdf = vdev->pdev->bdf;
	uint32_t capoff = vdev->msi.capoff;
	uint32_t msgctrl = pci_pdev_read_cfg(pbdf, capoff + PCIR_MSI_CTRL, 2U);

	if (enable) {
		msgctrl |= PCIM_MSICTRL_MSI_ENABLE;
	} else {
		msgctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
	}
	pci_pdev_write_cfg(pbdf, capoff + PCIR_MSI_CTRL, 2U, msgctrl);
}

/**
 * @brief Remap vMSI virtual address and data to MSI physical address and data
 * This function is called when physical MSI is disabled.
 *
 * @pre vdev != NULL
 * @pre vdev->vpci != NULL
 * @pre vdev->pdev != NULL
 */
static void remap_vmsi(const struct pci_vdev *vdev)
{
	struct msi_info info = {};
	union pci_bdf pbdf = vdev->pdev->bdf;
	struct acrn_vm *vm = vpci2vm(vdev->vpci);
	uint32_t capoff = vdev->msi.capoff;
	uint32_t vmsi_msgdata, vmsi_addrlo, vmsi_addrhi = 0U;

	/* Read the MSI capability structure from virtual device */
	vmsi_addrlo = pci_vdev_read_vcfg(vdev, (capoff + PCIR_MSI_ADDR), 4U);
	if (vdev->msi.is_64bit) {
		vmsi_addrhi = pci_vdev_read_vcfg(vdev, (capoff + PCIR_MSI_ADDR_HIGH), 4U);
		vmsi_msgdata = pci_vdev_read_vcfg(vdev, (capoff + PCIR_MSI_DATA_64BIT), 2U);
	} else {
		vmsi_msgdata = pci_vdev_read_vcfg(vdev, (capoff + PCIR_MSI_DATA), 2U);
	}
	info.addr.full = (uint64_t)vmsi_addrlo | ((uint64_t)vmsi_addrhi << 32U);
	info.data.full = vmsi_msgdata;

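	/* Ask the passthrough interrupt (ptirq) layer to translate the guest MSI
	 * address/data into host values; program the physical capability only
	 * when the remap succeeds. */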
	if (ptirq_prepare_msix_remap(vm, vdev->bdf.value, pbdf.value, 0U, &info, INVALID_IRTE_ID) == 0) {
		pci_pdev_write_cfg(pbdf, capoff + PCIR_MSI_ADDR, 0x4U, (uint32_t)info.addr.full);
		if (vdev->msi.is_64bit) {
			pci_pdev_write_cfg(pbdf, capoff + PCIR_MSI_ADDR_HIGH, 0x4U,
					(uint32_t)(info.addr.full >> 32U));
			pci_pdev_write_cfg(pbdf, capoff + PCIR_MSI_DATA_64BIT, 0x2U, (uint16_t)info.data.full);
		} else {
			pci_pdev_write_cfg(pbdf, capoff + PCIR_MSI_DATA, 0x2U, (uint16_t)info.data.full);
		}

		/* If MSI Enable is being set, make sure INTxDIS bit is set */
		enable_disable_pci_intx(pbdf, false);
		enable_disable_msi(vdev, true);
	}
}

/**
 * @brief Write a register in the MSI Capability Structure of a virtual device
 *
 * @pre vdev != NULL
 */
void write_vmsi_cap_reg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val)
{
	/* Capability ID, Next Capability Pointer and Message Control
	 * (Except MSI Enable bit and Multiple Message Enable) are RO */
	static const uint8_t msi_ro_mask[0xEU] = { 0xffU, 0xffU, 0x8eU, 0xffU };
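	/* The remaining bytes of msi_ro_mask are zero-initialized, so the Message
	 * Address and Message Data registers stay fully writable by the guest. */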
	uint32_t msgctrl, old, ro_mask = ~0U;

	(void)memcpy_s((void *)&ro_mask, bytes, (void *)&msi_ro_mask[offset - vdev->msi.capoff], bytes);
	if (ro_mask != ~0U) {
		enable_disable_msi(vdev, false);

		old = pci_vdev_read_vcfg(vdev, offset, bytes);
		pci_vdev_write_vcfg(vdev, offset, bytes, (old & ro_mask) | (val & ~ro_mask));

		msgctrl = pci_vdev_read_vcfg(vdev, vdev->msi.capoff + PCIR_MSI_CTRL, 2U);
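		/* Remap only when the guest has enabled MSI with a single vector
		 * (Multiple Message Enable field is zero). */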
		if ((msgctrl & (PCIM_MSICTRL_MSI_ENABLE | PCIM_MSICTRL_MME_MASK)) == PCIM_MSICTRL_MSI_ENABLE) {
			remap_vmsi(vdev);
		}
	}
}

/**
 * @pre vdev != NULL
 * @pre vdev->vpci != NULL
 * @pre vdev->pdev != NULL
 */
void deinit_vmsi(const struct pci_vdev *vdev)
{
	if (has_msi_cap(vdev)) {
		ptirq_remove_msix_remapping(vpci2vm(vdev->vpci), vdev->pdev->bdf.value, 1U);
	}
}

/**
 * @pre vdev != NULL
 * @pre vdev->pdev != NULL
 */
void init_vmsi(struct pci_vdev *vdev)
{
	struct pci_pdev *pdev = vdev->pdev;
	uint32_t val;

	vdev->msi.capoff = pdev->msi_capoff;

	if (has_msi_cap(vdev)) {
		val = pci_pdev_read_cfg(pdev->bdf, vdev->msi.capoff, 4U);
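		/* Capability length is 0xE bytes with 64-bit address support, 0xA bytes otherwise */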
		vdev->msi.caplen = ((val & (PCIM_MSICTRL_64BIT << 16U)) != 0U) ? 0xEU : 0xAU;
		vdev->msi.is_64bit = ((val & (PCIM_MSICTRL_64BIT << 16U)) != 0U);

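		/* Expose only single-vector MSI to the guest: clear the Multiple Message
		 * Capable and Multiple Message Enable fields in the virtual capability. */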
		val &= ~((uint32_t)PCIM_MSICTRL_MMC_MASK << 16U);
		val &= ~((uint32_t)PCIM_MSICTRL_MME_MASK << 16U);

		pci_vdev_write_vcfg(vdev, vdev->msi.capoff, 4U, val);
	}
}