/*
 * Copyright (c) 2011 NetApp, Inc.
 * Copyright (c) 2018-2022 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <asm/guest/vm.h>
#include <asm/io.h>
#include <errno.h>
#include <vpci.h>
#include <asm/guest/ept.h>
#include <asm/mmu.h>
#include <logmsg.h>
#include "vpci_priv.h"

/**
 * @brief Read a register in the MSI-X capability structure
 *
 * @pre vdev != NULL
 * @pre vdev->pdev != NULL
 */
void read_vmsix_cap_reg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t *val)
{
	static const uint8_t msix_pt_mask[12U] = {
		0x0U, 0x0U, 0xffU, 0xffU };	/* Only PT MSI-X Message Control Register */
	uint32_t virt, phy = 0U, ctrl, pt_mask = 0U;

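	/*
	 * Read the virtual register first, then overlay the bytes that are
	 * passed through from hardware. Per byte, pt_mask selects the source:
	 * only Message Control (bytes 2-3 of the capability) comes from the
	 * physical device; every other byte is fully virtualized.
	 */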
	virt = pci_vdev_read_vcfg(vdev, offset, bytes);
	(void)memcpy_s((void *)&pt_mask, bytes, (void *)&msix_pt_mask[offset - vdev->msix.capoff], bytes);
	if (pt_mask != 0U) {
		phy = pci_pdev_read_cfg(vdev->pdev->bdf, offset, bytes);
		ctrl = pci_pdev_read_cfg(vdev->pdev->bdf, vdev->msix.capoff + PCIR_MSIX_CTRL, 2U);
		if (((ctrl & PCIM_MSIXCTRL_TABLE_SIZE) + 1U) != vdev->msix.table_count) {
			vdev->msix.table_count = (ctrl & PCIM_MSIXCTRL_TABLE_SIZE) + 1U;
			pr_info("%s reprogram MSI-X Table Size to %d\n", __func__, vdev->msix.table_count);
			/* The resized table still fits in one page, so the MSI-X EPT mapping need not be redone. */
			ASSERT(vdev->msix.table_count <= (PAGE_SIZE / MSIX_TABLE_ENTRY_SIZE), "");
		}
	}

	*val = (virt & ~pt_mask) | (phy & pt_mask);
}

/**
 * @brief Write a register in the MSI-X capability structure
 *
 * @pre vdev != NULL
 * @pre vdev->pdev != NULL
 */
bool write_vmsix_cap_reg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val)
{
	static const uint8_t msix_ro_mask[12U] = {
		0xffU, 0xffU, 0xffU, 0x3fU,	/* Only Function Mask and MSI-X Enable writable */
		0xffU, 0xffU, 0xffU, 0xffU,
		0xffU, 0xffU, 0xffU, 0xffU };
	bool is_written = false;
	uint32_t old, ro_mask = ~0U;

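	/*
	 * ro_mask marks the read-only bits of this access, per byte. Byte 3
	 * (0x3fU) leaves only Function Mask (bit 30) and MSI-X Enable (bit 31)
	 * of the first dword writable; every other capability byte is
	 * read-only from the guest's point of view.
	 */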
	(void)memcpy_s((void *)&ro_mask, bytes, (void *)&msix_ro_mask[offset - vdev->msix.capoff], bytes);
	if (ro_mask != ~0U) {
		old = pci_vdev_read_vcfg(vdev, offset, bytes);
		pci_vdev_write_vcfg(vdev, offset, bytes, (old & ro_mask) | (val & ~ro_mask));
		is_written = true;
	}

	return is_written;
}

/**
 * @pre vdev != NULL
 * @pre io_req != NULL
 * @pre mmio->address >= vdev->msix.mmio_gpa
 */
uint32_t rw_vmsix_table(struct pci_vdev *vdev, struct io_request *io_req)
{
	struct acrn_mmio_request *mmio = &io_req->reqs.mmio_request;
	struct msix_table_entry *entry;
	uint32_t entry_offset, table_offset, index = CONFIG_MAX_MSIX_TABLE_NUM;
	uint64_t offset;
	void *hva;

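	/*
	 * Route the access: a hit inside the virtual MSI-X table is served
	 * from the vdev's shadow entries; any other offset on the page is
	 * forwarded to physical MMIO for a passthrough device, or reads as
	 * zero for a purely virtual one. index keeps its sentinel value of
	 * CONFIG_MAX_MSIX_TABLE_NUM unless a table entry is accessed.
	 */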
	if ((mmio->size <= 8U) && mem_aligned_check(mmio->address, mmio->size)) {
		offset = mmio->address - vdev->msix.mmio_gpa;
		if (msixtable_access(vdev, (uint32_t)offset)) {
			/* Must be a full DWORD or full QWORD access. */
			if ((mmio->size == 4U) || (mmio->size == 8U)) {
				table_offset = (uint32_t)(offset - vdev->msix.table_offset);
				index = table_offset / MSIX_TABLE_ENTRY_SIZE;

				entry = &vdev->msix.table_entries[index];
				entry_offset = table_offset % MSIX_TABLE_ENTRY_SIZE;

				if (mmio->direction == ACRN_IOREQ_DIR_READ) {
					(void)memcpy_s(&mmio->value, (size_t)mmio->size,
							(void *)entry + entry_offset, (size_t)mmio->size);
				} else {
					(void)memcpy_s((void *)entry + entry_offset, (size_t)mmio->size,
							&mmio->value, (size_t)mmio->size);
				}
			} else {
				pr_err("%s, Only DWORD and QWORD accesses are permitted", __func__);
			}
		} else {
			if (vdev->pdev != NULL) {
				hva = hpa2hva(vdev->msix.mmio_hpa + (mmio->address - vdev->msix.mmio_gpa));
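				/* stac()/clac() set/clear RFLAGS.AC around the access so SMAP permits it */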
				stac();
				if (mmio->direction == ACRN_IOREQ_DIR_READ) {
					mmio->value = mmio_read(hva, mmio->size);
				} else {
					mmio_write(hva, mmio->size, mmio->value);
				}
				clac();
			} else {
				if (mmio->direction == ACRN_IOREQ_DIR_READ) {
					mmio->value = 0UL;
				}
			}
		}
	}

	return index;
}

/**
 * @pre io_req != NULL
 * @pre priv_data != NULL
 */
int32_t vmsix_handle_table_mmio_access(struct io_request *io_req, void *priv_data)
{
	(void)rw_vmsix_table((struct pci_vdev *)priv_data, io_req);
	return 0;
}

/**
 * @pre vdev != NULL
 */
int32_t add_vmsix_capability(struct pci_vdev *vdev, uint32_t entry_num, uint8_t bar_num)
{
	uint32_t table_size, i;
	struct msixcap msixcap;
	int32_t ret = -1;

	if ((bar_num < PCI_BAR_COUNT) &&
		(entry_num <= min(CONFIG_MAX_MSIX_TABLE_NUM, VMSIX_MAX_TABLE_ENTRY_NUM))) {

		table_size = VMSIX_MAX_ENTRY_TABLE_SIZE;

		vdev->msix.caplen = MSIX_CAPLEN;
		vdev->msix.table_bar = bar_num;
		vdev->msix.table_offset = 0U;
		vdev->msix.table_count = entry_num;

		/* Set the Mask bit in each entry's Vector Control register, matching the MSI-X reset state */
		for (i = 0; i < entry_num; i++) {
			vdev->msix.table_entries[i].vector_control |= PCIM_MSIX_VCTRL_MASK;
		}

		(void)memset(&msixcap, 0U, sizeof(struct msixcap));

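		/* Message Control's Table Size field is encoded as N - 1 */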
		msixcap.capid = PCIY_MSIX;
		msixcap.msgctrl = (uint16_t)entry_num - 1U;

		/*
		 * Table Offset/BIR layout: the low 3 bits select the BAR, the
		 * upper bits give the offset. The MSI-X table starts at offset
		 * 0; the PBA follows the maximum-size table.
		 */
		msixcap.table_info = bar_num;
		msixcap.pba_info = table_size | bar_num;

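		/* vpci_add_capability() returns the offset of the new capability, or 0U if it could not be added */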
		vdev->msix.capoff = vpci_add_capability(vdev, (uint8_t *)(&msixcap), sizeof(struct msixcap));
		if (vdev->msix.capoff != 0U) {
			ret = 0;
		}
	}
	return ret;
}