/*
 * Copyright (C) 2020-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <asm/guest/vm.h>
#include <asm/guest/ept.h>
#include <vpci.h>
#include <logmsg.h>
#include <vmcs9900.h>
#include "vpci_priv.h"
#include <errno.h>

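/* BAR0 exposes the emulated UART register block; BAR1 holds the MSI-X table. */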
#define MCS9900_MMIO_BAR 0U
#define MCS9900_MSIX_BAR 1U

/*
 * @pre vdev != NULL
 */
void trigger_vmcs9900_msix(struct pci_vdev *vdev)
{
	struct acrn_vm *vm = vpci2vm(vdev->vpci);
	int32_t ret = -1;
	struct msix_table_entry *entry = &vdev->msix.table_entries[0];

	ret = vlapic_inject_msi(vm, entry->addr, entry->data);

	if (ret != 0) {
		pr_warn("%2x:%2x.%d failed to inject MSI, msi_addr:0x%lx msi_data:0x%x",
			vdev->bdf.bits.b, vdev->bdf.bits.d, vdev->bdf.bits.f, entry->addr, entry->data);
	}
}

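/*
 * @pre vdev != NULL
 * @pre val != NULL
 */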
static int32_t read_vmcs9900_cfg(struct pci_vdev *vdev,
	uint32_t offset, uint32_t bytes, uint32_t *val)
{
	*val = pci_vdev_read_vcfg(vdev, offset, bytes);
	return 0;
}

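/*
 * MMIO emulation handler for the vUART register window (BAR0): forwards
 * guest reads and writes to the underlying acrn_vuart registers.
 * @pre io_req != NULL
 * @pre data != NULL
 */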
static int32_t vmcs9900_mmio_handler(struct io_request *io_req, void *data)
{
	struct acrn_mmio_request *mmio = &io_req->reqs.mmio_request;
	struct pci_vdev *vdev = (struct pci_vdev *)data;
	struct acrn_vuart *vu = vdev->priv_data;
	struct pci_vbar *vbar = &vdev->vbars[MCS9900_MMIO_BAR];
	uint16_t offset;

	offset = (uint16_t)(mmio->address - vbar->base_gpa);

	if (mmio->direction == ACRN_IOREQ_DIR_READ) {
		mmio->value = vuart_read_reg(vu, offset);
	} else {
		vuart_write_reg(vu, offset, (uint8_t)mmio->value);
	}
	return 0;
}

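/*
 * Map a vBAR: register the MMIO emulation handler for the BAR range and
 * remove the EPT mapping so that guest accesses trap into the hypervisor.
 * @pre vdev != NULL
 */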
static void map_vmcs9900_vbar(struct pci_vdev *vdev, uint32_t idx)
{
	struct acrn_vuart *vu = vdev->priv_data;
	struct acrn_vm *vm = vpci2vm(vdev->vpci);
	struct pci_vbar *vbar = &vdev->vbars[idx];

	if ((idx == MCS9900_MMIO_BAR) && (vbar->base_gpa != 0UL)) {
		register_mmio_emulation_handler(vm, vmcs9900_mmio_handler,
			vbar->base_gpa, vbar->base_gpa + vbar->size, vdev, false);
		ept_del_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, vbar->base_gpa, vbar->size);
		vu->active = true;
	} else if ((idx == MCS9900_MSIX_BAR) && (vbar->base_gpa != 0UL)) {
		register_mmio_emulation_handler(vm, vmsix_handle_table_mmio_access, vbar->base_gpa,
			(vbar->base_gpa + vbar->size), vdev, false);
		ept_del_mr(vm, (uint64_t *)vm->arch_vm.nworld_eptp, vbar->base_gpa, vbar->size);
		vdev->msix.mmio_gpa = vbar->base_gpa;
	} else {
		/* No action required. */
	}
}

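/*
 * Unmap a vBAR: mark the vUART inactive when BAR0 goes away, then drop the
 * MMIO emulation handler covering the BAR range.
 * @pre vdev != NULL
 */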
static void unmap_vmcs9900_vbar(struct pci_vdev *vdev, uint32_t idx)
{
	struct acrn_vuart *vu = vdev->priv_data;
	struct acrn_vm *vm = vpci2vm(vdev->vpci);
	struct pci_vbar *vbar = &vdev->vbars[idx];

	if ((idx == MCS9900_MMIO_BAR) && (vbar->base_gpa != 0UL)) {
		vu->active = false;
	}
	unregister_mmio_emulation_handler(vm, vbar->base_gpa, vbar->base_gpa + vbar->size);
}

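/*
 * Config space write handler: BAR writes trigger a remap/unmap cycle,
 * MSI-X capability writes are emulated, and everything else is written
 * straight to the virtual config space.
 * @pre vdev != NULL
 */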
static int32_t write_vmcs9900_cfg(struct pci_vdev *vdev, uint32_t offset,
	uint32_t bytes, uint32_t val)
{
	if (vbar_access(vdev, offset)) {
		vpci_update_one_vbar(vdev, pci_bar_index(offset), val,
			map_vmcs9900_vbar, unmap_vmcs9900_vbar);
	} else if (msixcap_access(vdev, offset)) {
		write_vmsix_cap_reg(vdev, offset, bytes, val);
	} else {
		pci_vdev_write_vcfg(vdev, offset, bytes, val);
	}

	return 0;
}

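/*
 * Initialize the emulated MCS9900: fill in the config space identity, add
 * the MSI-X capability and set up the two 4KiB 32-bit memory BARs, then
 * attach the acrn_vuart selected by the configured vuart_idx.
 * @pre vdev != NULL
 */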
static void init_vmcs9900(struct pci_vdev *vdev)
{
	struct acrn_vm_pci_dev_config *pci_cfg = vdev->pci_dev_config;
	struct acrn_vm *vm = vpci2vm(vdev->vpci);
	struct pci_vbar *mmio_vbar = &vdev->vbars[MCS9900_MMIO_BAR];
	struct pci_vbar *msix_vbar = &vdev->vbars[MCS9900_MSIX_BAR];
	struct acrn_vuart *vu = &vm->vuart[pci_cfg->vuart_idx];

	/* 8250-PCI compatible device */
	pci_vdev_write_vcfg(vdev, PCIR_VENDOR, 2U, MCS9900_VENDOR);
	pci_vdev_write_vcfg(vdev, PCIR_DEVICE, 2U, MCS9900_DEV);
	pci_vdev_write_vcfg(vdev, PCIR_CLASS, 1U, PCIC_SIMPLECOMM);
	pci_vdev_write_vcfg(vdev, PCIV_SUB_SYSTEM_ID, 2U, 0x1000U);
	pci_vdev_write_vcfg(vdev, PCIV_SUB_VENDOR_ID, 2U, 0xa000U);
	pci_vdev_write_vcfg(vdev, PCIR_SUBCLASS, 1U, 0x0U);
	pci_vdev_write_vcfg(vdev, PCIR_CLASS_CODE, 1U, 0x2U);

	add_vmsix_capability(vdev, 1, MCS9900_MSIX_BAR);

	/* initialize vuart-pci mem bar */
	mmio_vbar->size = 0x1000U;
	mmio_vbar->base_gpa = pci_cfg->vbar_base[MCS9900_MMIO_BAR];
	mmio_vbar->mask = (uint32_t)(~(mmio_vbar->size - 1UL));
	mmio_vbar->bar_type.bits = PCIM_BAR_MEM_32;

	/* initialize vuart-pci msix bar */
	msix_vbar->size = 0x1000U;
	msix_vbar->base_gpa = pci_cfg->vbar_base[MCS9900_MSIX_BAR];
	msix_vbar->mask = (uint32_t)(~(msix_vbar->size - 1UL));
	msix_vbar->bar_type.bits = PCIM_BAR_MEM_32;

	vdev->nr_bars = 2;

	pci_vdev_write_vbar(vdev, MCS9900_MMIO_BAR, mmio_vbar->base_gpa);
	pci_vdev_write_vbar(vdev, MCS9900_MSIX_BAR, msix_vbar->base_gpa);

	/* init acrn_vuart */
	pr_info("init acrn_vuart[%d]", pci_cfg->vuart_idx);
	vdev->priv_data = vu;
	init_pci_vuart(vdev);

	vdev->user = vdev;
}

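/*
 * Tear down the vUART backing this device and detach it from its user.
 * @pre vdev != NULL
 */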
static void deinit_vmcs9900(struct pci_vdev *vdev)
{
	deinit_pci_vuart(vdev);
	vdev->user = NULL;
}

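/* Device callbacks registered with the generic vPCI emulation layer. */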
const struct pci_vdev_ops vmcs9900_ops = {
	.init_vdev = init_vmcs9900,
	.deinit_vdev = deinit_vmcs9900,
	.write_vdev_cfg = write_vmcs9900_cfg,
	.read_vdev_cfg = read_vmcs9900_cfg,
};

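/*
 * Create a vMCS9900 device for the vUART index carried in dev->args: find
 * the matching pci_dev_config, patch in the BDF and BAR bases requested by
 * the caller, then instantiate the vdev under the vPCI lock.
 * @pre vm != NULL
 * @pre dev != NULL
 */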
int32_t create_vmcs9900_vdev(struct acrn_vm *vm, struct acrn_vdev *dev)
{
	uint16_t i;
	struct pci_vdev *vdev;
	struct acrn_vm_config *vm_config = get_vm_config(vm->vm_id);
	struct acrn_vm_pci_dev_config *dev_config = NULL;
	int32_t ret = -EINVAL;
	uint16_t vuart_idx = *((uint16_t *)(dev->args));

	for (i = 0U; i < vm_config->pci_dev_num; i++) {
		dev_config = &vm_config->pci_devs[i];
		if (dev_config->vuart_idx == vuart_idx) {
			dev_config->vbdf.value = (uint16_t)dev->slot;
			dev_config->vbar_base[0] = (uint64_t)dev->io_addr[0];
			dev_config->vbar_base[1] = (uint64_t)dev->io_addr[1];
			spinlock_obtain(&vm->vpci.lock);
			vdev = vpci_init_vdev(&vm->vpci, dev_config, NULL);
			spinlock_release(&vm->vpci.lock);
			if (vdev != NULL) {
				ret = 0;
			}
			break;
		}
	}

	if (ret != 0) {
		pr_err("Failed to create vmcs9900 vdev for VM%d, vuart_idx=%d", vm->vm_id, vuart_idx);
	}

	return ret;
}

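/*
 * Destroy a vMCS9900 device: unmap every BAR, shut down the vUART backend
 * and remove the vdev from the vPCI framework under the vPCI lock.
 * @pre vdev != NULL
 */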
int32_t destroy_vmcs9900_vdev(struct pci_vdev *vdev)
{
	uint32_t i;
	struct acrn_vpci *vpci = vdev->vpci;

	for (i = 0U; i < vdev->nr_bars; i++) {
		vpci_update_one_vbar(vdev, i, 0U, NULL, unmap_vmcs9900_vbar);
	}

	deinit_pci_vuart(vdev);

	spinlock_obtain(&vpci->lock);
	vpci_deinit_vdev(vdev);
	spinlock_release(&vpci->lock);

	return 0;
}