/*-
 * Copyright (c) 2011 NetApp, Inc.
 * Copyright (c) 2018-2022 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef VPCI_PRIV_H_
#define VPCI_PRIV_H_

#include <list.h>
#include <pci.h>

/*
 * For hypervisor-emulated PCI devices, the vMSI-X table contains at most
 * 128 entries. The vMSI-X table begins at offset 0 of the containing BAR,
 * and the vMSI-X PBA is mapped starting at offset 2 KB.
 */
#define VMSIX_MAX_TABLE_ENTRY_NUM 128U
#define VMSIX_MAX_ENTRY_TABLE_SIZE 2048U
#define VMSIX_ENTRY_TABLE_PBA_BAR_SIZE 4096U
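
/*
 * Illustrative sketch, not used elsewhere in the hypervisor: given the
 * fixed layout above, an access to the 4 KB vMSI-X page is a PBA access
 * exactly when it falls in [2 KB, 4 KB). The helper name
 * vmsix_offset_is_pba is hypothetical.
 */
static inline bool vmsix_offset_is_pba(uint32_t offset)
{
	/* vMSI-X table occupies [0, 2 KB); the PBA occupies [2 KB, 4 KB) */
	return ((offset >= VMSIX_MAX_ENTRY_TABLE_SIZE) &&
		(offset < VMSIX_ENTRY_TABLE_PBA_BAR_SIZE));
}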

static inline struct acrn_vm *vpci2vm(const struct acrn_vpci *vpci)
{
	return container_of(vpci, struct acrn_vm, vpci);
}
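
/*
 * Usage sketch: vpci2vm() relies on container_of(), so any code holding
 * &vm->vpci can recover the enclosing acrn_vm without a back pointer,
 * e.g. (assuming "vpci" is a valid pointer obtained elsewhere):
 *
 *	struct acrn_vm *vm = vpci2vm(vpci);
 */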

static inline bool is_quirk_ptdev(const struct pci_vdev *vdev)
{
	return ((vdev->flags & ACRN_PTDEV_QUIRK_ASSIGN) != 0U);
}

static inline bool in_range(uint32_t value, uint32_t lower, uint32_t len)
{
	return ((value >= lower) && (value < (lower + len)));
}

/**
 * @pre vdev != NULL
 */
static inline bool has_msix_cap(const struct pci_vdev *vdev)
{
	return (vdev->msix.capoff != 0U);
}

/**
 * @pre vdev != NULL
 */
static inline bool msixcap_access(const struct pci_vdev *vdev, uint32_t offset)
{
	return (has_msix_cap(vdev) && in_range(offset, vdev->msix.capoff, vdev->msix.caplen));
}

/**
 * @pre vdev != NULL
 */
static inline bool msixtable_access(const struct pci_vdev *vdev, uint32_t offset)
{
	return in_range(offset, vdev->msix.table_offset, vdev->msix.table_count * MSIX_TABLE_ENTRY_SIZE);
}

/**
 * @pre vdev != NULL
 */
static inline bool has_sriov_cap(const struct pci_vdev *vdev)
{
	return (vdev->sriov.capoff != 0U);
}

/**
 * @pre vdev != NULL
 */
static inline bool sriovcap_access(const struct pci_vdev *vdev, uint32_t offset)
{
	return (has_sriov_cap(vdev) && in_range(offset, vdev->sriov.capoff, vdev->sriov.caplen));
}

/**
 * @pre vdev != NULL
 */
static inline bool vbar_access(const struct pci_vdev *vdev, uint32_t offset)
{
	return is_bar_offset(vdev->nr_bars, offset);
}

static inline bool cfg_header_access(uint32_t offset)
{
	return (offset < PCI_CFG_HEADER_LENGTH);
}

/**
 * @pre vdev != NULL
 */
static inline bool has_msi_cap(const struct pci_vdev *vdev)
{
	return (vdev->msi.capoff != 0U);
}

/**
 * @pre vdev != NULL
 */
static inline bool msicap_access(const struct pci_vdev *vdev, uint32_t offset)
{
	return (has_msi_cap(vdev) && in_range(offset, vdev->msi.capoff, vdev->msi.caplen));
}

/**
 * @brief Check if the specified vdev is a zombie VF instance
 *
 * @pre The vdev is a VF instance
 *
 * @param vdev Pointer to the vdev instance
 *
 * @return true if the vdev is a zombie VF instance, false otherwise
 */
static inline bool is_zombie_vf(const struct pci_vdev *vdev)
{
	return (vdev->user == NULL);
}

void init_vdev_pt(struct pci_vdev *vdev, bool is_pf_vdev);
void deinit_vdev_pt(struct pci_vdev *vdev);
void vdev_pt_write_vbar(struct pci_vdev *vdev, uint32_t idx, uint32_t val);
void vdev_pt_map_msix(struct pci_vdev *vdev, bool hold_lock);

void init_vmsi(struct pci_vdev *vdev);
void write_vmsi_cap_reg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val);
void deinit_vmsi(const struct pci_vdev *vdev);

void init_vmsix_pt(struct pci_vdev *vdev);
int32_t add_vmsix_capability(struct pci_vdev *vdev, uint32_t entry_num, uint8_t bar_num);
void read_vmsix_cap_reg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t *val);
bool write_vmsix_cap_reg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val);
void read_pt_vmsix_cap_reg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t *val);
void write_pt_vmsix_cap_reg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val);
uint32_t rw_vmsix_table(struct pci_vdev *vdev, struct io_request *io_req);
int32_t vmsix_handle_table_mmio_access(struct io_request *io_req, void *priv_data);
bool vpci_vmsix_enabled(const struct pci_vdev *vdev);
void deinit_vmsix_pt(struct pci_vdev *vdev);

void init_vmsix_on_msi(struct pci_vdev *vdev);
void write_vmsix_cap_reg_on_msi(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val);
void remap_one_vmsix_entry_on_msi(struct pci_vdev *vdev, uint32_t index);

void init_vsriov(struct pci_vdev *vdev);
void read_sriov_cap_reg(const struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t *val);
void write_sriov_cap_reg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val);
uint32_t sriov_bar_offset(const struct pci_vdev *vdev, uint32_t bar_idx);

uint32_t pci_vdev_read_vcfg(const struct pci_vdev *vdev, uint32_t offset, uint32_t bytes);
void pci_vdev_write_vcfg(struct pci_vdev *vdev, uint32_t offset, uint32_t bytes, uint32_t val);
uint32_t vpci_add_capability(struct pci_vdev *vdev, uint8_t *capdata, uint8_t caplen);

void pci_vdev_write_vbar(struct pci_vdev *vdev, uint32_t idx, uint32_t val);

void vdev_bridge_pt_restore_space(struct pci_vdev *vdev);
void vdev_bridge_pt_restore_bus(struct pci_vdev *vdev);

void vdev_pt_hide_sriov_cap(struct pci_vdev *vdev);

int32_t check_pt_dev_pio_bars(struct pci_vdev *vdev);

typedef void (*map_pcibar)(struct pci_vdev *vdev, uint32_t bar_idx);
typedef void (*unmap_pcibar)(struct pci_vdev *vdev, uint32_t bar_idx);
void vpci_update_one_vbar(struct pci_vdev *vdev, uint32_t bar_idx, uint32_t val, map_pcibar map_cb, unmap_pcibar unmap_cb);
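
/*
 * Usage sketch: vpci_update_one_vbar() invokes the given callbacks around
 * the vBAR update. The callback names below are hypothetical, shown only
 * to illustrate the expected map_pcibar/unmap_pcibar signatures:
 *
 *	static void map_vbar_cb(struct pci_vdev *vdev, uint32_t bar_idx)
 *	{
 *		(map the guest range backing this BAR)
 *	}
 *
 *	static void unmap_vbar_cb(struct pci_vdev *vdev, uint32_t bar_idx)
 *	{
 *		(tear down any existing mapping first)
 *	}
 *
 *	vpci_update_one_vbar(vdev, bar_idx, val, map_vbar_cb, unmap_vbar_cb);
 */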
#endif /* VPCI_PRIV_H_ */