/*
 * Copyright (c) 2024 Antmicro <www.antmicro.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/device.h>
#include <zephyr/drivers/pcie/pcie.h>
#include <zephyr/kernel/mm.h>
#include <zephyr/logging/log.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys/barrier.h>
#include <zephyr/sys/byteorder.h>
#include <zephyr/drivers/virtio.h>
#include <zephyr/drivers/virtio/virtqueue.h>
#include "virtio_common.h"
#include <assert.h>

#define DT_DRV_COMPAT virtio_pci

LOG_MODULE_REGISTER(virtio_pci, CONFIG_VIRTIO_LOG_LEVEL);

/*
 * Based on Virtual I/O Device (VIRTIO) Version 1.3 specification:
 * https://docs.oasis-open.org/virtio/virtio/v1.3/csd01/virtio-v1.3-csd01.pdf
 */

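/* Generic virtio structure PCI capability layout, as defined in spec 4.1.4 */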
struct virtio_pci_cap {
	uint8_t cap_vndr;
	uint8_t cap_next;
	uint8_t cap_len;
	uint8_t cfg_type;
	uint8_t bar;
	uint8_t id;
	uint8_t pad[2];
	uint32_t offset;
	uint32_t length;
};

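/*
 * Notify capability; extends the generic capability with the multiplier used
 * to compute per-queue notification offsets (spec 4.1.4.4)
 */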
struct virtio_pci_notify_cap {
	struct virtio_pci_cap cap;
	uint32_t notify_off_multiplier;
};

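/* Common configuration structure layout, as defined in spec 4.1.4.3 */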
struct virtio_pci_common_cfg {
	uint32_t device_feature_select; /* read-write */
	uint32_t device_feature;        /* read-only for driver */
	uint32_t driver_feature_select; /* read-write */
	uint32_t driver_feature;        /* read-write */
	uint16_t config_msix_vector;    /* read-write */
	uint16_t num_queues;            /* read-only for driver */
	uint8_t device_status;          /* read-write */
	uint8_t config_generation;      /* read-only for driver */

	uint16_t queue_select;      /* read-write */
	uint16_t queue_size;        /* read-write */
	uint16_t queue_msix_vector; /* read-write */
	uint16_t queue_enable;      /* read-write */
	uint16_t queue_notify_off;  /* read-only for driver */
	uint64_t queue_desc;        /* read-write */
	uint64_t queue_driver;      /* read-write */
	uint64_t queue_device;      /* read-write */
	uint16_t queue_notify_data; /* read-only for driver */
	uint16_t queue_reset;       /* read-write */
};

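/* Values of the cfg_type field, identifying the capability type (spec 4.1.4) */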
#define VIRTIO_PCI_CAP_COMMON_CFG        1
#define VIRTIO_PCI_CAP_NOTIFY_CFG        2
#define VIRTIO_PCI_CAP_ISR_CFG           3
#define VIRTIO_PCI_CAP_DEVICE_CFG        4
#define VIRTIO_PCI_CAP_PCI_CFG           5
#define VIRTIO_PCI_CAP_SHARED_MEMORY_CFG 8
#define VIRTIO_PCI_CAP_VENDOR_CFG        9

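/*
 * Registers in the PCI configuration space, given as 32-bit word offsets for
 * pcie_conf_read(), and masks used while walking the capability list
 */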
#define CAPABILITY_LIST_VALID_BIT 4
#define STATUS_COMMAND_REG        0x1
#define CAPABILITIES_POINTER_REG  0xd
#define CAPABILITIES_POINTER_MASK 0xfc

#define VIRTIO_PCI_MSIX_NO_VECTOR 0xffff

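/*
 * Driver instance state: pointers to the mapped capability regions and to the
 * created virtqueues
 */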
struct virtio_pci_data {
	volatile struct virtio_pci_common_cfg *common_cfg;
	void *device_specific_cfg;
	volatile uint8_t *isr_status;
	volatile uint8_t *notify_cfg;
	uint32_t notify_off_multiplier;

	struct virtq *virtqueues;
	uint16_t virtqueue_count;

	struct k_spinlock isr_lock;
	struct k_spinlock notify_lock;
};

struct virtio_pci_config {
	struct pcie_dev *pcie;
};

/*
 * Even though a virtio device is exposed as a PCI device, it's not a physical
 * one, so we don't have to care about cache flushing/invalidating like we
 * would with a real device that may write to memory from the outside.
 * Whatever is written/read to shared memory by the virtio device will be
 * written/read by a hypervisor running on the same CPU as the Zephyr guest,
 * so the caches stay coherent
 */

void virtio_pci_isr(const struct device *dev)
{
	struct virtio_pci_data *data = dev->data;
	k_spinlock_key_t key = k_spin_lock(&data->isr_lock);

	virtio_isr(dev, *data->isr_status, data->virtqueue_count);

	k_spin_unlock(&data->isr_lock, key);
}

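/*
 * Searches the PCI capability list for a virtio capability with the given
 * cfg_type and copies it into cap_struct. cap_struct_size must match the
 * capability's cap_len; trailing fields beyond struct virtio_pci_cap (like
 * notify_off_multiplier in struct virtio_pci_notify_cap) are read as well.
 * Returns false if the device has no such capability
 */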
static bool virtio_pci_read_cap(
	pcie_bdf_t bdf, uint8_t cfg_type, void *cap_struct, size_t cap_struct_size)
{
	struct virtio_pci_cap tmp;
	uint16_t status = (pcie_conf_read(bdf, STATUS_COMMAND_REG) & GENMASK(31, 16)) >> 16;

	if (!(status & BIT(CAPABILITY_LIST_VALID_BIT))) {
		LOG_ERR("no capability list for device with bdf 0x%x", bdf);
		return false;
	}

	uint32_t cap_ptr =
		pcie_conf_read(bdf, CAPABILITIES_POINTER_REG) & CAPABILITIES_POINTER_MASK;
	uint32_t cap_off = cap_ptr / sizeof(uint32_t);

	/*
	 * Every capability structure has a size and alignment that are
	 * multiples of 4 bytes, so pcie_conf_read() can be used directly,
	 * without extra alignment handling
	 */
	do {
		for (int i = 0; i < sizeof(struct virtio_pci_cap) / sizeof(uint32_t); i++) {
			((uint32_t *)&tmp)[i] = pcie_conf_read(bdf, cap_off + i);
		}
		if (tmp.cfg_type == cfg_type) {
			assert(tmp.cap_len == cap_struct_size);
			size_t extra_data_words =
				(tmp.cap_len - sizeof(struct virtio_pci_cap)) / sizeof(uint32_t);
			size_t extra_data_offset =
				cap_off + sizeof(struct virtio_pci_cap) / sizeof(uint32_t);
			uint32_t *extra_data =
				(uint32_t *)((struct virtio_pci_cap *)cap_struct + 1);

			*(struct virtio_pci_cap *)cap_struct = tmp;

			for (int i = 0; i < extra_data_words; i++) {
				extra_data[i] = pcie_conf_read(bdf, extra_data_offset + i);
			}

			return true;
		}

		cap_off = (tmp.cap_next & 0xfc) / sizeof(uint32_t);
	} while (cap_off != 0);

	return false;
}

static void virtio_pci_reset(const struct device *dev)
{
	struct virtio_pci_data *data = dev->data;

	/*
	 * According to spec 4.1.4.3.1 and spec 2.4.2, to reset the device we
	 * must write 0 to the device_status register and wait until we read 0
	 * back, which means that the reset is complete
	 */
	data->common_cfg->device_status = 0;

	while (data->common_cfg->device_status != 0) {
	}
}

static void virtio_pci_notify_queue(const struct device *dev, uint16_t queue_idx)
{
	struct virtio_pci_data *data = dev->data;

	k_spinlock_key_t key = k_spin_lock(&data->notify_lock);

	data->common_cfg->queue_select = sys_cpu_to_le16(queue_idx);
	barrier_dmem_fence_full();
	/*
	 * Because we currently don't negotiate VIRTIO_F_NOTIFICATION_DATA or
	 * VIRTIO_F_NOTIF_CONFIG_DATA, to notify a queue we have to write its
	 * index to notify_cfg at the offset
	 * cap.offset + queue_notify_off * notify_off_multiplier,
	 * which here reduces to queue_notify_off * notify_off_multiplier,
	 * because data->notify_cfg was mapped in virtio_pci_map_cap() to start
	 * at cap.offset. See spec 4.1.4.4 for the offset formula, and spec
	 * 4.1.5.2 and spec 4.1.5.2.1 for the value written
	 */
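	/*
	 * For example (hypothetical values): with notify_off_multiplier == 4
	 * and queue_notify_off == 2 for the selected queue, the notification
	 * address would be notify_cfg + 2 * 4 = notify_cfg + 8
	 */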
	size_t notify_off =
		sys_le16_to_cpu(data->common_cfg->queue_notify_off) * data->notify_off_multiplier;
	volatile uint16_t *notify_addr = (uint16_t *)(data->notify_cfg + notify_off);

	*notify_addr = sys_cpu_to_le16(queue_idx);

	k_spin_unlock(&data->notify_lock, key);
}

/*
 * According to spec 4.1.3.1, the PCI virtio driver must use n-byte accesses
 * for n-byte fields, except for 64-bit fields, where 32-bit accesses have to
 * be used, so we use this function to write 64-bit values to 64-bit fields
 */
static void virtio_pci_write64(uint64_t val, uint64_t *dst)
{
	uint64_t val_le = sys_cpu_to_le64(val);

	((uint32_t *)dst)[0] = val_le & GENMASK64(31, 0);
	((uint32_t *)dst)[1] = (val_le & GENMASK64(63, 32)) >> 32;
}

static int virtio_pci_set_virtqueue(
	const struct device *dev, uint16_t virtqueue_n, struct virtq *virtqueue)
{
	struct virtio_pci_data *data = dev->data;

	data->common_cfg->queue_select = sys_cpu_to_le16(virtqueue_n);
	barrier_dmem_fence_full();

	uint16_t max_queue_size = sys_le16_to_cpu(data->common_cfg->queue_size);

	if (max_queue_size < virtqueue->num) {
		LOG_ERR(
			"queue %d of the virtio pci device supports at most %d entries, tried to set %d",
			virtqueue_n,
			max_queue_size,
			virtqueue->num
		);
		return -EINVAL;
	}
	data->common_cfg->queue_size = sys_cpu_to_le16(virtqueue->num);
	virtio_pci_write64(
		k_mem_phys_addr(virtqueue->desc), (void *)&data->common_cfg->queue_desc
	);
	virtio_pci_write64(
		k_mem_phys_addr(virtqueue->avail), (void *)&data->common_cfg->queue_driver
	);
	virtio_pci_write64(
		k_mem_phys_addr(virtqueue->used), (void *)&data->common_cfg->queue_device
	);
	data->common_cfg->queue_msix_vector = sys_cpu_to_le16(VIRTIO_PCI_MSIX_NO_VECTOR);
	data->common_cfg->queue_enable = sys_cpu_to_le16(1);

	return 0;
}

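/*
 * Creates and activates every virtqueue exposed by the device. For each queue
 * the cb callback receives the queue index and the maximum size supported by
 * the device and returns the size to actually use. On failure, all queues
 * activated or created so far are disabled and freed
 */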
static int virtio_pci_init_virtqueues(
	const struct device *dev, uint16_t num_queues, virtio_enumerate_queues cb, void *opaque)
{
	struct virtio_pci_data *data = dev->data;
	uint16_t queue_count = sys_le16_to_cpu(data->common_cfg->num_queues);

	if (num_queues > queue_count) {
		LOG_ERR("requested more virtqueues than available");
		return -EINVAL;
	}

	data->virtqueues = k_malloc(queue_count * sizeof(struct virtq));
	if (!data->virtqueues) {
		LOG_ERR("failed to allocate virtqueue array");
		return -ENOMEM;
	}
	data->virtqueue_count = queue_count;

	int ret = 0;
	int created_queues = 0;
	int activated_queues = 0;

	for (int i = 0; i < queue_count; i++) {
		data->common_cfg->queue_select = sys_cpu_to_le16(i);
		barrier_dmem_fence_full();

		uint16_t queue_size = cb(i, sys_le16_to_cpu(data->common_cfg->queue_size), opaque);

		ret = virtq_create(&data->virtqueues[i], queue_size);
		if (ret != 0) {
			goto fail;
		}
		created_queues++;

		ret = virtio_pci_set_virtqueue(dev, i, &data->virtqueues[i]);
		if (ret != 0) {
			goto fail;
		}
		activated_queues++;
	}

	return 0;

fail:
	for (int j = 0; j < activated_queues; j++) {
		data->common_cfg->queue_select = sys_cpu_to_le16(j);
		barrier_dmem_fence_full();
		data->common_cfg->queue_enable = sys_cpu_to_le16(0);
	}
	for (int j = 0; j < created_queues; j++) {
		virtq_free(&data->virtqueues[j]);
	}
	k_free(data->virtqueues);
	data->virtqueue_count = 0;

	return ret;
}

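/*
 * Maps the region described by a capability (BAR + offset + length) into the
 * virtual address space and stores the resulting address in virt_ptr. Without
 * an MMU the physical address is used directly
 */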
static bool virtio_pci_map_cap(pcie_bdf_t bdf, struct virtio_pci_cap *cap, void **virt_ptr)
{
	struct pcie_bar mbar;

	if (!pcie_get_mbar(bdf, cap->bar, &mbar)) {
		LOG_ERR("no mbar for capability type %d found", cap->cfg_type);
		return false;
	}
	assert(mbar.phys_addr + cap->offset + cap->length <= mbar.phys_addr + mbar.size);

#ifdef CONFIG_MMU
	k_mem_map_phys_bare(
		(uint8_t **)virt_ptr, mbar.phys_addr + cap->offset, cap->length, K_MEM_PERM_RW
	);
#else
	*virt_ptr = (void *)(mbar.phys_addr + cap->offset);
#endif

	return true;
}

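/*
 * Feature bits are accessed as 32-bit words selected via the
 * device_feature_select/driver_feature_select registers (spec 4.1.4.3):
 * word_n == 0 covers feature bits 0-31, word_n == 1 covers bits 32-63, etc.
 */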
static uint32_t virtio_pci_read_device_feature_word(const struct device *dev, uint32_t word_n)
{
	struct virtio_pci_data *data = dev->data;

	data->common_cfg->device_feature_select = sys_cpu_to_le32(word_n);
	barrier_dmem_fence_full();
	return sys_le32_to_cpu(data->common_cfg->device_feature);
}

static void virtio_pci_write_driver_feature_word(
	const struct device *dev, uint32_t word_n, uint32_t val)
{
	struct virtio_pci_data *data = dev->data;

	data->common_cfg->driver_feature_select = sys_cpu_to_le32(word_n);
	barrier_dmem_fence_full();
	data->common_cfg->driver_feature = sys_cpu_to_le32(val);
}

static bool virtio_pci_read_device_feature_bit(const struct device *dev, int bit)
{
	uint32_t word_n = bit / 32;
	uint32_t mask = BIT(bit % 32);

	return virtio_pci_read_device_feature_word(dev, word_n) & mask;
}

static void virtio_pci_write_driver_feature_bit(const struct device *dev, int bit, bool value)
{
	uint32_t word_n = bit / 32;
	uint32_t mask = BIT(bit % 32);
	uint32_t word = virtio_pci_read_device_feature_word(dev, word_n);

	virtio_pci_write_driver_feature_word(dev, word_n, value ? word | mask : word & ~mask);
}

static int virtio_pci_write_driver_feature_bit_range_check(
	const struct device *dev, int bit, bool value)
{
	/* reject feature bits that fall outside both device-type feature ranges */
	if (!IN_RANGE(bit, DEV_TYPE_FEAT_RANGE_0_BEGIN, DEV_TYPE_FEAT_RANGE_0_END)
	    && !IN_RANGE(bit, DEV_TYPE_FEAT_RANGE_1_BEGIN, DEV_TYPE_FEAT_RANGE_1_END)) {
		return -EINVAL;
	}

	virtio_pci_write_driver_feature_bit(dev, bit, value);

	return 0;
}

static bool virtio_pci_read_status_bit(const struct device *dev, int bit)
{
	struct virtio_pci_data *data = dev->data;
	uint32_t mask = BIT(bit);

	barrier_dmem_fence_full();
	return sys_le32_to_cpu(data->common_cfg->device_status) & mask;
}

static void virtio_pci_write_status_bit(const struct device *dev, int bit)
{
	struct virtio_pci_data *data = dev->data;
	uint32_t mask = BIT(bit);

	barrier_dmem_fence_full();
	data->common_cfg->device_status |= sys_cpu_to_le32(mask);
}

static int virtio_pci_init_common(const struct device *dev)
{
	const struct virtio_pci_config *conf = dev->config;
	struct virtio_pci_data *data = dev->data;
	struct virtio_pci_cap vpc;
	struct virtio_pci_notify_cap vpnc = { .notify_off_multiplier = 0 };

	if (conf->pcie->bdf == PCIE_BDF_NONE) {
		LOG_ERR("no virtio pci device with id 0x%x on the bus", conf->pcie->id);
		return 1;
	}
	LOG_INF(
		"found virtio pci device with id 0x%x and bdf 0x%x", conf->pcie->id, conf->pcie->bdf
	);

	if (virtio_pci_read_cap(conf->pcie->bdf, VIRTIO_PCI_CAP_COMMON_CFG, &vpc, sizeof(vpc))) {
		if (!virtio_pci_map_cap(conf->pcie->bdf, &vpc, (void **)&data->common_cfg)) {
			return 1;
		}
	} else {
		LOG_ERR(
			"no VIRTIO_PCI_CAP_COMMON_CFG for the device with id 0x%x and bdf 0x%x, legacy device?",
			conf->pcie->id,
			conf->pcie->bdf
		);
		return 1;
	}

	if (virtio_pci_read_cap(conf->pcie->bdf, VIRTIO_PCI_CAP_ISR_CFG, &vpc, sizeof(vpc))) {
		if (!virtio_pci_map_cap(conf->pcie->bdf, &vpc, (void **)&data->isr_status)) {
			return 1;
		}
	} else {
		LOG_ERR(
			"no VIRTIO_PCI_CAP_ISR_CFG for the device with id 0x%x and bdf 0x%x",
			conf->pcie->id,
			conf->pcie->bdf
		);
		return 1;
	}

	if (virtio_pci_read_cap(conf->pcie->bdf, VIRTIO_PCI_CAP_NOTIFY_CFG, &vpnc, sizeof(vpnc))) {
		if (!virtio_pci_map_cap(
				conf->pcie->bdf, (struct virtio_pci_cap *)&vpnc,
				(void **)&data->notify_cfg)) {
			return 1;
		}
		data->notify_off_multiplier = sys_le32_to_cpu(vpnc.notify_off_multiplier);
	} else {
		LOG_ERR(
			"no VIRTIO_PCI_CAP_NOTIFY_CFG for the device with id 0x%x and bdf 0x%x",
			conf->pcie->id,
			conf->pcie->bdf
		);
		return 1;
	}

	/*
	 * Some device types may present VIRTIO_PCI_CAP_DEVICE_CFG capabilities as per spec
	 * 4.1.4.6. It states that there may be more than one such capability per device;
	 * however, none of the devices specified in Device Types (chapter 5) state that they
	 * need more than one (it's always zero or one virtio_devtype_config struct), so we
	 * just try to read the first one
	 */
	if (virtio_pci_read_cap(conf->pcie->bdf, VIRTIO_PCI_CAP_DEVICE_CFG, &vpc, sizeof(vpc))) {
		if (!virtio_pci_map_cap(
				conf->pcie->bdf, &vpc, (void **)&data->device_specific_cfg)) {
			return 1;
		}
	} else {
		data->device_specific_cfg = NULL;
		LOG_INF(
			"no VIRTIO_PCI_CAP_DEVICE_CFG for the device with id 0x%x and bdf 0x%x",
			conf->pcie->id,
			conf->pcie->bdf
		);
	}

	/*
	 * The device initialization goes as follows (see spec 3.1.1):
	 * - first we have to reset the device
	 * - then we have to set the ACKNOWLEDGE bit
	 * - then we have to set the DRIVER bit
	 * - after that, feature flag negotiation takes place; currently this driver only
	 *   needs VIRTIO_F_VERSION_1, the rest of the flags is left for the specific devices
	 *   to negotiate via this driver's API, which must be finalized with
	 *   commit_feature_bits() that sets the FEATURES_OK bit
	 * - next the virtqueues have to be set up, again via this driver's API
	 *   (init_virtqueues())
	 * - initialization is finalized by setting the DRIVER_OK bit, which is done by
	 *   finalize_init() from the API
	 */

	virtio_pci_reset(dev);

	virtio_pci_write_status_bit(dev, DEVICE_STATUS_ACKNOWLEDGE);
	virtio_pci_write_status_bit(dev, DEVICE_STATUS_DRIVER);

	LOG_INF(
		"virtio pci device with id 0x%x and bdf 0x%x advertised "
		"feature bits: 0x%.8x%.8x%.8x%.8x",
		conf->pcie->id,
		conf->pcie->bdf,
		virtio_pci_read_device_feature_word(dev, 3),
		virtio_pci_read_device_feature_word(dev, 2),
		virtio_pci_read_device_feature_word(dev, 1),
		virtio_pci_read_device_feature_word(dev, 0)
	);

	/*
	 * In the case of PCI this should never happen, because a legacy device would've been
	 * caught earlier by the VIRTIO_PCI_CAP_COMMON_CFG check, as that capability shouldn't
	 * be present on legacy devices, but we are leaving it here as a sanity check
	 */
	if (!virtio_pci_read_device_feature_bit(dev, VIRTIO_F_VERSION_1)) {
		LOG_ERR(
			"virtio pci device with id 0x%x and bdf 0x%x doesn't advertise "
			"VIRTIO_F_VERSION_1 feature support",
			conf->pcie->id,
			conf->pcie->bdf
		);
		return 1;
	}

	virtio_pci_write_driver_feature_bit(dev, VIRTIO_F_VERSION_1, 1);

	return 0;
}

struct virtq *virtio_pci_get_virtqueue(const struct device *dev, uint16_t queue_idx)
{
	struct virtio_pci_data *data = dev->data;

	return queue_idx < data->virtqueue_count ? &data->virtqueues[queue_idx] : NULL;
}

void *virtio_pci_get_device_specific_config(const struct device *dev)
{
	struct virtio_pci_data *data = dev->data;

	return data->device_specific_cfg;
}

void virtio_pci_finalize_init(const struct device *dev)
{
	virtio_pci_write_status_bit(dev, DEVICE_STATUS_DRIVER_OK);
}

int virtio_pci_commit_feature_bits(const struct device *dev)
{
	const struct virtio_pci_config *conf = dev->config;

	virtio_pci_write_status_bit(dev, DEVICE_STATUS_FEATURES_OK);
	if (!virtio_pci_read_status_bit(dev, DEVICE_STATUS_FEATURES_OK)) {
		LOG_ERR(
			"virtio pci device with id 0x%x and bdf 0x%x doesn't support selected "
			"feature bits: 0x%.8x%.8x%.8x%.8x",
			conf->pcie->id,
			conf->pcie->bdf,
			virtio_pci_read_device_feature_word(dev, 3),
			virtio_pci_read_device_feature_word(dev, 2),
			virtio_pci_read_device_feature_word(dev, 1),
			virtio_pci_read_device_feature_word(dev, 0)
		);
		return -EINVAL;
	}

	return 0;
}

static DEVICE_API(virtio, virtio_pci_driver_api) = {
	.get_virtqueue = virtio_pci_get_virtqueue,
	.notify_virtqueue = virtio_pci_notify_queue,
	.get_device_specific_config = virtio_pci_get_device_specific_config,
	.read_device_feature_bit = virtio_pci_read_device_feature_bit,
	.write_driver_feature_bit = virtio_pci_write_driver_feature_bit_range_check,
	.commit_feature_bits = virtio_pci_commit_feature_bits,
	.init_virtqueues = virtio_pci_init_virtqueues,
	.finalize_init = virtio_pci_finalize_init
};
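
/*
 * A minimal usage sketch from the point of view of a device-specific driver
 * built on top of this transport, assuming the generic virtio_* wrappers from
 * <zephyr/drivers/virtio.h> that dispatch to this API (SOME_FEATURE_BIT, n,
 * queue_size_cb and opaque are hypothetical placeholders):
 *
 *	if (virtio_read_device_feature_bit(dev, SOME_FEATURE_BIT)) {
 *		virtio_write_driver_feature_bit(dev, SOME_FEATURE_BIT, true);
 *	}
 *	virtio_commit_feature_bits(dev);
 *	virtio_init_virtqueues(dev, n, queue_size_cb, opaque);
 *	virtio_finalize_init(dev);
 */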

#define VIRTIO_PCI_DEFINE(inst)                                                          \
	BUILD_ASSERT(DT_NODE_HAS_COMPAT(DT_INST_PARENT(inst), pcie_controller));         \
	DEVICE_PCIE_INST_DECLARE(inst);                                                  \
	static struct virtio_pci_data virtio_pci_data##inst;                             \
	static struct virtio_pci_config virtio_pci_config##inst = {                      \
		DEVICE_PCIE_INST_INIT(inst, pcie)                                        \
	};                                                                               \
	static int virtio_pci_init##inst(const struct device *dev)                       \
	{                                                                                \
		IRQ_CONNECT(                                                             \
			DT_INST_IRQN(inst), DT_INST_IRQ(inst, priority), virtio_pci_isr, \
			DEVICE_DT_INST_GET(inst), 0                                      \
		);                                                                       \
		int ret = virtio_pci_init_common(dev);                                   \
		irq_enable(DT_INST_IRQN(inst));                                          \
		return ret;                                                              \
	}                                                                                \
	DEVICE_DT_INST_DEFINE(                                                           \
		inst,                                                                    \
		virtio_pci_init##inst,                                                   \
		NULL,                                                                    \
		&virtio_pci_data##inst,                                                  \
		&virtio_pci_config##inst,                                                \
		POST_KERNEL,                                                             \
		0,                                                                       \
		&virtio_pci_driver_api                                                   \
	);

DT_INST_FOREACH_STATUS_OKAY(VIRTIO_PCI_DEFINE)