Lines Matching refs:dev
26 struct uml_vfio_device *dev; member
163 struct uml_vfio_device *dev = ctx->dev; in uml_vfio_interrupt() local
164 int index = ctx - dev->intr_ctx; in uml_vfio_interrupt()
165 int irqfd = dev->udev.irqfd[index]; in uml_vfio_interrupt()
166 int irq = dev->msix_data[index]; in uml_vfio_interrupt()
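The handler above recovers its vector slot by pointer arithmetic: subtracting the base of the intr_ctx array from the context pointer yields the MSI-X index, which then selects both the eventfd (irqfd) and the guest IRQ number cached in msix_data. A minimal user-space sketch of that idiom; the intr_ctx name follows the fragments above, everything else is illustrative:

    #include <stdio.h>

    struct intr_ctx { void *dev; };

    int main(void)
    {
        struct intr_ctx intr_ctx[4];          /* array of per-vector contexts */
        struct intr_ctx *ctx = &intr_ctx[2];  /* pointer handed to a callback */

        /* Pointer difference between element and array base gives the index. */
        int index = (int)(ctx - intr_ctx);

        printf("vector index = %d\n", index); /* prints 2 */
        return 0;
    }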
180 static int uml_vfio_activate_irq(struct uml_vfio_device *dev, int index) in uml_vfio_activate_irq() argument
182 struct uml_vfio_intr_ctx *ctx = &dev->intr_ctx[index]; in uml_vfio_activate_irq()
188 irqfd = uml_vfio_user_activate_irq(&dev->udev, index); in uml_vfio_activate_irq()
210 uml_vfio_user_deactivate_irq(&dev->udev, index); in uml_vfio_activate_irq()
214 static int uml_vfio_deactivate_irq(struct uml_vfio_device *dev, int index) in uml_vfio_deactivate_irq() argument
216 struct uml_vfio_intr_ctx *ctx = &dev->intr_ctx[index]; in uml_vfio_deactivate_irq()
219 ignore_sigio_fd(dev->udev.irqfd[index]); in uml_vfio_deactivate_irq()
221 uml_vfio_user_deactivate_irq(&dev->udev, index); in uml_vfio_deactivate_irq()
227 static int uml_vfio_update_msix_cap(struct uml_vfio_device *dev, in uml_vfio_update_msix_cap() argument
235 if (size == 2 && offset == dev->msix_cap + PCI_MSIX_FLAGS) { in uml_vfio_update_msix_cap()
239 return uml_vfio_user_update_irqs(&dev->udev); in uml_vfio_update_msix_cap()
245 static int uml_vfio_update_msix_table(struct uml_vfio_device *dev, in uml_vfio_update_msix_table() argument
255 offset -= dev->msix_offset + PCI_MSIX_ENTRY_DATA; in uml_vfio_update_msix_table()
261 if (index >= dev->udev.irq_count) in uml_vfio_update_msix_table()
264 dev->msix_data[index] = val; in uml_vfio_update_msix_table()
266 return val ? uml_vfio_activate_irq(dev, index) : in uml_vfio_update_msix_table()
267 uml_vfio_deactivate_irq(dev, index); in uml_vfio_update_msix_table()
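uml_vfio_update_msix_table only cares about writes that land on an entry's 32-bit data field: each MSI-X table entry is PCI_MSIX_ENTRY_SIZE (16) bytes with the data dword at PCI_MSIX_ENTRY_DATA (8), so rebasing the BAR offset against msix_offset + PCI_MSIX_ENTRY_DATA and dividing by the entry size gives the vector index; the driver additionally bounds the index by irq_count, and a zero value deactivates the vector while anything else activates it. A standalone sketch of that offset arithmetic, using the standard PCI register-layout constants; the function name and example offsets are hypothetical:

    #include <stdio.h>

    #define PCI_MSIX_ENTRY_SIZE  16  /* each MSI-X table entry is 16 bytes */
    #define PCI_MSIX_ENTRY_DATA   8  /* data dword sits at byte 8 of entry */

    /* Return the vector index if 'offset' addresses an entry's data field,
     * or -1 if it points elsewhere in the table. */
    static int msix_data_index(unsigned long offset, unsigned long msix_offset)
    {
        if (offset < msix_offset + PCI_MSIX_ENTRY_DATA)
            return -1;                        /* before the first data field */
        offset -= msix_offset + PCI_MSIX_ENTRY_DATA;
        if (offset % PCI_MSIX_ENTRY_SIZE)
            return -1;                        /* not a data field */
        return (int)(offset / PCI_MSIX_ENTRY_SIZE);
    }

    int main(void)
    {
        /* Entry 3's data field: 0x2000 + 3*16 + 8 = 0x2038 */
        printf("%d\n", msix_data_index(0x2038, 0x2000)); /* prints 3  */
        printf("%d\n", msix_data_index(0x2034, 0x2000)); /* prints -1 */
        return 0;
    }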
270 static unsigned long __uml_vfio_cfgspace_read(struct uml_vfio_device *dev, in __uml_vfio_cfgspace_read() argument
277 if (uml_vfio_user_cfgspace_read(&dev->udev, offset, data, size)) in __uml_vfio_cfgspace_read()
299 struct uml_vfio_device *dev = to_vdev(pdev); in uml_vfio_cfgspace_read() local
301 return __uml_vfio_cfgspace_read(dev, offset, size); in uml_vfio_cfgspace_read()
304 static void __uml_vfio_cfgspace_write(struct uml_vfio_device *dev, in __uml_vfio_cfgspace_write() argument
327 WARN_ON(uml_vfio_user_cfgspace_write(&dev->udev, offset, data, size)); in __uml_vfio_cfgspace_write()
334 struct uml_vfio_device *dev = to_vdev(pdev); in uml_vfio_cfgspace_write() local
336 if (offset < dev->msix_cap + PCI_CAP_MSIX_SIZEOF && in uml_vfio_cfgspace_write()
337 offset + size > dev->msix_cap) in uml_vfio_cfgspace_write()
338 WARN_ON(uml_vfio_update_msix_cap(dev, offset, size, val)); in uml_vfio_cfgspace_write()
340 __uml_vfio_cfgspace_write(dev, offset, size, val); in uml_vfio_cfgspace_write()
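Before forwarding a config-space write, uml_vfio_cfgspace_write tests whether the written range overlaps the MSI-X capability: the half-open intervals [offset, offset+size) and [msix_cap, msix_cap+PCI_CAP_MSIX_SIZEOF) intersect exactly when offset < cap_end and offset + size > cap_start. The same interval-overlap idiom reappears below for BAR writes that touch the MSI-X table. A small standalone check, assuming only the standard 12-byte MSI-X capability size; the helper name is hypothetical:

    #include <stdbool.h>
    #include <stdio.h>

    #define PCI_CAP_MSIX_SIZEOF 12  /* size of the MSI-X capability block */

    /* True if the write [offset, offset+size) overlaps the MSI-X capability
     * starting at 'msix_cap' in config space. */
    static bool write_hits_msix_cap(unsigned int offset, int size,
                                    unsigned int msix_cap)
    {
        return offset < msix_cap + PCI_CAP_MSIX_SIZEOF &&
               offset + size > msix_cap;
    }

    int main(void)
    {
        unsigned int msix_cap = 0x70;
        printf("%d\n", write_hits_msix_cap(0x72, 2, msix_cap)); /* 1: inside cap  */
        printf("%d\n", write_hits_msix_cap(0x7c, 4, msix_cap)); /* 0: past the cap */
        return 0;
    }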
346 struct uml_vfio_device *dev = to_vdev(pdev); in uml_vfio_bar_copy_from() local
349 uml_vfio_user_bar_read(&dev->udev, bar, offset, buffer, size); in uml_vfio_bar_copy_from()
379 struct uml_vfio_device *dev = to_vdev(pdev); in uml_vfio_bar_copy_to() local
381 uml_vfio_user_bar_write(&dev->udev, bar, offset, buffer, size); in uml_vfio_bar_copy_to()
388 struct uml_vfio_device *dev = to_vdev(pdev); in uml_vfio_bar_write() local
391 if (bar == dev->msix_bar && offset + size > dev->msix_offset && in uml_vfio_bar_write()
392 offset < dev->msix_offset + dev->msix_size) in uml_vfio_bar_write()
393 WARN_ON(uml_vfio_update_msix_table(dev, offset, size, val)); in uml_vfio_bar_write()
418 struct uml_vfio_device *dev = to_vdev(pdev); in uml_vfio_bar_set() local
422 uml_vfio_user_bar_write(&dev->udev, bar, offset + i, &value, 1); in uml_vfio_bar_set()
435 static u8 uml_vfio_find_capability(struct uml_vfio_device *dev, u8 cap) in uml_vfio_find_capability() argument
441 pos = __uml_vfio_cfgspace_read(dev, PCI_CAPABILITY_LIST, sizeof(pos)); in uml_vfio_find_capability()
444 ent = __uml_vfio_cfgspace_read(dev, pos, sizeof(ent)); in uml_vfio_find_capability()
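uml_vfio_find_capability walks the standard PCI capability chain: the byte at PCI_CAPABILITY_LIST (0x34) holds the offset of the first capability, each capability starts with an ID byte followed by a next-pointer byte, and the walk ends when the pointer is 0. A hedged user-space analog over an in-memory copy of config space; the plain array reads stand in for __uml_vfio_cfgspace_read, and the loop guard is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define PCI_CAPABILITY_LIST 0x34  /* pointer to first capability      */
    #define PCI_CAP_LIST_ID     0     /* capability ID byte               */
    #define PCI_CAP_LIST_NEXT   1     /* offset of next capability, 0=end */
    #define PCI_CAP_ID_MSIX     0x11  /* MSI-X capability ID              */

    /* Return the config-space offset of capability 'cap', or 0 if absent. */
    static uint8_t find_capability(const uint8_t cfg[256], uint8_t cap)
    {
        uint8_t pos = cfg[PCI_CAPABILITY_LIST];
        int ttl = 48;                       /* guard against malformed chains */

        while (pos >= 0x40 && pos < 0xff && ttl-- > 0) {
            if (cfg[pos + PCI_CAP_LIST_ID] == cap)
                return pos;
            pos = cfg[pos + PCI_CAP_LIST_NEXT];
        }
        return 0;
    }

    int main(void)
    {
        uint8_t cfg[256] = {0};
        cfg[PCI_CAPABILITY_LIST] = 0x70;    /* single capability at 0x70 */
        cfg[0x70] = PCI_CAP_ID_MSIX;
        cfg[0x71] = 0x00;                   /* end of chain */

        printf("MSI-X cap at 0x%02x\n", find_capability(cfg, PCI_CAP_ID_MSIX));
        return 0;
    }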
458 static int uml_vfio_read_msix_table(struct uml_vfio_device *dev) in uml_vfio_read_msix_table() argument
464 off = uml_vfio_find_capability(dev, PCI_CAP_ID_MSIX); in uml_vfio_read_msix_table()
468 dev->msix_cap = off; in uml_vfio_read_msix_table()
470 tbl = __uml_vfio_cfgspace_read(dev, off + PCI_MSIX_TABLE, sizeof(tbl)); in uml_vfio_read_msix_table()
471 flags = __uml_vfio_cfgspace_read(dev, off + PCI_MSIX_FLAGS, sizeof(flags)); in uml_vfio_read_msix_table()
473 dev->msix_bar = tbl & PCI_MSIX_TABLE_BIR; in uml_vfio_read_msix_table()
474 dev->msix_offset = tbl & PCI_MSIX_TABLE_OFFSET; in uml_vfio_read_msix_table()
475 dev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * PCI_MSIX_ENTRY_SIZE; in uml_vfio_read_msix_table()
477 dev->msix_data = kzalloc(dev->msix_size, GFP_KERNEL); in uml_vfio_read_msix_table()
478 if (!dev->msix_data) in uml_vfio_read_msix_table()
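uml_vfio_read_msix_table derives everything it needs from two capability registers: the 32-bit table register at PCI_MSIX_TABLE packs the BAR index in its low 3 bits (PCI_MSIX_TABLE_BIR) and the table offset in the remaining bits (PCI_MSIX_TABLE_OFFSET), while the Message Control word's QSIZE field is the table length minus one, so the table occupies (QSIZE + 1) * PCI_MSIX_ENTRY_SIZE bytes. A standalone decode using the standard mask values from the PCI spec; the input register values are made up:

    #include <stdint.h>
    #include <stdio.h>

    #define PCI_MSIX_TABLE_BIR     0x00000007  /* BAR index holding the table  */
    #define PCI_MSIX_TABLE_OFFSET  0xfffffff8  /* table offset within that BAR */
    #define PCI_MSIX_FLAGS_QSIZE   0x07ff      /* table size minus one         */
    #define PCI_MSIX_ENTRY_SIZE    16          /* bytes per table entry        */

    int main(void)
    {
        uint32_t tbl   = 0x00002003;  /* hypothetical PCI_MSIX_TABLE value */
        uint16_t flags = 0x001f;      /* hypothetical Message Control word */

        unsigned int bar    = tbl & PCI_MSIX_TABLE_BIR;
        unsigned int offset = tbl & PCI_MSIX_TABLE_OFFSET;
        unsigned int size   = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) *
                              PCI_MSIX_ENTRY_SIZE;

        /* BAR 3, offset 0x2000, 32 entries -> 512 bytes */
        printf("bar=%u offset=0x%x size=%u\n", bar, offset, size);
        return 0;
    }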
484 static void uml_vfio_open_device(struct uml_vfio_device *dev) in uml_vfio_open_device() argument
489 group_id = uml_vfio_user_get_group_id(dev->name); in uml_vfio_open_device()
492 dev->name, group_id); in uml_vfio_open_device()
496 dev->group = uml_vfio_open_group(group_id); in uml_vfio_open_device()
497 if (dev->group < 0) { in uml_vfio_open_device()
499 group_id, dev->name, dev->group); in uml_vfio_open_device()
503 err = uml_vfio_user_setup_device(&dev->udev, dev->group, dev->name); in uml_vfio_open_device()
506 dev->name, err); in uml_vfio_open_device()
510 err = uml_vfio_read_msix_table(dev); in uml_vfio_open_device()
513 dev->name, err); in uml_vfio_open_device()
517 dev->intr_ctx = kmalloc_array(dev->udev.irq_count, in uml_vfio_open_device()
520 if (!dev->intr_ctx) { in uml_vfio_open_device()
522 dev->name); in uml_vfio_open_device()
526 for (i = 0; i < dev->udev.irq_count; i++) { in uml_vfio_open_device()
527 ctx = &dev->intr_ctx[i]; in uml_vfio_open_device()
528 ctx->dev = dev; in uml_vfio_open_device()
532 dev->pdev.ops = &uml_vfio_um_pci_ops; in uml_vfio_open_device()
534 err = um_pci_device_register(&dev->pdev); in uml_vfio_open_device()
537 dev->name, err); in uml_vfio_open_device()
544 kfree(dev->intr_ctx); in uml_vfio_open_device()
546 kfree(dev->msix_data); in uml_vfio_open_device()
548 uml_vfio_user_teardown_device(&dev->udev); in uml_vfio_open_device()
550 uml_vfio_release_group(dev->group); in uml_vfio_open_device()
552 list_del(&dev->list); in uml_vfio_open_device()
553 kfree(dev->name); in uml_vfio_open_device()
554 kfree(dev); in uml_vfio_open_device()
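The tail of uml_vfio_open_device is a goto-unwind ladder: each failure path releases only what has already been set up, in reverse order of acquisition (intr_ctx, then msix_data, the user device, the group, the list entry, the name, and finally the device itself). A generic sketch of the pattern with hypothetical resource names, since only the cleanup order is taken from the fragments above:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical two-step setup using goto-based unwinding. */
    static int open_thing(void)
    {
        char *a, *b;

        a = malloc(32);
        if (!a)
            goto out;           /* nothing to undo yet */

        b = malloc(64);
        if (!b)
            goto free_a;        /* undo only the first step */

        /* ... further setup; later failures fall through the labels ... */
        printf("setup ok\n");
        free(b);                /* freed here only so the demo doesn't leak; */
        free(a);                /* a real open path would keep these alive   */
        return 0;

    free_a:
        free(a);
    out:
        return -1;
    }

    int main(void)
    {
        return open_thing() ? 1 : 0;
    }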
557 static void uml_vfio_release_device(struct uml_vfio_device *dev) in uml_vfio_release_device() argument
561 for (i = 0; i < dev->udev.irq_count; i++) in uml_vfio_release_device()
562 uml_vfio_deactivate_irq(dev, i); in uml_vfio_release_device()
563 uml_vfio_user_update_irqs(&dev->udev); in uml_vfio_release_device()
565 um_pci_device_unregister(&dev->pdev); in uml_vfio_release_device()
566 kfree(dev->intr_ctx); in uml_vfio_release_device()
567 kfree(dev->msix_data); in uml_vfio_release_device()
568 uml_vfio_user_teardown_device(&dev->udev); in uml_vfio_release_device()
569 uml_vfio_release_group(dev->group); in uml_vfio_release_device()
570 list_del(&dev->list); in uml_vfio_release_device()
571 kfree(dev->name); in uml_vfio_release_device()
572 kfree(dev); in uml_vfio_release_device()
577 struct uml_vfio_device *dev; in uml_vfio_find_device() local
579 list_for_each_entry(dev, &uml_vfio_devices, list) { in uml_vfio_find_device()
580 if (!strcmp(dev->name, device)) in uml_vfio_find_device()
581 return dev; in uml_vfio_find_device()
588 struct uml_vfio_device *dev; in uml_vfio_add_device() local
603 dev = kzalloc(sizeof(*dev), GFP_KERNEL); in uml_vfio_add_device()
604 if (!dev) in uml_vfio_add_device()
607 dev->name = kstrdup(device, GFP_KERNEL); in uml_vfio_add_device()
608 if (!dev->name) { in uml_vfio_add_device()
609 kfree(dev); in uml_vfio_add_device()
613 list_add_tail(&dev->list, &uml_vfio_devices); in uml_vfio_add_device()
614 return dev; in uml_vfio_add_device()
619 struct uml_vfio_device *dev; in uml_vfio_cmdline_set() local
621 dev = uml_vfio_add_device(device); in uml_vfio_cmdline_set()
622 if (IS_ERR(dev)) in uml_vfio_cmdline_set()
623 return PTR_ERR(dev); in uml_vfio_cmdline_set()
648 struct uml_vfio_device *dev; in uml_vfio_mc_config() local
656 dev = uml_vfio_add_device(str); in uml_vfio_mc_config()
657 if (IS_ERR(dev)) in uml_vfio_mc_config()
658 return PTR_ERR(dev); in uml_vfio_mc_config()
659 uml_vfio_open_device(dev); in uml_vfio_mc_config()
684 struct uml_vfio_device *dev, *n; in uml_vfio_init() local
689 list_for_each_entry_safe(dev, n, &uml_vfio_devices, list) in uml_vfio_init()
690 uml_vfio_open_device(dev); in uml_vfio_init()
700 struct uml_vfio_device *dev, *n; in uml_vfio_exit() local
702 list_for_each_entry_safe(dev, n, &uml_vfio_devices, list) in uml_vfio_exit()
703 uml_vfio_release_device(dev); in uml_vfio_exit()
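Both loops above use list_for_each_entry_safe rather than the plain iterator because the body can unlink the current entry (uml_vfio_open_device deletes and frees the device on failure, and uml_vfio_release_device always does); the _safe variant caches the next pointer before the body runs. A user-space analog with a plain singly linked list, not the kernel list API, showing why the next pointer must be saved before the node is freed:

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int id; struct node *next; };

    int main(void)
    {
        /* Build a three-element list: 0 -> 1 -> 2 */
        struct node *head = NULL;
        for (int i = 2; i >= 0; i--) {
            struct node *n = malloc(sizeof(*n));
            n->id = i;
            n->next = head;
            head = n;
        }

        /* "Safe" traversal: grab 'next' before the body frees 'cur'. */
        struct node *cur, *nxt;
        for (cur = head; cur; cur = nxt) {
            nxt = cur->next;        /* cached, like list_for_each_entry_safe */
            printf("releasing node %d\n", cur->id);
            free(cur);              /* body destroys the current entry */
        }
        return 0;
    }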