Lines matching refs: mdev_state (the mbochs sample driver, samples/vfio-mdev/mbochs.c in the kernel tree)
165 struct mdev_state *mdev_state; member
171 struct mdev_state { struct
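
The lines above reference the per-device state structure itself. A minimal sketch of the fields implied by the member accesses throughout this listing (field types, sizes, and order are inferred, not authoritative; the real layout is in the driver source):

/* Reconstructed from the references below; omitted members and exact
 * types are assumptions. */
struct mdev_state {
	struct vfio_device vdev;      /* embedded; container_of() target */
	struct mdev_device *mdev;
	struct mutex ops_lock;        /* guards vbe[], pages[], dmabufs */
	u8 *vconfig;                  /* virtual PCI config space */
	u32 bar_mask[4];              /* per-BAR sizing masks */
	u64 memsize;                  /* framebuffer size in bytes */
	u32 pagecount;                /* memsize >> PAGE_SHIFT */
	struct page **pages;          /* lazily allocated backing pages */
	u16 vbe[16];                  /* VBE DISPI register file */
	struct vfio_region_gfx_edid edid_regs;
	u8 edid_blob[0x400];
	struct list_head dmabufs;     /* exported mbochs_dmabuf objects */
	u32 active_id, next_id;       /* dma-buf id bookkeeping */
	const struct mbochs_type *type;
};
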
213 static struct page *__mbochs_get_page(struct mdev_state *mdev_state,
215 static struct page *mbochs_get_page(struct mdev_state *mdev_state,
218 static void mbochs_create_config_space(struct mdev_state *mdev_state) in mbochs_create_config_space() argument
220 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_VENDOR_ID], in mbochs_create_config_space()
222 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_DEVICE_ID], in mbochs_create_config_space()
224 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_VENDOR_ID], in mbochs_create_config_space()
226 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_ID], in mbochs_create_config_space()
229 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_COMMAND], in mbochs_create_config_space()
231 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_CLASS_DEVICE], in mbochs_create_config_space()
233 mdev_state->vconfig[PCI_CLASS_REVISION] = 0x01; in mbochs_create_config_space()
235 STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_0], in mbochs_create_config_space()
239 mdev_state->bar_mask[0] = ~(mdev_state->memsize) + 1; in mbochs_create_config_space()
241 STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_2], in mbochs_create_config_space()
244 mdev_state->bar_mask[2] = ~(MBOCHS_MMIO_BAR_SIZE) + 1; in mbochs_create_config_space()
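
mbochs_create_config_space() fills the virtual config space with explicit little-endian stores and derives each BAR's sizing mask as the two's complement of the region size. A self-contained illustration of that arithmetic (store_le16 stands in for the driver's STORE_LE16 helper; the register value is an example):

#include <stdint.h>
#include <stdio.h>

/* Store a 16-bit value in little-endian order regardless of host
 * endianness, as the driver's STORE_LE16 helper does. */
static void store_le16(uint8_t *p, uint16_t v)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
}

int main(void)
{
	uint8_t vconfig[256] = { 0 };
	uint32_t memsize = 16u * 1024 * 1024;   /* example: 16 MB memory BAR */
	uint32_t bar_mask;

	store_le16(&vconfig[0x00], 0x1234);     /* PCI_VENDOR_ID (example) */

	/* ~size + 1 is -size in two's complement; for a power-of-two BAR
	 * size this yields exactly the mask the PCI BAR sizing protocol
	 * expects (address bits writable, size bits forced to zero). */
	bar_mask = ~memsize + 1;
	printf("bar_mask: 0x%08x\n", bar_mask); /* prints 0xff000000 */
	return 0;
}
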
247 static int mbochs_check_framebuffer(struct mdev_state *mdev_state, in mbochs_check_framebuffer() argument
250 struct device *dev = mdev_dev(mdev_state->mdev); in mbochs_check_framebuffer()
251 u16 *vbe = mdev_state->vbe; in mbochs_check_framebuffer()
254 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock)); in mbochs_check_framebuffer()
286 if (mode->offset + mode->size > mdev_state->memsize) { in mbochs_check_framebuffer()
305 static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset, in handle_pci_cfg_write() argument
308 struct device *dev = mdev_dev(mdev_state->mdev); in handle_pci_cfg_write()
318 cfg_addr = (cfg_addr & mdev_state->bar_mask[index]); in handle_pci_cfg_write()
326 cfg_addr |= (mdev_state->vconfig[offset] & in handle_pci_cfg_write()
328 STORE_LE32(&mdev_state->vconfig[offset], cfg_addr); in handle_pci_cfg_write()
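
handle_pci_cfg_write() implements the guest-visible half of BAR sizing: the written value is ANDed with the precomputed mask and the read-only low flag bits are merged back in from vconfig before the little-endian store. A condensed, hypothetical version of that path:

#include <stdint.h>

/* Condensed BAR-write path; names and the 0x0f flag mask (the usual
 * ~PCI_BASE_ADDRESS_MEM_MASK low nibble) are assumptions. */
static void bar_write(uint8_t *vconfig, const uint32_t *bar_mask,
		      unsigned int index, unsigned int offset, uint32_t val)
{
	uint32_t cfg_addr = val & bar_mask[index]; /* size-align the address */

	cfg_addr |= vconfig[offset] & 0x0f;        /* keep read-only flag bits */
	vconfig[offset + 0] = cfg_addr & 0xff;     /* little-endian store */
	vconfig[offset + 1] = (cfg_addr >> 8) & 0xff;
	vconfig[offset + 2] = (cfg_addr >> 16) & 0xff;
	vconfig[offset + 3] = cfg_addr >> 24;
}
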
333 static void handle_mmio_write(struct mdev_state *mdev_state, u16 offset, in handle_mmio_write() argument
336 struct device *dev = mdev_dev(mdev_state->mdev); in handle_mmio_write()
348 if (index < ARRAY_SIZE(mdev_state->vbe)) in handle_mmio_write()
349 mdev_state->vbe[index] = reg16; in handle_mmio_write()
363 static void handle_mmio_read(struct mdev_state *mdev_state, u16 offset, in handle_mmio_read() argument
366 struct device *dev = mdev_dev(mdev_state->mdev); in handle_mmio_read()
373 edid = &mdev_state->edid_regs; in handle_mmio_read()
379 memcpy(buf, mdev_state->edid_blob + offset, count); in handle_mmio_read()
385 if (index < ARRAY_SIZE(mdev_state->vbe)) in handle_mmio_read()
386 reg16 = mdev_state->vbe[index]; in handle_mmio_read()
400 static void handle_edid_regs(struct mdev_state *mdev_state, u16 offset, in handle_edid_regs() argument
403 char *regs = (void *)&mdev_state->edid_regs; in handle_edid_regs()
405 if (offset + count > sizeof(mdev_state->edid_regs)) in handle_edid_regs()
427 static void handle_edid_blob(struct mdev_state *mdev_state, u16 offset, in handle_edid_blob() argument
430 if (offset + count > mdev_state->edid_regs.edid_max_size) in handle_edid_blob()
433 memcpy(mdev_state->edid_blob + offset, buf, count); in handle_edid_blob()
435 memcpy(buf, mdev_state->edid_blob + offset, count); in handle_edid_blob()
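
Both EDID handlers follow the same shape: bound the access against the region size, then memcpy in the requested direction; out-of-range accesses are simply dropped. A minimal sketch of that pattern:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Bounds-checked bidirectional access, as in handle_edid_regs() and
 * handle_edid_blob(); names are illustrative. */
static void blob_access(unsigned char *blob, size_t blob_size, size_t offset,
			char *buf, size_t count, bool is_write)
{
	if (offset + count > blob_size)
		return;                 /* silently ignore out-of-range access */
	if (is_write)
		memcpy(blob + offset, buf, count);
	else
		memcpy(buf, blob + offset, count);
}
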
438 static ssize_t mdev_access(struct mdev_state *mdev_state, char *buf, in mdev_access() argument
446 mutex_lock(&mdev_state->ops_lock); in mdev_access()
450 handle_pci_cfg_write(mdev_state, pos, buf, count); in mdev_access()
452 memcpy(buf, (mdev_state->vconfig + pos), count); in mdev_access()
459 handle_mmio_write(mdev_state, pos, buf, count); in mdev_access()
461 handle_mmio_read(mdev_state, pos, buf, count); in mdev_access()
468 handle_edid_regs(mdev_state, pos, buf, count, is_write); in mdev_access()
471 handle_edid_blob(mdev_state, pos, buf, count, is_write); in mdev_access()
476 MBOCHS_MEMORY_BAR_OFFSET + mdev_state->memsize) { in mdev_access()
479 pg = __mbochs_get_page(mdev_state, pos >> PAGE_SHIFT); in mdev_access()
489 dev_dbg(mdev_state->vdev.dev, "%s: %s @0x%llx (unhandled)\n", in mdev_access()
499 mutex_unlock(&mdev_state->ops_lock); in mdev_access()
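
mdev_access() is the single serialization point: it takes ops_lock, routes the access by offset (PCI config space, the MMIO BAR with its VBE registers and EDID region, or the memory BAR backed by __mbochs_get_page()), and logs anything unhandled via dev_dbg(). The routing logic, extracted with placeholder region bounds:

#include <stddef.h>

enum region { R_CFG, R_MMIO, R_MEMORY, R_UNHANDLED };

/* Placeholder offsets; the driver uses its own MBOCHS_*_OFFSET/SIZE
 * constants. */
#define CFG_SIZE    0x1000
#define MMIO_OFFSET 0x1000
#define MMIO_SIZE   0x1000
#define MEM_OFFSET  0x2000

/* Classify an access offset the way mdev_access() dispatches it. */
static enum region classify(size_t pos, size_t memsize)
{
	if (pos < CFG_SIZE)
		return R_CFG;
	if (pos >= MMIO_OFFSET && pos < MMIO_OFFSET + MMIO_SIZE)
		return R_MMIO;
	if (pos >= MEM_OFFSET && pos < MEM_OFFSET + memsize)
		return R_MEMORY;
	return R_UNHANDLED;
}
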
504 static int mbochs_reset(struct mdev_state *mdev_state) in mbochs_reset() argument
506 u32 size64k = mdev_state->memsize / (64 * 1024); in mbochs_reset()
509 for (i = 0; i < ARRAY_SIZE(mdev_state->vbe); i++) in mbochs_reset()
510 mdev_state->vbe[i] = 0; in mbochs_reset()
511 mdev_state->vbe[VBE_DISPI_INDEX_ID] = VBE_DISPI_ID5; in mbochs_reset()
512 mdev_state->vbe[VBE_DISPI_INDEX_VIDEO_MEMORY_64K] = size64k; in mbochs_reset()
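
Reset clears the whole VBE register file and reseeds just the ID register and the video-memory-size register; the latter is reported in 64 KiB units, so the conversion at line 506 is a simple division:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t memsize = 16ull * 1024 * 1024;    /* example: 16 MB device */
	uint32_t size64k = memsize / (64 * 1024);  /* VBE counts 64 KiB units */

	assert(size64k == 256);
	return 0;
}
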
518 struct mdev_state *mdev_state = in mbochs_init_dev() local
519 container_of(vdev, struct mdev_state, vdev); in mbochs_init_dev()
532 mdev_state->vconfig = kzalloc(MBOCHS_CONFIG_SPACE_SIZE, GFP_KERNEL); in mbochs_init_dev()
533 if (!mdev_state->vconfig) in mbochs_init_dev()
536 mdev_state->memsize = type->mbytes * 1024 * 1024; in mbochs_init_dev()
537 mdev_state->pagecount = mdev_state->memsize >> PAGE_SHIFT; in mbochs_init_dev()
538 mdev_state->pages = kcalloc(mdev_state->pagecount, in mbochs_init_dev()
541 if (!mdev_state->pages) in mbochs_init_dev()
544 mutex_init(&mdev_state->ops_lock); in mbochs_init_dev()
545 mdev_state->mdev = mdev; in mbochs_init_dev()
546 INIT_LIST_HEAD(&mdev_state->dmabufs); in mbochs_init_dev()
547 mdev_state->next_id = 1; in mbochs_init_dev()
549 mdev_state->type = type; in mbochs_init_dev()
550 mdev_state->edid_regs.max_xres = type->max_x; in mbochs_init_dev()
551 mdev_state->edid_regs.max_yres = type->max_y; in mbochs_init_dev()
552 mdev_state->edid_regs.edid_offset = MBOCHS_EDID_BLOB_OFFSET; in mbochs_init_dev()
553 mdev_state->edid_regs.edid_max_size = sizeof(mdev_state->edid_blob); in mbochs_init_dev()
554 mbochs_create_config_space(mdev_state); in mbochs_init_dev()
555 mbochs_reset(mdev_state); in mbochs_init_dev()
558 type->type.pretty_name, type->mbytes, mdev_state->pagecount); in mbochs_init_dev()
562 kfree(mdev_state->vconfig); in mbochs_init_dev()
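
mbochs_init_dev() allocates vconfig and the pages[] array, initializes the lock, dma-buf list, and EDID registers, then builds config space and performs an initial reset; the error path at line 562 frees vconfig, the classic goto-unwind shape. A compressed kernel-style sketch of that unwinding (builds only in-tree; CONFIG_SPACE_SIZE is a placeholder):

/* Goto-based error unwinding as used by mbochs_init_dev() (sketch). */
static int init_dev_sketch(struct mdev_state *s, u64 memsize)
{
	int ret = -ENOMEM;

	s->vconfig = kzalloc(CONFIG_SPACE_SIZE, GFP_KERNEL);
	if (!s->vconfig)
		goto err_out;

	s->pagecount = memsize >> PAGE_SHIFT;
	s->pages = kcalloc(s->pagecount, sizeof(*s->pages), GFP_KERNEL);
	if (!s->pages)
		goto err_vconfig;

	return 0;

err_vconfig:
	kfree(s->vconfig);
err_out:
	return ret;
}
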
570 struct mdev_state *mdev_state; in mbochs_probe() local
573 mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev, in mbochs_probe()
575 if (IS_ERR(mdev_state)) in mbochs_probe()
576 return PTR_ERR(mdev_state); in mbochs_probe()
578 ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev); in mbochs_probe()
581 dev_set_drvdata(&mdev->dev, mdev_state); in mbochs_probe()
585 vfio_put_device(&mdev_state->vdev); in mbochs_probe()
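
The probe path follows the standard emulated-IOMMU VFIO flow: vfio_alloc_device() embeds struct vfio_device inside mdev_state and runs the init op, the device is registered, and the pointer is stashed in drvdata; if registration fails, vfio_put_device() drops the reference (which eventually invokes the release op). Schematically (kernel-only sketch; mbochs_dev_ops is the driver's vfio_device_ops table):

static int probe_sketch(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state;
	int ret;

	mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev,
				       &mbochs_dev_ops);
	if (IS_ERR(mdev_state))
		return PTR_ERR(mdev_state);

	ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
	if (ret) {
		vfio_put_device(&mdev_state->vdev);
		return ret;
	}
	dev_set_drvdata(&mdev->dev, mdev_state);
	return 0;
}
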
591 struct mdev_state *mdev_state = in mbochs_release_dev() local
592 container_of(vdev, struct mdev_state, vdev); in mbochs_release_dev()
594 atomic_add(mdev_state->type->mbytes, &mbochs_avail_mbytes); in mbochs_release_dev()
595 kfree(mdev_state->pages); in mbochs_release_dev()
596 kfree(mdev_state->vconfig); in mbochs_release_dev()
601 struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev); in mbochs_remove() local
603 vfio_unregister_group_dev(&mdev_state->vdev); in mbochs_remove()
604 vfio_put_device(&mdev_state->vdev); in mbochs_remove()
610 struct mdev_state *mdev_state = in mbochs_read() local
611 container_of(vdev, struct mdev_state, vdev); in mbochs_read()
621 ret = mdev_access(mdev_state, (char *)&val, sizeof(val), in mbochs_read()
633 ret = mdev_access(mdev_state, (char *)&val, sizeof(val), in mbochs_read()
645 ret = mdev_access(mdev_state, (char *)&val, sizeof(val), in mbochs_read()
671 struct mdev_state *mdev_state = in mbochs_write() local
672 container_of(vdev, struct mdev_state, vdev); in mbochs_write()
685 ret = mdev_access(mdev_state, (char *)&val, sizeof(val), in mbochs_write()
697 ret = mdev_access(mdev_state, (char *)&val, sizeof(val), in mbochs_write()
709 ret = mdev_access(mdev_state, (char *)&val, sizeof(val), in mbochs_write()
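
The three mdev_access() calls in each of mbochs_read() and mbochs_write() are the 4-, 2-, and 1-byte arms of a width ladder: the loop consumes the buffer in the widest naturally aligned chunks it can. The selection logic, extracted into plain C:

#include <stddef.h>
#include <stdint.h>

/* Widest naturally-aligned access width, as chosen by the read/write
 * loops before each mdev_access() call. */
static size_t access_width(uint64_t pos, size_t remaining)
{
	if (remaining >= 4 && !(pos % 4))
		return 4;
	if (remaining >= 2 && !(pos % 2))
		return 2;
	return 1;
}
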
727 static struct page *__mbochs_get_page(struct mdev_state *mdev_state, in __mbochs_get_page() argument
730 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock)); in __mbochs_get_page()
732 if (!mdev_state->pages[pgoff]) { in __mbochs_get_page()
733 mdev_state->pages[pgoff] = in __mbochs_get_page()
735 if (!mdev_state->pages[pgoff]) in __mbochs_get_page()
739 get_page(mdev_state->pages[pgoff]); in __mbochs_get_page()
740 return mdev_state->pages[pgoff]; in __mbochs_get_page()
743 static struct page *mbochs_get_page(struct mdev_state *mdev_state, in mbochs_get_page() argument
748 if (WARN_ON(pgoff >= mdev_state->pagecount)) in mbochs_get_page()
751 mutex_lock(&mdev_state->ops_lock); in mbochs_get_page()
752 page = __mbochs_get_page(mdev_state, pgoff); in mbochs_get_page()
753 mutex_unlock(&mdev_state->ops_lock); in mbochs_get_page()
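
The getter comes in two levels, a common kernel pattern: __mbochs_get_page() asserts (WARN_ON) that ops_lock is already held and allocates the backing page on first touch, while mbochs_get_page() adds the range check and the locking. Every call hands the caller an extra reference via get_page(). Sketch (allocation flags are an assumption; the alloc call itself does not appear in this listing):

/* Lazy-allocation accessor pair (kernel-style sketch). */
static struct page *__get_page_locked(struct mdev_state *s, pgoff_t pgoff)
{
	WARN_ON(!mutex_is_locked(&s->ops_lock));

	if (!s->pages[pgoff]) {
		s->pages[pgoff] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!s->pages[pgoff])
			return NULL;
	}
	get_page(s->pages[pgoff]);      /* caller owns one extra reference */
	return s->pages[pgoff];
}

static struct page *get_page_checked(struct mdev_state *s, pgoff_t pgoff)
{
	struct page *page;

	if (WARN_ON(pgoff >= s->pagecount))
		return NULL;

	mutex_lock(&s->ops_lock);
	page = __get_page_locked(s, pgoff);
	mutex_unlock(&s->ops_lock);
	return page;
}
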
758 static void mbochs_put_pages(struct mdev_state *mdev_state) in mbochs_put_pages() argument
760 struct device *dev = mdev_dev(mdev_state->mdev); in mbochs_put_pages()
763 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock)); in mbochs_put_pages()
765 for (i = 0; i < mdev_state->pagecount; i++) { in mbochs_put_pages()
766 if (!mdev_state->pages[i]) in mbochs_put_pages()
768 put_page(mdev_state->pages[i]); in mbochs_put_pages()
769 mdev_state->pages[i] = NULL; in mbochs_put_pages()
778 struct mdev_state *mdev_state = vma->vm_private_data; in mbochs_region_vm_fault() local
781 if (page_offset >= mdev_state->pagecount) in mbochs_region_vm_fault()
784 vmf->page = mbochs_get_page(mdev_state, page_offset); in mbochs_region_vm_fault()
797 struct mdev_state *mdev_state = in mbochs_mmap() local
798 container_of(vdev, struct mdev_state, vdev); in mbochs_mmap()
804 if (vma->vm_end - vma->vm_start > mdev_state->memsize) in mbochs_mmap()
810 vma->vm_private_data = mdev_state; in mbochs_mmap()
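
mbochs_mmap() maps nothing eagerly: it validates the requested range against memsize, stores mdev_state in vm_private_data, and installs vm_ops whose fault handler pulls pages in on demand through mbochs_get_page(). The fault side, sketched:

/* On-demand fault handler shape (kernel-only sketch; the offset
 * computation from vmf->address is simplified here to vmf->pgoff). */
static vm_fault_t region_vm_fault_sketch(struct vm_fault *vmf)
{
	struct mdev_state *s = vmf->vma->vm_private_data;
	pgoff_t page_offset = vmf->pgoff;

	if (page_offset >= s->pagecount)
		return VM_FAULT_SIGBUS;

	vmf->page = get_page_checked(s, page_offset);
	if (!vmf->page)
		return VM_FAULT_SIGBUS;

	return 0;   /* core mm consumes the page reference taken above */
}
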
834 struct device *dev = mdev_dev(dmabuf->mdev_state->mdev); in mbochs_mmap_dmabuf()
849 struct device *dev = mdev_dev(dmabuf->mdev_state->mdev); in mbochs_print_dmabuf()
866 struct device *dev = mdev_dev(dmabuf->mdev_state->mdev); in mbochs_map_dmabuf()
895 struct device *dev = mdev_dev(dmabuf->mdev_state->mdev); in mbochs_unmap_dmabuf()
907 struct mdev_state *mdev_state = dmabuf->mdev_state; in mbochs_release_dmabuf() local
908 struct device *dev = mdev_dev(mdev_state->mdev); in mbochs_release_dmabuf()
916 mutex_lock(&mdev_state->ops_lock); in mbochs_release_dmabuf()
920 mutex_unlock(&mdev_state->ops_lock); in mbochs_release_dmabuf()
930 static struct mbochs_dmabuf *mbochs_dmabuf_alloc(struct mdev_state *mdev_state, in mbochs_dmabuf_alloc() argument
936 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock)); in mbochs_dmabuf_alloc()
943 dmabuf->id = mdev_state->next_id++; in mbochs_dmabuf_alloc()
952 dmabuf->pages[pg] = __mbochs_get_page(mdev_state, in mbochs_dmabuf_alloc()
958 dmabuf->mdev_state = mdev_state; in mbochs_dmabuf_alloc()
959 list_add(&dmabuf->next, &mdev_state->dmabufs); in mbochs_dmabuf_alloc()
974 mbochs_dmabuf_find_by_mode(struct mdev_state *mdev_state, in mbochs_dmabuf_find_by_mode() argument
979 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock)); in mbochs_dmabuf_find_by_mode()
981 list_for_each_entry(dmabuf, &mdev_state->dmabufs, next) in mbochs_dmabuf_find_by_mode()
989 mbochs_dmabuf_find_by_id(struct mdev_state *mdev_state, u32 id) in mbochs_dmabuf_find_by_id() argument
993 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock)); in mbochs_dmabuf_find_by_id()
995 list_for_each_entry(dmabuf, &mdev_state->dmabufs, next) in mbochs_dmabuf_find_by_id()
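
Both lookup helpers scan mdev_state->dmabufs with the lock asserted rather than taken, since every caller already holds ops_lock. The by-id variant reduces to a plain list walk:

/* Lookup-by-id shape (kernel-style sketch; field names follow the
 * struct sketch near the top of this listing). */
static struct mbochs_dmabuf *find_by_id_sketch(struct mdev_state *s, u32 id)
{
	struct mbochs_dmabuf *dmabuf;

	WARN_ON(!mutex_is_locked(&s->ops_lock));

	list_for_each_entry(dmabuf, &s->dmabufs, next)
		if (dmabuf->id == id)
			return dmabuf;

	return NULL;
}
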
1004 struct mdev_state *mdev_state = dmabuf->mdev_state; in mbochs_dmabuf_export() local
1005 struct device *dev = mdev_state->vdev.dev; in mbochs_dmabuf_export()
1009 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock)); in mbochs_dmabuf_export()
1033 static int mbochs_get_region_info(struct mdev_state *mdev_state, in mbochs_get_region_info() argument
1050 region_info->size = mdev_state->memsize; in mbochs_get_region_info()
1098 static int mbochs_query_gfx_plane(struct mdev_state *mdev_state, in mbochs_query_gfx_plane() argument
1121 mutex_lock(&mdev_state->ops_lock); in mbochs_query_gfx_plane()
1125 ret = mbochs_check_framebuffer(mdev_state, &mode); in mbochs_query_gfx_plane()
1136 dmabuf = mbochs_dmabuf_find_by_mode(mdev_state, &mode); in mbochs_query_gfx_plane()
1138 mbochs_dmabuf_alloc(mdev_state, &mode); in mbochs_query_gfx_plane()
1140 mutex_unlock(&mdev_state->ops_lock); in mbochs_query_gfx_plane()
1153 mdev_state->active_id != plane->dmabuf_id) { in mbochs_query_gfx_plane()
1154 dev_dbg(mdev_state->vdev.dev, "%s: primary: %d => %d\n", in mbochs_query_gfx_plane()
1155 __func__, mdev_state->active_id, plane->dmabuf_id); in mbochs_query_gfx_plane()
1156 mdev_state->active_id = plane->dmabuf_id; in mbochs_query_gfx_plane()
1158 mutex_unlock(&mdev_state->ops_lock); in mbochs_query_gfx_plane()
1162 static int mbochs_get_gfx_dmabuf(struct mdev_state *mdev_state, u32 id) in mbochs_get_gfx_dmabuf() argument
1166 mutex_lock(&mdev_state->ops_lock); in mbochs_get_gfx_dmabuf()
1168 dmabuf = mbochs_dmabuf_find_by_id(mdev_state, id); in mbochs_get_gfx_dmabuf()
1170 mutex_unlock(&mdev_state->ops_lock); in mbochs_get_gfx_dmabuf()
1177 mutex_unlock(&mdev_state->ops_lock); in mbochs_get_gfx_dmabuf()
1188 struct mdev_state *mdev_state = in mbochs_ioctl() local
1189 container_of(vdev, struct mdev_state, vdev); in mbochs_ioctl()
1230 ret = mbochs_get_region_info(mdev_state, &info); in mbochs_ioctl()
1276 ret = mbochs_query_gfx_plane(mdev_state, &plane); in mbochs_ioctl()
1293 return mbochs_get_gfx_dmabuf(mdev_state, dmabuf_id); in mbochs_ioctl()
1300 return mbochs_reset(mdev_state); in mbochs_ioctl()
1307 struct mdev_state *mdev_state = in mbochs_close_device() local
1308 container_of(vdev, struct mdev_state, vdev); in mbochs_close_device()
1311 mutex_lock(&mdev_state->ops_lock); in mbochs_close_device()
1313 list_for_each_entry_safe(dmabuf, tmp, &mdev_state->dmabufs, next) { in mbochs_close_device()
1322 mbochs_put_pages(mdev_state); in mbochs_close_device()
1324 mutex_unlock(&mdev_state->ops_lock); in mbochs_close_device()
1331 struct mdev_state *mdev_state = dev_get_drvdata(dev); in memory_show() local
1333 return sprintf(buf, "%d MB\n", mdev_state->type->mbytes); in memory_show()
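
memory_show() is a stock device-attribute callback: fetch mdev_state from drvdata, format one value. On newer kernels sysfs_emit() is preferred over sprintf() for attribute output; a sketch of the same callback in that style (kernel-only; the real attribute is wired up with DEVICE_ATTR_RO()):

static ssize_t memory_show_sketch(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct mdev_state *mdev_state = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d MB\n", mdev_state->type->mbytes);
}
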