Lines matching refs: vbo — each entry gives the source line number, the matching line of code, and the enclosing function, with a note on whether vbo is an argument or a local variable there.
81 static void vmw_bo_dirty_scan_pagetable(struct vmw_buffer_object *vbo) in vmw_bo_dirty_scan_pagetable() argument
83 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_dirty_scan_pagetable()
84 pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node); in vmw_bo_dirty_scan_pagetable()
85 struct address_space *mapping = vbo->base.bdev->dev_mapping; in vmw_bo_dirty_scan_pagetable()
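
The hits at lines 81-85 are only the local-variable setup of vmw_bo_dirty_scan_pagetable(). A rough sketch of how that setup is plausibly used follows; the clean_record_shared_mapping_range() call, the change_count heuristic and the VMW_DIRTY_NUM_CHANGE_TRIGGERS threshold are not visible in the listing and are assumptions here.

static void vmw_bo_dirty_scan_pagetable(struct vmw_buffer_object *vbo)
{
        struct vmw_bo_dirty *dirty = vbo->dirty;
        pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node);
        struct address_space *mapping = vbo->base.bdev->dev_mapping;
        pgoff_t num_marked;

        /* Harvest dirty PTE bits into the per-BO bitmap and clean them. */
        num_marked = clean_record_shared_mapping_range(mapping, offset,
                                                       dirty->bitmap_size,
                                                       offset, &dirty->bitmap[0],
                                                       &dirty->start, &dirty->end);

        /* Repeated scans with no hits: assume the BO is written rarely. */
        if (num_marked == 0)
                dirty->change_count++;
        else
                dirty->change_count = 0;

        /* Assumed threshold: switch to write-notify (mkwrite) tracking and
         * write-protect the range so the next writes fault in. */
        if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
                dirty->change_count = 0;
                dirty->method = VMW_BO_DIRTY_MKWRITE;
                wp_shared_mapping_range(mapping, offset, dirty->bitmap_size);
        }
}
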
119 static void vmw_bo_dirty_scan_mkwrite(struct vmw_buffer_object *vbo) in vmw_bo_dirty_scan_mkwrite() argument
121 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_dirty_scan_mkwrite()
122 unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node); in vmw_bo_dirty_scan_mkwrite()
123 struct address_space *mapping = vbo->base.bdev->dev_mapping; in vmw_bo_dirty_scan_mkwrite()
129 num_marked = wp_shared_mapping_range(vbo->base.bdev->dev_mapping, in vmw_bo_dirty_scan_mkwrite()
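
Lines 119-129 cover vmw_bo_dirty_scan_mkwrite(), whose only visible call is wp_shared_mapping_range() on the dirtied span. A hedged sketch of the surrounding logic; the percentage heuristic and the VMW_DIRTY_PERCENTAGE / VMW_DIRTY_NUM_CHANGE_TRIGGERS names are assumptions.

static void vmw_bo_dirty_scan_mkwrite(struct vmw_buffer_object *vbo)
{
        struct vmw_bo_dirty *dirty = vbo->dirty;
        unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
        struct address_space *mapping = vbo->base.bdev->dev_mapping;
        pgoff_t num_marked;

        /* Nothing was written since the last scan. */
        if (dirty->end <= dirty->start)
                return;

        /* Re-arm write-notify on the span that was dirtied. */
        num_marked = wp_shared_mapping_range(vbo->base.bdev->dev_mapping,
                                             dirty->start + offset,
                                             dirty->end - dirty->start);

        /* Assumed heuristic: if a large fraction keeps getting re-dirtied,
         * per-page write faults are more expensive than pagetable scans. */
        if (100UL * num_marked / dirty->bitmap_size > VMW_DIRTY_PERCENTAGE)
                dirty->change_count++;
        else
                dirty->change_count = 0;

        if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
                pgoff_t start = 0;
                pgoff_t end = dirty->bitmap_size;

                dirty->method = VMW_BO_DIRTY_PAGETABLE;
                /* Pick up whatever is already dirty before relying on
                 * pagetable scans again. */
                clean_record_shared_mapping_range(mapping, offset, end, offset,
                                                  &dirty->bitmap[0],
                                                  &start, &end);
                dirty->change_count = 0;
        }
}
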
163 void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo) in vmw_bo_dirty_scan() argument
165 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_dirty_scan()
168 vmw_bo_dirty_scan_pagetable(vbo); in vmw_bo_dirty_scan()
170 vmw_bo_dirty_scan_mkwrite(vbo); in vmw_bo_dirty_scan()
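
Lines 163-170 show vmw_bo_dirty_scan() calling both scanners, so the body is presumably a simple dispatch on the current tracking method (the exact branch condition is assumed).

void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo)
{
        struct vmw_bo_dirty *dirty = vbo->dirty;

        /* Dispatch on whichever tracking method is currently in use. */
        if (dirty->method == VMW_BO_DIRTY_PAGETABLE)
                vmw_bo_dirty_scan_pagetable(vbo);
        else
                vmw_bo_dirty_scan_mkwrite(vbo);
}
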
184 static void vmw_bo_dirty_pre_unmap(struct vmw_buffer_object *vbo, in vmw_bo_dirty_pre_unmap() argument
187 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_dirty_pre_unmap()
188 unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node); in vmw_bo_dirty_pre_unmap()
189 struct address_space *mapping = vbo->base.bdev->dev_mapping; in vmw_bo_dirty_pre_unmap()
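
Lines 184-189 set up vmw_bo_dirty_pre_unmap(). A plausible continuation, assuming its job is to flush pagetable-tracked dirty bits into the bitmap before the CPU mappings disappear; the helper calls beyond those shown above are assumptions.

static void vmw_bo_dirty_pre_unmap(struct vmw_buffer_object *vbo,
                                   pgoff_t start, pgoff_t end)
{
        struct vmw_bo_dirty *dirty = vbo->dirty;
        unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
        struct address_space *mapping = vbo->base.bdev->dev_mapping;

        /* Mkwrite tracking already recorded writes at fault time. */
        if (dirty->method != VMW_BO_DIRTY_PAGETABLE || start >= end)
                return;

        /* Pick up any dirty bits in the range before the PTEs go away. */
        wp_shared_mapping_range(mapping, start + offset, end - start);
        clean_record_shared_mapping_range(mapping, start + offset, end - start,
                                          offset, &dirty->bitmap[0],
                                          &dirty->start, &dirty->end);
}
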
209 void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo, in vmw_bo_dirty_unmap() argument
212 unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node); in vmw_bo_dirty_unmap()
213 struct address_space *mapping = vbo->base.bdev->dev_mapping; in vmw_bo_dirty_unmap()
215 vmw_bo_dirty_pre_unmap(vbo, start, end); in vmw_bo_dirty_unmap()
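
Lines 209-215 show vmw_bo_dirty_unmap() delegating to vmw_bo_dirty_pre_unmap() before tearing down the CPU mappings; a sketch of how the two visible locals plausibly feed unmap_shared_mapping_range() (that call itself is an assumption).

void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
                        pgoff_t start, pgoff_t end)
{
        unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
        struct address_space *mapping = vbo->base.bdev->dev_mapping;

        /* Record pending dirty state, then drop the CPU mappings. */
        vmw_bo_dirty_pre_unmap(vbo, start, end);
        unmap_shared_mapping_range(mapping, (offset + start) << PAGE_SHIFT,
                                   (loff_t)(end - start) << PAGE_SHIFT);
}
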
230 int vmw_bo_dirty_add(struct vmw_buffer_object *vbo) in vmw_bo_dirty_add() argument
232 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_dirty_add()
233 pgoff_t num_pages = PFN_UP(vbo->base.resource->size); in vmw_bo_dirty_add()
256 struct address_space *mapping = vbo->base.bdev->dev_mapping; in vmw_bo_dirty_add()
257 pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node); in vmw_bo_dirty_add()
269 vbo->dirty = dirty; in vmw_bo_dirty_add()
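
Lines 230-269 span vmw_bo_dirty_add(): refcounting an existing tracker, otherwise allocating one bitmap bit per backing page and picking an initial tracking method. The allocation details, the size threshold and the initial write-protect are assumptions in this sketch.

int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
{
        struct vmw_bo_dirty *dirty = vbo->dirty;
        pgoff_t num_pages = PFN_UP(vbo->base.resource->size);
        size_t size;

        /* Dirty tracking is refcounted per buffer object. */
        if (dirty) {
                dirty->ref_count++;
                return 0;
        }

        /* One bitmap bit per page, allocated with the tracking struct. */
        size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long);
        dirty = kvzalloc(size, GFP_KERNEL);
        if (!dirty)
                return -ENOMEM;

        dirty->bitmap_size = num_pages;
        dirty->start = dirty->bitmap_size;
        dirty->end = 0;
        dirty->ref_count = 1;

        /* Assumed threshold: small BOs are cheap to scan via pagetables,
         * large ones start out with write-notify tracking. */
        if (num_pages < PAGE_SIZE / sizeof(pte_t)) {
                dirty->method = VMW_BO_DIRTY_PAGETABLE;
        } else {
                struct address_space *mapping = vbo->base.bdev->dev_mapping;
                pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node);

                dirty->method = VMW_BO_DIRTY_MKWRITE;
                /* Write-protect so the first write to each page faults. */
                wp_shared_mapping_range(mapping, offset, num_pages);
        }

        vbo->dirty = dirty;
        return 0;
}
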
287 void vmw_bo_dirty_release(struct vmw_buffer_object *vbo) in vmw_bo_dirty_release() argument
289 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_dirty_release()
293 vbo->dirty = NULL; in vmw_bo_dirty_release()
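
Lines 287-293 show vmw_bo_dirty_release() clearing vbo->dirty; presumably the counterpart of the reference taken in vmw_bo_dirty_add() (the refcount check and kvfree() are assumptions).

void vmw_bo_dirty_release(struct vmw_buffer_object *vbo)
{
        struct vmw_bo_dirty *dirty = vbo->dirty;

        /* Drop a reference; free the tracking state on the last one. */
        if (dirty && --dirty->ref_count == 0) {
                kvfree(dirty);
                vbo->dirty = NULL;
        }
}
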
309 struct vmw_buffer_object *vbo = res->backup; in vmw_bo_dirty_transfer_to_res() local
310 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_dirty_transfer_to_res()
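
Lines 309-310 only show vmw_bo_dirty_transfer_to_res() fetching the backup buffer and its dirty tracker. A hedged sketch of the transfer loop; the vmw_resource_dirty_update() hook, the res->backup_offset / res->backup_size fields and the omitted trimming of dirty->start/end are assumptions.

void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res)
{
        struct vmw_buffer_object *vbo = res->backup;
        struct vmw_bo_dirty *dirty = vbo->dirty;
        pgoff_t res_start = res->backup_offset >> PAGE_SHIFT;
        pgoff_t res_end = DIV_ROUND_UP(res->backup_offset + res->backup_size,
                                       PAGE_SIZE);
        pgoff_t cur, limit;

        /* Nothing dirty inside this resource's page range. */
        if (res_start >= dirty->end || res_end <= dirty->start)
                return;

        cur = max(res_start, dirty->start);
        limit = min(res_end, dirty->end);

        /* Hand each contiguous run of dirty pages to the resource's own
         * dirty tracking, then drop it from the BO-level bitmap. */
        while (cur < limit) {
                pgoff_t run_start = find_next_bit(&dirty->bitmap[0], limit, cur);
                pgoff_t run_end;

                if (run_start >= limit)
                        break;
                run_end = find_next_zero_bit(&dirty->bitmap[0], limit, run_start);
                vmw_resource_dirty_update(res, run_start, run_end);
                bitmap_clear(&dirty->bitmap[0], run_start, run_end - run_start);
                cur = run_end;
        }
}
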
356 struct vmw_buffer_object *vbo = res->backup; in vmw_bo_dirty_clear_res() local
357 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_dirty_clear_res()
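
Lines 356-357 are the matching setup in vmw_bo_dirty_clear_res(); presumably it just drops the resource's page range from the BO-level bitmap. The range clipping shown here is an assumption, and any adjustment of dirty->start/end is omitted from this sketch.

void vmw_bo_dirty_clear_res(struct vmw_resource *res)
{
        struct vmw_buffer_object *vbo = res->backup;
        struct vmw_bo_dirty *dirty = vbo->dirty;
        pgoff_t res_start = res->backup_offset >> PAGE_SHIFT;
        pgoff_t res_end = DIV_ROUND_UP(res->backup_offset + res->backup_size,
                                       PAGE_SIZE);

        /* Nothing recorded for this resource's page range. */
        if (res_start >= dirty->end || res_end <= dirty->start)
                return;

        /* Clear the resource's pages from the BO-level dirty bitmap. */
        res_start = max(res_start, dirty->start);
        res_end = min(res_end, dirty->end);
        bitmap_clear(&dirty->bitmap[0], res_start, res_end - res_start);
}
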
383 struct vmw_buffer_object *vbo = in vmw_bo_vm_mkwrite() local
384 container_of(bo, typeof(*vbo), base); in vmw_bo_vm_mkwrite()
403 if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE && in vmw_bo_vm_mkwrite()
404 !test_bit(page_offset, &vbo->dirty->bitmap[0])) { in vmw_bo_vm_mkwrite()
405 struct vmw_bo_dirty *dirty = vbo->dirty; in vmw_bo_vm_mkwrite()
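
Lines 383-405 are the mkwrite handler: on the first write to a page it sets that page's bit and widens the dirty span. The reservation and bounds-check plumbing around the visible fragment (ttm_bo_vm_reserve(), the SIGBUS path, dma_resv_unlock()) is assumed in this sketch.

vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct ttm_buffer_object *bo = vma->vm_private_data;
        struct vmw_buffer_object *vbo =
                container_of(bo, typeof(*vbo), base);
        unsigned long page_offset;
        vm_fault_t ret;

        ret = ttm_bo_vm_reserve(bo, vmf);
        if (ret)
                return ret;

        page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
        if (page_offset >= PFN_UP(bo->resource->size)) {
                ret = VM_FAULT_SIGBUS;
                goto out_unlock;
        }

        /* First write to this page since the last scan: record it. */
        if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE &&
            !test_bit(page_offset, &vbo->dirty->bitmap[0])) {
                struct vmw_bo_dirty *dirty = vbo->dirty;

                __set_bit(page_offset, &dirty->bitmap[0]);
                dirty->start = min(dirty->start, page_offset);
                dirty->end = max(dirty->end, page_offset + 1);
        }

out_unlock:
        dma_resv_unlock(bo->base.resv);
        return ret;
}
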
422 struct vmw_buffer_object *vbo = in vmw_bo_vm_fault() local
435 if (vbo->dirty) { in vmw_bo_vm_fault()
442 vmw_resources_clean(vbo, page_offset, in vmw_bo_vm_fault()
457 if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE) in vmw_bo_vm_fault()
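
Lines 422-457 are the fault handler: with dirty tracking enabled it bounds prefaulting via vmw_resources_clean() and, for mkwrite tracking, installs read-only PTEs so the next write faults again. The prefault bookkeeping and the ttm_bo_vm_fault_reserved() call shape are assumptions in this sketch.

vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct ttm_buffer_object *bo = vma->vm_private_data;
        struct vmw_buffer_object *vbo =
                container_of(bo, typeof(*vbo), base);
        pgoff_t num_prefault = TTM_BO_VM_NUM_PREFAULT;
        pgprot_t prot;
        vm_fault_t ret;

        ret = ttm_bo_vm_reserve(bo, vmf);
        if (ret)
                return ret;

        if (vbo->dirty) {
                pgoff_t allowed_prefault;
                unsigned long page_offset;

                page_offset = vmf->pgoff -
                        drm_vma_node_start(&bo->base.vma_node);
                /* Refuse the fault if the page is out of bounds or the
                 * backing resources cannot be cleaned for it; otherwise
                 * limit prefaulting to what the cleaner allows. */
                if (page_offset >= PFN_UP(bo->resource->size) ||
                    vmw_resources_clean(vbo, page_offset,
                                        page_offset + PAGE_SIZE,
                                        &allowed_prefault)) {
                        ret = VM_FAULT_SIGBUS;
                        goto out_unlock;
                }
                num_prefault = min(num_prefault, allowed_prefault);
        }

        /* With mkwrite tracking, map read-only so the first write faults;
         * otherwise use the full VMA protection. */
        if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE)
                prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED);
        else
                prot = vm_get_page_prot(vma->vm_flags);

        ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault);
        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                return ret;

out_unlock:
        dma_resv_unlock(bo->base.resv);
        return ret;
}
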