Lines matching references to mgr in amdgpu_vram_mgr.c:

48 to_amdgpu_device(struct amdgpu_vram_mgr *mgr)  in to_amdgpu_device()  argument
50 return container_of(mgr, struct amdgpu_device, mman.vram_mgr); in to_amdgpu_device()
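The to_amdgpu_device() reference above recovers the enclosing amdgpu_device from a pointer to the VRAM manager embedded inside it, using container_of(). A minimal standalone illustration of that pattern follows; the struct layouts are hypothetical stand-ins, not the real amdgpu definitions.

```c
/* Standalone demo of the container_of pattern used by to_amdgpu_device().
 * The structs are mocks, not the real amdgpu definitions. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vram_mgr {		/* stand-in for struct amdgpu_vram_mgr */
	unsigned long default_page_size;
};

struct device_mock {		/* stand-in for struct amdgpu_device */
	int id;
	struct {
		struct vram_mgr vram_mgr;
	} mman;			/* mirrors the adev->mman.vram_mgr nesting */
};

/* Same idea as to_amdgpu_device(): subtract the member offset to get back
 * to the enclosing structure. */
static struct device_mock *to_device(struct vram_mgr *mgr)
{
	return container_of(mgr, struct device_mock, mman.vram_mgr);
}

int main(void)
{
	struct device_mock dev = { .id = 42 };
	struct vram_mgr *mgr = &dev.mman.vram_mgr;

	printf("recovered id: %d\n", to_device(mgr)->id);	/* prints 42 */
	return 0;
}
```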
271 struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); in amdgpu_vram_mgr_do_reserve() local
272 struct amdgpu_device *adev = to_amdgpu_device(mgr); in amdgpu_vram_mgr_do_reserve()
273 struct drm_buddy *mm = &mgr->mm; in amdgpu_vram_mgr_do_reserve()
278 list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) { in amdgpu_vram_mgr_do_reserve()
292 atomic64_add(vis_usage, &mgr->vis_usage); in amdgpu_vram_mgr_do_reserve()
296 list_move(&rsv->blocks, &mgr->reserved_pages); in amdgpu_vram_mgr_do_reserve()
309 int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr, in amdgpu_vram_mgr_reserve_range() argument
324 mutex_lock(&mgr->lock); in amdgpu_vram_mgr_reserve_range()
325 list_add_tail(&rsv->blocks, &mgr->reservations_pending); in amdgpu_vram_mgr_reserve_range()
326 amdgpu_vram_mgr_do_reserve(&mgr->manager); in amdgpu_vram_mgr_reserve_range()
327 mutex_unlock(&mgr->lock); in amdgpu_vram_mgr_reserve_range()
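amdgpu_vram_mgr_reserve_range() queues a reservation on mgr->reservations_pending under mgr->lock and immediately calls amdgpu_vram_mgr_do_reserve(), which moves whatever it can actually carve out of the allocator onto mgr->reserved_pages. A much-simplified userspace model of that two-list flow follows; the trivial range_fits() check stands in for the real drm_buddy allocation, and all names are illustrative.

```c
/* Simplified model of the pending -> reserved flow suggested by
 * amdgpu_vram_mgr_reserve_range()/amdgpu_vram_mgr_do_reserve(). */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct reservation {
	uint64_t start, size;
	struct reservation *next;
};

struct vram_mgr_model {
	pthread_mutex_t lock;
	struct reservation *pending;	/* like mgr->reservations_pending */
	struct reservation *reserved;	/* like mgr->reserved_pages */
	uint64_t vram_size;		/* stand-in for the buddy allocator */
};

/* Toy allocation check: a range can be carved out iff it lies inside VRAM. */
static bool range_fits(struct vram_mgr_model *mgr, struct reservation *rsv)
{
	return rsv->start + rsv->size <= mgr->vram_size;
}

/* Commit whatever pending reservations currently fit (cf. do_reserve()). */
static void do_reserve_locked(struct vram_mgr_model *mgr)
{
	struct reservation **p = &mgr->pending;

	while (*p) {
		struct reservation *rsv = *p;

		if (!range_fits(mgr, rsv)) {	/* leave it pending */
			p = &rsv->next;
			continue;
		}
		*p = rsv->next;			/* unlink from pending */
		rsv->next = mgr->reserved;	/* push onto reserved */
		mgr->reserved = rsv;
		printf("reserved [0x%llx, 0x%llx)\n",
		       (unsigned long long)rsv->start,
		       (unsigned long long)(rsv->start + rsv->size));
	}
}

/* Queue a reservation and try to satisfy it (cf. reserve_range()). */
static int reserve_range(struct vram_mgr_model *mgr, uint64_t start, uint64_t size)
{
	struct reservation *rsv = calloc(1, sizeof(*rsv));

	if (!rsv)
		return -1;
	rsv->start = start;
	rsv->size = size;

	pthread_mutex_lock(&mgr->lock);
	rsv->next = mgr->pending;
	mgr->pending = rsv;
	do_reserve_locked(mgr);
	pthread_mutex_unlock(&mgr->lock);
	return 0;
}

int main(void)
{
	struct vram_mgr_model mgr = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.vram_size = 1ull << 20,	/* 1 MiB of fake VRAM */
	};

	reserve_range(&mgr, 0x0, 0x1000);	 /* fits: committed */
	reserve_range(&mgr, 1ull << 21, 0x1000); /* out of range: stays pending */
	return 0;	/* toy program: the still-pending node is leaked on exit */
}
```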
343 int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr, in amdgpu_vram_mgr_query_page_status() argument
349 mutex_lock(&mgr->lock); in amdgpu_vram_mgr_query_page_status()
351 list_for_each_entry(rsv, &mgr->reservations_pending, blocks) { in amdgpu_vram_mgr_query_page_status()
359 list_for_each_entry(rsv, &mgr->reserved_pages, blocks) { in amdgpu_vram_mgr_query_page_status()
369 mutex_unlock(&mgr->lock); in amdgpu_vram_mgr_query_page_status()
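amdgpu_vram_mgr_query_page_status() then classifies an address against those two lists: still pending reads as busy, committed reads as success, anything else as not found. A standalone sketch of that decision, with hypothetical test ranges in place of the reservation lists:

```c
/* Sketch of the classification done by amdgpu_vram_mgr_query_page_status().
 * The fixed arrays are hypothetical test data standing in for the lists. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, size; };

static int query_page_status(const struct range *pending, int npending,
			     const struct range *reserved, int nreserved,
			     uint64_t addr)
{
	int i;

	for (i = 0; i < npending; i++)
		if (addr >= pending[i].start &&
		    addr < pending[i].start + pending[i].size)
			return -EBUSY;	/* reservation not committed yet */

	for (i = 0; i < nreserved; i++)
		if (addr >= reserved[i].start &&
		    addr < reserved[i].start + reserved[i].size)
			return 0;	/* address is reserved */

	return -ENOENT;			/* no reservation covers it */
}

int main(void)
{
	const struct range pending[]  = { { 0x10000, 0x1000 } };
	const struct range reserved[] = { { 0x20000, 0x1000 } };

	printf("%d %d %d\n",
	       query_page_status(pending, 1, reserved, 1, 0x10800),  /* -EBUSY */
	       query_page_status(pending, 1, reserved, 1, 0x20800),  /* 0 */
	       query_page_status(pending, 1, reserved, 1, 0x30000)); /* -ENOENT */
	return 0;
}
```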
389 struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); in amdgpu_vram_mgr_new() local
390 struct amdgpu_device *adev = to_amdgpu_device(mgr); in amdgpu_vram_mgr_new()
393 struct drm_buddy *mm = &mgr->mm; in amdgpu_vram_mgr_new()
438 if (fpfn || lpfn != mgr->mm.size) in amdgpu_vram_mgr_new()
444 mutex_lock(&mgr->lock); in amdgpu_vram_mgr_new()
449 min_block_size = mgr->default_page_size; in amdgpu_vram_mgr_new()
493 mutex_unlock(&mgr->lock); in amdgpu_vram_mgr_new()
519 mutex_lock(&mgr->lock); in amdgpu_vram_mgr_new()
523 mutex_unlock(&mgr->lock); in amdgpu_vram_mgr_new()
554 atomic64_add(vis_usage, &mgr->vis_usage); in amdgpu_vram_mgr_new()
560 mutex_unlock(&mgr->lock); in amdgpu_vram_mgr_new()
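The vis_usage update in amdgpu_vram_mgr_new() accounts for how many bytes of each allocated block fall inside the CPU-visible part of VRAM. A minimal sketch of that per-block overlap computation, assuming a hypothetical 256 MiB visible window:

```c
/* Overlap of an allocation with the CPU-visible window, as implied by the
 * vis_usage accounting above.  The 256 MiB window size is an assumption. */
#include <stdint.h>
#include <stdio.h>

/* Bytes of [start, start + size) that fall below visible_size. */
static uint64_t vis_bytes(uint64_t start, uint64_t size, uint64_t visible_size)
{
	uint64_t end = start + size;

	if (start >= visible_size)
		return 0;			/* entirely invisible */
	return (end <= visible_size ? end : visible_size) - start;
}

int main(void)
{
	uint64_t visible = 256ull << 20;	/* assume a 256 MiB BAR */

	printf("%llu\n", (unsigned long long)vis_bytes(0, 1 << 20, visible));
	printf("%llu\n", (unsigned long long)vis_bytes(255ull << 20, 2 << 20, visible));
	printf("%llu\n", (unsigned long long)vis_bytes(512ull << 20, 1 << 20, visible));
	return 0;
}
```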
580 struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); in amdgpu_vram_mgr_del() local
581 struct amdgpu_device *adev = to_amdgpu_device(mgr); in amdgpu_vram_mgr_del()
582 struct drm_buddy *mm = &mgr->mm; in amdgpu_vram_mgr_del()
586 mutex_lock(&mgr->lock); in amdgpu_vram_mgr_del()
593 mutex_unlock(&mgr->lock); in amdgpu_vram_mgr_del()
595 atomic64_sub(vis_usage, &mgr->vis_usage); in amdgpu_vram_mgr_del()
719 uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr) in amdgpu_vram_mgr_vis_usage() argument
721 return atomic64_read(&mgr->vis_usage); in amdgpu_vram_mgr_vis_usage()
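Because it is read through amdgpu_vram_mgr_vis_usage() without taking mgr->lock, vis_usage lives in an atomic 64-bit counter that the allocation and free paths adjust with atomic64_add()/atomic64_sub(). A userspace C11 analogue of that pattern (names illustrative):

```c
/* C11 analogue of the vis_usage counter: bumped on allocation, decremented
 * on free, readable without holding the manager lock. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t vis_usage;

static void account_alloc(uint64_t visible_bytes)
{
	atomic_fetch_add(&vis_usage, visible_bytes);	/* like atomic64_add() */
}

static void account_free(uint64_t visible_bytes)
{
	atomic_fetch_sub(&vis_usage, visible_bytes);	/* like atomic64_sub() */
}

static uint64_t vis_usage_read(void)
{
	return atomic_load(&vis_usage);			/* like atomic64_read() */
}

int main(void)
{
	account_alloc(4096);
	account_alloc(8192);
	account_free(4096);
	printf("visible bytes in use: %llu\n",
	       (unsigned long long)vis_usage_read());	/* prints 8192 */
	return 0;
}
```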
739 struct amdgpu_vram_mgr_resource *mgr = to_amdgpu_vram_mgr_resource(res); in amdgpu_vram_mgr_intersects() local
743 list_for_each_entry(block, &mgr->blocks, link) { in amdgpu_vram_mgr_intersects()
772 struct amdgpu_vram_mgr_resource *mgr = to_amdgpu_vram_mgr_resource(res); in amdgpu_vram_mgr_compatible() local
776 list_for_each_entry(block, &mgr->blocks, link) { in amdgpu_vram_mgr_compatible()
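amdgpu_vram_mgr_intersects() and amdgpu_vram_mgr_compatible() walk the buddy blocks backing a resource and test each one against a [fpfn, lpfn) page window: intersects succeeds if any block overlaps the window, compatible only if every block lies inside it. A standalone sketch with the blocks reduced to plain page ranges (hypothetical data):

```c
/* Range tests mirroring the intersects/compatible walks over mgr->blocks.
 * Blocks are reduced to [first, last) page spans; the data is made up. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct blk { uint64_t first, last; };	/* block span in pages, [first, last) */

static bool intersects(const struct blk *b, int n, uint64_t fpfn, uint64_t lpfn)
{
	for (int i = 0; i < n; i++)
		if (b[i].first < lpfn && b[i].last > fpfn)
			return true;	/* some block overlaps the window */
	return false;
}

static bool compatible(const struct blk *b, int n, uint64_t fpfn, uint64_t lpfn)
{
	for (int i = 0; i < n; i++)
		if (b[i].first < fpfn || b[i].last > lpfn)
			return false;	/* a block sticks out of the window */
	return true;
}

int main(void)
{
	const struct blk blocks[] = { { 16, 32 }, { 64, 80 } };

	printf("intersects: %d\n", intersects(blocks, 2, 0, 24));   /* 1 */
	printf("compatible: %d\n", compatible(blocks, 2, 0, 24));   /* 0 */
	printf("compatible: %d\n", compatible(blocks, 2, 0, 128));  /* 1 */
	return 0;
}
```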
801 struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); in amdgpu_vram_mgr_debug() local
802 struct drm_buddy *mm = &mgr->mm; in amdgpu_vram_mgr_debug()
806 amdgpu_vram_mgr_vis_usage(mgr)); in amdgpu_vram_mgr_debug()
808 mutex_lock(&mgr->lock); in amdgpu_vram_mgr_debug()
810 mgr->default_page_size >> 10); in amdgpu_vram_mgr_debug()
815 list_for_each_entry(block, &mgr->reserved_pages, link) in amdgpu_vram_mgr_debug()
817 mutex_unlock(&mgr->lock); in amdgpu_vram_mgr_debug()
837 struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr; in amdgpu_vram_mgr_init() local
838 struct ttm_resource_manager *man = &mgr->manager; in amdgpu_vram_mgr_init()
846 err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE); in amdgpu_vram_mgr_init()
850 mutex_init(&mgr->lock); in amdgpu_vram_mgr_init()
851 INIT_LIST_HEAD(&mgr->reservations_pending); in amdgpu_vram_mgr_init()
852 INIT_LIST_HEAD(&mgr->reserved_pages); in amdgpu_vram_mgr_init()
853 mgr->default_page_size = PAGE_SIZE; in amdgpu_vram_mgr_init()
855 ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager); in amdgpu_vram_mgr_init()
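Condensed from the amdgpu_vram_mgr_init() references above: the buddy allocator covers the manager's full size at PAGE_SIZE granularity, the lock and both reservation lists are set up, and the manager is registered as the TTM_PL_VRAM backend. This sketch keeps only the calls visible in the listing, drops the surrounding setup, and is not buildable on its own.

```c
	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
	struct ttm_resource_manager *man = &mgr->manager;
	int err;

	/* One buddy allocator spanning the whole manager, PAGE_SIZE granules. */
	err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
	if (err)
		return err;

	mutex_init(&mgr->lock);
	INIT_LIST_HEAD(&mgr->reservations_pending);
	INIT_LIST_HEAD(&mgr->reserved_pages);
	mgr->default_page_size = PAGE_SIZE;

	/* Make this manager back the TTM_PL_VRAM placement. */
	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
```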
870 struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr; in amdgpu_vram_mgr_fini() local
871 struct ttm_resource_manager *man = &mgr->manager; in amdgpu_vram_mgr_fini()
881 mutex_lock(&mgr->lock); in amdgpu_vram_mgr_fini()
882 list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) in amdgpu_vram_mgr_fini()
885 list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) { in amdgpu_vram_mgr_fini()
886 drm_buddy_free_list(&mgr->mm, &rsv->allocated); in amdgpu_vram_mgr_fini()
889 drm_buddy_fini(&mgr->mm); in amdgpu_vram_mgr_fini()
890 mutex_unlock(&mgr->lock); in amdgpu_vram_mgr_fini()
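And the matching tear-down order implied by the amdgpu_vram_mgr_fini() references: under mgr->lock, pending reservations (which never received blocks) are simply freed, committed ones hand their blocks back to the buddy allocator, and the allocator itself is destroyed last. Again a condensed, non-standalone sketch; the kfree() calls and declarations fill in what the listed lines elide.

```c
	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
	struct amdgpu_vram_reservation *rsv, *temp;

	mutex_lock(&mgr->lock);
	/* Pending reservations never got blocks from the allocator. */
	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks)
		kfree(rsv);

	/* Committed reservations return their blocks before being freed. */
	list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) {
		drm_buddy_free_list(&mgr->mm, &rsv->allocated);
		kfree(rsv);
	}
	drm_buddy_fini(&mgr->mm);
	mutex_unlock(&mgr->lock);
```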