Lines matching references to "drm" (each entry: source line number, the matching code, the enclosing function, and whether drm is declared there as a local or an argument)
59 struct nouveau_drm *drm = nouveau_drm(dev); in nv10_bo_update_tile_region() local
60 int i = reg - drm->tile.reg; in nv10_bo_update_tile_region()
61 struct nvkm_fb *fb = nvxx_fb(&drm->client.device); in nv10_bo_update_tile_region()
78 struct nouveau_drm *drm = nouveau_drm(dev); in nv10_bo_get_tile_region() local
79 struct nouveau_drm_tile *tile = &drm->tile.reg[i]; in nv10_bo_get_tile_region()
81 spin_lock(&drm->tile.lock); in nv10_bo_get_tile_region()
89 spin_unlock(&drm->tile.lock); in nv10_bo_get_tile_region()
97 struct nouveau_drm *drm = nouveau_drm(dev); in nv10_bo_put_tile_region() local
100 spin_lock(&drm->tile.lock); in nv10_bo_put_tile_region()
103 spin_unlock(&drm->tile.lock); in nv10_bo_put_tile_region()
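The get/put pair above (lines 78-103) shows the recurring pattern: every lookup or release of an entry in drm->tile.reg[] happens under drm->tile.lock. Below is a minimal sketch of that pattern with hypothetical stand-in types (sketch_drm, sketch_tile are not the real nouveau structures, and the real driver also ties regions to fences):

#include <linux/spinlock.h>
#include <linux/types.h>

struct sketch_tile {
	bool used;
};

struct sketch_drm {
	spinlock_t tile_lock;          /* stands in for drm->tile.lock */
	struct sketch_tile reg[8];     /* stands in for drm->tile.reg[] */
};

static struct sketch_tile *sketch_get_tile(struct sketch_drm *drm, int i)
{
	struct sketch_tile *tile = &drm->reg[i];

	spin_lock(&drm->tile_lock);
	if (tile->used)
		tile = NULL;           /* busy: caller must retry or fall back */
	else
		tile->used = true;
	spin_unlock(&drm->tile_lock);
	return tile;
}

static void sketch_put_tile(struct sketch_drm *drm, struct sketch_tile *tile)
{
	if (!tile)
		return;
	spin_lock(&drm->tile_lock);
	tile->used = false;
	spin_unlock(&drm->tile_lock);
}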
111 struct nouveau_drm *drm = nouveau_drm(dev); in nv10_bo_set_tiling() local
112 struct nvkm_fb *fb = nvxx_fb(&drm->client.device); in nv10_bo_set_tiling()
139 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_del_ttm() local
140 struct drm_device *dev = drm->dev; in nouveau_bo_del_ttm()
170 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_fixup_align() local
171 struct nvif_device *device = &drm->client.device; in nouveau_bo_fixup_align()
204 struct nouveau_drm *drm = cli->drm; in nouveau_bo_alloc() local
211 NV_WARN(drm, "skipped size %016llx\n", *size); in nouveau_bo_alloc()
221 nvbo->bo.bdev = &drm->ttm.bdev; in nouveau_bo_alloc()
231 if (!nouveau_drm_use_coherent_gpu_mapping(drm)) in nouveau_bo_alloc()
374 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in set_placement_range() local
375 u64 vram_size = drm->client.device.info.ram_size; in set_placement_range()
378 if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS && in set_placement_range()
424 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_pin() local
433 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && in nouveau_bo_pin()
457 NV_ERROR(drm, "bo %p pinned elsewhere: " in nouveau_bo_pin()
482 drm->gem.vram_available -= bo->base.size; in nouveau_bo_pin()
485 drm->gem.gart_available -= bo->base.size; in nouveau_bo_pin()
501 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_unpin() local
513 drm->gem.vram_available += bo->base.size; in nouveau_bo_unpin()
516 drm->gem.gart_available += bo->base.size; in nouveau_bo_unpin()
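Lines 482-485 and 513-516 are mirror images: pin subtracts the object size from the per-domain "available" counter (VRAM or GART depending on placement), and unpin adds exactly the same amount back. A minimal sketch of that invariant, with simplified stand-in types rather than the real TTM placement dispatch:

#include <linux/types.h>

enum sketch_domain { SKETCH_VRAM, SKETCH_GART };

struct sketch_gem_stats {
	u64 vram_available;
	u64 gart_available;
};

static void sketch_account_pin(struct sketch_gem_stats *gem,
			       enum sketch_domain dom, u64 size)
{
	if (dom == SKETCH_VRAM)
		gem->vram_available -= size;
	else
		gem->gart_available -= size;
}

/* Must undo exactly what sketch_account_pin() did for the same object. */
static void sketch_account_unpin(struct sketch_gem_stats *gem,
				 enum sketch_domain dom, u64 size)
{
	if (dom == SKETCH_VRAM)
		gem->vram_available += size;
	else
		gem->gart_available += size;
}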
554 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_sync_for_device() local
561 NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma); in nouveau_bo_sync_for_device()
580 dma_sync_single_for_device(drm->dev->dev, in nouveau_bo_sync_for_device()
590 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); in nouveau_bo_sync_for_cpu() local
597 NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma); in nouveau_bo_sync_for_cpu()
617 dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i], in nouveau_bo_sync_for_cpu()
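Lines 580 and 617 show per-page cache-coherency handoffs over ttm_dma->dma_address[] using the standard DMA API. A sketch of those loops, with the ttm_dma layout reduced to the two fields the loop needs (a hypothetical sketch_dma_tt, not the real struct ttm_dma_tt):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

struct sketch_dma_tt {
	unsigned long num_pages;
	dma_addr_t *dma_address;
};

/* Flush CPU writes before the device reads/writes the pages. */
static void sketch_sync_for_device(struct device *dev,
				   struct sketch_dma_tt *tt)
{
	unsigned long i;

	for (i = 0; i < tt->num_pages; i++)
		dma_sync_single_for_device(dev, tt->dma_address[i],
					   PAGE_SIZE, DMA_BIDIRECTIONAL);
}

/* Give the pages back to the CPU before it reads what the device wrote. */
static void sketch_sync_for_cpu(struct device *dev,
				struct sketch_dma_tt *tt)
{
	unsigned long i;

	for (i = 0; i < tt->num_pages; i++)
		dma_sync_single_for_cpu(dev, tt->dma_address[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
}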
625 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_add_io_reserve_lru() local
628 mutex_lock(&drm->ttm.io_reserve_mutex); in nouveau_bo_add_io_reserve_lru()
629 list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru); in nouveau_bo_add_io_reserve_lru()
630 mutex_unlock(&drm->ttm.io_reserve_mutex); in nouveau_bo_add_io_reserve_lru()
635 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_del_io_reserve_lru() local
638 mutex_lock(&drm->ttm.io_reserve_mutex); in nouveau_bo_del_io_reserve_lru()
640 mutex_unlock(&drm->ttm.io_reserve_mutex); in nouveau_bo_del_io_reserve_lru()
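Lines 628-640 maintain an LRU of buffer objects whose I/O mappings are reserved: adding moves the node to the list tail (most recently used), deleting detaches it, and both run under drm->ttm.io_reserve_mutex. A sketch with hypothetical container types:

#include <linux/list.h>
#include <linux/mutex.h>

struct sketch_ttm_state {
	struct mutex io_reserve_mutex;
	struct list_head io_reserve_lru;   /* head, oldest entry first */
};

struct sketch_bo {
	struct list_head io_reserve_lru;   /* this object's node on the LRU */
};

static void sketch_add_io_reserve_lru(struct sketch_ttm_state *ttm,
				      struct sketch_bo *bo)
{
	mutex_lock(&ttm->io_reserve_mutex);
	list_move_tail(&bo->io_reserve_lru, &ttm->io_reserve_lru);
	mutex_unlock(&ttm->io_reserve_mutex);
}

static void sketch_del_io_reserve_lru(struct sketch_ttm_state *ttm,
				      struct sketch_bo *bo)
{
	mutex_lock(&ttm->io_reserve_mutex);
	list_del_init(&bo->io_reserve_lru);
	mutex_unlock(&ttm->io_reserve_mutex);
}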
705 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_ttm_tt_create() local
707 if (drm->agp.bridge) { in nouveau_ttm_tt_create()
708 return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags); in nouveau_ttm_tt_create()
720 struct nouveau_drm *drm = nouveau_bdev(bdev); in nouveau_ttm_tt_bind() local
725 if (drm->agp.bridge) in nouveau_ttm_tt_bind()
735 struct nouveau_drm *drm = nouveau_bdev(bdev); in nouveau_ttm_tt_unbind() local
737 if (drm->agp.bridge) { in nouveau_ttm_tt_unbind()
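Lines 705-737 show the same shape in each ttm_tt hook: test drm->agp.bridge first and route to the AGP backend when one is present (e.g. ttm_agp_tt_create at line 708), otherwise fall through to the default path. A bare-bones sketch of that dispatch with placeholder routines:

struct sketch_dev {
	void *agp_bridge;   /* non-NULL on AGP systems; cf. drm->agp.bridge */
};

static int sketch_agp_bind(void *tt)   { return 0; /* AGP backend */ }
static int sketch_sgdma_bind(void *tt) { return 0; /* default SG path */ }

static int sketch_tt_bind(struct sketch_dev *dev, void *tt)
{
	if (dev->agp_bridge)    /* mirrors the checks at lines 707/725/737 */
		return sketch_agp_bind(tt);
	return sketch_sgdma_bind(tt);
}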
764 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo, in nouveau_bo_move_prep() argument
769 struct nvif_vmm *vmm = &drm->client.vmm.vmm; in nouveau_bo_move_prep()
800 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_move_m2mf() local
801 struct nouveau_channel *chan = drm->ttm.chan; in nouveau_bo_move_m2mf()
810 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { in nouveau_bo_move_m2mf()
811 ret = nouveau_bo_move_prep(drm, bo, new_reg); in nouveau_bo_move_m2mf()
816 if (drm_drv_uses_atomic_modeset(drm->dev)) in nouveau_bo_move_m2mf()
822 ret = drm->ttm.move(chan, bo, bo->resource, new_reg); in nouveau_bo_move_m2mf()
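Lines 800-822 outline the accelerated move path: fetch the cached channel (drm->ttm.chan), run a prep step on newer families (line 811), then delegate the actual copy to the drm->ttm.move callback installed at init time. A reduced sketch of that flow under hypothetical stand-in types:

#include <linux/errno.h>

struct sketch_chan;
struct sketch_res;

struct sketch_mm {
	struct sketch_chan *chan;                 /* cf. drm->ttm.chan */
	int (*move)(struct sketch_chan *chan, struct sketch_res *old_res,
		    struct sketch_res *new_res);  /* cf. drm->ttm.move */
	bool needs_prep;                          /* e.g. Tesla and newer */
	int (*prep)(struct sketch_res *new_res);  /* map destination first */
};

static int sketch_move_m2mf(struct sketch_mm *mm, struct sketch_res *old_res,
			    struct sketch_res *new_res)
{
	int ret;

	if (!mm->chan || !mm->move)
		return -ENODEV;            /* no accelerated path available */

	if (mm->needs_prep) {
		ret = mm->prep(new_res);
		if (ret)
			return ret;
	}
	return mm->move(mm->chan, old_res, new_res);
}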
848 nouveau_bo_move_init(struct nouveau_drm *drm) in nouveau_bo_move_init() argument
892 chan = drm->cechan; in nouveau_bo_move_init()
894 chan = drm->channel; in nouveau_bo_move_init()
901 &drm->ttm.copy); in nouveau_bo_move_init()
903 ret = mthd->init(chan, drm->ttm.copy.handle); in nouveau_bo_move_init()
905 nvif_object_dtor(&drm->ttm.copy); in nouveau_bo_move_init()
909 drm->ttm.move = mthd->exec; in nouveau_bo_move_init()
910 drm->ttm.chan = chan; in nouveau_bo_move_init()
916 NV_INFO(drm, "MM: using %s for buffer copies\n", name); in nouveau_bo_move_init()
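Lines 892-916 show how the copy method is chosen once at init: prefer the dedicated copy-engine channel (drm->cechan), fall back to the general channel, run the method's init hook, then cache mthd->exec and the channel so later moves (line 822) just invoke drm->ttm.move. A sketch of that selection, names hypothetical (the real code also constructs and tears down an nvif object around init):

#include <linux/errno.h>

struct s_chan;
struct s_res;

struct s_method {
	const char *name;
	int (*init)(struct s_chan *chan);
	int (*exec)(struct s_chan *chan, struct s_res *o, struct s_res *n);
};

struct s_drm {
	struct s_chan *cechan;    /* dedicated copy engine, may be NULL */
	struct s_chan *channel;   /* general-purpose fallback */
	struct s_chan *move_chan; /* cf. drm->ttm.chan */
	int (*move)(struct s_chan *chan, struct s_res *o, struct s_res *n);
};

static int s_move_init(struct s_drm *drm, const struct s_method *mthd)
{
	struct s_chan *chan = drm->cechan ? drm->cechan : drm->channel;
	int ret;

	if (!chan)
		return -ENODEV;

	ret = mthd->init(chan);
	if (ret)
		return ret;

	drm->move = mthd->exec;   /* cached exec hook used by every move */
	drm->move_chan = chan;
	return 0;
}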
957 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_vm_bind() local
958 struct drm_device *dev = drm->dev; in nouveau_bo_vm_bind()
966 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { in nouveau_bo_vm_bind()
979 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_vm_cleanup() local
980 struct drm_device *dev = drm->dev; in nouveau_bo_vm_cleanup()
1000 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_bo_move() local
1019 NV_WARN(drm, "Moving pinned object %p!\n", nvbo); in nouveau_bo_move()
1021 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { in nouveau_bo_move()
1049 if (drm->ttm.move) { in nouveau_bo_move()
1071 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { in nouveau_bo_move()
1085 nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm, in nouveau_ttm_io_mem_free_locked() argument
1090 if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { in nouveau_ttm_io_mem_free_locked()
1108 struct nouveau_drm *drm = nouveau_bdev(bdev); in nouveau_ttm_io_mem_reserve() local
1109 struct nvkm_device *device = nvxx_device(&drm->client.device); in nouveau_ttm_io_mem_reserve()
1111 struct nvif_mmu *mmu = &drm->client.mmu; in nouveau_ttm_io_mem_reserve()
1114 mutex_lock(&drm->ttm.io_reserve_mutex); in nouveau_ttm_io_mem_reserve()
1123 if (drm->agp.bridge) { in nouveau_ttm_io_mem_reserve()
1125 drm->agp.base; in nouveau_ttm_io_mem_reserve()
1126 reg->bus.is_iomem = !drm->agp.cma; in nouveau_ttm_io_mem_reserve()
1130 if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || in nouveau_ttm_io_mem_reserve()
1143 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && in nouveau_ttm_io_mem_reserve()
1144 mmu->type[drm->ttm.type_vram].type & NVIF_MEM_UNCACHED) in nouveau_ttm_io_mem_reserve()
1149 if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { in nouveau_ttm_io_mem_reserve()
1197 nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru, in nouveau_ttm_io_mem_reserve()
1204 nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource); in nouveau_ttm_io_mem_reserve()
1209 mutex_unlock(&drm->ttm.io_reserve_mutex); in nouveau_ttm_io_mem_reserve()
1216 struct nouveau_drm *drm = nouveau_bdev(bdev); in nouveau_ttm_io_mem_free() local
1218 mutex_lock(&drm->ttm.io_reserve_mutex); in nouveau_ttm_io_mem_free()
1219 nouveau_ttm_io_mem_free_locked(drm, reg); in nouveau_ttm_io_mem_free()
1220 mutex_unlock(&drm->ttm.io_reserve_mutex); in nouveau_ttm_io_mem_free()
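Lines 1085-1220 show a locked/unlocked split: the real teardown lives in nouveau_ttm_io_mem_free_locked(), so the reserve path, which already holds io_reserve_mutex when it evicts an LRU victim (line 1204), can call it directly, while the public free (lines 1216-1220) is just a lock-wrap. A minimal sketch of the idiom:

#include <linux/mutex.h>

static DEFINE_MUTEX(sketch_io_reserve_mutex);

/* Does the actual unmap; caller must hold sketch_io_reserve_mutex. */
static void sketch_io_mem_free_locked(void *reg)
{
	/* release reg's I/O mapping here */
}

/* Public entry point: acquire the lock, then reuse the locked helper. */
static void sketch_io_mem_free(void *reg)
{
	mutex_lock(&sketch_io_reserve_mutex);
	sketch_io_mem_free_locked(reg);
	mutex_unlock(&sketch_io_reserve_mutex);
}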
1225 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); in nouveau_ttm_fault_reserve_notify() local
1227 struct nvkm_device *device = nvxx_device(&drm->client.device); in nouveau_ttm_fault_reserve_notify()
1235 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || in nouveau_ttm_fault_reserve_notify()
1246 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA || in nouveau_ttm_fault_reserve_notify()
1278 struct nouveau_drm *drm; in nouveau_ttm_tt_populate() local
1290 drm = nouveau_bdev(bdev); in nouveau_ttm_tt_populate()
1292 return ttm_pool_alloc(&drm->ttm.bdev.pool, ttm, ctx); in nouveau_ttm_tt_populate()
1299 struct nouveau_drm *drm; in nouveau_ttm_tt_unpopulate() local
1307 drm = nouveau_bdev(bdev); in nouveau_ttm_tt_unpopulate()
1309 return ttm_pool_free(&drm->ttm.bdev.pool, ttm); in nouveau_ttm_tt_unpopulate()
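Lines 1278-1309 show populate/unpopulate resolving the driver instance from the TTM device and delegating page backing to the device's shared page pool. ttm_pool_alloc()/ttm_pool_free() are the real TTM entry points (signatures as in recent kernels); the wrappers below are a trimmed sketch, omitting the early-outs the real hooks perform for already-populated or SG-backed ttms:

#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_tt.h>

static int sketch_tt_populate(struct ttm_pool *pool, struct ttm_tt *ttm,
			      struct ttm_operation_ctx *ctx)
{
	/* back ttm's pages from the shared pool (cf. line 1292) */
	return ttm_pool_alloc(pool, ttm, ctx);
}

static void sketch_tt_unpopulate(struct ttm_pool *pool, struct ttm_tt *ttm)
{
	/* return the pages to the pool (cf. line 1309) */
	ttm_pool_free(pool, ttm);
}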
1317 struct nouveau_drm *drm = nouveau_bdev(bdev); in nouveau_ttm_tt_destroy() local
1318 if (drm->agp.bridge) { in nouveau_ttm_tt_destroy()