Lines matching refs: gmc (all hits fall in drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c; the leading number is the source line, the trailing "in ...()" names the containing function)

52 u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes; in amdgpu_gmc_pdb0_alloc()
53 uint32_t pde0_page_shift = adev->gmc.vmid0_page_table_block_size + 21; in amdgpu_gmc_pdb0_alloc()
66 r = amdgpu_bo_create(adev, &bp, &adev->gmc.pdb0_bo); in amdgpu_gmc_pdb0_alloc()
70 r = amdgpu_bo_reserve(adev->gmc.pdb0_bo, false); in amdgpu_gmc_pdb0_alloc()
74 r = amdgpu_bo_pin(adev->gmc.pdb0_bo, AMDGPU_GEM_DOMAIN_VRAM); in amdgpu_gmc_pdb0_alloc()
77 r = amdgpu_bo_kmap(adev->gmc.pdb0_bo, &adev->gmc.ptr_pdb0); in amdgpu_gmc_pdb0_alloc()
81 amdgpu_bo_unreserve(adev->gmc.pdb0_bo); in amdgpu_gmc_pdb0_alloc()
85 amdgpu_bo_unpin(adev->gmc.pdb0_bo); in amdgpu_gmc_pdb0_alloc()
87 amdgpu_bo_unreserve(adev->gmc.pdb0_bo); in amdgpu_gmc_pdb0_alloc()
89 amdgpu_bo_unref(&adev->gmc.pdb0_bo); in amdgpu_gmc_pdb0_alloc()
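
The pdb0 hits above (lines 52-89) trace the standard amdgpu buffer-object lifecycle for the PDB0 page directory: create, reserve, pin into VRAM, kmap, then unreserve on success, with the error path unwinding in reverse. Note also that vram_size on line 52 spans the whole XGMI hive (segment size times node count), and pde0_page_shift on line 53 is the block size counted above 2 MiB (1 << 21). A minimal sketch of how the hits fit together, assuming the usual amdgpu_bo_* error conventions; the amdgpu_bo_param (bp) setup is elided:

	r = amdgpu_bo_create(adev, &bp, &adev->gmc.pdb0_bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(adev->gmc.pdb0_bo, false);
	if (r)
		goto bo_unref;

	r = amdgpu_bo_pin(adev->gmc.pdb0_bo, AMDGPU_GEM_DOMAIN_VRAM);
	if (r)
		goto bo_unreserve;

	r = amdgpu_bo_kmap(adev->gmc.pdb0_bo, &adev->gmc.ptr_pdb0);
	if (r)
		goto bo_unpin;

	amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
	return 0;

bo_unpin:
	amdgpu_bo_unpin(adev->gmc.pdb0_bo);
bo_unreserve:
	amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
bo_unref:
	amdgpu_bo_unref(&adev->gmc.pdb0_bo);
	return r;
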
186 if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size) in amdgpu_gmc_agp_addr()
189 return adev->gmc.agp_start + bo->ttm->dma_address[0]; in amdgpu_gmc_agp_addr()
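
amdgpu_gmc_agp_addr() translates a single-page TTM BO into an MC address inside the AGP aperture: the page is rejected if its DMA address would not fit below agp_size, otherwise the result is agp_start plus the page's bus address. A hedged sketch of just that tail (earlier checks in the real function, e.g. for multi-page or cached TTMs, are omitted):

	/* The page must fit entirely inside the AGP aperture. */
	if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
		return AMDGPU_BO_INVALID_OFFSET;

	/* MC address = aperture base + bus address of the page. */
	return adev->gmc.agp_start + bo->ttm->dma_address[0];
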
276 u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1); in amdgpu_gmc_gart_location()
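
Line 276 clamps the top of the GART range: the MC address space has a canonical-address hole, and amdgpu_gmc.h defines AMDGPU_GMC_HOLE_START as 0x0000800000000000ULL, so with a 48-bit mc_mask the clamp lands just below the hole:

	/* Worked example for the min() above:
	 *   mc_mask               = 0x0000ffffffffffff  (48-bit MC bus)
	 *   AMDGPU_GMC_HOLE_START = 0x0000800000000000
	 *   max_mc_address        = min(mc_mask, HOLE_START - 1)
	 *                         = 0x00007fffffffffff
	 * i.e. the GART must be placed below the hole. */
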
377 struct amdgpu_gmc *gmc = &adev->gmc; in amdgpu_gmc_filter_faults() local
389 if (gmc->fault_ring[gmc->last_fault].timestamp >= stamp) in amdgpu_gmc_filter_faults()
394 fault = &gmc->fault_ring[gmc->fault_hash[hash].idx]; in amdgpu_gmc_filter_faults()
402 fault = &gmc->fault_ring[fault->next]; in amdgpu_gmc_filter_faults()
410 fault = &gmc->fault_ring[gmc->last_fault]; in amdgpu_gmc_filter_faults()
415 fault->next = gmc->fault_hash[hash].idx; in amdgpu_gmc_filter_faults()
416 gmc->fault_hash[hash].idx = gmc->last_fault++; in amdgpu_gmc_filter_faults()
433 struct amdgpu_gmc *gmc = &adev->gmc; in amdgpu_gmc_filter_faults_remove() local
440 fault = &gmc->fault_ring[gmc->fault_hash[hash].idx]; in amdgpu_gmc_filter_faults_remove()
446 fault = &gmc->fault_ring[fault->next]; in amdgpu_gmc_filter_faults_remove()
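
Lines 377-446 are the interrupt fault filter: a small hash table whose buckets chain through a ring buffer of recent faults. Lookup starts at the bucket head (fault_hash[hash].idx) and follows fault->next through the ring; insertion recycles the oldest ring slot (last_fault) and links it in as the new bucket head. A simplified sketch of that structure, with the atomic key handling and the timestamp re-checks the real code uses to detect entries recycled mid-walk omitted:

	/* lookup: walk the bucket chain, newest entry first */
	fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
	while (fault->timestamp >= stamp) {
		if (fault->key == key)
			return true;	/* fault seen recently: filter it */
		fault = &gmc->fault_ring[fault->next];
	}

	/* insert: recycle the oldest ring entry as the new bucket head */
	fault = &gmc->fault_ring[gmc->last_fault];
	fault->key = key;
	fault->timestamp = stamp;
	fault->next = gmc->fault_hash[hash].idx;
	gmc->fault_hash[hash].idx = gmc->last_fault++;
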
452 if (!adev->gmc.xgmi.connected_to_cpu) { in amdgpu_gmc_ras_early_init()
453 adev->gmc.xgmi.ras = &xgmi_ras; in amdgpu_gmc_ras_early_init()
454 amdgpu_ras_register_ras_block(adev, &adev->gmc.xgmi.ras->ras_block); in amdgpu_gmc_ras_early_init()
455 adev->gmc.xgmi.ras_if = &adev->gmc.xgmi.ras->ras_block.ras_comm; in amdgpu_gmc_ras_early_init()
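
Lines 452-455 show the stock RAS hookup pattern: point the IP's ras descriptor at its static ops block (xgmi_ras here), register that block with the RAS core, and cache the common interface pointer for later queries. Reassembling the hits shows the nesting; XGMI RAS is only registered when the device is not CPU-connected:

	if (!adev->gmc.xgmi.connected_to_cpu) {
		adev->gmc.xgmi.ras = &xgmi_ras;
		amdgpu_ras_register_ras_block(adev, &adev->gmc.xgmi.ras->ras_block);
		adev->gmc.xgmi.ras_if = &adev->gmc.xgmi.ras->ras_block.ras_comm;
	}
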
538 adev->gmc.tmz_enabled = false; in amdgpu_gmc_tmz_set()
542 adev->gmc.tmz_enabled = true; in amdgpu_gmc_tmz_set()
565 adev->gmc.tmz_enabled = false; in amdgpu_gmc_tmz_set()
569 adev->gmc.tmz_enabled = true; in amdgpu_gmc_tmz_set()
575 adev->gmc.tmz_enabled = false; in amdgpu_gmc_tmz_set()
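
The five tmz_set hits flip adev->gmc.tmz_enabled from the amdgpu_tmz module parameter (-1 = auto, 0 = off, 1 = on, per its MODULE_PARM_DESC in amdgpu_drv.c); the real function is a switch over the GC IP version. A condensed sketch of the policy the hits imply, where supports_tmz and opt_in_only are hypothetical stand-ins for those case labels, not real variables:

	if (!supports_tmz)
		adev->gmc.tmz_enabled = false;			/* hardware can't */
	else if (opt_in_only)
		adev->gmc.tmz_enabled = (amdgpu_tmz == 1);	/* auto stays off */
	else
		adev->gmc.tmz_enabled = (amdgpu_tmz != 0);	/* auto turns on */
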
591 struct amdgpu_gmc *gmc = &adev->gmc; in amdgpu_gmc_noretry_set() local
600 gmc->noretry = (amdgpu_noretry == -1) ? noretry_default : amdgpu_noretry; in amdgpu_gmc_noretry_set()
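
Line 600 resolves the amdgpu_noretry module parameter the usual way: -1 means auto, so the per-ASIC noretry_default computed earlier in the function wins; an explicit 0 or 1 from the user overrides it. The same ternary, annotated:

	/* -1 = auto: take the per-ASIC default; 0/1 = user override. */
	gmc->noretry = (amdgpu_noretry == -1) ? noretry_default : amdgpu_noretry;
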
686 if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) in amdgpu_gmc_get_vbios_allocations()
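
Line 686 is a sanity floor on the pre-OS framebuffer reservation: 8 * 1024 * 1024 is 8 MiB, so when honoring the reserved size would leave less than 8 MiB of usable VRAM, the reservation is apparently dropped (the assignment that follows is not among the hits).

	/* 8 * 1024 * 1024 = 8388608 bytes = 8 MiB of VRAM kept usable. */
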
720 u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes; in amdgpu_gmc_init_pdb0()
721 u64 pde0_page_size = (1ULL<<adev->gmc.vmid0_page_table_block_size)<<21; in amdgpu_gmc_init_pdb0()
723 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; in amdgpu_gmc_init_pdb0()
734 flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1)); in amdgpu_gmc_init_pdb0()
741 amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, vram_addr, flags); in amdgpu_gmc_init_pdb0()
750 amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags); in amdgpu_gmc_init_pdb0()
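
Lines 720-750 fill PDB0 itself. The arithmetic is worth spelling out: pde0_page_size = (1 << vmid0_page_table_block_size) << 21, so the block size is counted in 2 MiB units, matching pde0_page_shift = vmid0_page_table_block_size + 21 from line 53; with a block size of 9, each PDE0 entry covers (1 << 9) << 21 bytes = 1 GiB. Likewise AMDGPU_PTE_FRAG(block_size + 9) on line 734 encodes a fragment of 2^(12 + block_size + 9) bytes, i.e. exactly one PDE0 page. The first vram_size / pde0_page_size entries map VRAM directly as huge PTEs, and the next entry points at the GART page table. A sketch of the fill implied by the hits, where vram_end (= vram_addr + vram_size) is assumed from context and the flag setup is abbreviated:

	/* Entries 0..N-1: map VRAM in pde0_page_size strides as huge PTEs. */
	for (i = 0; vram_addr < vram_end; i++, vram_addr += pde0_page_size)
		amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, vram_addr, flags);

	/* Entry N: point at the GART page table instead (the real code
	 * switches flags here: a PDE addressing a PTB, not a huge PTE). */
	amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags);
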
763 return mc_addr - adev->gmc.vram_start + adev->vm_manager.vram_base_offset; in amdgpu_gmc_vram_mc2pa()
787 return amdgpu_bo_gpu_offset(bo) - adev->gmc.vram_start + adev->gmc.aper_base; in amdgpu_gmc_vram_cpu_pa()
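
The final two hits are the linear address conversions: amdgpu_gmc_vram_mc2pa() turns an MC address into a bus-relative physical address by subtracting vram_start and adding vram_base_offset, while amdgpu_gmc_vram_cpu_pa() produces the CPU-visible address by adding aper_base (the PCI aperture) instead. A worked example with made-up values:

	/* Illustrative values only, not taken from real hardware:
	 *   vram_start       = 0x0000008000000000   (MC view of VRAM)
	 *   vram_base_offset = 0x0
	 *   aper_base        = 0x00000000e0000000   (PCI BAR, CPU view)
	 * For a BO at MC address 0x0000008000100000:
	 *   mc2pa  = mc - vram_start + vram_base_offset = 0x0000000000100000
	 *   cpu_pa = mc - vram_start + aper_base        = 0x00000000e0100000
	 */
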