Lines Matching refs:gmc
48 u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes; in amdgpu_gmc_pdb0_alloc()
49 uint32_t pde0_page_shift = adev->gmc.vmid0_page_table_block_size + 21; in amdgpu_gmc_pdb0_alloc()
62 r = amdgpu_bo_create(adev, &bp, &adev->gmc.pdb0_bo); in amdgpu_gmc_pdb0_alloc()
66 r = amdgpu_bo_reserve(adev->gmc.pdb0_bo, false); in amdgpu_gmc_pdb0_alloc()
70 r = amdgpu_bo_pin(adev->gmc.pdb0_bo, AMDGPU_GEM_DOMAIN_VRAM); in amdgpu_gmc_pdb0_alloc()
73 r = amdgpu_bo_kmap(adev->gmc.pdb0_bo, &adev->gmc.ptr_pdb0); in amdgpu_gmc_pdb0_alloc()
77 amdgpu_bo_unreserve(adev->gmc.pdb0_bo); in amdgpu_gmc_pdb0_alloc()
81 amdgpu_bo_unpin(adev->gmc.pdb0_bo); in amdgpu_gmc_pdb0_alloc()
83 amdgpu_bo_unreserve(adev->gmc.pdb0_bo); in amdgpu_gmc_pdb0_alloc()
85 amdgpu_bo_unref(&adev->gmc.pdb0_bo); in amdgpu_gmc_pdb0_alloc()
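
The first cluster is amdgpu_gmc_pdb0_alloc(): it sizes a root page directory (PDB0) big enough to identity-map all XGMI VRAM segments, creates a VRAM BO for it, then reserves, pins and kmaps it so the CPU can write entries, with the error path unwinding pin/reserve/ref in reverse. The sizing arithmetic as a standalone sketch; the board parameters below are made-up samples, not values from any real device:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Made-up board parameters; the real values live in adev->gmc.xgmi
     * and adev->gmc.vmid0_page_table_block_size. */
    uint64_t node_segment_size  = 32ULL << 30;  /* 32 GiB per XGMI node */
    uint32_t num_physical_nodes = 4;
    uint32_t block_size         = 0;            /* vmid0_page_table_block_size */

    uint64_t vram_size = node_segment_size * num_physical_nodes;
    /* Each PDE0 entry covers 2^(block_size + 21) bytes, i.e. 2 MiB minimum. */
    uint32_t pde0_page_shift = block_size + 21;
    uint64_t n_pdes = vram_size >> pde0_page_shift;

    printf("%llu GiB of VRAM -> %llu PDE0 entries of %llu MiB each\n",
           (unsigned long long)(vram_size >> 30),
           (unsigned long long)n_pdes,
           (unsigned long long)((1ULL << pde0_page_shift) >> 20));
    return 0;
}
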
182 if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size) in amdgpu_gmc_agp_addr()
185 return adev->gmc.agp_start + bo->ttm->dma_address[0]; in amdgpu_gmc_agp_addr()
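
amdgpu_gmc_agp_addr() only returns an AGP aperture address for a BO backed by a single system page whose DMA address fits inside the aperture; anything else falls back to a GART mapping. A standalone sketch of that bounds check; the aperture base/size, the invalid-offset marker and the sample address here are all invented:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL
#define INVALID_OFFSET (~0ULL)   /* stand-in for the driver's invalid marker */

/* Hypothetical helper mirroring the check in amdgpu_gmc_agp_addr(). */
static uint64_t agp_addr(uint64_t dma_addr, uint64_t agp_start, uint64_t agp_size)
{
    /* The whole page must lie below the aperture size. */
    if (dma_addr + PAGE_SIZE >= agp_size)
        return INVALID_OFFSET;
    return agp_start + dma_addr;
}

int main(void)
{
    uint64_t start = 0xF400000000ULL, size = 1ULL << 30; /* sample aperture */

    printf("0x%llx\n", (unsigned long long)agp_addr(0x1000, start, size));
    return 0;
}
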
265 u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1); in amdgpu_gmc_gart_location()
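
The single hit in amdgpu_gmc_gart_location() clamps the top of the GART window below the GFX9+ MC address hole. A tiny sketch of that clamp; the hole-start constant mirrors AMDGPU_GMC_HOLE_START from amdgpu_gmc.h, and the 48-bit MC mask is a sample value:

#include <stdio.h>
#include <stdint.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* GFX9+ leaves a canonical-address hole in the middle of the MC space;
 * the GART window must stay below it. */
#define GMC_HOLE_START 0x0000800000000000ULL

int main(void)
{
    uint64_t mc_mask = (1ULL << 48) - 1;   /* sample 48-bit MC space */
    uint64_t max_mc_address = MIN(mc_mask, GMC_HOLE_START - 1);

    printf("GART must end at or below 0x%llx\n",
           (unsigned long long)max_mc_address);
    return 0;
}
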
364 struct amdgpu_gmc *gmc = &adev->gmc; in amdgpu_gmc_filter_faults() local
372 if (gmc->fault_ring[gmc->last_fault].timestamp >= stamp) in amdgpu_gmc_filter_faults()
377 fault = &gmc->fault_ring[gmc->fault_hash[hash].idx]; in amdgpu_gmc_filter_faults()
385 fault = &gmc->fault_ring[fault->next]; in amdgpu_gmc_filter_faults()
393 fault = &gmc->fault_ring[gmc->last_fault]; in amdgpu_gmc_filter_faults()
398 fault->next = gmc->fault_hash[hash].idx; in amdgpu_gmc_filter_faults()
399 gmc->fault_hash[hash].idx = gmc->last_fault++; in amdgpu_gmc_filter_faults()
416 struct amdgpu_gmc *gmc = &adev->gmc; in amdgpu_gmc_filter_faults_remove() local
423 fault = &gmc->fault_ring[gmc->fault_hash[hash].idx]; in amdgpu_gmc_filter_faults_remove()
429 fault = &gmc->fault_ring[fault->next]; in amdgpu_gmc_filter_faults_remove()
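
Together these two functions implement the retry-fault filter: a small hash table whose buckets chain through a fixed ring of recent faults. amdgpu_gmc_filter_faults() drops a fault whose key is already recent in its bucket's chain, otherwise recycles the oldest ring slot and links it at the chain head; amdgpu_gmc_filter_faults_remove() walks the same chain and clears the key once the fault has been handled. A self-contained model of the structure (toy sizes, a simplified key and no atomics, unlike the driver):

#include <stdio.h>
#include <stdint.h>

/* Toy sizes; the driver uses larger power-of-two ring/hash tables. */
#define RING_SIZE 8
#define HASH_SIZE 16
#define EXPIRY    100   /* ticks after which an entry stops being "recent" */

struct fault { uint64_t key; uint64_t timestamp; unsigned next; };

static struct fault ring[RING_SIZE];
static unsigned hash_head[HASH_SIZE];
static unsigned last_fault;

static unsigned hash_key(uint64_t key) { return (unsigned)(key % HASH_SIZE); }

/* Returns 1 if an equal, still-recent fault is already queued (drop it).
 * Timestamps must be nonzero and increase monotonically across calls. */
static int filter_fault(uint64_t key, uint64_t stamp)
{
    unsigned h = hash_key(key);
    struct fault *f = &ring[hash_head[h]];
    uint64_t cutoff = stamp > EXPIRY ? stamp - EXPIRY : 0;

    while (f->timestamp && f->timestamp >= cutoff) {
        uint64_t prev = f->timestamp;

        if (f->key == key)
            return 1;                /* duplicate within the window */
        f = &ring[f->next];
        if (f->timestamp >= prev)    /* walked onto a recycled slot */
            break;
    }

    /* Claim the oldest ring slot and link it at the bucket head. */
    f = &ring[last_fault];
    f->key = key;
    f->timestamp = stamp;
    f->next = hash_head[h];
    hash_head[h] = last_fault;
    last_fault = (last_fault + 1) % RING_SIZE;
    return 0;
}

/* Forget a fault once it has been handled so it can fire again. */
static void remove_fault(uint64_t key)
{
    struct fault *f = &ring[hash_head[hash_key(key)]];
    uint64_t prev = UINT64_MAX;

    while (f->timestamp && f->timestamp < prev) {
        if (f->key == key) {
            f->key = 0;
            return;
        }
        prev = f->timestamp;
        f = &ring[f->next];
    }
}

int main(void)
{
    printf("%d %d %d\n",
           filter_fault(42, 1),     /* 0: first occurrence */
           filter_fault(42, 2),     /* 1: filtered duplicate */
           filter_fault(42, 200));  /* 0: old entry has expired */
    remove_fault(42);
    return 0;
}

The termination trick is the same one the driver relies on: chain timestamps decrease strictly as you walk, so stepping onto a slot with a newer timestamp means the ring recycled it and the chain ends there.
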
451 if (!adev->gmc.xgmi.connected_to_cpu) in amdgpu_gmc_ras_late_init()
452 adev->gmc.xgmi.ras_funcs = &xgmi_ras_funcs; in amdgpu_gmc_ras_late_init()
454 if (adev->gmc.xgmi.ras_funcs && in amdgpu_gmc_ras_late_init()
455 adev->gmc.xgmi.ras_funcs->ras_late_init) { in amdgpu_gmc_ras_late_init()
456 r = adev->gmc.xgmi.ras_funcs->ras_late_init(adev); in amdgpu_gmc_ras_late_init()
502 if (adev->gmc.xgmi.ras_funcs && in amdgpu_gmc_ras_fini()
503 adev->gmc.xgmi.ras_funcs->ras_fini) in amdgpu_gmc_ras_fini()
504 adev->gmc.xgmi.ras_funcs->ras_fini(adev); in amdgpu_gmc_ras_fini()
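
The RAS hits show amdgpu's guarded ops-table idiom: xgmi_ras_funcs is attached only when the device is not connected to the CPU, and every call site checks both the table pointer and the individual hook before dereferencing. A stripped-down illustration; the struct and function names here are invented for the example:

#include <stdio.h>

/* Invented stand-in for the driver's ras_funcs vtable. */
struct ras_funcs {
    int  (*ras_late_init)(void *dev);
    void (*ras_fini)(void *dev);
};

static int my_late_init(void *dev) { (void)dev; puts("late init"); return 0; }

static const struct ras_funcs xgmi_ras_funcs = {
    .ras_late_init = my_late_init,
    .ras_fini      = NULL,          /* optional hook deliberately absent */
};

int main(void)
{
    const struct ras_funcs *funcs = &xgmi_ras_funcs;
    int r = 0;

    /* Guard the table and the hook before calling, as the driver does. */
    if (funcs && funcs->ras_late_init)
        r = funcs->ras_late_init(NULL);
    if (funcs && funcs->ras_fini)
        funcs->ras_fini(NULL);
    return r;
}
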
567 adev->gmc.tmz_enabled = false; in amdgpu_gmc_tmz_set()
571 adev->gmc.tmz_enabled = true; in amdgpu_gmc_tmz_set()
584 adev->gmc.tmz_enabled = false; in amdgpu_gmc_tmz_set()
588 adev->gmc.tmz_enabled = true; in amdgpu_gmc_tmz_set()
594 adev->gmc.tmz_enabled = false; in amdgpu_gmc_tmz_set()
610 struct amdgpu_gmc *gmc = &adev->gmc; in amdgpu_gmc_noretry_set() local
622 gmc->noretry = 1; in amdgpu_gmc_noretry_set()
624 gmc->noretry = amdgpu_noretry; in amdgpu_gmc_noretry_set()
640 gmc->noretry = 0; in amdgpu_gmc_noretry_set()
642 gmc->noretry = amdgpu_noretry; in amdgpu_gmc_noretry_set()
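
amdgpu_gmc_noretry_set(), like amdgpu_gmc_tmz_set() above it, follows amdgpu's tri-state module-parameter idiom: a parameter left at -1 means "auto" and the driver picks a per-ASIC default, while 0/1 force the setting off/on. A standalone sketch of that resolution, with the per-ASIC policy reduced to a placeholder predicate:

#include <stdio.h>
#include <stdbool.h>

/* Stand-in for the amdgpu_noretry module parameter:
 * -1 = auto (per-ASIC default), 0 = force off, 1 = force on. */
static int noretry_param = -1;

/* Placeholder for the policy the driver encodes in a per-ASIC switch. */
static bool asic_defaults_to_noretry(int asic_gen) { return asic_gen >= 9; }

static int resolve_noretry(int asic_gen)
{
    if (noretry_param == -1)                     /* auto: take the default */
        return asic_defaults_to_noretry(asic_gen) ? 1 : 0;
    return noretry_param;                        /* user override wins */
}

int main(void)
{
    printf("gen9 auto: %d\n", resolve_noretry(9));
    noretry_param = 0;
    printf("gen9 forced off: %d\n", resolve_noretry(9));
    return 0;
}
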
706 if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) in amdgpu_gmc_get_vbios_allocations()
740 u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes; in amdgpu_gmc_init_pdb0()
741 u64 pde0_page_size = (1ULL<<adev->gmc.vmid0_page_table_block_size)<<21; in amdgpu_gmc_init_pdb0()
743 adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; in amdgpu_gmc_init_pdb0()
754 flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1)); in amdgpu_gmc_init_pdb0()
761 amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, vram_addr, flags); in amdgpu_gmc_init_pdb0()
770 amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags); in amdgpu_gmc_init_pdb0()
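
amdgpu_gmc_init_pdb0() writes PDB0 itself: huge PDE0 entries identity-map the full XGMI VRAM range, then one final entry points at the GART page table, placing GART directly above VRAM in the VMID0 address space. A userspace sketch of the fill loop; the flag bits, addresses and the set_pte_pde() helper are simplified stand-ins, not the driver's per-ASIC encodings:

#include <stdio.h>
#include <stdint.h>

/* Simplified PTE/PDE flag bits; the real encodings are per-ASIC. */
#define PTE_VALID  (1ULL << 0)
#define PDE_AS_PTE (1ULL << 1)   /* stand-in for AMDGPU_PDE_PTE */

static void set_pte_pde(uint64_t *pdb0, unsigned idx, uint64_t addr, uint64_t flags)
{
    pdb0[idx] = addr | flags;
}

int main(void)
{
    uint64_t vram_size = 8ULL << 30;                      /* sample: 8 GiB */
    unsigned block_size = 0;                              /* PDE0 covers 2 MiB */
    uint64_t pde0_page_size = (1ULL << block_size) << 21;
    uint64_t n_vram_pdes = vram_size / pde0_page_size;
    uint64_t pdb0[4096 + 1];                              /* sized for the sample */
    uint64_t vram_addr = 0, gart_ptb_pa = 0x200000000ULL; /* made-up PAs */
    unsigned i;

    /* Map VRAM with huge PDE0 entries that act directly as PTEs. */
    for (i = 0; i < n_vram_pdes; i++, vram_addr += pde0_page_size)
        set_pte_pde(pdb0, i, vram_addr, PTE_VALID | PDE_AS_PTE);

    /* The last entry points at the GART page table, so GART starts
     * right above VRAM in the VMID0 address space. */
    set_pte_pde(pdb0, i, gart_ptb_pa, PTE_VALID);

    printf("mapped %llu PDE0s of %llu MiB each, GART PTB at index %u\n",
           (unsigned long long)n_vram_pdes,
           (unsigned long long)(pde0_page_size >> 20), i);
    return 0;
}
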
783 return mc_addr - adev->gmc.vram_start + adev->vm_manager.vram_base_offset; in amdgpu_gmc_vram_mc2pa()
807 return amdgpu_bo_gpu_offset(bo) - adev->gmc.vram_start + adev->gmc.aper_base; in amdgpu_gmc_vram_cpu_pa()
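
The last two helpers are pure address rebasing: amdgpu_gmc_vram_mc2pa() converts an MC (GPU-internal) address to a device-physical address via vram_base_offset, and amdgpu_gmc_vram_cpu_pa() rebases a BO's GPU offset onto the CPU-visible BAR at aper_base. A toy walk-through with invented base addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Invented sample bases; the real ones come from adev->gmc and
     * adev->vm_manager. */
    uint64_t vram_start       = 0x0000008000000000ULL; /* MC view of VRAM */
    uint64_t vram_base_offset = 0x0000000000000000ULL; /* bus-side rebase */
    uint64_t aper_base        = 0x00000000e0000000ULL; /* CPU BAR base */

    uint64_t mc_addr   = vram_start + 0x100000;        /* 1 MiB into VRAM */
    uint64_t bo_offset = mc_addr;                      /* BO's GPU offset */

    /* mc2pa: rebase the MC address into the device-physical view. */
    uint64_t pa = mc_addr - vram_start + vram_base_offset;
    /* cpu_pa: rebase the same offset onto the CPU-visible aperture. */
    uint64_t cpu_pa = bo_offset - vram_start + aper_base;

    printf("pa = 0x%llx, cpu_pa = 0x%llx\n",
           (unsigned long long)pa, (unsigned long long)cpu_pa);
    return 0;
}
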