**/linux/drivers/gpu/drm/xe/**
**xe_gt_pagefault.c**
- 89: `vma = vm->usm.last_fault_vma;` in `lookup_vma()`
- 192: `down_read(&xe->usm.lock);` in `asid_to_vm()`
- 198: `up_read(&xe->usm.lock);` in `asid_to_vm()`
- 239: `vm->usm.last_fault_vma = vma;` in `handle_pagefault()`
- 393: `queue_work(gt->usm.pf_wq, w);` in `pf_queue_work_func()`
- 456: `gt->usm.acc_queue[i].gt = gt;` in `xe_gt_pagefault_init()`
- 463: `if (!gt->usm.pf_wq)` in `xe_gt_pagefault_init()`
- 469: `if (!gt->usm.acc_wq) {` in `xe_gt_pagefault_init()`
- 487: `gt->usm.pf_queue[i].head = 0;` in `xe_gt_pagefault_reset()`
- 488: `gt->usm.pf_queue[i].tail = 0;` in `xe_gt_pagefault_reset()`
- [all …]
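The `asid_to_vm()` hits above show the GPU page-fault path resolving a hardware ASID back to its VM under the read side of `xe->usm.lock`. A minimal sketch of that lookup pattern, assuming the xarray populated in `xe_device.c` and `xe_vm.c` below is queried with `xa_load()` and that `xe_vm_get()` is the reference-taking helper (both assumptions here):

```c
/*
 * Sketch only: ASID -> VM resolution as the asid_to_vm() matches
 * suggest. Assumes xe->usm.asid_to_vm is the XA_FLAGS_ALLOC xarray
 * set up in xe_device_create() and that xa_load()/xe_vm_get() are
 * the right primitives; error handling is simplified.
 */
static struct xe_vm *asid_to_vm_sketch(struct xe_device *xe, u32 asid)
{
	struct xe_vm *vm;

	down_read(&xe->usm.lock);		/* lines 192/198 above */
	vm = xa_load(&xe->usm.asid_to_vm, asid);
	if (vm)
		xe_vm_get(vm);			/* assumed ref helper */
	up_read(&xe->usm.lock);

	return vm;				/* NULL: stale/unknown ASID */
}
```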
|
**xe_bb.c**
- 32: `struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 dwords, bool usm)` in `xe_bb_new()` (argument)
- 47: `bb->bo = xe_sa_bo_new(!usm ? tile->mem.kernel_bb_pool : gt->usm.bb_pool,` in `xe_bb_new()`
|
**xe_migrate.c**
- 252: `batch = tile->primary_gt->usm.bb_pool->bo;` in `xe_migrate_prepare_vm()`
- 273: `batch = tile->primary_gt->usm.bb_pool->bo;` in `xe_migrate_prepare_vm()`
- 818: `bool usm = xe->info.has_usm;` in `xe_migrate_copy()` (local)
- 853: `bb = xe_bb_new(gt, batch_size, usm);` in `xe_migrate_copy()`
- 888: `xe_migrate_batch_base(m, usm),` in `xe_migrate_copy()`
- 1089: `bool usm = xe->info.has_usm;` in `xe_migrate_clear()` (local)
- 1110: `bb = xe_bb_new(gt, batch_size, usm);` in `xe_migrate_clear()`
- 1137: `xe_migrate_batch_base(m, usm),` in `xe_migrate_clear()`
- 1324: `bool usm = is_migrate && xe->info.has_usm;` in `__xe_migrate_update_pgtables()` (local)
- 1347: `bb = xe_bb_new(gt, batch_size, usm);` in `__xe_migrate_update_pgtables()`
- [all …]
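Read together with the `xe_bb.c` hits above, these matches show one convention: each migration entry point derives a local `usm` flag from `xe->info.has_usm` and threads it into `xe_bb_new()`, which picks the suballocator pool at `xe_bb.c:47`, presumably so that page-fault servicing never has to wait on the shared kernel pool. A condensed sketch of that plumbing (names come from the snippets; the body is illustrative, not the real function):

```c
/*
 * Pattern shared by xe_migrate_copy(), xe_migrate_clear() and
 * __xe_migrate_update_pgtables(): compute the usm flag once and let
 * xe_bb_new() choose the pool. Sketch only.
 */
static struct xe_bb *migrate_bb_new_sketch(struct xe_gt *gt, u32 batch_size)
{
	struct xe_device *xe = gt_to_xe(gt);
	bool usm = xe->info.has_usm;		/* xe_migrate.c:818 */

	/*
	 * With usm set, xe_bb_new() draws from gt->usm.bb_pool rather
	 * than tile->mem.kernel_bb_pool (xe_bb.c:47), keeping USM
	 * batches off the shared kernel pool.
	 */
	return xe_bb_new(gt, batch_size, usm);
}
```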
|
**xe_bb.h**
- 17: `struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 size, bool usm);`
|
**xe_device.c**
- 330: `init_rwsem(&xe->usm.lock);` in `xe_device_create()`
- 332: `xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC);` in `xe_device_create()`
- 339: `err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, NULL,` in `xe_device_create()`
- 341: `&xe->usm.next_asid, GFP_KERNEL);` in `xe_device_create()`
- 344: `xa_erase(&xe->usm.asid_to_vm, asid);` in `xe_device_create()`
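The `xe_device_create()` hits outline how the ASID map is primed: an `XA_FLAGS_ALLOC` xarray plus a cyclic allocation of a `NULL` placeholder that is immediately erased, which advances the `next_asid` cursor before any real VM allocates. A sketch under those assumptions (the `XA_LIMIT` bound and `XE_MAX_ASID` are guesses here, not taken from the listing):

```c
/*
 * Sketch of the init dance at xe_device.c:330-344. The placeholder
 * alloc/erase advances xe->usm.next_asid so the first real VM gets a
 * fresh ASID; the XA_LIMIT bounds are assumed.
 */
static int usm_init_sketch(struct xe_device *xe)
{
	u32 asid;
	int err;

	init_rwsem(&xe->usm.lock);				/* line 330 */
	xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC);	/* line 332 */

	err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, NULL,
			      XA_LIMIT(1, XE_MAX_ASID - 1),	/* assumed */
			      &xe->usm.next_asid, GFP_KERNEL);	/* 339-341 */
	if (err < 0)
		return err;

	xa_erase(&xe->usm.asid_to_vm, asid);			/* line 344 */
	return 0;
}
```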
|
**xe_trace_bo.h**
- 94: `__entry->asid = xe_vma_vm(vma)->usm.asid;`
- 188: `__entry->asid = vm->usm.asid;`
|
**xe_gt.h**
- 94: `hwe->instance == gt->usm.reserved_bcs_instance;` in `xe_gt_is_usm_hwe()`
|
**xe_vm_types.h**
- 268: `} usm;` (member)
|
**xe_gt_types.h**
- 301: `} usm;` (member)
|
**xe_vm.c**
- 1174: `if (vm->usm.last_fault_vma == vma)` in `xe_vm_remove_vma()`
- 1175: `vm->usm.last_fault_vma = NULL;` in `xe_vm_remove_vma()`
- 1616: `down_write(&xe->usm.lock);` in `xe_vm_close_and_put()`
- 1617: `if (vm->usm.asid) {` in `xe_vm_close_and_put()`
- 1623: `lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);` in `xe_vm_close_and_put()`
- 1626: `up_write(&xe->usm.lock);` in `xe_vm_close_and_put()`
- 1769: `down_write(&xe->usm.lock);` in `xe_vm_create_ioctl()`
- 1770: `err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,` in `xe_vm_create_ioctl()`
- 1772: `&xe->usm.next_asid, GFP_KERNEL);` in `xe_vm_create_ioctl()`
- 1773: `up_write(&xe->usm.lock);` in `xe_vm_create_ioctl()`
- [all …]
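Taken with the `xe_device.c` hits above, these matches give the full ASID lifecycle: `xe_vm_create_ioctl()` cyclically allocates an ASID mapping to the new VM, and `xe_vm_close_and_put()` erases it, both under the write side of `xe->usm.lock`. A sketch of both halves (the error paths and the `XA_LIMIT` bound are assumptions):

```c
/* Allocation half, per xe_vm.c:1769-1773. */
static int vm_asid_alloc_sketch(struct xe_device *xe, struct xe_vm *vm)
{
	u32 asid;
	int err;

	down_write(&xe->usm.lock);
	err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
			      XA_LIMIT(1, XE_MAX_ASID - 1),	/* assumed */
			      &xe->usm.next_asid, GFP_KERNEL);
	up_write(&xe->usm.lock);
	if (err < 0)
		return err;

	vm->usm.asid = asid;
	return 0;
}

/* Teardown half, per xe_vm.c:1616-1626. */
static void vm_asid_release_sketch(struct xe_device *xe, struct xe_vm *vm)
{
	struct xe_vm *lookup;

	down_write(&xe->usm.lock);
	if (vm->usm.asid) {
		lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid);
		WARN_ON(lookup != vm);	/* the slot must have held this VM */
	}
	up_write(&xe->usm.lock);
}
```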
|
**xe_gt.c**
- 494: `gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),` in `all_fw_domain_init()`
- 496: `if (IS_ERR(gt->usm.bb_pool)) {` in `all_fw_domain_init()`
- 497: `err = PTR_ERR(gt->usm.bb_pool);` in `all_fw_domain_init()`
|
**xe_device_types.h**
- 364: `} usm;` (member)
|
**xe_gt_tlb_invalidation.c**
- 414: `xe_vma_vm(vma)->usm.asid);` in `xe_gt_tlb_invalidation_vma()`
|
**xe_hw_engine.c**
- 577: `gt->usm.reserved_bcs_instance = hwe->instance;` in `hw_engine_init()`
- 1126: `hwe->instance == gt->usm.reserved_bcs_instance;` in `xe_hw_engine_is_reserved()`
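Together with `xe_gt.h:94` above, these two hits show the reservation handshake: `hw_engine_init()` records one blitter (BCS) instance as USM-reserved, and later code identifies it by comparing instances. A sketch; the selection policy `should_reserve_for_usm()` is invented for illustration, and only the assignment and the comparison come from the snippets:

```c
/* Record one copy-engine instance for USM use (xe_hw_engine.c:577). */
static void maybe_reserve_for_usm_sketch(struct xe_gt *gt,
					 struct xe_hw_engine *hwe)
{
	if (hwe->class == XE_ENGINE_CLASS_COPY &&
	    should_reserve_for_usm(gt, hwe))	/* hypothetical policy */
		gt->usm.reserved_bcs_instance = hwe->instance;
}

/* Later identification (xe_hw_engine.c:1126, xe_gt.h:94). */
static bool is_usm_reserved_sketch(struct xe_gt *gt, struct xe_hw_engine *hwe)
{
	return hwe->class == XE_ENGINE_CLASS_COPY &&
	       hwe->instance == gt->usm.reserved_bcs_instance;
}
```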
|
**xe_exec_queue.c**
- 223: `gt->usm.reserved_bcs_instance,` in `xe_exec_queue_create_bind()`
|
**xe_pt.c**
- 2075: `pt_update_ops->last, vm->usm.asid);` in `xe_pt_update_ops_run()`
- 2079: `pt_update_ops->last, vm->usm.asid);` in `xe_pt_update_ops_run()`
|
**xe_lrc.c**
- 987: `xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID, vm->usm.asid);` in `xe_lrc_init()`
|
**/linux/arch/powerpc/mm/**
**drmem.c**
- 229: `ret = func(&lmb, &usm, data);` in `__walk_drmem_v1_lmbs()`
- 275: `ret = func(&lmb, &usm, data);` in `__walk_drmem_v2_lmbs()`
- 288: `const __be32 *prop, *usm;` in `walk_drmem_lmbs_early()` (local)
- 301: `usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory", &len);` in `walk_drmem_lmbs_early()`
- 305: `ret = __walk_drmem_v1_lmbs(prop, usm, data, func);` in `walk_drmem_lmbs_early()`
- 310: `ret = __walk_drmem_v2_lmbs(prop, usm, data, func);` in `walk_drmem_lmbs_early()`
- 321: `__maybe_unused const __be32 **usm,` in `update_lmb()` (argument)
- 397: `const __be32 *prop, *usm;` in `walk_drmem_lmbs()` (local)
- 411: `usm = of_get_usable_memory(dn);` in `walk_drmem_lmbs()`
- 415: `ret = __walk_drmem_v1_lmbs(prop, usm, data, func);` in `walk_drmem_lmbs()`
- [all …]
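These hits outline the walker contract: `walk_drmem_lmbs_early()` fetches the `"linux,drconf-usable-memory"` property from the flattened device tree and hands each LMB, plus a cursor into that property, to the supplied callback (the `func(&lmb, &usm, data)` call sites). A sketch of the early half; the callback typedef is paraphrased from those call sites, and the LMB iterator is a hypothetical stand-in for the real v1/v2 walkers:

```c
#include <linux/of_fdt.h>
#include <asm/drmem.h>

/* Paraphrased from the call sites at drmem.c:229/275. */
typedef int (*drmem_walk_cb)(struct drmem_lmb *lmb, const __be32 **usm,
			     void *data);

static int walk_lmbs_early_sketch(unsigned long node, void *data,
				  drmem_walk_cb func)
{
	struct drmem_lmb lmb;
	const __be32 *usm;
	int len, ret = 0;

	/* Only present when a capture kernel restricted the LMBs. */
	usm = of_get_flat_dt_prop(node, "linux,drconf-usable-memory", &len);

	for_each_drmem_lmb_sketch(&lmb) {	/* hypothetical iterator */
		/* Each callback consumes its slice of the property
		 * stream in order, advancing the shared cursor. */
		ret = func(&lmb, &usm, data);
		if (ret)
			break;
	}
	return ret;
}
```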
|
**numa.c**
- 833: `static inline int __init read_usm_ranges(const __be32 **usm)` in `read_usm_ranges()` (argument)
- 841: `return read_n_cells(n_mem_size_cells, usm);` in `read_usm_ranges()`
- 849: `const __be32 **usm,` in `numa_setup_drmem_lmb()` (argument)
- 864: `if (*usm)` in `numa_setup_drmem_lmb()`
- 872: `ranges = read_usm_ranges(usm);` in `numa_setup_drmem_lmb()`
- 879: `base = read_n_cells(n_mem_addr_cells, usm);` in `numa_setup_drmem_lmb()`
- 880: `size = read_n_cells(n_mem_size_cells, usm);` in `numa_setup_drmem_lmb()`
|
**/linux/arch/powerpc/kernel/**
**prom.c**
- 529: `const __be32 **usm,` in `early_init_drmem_lmb()` (argument)
- 547: `if (*usm)` in `early_init_drmem_lmb()`
- 558: `rngs = dt_mem_next_cell(dt_root_size_cells, usm);` in `early_init_drmem_lmb()`
- 565: `base = dt_mem_next_cell(dt_root_addr_cells, usm);` in `early_init_drmem_lmb()`
- 566: `size = dt_mem_next_cell(dt_root_size_cells, usm);` in `early_init_drmem_lmb()`
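The `numa.c` and `prom.c` hits parse the same stream format: when the per-LMB cursor is non-NULL, read a count of usable subranges, then that many (base, size) cell pairs, advancing the cursor as you go (`read_n_cells()` in numa.c, `dt_mem_next_cell()` here). A sketch of one such callback; `memblock_add()` as the consumer is an assumption for illustration:

```c
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <asm/drmem.h>

/*
 * Sketch of the parse at prom.c:547-566 (numa.c:864-880 has the same
 * shape). The cursor *usm walks "linux,drconf-usable-memory".
 */
static int usable_lmb_cb_sketch(struct drmem_lmb *lmb, const __be32 **usm,
				void *data)
{
	u64 base, size;
	u32 rngs, i;

	if (!*usm) {
		/* No property: the whole LMB is usable. */
		memblock_add(lmb->base_addr, drmem_lmb_size());
		return 0;
	}

	rngs = dt_mem_next_cell(dt_root_size_cells, usm);	  /* :558 */
	for (i = 0; i < rngs; i++) {
		base = dt_mem_next_cell(dt_root_addr_cells, usm); /* :565 */
		size = dt_mem_next_cell(dt_root_size_cells, usm); /* :566 */
		if (size)
			memblock_add(base, size);	/* assumed consumer */
	}
	return 0;
}
```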
|
**/linux/arch/powerpc/kexec/**
**file_load_64.c**
- 330: `static int kdump_setup_usable_lmb(struct drmem_lmb *lmb, const __be32 **usm,` in `kdump_setup_usable_lmb()` (argument)
- 341: `if (*usm) {` in `kdump_setup_usable_lmb()`
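The guard at line 341 suggests the kexec file-load path treats an already-present usable-memory stream specially: a kernel that was itself booted with the property is not a valid host for a kdump load. That reading is an assumption; only the `*usm` test itself comes from the snippet. A sketch under that assumption:

```c
/*
 * Sketch of the check at file_load_64.c:341. The rejection semantics
 * are assumed, as is the helper below; only the *usm test is from
 * the listing.
 */
static int kdump_usable_lmb_sketch(struct drmem_lmb *lmb,
				   const __be32 **usm, void *data)
{
	if (*usm) {
		/* The running kernel already carries
		 * linux,drconf-usable-memory: bail out (assumed). */
		pr_err("unexpected usable-memory property\n");
		return -EINVAL;
	}

	return record_lmb_for_kdump(lmb, data);	/* hypothetical helper */
}
```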
|