/linux-6.3-rc2/include/linux/

  mmap_lock.h
    65  init_rwsem(&mm->mmap_lock);  in mmap_init_lock()
    71  down_write(&mm->mmap_lock);  in mmap_write_lock()
    104  __mmap_lock_trace_released(mm, true);  in mmap_write_unlock()
    105  up_write(&mm->mmap_lock);  in mmap_write_unlock()
    111  downgrade_write(&mm->mmap_lock);  in mmap_write_downgrade()
    117  down_read(&mm->mmap_lock);  in mmap_read_lock()
    144  up_read(&mm->mmap_lock);  in mmap_read_unlock()
    150  up_read_non_owner(&mm->mmap_lock);  in mmap_read_unlock_non_owner()
    155  lockdep_assert_held(&mm->mmap_lock);  in mmap_assert_locked()
    156  VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);  in mmap_assert_locked()
    [all …]
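The wrappers listed above are thin veneers over mm->mmap_lock (an rwsem). A minimal sketch of the usual read-side pattern follows; addr_is_mapped() is a hypothetical helper, not a kernel function:

    #include <linux/mm.h>
    #include <linux/mmap_lock.h>

    /* Sketch: typical read-side use of the mmap lock around a VMA lookup. */
    static bool addr_is_mapped(struct mm_struct *mm, unsigned long addr)
    {
            struct vm_area_struct *vma;
            bool mapped;

            mmap_read_lock(mm);                     /* down_read(&mm->mmap_lock) */
            vma = find_vma(mm, addr);               /* lookup requires the lock to be held */
            mapped = vma && vma->vm_start <= addr;
            mmap_read_unlock(mm);                   /* up_read(&mm->mmap_lock) */

            return mapped;
    }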
  mmu_notifier.h
    89  struct mm_struct *mm);
    101  struct mm_struct *mm,
    111  struct mm_struct *mm,
    122  struct mm_struct *mm,
    208  struct mm_struct *mm,
    240  struct mm_struct *mm;  member
    260  struct mm_struct *mm;  member
    272  struct mm_struct *mm;  member
    292  mmap_write_lock(mm);  in mmu_notifier_get()
    294  mmap_write_unlock(mm);  in mmu_notifier_get()
    [all …]
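A sketch of how a driver might subscribe to these callbacks; the demo_* names are hypothetical and the callbacks only log, but the signatures and the registration call follow this header:

    #include <linux/mmu_notifier.h>
    #include <linux/printk.h>

    /* Hypothetical subscriber: logs invalidations of the registered mm. */
    static int demo_invalidate_range_start(struct mmu_notifier *sub,
                                           const struct mmu_notifier_range *range)
    {
            pr_debug("invalidate [%lx, %lx) blockable=%d\n",
                     range->start, range->end,
                     mmu_notifier_range_blockable(range));
            return 0;
    }

    static void demo_release(struct mmu_notifier *sub, struct mm_struct *mm)
    {
            pr_debug("mm is going away\n");
    }

    static const struct mmu_notifier_ops demo_ops = {
            .invalidate_range_start = demo_invalidate_range_start,
            .release                = demo_release,
    };

    static struct mmu_notifier demo_sub = { .ops = &demo_ops };

    /* mmu_notifier_register() takes mmap_write_lock(mm) internally, the same
     * lock dance mmu_notifier_get() does around __mmu_notifier_register(). */
    static int demo_subscribe(struct mm_struct *mm)
    {
            return mmu_notifier_register(&demo_sub, mm);
    }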
  page_table_check.h
    29  void __page_table_check_pte_clear_range(struct mm_struct *mm,
    55  __page_table_check_pte_clear(mm, addr, pte);  in page_table_check_pte_clear()
    64  __page_table_check_pmd_clear(mm, addr, pmd);  in page_table_check_pmd_clear()
    73  __page_table_check_pud_clear(mm, addr, pud);  in page_table_check_pud_clear()
    76  static inline void page_table_check_pte_set(struct mm_struct *mm,  in page_table_check_pte_set() argument
    83  __page_table_check_pte_set(mm, addr, ptep, pte);  in page_table_check_pte_set()
    86  static inline void page_table_check_pmd_set(struct mm_struct *mm,  in page_table_check_pmd_set() argument
    93  __page_table_check_pmd_set(mm, addr, pmdp, pmd);  in page_table_check_pmd_set()
    96  static inline void page_table_check_pud_set(struct mm_struct *mm,  in page_table_check_pud_set() argument
    103  __page_table_check_pud_set(mm, addr, pudp, pud);  in page_table_check_pud_set()
    [all …]
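These helpers are intended to be invoked from an architecture's page-table setters and compile to no-ops when CONFIG_PAGE_TABLE_CHECK is disabled. A rough, hypothetical sketch of such a call site (not copied from any particular architecture; demo_set_pte_at() is an illustrative name):

    #include <linux/page_table_check.h>
    #include <linux/pgtable.h>

    /* Sketch: an arch-level PTE setter sanity-checking the new entry first. */
    static inline void demo_set_pte_at(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep, pte_t pte)
    {
            page_table_check_pte_set(mm, addr, ptep, pte);  /* no-op when the check is off */
            set_pte(ptep, pte);                             /* arch-provided primitive */
    }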
/linux-6.3-rc2/arch/powerpc/include/asm/

  mmu_context.h
    125  atomic_inc(&mm->context.active_cpus);  in inc_mm_active_cpus()
    141  inc_mm_active_cpus(mm);  in mm_context_add_copro()
    167  radix__flush_all_mm(mm);  in mm_context_remove_copro()
    174  dec_mm_active_cpus(mm);  in mm_context_remove_copro()
    192  mm_context_add_copro(mm);  in mm_context_add_vas_window()
    199  mm_context_remove_copro(mm);  in mm_context_remove_vas_window()
    268  mm->context.vdso = NULL;  in arch_unmap()
    283  #define pkey_mm_init(mm)  argument
    284  #define arch_dup_pkeys(oldmm, mm)  argument
    294  struct mm_struct *mm)  in arch_dup_mmap() argument
    [all …]
/linux-6.3-rc2/drivers/gpu/drm/

  drm_buddy.c
    120  mm->free_list = kmalloc_array(mm->max_order + 1,  in drm_buddy_init()
    131  mm->roots = kmalloc_array(mm->n_roots,  in drm_buddy_init()
    172  drm_block_free(mm, mm->roots[i]);  in drm_buddy_init()
    193  drm_block_free(mm, mm->roots[i]);  in drm_buddy_fini()
    196  WARN_ON(mm->avail != mm->size);  in drm_buddy_fini()
    297  mm->avail += drm_buddy_block_size(mm, block);  in drm_buddy_free_block()
    523  mm->avail -= drm_buddy_block_size(mm, block);  in __alloc_range()
    621  mm->avail += drm_buddy_block_size(mm, block);  in drm_buddy_block_trim()
    632  mm->avail -= drm_buddy_block_size(mm, block);  in drm_buddy_block_trim()
    725  mm->avail -= drm_buddy_block_size(mm, block);  in drm_buddy_alloc_blocks()
    [all …]
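The avail/size bookkeeping above belongs to the DRM buddy allocator. A small, self-contained usage sketch, with pool and allocation sizes picked arbitrarily for illustration (demo_buddy() is not a kernel function):

    #include <drm/drm_buddy.h>
    #include <linux/list.h>
    #include <linux/sizes.h>

    /* Sketch: carve a 2 MiB allocation out of a 1 GiB buddy pool of 4 KiB chunks. */
    static int demo_buddy(void)
    {
            struct drm_buddy mm;
            LIST_HEAD(blocks);
            int err;

            err = drm_buddy_init(&mm, SZ_1G, SZ_4K);
            if (err)
                    return err;

            err = drm_buddy_alloc_blocks(&mm, 0, SZ_1G, SZ_2M, SZ_4K, &blocks, 0);
            if (!err)
                    drm_buddy_free_list(&mm, &blocks);  /* returns blocks, restores mm.avail */

            drm_buddy_fini(&mm);    /* WARNs if mm.avail != mm.size, as seen above */
            return err;
    }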
/linux-6.3-rc2/arch/s390/include/asm/

  pgalloc.h
    46  rc = crst_table_upgrade(mm, addr + len);  in check_asce_limit()
    64  if (!mm_p4d_folded(mm))  in p4d_free()
    78  if (!mm_pud_folded(mm))  in pud_free()
    90  crst_table_free(mm, table);  in pmd_alloc_one()
    98  if (mm_pmd_folded(mm))  in pmd_free()
    121  return (pgd_t *) crst_table_alloc(mm);  in pgd_alloc()
    135  #define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)  argument
    140  #define pte_alloc_one_kernel(mm) ((pte_t *)page_table_alloc(mm))  argument
    141  #define pte_alloc_one(mm) ((pte_t *)page_table_alloc(mm))  argument
    143  #define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)  argument
    [all …]
  mmu_context.h
    20  struct mm_struct *mm)  in init_new_context() argument
    30  mm->context.gmap_asce = 0;  in init_new_context()
    31  mm->context.flush_mm = 0;  in init_new_context()
    35  (current->mm && current->mm->context.alloc_pgste);  in init_new_context()
    36  mm->context.has_pgste = 0;  in init_new_context()
    37  mm->context.uses_skeys = 0;  in init_new_context()
    38  mm->context.uses_cmm = 0;  in init_new_context()
    67  mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |  in init_new_context()
    104  struct mm_struct *mm = tsk->mm;  in finish_arch_post_lock_switch() local
    106  if (mm) {  in finish_arch_post_lock_switch()
    [all …]
/linux-6.3-rc2/arch/x86/include/asm/

  mmu_context.h
    64  mm->context.ldt = NULL;  in init_new_context_ldt()
    73  struct mm_struct *mm)  in ldt_dup_context() argument
    104  struct mm_struct *mm)  in init_new_context() argument
    106  mutex_init(&mm->context.lock);  in init_new_context()
    119  init_new_context_ldt(mm);  in init_new_context()
    126  destroy_context_ldt(mm);  in destroy_context()
    143  #define deactivate_mm(tsk, mm) \  argument
    156  struct mm_struct *mm)  in arch_dup_pkeys() argument
    170  arch_dup_pkeys(oldmm, mm);  in arch_dup_mmap()
    177  paravirt_arch_exit_mmap(mm);  in arch_exit_mmap()
    [all …]
  pgalloc.h
    18  #define paravirt_pgd_alloc(mm) __paravirt_pgd_alloc(mm)  argument
    67  paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);  in pmd_populate_kernel()
    74  paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);  in pmd_populate_kernel_safe()
    83  paravirt_alloc_pte(mm, pfn);  in pmd_populate()
    101  paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);  in pud_populate()
    107  paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);  in pud_populate_safe()
    115  paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);  in p4d_populate()
    121  paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);  in p4d_populate_safe()
    138  paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);  in pgd_populate()
    146  paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);  in pgd_populate_safe()
    [all …]
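These populate hooks sit underneath the generic page-table allocators. As a hedged sketch of where they are reached from, this is roughly how core code allocates intermediate tables down to the PMD for a given mm and address; demo_walk_alloc_pmd() is illustrative, and __p4d_alloc()/__pud_alloc()/__pmd_alloc() are what eventually call the pgd_populate()/p4d_populate()/pud_populate() helpers shown above:

    #include <linux/mm.h>
    #include <linux/pgtable.h>

    /* Sketch: allocate intermediate tables down to the PMD level for addr. */
    static pmd_t *demo_walk_alloc_pmd(struct mm_struct *mm, unsigned long addr)
    {
            pgd_t *pgd = pgd_offset(mm, addr);
            p4d_t *p4d = p4d_alloc(mm, pgd, addr);
            pud_t *pud;

            if (!p4d)
                    return NULL;
            pud = pud_alloc(mm, p4d, addr);
            if (!pud)
                    return NULL;
            return pmd_alloc(mm, pud, addr);
    }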
/linux-6.3-rc2/mm/

  debug.c
    185  mm, mm->task_size,  in dump_mm()
    189  mm->mmap_base, mm->mmap_legacy_base,  in dump_mm()
    190  mm->pgd, atomic_read(&mm->mm_users),  in dump_mm()
    194  mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,  in dump_mm()
    196  mm->data_vm, mm->exec_vm, mm->stack_vm,  in dump_mm()
    197  mm->start_code, mm->end_code, mm->start_data, mm->end_data,  in dump_mm()
    198  mm->start_brk, mm->brk, mm->start_stack,  in dump_mm()
    199  mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,  in dump_mm()
    200  mm->binfmt, mm->flags,  in dump_mm()
    212  mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,  in dump_mm()
    [all …]
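dump_mm() prints the mm_struct fields shown above and is normally reached through VM_BUG_ON_MM() when CONFIG_DEBUG_VM is enabled. A minimal, illustrative use (demo_check_mm() is hypothetical):

    #include <linux/mm.h>
    #include <linux/mmdebug.h>

    /* Sketch: assert an invariant on an mm; with CONFIG_DEBUG_VM, a failing
     * VM_BUG_ON_MM() dumps the mm via dump_mm() before BUG(). */
    static void demo_check_mm(struct mm_struct *mm)
    {
            VM_BUG_ON_MM(atomic_read(&mm->mm_users) < 0, mm);
    }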
  mmu_notifier.c
    267  .mm = mm,  in mn_itree_release()
    682  mmgrab(mm);  in __mmu_notifier_register()
    683  subscription->mm = mm;  in __mmu_notifier_register()
    858  mmdrop(mm);  in mmu_notifier_unregister()
    866  struct mm_struct *mm = subscription->mm;  in mmu_notifier_free_rcu() local
    870  mmdrop(mm);  in mmu_notifier_free_rcu()
    897  struct mm_struct *mm = subscription->mm;  in mmu_notifier_put() local
    918  interval_sub->mm = mm;  in __mmu_interval_notifier_insert()
    936  mmgrab(mm);  in __mmu_interval_notifier_insert()
    1063  struct mm_struct *mm = interval_sub->mm;  in mmu_interval_notifier_remove() local
    [all …]
/linux-6.3-rc2/arch/m68k/include/asm/

  mmu_context.h
    45  mm->context = ctx;  in get_mmu_context()
    46  context_mm[ctx] = mm;  in get_mmu_context()
    52  #define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0)  argument
    85  struct mm_struct *mm)  in activate_mm() argument
    87  get_mmu_context(mm);  in activate_mm()
    88  set_context(mm->context, mm->pgd);  in activate_mm()
    115  mm = &init_mm;  in load_ksp_mmu()
    118  mm = task->mm;  in load_ksp_mmu()
    121  if (!mm)  in load_ksp_mmu()
    188  mm->context = get_free_context(mm);  in get_mmu_context()
    [all …]
/linux-6.3-rc2/drivers/gpu/drm/tests/

  drm_buddy_test.c
    210  if (!mm->n_roots) {  in check_mm()
    215  if (mm->n_roots != hweight64(mm->size)) {  in check_mm()
    217  mm->n_roots, hweight64(mm->size));  in check_mm()
    229  root = mm->roots[i];  in check_mm()
    331  struct drm_buddy mm;  in drm_test_buddy_alloc_pathological() local
    402  drm_buddy_fini(&mm);  in drm_test_buddy_alloc_pathological()
    409  struct drm_buddy mm;  in drm_test_buddy_alloc_smoke() local
    492  drm_buddy_fini(&mm);  in drm_test_buddy_alloc_smoke()
    501  struct drm_buddy mm;  in drm_test_buddy_alloc_pessimistic() local
    652  rem = mm.size;  in drm_test_buddy_alloc_range()
    [all …]
/linux-6.3-rc2/arch/powerpc/mm/book3s64/

  mmu_context.c
    101  if (!mm->context.hash_context)  in hash__init_new_context()
    118  if (mm->context.id == 0) {  in hash__init_new_context()
    146  pkey_mm_init(mm);  in hash__init_new_context()
    207  mm->context.id = index;  in init_new_context()
    209  mm->context.pte_frag = NULL;  in init_new_context()
    210  mm->context.pmd_frag = NULL;  in init_new_context()
    212  mm_iommu_init(mm);  in init_new_context()
    265  frag = mm->context.pte_frag;  in destroy_pagetable_cache()
    269  frag = mm->context.pmd_frag;  in destroy_pagetable_cache()
    296  subpage_prot_free(mm);  in destroy_context()
    [all …]
  slice.c
    93  vma = find_vma(mm, addr);  in slice_area_is_free()
    127  if (!slice_low_has_vma(mm, i))  in slice_mask_for_free()
    134  if (!slice_high_has_vma(mm, i))  in slice_mask_for_free()
    173  struct mm_struct *mm = parm;  in slice_flush_segments() local
    176  if (mm != current->active_mm)  in slice_flush_segments()
    251  copro_flush_all_slbs(mm);  in slice_convert()
    385  return slice_find_area_topdown(mm, mm->mmap_base, len, mask, psize, high_limit);  in slice_find_area()
    387  return slice_find_area_bottomup(mm, mm->mmap_base, len, mask, psize, high_limit);  in slice_find_area()
    436  struct mm_struct *mm = current->mm;  in slice_get_unmapped_area() local
    467  BUG_ON(mm->task_size == 0);  in slice_get_unmapped_area()
    [all …]
/linux-6.3-rc2/arch/s390/mm/

  pgtable.c
    119  mm->context.flush_mm = 1;  in ptep_flush_lazy()
    195  struct mm_struct *mm)  in pgste_set_key() argument
    260  if (mm_has_pgste(mm)) {  in ptep_xchg_start()
    271  if (mm_has_pgste(mm)) {  in ptep_xchg_commit()
    359  if (mm_has_pgste(mm)) {  in ptep_modify_prot_start()
    374  if (mm_has_pgste(mm)) {  in ptep_modify_prot_commit()
    393  if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)  in pmdp_idte_local()
    403  if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)  in pmdp_idte_global()
    407  if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)  in pmdp_idte_global()
    411  if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)  in pmdp_idte_global()
    [all …]
/linux-6.3-rc2/include/linux/sched/

  mm.h
    37  atomic_inc(&mm->mm_count);  in mmgrab()
    50  __mmdrop(mm);  in mmdrop()
    62  __mmdrop(mm);  in __mmdrop_delayed()
    78  mmdrop(mm);  in mmdrop_sched()
    100  atomic_inc(&mm->mm_users);  in mmget()
    191  rcu_dereference(tsk->real_parent)->mm == tsk->mm;  in in_vfork()
    423  if (current->mm != mm)  in membarrier_mm_sync_core_before_usermode()
    457  mm->pasid = INVALID_IOASID;  in mm_pasid_init()
    463  mm->pasid = pasid;  in mm_pasid_set()
    469  ioasid_free(mm->pasid);  in mm_pasid_drop()
    [all …]
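The two reference counts touched above have different meanings: mm_count (mmgrab()/mmdrop()) keeps the mm_struct itself from being freed, while mm_users (mmget()/mmput()) keeps the address space — page tables and VMAs — alive. A short sketch of both patterns, with hypothetical demo_* wrappers:

    #include <linux/sched/mm.h>

    /* Pin only the mm_struct (mm_count): safe to hold long-term, but the
     * address space may already be torn down by the time it is used. */
    static void demo_pin_struct(struct mm_struct *mm)
    {
            mmgrab(mm);             /* atomic_inc(&mm->mm_count) */
            /* ... stash mm somewhere and use it later ... */
            mmdrop(mm);
    }

    /* Pin the whole address space (mm_users), but only if it is still live. */
    static void demo_pin_address_space(struct mm_struct *mm)
    {
            if (!mmget_not_zero(mm))
                    return;         /* the process has already exited */
            /* ... walk VMAs, read fields, etc. ... */
            mmput(mm);
    }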
/linux-6.3-rc2/arch/arm/include/asm/

  mmu_context.h
    32  __check_vmalloc_seq(mm);  in check_vmalloc_seq()
    44  atomic64_set(&mm->context.id, 0);  in init_new_context()
    65  check_vmalloc_seq(mm);  in check_and_switch_context()
    75  mm->context.switch_pending = 1;  in check_and_switch_context()
    77  cpu_switch_mm(mm->pgd, mm);  in check_and_switch_context()
    85  struct mm_struct *mm = current->mm;  in finish_arch_post_lock_switch() local
    87  if (mm && mm->context.switch_pending) {  in finish_arch_post_lock_switch()
    96  mm->context.switch_pending = 0;  in finish_arch_post_lock_switch()
    97  cpu_switch_mm(mm->pgd, mm);  in finish_arch_post_lock_switch()
    144  if (mm != &init_mm)  in enter_lazy_tlb()
    [all …]
/linux-6.3-rc2/arch/sparc/include/asm/

  mmu_context_64.h
    29  void destroy_context(struct mm_struct *mm);
    40  __tsb_context_switch(__pa(mm->pgd),  in tsb_context_switch_ctx()
    55  void tsb_grow(struct mm_struct *mm,
    59  void smp_tsb_sync(struct mm_struct *mm);
    86  if (unlikely(mm == &init_mm))  in switch_mm()
    90  ctx_valid = CTX_VALID(mm->context);  in switch_mm()
    92  get_new_mmu_context(mm);  in switch_mm()
    124  tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));  in switch_mm()
    131  cpumask_set_cpu(cpu, mm_cpumask(mm));  in switch_mm()
    138  #define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)  argument
    [all …]
/linux-6.3-rc2/arch/x86/kernel/

  ldt.c
    143  load_mm_ldt(mm);  in flush_ldt()
    193  if (mm->context.ldt) {  in do_sanity_check()
    370  pte_clear(mm, va, ptep);  in unmap_ldt_struct()
    427  on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);  in install_ldt()
    473  free_ldt_pgtables(mm);  in ldt_dup_context()
    492  mm->context.ldt = NULL;  in destroy_context_ldt()
    497  free_ldt_pgtables(mm);  in ldt_arch_exit_mmap()
    502  struct mm_struct *mm = current->mm;  in read_ldt() local
    508  if (!mm->context.ldt) {  in read_ldt()
    578  struct mm_struct *mm = current->mm;  in write_ldt() local
    [all …]
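read_ldt() and write_ldt() above back the modify_ldt(2) syscall. For context, a userspace sketch of installing one LDT entry; the descriptor values are arbitrary, demo_install_ldt_entry() is illustrative, and the raw syscall is used because glibc provides no wrapper:

    /* Userspace sketch: install one LDT entry via modify_ldt(2). */
    #include <asm/ldt.h>            /* struct user_desc */
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int demo_install_ldt_entry(void)
    {
            struct user_desc desc;

            memset(&desc, 0, sizeof(desc));
            desc.entry_number   = 0;
            desc.base_addr      = 0;
            desc.limit          = 0xfffff;
            desc.seg_32bit      = 1;
            desc.limit_in_pages = 1;

            /* func 0x11 selects the newer write mode of write_ldt() */
            return syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc));
    }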
/linux-6.3-rc2/fs/proc/

  task_nommu.c
    28  mmap_read_lock(mm);  in task_mem()
    87  mmap_read_lock(mm);  in task_vsize()
    103  mmap_read_lock(mm);  in task_statm()
    113  *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))  in task_statm()
    115  *data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))  in task_statm()
    207  mm = priv->mm;  in m_start()
    208  if (!mm || !mmget_not_zero(mm))  in m_start()
    212  mmput(mm);  in m_start()
    222  mmput(mm);  in m_start()
    232  mmput(priv->mm);  in m_stop()
    [all …]
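The m_start()/m_stop() code above follows the standard pattern for inspecting another task's mm: take a reference (mmget_not_zero()), lock, read, then mmput(). A generic sketch of the same pattern using get_task_mm(); demo_task_total_vm() is illustrative, and the proc code itself obtains the mm from its open handler rather than like this:

    #include <linux/mm.h>
    #include <linux/sched/mm.h>

    /* Sketch: safely read a field of another task's mm. */
    static unsigned long demo_task_total_vm(struct task_struct *task)
    {
            struct mm_struct *mm = get_task_mm(task);   /* bumps mm_users or returns NULL */
            unsigned long total;

            if (!mm)
                    return 0;       /* kernel thread or task already exited */

            mmap_read_lock(mm);
            total = mm->total_vm;
            mmap_read_unlock(mm);
            mmput(mm);

            return total;
    }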
/linux-6.3-rc2/arch/sparc/mm/

  tlb.c
    26  struct mm_struct *mm = tb->mm;  in flush_tlb_pending() local
    33  if (CTX_VALID(mm->context)) {  in flush_tlb_pending()
    81  if (unlikely(nr != 0 && mm != tb->mm)) {  in tlb_batch_add_one()
    88  global_flush_tlb_page(mm, vaddr);  in tlb_batch_add_one()
    93  tb->mm = mm;  in tlb_batch_add_one()
    136  flush_dcache_page_all(mm, page);  in tlb_batch_add()
    169  if (mm == &init_mm)  in __set_pmd_acct()
    185  mm->context.thp_pte_count++;  in __set_pmd_acct()
    190  mm->context.thp_pte_count--;  in __set_pmd_acct()
    272  if (!pmd_huge_pte(mm, pmdp))  in pgtable_trans_huge_deposit()
    [all …]
  tsb.c
    121  struct mm_struct *mm = tb->mm;  in flush_tsb_user() local
    524  tsb_context_switch(mm);  in tsb_grow()
    528  smp_tsb_sync(mm);  in tsb_grow()
    545  spin_lock_init(&mm->context.lock);  in init_new_context()
    549  mm->context.tag_store = NULL;  in init_new_context()
    560  mm->context.thp_pte_count = 0;  in init_new_context()
    579  tsb_grow(mm, MM_TSB_HUGE,  in init_new_context()
    611  if (CTX_VALID(mm->context)) {  in destroy_context()
    619  if (mm->context.tag_store) {  in destroy_context()
    632  kfree(mm->context.tag_store);  in destroy_context()
    [all …]
/linux-6.3-rc2/drivers/gpu/drm/i915/gem/

  i915_gem_shrinker.c
    112  { &i915->mm.purge_list, ~0u },  in i915_gem_shrink()
    114  &i915->mm.shrink_list,  in i915_gem_shrink()
    194  mm.link))) {  in i915_gem_shrink()
    305  i915->mm.shrinker.batch =  in i915_gem_shrinker_count()
    367  list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {  in i915_gem_shrinker_oom()
    428  i915->mm.shrinker.batch = 4096;  in i915_gem_driver_register__shrinker()
    491  !list_empty(&obj->mm.link)) {  in i915_gem_object_make_unshrinkable()
    492  list_del_init(&obj->mm.link);  in i915_gem_object_make_unshrinkable()
    493  i915->mm.shrink_count--;  in i915_gem_object_make_unshrinkable()
    516  list_add_tail(&obj->mm.link, head);  in ___i915_gem_object_make_shrinkable()
    [all …]
/linux-6.3-rc2/arch/mips/include/asm/

  mmu_context.h
    111  return mm->context.asid[cpu];  in cpu_context()
    115  struct mm_struct *mm, u64 ctx)  in set_cpu_context() argument
    120  mm->context.asid[cpu] = ctx;  in set_cpu_context()
    124  #define cpu_asid(cpu, mm) \  argument
    142  set_cpu_context(0, mm, 0);  in init_new_context()
    145  set_cpu_context(i, mm, 0);  in init_new_context()
    183  dsemul_mm_cleanup(mm);  in destroy_context()
    187  drop_mmu_context(struct mm_struct *mm)  in drop_mmu_context() argument
    197  ctx = cpu_context(cpu, mm);  in drop_mmu_context()
    224  get_new_mmu_context(mm);  in drop_mmu_context()
    [all …]