Lines Matching refs:mm
179 void mm_trace_rss_stat(struct mm_struct *mm, int member) in mm_trace_rss_stat() argument
181 trace_rss_stat(mm, member); in mm_trace_rss_stat()
194 mm_dec_nr_ptes(tlb->mm); in free_pte_range()
228 mm_dec_nr_pmds(tlb->mm); in free_pmd_range()
262 mm_dec_nr_puds(tlb->mm); in free_pud_range()
363 pgd = pgd_offset(tlb->mm, addr); in free_pgd_range()
424 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte) in pmd_install() argument
426 spinlock_t *ptl = pmd_lock(mm, pmd); in pmd_install()
429 mm_inc_nr_ptes(mm); in pmd_install()
444 pmd_populate(mm, pmd, *pte); in pmd_install()
450 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd) in __pte_alloc() argument
452 pgtable_t new = pte_alloc_one(mm); in __pte_alloc()
456 pmd_install(mm, pmd, &new); in __pte_alloc()
458 pte_free(mm, new); in __pte_alloc()
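
For context, the matches at 424-458 above come from the allocate-then-install pattern: a PTE page is allocated outside any lock, pmd_install() takes the PMD lock and populates the entry only if nobody raced ahead, and __pte_alloc() frees the page if it was not consumed. A minimal sketch reconstructed from the listed lines, assuming a kernel build context (the locking inside pmd_install() is elided):

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_t new = pte_alloc_one(mm);	/* allocate outside the PMD lock */
	if (!new)
		return -ENOMEM;

	pmd_install(mm, pmd, &new);		/* consumes the page and clears 'new' on success */
	if (new)				/* another thread populated the PMD first */
		pte_free(mm, new);
	return 0;
}
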
485 static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss) in add_mm_rss_vec() argument
491 add_mm_counter(mm, i, rss[i]); in add_mm_rss_vec()
1495 struct mm_struct *mm = tlb->mm; in zap_present_folio_ptes() local
1499 ptent = get_and_clear_full_ptes(mm, addr, pte, nr, tlb->fullmm); in zap_present_folio_ptes()
1512 clear_full_ptes(mm, addr, pte, nr, tlb->fullmm); in zap_present_folio_ptes()
1546 struct mm_struct *mm = tlb->mm; in zap_present_ptes() local
1554 ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm); in zap_present_ptes()
1560 ksm_might_unmap_zero_page(mm, ptent); in zap_present_ptes()
1698 struct mm_struct *mm = tlb->mm; in zap_pte_range() local
1712 start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl); in zap_pte_range()
1716 flush_tlb_batched_pending(mm); in zap_pte_range()
1746 direct_reclaim = try_get_and_clear_pmd(mm, pmd, &pmdval); in zap_pte_range()
1748 add_mm_rss_vec(mm, rss); in zap_pte_range()
1776 free_pte(mm, start, tlb, pmdval); in zap_pte_range()
1778 try_to_free_pte(mm, pmd, start, tlb); in zap_pte_range()
1806 spinlock_t *ptl = pmd_lock(tlb->mm, pmd); in zap_pmd_range()
1839 mmap_assert_locked(tlb->mm); in zap_pud_range()
2002 VM_WARN_ON_ONCE(!tlb || tlb->mm != vma->vm_mm); in zap_page_range_single_batched()
2067 static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr) in walk_to_pmd() argument
2074 pgd = pgd_offset(mm, addr); in walk_to_pmd()
2075 p4d = p4d_alloc(mm, pgd, addr); in walk_to_pmd()
2078 pud = pud_alloc(mm, p4d, addr); in walk_to_pmd()
2081 pmd = pmd_alloc(mm, pud, addr); in walk_to_pmd()
2089 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, in __get_locked_pte() argument
2092 pmd_t *pmd = walk_to_pmd(mm, addr); in __get_locked_pte()
2096 return pte_alloc_map_lock(mm, pmd, addr, ptl); in __get_locked_pte()
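
The walk_to_pmd() matches at 2067-2081 show the canonical top-down allocation walk that several later hits (remap_*_range, __handle_mm_fault) repeat level by level. A sketch assembled from those lines, again assuming a kernel build context:

static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, addr);		/* PGD entries always exist */
	p4d = p4d_alloc(mm, pgd, addr);		/* lower levels are allocated on demand */
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	return pmd_alloc(mm, pud, addr);	/* NULL on allocation failure */
}
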
2230 struct mm_struct *const mm = vma->vm_mm; in insert_pages() local
2237 pmd = walk_to_pmd(mm, addr); in insert_pages()
2246 if (pte_alloc(mm, pmd)) in insert_pages()
2253 start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock); in insert_pages()
2444 struct mm_struct *mm = vma->vm_mm; in insert_pfn() local
2448 pte = get_locked_pte(mm, addr, &ptl); in insert_pfn()
2484 set_pte_at(mm, addr, pte, entry); in insert_pfn()
2682 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, in remap_pte_range() argument
2690 mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); in remap_pte_range()
2700 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot))); in remap_pte_range()
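
The remap_pte_range() hits at 2682-2700 are the leaf of the remap_pfn_range() hierarchy listed below: each level allocates the next table, and the PTE level installs special (no struct page) PFN mappings. A simplified sketch based on those lines; the loop structure and unlock are inferred, and lazy-MMU batching and busy-PTE error handling are omitted:

static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
		unsigned long addr, unsigned long end,
		unsigned long pfn, pgprot_t prot)
{
	pte_t *pte, *mapped_pte;
	spinlock_t *ptl;

	mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;
	do {
		/* install a special mapping for this pfn, then advance */
		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(mapped_pte, ptl);
	return 0;
}
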
2708 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, in remap_pmd_range() argument
2717 pmd = pmd_alloc(mm, pud, addr); in remap_pmd_range()
2723 err = remap_pte_range(mm, pmd, addr, next, in remap_pmd_range()
2731 static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d, in remap_pud_range() argument
2740 pud = pud_alloc(mm, p4d, addr); in remap_pud_range()
2745 err = remap_pmd_range(mm, pud, addr, next, in remap_pud_range()
2753 static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd, in remap_p4d_range() argument
2762 p4d = p4d_alloc(mm, pgd, addr); in remap_p4d_range()
2767 err = remap_pud_range(mm, p4d, addr, next, in remap_p4d_range()
2781 struct mm_struct *mm = vma->vm_mm; in remap_pfn_range_internal() local
2815 pgd = pgd_offset(mm, addr); in remap_pfn_range_internal()
2819 err = remap_p4d_range(mm, pgd, addr, next, in remap_pfn_range_internal()
2987 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, in apply_to_pte_range() argument
2997 mapped_pte = pte = (mm == &init_mm) ? in apply_to_pte_range()
2999 pte_alloc_map_lock(mm, pmd, addr, &ptl); in apply_to_pte_range()
3003 mapped_pte = pte = (mm == &init_mm) ? in apply_to_pte_range()
3005 pte_offset_map_lock(mm, pmd, addr, &ptl); in apply_to_pte_range()
3025 if (mm != &init_mm) in apply_to_pte_range()
3030 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, in apply_to_pmd_range() argument
3042 pmd = pmd_alloc_track(mm, pud, addr, mask); in apply_to_pmd_range()
3059 err = apply_to_pte_range(mm, pmd, addr, next, in apply_to_pmd_range()
3068 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d, in apply_to_pud_range() argument
3078 pud = pud_alloc_track(mm, p4d, addr, mask); in apply_to_pud_range()
3095 err = apply_to_pmd_range(mm, pud, addr, next, in apply_to_pud_range()
3104 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd, in apply_to_p4d_range() argument
3114 p4d = p4d_alloc_track(mm, pgd, addr, mask); in apply_to_p4d_range()
3131 err = apply_to_pud_range(mm, p4d, addr, next, in apply_to_p4d_range()
3140 static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr, in __apply_to_page_range() argument
3153 pgd = pgd_offset(mm, addr); in __apply_to_page_range()
3167 err = apply_to_p4d_range(mm, pgd, addr, next, in __apply_to_page_range()
3183 int apply_to_page_range(struct mm_struct *mm, unsigned long addr, in apply_to_page_range() argument
3186 return __apply_to_page_range(mm, addr, size, fn, data, true); in apply_to_page_range()
3197 int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr, in apply_to_existing_page_range() argument
3200 return __apply_to_page_range(mm, addr, size, fn, data, false); in apply_to_existing_page_range()
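
apply_to_page_range() at 3183 and apply_to_existing_page_range() at 3197 wrap the same __apply_to_page_range() walker, differing only in whether missing page tables are allocated along the way. A hypothetical caller might look like the sketch below; count_mapped_ptes() and count_pte() are illustrative names, not part of the listing:

/* Callback invoked for each PTE; must match pte_fn_t. */
static int count_pte(pte_t *ptep, unsigned long addr, void *data)
{
	unsigned long *count = data;

	if (!pte_none(ptep_get(ptep)))
		(*count)++;
	return 0;
}

static unsigned long count_mapped_ptes(struct mm_struct *mm,
		unsigned long addr, unsigned long size)
{
	unsigned long count = 0;

	/* "existing" variant: visits only page tables that are already there */
	apply_to_existing_page_range(mm, addr, size, count_pte, &count);
	return count;
}
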
3239 struct mm_struct *mm = vma->vm_mm; in __wp_page_copy_user() local
3266 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); in __wp_page_copy_user()
3294 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); in __wp_page_copy_user()
3534 struct mm_struct *mm = vma->vm_mm; in wp_page_copy() local
3552 new_folio = folio_prealloc(mm, vma, vmf->address, pfn_is_zero); in wp_page_copy()
3580 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, in wp_page_copy()
3588 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); in wp_page_copy()
3592 dec_mm_counter(mm, mm_counter_file(old_folio)); in wp_page_copy()
3593 inc_mm_counter(mm, MM_ANONPAGES); in wp_page_copy()
3596 ksm_might_unmap_zero_page(mm, vmf->orig_pte); in wp_page_copy()
3597 inc_mm_counter(mm, MM_ANONPAGES); in wp_page_copy()
3622 set_pte_at(mm, vmf->address, vmf->pte, entry); in wp_page_copy()
6113 struct mm_struct *mm = vma->vm_mm; in __handle_mm_fault() local
6119 pgd = pgd_offset(mm, address); in __handle_mm_fault()
6120 p4d = p4d_alloc(mm, pgd, address); in __handle_mm_fault()
6124 vmf.pud = pud_alloc(mm, p4d, address); in __handle_mm_fault()
6155 vmf.pmd = pmd_alloc(mm, vmf.pud, address); in __handle_mm_fault()
6176 pmd_migration_entry_wait(mm, vmf.pmd); in __handle_mm_fault()
6213 static inline void mm_account_fault(struct mm_struct *mm, struct pt_regs *regs, in mm_account_fault() argument
6229 count_memcg_event_mm(mm, PGFAULT); in mm_account_fault()
6333 struct mm_struct *mm = vma->vm_mm; in handle_mm_fault() local
6390 mm_account_fault(mm, regs, address, flags, ret); in handle_mm_fault()
6401 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) in __p4d_alloc() argument
6403 p4d_t *new = p4d_alloc_one(mm, address); in __p4d_alloc()
6407 spin_lock(&mm->page_table_lock); in __p4d_alloc()
6409 p4d_free(mm, new); in __p4d_alloc()
6412 pgd_populate(mm, pgd, new); in __p4d_alloc()
6414 spin_unlock(&mm->page_table_lock); in __p4d_alloc()
6424 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address) in __pud_alloc() argument
6426 pud_t *new = pud_alloc_one(mm, address); in __pud_alloc()
6430 spin_lock(&mm->page_table_lock); in __pud_alloc()
6432 mm_inc_nr_puds(mm); in __pud_alloc()
6434 p4d_populate(mm, p4d, new); in __pud_alloc()
6436 pud_free(mm, new); in __pud_alloc()
6437 spin_unlock(&mm->page_table_lock); in __pud_alloc()
6447 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) in __pmd_alloc() argument
6450 pmd_t *new = pmd_alloc_one(mm, address); in __pmd_alloc()
6454 ptl = pud_lock(mm, pud); in __pmd_alloc()
6456 mm_inc_nr_pmds(mm); in __pmd_alloc()
6458 pud_populate(mm, pud, new); in __pmd_alloc()
6460 pmd_free(mm, new); in __pmd_alloc()
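
The __p4d_alloc()/__pud_alloc()/__pmd_alloc() hits at 6401-6460 all follow the same shape: allocate the new table, take the appropriate lock, then either populate the parent entry (bumping the nr_puds/nr_pmds counter) or free the table because another thread won the race. A sketch of the PMD case reconstructed from the listed lines; the pud_present() check and the unlock are inferred, not shown in the listing:

int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	spinlock_t *ptl;
	pmd_t *new = pmd_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	ptl = pud_lock(mm, pud);
	if (!pud_present(*pud)) {
		mm_inc_nr_pmds(mm);		/* account the new PMD table */
		pud_populate(mm, pud, new);
	} else {
		/* another thread already installed a PMD table */
		pmd_free(mm, new);
	}
	spin_unlock(ptl);
	return 0;
}
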
6531 struct mm_struct *mm = vma->vm_mm; in follow_pfnmap_start() local
6547 pgdp = pgd_offset(mm, address); in follow_pfnmap_start()
6561 lock = pud_lock(mm, pudp); in follow_pfnmap_start()
6575 lock = pmd_lock(mm, pmdp); in follow_pfnmap_start()
6586 ptep = pte_offset_map_lock(mm, pmdp, address, &lock); in follow_pfnmap_start()
6686 static int __access_remote_vm(struct mm_struct *mm, unsigned long addr, in __access_remote_vm() argument
6692 if (mmap_read_lock_killable(mm)) in __access_remote_vm()
6696 addr = untagged_addr_remote(mm, addr); in __access_remote_vm()
6699 if (!vma_lookup(mm, addr) && !expand_stack(mm, addr)) in __access_remote_vm()
6708 struct page *page = get_user_page_vma_remote(mm, addr, in __access_remote_vm()
6713 vma = vma_lookup(mm, addr); in __access_remote_vm()
6715 vma = expand_stack(mm, addr); in __access_remote_vm()
6759 mmap_read_unlock(mm); in __access_remote_vm()
6776 int access_remote_vm(struct mm_struct *mm, unsigned long addr, in access_remote_vm() argument
6779 return __access_remote_vm(mm, addr, buf, len, gup_flags); in access_remote_vm()
6790 struct mm_struct *mm; in access_process_vm() local
6793 mm = get_task_mm(tsk); in access_process_vm()
6794 if (!mm) in access_process_vm()
6797 ret = __access_remote_vm(mm, addr, buf, len, gup_flags); in access_process_vm()
6799 mmput(mm); in access_process_vm()
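
The access_process_vm() hits at 6790-6799 show the standard remote-access pattern: pin the target mm with get_task_mm(), do the work under the mmap read lock inside __access_remote_vm(), then drop the reference with mmput(). Assembled from those lines, with the declarations and return path filled in as assumptions:

int access_process_vm(struct task_struct *tsk, unsigned long addr,
		void *buf, int len, unsigned int gup_flags)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(tsk);		/* takes a reference, or NULL if the mm is gone */
	if (!mm)
		return 0;

	ret = __access_remote_vm(mm, addr, buf, len, gup_flags);
	mmput(mm);			/* drop the reference taken by get_task_mm() */

	return ret;
}
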
6810 static int __copy_remote_vm_str(struct mm_struct *mm, unsigned long addr, in __copy_remote_vm_str() argument
6818 if (mmap_read_lock_killable(mm)) in __copy_remote_vm_str()
6821 addr = untagged_addr_remote(mm, addr); in __copy_remote_vm_str()
6824 if (!vma_lookup(mm, addr)) { in __copy_remote_vm_str()
6836 page = get_user_page_vma_remote(mm, addr, gup_flags, &vma); in __copy_remote_vm_str()
6881 mmap_read_unlock(mm); in __copy_remote_vm_str()
6904 struct mm_struct *mm; in copy_remote_vm_str() local
6910 mm = get_task_mm(tsk); in copy_remote_vm_str()
6911 if (!mm) { in copy_remote_vm_str()
6916 ret = __copy_remote_vm_str(mm, addr, buf, len, gup_flags); in copy_remote_vm_str()
6918 mmput(mm); in copy_remote_vm_str()
6930 struct mm_struct *mm = current->mm; in print_vma_addr() local
6936 if (!mmap_read_trylock(mm)) in print_vma_addr()
6939 vma = vma_lookup(mm, ip); in print_vma_addr()
6948 mmap_read_unlock(mm); in print_vma_addr()
6957 if (current->mm) in __might_fault()
6958 might_lock_read(&current->mm->mmap_lock); in __might_fault()