Lines Matching refs:mm (all hits are in mm/rmap.c)

188 	struct mm_struct *mm = vma->vm_mm;  in __anon_vma_prepare()  local
192 mmap_assert_locked(mm); in __anon_vma_prepare()
211 spin_lock(&mm->page_table_lock); in __anon_vma_prepare()
219 spin_unlock(&mm->page_table_lock); in __anon_vma_prepare()
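
The hits at 188-219 are __anon_vma_prepare(), where mm is used purely for locking: the caller must hold the mmap lock, and mm->page_table_lock serializes threads racing to attach an anon_vma to the same VMA. Below is a condensed sketch of that double-checked pattern, for illustration only; the real function also tries to reuse a mergeable anon_vma from a neighbouring VMA and links an anon_vma_chain, and anon_vma_alloc() is an internal helper of mm/rmap.c.

static int anon_vma_prepare_sketch(struct vm_area_struct *vma)
{
        struct mm_struct *mm = vma->vm_mm;
        struct anon_vma *anon_vma, *allocated;

        mmap_assert_locked(mm);                 /* line 192: mmap lock must be held */

        allocated = anon_vma = anon_vma_alloc();
        if (unlikely(!anon_vma))
                return -ENOMEM;

        anon_vma_lock_write(anon_vma);
        spin_lock(&mm->page_table_lock);        /* line 211: serialize racing faults */
        if (likely(!vma->anon_vma)) {
                vma->anon_vma = anon_vma;       /* we won the race */
                allocated = NULL;
        }
        spin_unlock(&mm->page_table_lock);      /* line 219 */
        anon_vma_unlock_write(anon_vma);

        if (unlikely(allocated))                /* lost the race: drop our copy */
                put_anon_vma(allocated);
        return 0;
}
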
674 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval, in set_tlb_ubc_flush_pending() argument
681 if (!pte_accessible(mm, pteval)) in set_tlb_ubc_flush_pending()
684 arch_tlbbatch_add_pending(&tlb_ubc->arch, mm, start, end); in set_tlb_ubc_flush_pending()
692 batch = atomic_read(&mm->tlb_flush_batched); in set_tlb_ubc_flush_pending()
700 if (!atomic_try_cmpxchg(&mm->tlb_flush_batched, &batch, 1)) in set_tlb_ubc_flush_pending()
703 atomic_inc(&mm->tlb_flush_batched); in set_tlb_ubc_flush_pending()
719 static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) in should_defer_flush() argument
724 return arch_tlbbatch_should_defer(mm); in should_defer_flush()
742 void flush_tlb_batched_pending(struct mm_struct *mm) in flush_tlb_batched_pending() argument
744 int batch = atomic_read(&mm->tlb_flush_batched); in flush_tlb_batched_pending()
749 flush_tlb_mm(mm); in flush_tlb_batched_pending()
754 atomic_cmpxchg(&mm->tlb_flush_batched, batch, in flush_tlb_batched_pending()
759 static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval, in set_tlb_ubc_flush_pending() argument
764 static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) in should_defer_flush() argument
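
Lines 674-764 are the deferred (unmap-batched, "ubc") TLB flush machinery: reclaim may clear PTEs from many mms and flush once per batch, so mm->tlb_flush_batched packs a pending count and a flushed count, and flush_tlb_batched_pending() flushes the whole mm before page-table work that cannot tolerate a stale TLB entry. The sketch below shows the protocol with the two counts split into separate hypothetical atomics for readability; the real code packs both into the single atomic mm->tlb_flush_batched and updates the packed word with cmpxchg (lines 700 and 754).

struct tlb_batch_sketch {                       /* HYPOTHETICAL split of mm->tlb_flush_batched */
        atomic_t pending;                       /* deferred flushes requested */
        atomic_t flushed;                       /* deferred flushes completed */
};

/* reclaim side: defer the flush instead of doing it per PTE (cf. line 703) */
static void defer_flush_sketch(struct tlb_batch_sketch *b)
{
        atomic_inc(&b->pending);
}

/* page-table-modifying side: make sure nothing stale survives (cf. lines 744-754) */
static void flush_pending_sketch(struct mm_struct *mm, struct tlb_batch_sketch *b)
{
        int pending = atomic_read(&b->pending);
        int flushed = atomic_read(&b->flushed);

        if (pending == flushed)
                return;                         /* every deferred flush already done */

        flush_tlb_mm(mm);                       /* conservative: flush the whole mm (line 749) */

        /*
         * Record what we flushed.  Using the "pending" value read before
         * the flush means a deferral that raced in after the read still
         * leaves pending != flushed and forces a later flush.
         */
        atomic_cmpxchg(&b->flushed, flushed, pending);
}
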
815 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) in mm_find_pmd() argument
822 pgd = pgd_offset(mm, address); in mm_find_pmd()
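
mm_find_pmd() (line 815) is the only hit that walks mm's page tables from the top; its body is the standard pgd to p4d to pud to pmd descent. A sketch of that walk follows; the exact presence checks and return convention vary slightly between kernel versions, so treat it as approximate.

static pmd_t *mm_find_pmd_sketch(struct mm_struct *mm, unsigned long address)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd = NULL;

        pgd = pgd_offset(mm, address);          /* line 822: start from mm's top-level table */
        if (!pgd_present(*pgd))
                goto out;

        p4d = p4d_offset(pgd, address);
        if (!p4d_present(*p4d))
                goto out;

        pud = pud_offset(p4d, address);
        if (!pud_present(*pud))
                goto out;

        pmd = pmd_offset(pud, address);         /* caller checks the pmd itself */
out:
        return pmd;
}
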
1880 struct mm_struct *mm = vma->vm_mm; in try_to_unmap_one() local
2012 if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { in try_to_unmap_one()
2039 pteval = get_and_clear_ptes(mm, address, pvmw.pte, nr_pages); in try_to_unmap_one()
2048 if (should_defer_flush(mm, flags)) in try_to_unmap_one()
2049 set_tlb_ubc_flush_pending(mm, pteval, address, end_addr); in try_to_unmap_one()
2055 pte_clear(mm, address, pvmw.pte); in try_to_unmap_one()
2066 update_hiwater_rss(mm); in try_to_unmap_one()
2071 hugetlb_count_sub(folio_nr_pages(folio), mm); in try_to_unmap_one()
2072 set_huge_pte_at(mm, address, pvmw.pte, pteval, in try_to_unmap_one()
2075 dec_mm_counter(mm, mm_counter(folio)); in try_to_unmap_one()
2076 set_pte_at(mm, address, pvmw.pte, pteval); in try_to_unmap_one()
2090 dec_mm_counter(mm, mm_counter(folio)); in try_to_unmap_one()
2129 set_ptes(mm, address, pvmw.pte, pteval, nr_pages); in try_to_unmap_one()
2142 set_ptes(mm, address, pvmw.pte, pteval, nr_pages); in try_to_unmap_one()
2145 add_mm_counter(mm, MM_ANONPAGES, -nr_pages); in try_to_unmap_one()
2150 set_pte_at(mm, address, pvmw.pte, pteval); in try_to_unmap_one()
2159 if (arch_unmap_one(mm, vma, address, pteval) < 0) { in try_to_unmap_one()
2161 set_pte_at(mm, address, pvmw.pte, pteval); in try_to_unmap_one()
2169 set_pte_at(mm, address, pvmw.pte, pteval); in try_to_unmap_one()
2172 if (list_empty(&mm->mmlist)) { in try_to_unmap_one()
2174 if (list_empty(&mm->mmlist)) in try_to_unmap_one()
2175 list_add(&mm->mmlist, &init_mm.mmlist); in try_to_unmap_one()
2178 dec_mm_counter(mm, MM_ANONPAGES); in try_to_unmap_one()
2179 inc_mm_counter(mm, MM_SWAPENTS); in try_to_unmap_one()
2194 set_pte_at(mm, address, pvmw.pte, swp_pte); in try_to_unmap_one()
2207 dec_mm_counter(mm, mm_counter_file(folio)); in try_to_unmap_one()
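
The bulk of the hits (1880-2207) are try_to_unmap_one(), where mm is threaded through every PTE operation: clear the PTE (with an immediate or deferred TLB flush), move the rss accounting from MM_ANONPAGES to MM_SWAPENTS, and install a swap PTE. Below is a heavily condensed sketch of the plain anonymous path, assuming the folio already has a swap slot; the real function additionally handles hugetlb, mTHP batches of PTEs, migration and device entries, soft-dirty and uffd-wp bits, swap_duplicate() failure, and the final rmap and refcount drop.

static void unmap_one_pte_sketch(struct mm_struct *mm, struct vm_area_struct *vma,
                                 unsigned long address, pte_t *ptep,
                                 struct folio *folio, swp_entry_t entry)
{
        pte_t pteval, swp_pte;

        /*
         * Clear the PTE and flush; the real code may defer the flush via
         * set_tlb_ubc_flush_pending() when TTU_BATCH_FLUSH is set.
         */
        pteval = ptep_clear_flush(vma, address, ptep);

        update_hiwater_rss(mm);                 /* line 2066: snapshot peak rss first */

        if (pte_dirty(pteval))
                folio_mark_dirty(folio);        /* dirty state moves to the folio */

        /* the page is now accounted as a swap entry, not an anon page */
        dec_mm_counter(mm, MM_ANONPAGES);       /* line 2178 */
        inc_mm_counter(mm, MM_SWAPENTS);        /* line 2179 */

        swp_pte = swp_entry_to_pte(entry);
        set_pte_at(mm, address, ptep, swp_pte); /* line 2194 */
}
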
2283 struct mm_struct *mm = vma->vm_mm; in try_to_migrate_one() local
2400 if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) { in try_to_migrate_one()
2428 if (should_defer_flush(mm, flags)) { in try_to_migrate_one()
2437 pteval = ptep_get_and_clear(mm, address, pvmw.pte); in try_to_migrate_one()
2439 set_tlb_ubc_flush_pending(mm, pteval, address, address + PAGE_SIZE); in try_to_migrate_one()
2447 pte_clear(mm, address, pvmw.pte); in try_to_migrate_one()
2455 update_hiwater_rss(mm); in try_to_migrate_one()
2462 hugetlb_count_sub(folio_nr_pages(folio), mm); in try_to_migrate_one()
2463 set_huge_pte_at(mm, address, pvmw.pte, pteval, in try_to_migrate_one()
2466 dec_mm_counter(mm, mm_counter(folio)); in try_to_migrate_one()
2467 set_pte_at(mm, address, pvmw.pte, pteval); in try_to_migrate_one()
2481 dec_mm_counter(mm, mm_counter(folio)); in try_to_migrate_one()
2491 if (arch_unmap_one(mm, vma, address, pteval) < 0) { in try_to_migrate_one()
2493 set_huge_pte_at(mm, address, pvmw.pte, in try_to_migrate_one()
2496 set_pte_at(mm, address, pvmw.pte, pteval); in try_to_migrate_one()
2506 set_huge_pte_at(mm, address, pvmw.pte, in try_to_migrate_one()
2514 set_pte_at(mm, address, pvmw.pte, pteval); in try_to_migrate_one()
2552 set_huge_pte_at(mm, address, pvmw.pte, swp_pte, in try_to_migrate_one()
2555 set_pte_at(mm, address, pvmw.pte, swp_pte); in try_to_migrate_one()
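
try_to_migrate_one() (2283-2555) uses mm in the same way, but instead of a swap entry it installs a migration entry that encodes the pfn and the old access rights, so a fault on the address waits for migration to finish and then restores a correct PTE. A condensed sketch of that step for a regular PTE follows; the real code also covers hugetlb, device-private pages, anon-exclusive entries, and migration entries that carry young/dirty bits.

static void migrate_one_pte_sketch(struct mm_struct *mm, struct vm_area_struct *vma,
                                   unsigned long address, pte_t *ptep,
                                   struct page *page)
{
        pte_t pteval, swp_pte;
        swp_entry_t entry;

        pteval = ptep_clear_flush(vma, address, ptep);
        update_hiwater_rss(mm);                 /* line 2455 */

        if (pte_dirty(pteval))
                set_page_dirty(page);

        /* the migration entry remembers whether the mapping was writable */
        if (pte_write(pteval))
                entry = make_writable_migration_entry(page_to_pfn(page));
        else
                entry = make_readable_migration_entry(page_to_pfn(page));

        swp_pte = swp_entry_to_pte(entry);
        if (pte_soft_dirty(pteval))
                swp_pte = pte_swp_mksoft_dirty(swp_pte);
        set_pte_at(mm, address, ptep, swp_pte); /* line 2555 */
}
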
2663 struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr, in make_device_exclusive() argument
2675 mmap_assert_locked(mm); in make_device_exclusive()
2688 page = get_user_page_vma_remote(mm, addr, in make_device_exclusive()
2712 mm, addr, addr + PAGE_SIZE, owner); in make_device_exclusive()
2750 set_pte_at(mm, addr, fw.ptep, swp_pte); in make_device_exclusive()
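
make_device_exclusive() (2663-2750) is the one exported entry point in this list that takes mm directly from a driver: it requires the mmap lock, pins the page via get_user_page_vma_remote(), notifies MMU notifiers with the driver-supplied owner, and replaces the PTE with a device-exclusive swap entry (line 2750). The caller sketch below is hypothetical: it assumes the prototype struct page *make_device_exclusive(struct mm_struct *mm, unsigned long addr, void *owner, struct folio **foliop) and that the folio comes back locked and referenced, so verify both against include/linux/rmap.h in your tree.

static int grab_page_for_device_sketch(struct mm_struct *mm, unsigned long addr,
                                       void *owner)
{
        struct folio *folio;
        struct page *page;

        mmap_read_lock(mm);             /* line 2675: the callee asserts this lock */
        page = make_device_exclusive(mm, addr, owner, &folio); /* ASSUMED prototype */
        mmap_read_unlock(mm);

        if (IS_ERR(page))
                return PTR_ERR(page);

        /* ... program the device's page tables for "page" here ... */

        folio_unlock(folio);            /* ASSUMED: returned locked ... */
        folio_put(folio);               /* ... and with a reference held */
        return 0;
}
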