Lines Matching refs:ops

58 			 struct mem_ops *ops, bool expect);
304 static void *alloc_hpage(struct mem_ops *ops) in alloc_hpage() argument
306 void *p = ops->setup_area(1); in alloc_hpage()
308 ops->fault(p, 0, hpage_pmd_size); in alloc_hpage()
322 if (!ops->check_huge(p, 1)) { in alloc_hpage()
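
All of the hits below go through the same mem_ops callback table, and alloc_hpage() above already exercises three of its four callbacks. A minimal sketch of that interface, inferred purely from the call sites in this listing; the field order, parameter names, and the extern declarations are assumptions rather than quotes from the file:

#include <stdbool.h>

/* Backing-specific callbacks (anonymous memory, file, shmem) that the
 * tests are parameterized over; shapes inferred from the calls listed here. */
struct mem_ops {
	/* Map an area large enough for nr_hpages PMD-sized huge pages. */
	void *(*setup_area)(int nr_hpages);
	/* Tear down the mapping created by setup_area(). */
	void (*cleanup_area)(void *p, unsigned long size);
	/* Touch bytes [start, end) of the area so its PTEs get populated. */
	void (*fault)(void *p, unsigned long start, unsigned long end);
	/* Return true iff the area is backed by nr_hpages huge pages. */
	bool (*check_huge)(void *addr, int nr_hpages);
};

/* Concrete instances compared against in is_anon()/is_tmpfs() below. */
extern struct mem_ops __anon_ops;
extern struct mem_ops __file_ops;
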
497 struct mem_ops *ops, bool expect) in __madvise_collapse() argument
517 else if (!ops->check_huge(p, expect ? nr_hpages : 0)) in __madvise_collapse()
526 struct mem_ops *ops, bool expect) in madvise_collapse() argument
529 if (!ops->check_huge(p, 0)) { in madvise_collapse()
533 __madvise_collapse(msg, p, nr_hpages, ops, expect); in madvise_collapse()
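
madvise_collapse() first checks that the range is not already huge and then defers to __madvise_collapse(), which afterwards expects check_huge() to report nr_hpages when a collapse is expected. A self-contained sketch of the MADV_COLLAPSE request that presumably sits between those two probes; the helper name and the fallback define are mine, not the file's:

#include <sys/mman.h>
#include <stdio.h>

#ifndef MADV_COLLAPSE
#define MADV_COLLAPSE 25	/* synchronous collapse, Linux 6.1+ */
#endif

/* Ask the kernel to collapse [p, p + len) into PMD-mapped huge pages now. */
static int request_collapse(void *p, unsigned long len)
{
	if (madvise(p, len, MADV_COLLAPSE)) {
		perror("madvise(MADV_COLLAPSE)");
		return -1;
	}
	return 0;
}
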
538 struct mem_ops *ops) in wait_for_scan() argument
544 if (!ops->check_huge(p, 0)) { in wait_for_scan()
556 if (ops->check_huge(p, nr_hpages)) in wait_for_scan()
568 struct mem_ops *ops, bool expect) in khugepaged_collapse() argument
570 if (wait_for_scan(msg, p, nr_hpages, ops)) { in khugepaged_collapse()
583 if (ops != &__anon_ops) in khugepaged_collapse()
584 ops->fault(p, 0, nr_hpages * hpage_pmd_size); in khugepaged_collapse()
586 if (ops->check_huge(p, expect ? nr_hpages : 0)) in khugepaged_collapse()
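
wait_for_scan() gives khugepaged a bounded window to act, repeatedly asking ops->check_huge() whether the range became huge, and khugepaged_collapse() then re-faults non-anonymous memory (lines 583-584) because khugepaged only installs the huge page in the page cache for file/shmem. A sketch of that polling loop under an assumed timeout:

#include <stdbool.h>
#include <unistd.h>

#define SCAN_TIMEOUT_SECS 120	/* assumed bound; the real value is not in this listing */

static bool poll_for_collapse(void *p, int nr_hpages, struct mem_ops *ops)
{
	int i;

	for (i = 0; i < SCAN_TIMEOUT_SECS; i++) {
		if (ops->check_huge(p, nr_hpages))
			return true;	/* khugepaged collapsed the range */
		sleep(1);
	}
	return false;	/* timed out waiting for the scan */
}
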
604 static bool is_tmpfs(struct mem_ops *ops) in is_tmpfs() argument
606 return ops == &__file_ops && finfo.type == VMA_SHMEM; in is_tmpfs()
609 static bool is_anon(struct mem_ops *ops) in is_anon() argument
611 return ops == &__anon_ops; in is_anon()
641 static void collapse_full(struct collapse_context *c, struct mem_ops *ops) in collapse_full() argument
647 p = ops->setup_area(nr_hpages); in collapse_full()
648 ops->fault(p, 0, size); in collapse_full()
650 ops, true); in collapse_full()
652 ops->cleanup_area(p, size); in collapse_full()
655 static void collapse_empty(struct collapse_context *c, struct mem_ops *ops) in collapse_empty() argument
659 p = ops->setup_area(1); in collapse_empty()
660 c->collapse("Do not collapse empty PTE table", p, 1, ops, false); in collapse_empty()
661 ops->cleanup_area(p, hpage_pmd_size); in collapse_empty()
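
collapse_full() and collapse_empty() show the canonical shape that most of the tests below repeat: map, optionally populate, hand the range to the context's collapse routine together with an expectation, then tear down. Written out once as a sketch; it leans on the harness's struct collapse_context, struct mem_ops and hpage_pmd_size seen elsewhere in this listing, and omits whatever validation the real tests do between collapse and cleanup:

static void collapse_populated(struct collapse_context *c, struct mem_ops *ops,
			       int nr_hpages, bool expect)
{
	unsigned long size = nr_hpages * hpage_pmd_size;
	void *p = ops->setup_area(nr_hpages);

	ops->fault(p, 0, size);		/* populate every PTE in the range */
	c->collapse("Collapse populated PTE tables", p, nr_hpages, ops, expect);
	ops->cleanup_area(p, size);
}
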
664 static void collapse_single_pte_entry(struct collapse_context *c, struct mem_ops *ops) in collapse_single_pte_entry() argument
668 p = ops->setup_area(1); in collapse_single_pte_entry()
669 ops->fault(p, 0, page_size); in collapse_single_pte_entry()
671 1, ops, true); in collapse_single_pte_entry()
672 ops->cleanup_area(p, hpage_pmd_size); in collapse_single_pte_entry()
675 static void collapse_max_ptes_none(struct collapse_context *c, struct mem_ops *ops) in collapse_max_ptes_none() argument
680 int fault_nr_pages = is_anon(ops) ? 1 << anon_order : 1; in collapse_max_ptes_none()
685 p = ops->setup_area(1); in collapse_max_ptes_none()
687 if (is_tmpfs(ops)) { in collapse_max_ptes_none()
694 ops->fault(p, 0, (hpage_pmd_nr - max_ptes_none - fault_nr_pages) * page_size); in collapse_max_ptes_none()
696 ops, !c->enforce_pte_scan_limits); in collapse_max_ptes_none()
700 ops->fault(p, 0, (hpage_pmd_nr - max_ptes_none) * page_size); in collapse_max_ptes_none()
701 c->collapse("Collapse with max_ptes_none PTEs empty", p, 1, ops, in collapse_max_ptes_none()
707 ops->cleanup_area(p, hpage_pmd_size); in collapse_max_ptes_none()
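
collapse_max_ptes_none() pivots on how many PTEs it leaves unpopulated relative to khugepaged's max_ptes_none limit (and, for anonymous memory, on the order it faults at). A sketch of reading that limit; the sysfs path is the standard THP location, not something quoted in this listing:

#include <stdio.h>

static int read_max_ptes_none(void)
{
	const char *path =
		"/sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none";
	FILE *f = fopen(path, "r");
	int val = -1;

	if (!f)
		return -1;	/* khugepaged sysfs interface not available */
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}
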
711 static void collapse_swapin_single_pte(struct collapse_context *c, struct mem_ops *ops) in collapse_swapin_single_pte() argument
715 p = ops->setup_area(1); in collapse_swapin_single_pte()
716 ops->fault(p, 0, hpage_pmd_size); in collapse_swapin_single_pte()
730 c->collapse("Collapse with swapping in single PTE entry", p, 1, ops, in collapse_swapin_single_pte()
734 ops->cleanup_area(p, hpage_pmd_size); in collapse_swapin_single_pte()
737 static void collapse_max_ptes_swap(struct collapse_context *c, struct mem_ops *ops) in collapse_max_ptes_swap() argument
742 p = ops->setup_area(1); in collapse_max_ptes_swap()
743 ops->fault(p, 0, hpage_pmd_size); in collapse_max_ptes_swap()
757 c->collapse("Maybe collapse with max_ptes_swap exceeded", p, 1, ops, in collapse_max_ptes_swap()
762 ops->fault(p, 0, hpage_pmd_size); in collapse_max_ptes_swap()
777 1, ops, true); in collapse_max_ptes_swap()
781 ops->cleanup_area(p, hpage_pmd_size); in collapse_max_ptes_swap()
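
Both swap tests fault the whole PMD range and then need part of it out in swap before collapsing; how they get there is not visible in these hits. One way to do it from userspace is MADV_PAGEOUT followed by a mincore() probe to confirm the page really left memory; this is a hedged sketch, not necessarily what the selftest does, and it needs Linux 5.4+ plus an active swap device:

#define _DEFAULT_SOURCE
#include <sys/mman.h>
#include <stdbool.h>

#ifndef MADV_PAGEOUT
#define MADV_PAGEOUT 21	/* reclaim hint, Linux 5.4+ */
#endif

static bool swap_out_first_page(void *p, unsigned long len, unsigned long pgsz)
{
	unsigned char vec;

	if (madvise(p, len, MADV_PAGEOUT))
		return false;
	/* mincore() sets bit 0 of vec iff the first page is still resident. */
	if (mincore(p, pgsz, &vec))
		return false;
	return !(vec & 1);	/* true once the page has been reclaimed */
}
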
784 static void collapse_single_pte_entry_compound(struct collapse_context *c, struct mem_ops *ops) in collapse_single_pte_entry_compound() argument
788 p = alloc_hpage(ops); in collapse_single_pte_entry_compound()
790 if (is_tmpfs(ops)) { in collapse_single_pte_entry_compound()
800 if (ops->check_huge(p, 0)) in collapse_single_pte_entry_compound()
806 p, 1, ops, true); in collapse_single_pte_entry_compound()
809 ops->cleanup_area(p, hpage_pmd_size); in collapse_single_pte_entry_compound()
812 static void collapse_full_of_compound(struct collapse_context *c, struct mem_ops *ops) in collapse_full_of_compound() argument
816 p = alloc_hpage(ops); in collapse_full_of_compound()
820 if (ops->check_huge(p, 0)) in collapse_full_of_compound()
825 c->collapse("Collapse PTE table full of compound pages", p, 1, ops, in collapse_full_of_compound()
828 ops->cleanup_area(p, hpage_pmd_size); in collapse_full_of_compound()
831 static void collapse_compound_extreme(struct collapse_context *c, struct mem_ops *ops) in collapse_compound_extreme() argument
836 p = ops->setup_area(1); in collapse_compound_extreme()
842 ops->fault(BASE_ADDR, 0, hpage_pmd_size); in collapse_compound_extreme()
843 if (!ops->check_huge(BASE_ADDR, 1)) { in collapse_compound_extreme()
870 ops->cleanup_area(BASE_ADDR, hpage_pmd_size); in collapse_compound_extreme()
871 ops->fault(p, 0, hpage_pmd_size); in collapse_compound_extreme()
872 if (!ops->check_huge(p, 1)) in collapse_compound_extreme()
878 ops, true); in collapse_compound_extreme()
881 ops->cleanup_area(p, hpage_pmd_size); in collapse_compound_extreme()
884 static void collapse_fork(struct collapse_context *c, struct mem_ops *ops) in collapse_fork() argument
889 p = ops->setup_area(1); in collapse_fork()
892 ops->fault(p, 0, page_size); in collapse_fork()
893 if (ops->check_huge(p, 0)) in collapse_fork()
904 if (ops->check_huge(p, 0)) in collapse_fork()
909 ops->fault(p, page_size, 2 * page_size); in collapse_fork()
911 p, 1, ops, true); in collapse_fork()
914 ops->cleanup_area(p, hpage_pmd_size); in collapse_fork()
922 if (ops->check_huge(p, 0)) in collapse_fork()
927 ops->cleanup_area(p, hpage_pmd_size); in collapse_fork()
930 static void collapse_fork_compound(struct collapse_context *c, struct mem_ops *ops) in collapse_fork_compound() argument
935 p = alloc_hpage(ops); in collapse_fork_compound()
942 if (ops->check_huge(p, 1)) in collapse_fork_compound()
950 if (ops->check_huge(p, 0)) in collapse_fork_compound()
954 ops->fault(p, 0, page_size); in collapse_fork_compound()
958 p, 1, ops, true); in collapse_fork_compound()
963 ops->cleanup_area(p, hpage_pmd_size); in collapse_fork_compound()
971 if (ops->check_huge(p, 1)) in collapse_fork_compound()
976 ops->cleanup_area(p, hpage_pmd_size); in collapse_fork_compound()
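
collapse_fork() and collapse_fork_compound() appear to split their work across fork(): the listing shows check_huge()/fault()/collapse repeated on the same area, followed by a second check_huge() and cleanup_area() pass, which fits a child doing the collapse against PTEs shared with the parent and the parent re-checking afterwards. A stripped-down skeleton of that fork-and-report pattern, with the result carried in the exit status (the real tests may use a different channel):

#include <sys/wait.h>
#include <unistd.h>
#include <stdbool.h>

static bool run_in_child(void *p, struct mem_ops *ops,
			 bool (*test)(void *p, struct mem_ops *ops))
{
	int status;
	pid_t pid = fork();

	if (pid < 0)
		return false;
	if (pid == 0)
		/* Child: PTEs are now shared with the parent; run the test. */
		_exit(test(p, ops) ? 0 : 1);
	if (waitpid(pid, &status, 0) != pid)
		return false;
	return WIFEXITED(status) && WEXITSTATUS(status) == 0;
}
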
979 static void collapse_max_ptes_shared(struct collapse_context *c, struct mem_ops *ops) in collapse_max_ptes_shared() argument
985 p = alloc_hpage(ops); in collapse_max_ptes_shared()
992 if (ops->check_huge(p, 1)) in collapse_max_ptes_shared()
999 ops->fault(p, 0, (hpage_pmd_nr - max_ptes_shared - 1) * page_size); in collapse_max_ptes_shared()
1000 if (ops->check_huge(p, 0)) in collapse_max_ptes_shared()
1006 1, ops, !c->enforce_pte_scan_limits); in collapse_max_ptes_shared()
1011 ops->fault(p, 0, (hpage_pmd_nr - max_ptes_shared) * in collapse_max_ptes_shared()
1013 if (ops->check_huge(p, 0)) in collapse_max_ptes_shared()
1019 p, 1, ops, true); in collapse_max_ptes_shared()
1023 ops->cleanup_area(p, hpage_pmd_size); in collapse_max_ptes_shared()
1031 if (ops->check_huge(p, 1)) in collapse_max_ptes_shared()
1036 ops->cleanup_area(p, hpage_pmd_size); in collapse_max_ptes_shared()
1040 struct mem_ops *ops) in madvise_collapse_existing_thps() argument
1044 p = ops->setup_area(1); in madvise_collapse_existing_thps()
1045 ops->fault(p, 0, hpage_pmd_size); in madvise_collapse_existing_thps()
1046 c->collapse("Collapse fully populated PTE table...", p, 1, ops, true); in madvise_collapse_existing_thps()
1050 __madvise_collapse("Re-collapse PMD-mapped hugepage", p, 1, ops, true); in madvise_collapse_existing_thps()
1052 ops->cleanup_area(p, hpage_pmd_size); in madvise_collapse_existing_thps()
1060 struct mem_ops *ops) in madvise_retracted_page_tables() argument
1066 p = ops->setup_area(nr_hpages); in madvise_retracted_page_tables()
1067 ops->fault(p, 0, size); in madvise_retracted_page_tables()
1071 ops)) { in madvise_retracted_page_tables()
1076 c->collapse("Install huge PMD from page cache", p, nr_hpages, ops, in madvise_retracted_page_tables()
1079 ops->cleanup_area(p, size); in madvise_retracted_page_tables()
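
Taken together, the c->collapse(...) call sites, the prototype fragment at line 58, and the enforce_pte_scan_limits checks imply a second, driver-side callback table that chooses between waiting for khugepaged and calling MADV_COLLAPSE directly. A sketch of what that table minimally has to contain; any further fields are unknown from this listing:

#include <stdbool.h>

struct collapse_context {
	/* Presumably points at khugepaged_collapse() or madvise_collapse(). */
	void (*collapse)(const char *msg, char *p, int nr_hpages,
			 struct mem_ops *ops, bool expect);
	/* True when the driver is subject to the max_ptes_* limits
	 * (khugepaged); assumed false for the MADV_COLLAPSE path. */
	bool enforce_pte_scan_limits;
};
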