| /tools/perf/bench/ |
| bench.h | 14 # ifndef MADV_HUGEPAGE
| bench.h | 15 # define MADV_HUGEPAGE 14 (macro)
|
| numa.c | 451 ret = madvise(buf, bytes, MADV_HUGEPAGE); in alloc_data()
|
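The bench.h entries above guard against old userspace headers by defining MADV_HUGEPAGE itself, and numa.c then passes the flag to madvise() on its benchmark buffer. Below is a minimal sketch of that pattern; the buffer size and error handling are illustrative and not copied from numa.c's alloc_data().

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

/* Fallback for old libc headers, mirroring tools/perf/bench/bench.h. */
#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE 14
#endif

int main(void)
{
        size_t bytes = 64 * 1024 * 1024;        /* illustrative buffer size */
        void *buf = mmap(NULL, bytes, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED)
                return 1;

        /* Advisory hint: back this range with transparent hugepages. */
        if (madvise(buf, bytes, MADV_HUGEPAGE))
                perror("madvise(MADV_HUGEPAGE)");

        memset(buf, 0, bytes);                  /* fault the pages in */
        munmap(buf, bytes);
        return 0;
}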
| /tools/arch/mips/include/uapi/asm/ |
| mman.h | 10 #define MADV_HUGEPAGE 14 (macro)
|
| /tools/arch/parisc/include/uapi/asm/ |
| mman.h | 10 #define MADV_HUGEPAGE 14 (macro)
|
| /tools/arch/alpha/include/uapi/asm/ |
| mman.h | 10 #define MADV_HUGEPAGE 14 (macro)
|
| /tools/arch/xtensa/include/uapi/asm/ |
| mman.h | 10 #define MADV_HUGEPAGE 14 (macro)
|
| /tools/include/uapi/asm-generic/ |
| mman-common.h | 62 #define MADV_HUGEPAGE 14 /* Worth backing with hugepages */ (macro)
|
| /tools/testing/selftests/mm/ |
| split_huge_page_test.c | 101 madvise(result, len, MADV_HUGEPAGE); in allocate_zero_filled_hugepage()
| split_huge_page_test.c | 160 madvise(one_page, len, MADV_HUGEPAGE); in split_pmd_thp_to_order()
| split_huge_page_test.c | 212 madvise(one_page, len, MADV_HUGEPAGE); in split_pte_mapped_thp()
| split_huge_page_test.c | 440 madvise(*addr, fd_size, MADV_HUGEPAGE); in create_pagecache_thp_and_fd()
|
| migration.c | 200 ASSERT_EQ(madvise(ptr, TWOMEG, MADV_HUGEPAGE), 0);
| migration.c | 232 ASSERT_EQ(madvise(ptr, TWOMEG, MADV_HUGEPAGE), 0);
|
| transhuge-stress.c | 82 if (madvise(ptr, len, MADV_HUGEPAGE)) in main()
|
| gup_test.c | 241 madvise(p, size, MADV_HUGEPAGE); in main()
|
| soft-dirty.c | 97 ret = madvise(map, hpage_len, MADV_HUGEPAGE); in test_hugepage()
|
| khugepaged.c | 326 if (madvise(p, hpage_pmd_size, MADV_HUGEPAGE)) { in alloc_hpage()
| khugepaged.c | 513 madvise(p, nr_hpages * hpage_pmd_size, MADV_HUGEPAGE); in __madvise_collapse()
| khugepaged.c | 549 madvise(p, nr_hpages * hpage_pmd_size, MADV_HUGEPAGE); in wait_for_scan()
| khugepaged.c | 841 madvise(BASE_ADDR, hpage_pmd_size, MADV_HUGEPAGE); in collapse_compound_extreme()
|
| mkdirty.c | 75 if (madvise(mem, thpsize, MADV_HUGEPAGE)) { in mmap_thp_range()
|
| cow.c | 853 ret = madvise(mem, thpsize, MADV_HUGEPAGE); in do_run_with_thp()
| cow.c | 1602 ret = madvise(mem, pmdsize, MADV_HUGEPAGE); in run_with_huge_zeropage()
| cow.c | 1608 ret = madvise(smem, pmdsize, MADV_HUGEPAGE); in run_with_huge_zeropage()
|
| vm_util.c | 276 if (madvise(ptr, HPAGE_SIZE, MADV_HUGEPAGE)) in allocate_transhuge()
|
| uffd-unit-tests.c | 1228 if (madvise(area_dst, nr_pages * page_size, MADV_HUGEPAGE)) in uffd_move_pmd_test()
| uffd-unit-tests.c | 1435 if (madvise(area_src, nr_pages * page_size, MADV_HUGEPAGE)) { in request_hugepages()
|
| ksm_tests.c | 547 if (madvise(map_ptr, len, MADV_HUGEPAGE)) in ksm_merge_hugepages_time()
|
| pagemap_ioctl.c | 792 ret = madvise(map, map_size, MADV_HUGEPAGE); in gethugepage()
| pagemap_ioctl.c | 1526 ret = madvise(mem, hpage_size, MADV_HUGEPAGE); in zeropfn_tests()
|
| protection_keys.c | 702 ret = madvise(ptr, HPAGE_SIZE, MADV_HUGEPAGE); in malloc_pkey_anon_huge()
|
| hmm-tests.c | 714 ret = madvise(map, size, MADV_HUGEPAGE); in TEST_F()
|
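Most of the mm selftests listed above follow the same shape as vm_util.c's allocate_transhuge(): map an anonymous region, align it to the huge-page boundary, advise MADV_HUGEPAGE, and touch the memory so the fault path (or khugepaged) can install a PMD mapping. The sketch below assumes a 2 MiB PMD size and a simple over-allocate-and-align trick; it is not a copy of the selftest helpers.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define HPAGE_SIZE (2UL * 1024 * 1024)  /* assumed PMD size (x86-64) */

/* Map a PMD-aligned 2 MiB window and request THP backing for it. */
static void *alloc_thp_candidate(void)
{
        /* Over-allocate so an aligned window is guaranteed to fit;
         * the slack around it is left mapped for brevity. */
        char *raw = mmap(NULL, 2 * HPAGE_SIZE, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        char *aligned;

        if (raw == MAP_FAILED)
                return NULL;

        aligned = (char *)(((uintptr_t)raw + HPAGE_SIZE - 1) &
                           ~(HPAGE_SIZE - 1));

        if (madvise(aligned, HPAGE_SIZE, MADV_HUGEPAGE)) {
                perror("madvise(MADV_HUGEPAGE)");
                return NULL;
        }

        /* Touch every byte; with THP enabled this range can end up mapped
         * by a single PMD (visible as AnonHugePages in smaps). */
        memset(aligned, 1, HPAGE_SIZE);
        return aligned;
}

int main(void)
{
        return alloc_thp_candidate() ? 0 : 1;
}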
| /tools/mm/ |
| thp_swap_allocator_test.c | 138 if (madvise(mem1, MEMSIZE_MTHP, MADV_HUGEPAGE) != 0) { in main()
|
| /tools/testing/selftests/kvm/lib/ |
| kvm_util.c | 1102 src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE); in vm_mem_add()
|
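The kvm_util.c hit differs from the rest: vm_mem_add() picks between MADV_NOHUGEPAGE and MADV_HUGEPAGE depending on the memory-source type of the region being added, so plain anonymous memory explicitly opts out of THP while THP-backed source types opt in. A hedged sketch of that decision follows; the want_thp flag stands in for the selftest's VM_MEM_SRC_* check and is not its real interface.

#include <stdbool.h>
#include <stdio.h>
#include <sys/mman.h>

/* Apply a THP policy to a host-memory region backing guest RAM.
 * 'want_thp' is a stand-in for the VM_MEM_SRC_* test done in kvm_util.c. */
static int set_region_thp_policy(void *host_mem, size_t size, bool want_thp)
{
        int advice = want_thp ? MADV_HUGEPAGE : MADV_NOHUGEPAGE;

        if (madvise(host_mem, size, advice)) {
                perror("madvise");
                return -1;
        }
        return 0;
}

int main(void)
{
        size_t size = 4UL * 1024 * 1024;
        void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (mem == MAP_FAILED)
                return 1;
        return set_region_thp_policy(mem, size, true) ? 1 : 0;
}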