| /tools/testing/scatterlist/ |
| main.c |
|   18:  static void set_pages(struct page **pages, const unsigned *array, unsigned num)  [in set_pages(), argument]
|   24:  pages[i] = (struct page *)(unsigned long)  [in set_pages()]
|   89:  struct page *pages[MAX_PAGES];  [in main(), local]
|   92:  set_pages(pages, test->pfn, test->num_pages);  [in main()]
|   96:  &append, pages, test->num_pages, 0, test->size,  [in main()]
|  100:  &append.sgt, pages, test->num_pages, 0,  [in main()]
|  109:  set_pages(pages, test->pfn_app, test->num_pages);  [in main()]
|  111:  &append, pages, test->num_pages, 0, test->size,  [in main()]
|
| /tools/mm/ |
| page-types.c |
|  287:  return pages;  [in kpagecgroup_read()]
|  674:  unsigned long pages;  [in walk_pfn(), local]
|  690:  if (pages == 0)  [in walk_pfn()]
|  693:  if (kpagecgroup_read(cgi, index, pages) != pages)  [in walk_pfn()]
|  696:  if (kpagecount_read(cnt, index, pages) != pages)  [in walk_pfn()]
|  703:  index += pages;  [in walk_pfn()]
|  704:  count -= pages;  [in walk_pfn()]
|  733:  unsigned long pages;  [in walk_vma(), local]
|  740:  if (pages == 0)  [in walk_vma()]
|  751:  index += pages;  [in walk_vma()]
|  [all …]
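The walk_pfn() hits above show the chunked scan in page-types.c: read up to `pages` per-PFN entries at a time, then advance `index` and shrink `count` by the same amount. Below is a minimal standalone sketch of that pattern against /proc/kpagecount (one 64-bit mapcount per PFN, usually root-only); the chunk size, PFN range, and helper shape are illustrative assumptions, not copied from the tool.

    /* Sketch: chunked reads from /proc/kpagecount, one 64-bit mapcount per PFN.
     * Chunk size, PFN range, and helper shape are illustrative, not copied
     * from page-types.c. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define KPAGECOUNT  "/proc/kpagecount"
    #define CHUNK_PAGES 1024UL

    /* Read 'pages' entries starting at PFN 'index'; returns entries read. */
    static long kpagecount_read(int fd, uint64_t *buf, unsigned long index,
                                unsigned long pages)
    {
        ssize_t n = pread(fd, buf, pages * sizeof(*buf), index * sizeof(*buf));

        return n < 0 ? -1 : n / (long)sizeof(*buf);
    }

    int main(void)
    {
        uint64_t buf[CHUNK_PAGES];
        unsigned long index = 0x1000, count = 8192;   /* arbitrary PFN range */
        int fd = open(KPAGECOUNT, O_RDONLY);          /* usually needs root */

        if (fd < 0)
            return 1;
        while (count) {
            unsigned long pages = count < CHUNK_PAGES ? count : CHUNK_PAGES;

            if (kpagecount_read(fd, buf, index, pages) != (long)pages)
                break;                    /* short read: end of PFN range */
            /* ... inspect buf[0..pages-1] here ... */
            index += pages;
            count -= pages;
        }
        printf("scanned up to PFN 0x%lx\n", index);
        close(fd);
        return 0;
    }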
|
| /tools/testing/selftests/powerpc/mm/ |
| subpage_prot.c |
|   96:  long i, j, pages, err;  [in run_test(), local]
|   98:  pages = size / 0x10000;  [in run_test()]
|   99:  map = malloc(pages * 4);  [in run_test()]
|  106:  for (i = 0; i < pages; i++) {  [in run_test()]
|  120:  for (i = 0; i < pages; i++) {  [in run_test()]
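The run_test() hits above size the subpage-protection map from the region size: `size / 0x10000` gives the number of 64 KiB pages, and 4 bytes of protection bits are allocated per page. A tiny sketch of that sizing arithmetic, with the region size chosen here purely for illustration:

    /* Sketch: size a subpage-protection map as run_test() does, one 32-bit
     * word of protection bits per 64 KiB page of the test region. */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        unsigned long size = 16 * 0x10000;     /* illustrative region size */
        long pages = size / 0x10000;           /* 64 KiB pages in the region */
        unsigned int *map = calloc(pages, 4);  /* 4 bytes of subpage bits each */

        if (!map)
            return 1;
        printf("%ld pages -> %ld-byte protection map\n", pages, pages * 4L);
        free(map);
        return 0;
    }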
|
| /tools/perf/arch/s390/util/ |
| auxtrace.c |
|  52:  unsigned int pages;  [in cpumsf_recording_options(), local]
|  68:  pages = DEFAULT_AUX_PAGES * factor;  [in cpumsf_recording_options()]
|  69:  opts->auxtrace_mmap_pages = roundup_pow_of_two(pages);  [in cpumsf_recording_options()]
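cpumsf_recording_options() scales a default AUX page count by a factor and rounds it up to a power of two before storing it in auxtrace_mmap_pages, since perf ring buffers are sized in power-of-two page counts. A standalone sketch of the rounding step follows; the loop is a generic stand-in for the roundup_pow_of_two() helper, and the constants are illustrative:

    /* Sketch: round an AUX page count up to the next power of two, as
     * roundup_pow_of_two() does for opts->auxtrace_mmap_pages. */
    #include <stdio.h>

    static unsigned long roundup_pow_of_two_ul(unsigned long n)
    {
        unsigned long p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    int main(void)
    {
        unsigned int default_aux_pages = 128;  /* stand-in for DEFAULT_AUX_PAGES */
        unsigned int factor = 3;               /* illustrative scaling factor */
        unsigned int pages = default_aux_pages * factor;

        printf("%u pages -> %lu pages (power of two)\n",
               pages, roundup_pow_of_two_ul(pages));
        return 0;
    }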
|
| /tools/testing/selftests/kvm/lib/ |
| memstress.c |
|   53:  uint64_t pages;  [in memstress_guest_code(), local]
|   61:  pages = vcpu_args->pages;  [in memstress_guest_code()]
|   70:  for (i = 0; i < pages; i++) {  [in memstress_guest_code()]
|   72:  page = guest_random_u32(&rand_state) % pages;  [in memstress_guest_code()]
|  106:  vcpu_args->pages = vcpu_memory_bytes /  [in memstress_setup_vcpus()]
|  111:  vcpu_args->pages = (nr_vcpus * vcpu_memory_bytes) /  [in memstress_setup_vcpus()]
|  120:  (vcpu_args->pages * args->guest_page_size));  [in memstress_setup_vcpus()]
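The memstress hits above pair a setup step (pages per vCPU = memory bytes / guest page size) with a guest loop that touches pages, choosing `guest_random_u32() % pages` for random accesses. A cut-down host-side sketch of that access pattern, with rand() standing in for the guest PRNG and the sizes picked only for illustration:

    /* Sketch: random page-touch loop in the style of memstress_guest_code(),
     * run against an ordinary malloc'd buffer instead of guest memory. */
    #include <stdint.h>
    #include <stdlib.h>

    int main(void)
    {
        uint64_t guest_page_size = 4096;
        uint64_t vcpu_memory_bytes = 64ULL << 20;               /* 64 MiB, illustrative */
        uint64_t pages = vcpu_memory_bytes / guest_page_size;   /* pages per vCPU */
        uint8_t *mem = malloc(pages * guest_page_size);
        uint64_t i;

        if (!mem)
            return 1;
        srand(1);                               /* stand-in for the guest PRNG */
        for (i = 0; i < pages; i++) {
            uint64_t page = (uint64_t)rand() % pages;   /* random page index */

            mem[page * guest_page_size] = 1;            /* touch one byte per page */
        }
        free(mem);
        return 0;
    }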
|
| userfaultfd_util.c |
|  30:  int64_t pages = 0;  [in uffd_handler_thread_fn(), local]
|  91:  pages++;  [in uffd_handler_thread_fn()]
|  96:  pages, ts_diff.tv_sec, ts_diff.tv_nsec,  [in uffd_handler_thread_fn()]
|  97:  pages / ((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / NSEC_PER_SEC));  [in uffd_handler_thread_fn()]
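uffd_handler_thread_fn() counts each resolved fault in `pages` and reports throughput by dividing by the elapsed time in seconds plus nanoseconds. A small sketch of that rate calculation from a struct timespec difference (the sample numbers are made up):

    /* Sketch: pages-per-second from an elapsed struct timespec, as the
     * userfaultfd handler thread reports after servicing faults. */
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define NSEC_PER_SEC 1000000000L

    int main(void)
    {
        int64_t pages = 25000;     /* faults handled; illustrative */
        struct timespec ts_diff = { .tv_sec = 2, .tv_nsec = 350000000 };
        double secs = (double)ts_diff.tv_sec +
                      (double)ts_diff.tv_nsec / NSEC_PER_SEC;

        printf("%ld pages in %ld.%09lds = %.2f pages/s\n",
               (long)pages, (long)ts_diff.tv_sec, ts_diff.tv_nsec, pages / secs);
        return 0;
    }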
|
| kvm_util.c |
|  1422:  pgidx_start, pages))  [in vm_vaddr_unused_gap()]
|  1424:  pgidx_start, pages);  [in vm_vaddr_unused_gap()]
|  1433:  pgidx_start, pages))  [in vm_vaddr_unused_gap()]
|  1436:  pgidx_start, pages);  [in vm_vaddr_unused_gap()]
|  1445:  pgidx_start, pages)) {  [in vm_vaddr_unused_gap()]
|  1447:  vm->vpages_valid, pgidx_start, pages);  [in vm_vaddr_unused_gap()]
|  1461:  pgidx_start, pages),  [in vm_vaddr_unused_gap()]
|  1465:  pgidx_start, pages);  [in vm_vaddr_unused_gap()]
|  1467:  pgidx_start, pages),  [in vm_vaddr_unused_gap()]
|  1471:  pgidx_start, pages);  [in vm_vaddr_unused_gap()]
|  [all …]
|
| /tools/testing/radix-tree/ |
| regression1.c |
|   80:  unsigned int nr_pages, struct page **pages)  [in find_get_pages(), argument]
|  102:  pages[ret] = page;  [in find_get_pages()]
|  157:  struct page *pages[10];  [in regression1_fn(), local]
|  159:  find_get_pages(0, 10, pages);  [in regression1_fn()]
|
| regression2.c |
|   82:  struct page *pages[1];  [in regression2_test(), local]
|  114:  radix_tree_gang_lookup_tag_slot(&mt_tree, (void ***)pages, start, end,  [in regression2_test()]
|
| /tools/testing/selftests/kvm/s390/ |
| tprot.c |
|   17:  static __aligned(PAGE_SIZE) uint8_t pages[2][PAGE_SIZE];
|   18:  static uint8_t *const page_store_prot = pages[0];
|   19:  static uint8_t *const page_fetch_prot = pages[1];
|  219:  mprotect(addr_gva2hva(vm, (vm_vaddr_t)pages), PAGE_SIZE * 2, PROT_READ);  [in main()]
|
| /tools/testing/selftests/kvm/ |
| memslot_modification_stress_test.c |
|  59:  uint64_t pages = max_t(int, vm->page_size, getpagesize()) / vm->page_size;  [in add_remove_memslot(), local]
|  67:  gpa = memstress_args.gpa - pages * vm->page_size;  [in add_remove_memslot()]
|  72:  DUMMY_MEMSLOT_INDEX, pages, 0);  [in add_remove_memslot()]
|
| access_tracking_perf_test.c |
|  177:  uint64_t pages = vcpu_args->pages;  [in pageidle_mark_vcpu_memory_idle(), local]
|  194:  for (page = 0; page < pages; page++) {  [in pageidle_mark_vcpu_memory_idle()]
|  215:  TEST_ASSERT(no_pfn < pages / 100,  [in pageidle_mark_vcpu_memory_idle()]
|  217:  vcpu_idx, no_pfn, pages);  [in pageidle_mark_vcpu_memory_idle()]
|  227:  if (still_idle >= pages / 10)  [in pageidle_mark_vcpu_memory_idle()]
|  228:  too_many_idle_pages(still_idle, pages,  [in pageidle_mark_vcpu_memory_idle()]
|
| dirty_log_test.c |
|  300:  uint64_t pages;  [in dirty_ring_create_vm_done(), local]
|  308:  pages = (1ul << (DIRTY_MEM_BITS - vm->page_shift)) + 3;  [in dirty_ring_create_vm_done()]
|  309:  pages = vm_adjust_num_guest_pages(vm->mode, pages);  [in dirty_ring_create_vm_done()]
|  311:  pages = vm_num_host_pages(vm->mode, pages);  [in dirty_ring_create_vm_done()]
|  313:  limit = 1 << (31 - __builtin_clz(pages));  [in dirty_ring_create_vm_done()]
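dirty_ring_create_vm_done() computes the guest and host page counts for the dirty-tracked region and then derives `limit = 1 << (31 - __builtin_clz(pages))`, the largest power of two not exceeding `pages`. A standalone sketch of that round-down trick for a nonzero 32-bit value (note that __builtin_clz(0) is undefined):

    /* Sketch: largest power of two <= pages, via count-leading-zeros, as in
     * dirty_ring_create_vm_done(). Only valid for nonzero 32-bit values. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int pages = 2051;   /* illustrative nonzero page count */
        unsigned int limit = 1u << (31 - __builtin_clz(pages));

        printf("pages=%u -> limit=%u\n", pages, limit);   /* 2051 -> 2048 */
        return 0;
    }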
|
| demand_paging_test.c |
|  179:  vcpu_args->pages * memstress_args.guest_page_size);  [in run_test()]
|  228:  vcpu_paging_rate = memstress_args.vcpu_args[0].pages /  [in run_test()]
|
| /tools/perf/util/ |
| evlist.c |
|  833:  unsigned long pages;  [in perf_event_mlock_kb_in_pages(), local]
|  849:  pages = rounddown_pow_of_two(pages);  [in perf_event_mlock_kb_in_pages()]
|  851:  return pages;  [in perf_event_mlock_kb_in_pages()]
|  897:  pages = roundup_pow_of_two(pages);  [in parse_pages_arg()]
|  898:  if (!pages)  [in parse_pages_arg()]
|  903:  buf, pages);  [in parse_pages_arg()]
|  906:  if (pages > max)  [in parse_pages_arg()]
|  909:  return pages;  [in parse_pages_arg()]
|  915:  long pages;  [in __evlist__parse_mmap_pages(), local]
|  921:  if (pages < 0) {  [in __evlist__parse_mmap_pages()]
|  [all …]
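perf_event_mlock_kb_in_pages() turns the kernel.perf_event_mlock_kb sysctl into a default mmap size, converting kilobytes to pages and rounding down to a power of two, while parse_pages_arg() rounds a user-supplied size up and checks it against a maximum. The sketch below covers only the sysctl-to-pages side: it reads the real /proc/sys/kernel/perf_event_mlock_kb path but omits the headroom perf reserves, and the rounding helper is a generic stand-in.

    /* Sketch: derive a power-of-two mmap page count from the
     * kernel.perf_event_mlock_kb sysctl, in the spirit of
     * perf_event_mlock_kb_in_pages(); headroom handling is omitted and the
     * rounding helper is a generic stand-in. */
    #include <stdio.h>
    #include <unistd.h>

    static unsigned long rounddown_pow_of_two_ul(unsigned long n)
    {
        unsigned long p = 1;

        while (p * 2 <= n)
            p *= 2;
        return p;
    }

    int main(void)
    {
        FILE *f = fopen("/proc/sys/kernel/perf_event_mlock_kb", "r");
        long mlock_kb = 512;   /* fallback if the sysctl is unreadable */
        long page_size = sysconf(_SC_PAGESIZE);
        unsigned long pages;

        if (f) {
            if (fscanf(f, "%ld", &mlock_kb) != 1)
                mlock_kb = 512;
            fclose(f);
        }
        pages = (unsigned long)mlock_kb * 1024 / page_size;
        if (pages)
            pages = rounddown_pow_of_two_ul(pages);
        printf("perf_event_mlock_kb=%ld kB -> %lu mmap pages\n", mlock_kb, pages);
        return 0;
    }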
|
| evlist.h |
|  193:  int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
|  197:  int evlist__mmap(struct evlist *evlist, unsigned int pages);
|  200:  size_t evlist__mmap_size(unsigned long pages);
|
| /tools/testing/selftests/bpf/progs/ |
| verifier_arena.c |
|  110:  volatile char __arena *pages;  [in basic_alloc3(), local]
|  112:  pages = bpf_arena_alloc_pages(&ar->map, NULL, ar->map.max_entries, NUMA_NO_NODE, 0);  [in basic_alloc3()]
|  113:  if (!pages)  [in basic_alloc3()]
|
| /tools/virtio/linux/ |
| virtio.h |
|  66:  void *pages,
|
| /tools/testing/selftests/kvm/include/ |
| memstress.h |
|  25:  uint64_t pages;  [member]
|
| /tools/testing/selftests/kvm/arm64/ |
| vgic_lpi_stress.c |
|  141:  size_t pages;  [in setup_memslot(), local]
|  160:  pages = sz / vm->page_size;  [in setup_memslot()]
|  163:  TEST_MEMSLOT_INDEX, pages, 0);  [in setup_memslot()]
|
| /tools/lib/perf/include/perf/ |
| evsel.h |
|  34:  LIBPERF_API int perf_evsel__mmap(struct perf_evsel *evsel, int pages);
|
| evlist.h |
|  38:  LIBPERF_API int perf_evlist__mmap(struct perf_evlist *evlist, int pages);
|
| /tools/verification/rv/ |
| README.txt |
|  18:  It also depends on python3-docutils to compile man pages.
|
| /tools/tracing/rtla/ |
| README.txt |
|  23:  It also depends on python3-docutils to compile man pages.
|
| /tools/arch/x86/kcpuid/ |
| cpuid.csv |
|  732:  0x80000005, 0, ebx, 7:0, l1_itlb_4k_nentries, L1 ITLB #entries, 4K pages
|  733:  0x80000005, 0, ebx, 15:8, l1_itlb_4k_assoc, L1 ITLB associativity, 4K pages
|  734:  0x80000005, 0, ebx, 23:16, l1_dtlb_4k_nentries, L1 DTLB #entries, 4K pages
|  735:  0x80000005, 0, ebx, 31:24, l1_dtlb_4k_assoc, L1 DTLB associativity, 4K pages
|  752:  0x80000006, 0, ebx, 11:0, l2_itlb_4k_nentries, L2 iTLB #entries, 4K pages
|  754:  0x80000006, 0, ebx, 27:16, l2_dtlb_4k_nentries, L2 dTLB #entries, 4K pages
|  859:  # AMD TLB 1G-pages enumeration
|  861:  0x80000019, 0, eax, 11:0, l1_itlb_1g_nentries, L1 iTLB #entries, 1G pages
|  863:  0x80000019, 0, eax, 27:16, l1_dtlb_1g_nentries, L1 dTLB #entries, 1G pages
|  865:  0x80000019, 0, ebx, 11:0, l2_itlb_1g_nentries, L2 iTLB #entries, 1G pages
|  [all …]
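The cpuid.csv rows above give the bit layout of AMD's TLB enumeration leaves, e.g. CPUID 0x80000005 EBX holds the 4K-page L1 ITLB entry count in bits 7:0 and its associativity code in bits 15:8. A short sketch that reads that leaf with GCC's __get_cpuid() and extracts those two fields as the table describes (the output is only meaningful on CPUs that implement the leaf):

    /* Sketch: decode L1 ITLB info for 4K pages from CPUID leaf 0x80000005 EBX,
     * using the bit ranges listed in cpuid.csv (7:0 = #entries, 15:8 = assoc). */
    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(0x80000005, &eax, &ebx, &ecx, &edx)) {
            puts("CPUID leaf 0x80000005 not supported");
            return 1;
        }
        printf("L1 ITLB, 4K pages: %u entries, associativity code %u\n",
               ebx & 0xff,           /* bits 7:0  */
               (ebx >> 8) & 0xff);   /* bits 15:8 */
        return 0;
    }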
|