/tools/testing/selftests/bpf/progs/
verifier_arena.c
    123: char __arena *page;  (in basic_reserve1(), local)
    127: if (!page)  (in basic_reserve1())
    130: page += __PAGE_SIZE;  (in basic_reserve1())
    138: page = bpf_arena_alloc_pages(&arena, page, 1, NUMA_NO_NODE, 0);  (in basic_reserve1())
    139: if (page)  (in basic_reserve1())
    144: if (page)  (in basic_reserve1())
    155: char __arena *page;  (in basic_reserve2(), local)
    163: page = bpf_arena_alloc_pages(&arena, page, 1, NUMA_NO_NODE, 0);  (in basic_reserve2())
    164: if ((u64)page)  (in basic_reserve2())
    176: char __arena *page;  (in reserve_twice(), local)
    [all …]
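Note: the hits above trace verifier tests around reserving and allocating arena pages. A minimal hedged sketch of the pattern they outline, assuming the arena map layout and kfunc declarations the bpf selftests pull in via bpf_arena_common.h (the function name and body are a reconstruction, not the file's exact code):

    struct {
            __uint(type, BPF_MAP_TYPE_ARENA);
            __uint(map_flags, BPF_F_MMAPABLE);
            __uint(max_entries, 1);
    } arena SEC(".maps");

    SEC("syscall")
    int alloc_fixed_twice(void *ctx)
    {
            char __arena *page;

            /* Let the arena choose an address for one page. */
            page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
            if (!page)
                    return 1;
            /* A fixed-address allocation over the same live page must
             * fail, so a non-NULL result here is a test failure. */
            page = bpf_arena_alloc_pages(&arena, page, 1, NUMA_NO_NODE, 0);
            if (page)
                    return 2;
            return 0;
    }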
verifier_arena_large.c
    90: *page = 0x5a;  (in access_reserved())
    99: if (*page == 0x5a)  (in access_reserved())
    102: if (*page)  (in access_reserved())
    139: char __arena *page;  (in free_reserved(), local)
    146: if (!page)  (in free_reserved())
    162: if (!page)  (in free_reserved())
    223: pg = page[i];  (in big_alloc2())
    227: page[i] = NULL;  (in big_alloc2())
    238: pg = page[i];  (in big_alloc2())
    242: page[i] = NULL;  (in big_alloc2())
    [all …]
arena_atomics.c
    225: void __arena *page;  (in uaf(), local)
    227: page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);  (in uaf())
    228: bpf_arena_free_pages(&arena, page, 1);  (in uaf())
    231: page32 = (__u32 __arena *)page;  (in uaf())
    257: page64 = (__u64 __arena *)page;  (in uaf())
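Note: uaf() above allocates one arena page, frees it, then touches it through typed arena pointers. A hedged sketch of that shape (declarations simplified; in the real test page32/page64 live elsewhere and the harness checks the resulting behavior):

    SEC("syscall")
    int arena_uaf(void *ctx)
    {
            void __arena *page;
            __u32 __arena *page32;

            page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
            bpf_arena_free_pages(&arena, page, 1);

            /* Deliberate use-after-free: touch the freed page through a
             * 32-bit view and let the test observe what happens. */
            page32 = (__u32 __arena *)page;
            *page32 = 1;
            return 0;
    }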
/tools/testing/radix-tree/
regression1.c
    48: struct page {  (struct)
    57: struct page *p;  (in page_alloc())
    58: p = malloc(sizeof(struct page));  (in page_alloc())
    68: struct page *p = container_of(rcu, struct page, rcu);  (in page_rcu_free())
    83: struct page *page;  (in find_get_pages(), local)
    88: if (xas_retry(&xas, page))  (in find_get_pages())
    91: pthread_mutex_lock(&page->lock);  (in find_get_pages())
    92: if (!page->count)  (in find_get_pages())
    102: pages[ret] = page;  (in find_get_pages())
    125: struct page *p;  (in regression1_fn())
    [all …]
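Note: the page_rcu_free() hit is the classic container_of()-based RCU callback. A minimal userspace sketch of the pattern, assuming liburcu for call_rcu() (the radix-tree harness actually ships its own RCU shim):

    #include <stddef.h>
    #include <stdlib.h>
    #include <pthread.h>
    #include <urcu.h>   /* liburcu: call_rcu(), struct rcu_head */

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct page {
            pthread_mutex_t lock;
            int count;
            struct rcu_head rcu;
    };

    /* Runs after a grace period; recover the page from its rcu_head. */
    static void page_rcu_free(struct rcu_head *rcu)
    {
            struct page *p = container_of(rcu, struct page, rcu);

            free(p);
    }

    static void page_free(struct page *p)
    {
            call_rcu(&p->rcu, page_rcu_free);
    }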
regression2.c
    63: struct page {  (struct)
    67: static struct page *page_alloc(void)  (in page_alloc(), argument)
    69: struct page *p;  (in page_alloc())
    70: p = malloc(sizeof(struct page));  (in page_alloc())
    79: struct page *p;  (in regression2_test())
    82: struct page *pages[1];  (in regression2_test())
/tools/mm/
show_page_info.py
    20: def format_page_data(page):  (argument)
    28: address = page.value_()
    52: def get_memcg_info(page):  (argument)
    58: memcg_data = page.memcg_data.read_()
    82: print(format_page_data(page))
    123: if PageSlab(page):
    126: if PageCompound(page):
    128: if PageHead(page):
    130: if PageTail(page):
    157: page = follow_page(mm, vaddr)
    [all …]
Makefile
    6: BUILD_TARGETS=page-types slabinfo page_owner_sort thp_swap_allocator_test
    26: $(RM) page-types slabinfo page_owner_sort thp_swap_allocator_test
.gitignore
    3: page-types
/tools/testing/selftests/bpf/
bpf_arena_alloc.h
    24: void __arena *page = page_frag_cur_page[cpu];  (in bpf_alloc(), local)
    31: if (!page) {  (in bpf_alloc())
    33: page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);  (in bpf_alloc())
    34: if (!page)  (in bpf_alloc())
    36: cast_kern(page);  (in bpf_alloc())
    37: page_frag_cur_page[cpu] = page;  (in bpf_alloc())
    39: obj_cnt = page + PAGE_SIZE - 8;  (in bpf_alloc())
    42: cast_kern(page);  (in bpf_alloc())
    43: obj_cnt = page + PAGE_SIZE - 8;  (in bpf_alloc())
    52: return page + offset;  (in bpf_alloc())
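Note: the bpf_alloc() hits sketch a per-CPU page-frag bump allocator: objects are carved out of the CPU's current arena page, and a live-object counter sits in the page's last 8 bytes (obj_cnt = page + PAGE_SIZE - 8). A hedged, single-threaded plain-C stand-in for that scheme, with malloc standing in for bpf_arena_alloc_pages():

    #include <stdint.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096UL

    static char *cur_page;          /* stands in for page_frag_cur_page[cpu] */
    static unsigned long cur_off;   /* bump offset within cur_page */

    /* Assumes size <= PAGE_SIZE - 8; sketch only. */
    static void *frag_alloc(unsigned long size)
    {
            uint64_t *obj_cnt;

            if (!cur_page || cur_off + size > PAGE_SIZE - 8) {
                    /* Current page exhausted: grab a fresh page and
                     * zero the counter kept in its last 8 bytes. */
                    cur_page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
                    if (!cur_page)
                            return NULL;
                    cur_off = 0;
                    *(uint64_t *)(cur_page + PAGE_SIZE - 8) = 0;
            }
            obj_cnt = (uint64_t *)(cur_page + PAGE_SIZE - 8);
            (*obj_cnt)++;           /* one more live object on this page */
            cur_off += size;
            return cur_page + cur_off - size;
    }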
/tools/perf/
builtin-help.c
    176: execlp(path, "man", page, NULL);  (in exec_man_man())
    352: exec_man_man(info, page);  (in exec_viewer())
    354: exec_woman_emacs(info, page);  (in exec_viewer())
    356: exec_man_konqueror(info, page);  (in exec_viewer())
    358: exec_man_cmd(info, page);  (in exec_viewer())
    366: const char *page = cmd_to_page(perf_cmd);  (in show_man_page(), local)
    374: exec_viewer(fallback, page);  (in show_man_page())
    375: exec_viewer("man", page);  (in show_man_page())
    383: const char *page = cmd_to_page(perf_cmd);  (in show_info_page(), local)
    419: const char *page = cmd_to_page(perf_cmd);  (in show_html_page(), local)
    [all …]
builtin-kmem.c
    292: u64 page;  (member)
    474: cmp = data->page - pstat->page;  (in __page_stat__findnew_page())
    488: data->page = pstat->page;  (in __page_stat__findnew_page())
    543: data->page = pstat->page;  (in __page_stat__findnew_alloc())
    810: u64 page;  (in evsel__process_page_alloc_event(), local)
    848: this.page = page;  (in evsel__process_page_alloc_event())
    882: u64 page;  (in evsel__process_page_free_event(), local)
    898: this.page = page;  (in evsel__process_page_free_event())
    902: page, order);  (in evsel__process_page_free_event())
    1536: if (l->page < r->page)  (in page_cmp())
    [all …]
/tools/testing/scatterlist/linux/
mm.h
    45: static inline unsigned long page_to_phys(struct page *page)  (in page_to_phys(), argument)
    52: #define page_to_pfn(page) ((unsigned long)(page) / PAGE_SIZE)  (argument)
    54: #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))  (argument)
    79: static inline void *kmap(struct page *page)  (in kmap(), argument)
    86: static inline void *kmap_atomic(struct page *page)  (in kmap_atomic(), argument)
    108: static inline void free_page(unsigned long page)  (in free_page(), argument)
    110: free((void *)page);  (in free_page())
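Note: these stubs let kernel scatterlist code build in userspace by treating a struct page pointer as a bare address, so pfn arithmetic collapses into pointer math. A small hedged demo of how the macros above compose (pfn_to_page() is assumed to be the inverse identity mapping):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    struct page;    /* opaque: the "page" is just its address */

    #define page_to_pfn(page) ((unsigned long)(page) / PAGE_SIZE)
    #define pfn_to_page(pfn)  ((struct page *)((pfn) * PAGE_SIZE))
    #define nth_page(page, n) pfn_to_page(page_to_pfn((page)) + (n))

    int main(void)
    {
            struct page *p = (struct page *)0x10000;

            /* nth_page() advances one PAGE_SIZE per page: prints 0x13000 */
            printf("%#lx\n", (unsigned long)nth_page(p, 3));
            return 0;
    }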
/tools/virtio/linux/
scatterlist.h
    30: static inline void sg_assign_page(struct scatterlist *sg, struct page *page)  (in sg_assign_page(), argument)
    38: BUG_ON((unsigned long) page & 0x03);  (in sg_assign_page())
    42: sg->page_link = page_link | (unsigned long) page;  (in sg_assign_page())
    59: static inline void sg_set_page(struct scatterlist *sg, struct page *page,  (in sg_set_page(), argument)
    62: sg_assign_page(sg, page);  (in sg_set_page())
    67: static inline struct page *sg_page(struct scatterlist *sg)  (in sg_page())
    72: return (struct page *)((sg)->page_link & ~0x3);  (in sg_page())
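Note: sg_assign_page() and sg_page() implement the kernel's tagged-pointer trick: the low two bits of page_link mark chained and terminating entries, which is why the BUG_ON() above insists the page pointer has its low two bits clear. A hedged, self-contained sketch of the encoding (flag names and values match the kernel's SG_CHAIN/SG_END):

    #include <assert.h>

    #define SG_CHAIN 0x01UL
    #define SG_END   0x02UL

    struct page;
    struct scatterlist { unsigned long page_link; };

    static void sg_assign_page(struct scatterlist *sg, struct page *page)
    {
            unsigned long flags = sg->page_link & (SG_CHAIN | SG_END);

            /* The low two bits carry flags, so they must be clear. */
            assert(((unsigned long)page & 0x03) == 0);
            sg->page_link = flags | (unsigned long)page;
    }

    static struct page *sg_page(struct scatterlist *sg)
    {
            return (struct page *)(sg->page_link & ~(SG_CHAIN | SG_END));
    }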
mm_types.h
    2: struct page page;  (member)
kmsan.h
    7: inline void kmsan_handle_dma(struct page *page, size_t offset, size_t size,  (in kmsan_handle_dma(), argument)
/tools/testing/nvdimm/
pmem-dax.c
    26: struct page *page;  (in __pmem_direct_access(), local)
    30: page = vmalloc_to_page(pmem->virt_addr + offset);  (in __pmem_direct_access())
    32: *pfn = page_to_pfn(page);  (in __pmem_direct_access())
    34: __func__, pmem, pgoff, page_to_pfn(page));  (in __pmem_direct_access())
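Note: the nvdimm test shim backs pmem with vmalloc() memory, so __pmem_direct_access() resolves a page offset to a pfn via vmalloc_to_page(). A hedged sketch of that translation (kernel context assumed; the real function also fills a kaddr and uses the pfn_t type):

    static long test_direct_access(void *virt_addr, pgoff_t pgoff,
                                   unsigned long *pfn)
    {
            resource_size_t offset = pgoff * PAGE_SIZE;
            struct page *page;

            page = vmalloc_to_page(virt_addr + offset);
            if (!page)
                    return -EFAULT;
            *pfn = page_to_pfn(page);
            return 1;       /* one page addressable at this offset */
    }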
dax-dev.c
    26: struct page *page;  (in dax_pgoff_to_phys(), local)
    31: page = vmalloc_to_page((void *)addr);  (in dax_pgoff_to_phys())
    32: return PFN_PHYS(page_to_pfn(page));  (in dax_pgoff_to_phys())
/tools/testing/selftests/net/bench/page_pool/
bench_page_pool_simple.c
    110: struct page **array;  (in pp_fill_ptr_ring())
    113: array = kcalloc(elems, sizeof(struct page *), gfp_mask);  (in pp_fill_ptr_ring())
    134: struct page *page;  (in time_bench_page_pool(), local)
    162: page = page_pool_alloc_pages(pp, gfp_mask);  (in time_bench_page_pool())
    163: if (!page)  (in time_bench_page_pool())
    173: page_pool_recycle_direct(pp, page);  (in time_bench_page_pool())
    177: page_pool_put_page(pp, page, -1, false);  (in time_bench_page_pool())
    183: get_page(page); /* cause no-recycling */  (in time_bench_page_pool())
    184: page_pool_put_page(pp, page, -1, false);  (in time_bench_page_pool())
    185: put_page(page);  (in time_bench_page_pool())
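Note: time_bench_page_pool() above exercises three ways of returning a page to a page_pool. A hedged sketch of the three paths, kernel context assumed (this benchmark builds as a kernel test module):

    static int pp_paths_once(struct page_pool *pp)
    {
            struct page *page;

            page = page_pool_alloc_pages(pp, GFP_ATOMIC);
            if (!page)
                    return -ENOMEM;
            /* Fastest: recycle straight into the pool's per-CPU cache. */
            page_pool_recycle_direct(pp, page);

            page = page_pool_alloc_pages(pp, GFP_ATOMIC);
            if (!page)
                    return -ENOMEM;
            /* Slower: return the page through the pool's ptr_ring. */
            page_pool_put_page(pp, page, -1, false);

            page = page_pool_alloc_pages(pp, GFP_ATOMIC);
            if (!page)
                    return -ENOMEM;
            /* No recycling: the extra reference defeats recycling, so
             * the final put falls back to the page allocator. */
            get_page(page);
            page_pool_put_page(pp, page, -1, false);
            put_page(page);
            return 0;
    }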
/tools/testing/memblock/
internal.h
    16: struct page {};  (struct)
    18: void memblock_free_pages(struct page *page, unsigned long pfn,  (in memblock_free_pages(), argument)
/tools/perf/Documentation/
perf-kmem.txt
    48: for page). Available sort keys are 'ptr, callsite, bytes, hit,
    49: pingpong, frag' for slab and 'page, callsite, bytes, hit, order,
    50: migtype, gfp' for page. This option should be preceded by one of the
    51: mode selection options - i.e. --slab, --page, --alloc and/or --caller.
    63: --page::
    64: Analyze page allocator events
    67: Show live page stat. The perf kmem shows total allocation stat by
    69: instead. (This option works with --page option only)
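Note: taken together, the excerpts describe the page-mode workflow. A hedged usage example assembled only from the options quoted above:

    # record page allocator events, then sort the per-page stats
    perf kmem record --page sleep 10
    perf kmem stat --page --caller --sort=hit,order

    # live page stat instead of cumulative totals (--page mode only)
    perf kmem stat --page --live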
examples.txt
    28: measured. For example the page alloc/free properties of a 'hackbench
    66: well. For example the page allocations done by a 'git gc' can be
    77: To check which functions generated page allocations:
    119: Furthermore, call-graph sampling can be done too, of page
    120: allocations - to see precisely what kind of page allocations there
    157: Or you can observe the whole system's page allocations for 10
    173: Or observe how fluctuating the page allocations are, via statistical
    190: and check which instructions/source-code generated page allocations:
    209: ( this shows that 83.42% of __GI___fork's page allocations come from
/tools/power/cpupower/utils/
cpupower.c
    84: char *page;  (in print_man_page(), local)
    90: page = malloc(len);  (in print_man_page())
    91: if (!page)  (in print_man_page())
    94: sprintf(page, "cpupower");  (in print_man_page())
    96: strcat(page, "-");  (in print_man_page())
    97: strcat(page, subpage);  (in print_man_page())
    100: execlp("man", "man", page, NULL);  (in print_man_page())
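Note: print_man_page() builds the man page name "cpupower-<subcommand>" and execs man(1) on it. A hedged reconstruction of that shape, tightened to a single snprintf() where the file itself uses sprintf() plus strcat():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    static void print_man_page(const char *subpage)
    {
            size_t len = strlen("cpupower") + strlen(subpage) + 2; /* '-' + NUL */
            char *page = malloc(len);

            if (!page)
                    exit(EXIT_FAILURE);

            snprintf(page, len, "cpupower-%s", subpage);
            execlp("man", "man", page, NULL);  /* returns only on error */
            free(page);
            exit(EXIT_FAILURE);
    }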
/tools/testing/selftests/powerpc/mm/
subpage_prot.c
    68: static inline void check_faulted(void *addr, long page, long subpage, int write)  (in check_faulted(), argument)
    70: int want_fault = (subpage == ((page + 3) % 16));  (in check_faulted())
    73: want_fault |= (subpage == ((page + 1) % 16));  (in check_faulted())
    77: addr, page, subpage, write,  (in check_faulted())
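Note: check_faulted() encodes the test's expectations: subpage (page + 3) % 16 should fault on any access, and (page + 1) % 16 should additionally fault on writes. A hedged reading of the two hits (the if (write) guard is inferred from the |= at line 73):

    static int expect_fault(long page, long subpage, int write)
    {
            /* This subpage is protected against all access. */
            int want_fault = (subpage == ((page + 3) % 16));

            /* This one is read-only, so only writes fault. */
            if (write)
                    want_fault |= (subpage == ((page + 1) % 16));
            return want_fault;
    }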
/tools/testing/selftests/kvm/
dirty_log_test.c
    497: uint64_t page, nr_dirty_pages = 0, nr_clean_pages = 0;  (in vm_dirty_log_verify(), local)
    500: for (page = 0; page < host_num_pages; page += step) {  (in vm_dirty_log_verify())
    501: uint64_t val = *(uint64_t *)(host_test_mem + page * host_page_size);  (in vm_dirty_log_verify())
    502: bool bmap0_dirty = __test_and_clear_bit_le(page, bmap[0]);  (in vm_dirty_log_verify())
    510: if (__test_and_clear_bit_le(page, bmap[1]) || bmap0_dirty) {  (in vm_dirty_log_verify())
    527: if (page == dirty_ring_prev_iteration_last_page &&  (in vm_dirty_log_verify())
    537: if (page == dirty_ring_last_page &&  (in vm_dirty_log_verify())
    554: page, val, iteration, dirty_ring_last_page,  (in vm_dirty_log_verify())
    565: page, val, iteration, dirty_ring_last_page,  (in vm_dirty_log_verify())
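Note: the verify loop walks every guest page, clears the page's bit in both harvested bitmaps, and classifies the page as dirty or clean. A hedged stand-in for the core of that loop, with a plain test-and-clear replacing __test_and_clear_bit_le():

    #include <stdbool.h>
    #include <stdint.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    static bool test_and_clear(unsigned long *bmap, uint64_t nr)
    {
            unsigned long mask = 1UL << (nr % BITS_PER_LONG);
            unsigned long *word = &bmap[nr / BITS_PER_LONG];
            bool was_set = *word & mask;

            *word &= ~mask;
            return was_set;
    }

    static void classify(unsigned long *bmap0, unsigned long *bmap1,
                         uint64_t num_pages,
                         uint64_t *nr_dirty, uint64_t *nr_clean)
    {
            for (uint64_t page = 0; page < num_pages; page++) {
                    bool dirty0 = test_and_clear(bmap0, page);

                    /* Dirty if either bitmap saw the page this pass. */
                    if (test_and_clear(bmap1, page) || dirty0)
                            (*nr_dirty)++;
                    else
                            (*nr_clean)++;
            }
    }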
/tools/net/ynl/samples/
.gitignore
    5: page-pool