/kernel/dma/

remap.c
    17  return area->pages;  in dma_common_find_pages()
    24  void *dma_common_pages_remap(struct page **pages, size_t size,  in dma_common_pages_remap() (argument)
    29  vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT,  in dma_common_pages_remap()
    32  find_vm_area(vaddr)->pages = pages;  in dma_common_pages_remap()
    44  struct page **pages;  in dma_common_contiguous_remap() (local)
    48  pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);  in dma_common_contiguous_remap()
    49  if (!pages)  in dma_common_contiguous_remap()
    52  pages[i] = nth_page(page, i);  in dma_common_contiguous_remap()
    53  vaddr = vmap(pages, count, VM_DMA_COHERENT, prot);  in dma_common_contiguous_remap()
    54  kvfree(pages);  in dma_common_contiguous_remap()
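The dma_common_contiguous_remap() hits above trace a common shape: compute a page count from a byte size, fill a temporary page-pointer array with nth_page(), hand it to vmap(), then free the array. Below is a minimal userspace sketch of that shape only; vmap() and nth_page() are replaced by plain pointer arithmetic, and PAGE_SIZE/PAGE_ALIGN are local stand-ins for the kernel macros (4 KiB pages assumed).

```c
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12                       /* assuming 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	size_t size = 10000;                /* deliberately not page aligned */
	size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	char *buf = aligned_alloc(PAGE_SIZE, PAGE_ALIGN(size));
	void **pages = calloc(count, sizeof(*pages));

	if (!buf || !pages)
		return 1;
	for (size_t i = 0; i < count; i++)
		pages[i] = buf + i * PAGE_SIZE;   /* nth_page(page, i) analogue */

	printf("%zu bytes -> %zu pages, pages[0]=%p pages[%zu]=%p\n",
	       size, count, pages[0], count - 1, pages[count - 1]);
	free(pages);
	free(buf);
	return 0;
}
```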
pool.c
    196  unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K);  in dma_atomic_pool_init() (local)
    197  pages = min_t(unsigned long, pages, MAX_ORDER_NR_PAGES);  in dma_atomic_pool_init()
    198  atomic_pool_size = max_t(size_t, pages << PAGE_SHIFT, SZ_128K);  in dma_atomic_pool_init()
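These three lines encode the atomic DMA pool sizing heuristic: roughly 128 KiB of pool per 1 GiB of RAM, clamped above by the largest buddy-allocator block and below by 128 KiB. A standalone rendering of that arithmetic follows; PAGE_SHIFT and MAX_ORDER_NR_PAGES are assumptions defined locally (4 KiB pages, an order-10 maximum), not taken from a kernel build.

```c
#include <stdio.h>

#define SZ_128K            (128UL * 1024)
#define SZ_1G              (1024UL * 1024 * 1024)
#define PAGE_SHIFT         12            /* assuming 4 KiB pages */
#define MAX_ORDER_NR_PAGES (1UL << 10)   /* assumed order-10 buddy limit */

static unsigned long atomic_pool_size(unsigned long totalram_pages)
{
	/* ~128 KiB of pool per GiB of RAM ... */
	unsigned long pages = totalram_pages / (SZ_1G / SZ_128K);

	/* ... capped at the largest buddy block ... */
	if (pages > MAX_ORDER_NR_PAGES)
		pages = MAX_ORDER_NR_PAGES;
	/* ... and never smaller than 128 KiB */
	if ((pages << PAGE_SHIFT) < SZ_128K)
		return SZ_128K;
	return pages << PAGE_SHIFT;
}

int main(void)
{
	/* 4 GiB of RAM in 4 KiB pages: expect 512 KiB of atomic pool */
	printf("%lu\n", atomic_pool_size((4UL << 30) >> PAGE_SHIFT));
	return 0;
}
```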
coherent.c
    42  int pages = size >> PAGE_SHIFT;  in dma_init_coherent_memory() (local)
    55  dma_mem->bitmap = bitmap_zalloc(pages, GFP_KERNEL);  in dma_init_coherent_memory() 
    62  dma_mem->size = pages;  in dma_init_coherent_memory()
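The bookkeeping visible here is one bit per page over a fixed region: the byte size becomes a page count, and bitmap_zalloc() provides the allocation map. A userspace sketch of that pattern, with calloc() standing in for bitmap_zalloc() and a hypothetical first-fit helper written out in full:

```c
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT    12   /* assuming 4 KiB pages */
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

struct coherent_mem {
	unsigned long *bitmap;   /* one bit per page, set = allocated */
	int size;                /* region size in pages */
};

static int page_busy(struct coherent_mem *m, int n)
{
	return (m->bitmap[n / BITS_PER_LONG] >> (n % BITS_PER_LONG)) & 1;
}

static void mark_busy(struct coherent_mem *m, int n)
{
	m->bitmap[n / BITS_PER_LONG] |= 1UL << (n % BITS_PER_LONG);
}

/* first-fit scan for 'nr' consecutive free pages; -1 when none fit */
static int first_free_run(struct coherent_mem *m, int nr)
{
	for (int start = 0; start + nr <= m->size; start++) {
		int i;
		for (i = 0; i < nr && !page_busy(m, start + i); i++)
			;
		if (i == nr)
			return start;
	}
	return -1;
}

int main(void)
{
	struct coherent_mem m;
	size_t region = 1 << 20;                     /* a 1 MiB region */

	m.size = region >> PAGE_SHIFT;               /* 256 pages */
	m.bitmap = calloc((m.size + BITS_PER_LONG - 1) / BITS_PER_LONG,
			  sizeof(unsigned long));    /* bitmap_zalloc() analogue */
	if (!m.bitmap)
		return 1;

	int pg = first_free_run(&m, 4);
	if (pg < 0)
		return 1;
	for (int i = 0; i < 4; i++)
		mark_busy(&m, pg + i);
	printf("allocated pages %d..%d of %d\n", pg, pg + 3, m.size);
	free(m.bitmap);
	return 0;
}
```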
contiguous.c
    325  bool dma_release_from_contiguous(struct device *dev, struct page *pages,  in dma_release_from_contiguous() (argument)
    328  return cma_release(dev_get_cma_area(dev), pages, count);  in dma_release_from_contiguous()
Kconfig
    242  For example, if your system defaults to 4KiB pages, the order value
/kernel/

kexec_core.c
    285  if (pages) {  in kimage_alloc_pages()
    302  return pages;  in kimage_alloc_pages()
    359  if (!pages)  in kimage_alloc_normal_control_pages()
    368  pages = NULL;  in kimage_alloc_normal_control_pages()
    370  } while (!pages);  in kimage_alloc_normal_control_pages()
    372  if (pages) {  in kimage_alloc_normal_control_pages()
    392  return pages;  in kimage_alloc_normal_control_pages()
    423  pages = NULL;  in kimage_alloc_crash_control_pages()
    456  if (pages)  in kimage_alloc_crash_control_pages()
    459  return pages;  in kimage_alloc_crash_control_pages()
    [all …]
watch_queue.c
    240  struct page **pages;  in watch_queue_set_size() (local)
    281  pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);  in watch_queue_set_size()
    282  if (!pages)  in watch_queue_set_size()
    286  pages[i] = alloc_page(GFP_KERNEL);  in watch_queue_set_size()
    287  if (!pages[i])  in watch_queue_set_size()
    289  pages[i]->private = i * WATCH_QUEUE_NOTES_PER_PAGE;  in watch_queue_set_size()
    297  wqueue->notes = pages;  in watch_queue_set_size()
    305  __free_page(pages[i]);  in watch_queue_set_size()
    306  kfree(pages);  in watch_queue_set_size()
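Line 289 is the interesting one: each preallocated page records, in its private field, the index of the first notification slot it holds, so a slot number maps to a page with one division. A userspace sketch of that indexing; struct fake_page and the note/page sizes here are illustrative stand-ins, not the kernel's layout.

```c
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE      4096
#define NOTE_SIZE      128                        /* illustrative slot size */
#define NOTES_PER_PAGE (PAGE_SIZE / NOTE_SIZE)    /* 32 notes per page */

struct fake_page {
	unsigned long private;   /* index of the first note on this page */
	char data[PAGE_SIZE];
};

int main(void)
{
	int nr_pages = 4;
	struct fake_page **pages = calloc(nr_pages, sizeof(*pages));

	if (!pages)
		return 1;
	for (int i = 0; i < nr_pages; i++) {
		if (!(pages[i] = calloc(1, sizeof(**pages))))
			return 1;
		pages[i]->private = (unsigned long)i * NOTES_PER_PAGE;
	}

	/* note 70 lives on page 70/32 = 2, at slot 70 - 64 = 6 */
	int note = 70;
	struct fake_page *pg = pages[note / NOTES_PER_PAGE];
	char *slot = pg->data + (note - pg->private) * NOTE_SIZE;

	printf("note %d starts at page offset %td\n", note, slot - pg->data);
	for (int i = 0; i < nr_pages; i++)
		free(pages[i]);
	free(pages);
	return 0;
}
```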
kprobes.c
    131  .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
    153  list_for_each_entry_rcu(kip, &c->pages, list) {  in __get_insn_slot()
    188  list_add_rcu(&kip->list, &c->pages);  in __get_insn_slot()
    234  list_for_each_entry_safe(kip, next, &c->pages, list) {  in collect_garbage_slots()
    256  list_for_each_entry_rcu(kip, &c->pages, list) {  in __find_insn_page()
    304  list_for_each_entry_rcu(kip, &c->pages, list) {  in __is_insn_slot_addr()
    323  list_for_each_entry_rcu(kip, &c->pages, list) {  in kprobe_cache_get_kallsym()
    354  .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
fork.c
    270  ret = memcg_kmem_charge_page(vm_area->pages[i], GFP_KERNEL, 0);  in memcg_charge_kernel_stack()
    278  memcg_kmem_uncharge_page(vm_area->pages[i], 0);  in memcg_charge_kernel_stack()
    444  mod_lruvec_page_state(vm_area->pages[i], NR_KERNEL_STACK_KB,  in account_kernel_stack()
    465  memcg_kmem_uncharge_page(vm_area->pages[i], 0);  in exit_task_stack_account()
/kernel/module/

decompress.c
    22  sizeof(info->pages), GFP_KERNEL);  in module_extend_max_pages()
    26  memcpy(new_pages, info->pages, info->max_pages * sizeof(info->pages));  in module_extend_max_pages()
    27  kvfree(info->pages);  in module_extend_max_pages()
    28  info->pages = new_pages;  in module_extend_max_pages()
    49  info->pages[info->used_pages++] = page;  in module_get_next_page()
    317  info->hdr = vmap(info->pages, info->used_pages, VM_MAP, PAGE_KERNEL);  in module_decompress()
    339  __free_page(info->pages[i]);  in module_decompress_cleanup()
    341  kvfree(info->pages);  in module_decompress_cleanup()
    343  info->pages = NULL;  in module_decompress_cleanup()
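module_extend_max_pages() grows the decompressor's page-pointer array the classic way: allocate a larger array, copy the old pointers across, free the old array, and let used_pages act as the append cursor (line 49). A userspace analogue of that pattern; the field names mirror the kernel's struct load_info, but the implementation here is plain malloc/calloc and the extent size is arbitrary.

```c
#include <stdlib.h>
#include <string.h>

struct load_info {
	void **pages;
	unsigned int max_pages;
	unsigned int used_pages;
};

/* grow the pointer array by 'extent' entries: alloc bigger, copy, free old */
static int extend_max_pages(struct load_info *info, unsigned int extent)
{
	void **new_pages = calloc(info->max_pages + extent, sizeof(*new_pages));

	if (!new_pages)
		return -1;
	if (info->pages)
		memcpy(new_pages, info->pages,
		       info->max_pages * sizeof(*new_pages));
	free(info->pages);
	info->pages = new_pages;
	info->max_pages += extent;
	return 0;
}

/* append one fresh "page", extending the array when it is full */
static void *get_next_page(struct load_info *info)
{
	if (info->used_pages == info->max_pages &&
	    extend_max_pages(info, 32))
		return NULL;
	void *page = malloc(4096);
	if (page)
		info->pages[info->used_pages++] = page;
	return page;
}

int main(void)
{
	struct load_info info = { 0 };

	for (int i = 0; i < 100; i++)   /* forces several extensions */
		if (!get_next_page(&info))
			return 1;
	return 0;
}
```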
internal.h
    81  struct page **pages;  (member)
/kernel/bpf/

ringbuf.c
    31  struct page **pages;  (member)
    97  struct page **pages, *page;  in bpf_ringbuf_area_alloc() (local)
    121  if (!pages)  in bpf_ringbuf_area_alloc()
    130  pages[i] = page;  in bpf_ringbuf_area_alloc()
    138  kmemleak_not_leak(pages);  in bpf_ringbuf_area_alloc()
    139  rb->pages = pages;  in bpf_ringbuf_area_alloc()
    146  __free_page(pages[i]);  in bpf_ringbuf_area_alloc()
    147  bpf_map_area_free(pages);  in bpf_ringbuf_area_alloc()
    222  struct page **pages = rb->pages;  in bpf_ringbuf_free() (local)
    227  __free_page(pages[i]);  in bpf_ringbuf_free()
    [all …]
sysfs_btf.c
    22  unsigned long pages = PAGE_ALIGN(attr->size) >> PAGE_SHIFT;  in btf_sysfs_vmlinux_mmap() (local)
    36  if (pfn + pages < pfn)  in btf_sysfs_vmlinux_mmap()
    39  if ((vm_size >> PAGE_SHIFT) > pages)  in btf_sysfs_vmlinux_mmap()
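The two checks on lines 36 and 39 are a common mmap validation pattern: reject a page-frame range whose sum wraps around, and reject a requested VMA larger than the backing object. A minimal standalone rendering of just that logic, with PAGE_SHIFT assumed to be 12:

```c
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* assuming 4 KiB pages */

static bool mmap_range_ok(unsigned long pfn, unsigned long obj_pages,
			  unsigned long vm_size)
{
	if (pfn + obj_pages < pfn)                  /* page-frame range wraps */
		return false;
	if ((vm_size >> PAGE_SHIFT) > obj_pages)    /* asks for more than exists */
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", mmap_range_ok(0x1000, 4, 4UL << PAGE_SHIFT));  /* 1: exact fit */
	printf("%d\n", mmap_range_ok(0x1000, 4, 8UL << PAGE_SHIFT));  /* 0: too large */
	printf("%d\n", mmap_range_ok(~0UL, 4, 1UL << PAGE_SHIFT));    /* 0: wraps */
	return 0;
}
```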
arena.c
    431  struct page **pages;  in arena_alloc_pages() (local)
    449  pages = kvcalloc(page_cnt, sizeof(struct page *), GFP_KERNEL);  in arena_alloc_pages()
    450  if (!pages)  in arena_alloc_pages()
    468  ret = bpf_map_alloc_pages(&arena->map, node_id, page_cnt, pages);  in arena_alloc_pages()
    481  kern_vm_start + uaddr32 + page_cnt * PAGE_SIZE, pages);  in arena_alloc_pages()
    484  __free_page(pages[i]);  in arena_alloc_pages()
    487  kvfree(pages);  in arena_alloc_pages()
    492  kvfree(pages);  in arena_alloc_pages()
local_storage.c
    475  static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages)  in bpf_cgroup_storage_calculate_size() (argument)
    481  *pages = round_up(sizeof(struct bpf_cgroup_storage) + size,  in bpf_cgroup_storage_calculate_size()
    485  *pages = round_up(round_up(size, 8) * num_possible_cpus(),  in bpf_cgroup_storage_calculate_size()
    499  u32 pages;  in bpf_cgroup_storage_alloc() (local)
    505  size = bpf_cgroup_storage_calculate_size(map, &pages);  in bpf_cgroup_storage_alloc()
core.c
    120  fp->pages = size / PAGE_SIZE;  in bpf_prog_alloc_no_stats()
    256  u32 pages;  in bpf_prog_realloc() (local)
    259  pages = size / PAGE_SIZE;  in bpf_prog_realloc()
    260  if (pages <= fp_old->pages)  in bpf_prog_realloc()
    265  memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);  in bpf_prog_realloc()
    266  fp->pages = pages;  in bpf_prog_realloc()
    1449  fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);  in bpf_prog_clone_create()
    1455  memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);  in bpf_prog_clone_create()
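These hits show BPF program memory being accounted in whole pages: round the requested size up, keep the old buffer if the page count did not grow, otherwise copy old->pages * PAGE_SIZE into a fresh allocation. A userspace sketch of that size logic; struct prog, the round-up expression, and the page size are local stand-ins rather than the kernel's definitions.

```c
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL

struct prog {
	unsigned int pages;   /* size of this allocation, in pages */
	/* instruction bytes follow in the same allocation */
};

static struct prog *prog_realloc(struct prog *old, size_t new_size)
{
	size_t size = (new_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
	unsigned int pages = size / PAGE_SIZE;

	if (pages <= old->pages)   /* still fits: keep the old buffer */
		return old;

	struct prog *fp = aligned_alloc(PAGE_SIZE, pages * PAGE_SIZE);
	if (!fp)
		return NULL;
	memcpy(fp, old, old->pages * PAGE_SIZE);   /* header + insns */
	fp->pages = pages;
	free(old);
	return fp;
}

int main(void)
{
	struct prog *p = aligned_alloc(PAGE_SIZE, PAGE_SIZE);

	if (!p)
		return 1;
	p->pages = 1;
	p = prog_realloc(p, 3 * PAGE_SIZE);   /* grow from 1 page to 3 */
	free(p);
	return 0;
}
```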
/kernel/power/

snapshot.c
    546  unsigned long pages;  in create_zone_bm_rtree() (local)
    548  pages = end - start;  in create_zone_bm_rtree()
    1894  pages += preallocate_image_memory(saveable - pages, avail_normal);  in hibernate_preallocate_memory()
    1905  if (avail_normal > pages)  in hibernate_preallocate_memory()
    1906  avail_normal -= pages;  in hibernate_preallocate_memory()
    1909  if (size < pages)  in hibernate_preallocate_memory()
    1934  if (pages < alloc) {  in hibernate_preallocate_memory()
    1936  alloc -= pages;  in hibernate_preallocate_memory()
    1937  pages += pages_highmem;  in hibernate_preallocate_memory()
    1944  pages += pages_highmem;  in hibernate_preallocate_memory()
    [all …]
swap.c
    940  unsigned long pages;  in swsusp_write() (local)
    943  pages = snapshot_get_image_size();  in swsusp_write()
    950  if (!enough_swap(pages)) {  in swsusp_write()
    968  save_image(&handle, &snapshot, pages - 1) :  in swsusp_write()
    969  save_compressed_image(&handle, &snapshot, pages - 1);  in swsusp_write()
    1553  load_image(&handle, &snapshot, header->pages - 1) :  in swsusp_read()
    1554  load_compressed_image(&handle, &snapshot, header->pages - 1);  in swsusp_read()
power.h
    17  unsigned long pages;  (member)
/kernel/trace/

tracing_map.c
    291  if (!a->pages)  in tracing_map_array_clear()
    295  memset(a->pages[i], 0, PAGE_SIZE);  in tracing_map_array_clear()
    305  if (!a->pages)  in tracing_map_array_free()
    309  if (!a->pages[i])  in tracing_map_array_free()
    311  kmemleak_free(a->pages[i]);  in tracing_map_array_free()
    312  free_page((unsigned long)a->pages[i]);  in tracing_map_array_free()
    315  kfree(a->pages);  in tracing_map_array_free()
    340  if (!a->pages)  in tracing_map_array_alloc()
    344  a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL);  in tracing_map_array_alloc()
    345  if (!a->pages[i])  in tracing_map_array_alloc()
    [all …]
ring_buffer.c
    488  struct list_head *pages;  (member)
    1380  list = cpu_buffer->pages;  in rb_set_head_page()
    2302  LIST_HEAD(pages);  in rb_allocate_pages()
    2314  cpu_buffer->pages = pages.next;  in rb_allocate_pages()
    2315  list_del(&pages);  in rb_allocate_pages()
    2845  first_page = pages->next;  in rb_insert_pages()
    2846  last_page = pages->prev;  in rb_insert_pages()
    2870  INIT_LIST_HEAD(pages);  in rb_insert_pages()
    7159  pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);  in __rb_map_vma()
    7160  if (!pages)  in __rb_map_vma()
    [all …]
tracing_map.h
    170  void **pages;  (member)
    174  (array->pages[idx >> array->entry_shift] + \
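Together with the tracing_map.c hits above, line 174 shows a two-level array: a table of page-sized chunks plus a macro that splits an entry index into (page, slot) with one shift and one mask. A userspace sketch of that indexing scheme; the field names follow the snippet, but the entry size, shift values, and struct layout here are illustrative assumptions.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

struct map_array {
	unsigned int entry_shift;   /* log2(entries per page) */
	unsigned int entries_mask;  /* entries per page - 1 */
	unsigned int entry_size;
	void **pages;
};

/* split idx into (page, slot) with one shift and one mask */
#define ARRAY_ELT(a, idx)                                  \
	((char *)(a)->pages[(idx) >> (a)->entry_shift] +   \
	 ((idx) & (a)->entries_mask) * (a)->entry_size)

int main(void)
{
	struct map_array a = { .entry_shift = 6,    /* 64 entries per page */
			       .entries_mask = 63,
			       .entry_size = 64 };
	unsigned int n_pages = 4;                   /* 256 entries total */

	a.pages = calloc(n_pages, sizeof(void *));
	if (!a.pages)
		return 1;
	for (unsigned int i = 0; i < n_pages; i++)
		if (!(a.pages[i] = calloc(1, PAGE_SIZE)))  /* get_zeroed_page() analogue */
			return 1;

	strcpy(ARRAY_ELT(&a, 130), "entry 130");    /* page 2, slot 2 */
	printf("%s\n", (char *)ARRAY_ELT(&a, 130));
	return 0;
}
```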
ftrace.c
    611  int pages;  in ftrace_profile_pages_init() (local)
    615  if (stat->pages)  in ftrace_profile_pages_init()
    619  if (!stat->pages)  in ftrace_profile_pages_init()
    657  stat->pages = NULL;  in ftrace_profile_pages_init()
    765  stat->pages = stat->pages->next;  in ftrace_profile_alloc()
    768  rec = &stat->pages->records[stat->pages->index++];  in ftrace_profile_alloc()
    3801  int pages;  in ftrace_allocate_records() (local)
    3843  pages = pg->next;  in ftrace_free_pages()
    3845  pg = pages;  in ftrace_free_pages()
    7112  unsigned long pages;  in ftrace_process_locs() (local)
    [all …]
trace.c
    6773  __free_page(spd->pages[idx]);  in tracing_spd_release_pipe()
    6833  .pages = pages_def,  in tracing_splice_read_pipe()
    6870  spd.pages[i] = alloc_page(GFP_KERNEL);  in tracing_splice_read_pipe()
    6871  if (!spd.pages[i])  in tracing_splice_read_pipe()
    6878  page_address(spd.pages[i]),  in tracing_splice_read_pipe()
    6882  __free_page(spd.pages[i]);  in tracing_splice_read_pipe()
    8402  .pages = pages_def,  in tracing_buffers_splice_read()
    8468  spd.pages[i] = page;  in tracing_buffers_splice_read()
    9446  int pages;  in buffer_subbuf_size_write() (local)
    9455  pages = DIV_ROUND_UP(val, PAGE_SIZE);  in buffer_subbuf_size_write()
    [all …]
/kernel/sched/

fair.c
    3222  p->numa_pages_migrated += pages;  in task_numa_fault()
    3224  p->numa_faults_locality[2] += pages;  in task_numa_fault()
    3228  p->numa_faults_locality[local] += pages;  in task_numa_fault()
    3297  long pages, virtpages;  in task_numa_work() (local)
    3352  pages = sysctl_numa_balancing_scan_size;  in task_numa_work()
    3353  pages <<= 20 - PAGE_SHIFT; /* MB in pages */  in task_numa_work()
    3354  virtpages = pages * 8; /* Scan up to this much virtual space */  in task_numa_work()
    3355  if (!pages)  in task_numa_work()
    3476  end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);  in task_numa_work()
    3489  pages -= (end - start) >> PAGE_SHIFT;  in task_numa_work()
    [all …]
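Line 3353 is a compact unit conversion: the NUMA balancing scan-size sysctl is expressed in MB, so shifting left by (20 - PAGE_SHIFT) turns MB directly into pages, and line 3354 sets the virtual-address scan budget at eight times that. A standalone rendering of the arithmetic, with PAGE_SHIFT assumed to be 12 (4 KiB pages):

```c
#include <stdio.h>

#define PAGE_SHIFT 12   /* assuming 4 KiB pages */

int main(void)
{
	long scan_size_mb = 256;   /* stands in for sysctl_numa_balancing_scan_size */
	long pages = scan_size_mb << (20 - PAGE_SHIFT);   /* MB in pages */
	long virtpages = pages * 8;                       /* virtual-space scan budget */

	printf("%ld MB -> %ld pages, up to %ld virtual pages scanned\n",
	       scan_size_mb, pages, virtpages);
	return 0;
}
```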