/mm/

percpu-vm.c
    34  static struct page **pages;    in pcpu_get_pages() (local)
    39  if (!pages)    in pcpu_get_pages()
    41  return pages;    in pcpu_get_pages()
    165  pages[pcpu_page_idx(cpu, i)] = page;    in pcpu_unmap_pages()
    197  PAGE_KERNEL, pages, PAGE_SHIFT);    in __pcpu_map_pages()
    279  struct page **pages;    in pcpu_populate_chunk() (local)
    281  pages = pcpu_get_pages();    in pcpu_populate_chunk()
    282  if (!pages)    in pcpu_populate_chunk()
    315  struct page **pages;    in pcpu_depopulate_chunk() (local)
    322  pages = pcpu_get_pages();    in pcpu_depopulate_chunk()
    [all …]
|
gup_test.c
    19  put_page(pages[i]);    in put_back_pages()
    32  put_page(pages[i]);    in put_back_pages()
    106  struct page **pages;    in __gup_test_ioctl() (local)
    116  if (!pages)    in __gup_test_ioctl()
    140  pages + i);    in __gup_test_ioctl()
    147  pages + i);    in __gup_test_ioctl()
    155  pages + i);    in __gup_test_ioctl()
    160  pages + i);    in __gup_test_ioctl()
    202  kvfree(pages);    in __gup_test_ioctl()
    249  if (!pages)    in pin_longterm_test_start()
    [all …]
|
gup.c
    416  if (!pages[i]) {    in unpin_user_pages()
    1468  if (pages) {    in __get_user_pages()
    1481  if (pages) {    in __get_user_pages()
    1743  pages += ret;    in __get_user_pages_locked()
    1790  pages++;    in __get_user_pages_locked()
    2040  if (pages) {    in __get_user_pages_locked()
    2042  if (pages[i])    in __get_user_pages_locked()
    2452  .pages = pages,    in check_and_migrate_movable_pages()
    3055  pages, nr))    in gup_fast_pmd_range()
    3108  pages, nr))    in gup_fast_p4d_range()
    [all …]
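
The gup.c hits above are internals of get_user_pages()/pin_user_pages(). As an illustrative aside (not code from gup.c), the sketch below shows roughly how a caller pins and releases a user buffer; `demo_pin_user_buffer()` and its parameters are hypothetical, while `pin_user_pages_fast()` and `unpin_user_pages()` are the real kernel APIs.

```c
/* Sketch: pin a user buffer, use it, then release the pins.
 * demo_pin_user_buffer() is a hypothetical wrapper; user_addr/user_len
 * would normally arrive from an ioctl or similar interface.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>

static int demo_pin_user_buffer(unsigned long user_addr, size_t user_len)
{
	unsigned long nr_pages = DIV_ROUND_UP(user_len + offset_in_page(user_addr),
					      PAGE_SIZE);
	struct page **pages;
	long pinned;

	pages = kvmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* FOLL_WRITE: we intend to write into the user pages. */
	pinned = pin_user_pages_fast(user_addr, nr_pages, FOLL_WRITE, pages);
	if (pinned < 0) {
		kvfree(pages);
		return pinned;
	}

	/* ... access pages[0..pinned-1] while they are pinned ... */

	unpin_user_pages(pages, pinned);
	kvfree(pages);
	return pinned == nr_pages ? 0 : -EFAULT;
}
```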
|
mincore.c
    211  end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));    in do_mincore()
    213  unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);    in do_mincore() (local)
    214  memset(vec, 1, pages);    in do_mincore()
    215  return pages;    in do_mincore()
    251  unsigned long pages;    in SYSCALL_DEFINE3() (local)
    265  pages = len >> PAGE_SHIFT;    in SYSCALL_DEFINE3()
    266  pages += (offset_in_page(len)) != 0;    in SYSCALL_DEFINE3()
    268  if (!access_ok(vec, pages))    in SYSCALL_DEFINE3()
    276  while (pages) {    in SYSCALL_DEFINE3()
    282  retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);    in SYSCALL_DEFINE3()
    [all …]
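
The SYSCALL_DEFINE3() rows implement mincore(2), rounding `len` up to whole pages before walking the range. A hedged userspace sketch of the matching call follows; the mapping size and output format are arbitrary choices, not taken from mincore.c.

```c
/* Userspace sketch: query residency of a mapping with mincore(2).
 * The 64 KiB size is arbitrary; vec needs one byte per page, rounded up
 * exactly as the kernel does on its side.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 64 * 1024;
	long page = sysconf(_SC_PAGESIZE);
	size_t pages = (len + page - 1) / page;	/* round up to whole pages */
	unsigned char *vec = malloc(pages);
	void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (addr == MAP_FAILED || !vec)
		return 1;
	if (mincore(addr, len, vec) == 0)
		for (size_t i = 0; i < pages; i++)
			printf("page %zu: %s\n", i,
			       (vec[i] & 1) ? "resident" : "not resident");
	free(vec);
	munmap(addr, len);
	return 0;
}
```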
|
mprotect.c
    402  pages++;    in change_pte_range()
    416  pages++;    in change_pte_range()
    438  pages++;    in change_pte_range()
    445  return pages;    in change_pte_range()
    529  pages = ret;    in change_pmd_range()
    578  return pages;    in change_pmd_range()
    637  return pages;    in change_pud_range()
    660  return pages;    in change_p4d_range()
    690  return pages;    in change_protection_range()
    698  long pages;    in change_protection() (local)
    [all …]
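
change_pte_range() and its callers count how many PTEs actually had their protection changed and propagate that total up through change_protection(). For context, here is a minimal userspace sketch of the mprotect(2) call that drives this path; the buffer size and protections are arbitrary.

```c
/* Userspace sketch: change protection on one page of an anonymous mapping.
 * On the kernel side, change_protection() (listed above) returns the number
 * of page table entries it actually modified.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *buf = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	buf[0] = 'x';					/* populate the first page */
	if (mprotect(buf, page, PROT_READ) != 0) {	/* drop write on page 0 only */
		perror("mprotect");
		return 1;
	}
	printf("page 0 is now read-only, still readable: %c\n", buf[0]);
	munmap(buf, 4 * page);
	return 0;
}
```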
|
balloon_compaction.c
    42  struct list_head *pages)    in balloon_page_list_enqueue() (argument)
    49  list_for_each_entry_safe(page, tmp, pages, lru) {    in balloon_page_list_enqueue()
    78  struct list_head *pages, size_t n_req_pages)    in balloon_page_list_dequeue() (argument)
    85  list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {    in balloon_page_list_dequeue()
    100  list_add(&page->lru, pages);    in balloon_page_list_dequeue()
    177  LIST_HEAD(pages);    in balloon_page_dequeue()
    180  n_pages = balloon_page_list_dequeue(b_dev_info, &pages, 1);    in balloon_page_dequeue()
    191  if (unlikely(list_empty(&b_dev_info->pages) &&    in balloon_page_dequeue()
    197  return list_first_entry(&pages, struct page, lru);    in balloon_page_dequeue()
    230  list_add(&page->lru, &b_dev_info->pages);    in balloon_page_putback()
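
balloon_page_list_enqueue()/balloon_page_list_dequeue() are the batch API a balloon driver uses around `b_dev_info->pages`. Below is a hedged sketch of the inflate side only, assuming `b_dev_info` was prepared with balloon_devinfo_init(); the `demo_*` wrapper is hypothetical.

```c
/* Sketch: batch-inflate a balloon using the list API referenced above.
 * demo_balloon_inflate() is a hypothetical wrapper; balloon_page_alloc(),
 * balloon_page_push() and balloon_page_list_enqueue() are the real helpers.
 */
#include <linux/balloon_compaction.h>
#include <linux/list.h>

static size_t demo_balloon_inflate(struct balloon_dev_info *b_dev_info,
				   size_t nr_pages)
{
	LIST_HEAD(pages);
	struct page *page;
	size_t i;

	for (i = 0; i < nr_pages; i++) {
		page = balloon_page_alloc();
		if (!page)
			break;
		balloon_page_push(&pages, page);
	}

	/* Moves everything on the local list onto b_dev_info->pages. */
	return balloon_page_list_enqueue(b_dev_info, &pages);
}
```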
|
swap_state.c
    519  pages = hits + 2;    in __swapin_nr_pages()
    520  if (pages == 2) {    in __swapin_nr_pages()
    527  pages = 1;    in __swapin_nr_pages()
    530  while (roundup < pages)    in __swapin_nr_pages()
    532  pages = roundup;    in __swapin_nr_pages()
    535  if (pages > max_pages)    in __swapin_nr_pages()
    536  pages = max_pages;    in __swapin_nr_pages()
    540  if (pages < last_ra)    in __swapin_nr_pages()
    541  pages = last_ra;    in __swapin_nr_pages()
    543  return pages;    in __swapin_nr_pages()
    [all …]
|
page_io.c
    165  sis->pages = page_no - 1;    in generic_swapfile_activate()
    326  int pages;    (member)
    361  for (p = 0; p < sio->pages; p++) {    in sio_write_complete()
    368  for (p = 0; p < sio->pages; p++)    in sio_write_complete()
    396  sio->pages = 0;    in swap_writepage_fs()
    401  sio->pages += 1;    in swap_writepage_fs()
    488  for (p = 0; p < sio->pages; p++) {    in sio_read_complete()
    496  count_vm_events(PSWPIN, sio->pages);    in sio_read_complete()
    498  for (p = 0; p < sio->pages; p++) {    in sio_read_complete()
    558  sio->pages = 0;    in swap_read_folio_fs()
    [all …]
|
percpu-km.c
    57  struct page *pages;    in pcpu_create_chunk() (local)
    65  pages = alloc_pages(gfp, order_base_2(nr_pages));    in pcpu_create_chunk()
    66  if (!pages) {    in pcpu_create_chunk()
    72  pcpu_set_page_chunk(nth_page(pages, i), chunk);    in pcpu_create_chunk()
    74  chunk->data = pages;    in pcpu_create_chunk()
    75  chunk->base_addr = page_address(pages);    in pcpu_create_chunk()
|
mmu_gather.c
    53  struct encoded_page **pages = batch->encoded_pages;    in tlb_flush_rmap_batch() (local)
    56  struct encoded_page *enc = pages[i];    in tlb_flush_rmap_batch()
    64  nr_pages = encoded_nr_pages(pages[++i]);    in tlb_flush_rmap_batch()
    103  struct encoded_page **pages = batch->encoded_pages;    in __tlb_batch_free_encoded_pages() (local)
    114  if (unlikely(encoded_page_flags(pages[nr - 1]) &    in __tlb_batch_free_encoded_pages()
    128  if (unlikely(encoded_page_flags(pages[nr]) &    in __tlb_batch_free_encoded_pages()
    130  nr_pages += encoded_nr_pages(pages[++nr]);    in __tlb_batch_free_encoded_pages()
    136  free_pages_and_swap_cache(pages, nr);    in __tlb_batch_free_encoded_pages()
    137  pages += nr;    in __tlb_batch_free_encoded_pages()
|
util.c
    518  if (locked_vm + pages > limit)    in __account_locked_vm()
    522  mm->locked_vm = locked_vm + pages;    in __account_locked_vm()
    524  WARN_ON_ONCE(pages > locked_vm);    in __account_locked_vm()
    525  mm->locked_vm = locked_vm - pages;    in __account_locked_vm()
    529  (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,    in __account_locked_vm()
    553  if (pages == 0 || !mm)    in account_locked_vm()
    557  ret = __account_locked_vm(mm, pages, inc, current,    in account_locked_vm()
    934  vm_acct_memory(pages);    in __vm_enough_memory()
    943  if (pages > totalram_pages() + total_swap_pages)    in __vm_enough_memory()
    967  bytes_failed = pages << PAGE_SHIFT;    in __vm_enough_memory()
    [all …]
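
__account_locked_vm()/account_locked_vm() charge pinned pages against RLIMIT_MEMLOCK. A sketch of the usual caller pattern follows; the `demo_*` wrapper is hypothetical, while `account_locked_vm(mm, pages, inc)` is the real helper.

```c
/* Sketch: charge pinned pages against RLIMIT_MEMLOCK and uncharge later.
 * account_locked_vm() returns 0 on success or -ENOMEM when the limit
 * would be exceeded; the uncharge mirrors the charge.
 */
#include <linux/mm.h>
#include <linux/sched/mm.h>

static int demo_charge_pinned(struct mm_struct *mm, unsigned long nr_pages)
{
	int ret = account_locked_vm(mm, nr_pages, true);

	if (ret)
		return ret;	/* over RLIMIT_MEMLOCK */

	/* ... keep the pages pinned while the device uses them ... */

	account_locked_vm(mm, nr_pages, false);
	return 0;
}
```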
|
Kconfig
    26  bool "Compressed cache for swap pages"
    31  A lightweight compressed cache for swap pages. It takes
    39  bool "Enable the compressed cache for swap pages by default"
    68  for swap pages.
    134  swap pages.
    175  int "Maximum number of physical pages per-zspage"
    686  pages as migration can relocate pages to satisfy a huge page
    788  tristate "HWPoison pages injector"
    816  of 1 says that all excess pages should be trimmed.
    902  allocations, such as transparent huge pages.
    [all …]
|
vmalloc.c
    532  struct page *page = pages[*nr];    in vmap_pages_pte_range()
    662  page_to_phys(pages[i]), prot,    in __vmap_pages_range_noflush()
    3048  pages, PAGE_SHIFT) < 0) {    in vm_map_ram()
    3307  set_direct_map(area->pages[i]);    in set_area_direct_map()
    3439  kvfree(vm->pages);    in vfree()
    3516  pages, PAGE_SHIFT) < 0) {    in vmap()
    3522  area->pages = pages;    in vmap()
    3618  pages + nr_allocated);    in vm_area_alloc_pages()
    3622  pages + nr_allocated);    in vm_area_alloc_pages()
    3701  if (!area->pages) {    in __vmalloc_area_node()
    [all …]
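
The vmap() rows above map a caller-supplied page array into contiguous kernel virtual address space. The sketch below illustrates that API; it is not code from vmalloc.c, and the `demo_*` helper and its ownership convention are assumptions.

```c
/* Sketch: build a virtually contiguous mapping over individually allocated
 * pages with vmap(), handing the page array back so the caller can vunmap()
 * and free the pages when done.
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *demo_vmap(unsigned int nr_pages, struct page ***pagesp)
{
	struct page **pages;
	void *vaddr;
	unsigned int i;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < nr_pages; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto err;
	}

	/* VM_MAP marks the area as a vmap() mapping; PAGE_KERNEL is normal RW. */
	vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		goto err;

	*pagesp = pages;	/* caller vunmap()s and frees the pages later */
	return vaddr;

err:
	while (i--)
		__free_page(pages[i]);
	kfree(pages);
	return NULL;
}
```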
|
workingset.c
    645  unsigned long pages;    in count_shadow_nodes() (local)
    680  for (pages = 0, i = 0; i < NR_LRU_LISTS; i++)    in count_shadow_nodes()
    681  pages += lruvec_page_state_local(lruvec,    in count_shadow_nodes()
    683  pages += lruvec_page_state_local(    in count_shadow_nodes()
    685  pages += lruvec_page_state_local(    in count_shadow_nodes()
    689  pages = node_present_pages(sc->nid);    in count_shadow_nodes()
    691  max_nodes = pages >> (XA_CHUNK_SHIFT - 3);    in count_shadow_nodes()
|
cma.c
    76  unsigned long pages)    in cma_bitmap_pages_to_bits() (argument)
    78  return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;    in cma_bitmap_pages_to_bits()
    936  bool cma_pages_valid(struct cma *cma, const struct page *pages,    in cma_pages_valid() (argument)
    944  if (!cma || !pages || count > cma->count)    in cma_pages_valid()
    947  pfn = page_to_pfn(pages);    in cma_pages_valid()
    961  __func__, (void *)pages, count);    in cma_pages_valid()
    976  bool cma_release(struct cma *cma, const struct page *pages,    in cma_release() (argument)
    983  pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count);    in cma_release()
    985  if (!cma_pages_valid(cma, pages, count))    in cma_release()
    988  pfn = page_to_pfn(pages);    in cma_release()
    [all …]
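
cma_pages_valid() and cma_release() pair with cma_alloc() for contiguous allocations from a CMA area. A hedged sketch follows, assuming the current cma_alloc(cma, count, align, no_warn) prototype; the `demo_*` wrappers and the origin of the `cma` pointer are assumptions.

```c
/* Sketch: allocate and release a contiguous page range from a CMA area.
 * The cma pointer would normally come from a CMA area set up at boot
 * (e.g. via cma_declare_contiguous()); here it is just a parameter.
 */
#include <linux/cma.h>
#include <linux/printk.h>

static struct page *demo_cma_get(struct cma *cma, unsigned long nr_pages)
{
	/* alignment order 0, warn on allocation failure */
	return cma_alloc(cma, nr_pages, 0, false);
}

static void demo_cma_put(struct cma *cma, struct page *pages,
			 unsigned long nr_pages)
{
	if (!cma_release(cma, pages, nr_pages))
		pr_warn("demo: pages did not belong to this CMA area\n");
}
```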
|
Kconfig.debug
    17  Unmap pages from the kernel linear mapping after free_pages().
    22  pages are being allocated and freed, as unexpected state changes
    29  fill the pages with poison patterns after free_pages() and verify
    33  pages are not saved to the suspend image.
    36  allowing the kernel mapping to be backed by large pages on some
    129  permissions. Check that anonymous and file pages are not being
    148  bool "Poison pages after freeing"
    150  Fill the pages with poison patterns after free_pages() and verify
    159  If you are only interested in sanitization of freed pages without
    200  <arch>/mm: Checked W+X mappings: passed, no W+X pages found.
    [all …]
|
page-writeback.c
    660  if (pages > max_dirty_pages)    in bdi_check_pages_limit()
    775  ret = bdi_check_pages_limit(pages);    in bdi_set_min_bytes()
    779  min_ratio = bdi_ratio_from_pages(pages);    in bdi_set_min_bytes()
    796  ret = bdi_check_pages_limit(pages);    in bdi_set_max_bytes()
    800  max_ratio = bdi_ratio_from_pages(pages);    in bdi_set_max_bytes()
    1603  int pages; /* target nr_dirtied_pause */    in wb_min_pause() (local)
    1646  if (pages < DIRTY_POLL_THRESH) {    in wb_min_pause()
    1649  if (pages > DIRTY_POLL_THRESH) {    in wb_min_pause()
    1650  pages = DIRTY_POLL_THRESH;    in wb_min_pause()
    1655  pause = HZ * pages / (task_ratelimit + 1);    in wb_min_pause()
    [all …]
|
cma_debug.c
    122  int pages = val;    in cma_free_write() (local)
    125  return cma_free_mem(cma, pages);    in cma_free_write()
    154  int pages = val;    in cma_alloc_write() (local)
    157  return cma_alloc_mem(cma, pages);    in cma_alloc_write()
|
swapfile.c
    999  unsigned long pages;    in del_from_avail_list() (local)
    1021  pages = si->pages;    in del_from_avail_list()
    1039  unsigned long pages;    in add_to_avail_list() (local)
    1062  pages = si->pages;    in add_to_avail_list()
    1063  if (val == pages) {    in add_to_avail_list()
    1964  n = sis->pages;    in count_swap_pages()
    2518  *span = sis->pages;    in setup_swap_extents()
    2908  bytes = K(si->pages);    in swap_show()
    3157  si->pages--;    in setup_swap_map()
    3161  if (!si->pages) {    in setup_swap_map()
    [all …]
|
highmem.c
    116  unsigned long pages = 0;    in __nr_free_highpages() (local)
    121  pages += zone_page_state(zone, NR_FREE_PAGES);    in __nr_free_highpages()
    124  return pages;    in __nr_free_highpages()
    129  unsigned long pages = 0;    in __totalhigh_pages() (local)
    134  pages += zone_managed_pages(zone);    in __totalhigh_pages()
    137  return pages;    in __totalhigh_pages()
|
shmem.c
    213  pages * VM_ACCT(PAGE_SIZE));    in shmem_acct_blocks()
    234  sbinfo->max_blocks, pages))    in shmem_inode_acct_blocks()
    480  mapping->nrpages += pages;    in shmem_charge()
    1861  unsigned long pages;    in shmem_suitable_orders() (local)
    1873  pages = 1UL << order;    in shmem_suitable_orders()
    1922  long pages;    in shmem_alloc_and_add_folio() (local)
    1934  pages = 1UL << order;    in shmem_alloc_and_add_folio()
    1940  if (pages == HPAGE_PMD_NR)    in shmem_alloc_and_add_folio()
    1946  pages = 1;    in shmem_alloc_and_add_folio()
    1962  } else if (pages > 1) {    in shmem_alloc_and_add_folio()
    [all …]
|
/mm/kasan/

shadow.c
    297  struct page **pages;    (member)
    312  page = data->pages[index];    in kasan_populate_vmalloc_pte()
    319  data->pages[index] = NULL;    in kasan_populate_vmalloc_pte()
    331  if (pages[i]) {    in ___free_pages_bulk()
    332  __free_pages(pages[i], 0);    in ___free_pages_bulk()
    333  pages[i] = NULL;    in ___free_pages_bulk()
    341  struct page **page_array = pages;    in ___alloc_pages_bulk()
    349  pages += nr_populated;    in ___alloc_pages_bulk()
    363  if (!data.pages)    in __kasan_populate_vmalloc()
    375  ___free_pages_bulk(data.pages, nr_pages);    in __kasan_populate_vmalloc()
    [all …]
|
/mm/kmsan/

shadow.c
    175  int pages = 1 << order;    in kmsan_alloc_page() (local)
    184  __memset(page_address(shadow), 0, PAGE_SIZE * pages);    in kmsan_alloc_page()
    185  __memset(page_address(origin), 0, PAGE_SIZE * pages);    in kmsan_alloc_page()
    193  __memset(page_address(shadow), -1, PAGE_SIZE * pages);    in kmsan_alloc_page()
    201  for (int i = 0; i < PAGE_SIZE * pages / sizeof(handle); i++)    in kmsan_alloc_page()
    217  pgprot_t prot, struct page **pages,    in kmsan_vmap_pages_range_noflush() (argument)
    240  s_pages[i] = shadow_page_for(pages[i]);    in kmsan_vmap_pages_range_noflush()
    241  o_pages[i] = origin_page_for(pages[i]);    in kmsan_vmap_pages_range_noflush()
|
kmsan_test.c
    320  struct page **pages;    in test_init_kmsan_vmap_vunmap() (local)
    325  pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);    in test_init_kmsan_vmap_vunmap()
    327  pages[i] = alloc_page(GFP_KERNEL);    in test_init_kmsan_vmap_vunmap()
    328  vbuf = vmap(pages, npages, VM_MAP, PAGE_KERNEL);    in test_init_kmsan_vmap_vunmap()
    331  kmsan_check_memory(page_address(pages[i]), PAGE_SIZE);    in test_init_kmsan_vmap_vunmap()
    336  if (pages[i])    in test_init_kmsan_vmap_vunmap()
    337  __free_page(pages[i]);    in test_init_kmsan_vmap_vunmap()
    339  kfree(pages);    in test_init_kmsan_vmap_vunmap()
|
/mm/kfence/

core.c
    598  struct page *pages;    in kfence_init_pool() (local)
    605  pages = virt_to_page(__kfence_pool);    in kfence_init_pool()
    616  struct slab *slab = page_slab(nth_page(pages, i));    in kfence_init_pool()
    668  struct slab *slab = page_slab(nth_page(pages, i));    in kfence_init_pool()
    935  struct page *pages;    in kfence_init_late() (local)
    937  pages = alloc_contig_pages(nr_pages_pool, GFP_KERNEL, first_online_node,    in kfence_init_late()
    939  if (!pages)    in kfence_init_late()
    942  __kfence_pool = page_to_virt(pages);    in kfence_init_late()
    943  pages = alloc_contig_pages(nr_pages_meta, GFP_KERNEL, first_online_node,    in kfence_init_late()
    945  if (pages)    in kfence_init_late()
    [all …]
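
kfence_init_late() obtains its pool with alloc_contig_pages(). A hedged sketch of that allocator follows (it requires CONFIG_CONTIG_ALLOC; the `demo_*` wrappers are hypothetical).

```c
/* Sketch: grab a physically contiguous run of pages the way kfence_init_late()
 * does, then return it with free_contig_range(). Only available when
 * CONFIG_CONTIG_ALLOC is enabled.
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/nodemask.h>

static void *demo_contig_alloc(unsigned long nr_pages)
{
	struct page *pages = alloc_contig_pages(nr_pages, GFP_KERNEL,
						first_online_node, NULL);

	if (!pages)
		return NULL;
	return page_to_virt(pages);
}

static void demo_contig_free(void *addr, unsigned long nr_pages)
{
	free_contig_range(page_to_pfn(virt_to_page(addr)), nr_pages);
}
```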
|