/mm/
cma_debug.c
  163  struct dentry *tmp, *dir, *rangedir;  in cma_debugfs_add_one() local
  168  tmp = debugfs_create_dir(cma->name, root_dentry);  in cma_debugfs_add_one()
  170  debugfs_create_file("alloc", 0200, tmp, cma, &cma_alloc_fops);  in cma_debugfs_add_one()
  171  debugfs_create_file("free", 0200, tmp, cma, &cma_free_fops);  in cma_debugfs_add_one()
  172  debugfs_create_file("count", 0444, tmp, &cma->count, &cma_debugfs_fops);  in cma_debugfs_add_one()
  173  debugfs_create_file("order_per_bit", 0444, tmp,  in cma_debugfs_add_one()
  175  debugfs_create_file("used", 0444, tmp, cma, &cma_used_fops);  in cma_debugfs_add_one()
  176  debugfs_create_file("maxchunk", 0444, tmp, cma, &cma_maxchunk_fops);  in cma_debugfs_add_one()
  178  rangedir = debugfs_create_dir("ranges", tmp);  in cma_debugfs_add_one()
  196  debugfs_create_symlink("base_pfn", tmp, "ranges/0/base_pfn");  in cma_debugfs_add_one()
  [all …]
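The cma_debugfs_add_one() lines above follow the usual debugfs pattern: create a directory, keep the returned dentry in a temporary, then hang files and subdirectories off it. The sketch below is a hypothetical out-of-tree module (the names tmp_demo and demo_value are invented), not the CMA code; it only illustrates the directory/file calls, substituting debugfs_create_u32() for CMA's custom file_operations, and it builds only against a kernel tree.

/*
 * Minimal, hypothetical debugfs sketch: one directory with one read-only
 * u32 file, mirroring the dir-then-files shape of cma_debugfs_add_one().
 */
#include <linux/module.h>
#include <linux/debugfs.h>

static struct dentry *demo_dir;
static u32 demo_value = 42;

static int __init tmp_demo_init(void)
{
	struct dentry *tmp;

	/* Create the directory first, then hang files off of it. */
	tmp = debugfs_create_dir("tmp_demo", NULL);

	/* Read-only file exposing demo_value, analogous to the "count" file. */
	debugfs_create_u32("value", 0444, tmp, &demo_value);

	demo_dir = tmp;
	return 0;
}

static void __exit tmp_demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(tmp_demo_init);
module_exit(tmp_demo_exit);
MODULE_LICENSE("GPL");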
|
mmap.c
  1636  if (tmp > 0 && tmp < SZ_128K)  in reserve_mem_notifier()
  1641  if (tmp > 0 && tmp < SZ_8K)  in reserve_mem_notifier()
  1780  tmp = vm_area_dup(mpnt);  in dup_mmap()
  1781  if (!tmp)  in dup_mmap()
  1786  tmp->vm_mm = mm;  in dup_mmap()
  1796  tmp->anon_vma = NULL;  in dup_mmap()
  1814  if (tmp->vm_ops && tmp->vm_ops->open)  in dup_mmap()
  1815  tmp->vm_ops->open(tmp);  in dup_mmap()
  1817  file = tmp->vm_file;  in dup_mmap()
  1882  mpol_put(vma_policy(tmp));  in dup_mmap()
  [all …]
|
hugetlb_cma.c
  95  unsigned long tmp;  in cmdline_parse_hugetlb_cma() local
  99  if (sscanf(s, "%lu%n", &tmp, &count) != 1)  in cmdline_parse_hugetlb_cma()
  103  if (tmp >= MAX_NUMNODES)  in cmdline_parse_hugetlb_cma()
  105  nid = array_index_nospec(tmp, MAX_NUMNODES);  in cmdline_parse_hugetlb_cma()
  108  tmp = memparse(s, &s);  in cmdline_parse_hugetlb_cma()
  109  hugetlb_cma_size_in_node[nid] = tmp;  in cmdline_parse_hugetlb_cma()
  110  hugetlb_cma_size += tmp;  in cmdline_parse_hugetlb_cma()
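cmdline_parse_hugetlb_cma() reads an optional "<node>:" prefix with sscanf("%lu%n"), bounds-checks the node number, and hands the remainder to memparse() for the size. A userspace sketch of that parsing, with memparse_lite() as an invented, simplified stand-in for the kernel's memparse() (assumption: only K/M/G suffixes), might look like:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for memparse(): number plus optional K/M/G suffix. */
static unsigned long long memparse_lite(const char *s, char **end)
{
	unsigned long long v = strtoull(s, end, 0);

	switch (**end) {
	case 'G': v <<= 10; /* fall through */
	case 'M': v <<= 10; /* fall through */
	case 'K': v <<= 10; (*end)++; break;
	}
	return v;
}

int main(void)
{
	const char *arg = "2:1G";   /* e.g. hugetlb_cma=2:1G */
	unsigned long node;
	int count = 0;
	char *end;

	/* %n reports how many characters the node number consumed. */
	if (sscanf(arg, "%lu%n", &node, &count) != 1 || arg[count] != ':')
		return 1;
	printf("node %lu, %llu bytes\n",
	       node, memparse_lite(arg + count + 1, &end));
	return 0;
}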
|
mincore.c
  252  unsigned char *tmp;  in SYSCALL_DEFINE3() local
  271  tmp = (void *) __get_free_page(GFP_USER);  in SYSCALL_DEFINE3()
  272  if (!tmp)  in SYSCALL_DEFINE3()
  282  retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);  in SYSCALL_DEFINE3()
  287  if (copy_to_user(vec, tmp, retval)) {  in SYSCALL_DEFINE3()
  296  free_page((unsigned long) tmp);  in SYSCALL_DEFINE3()
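In the mincore(2) syscall above, tmp is a single bounce page: do_mincore() fills it one chunk at a time and copy_to_user() pushes each chunk out, so the kernel never allocates a result vector proportional to the whole range. The caller-side view of the same chunking, written as ordinary userspace C (page count and chunk size are arbitrary choices for the example):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t npages = 8;
	size_t chunk = 4;                        /* pages probed per mincore() call */
	unsigned char *tmp = malloc(chunk);      /* one result byte per page */
	char *addr = mmap(NULL, npages * page, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (!tmp || addr == MAP_FAILED)
		return 1;

	memset(addr, 0, (npages / 2) * page);    /* fault in the first half only */

	for (size_t done = 0; done < npages; done += chunk) {
		size_t n = npages - done < chunk ? npages - done : chunk;

		if (mincore(addr + done * page, n * page, tmp))
			return 1;
		for (size_t i = 0; i < n; i++)
			printf("page %zu: %s\n", done + i,
			       (tmp[i] & 1) ? "resident" : "not resident");
	}

	free(tmp);
	munmap(addr, npages * page);
	return 0;
}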
|
mlock.c
  517  unsigned long nstart, end, tmp;  in apply_vma_lock_flags() local
  537  tmp = vma->vm_start;  in apply_vma_lock_flags()
  542  if (vma->vm_start != tmp)  in apply_vma_lock_flags()
  548  tmp = vma->vm_end;  in apply_vma_lock_flags()
  549  if (tmp > end)  in apply_vma_lock_flags()
  550  tmp = end;  in apply_vma_lock_flags()
  551  error = mlock_fixup(&vmi, vma, &prev, nstart, tmp, newflags);  in apply_vma_lock_flags()
  554  tmp = vma_iter_end(&vmi);  in apply_vma_lock_flags()
  555  nstart = tmp;  in apply_vma_lock_flags()
  558  if (tmp < end)  in apply_vma_lock_flags()
|
mprotect.c
  864  unsigned long nstart, end, tmp, reqprot;  in do_mprotect_pkey() local
  933  tmp = vma->vm_start;  in do_mprotect_pkey()
  939  if (vma->vm_start != tmp) {  in do_mprotect_pkey()
  980  tmp = vma->vm_end;  in do_mprotect_pkey()
  981  if (tmp > end)  in do_mprotect_pkey()
  982  tmp = end;  in do_mprotect_pkey()
  985  error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags);  in do_mprotect_pkey()
  990  error = mprotect_fixup(&vmi, &tlb, vma, &prev, nstart, tmp, newflags);  in do_mprotect_pkey()
  994  tmp = vma_iter_end(&vmi);  in do_mprotect_pkey()
  995  nstart = tmp;  in do_mprotect_pkey()
  [all …]
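apply_vma_lock_flags() (mlock.c, above) and do_mprotect_pkey() share the same walk: tmp is the current VMA's end clamped to the requested end, the fixup is applied to [nstart, tmp), nstart then advances to tmp, and a hole between nstart and the next VMA aborts the walk. A stand-alone sketch of that loop follows; struct region and apply() are invented for illustration and are not kernel API:

#include <stdio.h>

struct region {
	unsigned long start;
	unsigned long end;
};

static int apply(const struct region *r, unsigned long from, unsigned long to)
{
	printf("fixup [%lx, %lx) within region [%lx, %lx)\n",
	       from, to, r->start, r->end);
	return 0;
}

static int walk_range(struct region *regions, int nr,
		      unsigned long start, unsigned long end)
{
	unsigned long nstart = start;

	for (int i = 0; i < nr && nstart < end; i++) {
		struct region *r = &regions[i];
		unsigned long tmp;

		if (r->end <= nstart)
			continue;              /* entirely below the range */
		if (r->start >= end)
			break;                 /* entirely above the range */
		if (r->start > nstart)
			return -1;             /* hole in the range (kernel: -ENOMEM) */

		tmp = r->end;
		if (tmp > end)
			tmp = end;             /* clamp to the requested end */
		if (apply(r, nstart, tmp))
			return -1;
		nstart = tmp;                  /* resume where the fixup stopped */
	}
	return nstart < end ? -1 : 0;          /* range not fully covered */
}

int main(void)
{
	struct region regions[] = { { 0x1000, 0x3000 }, { 0x3000, 0x6000 } };

	return walk_range(regions, 2, 0x2000, 0x5000);
}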
|
balloon_compaction.c
  44  struct page *page, *tmp;  in balloon_page_list_enqueue() local
  49  list_for_each_entry_safe(page, tmp, pages, lru) {  in balloon_page_list_enqueue()
  80  struct page *page, *tmp;  in balloon_page_list_dequeue() local
  85  list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {  in balloon_page_list_dequeue()
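Most of the *tmp pointers in this listing exist for list_for_each_entry_safe(): the macro caches the next element in tmp so the current one can be freed or moved to another list mid-walk (balloon_compaction.c here, and dmapool.c, khugepaged.c, memcontrol-v1.c, zsmalloc.c, huge_memory.c below all use it this way). A self-contained userspace illustration of why the second cursor is needed, using a plain singly-linked list as a stand-in for the kernel's list_head:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int value;
	struct node *next;
};

static struct node *push(struct node *head, int value)
{
	struct node *n = malloc(sizeof(*n));

	n->value = value;
	n->next = head;
	return n;
}

int main(void)
{
	struct node *head = NULL, *pos, *tmp;

	for (int i = 0; i < 5; i++)
		head = push(head, i);

	/* Free every node: read pos->next into tmp *before* freeing pos. */
	for (pos = head; pos; pos = tmp) {
		tmp = pos->next;
		printf("freeing %d\n", pos->value);
		free(pos);
	}
	return 0;
}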
|
vma.c
  2845  struct vm_area_struct *tmp;  in unmapped_area() local
  2869  tmp = vma_next(&vmi);  in unmapped_area()
  2870  if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */  in unmapped_area()
  2872  low_limit = tmp->vm_end;  in unmapped_area()
  2877  tmp = vma_prev(&vmi);  in unmapped_area()
  2878  if (tmp && vm_end_gap(tmp) > gap) {  in unmapped_area()
  2902  struct vm_area_struct *tmp;  in unmapped_area_topdown() local
  2921  tmp = vma_next(&vmi);  in unmapped_area_topdown()
  2922  if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */  in unmapped_area_topdown()
  2929  tmp = vma_prev(&vmi);  in unmapped_area_topdown()
  [all …]
|
vmalloc.c
  1090  struct vmap_area *tmp;  in __find_vmap_area_exceed_addr() local
  1094  va = tmp;  in __find_vmap_area_exceed_addr()
  1892  tmp = NULL;  in preload_this_cpu_lock()
  2723  struct vmap_block *tmp;  in free_vmap_block() local
  2728  BUG_ON(tmp != vb);  in free_vmap_block()
  3104  for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {  in vm_area_add_early()
  3109  BUG_ON(tmp->addr + tmp->size > vm->addr);  in vm_area_add_early()
  4627  va = tmp;  in pvm_find_va_enclose_addr()
  5241  struct vm_struct *tmp;  in vmalloc_init() local
  5268  for (tmp = vmlist; tmp; tmp = tmp->next) {  in vmalloc_init()
  [all …]
|
mempolicy.c
  371  nodemask_t tmp;  in mpol_relative_nodemask() local
  373  nodes_onto(*ret, tmp, *rel);  in mpol_relative_nodemask()
  497  nodemask_t tmp;  in mpol_rebind_nodemask() local
  509  if (nodes_empty(tmp))  in mpol_rebind_nodemask()
  510  tmp = *nodes;  in mpol_rebind_nodemask()
  512  pol->nodes = tmp;  in mpol_rebind_nodemask()
  1266  nodemask_t tmp;  in do_migrate_pages() local
  1301  tmp = *from;  in do_migrate_pages()
  1302  while (!nodes_empty(tmp)) {  in do_migrate_pages()
  1307  for_each_node_mask(s, tmp) {  in do_migrate_pages()
  [all …]
|
show_mem.c
  149  char tmp[MIGRATE_TYPES + 1];  in show_migration_types() local
  150  char *p = tmp;  in show_migration_types()
  159  printk(KERN_CONT "(%s) ", tmp);  in show_migration_types()
|
kmemleak.c
  523  struct hlist_node *tmp;  in free_object_rcu() local
  532  hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {  in free_object_rcu()
  1622  struct kmemleak_object *object, *tmp;  in scan_gray_list() local
  1637  tmp = list_entry(object->gray_list.next, typeof(*object),  in scan_gray_list()
  1644  object = tmp;  in scan_gray_list()
  2186  struct kmemleak_object *object, *tmp;  in __kmemleak_do_cleanup() local
  2193  list_for_each_entry_safe(object, tmp, &object_list, object_list) {  in __kmemleak_do_cleanup()
|
hugetlb.c
  2402  struct folio *folio, *tmp;  in gather_surplus_pages() local
  4822  unsigned long tmp;  in hugepages_setup() local
  4857  if (tmp >= MAX_NUMNODES || !node_online(tmp))  in hugepages_setup()
  4868  *mhp += tmp;  in hugepages_setup()
  4877  *mhp = tmp;  in hugepages_setup()
  5110  &tmp);  in hugetlb_sysctl_handler_common()
  5116  NUMA_NO_NODE, tmp, *length);  in hugetlb_sysctl_handler_common()
  5142  unsigned long tmp;  in hugetlb_overcommit_handler() local
  5148  tmp = h->nr_overcommit_huge_pages;  in hugetlb_overcommit_handler()
  5154  &tmp);  in hugetlb_overcommit_handler()
  [all …]
|
sparse.c
  697  DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };  in clear_subsection_map()
  704  bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);  in clear_subsection_map()
  706  if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),  in clear_subsection_map()
|
dmapool.c
  365  struct dma_page *page, *tmp;  in dma_pool_destroy() local
  385  list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {  in dma_pool_destroy()
|
khugepaged.c
  528  struct folio *folio, *tmp;  in release_pte_pages() local
  545  list_for_each_entry_safe(folio, tmp, compound_pagelist, lru) {  in release_pte_pages()
  704  struct folio *src, *tmp;  in __collapse_huge_page_copy_succeeded() local
  751  list_for_each_entry_safe(src, tmp, compound_pagelist, lru) {  in __collapse_huge_page_copy_succeeded()
  1853  struct folio *folio, *tmp, *new_folio;  in collapse_file() local
  2228  list_for_each_entry_safe(folio, tmp, &pagelist, lru) {  in collapse_file()
  2248  list_for_each_entry_safe(folio, tmp, &pagelist, lru) {  in collapse_file()
|
memory_hotplug.c
  2365  uint8_t *online_types, *tmp;  in offline_and_remove_memory() local
  2390  tmp = online_types;  in offline_and_remove_memory()
  2391  rc = walk_memory_blocks(start, size, &tmp, try_offline_memory_block);  in offline_and_remove_memory()
  2408  tmp = online_types;  in offline_and_remove_memory()
  2409  walk_memory_blocks(start, size, &tmp,  in offline_and_remove_memory()
|
memory.c
  3753  vm_fault_t tmp;  in wp_page_shared() local
  3756  tmp = vmf_can_call_fault(vmf);  in wp_page_shared()
  3757  if (tmp) {  in wp_page_shared()
  3759  return tmp;  in wp_page_shared()
  3762  tmp = do_page_mkwrite(vmf, folio);  in wp_page_shared()
  3763  if (unlikely(!tmp || (tmp &  in wp_page_shared()
  3766  return tmp;  in wp_page_shared()
  3772  return tmp;  in wp_page_shared()
  5630  vm_fault_t ret, tmp;  in do_shared_fault() local
  5650  if (unlikely(!tmp ||  in do_shared_fault()
  [all …]
|
page_alloc.c
  1549  struct page *p, *tmp;  in free_one_page() local
  1552  llist_for_each_entry_safe(p, tmp, llnode, pcp_llist) {  in free_one_page()
  6310  u64 tmp;  in __setup_per_zone_wmarks() local
  6313  tmp = (u64)pages_min * zone_managed_pages(zone);  in __setup_per_zone_wmarks()
  6314  tmp = div64_ul(tmp, lowmem_pages);  in __setup_per_zone_wmarks()
  6335  zone->_watermark[WMARK_MIN] = tmp;  in __setup_per_zone_wmarks()
  6343  tmp = max_t(u64, tmp >> 2,  in __setup_per_zone_wmarks()
  6348  zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;  in __setup_per_zone_wmarks()
  6349  zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp;  in __setup_per_zone_wmarks()
  6350  zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp;  in __setup_per_zone_wmarks()
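In __setup_per_zone_wmarks(), each zone's minimum watermark is its proportional share of pages_min, with the multiply done in 64 bits (div64_ul()) so that pages_min * managed_pages cannot overflow on 32-bit; the low/high/promo marks are then built by adding an increment of roughly a quarter of the minimum. The arithmetic sketch below uses made-up zone sizes and omits the max_t() clamp from line 6343:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pages_min = 11 * 1024;            /* sample global minimum, in pages */
	uint64_t lowmem_pages = 4ULL * 1024 * 1024;/* sample total managed lowmem pages */
	uint64_t zone_managed[] = { 1024 * 1024, 3ULL * 1024 * 1024 };

	for (int i = 0; i < 2; i++) {
		/* 64-bit intermediate: managed * pages_min would overflow 32-bit. */
		uint64_t tmp = pages_min * zone_managed[i];

		tmp /= lowmem_pages;               /* this zone's share of pages_min */
		printf("zone %d: min=%llu low=%llu high=%llu\n", i,
		       (unsigned long long)tmp,
		       (unsigned long long)(tmp + (tmp >> 2)),
		       (unsigned long long)(tmp + 2 * (tmp >> 2)));
	}
	return 0;
}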
|
memcontrol-v1.c
  947  struct mem_cgroup_eventfd_list *ev, *tmp;  in mem_cgroup_oom_unregister_event() local
  951  list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {  in mem_cgroup_oom_unregister_event()
  1203  struct mem_cgroup_event *event, *tmp;  in memcg1_css_offline() local
  1211  list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {  in memcg1_css_offline()
|
shmem.c
  2283  swp_entry_t tmp;  in shmem_split_large_entry() local
  2285  tmp = swp_entry(swp_type(swap),  in shmem_split_large_entry()
  2289  swp_to_radix_entry(tmp), 0);  in shmem_split_large_entry()
  5546  char tmp[16];  in shmem_enabled_store() local
  5549  if (count + 1 > sizeof(tmp))  in shmem_enabled_store()
  5551  memcpy(tmp, buf, count);  in shmem_enabled_store()
  5552  tmp[count] = '\0';  in shmem_enabled_store()
  5553  if (count && tmp[count - 1] == '\n')  in shmem_enabled_store()
  5554  tmp[count - 1] = '\0';  in shmem_enabled_store()
  5556  huge = shmem_parse_huge(tmp);  in shmem_enabled_store()
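shmem_enabled_store() shows the standard sysfs-store buffer dance: reject overlong input, copy it into a small stack buffer, NUL-terminate, strip the trailing newline that echo appends, and only then parse. A userspace sketch of the same steps, where parse_mode() is an invented stand-in for shmem_parse_huge():

#include <stdio.h>
#include <string.h>

static int parse_mode(const char *s)
{
	if (!strcmp(s, "always"))
		return 1;
	if (!strcmp(s, "never"))
		return 0;
	return -1;
}

static int handle_store(const char *buf, size_t count)
{
	char tmp[16];

	if (count + 1 > sizeof(tmp))
		return -1;                 /* too long: reject rather than truncate */
	memcpy(tmp, buf, count);
	tmp[count] = '\0';                 /* input is not NUL-terminated */
	if (count && tmp[count - 1] == '\n')
		tmp[count - 1] = '\0';     /* "echo always > ..." adds a newline */
	return parse_mode(tmp);
}

int main(void)
{
	printf("%d\n", handle_store("always\n", 7));
	printf("%d\n", handle_store("never", 5));
	return 0;
}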
|
zsmalloc.c
  1837  struct zspage *zspage, *tmp;  in async_free_zspage() local
  1853  list_for_each_entry_safe(zspage, tmp, &free_pages, list) {  in async_free_zspage()
|
memblock.c
  2688  phys_addr_t start, size, align, tmp;  in reserve_mem() local
  2735  if (reserve_mem_find_by_name(name, &start, &tmp))  in reserve_mem()
|
huge_memory.c
  828  struct thpsize *thpsize, *tmp;  in hugepage_exit_sysfs() local
  830  list_for_each_entry_safe(thpsize, tmp, &thpsize_list, node) {  in hugepage_exit_sysfs()
|
/mm/kmsan/
init.c
  183  struct smallstack tmp = {  in collect_split() local
  193  smallstack_push(&tmp, &page[0]);  in collect_split()
  194  smallstack_push(&tmp, &page[1 << tmp.order]);  in collect_split()
  196  __memcpy(&collect, &tmp, sizeof(tmp));  in collect_split()
|