/mm/
memblock.c
   545  if (this->base + this->size != next->base ||               in memblock_merge_regions()
   549  BUG_ON(this->base + this->size > next->base);              in memblock_merge_regions()
   584  rgn->base = base;                                          in memblock_insert_region()
   614  phys_addr_t end = base + memblock_cap_size(base, &size);   in memblock_add_range()
   624  type->regions[0].base = base;                              in memblock_add_range()
   649  base = obase;                                              in memblock_add_range()
   690  memblock_insert_region(type, idx, base, end - base,        in memblock_add_range()
   814  phys_addr_t end = base + memblock_cap_size(base, &size);   in memblock_isolate_range()
   842  rgn->base = base;                                          in memblock_isolate_range()
  1391  if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))        in __next_mem_pfn_range()
  [all …]
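The merge logic referenced above folds two regions whenever one ends exactly where the next begins. A minimal userspace sketch of that adjacency test (the struct and helper below are simplified stand-ins, not memblock's actual types):

```c
#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-in for memblock's region descriptor. */
struct region { unsigned long base, size; };

/* Merge physically adjacent regions in a sorted array, mirroring the
 * this->base + this->size == next->base test in memblock_merge_regions(). */
static int merge_regions(struct region *r, int n)
{
	int i = 0, j;

	for (j = 1; j < n; j++) {
		/* regions are sorted and non-overlapping, as the BUG_ON asserts */
		assert(r[i].base + r[i].size <= r[j].base);
		if (r[i].base + r[i].size == r[j].base)
			r[i].size += r[j].size;	/* adjacent: extend current region */
		else
			r[++i] = r[j];		/* gap: start a new region */
	}
	return i + 1;
}

int main(void)
{
	struct region r[] = {
		{ 0x1000, 0x1000 }, { 0x2000, 0x1000 }, { 0x5000, 0x1000 },
	};
	int n = merge_regions(r, 3);

	for (int i = 0; i < n; i++)
		printf("region %d: base=0x%lx size=0x%lx\n", i, r[i].base, r[i].size);
	return 0;	/* two regions remain: [0x1000,0x3000) and [0x5000,0x6000) */
}
```

The BUG_ON at line 549 corresponds to the assert here: anything beyond exact adjacency would mean the region array is corrupt.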
|
cma.c
   311  phys_addr_t base;                                          member
   330  return mlp->base < mrp->base;                              in basecmp()
   365  if (base < highmem_start && base + size > highmem_start) { in cma_fixed_reserve()
   463  base = ALIGN(base, alignment);                             in __cma_declare_contiguous_nid()
   467  if (!base)                                                 in __cma_declare_contiguous_nid()
   495  base = cma_alloc_mem(base, size, alignment, limit, nid);   in __cma_declare_contiguous_nid()
   496  if (!base)                                                 in __cma_declare_contiguous_nid()
   513  *basep = base;                                             in __cma_declare_contiguous_nid()
   616  (u64)mrp->base, (u64)mrp->base + size);                    in cma_declare_contiguous_multi()
   626  (u64)mrp->base, (u64)mrp->base + size);                    in cma_declare_contiguous_multi()
  [all …]
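The ALIGN() at line 463 rounds the requested base up to the CMA alignment, and the test at line 365 rejects a fixed reservation that would straddle the lowmem/highmem boundary. A hedged sketch of both, with invented addresses:

```c
#include <stdio.h>

/* Kernel-style ALIGN: round x up to the next multiple of a
 * (a must be a power of two). */
#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((unsigned long long)(a) - 1))

int main(void)
{
	unsigned long long base = 0x12345000, alignment = 0x400000; /* 4 MiB */
	unsigned long long size = 0x800000, highmem_start = 0x12800000;

	base = ALIGN(base, alignment);
	printf("aligned base: %#llx\n", base);	/* 0x12400000 */

	/* the straddle test from cma_fixed_reserve(): a fixed reservation
	 * must sit entirely below or entirely above highmem_start */
	if (base < highmem_start && base + size > highmem_start)
		printf("reservation straddles highmem_start, reject\n");
	return 0;
}
```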
|
page_ext.c
   172  struct page_ext *base;                                     in lookup_page_ext() local
   182  if (unlikely(!base))                                       in lookup_page_ext()
   191  struct page_ext *base;                                     in alloc_node_page_ext() local
   213  if (!base)                                                 in alloc_node_page_ext()
   286  struct page_ext *base;                                     in init_section_page_ext() local
   302  kmemleak_not_leak(base);                                   in init_section_page_ext()
   304  if (!base) {                                               in init_section_page_ext()
   340  struct page_ext *base;                                     in __free_page_ext() local
   352  base = (void *)base - PAGE_EXT_INVALID;                    in __free_page_ext()
   355  base = get_entry(base, pfn);                               in __free_page_ext()
  [all …]
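page_ext resolves a pfn's metadata as a base pointer plus a scaled offset, which is what the get_entry(base, pfn) call at line 355 does. A rough userspace model (the entry size and signatures here are illustrative, not the kernel's exact definitions):

```c
#include <stdio.h>
#include <stdlib.h>

static size_t page_ext_size = 8;	/* per-page metadata size, illustrative */

/* base-plus-scaled-index lookup, modelled on page_ext's get_entry() */
static void *get_entry(void *base, unsigned long index)
{
	return (char *)base + page_ext_size * index;
}

int main(void)
{
	unsigned long pages = 16;
	void *base = calloc(pages, page_ext_size);	/* alloc_node_page_ext() analogue */

	if (!base)		/* mirrors the !base failure checks above */
		return 1;
	printf("entry 5 at %p (base %p)\n", get_entry(base, 5), base);
	free(base);
	return 0;
}
```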
|
mapping_dirty_helpers.c
    66  struct wp_walk base;                                       member
    73  #define to_clean_walk(_wpwalk) container_of(_wpwalk, struct clean_walk, base)
   322  .base = { .total = 0 },                                    in clean_record_shared_mapping_range()
   331  &cwalk.base));                                             in clean_record_shared_mapping_range()
   337  return cwalk.base.total;                                   in clean_record_shared_mapping_range()
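The `base` member at line 66 is the embedded-struct idiom: generic walk callbacks receive a struct wp_walk pointer, and container_of() recovers the enclosing struct clean_walk. A self-contained model (the bitmap field is hypothetical; the real struct carries more state):

```c
#include <stddef.h>
#include <stdio.h>

/* container_of as the kernel defines it: recover the outer struct
 * from a pointer to an embedded member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct wp_walk { unsigned long total; };

struct clean_walk {
	struct wp_walk base;	/* embedded base, as at line 66 */
	unsigned long bitmap;	/* hypothetical extra state */
};

#define to_clean_walk(_wpwalk) container_of(_wpwalk, struct clean_walk, base)

int main(void)
{
	struct clean_walk cwalk = { .base = { .total = 0 } };
	struct wp_walk *wpwalk = &cwalk.base;	/* callbacks only see the base... */

	to_clean_walk(wpwalk)->bitmap = 0xff;	/* ...but can recover the outer */
	printf("bitmap=%#lx total=%lu\n", cwalk.bitmap, cwalk.base.total);
	return 0;
}
```

This is why line 322 initializes only `.base` and line 337 reads the result back through `cwalk.base.total`.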
|
highmem.c
   167  unsigned long base = (unsigned long) vaddr & PAGE_MASK;    in __kmap_to_page() local
   178  if (WARN_ON_ONCE(base >= __fix_to_virt(FIX_KMAP_END) &&    in __kmap_to_page()
   179  base < __fix_to_virt(FIX_KMAP_BEGIN))) {                   in __kmap_to_page()
   187  if (base_addr == base)                                     in __kmap_to_page()
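Line 167 derives the page-aligned base by masking off the low bits of the virtual address. The arithmetic, assuming a 64-bit build with 4 KiB pages:

```c
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long vaddr = 0xffffc90000012abcUL;	/* invented kmap address */
	unsigned long base = vaddr & PAGE_MASK;		/* as in __kmap_to_page() */

	printf("vaddr=%#lx base=%#lx offset=%#lx\n",
	       vaddr, base, vaddr & ~PAGE_MASK);
	return 0;
}
```

The page-aligned base is then compared against the fixmap window and per-CPU kmap slots, as in the checks at lines 178-187.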
|
percpu.c
  2277  void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);  in __is_kernel_percpu_address() local
  2281  void *start = per_cpu_ptr(base, cpu);                      in __is_kernel_percpu_address()
  2288  per_cpu_ptr(base, get_boot_cpu_id());                      in __is_kernel_percpu_address()
  2339  void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);  in per_cpu_ptr_to_phys() local
  2361  void *start = per_cpu_ptr(base, cpu);                      in per_cpu_ptr_to_phys()
  3004  void *base = (void *)ULONG_MAX;                            in pcpu_embed_first_chunk() local
  3046  base = min(ptr, base);                                     in pcpu_embed_first_chunk()
  3050  max_distance = areas[highest_group] - base;                in pcpu_embed_first_chunk()
  3087  ai->groups[group].base_offset = areas[group] - base;       in pcpu_embed_first_chunk()
  3094  pcpu_setup_first_chunk(ai, base);                          in pcpu_embed_first_chunk()
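per_cpu_ptr() resolves a per-CPU address as a single base plus a per-CPU offset, and __is_kernel_percpu_address() inverts that by testing whether an address falls inside any CPU's copy. A userspace model with invented sizes and offsets:

```c
#include <stdio.h>

#define NR_CPUS    4
#define UNIT_SIZE  64

/* one base allocation, one copy of the unit per CPU */
static char storage[NR_CPUS][UNIT_SIZE];
static long pcpu_unit_offsets[NR_CPUS];

/* model of per_cpu_ptr(): base + this CPU's offset */
static void *per_cpu_ptr(void *base, int cpu)
{
	return (char *)base + pcpu_unit_offsets[cpu];
}

int main(void)
{
	void *base = storage[0];

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		pcpu_unit_offsets[cpu] = storage[cpu] - (char *)base;

	/* __is_kernel_percpu_address()-style scan: which CPU's copy
	 * does this address belong to? */
	char *addr = storage[2] + 10;
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		char *start = per_cpu_ptr(base, cpu);
		if (addr >= start && addr < start + UNIT_SIZE)
			printf("address belongs to cpu %d\n", cpu);
	}
	return 0;
}
```

pcpu_embed_first_chunk() (lines 3004-3094) computes the real offsets the same way: the lowest group address becomes the base, and each group's base_offset is its distance from it.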
|
nommu.c
   919  void *base;                                                in do_mmap_private() local
   958  base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL); in do_mmap_private()
   959  if (!base)                                                 in do_mmap_private()
   966  region->vm_start = (unsigned long) base;                   in do_mmap_private()
   980  ret = kernel_read(vma->vm_file, base, len, &fpos);         in do_mmap_private()
   986  memset(base + ret, 0, len - ret);                          in do_mmap_private()
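After a short kernel_read(), do_mmap_private() zero-fills the tail of the private mapping (line 986) so no stale allocator contents leak to userspace. A sketch with malloc() standing in for alloc_pages_exact() and a pretend read result:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t len = 4096;
	char *base = malloc(len);	/* stand-in for alloc_pages_exact() */
	if (!base)			/* mirrors the !base check at line 959 */
		return 1;

	long ret = 100;			/* pretend kernel_read() returned 100 bytes */
	memset(base, 'x', ret);		/* ...the "file contents" */
	if ((size_t)ret < len)
		memset(base + ret, 0, len - ret);	/* zero-fill the remainder */

	printf("byte[99]=%c byte[100]=%d\n", base[99], base[100]);
	free(base);
	return 0;
}
```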
|
numa_emulation.c
   204  static u64 uniform_size(u64 max_addr, u64 base, u64 hole, int nr_nodes)   argument
   207  unsigned long base_pfn = PHYS_PFN(base);                   in uniform_size()
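uniform_size() splits the usable pfn range, minus holes, evenly across the emulated nodes. A rough model of that computation (rounding details may differ from the kernel's):

```c
#include <stdio.h>

#define PAGE_SHIFT  12
#define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT))
#define PFN_PHYS(x) ((unsigned long long)(x) << PAGE_SHIFT)

/* split (max_addr - base - holes) evenly across nr_nodes, in whole pages */
static unsigned long long uniform_size(unsigned long long max_addr,
				       unsigned long long base,
				       unsigned long long hole, int nr_nodes)
{
	unsigned long max_pfn = PHYS_PFN(max_addr);
	unsigned long base_pfn = PHYS_PFN(base);
	unsigned long hole_pfns = PHYS_PFN(hole);

	return PFN_PHYS((max_pfn - base_pfn - hole_pfns) / nr_nodes);
}

int main(void)
{
	/* e.g. 4 GiB of memory, 256 MiB of holes, 4 emulated nodes */
	printf("%#llx per node\n",
	       uniform_size(0x100000000ULL, 0, 0x10000000ULL, 4));
	return 0;
}
```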
|
filemap.c
  2142  unsigned long base;                                        in find_lock_entries() local
  2147  base = folio->index;                                       in find_lock_entries()
  2149  if (base < *start)                                         in find_lock_entries()
  2152  if (base + nr - 1 > end)                                   in find_lock_entries()
  2163  base = xas.xa_index & ~(nr - 1);                           in find_lock_entries()
  2165  if (base < *start)                                         in find_lock_entries()
  2168  if (base + nr - 1 > end)                                   in find_lock_entries()
  2173  *start = base + nr;                                        in find_lock_entries()
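The masking at line 2163 recovers a large folio's starting index: a folio of nr pages (nr a power of two) is naturally aligned in the file, so clearing the low bits of any index inside it yields the base. For example:

```c
#include <stdio.h>

int main(void)
{
	unsigned long xa_index = 0x1237;	/* index where the lookup landed */
	unsigned long nr = 8;			/* folio spans 8 pages */
	unsigned long base = xa_index & ~(nr - 1);

	/* base..base+nr-1 is the full extent the folio covers */
	printf("base=%#lx covers [%#lx, %#lx]\n",
	       base, base, base + nr - 1);
	return 0;	/* base=0x1230 covers [0x1230, 0x1237] */
}
```

The surrounding checks (lines 2149-2168) then reject folios whose extent spills outside the requested [*start, end] window.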
|
vmalloc.c
  4701  unsigned long base, start, size, end, last_end, orig_start, orig_end;   in pcpu_get_vm_areas() local
  4752  base = pvm_determine_end_from_reverse(&va, align) - end;   in pcpu_get_vm_areas()
  4759  if (base + last_end < vmalloc_start + last_end)            in pcpu_get_vm_areas()
  4772  if (base + end > va->va_end) {                             in pcpu_get_vm_areas()
  4773  base = pvm_determine_end_from_reverse(&va, align) - end;   in pcpu_get_vm_areas()
  4781  if (base + start < va->va_start) {                         in pcpu_get_vm_areas()
  4783  base = pvm_determine_end_from_reverse(&va, align) - end;   in pcpu_get_vm_areas()
  4798  va = pvm_find_va_enclose_addr(base + end);                 in pcpu_get_vm_areas()
  4805  start = base + offsets[area];                              in pcpu_get_vm_areas()
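pcpu_get_vm_areas() searches for one base address and then lays every requested area at base + offsets[area] (line 4805); the `base + last_end < vmalloc_start + last_end` test at line 4759 is an underflow guard for when the search walks below the vmalloc window. A sketch of the placement step only, with invented addresses:

```c
#include <stdio.h>

int main(void)
{
	/* one base chosen by the search, fixed relative offsets per area */
	unsigned long base = 0xffffc90000000000UL;
	unsigned long offsets[] = { 0x0, 0x200000, 0x400000 };
	unsigned long sizes[]   = { 0x100000, 0x100000, 0x100000 };

	for (int area = 0; area < 3; area++) {
		unsigned long start = base + offsets[area];
		printf("area %d: [%#lx, %#lx)\n",
		       area, start, start + sizes[area]);
	}
	return 0;
}
```

Keeping the offsets fixed is what lets the percpu allocator address every area from a single base pointer, as in percpu.c above.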
|
slub.c
   929  void *base;                                                in check_valid_pointer() local
   934  base = slab_address(slab);                                 in check_valid_pointer()
   937  if (object < base || object >= base + slab->objects * s->size ||   in check_valid_pointer()
   938  (object - base) % s->size) {                               in check_valid_pointer()
  5944  void *base;                                                in __kmem_obj_info() local
  5955  base = slab_address(slab);                                 in __kmem_obj_info()
  5964  objp = base + s->size * objnr;                             in __kmem_obj_info()
  5966  if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size   in __kmem_obj_info()
  5967  || (objp - base) % s->size) ||                             in __kmem_obj_info()
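check_valid_pointer() accepts an object pointer only if it lies inside the slab and sits exactly on an object boundary; __kmem_obj_info() applies the same test to a pointer it reconstructed with base + size * objnr. A standalone model of the validity check:

```c
#include <stdbool.h>
#include <stdio.h>

/* model of check_valid_pointer(): inside the slab, on an object boundary */
static bool check_valid_pointer(char *base, unsigned int objects,
				unsigned int size, char *object)
{
	if (object < base || object >= base + (long)objects * size)
		return false;		/* outside the slab entirely */
	if ((object - base) % size)
		return false;		/* inside, but mid-object */
	return true;
}

int main(void)
{
	static char slab[16 * 64];	/* 16 objects of 64 bytes */

	printf("%d\n", check_valid_pointer(slab, 16, 64, slab + 128)); /* 1: object #2 */
	printf("%d\n", check_valid_pointer(slab, 16, 64, slab + 100)); /* 0: misaligned */
	return 0;
}
```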
|
memory.c
  6974  int i, n, base, l, ret;                                    in process_huge_page() local
  6983  base = 0;                                                  in process_huge_page()
  6994  base = nr_pages - 2 * (nr_pages - n);                      in process_huge_page()
  6997  for (i = 0; i < base; i++) {                               in process_huge_page()
  7009  int left_idx = base + i;                                   in process_huge_page()
  7010  int right_idx = base + 2 * l - 1 - i;                      in process_huge_page()
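process_huge_page() touches the subpages far from the faulting address first (indices 0..base-1), then walks the remaining 2*l pages inward in left/right pairs so the hottest page is processed last and stays warm in cache. The index math, with example values:

```c
#include <stdio.h>

int main(void)
{
	int nr_pages = 16, n = 11;	/* fault lands near subpage n, upper half */
	int base = nr_pages - 2 * (nr_pages - n);	/* = 6, as at line 6994 */
	int l = nr_pages - n;				/* = 5 pairs remain */

	for (int i = 0; i < base; i++)
		printf("cold: %d\n", i);	/* far pages first: 0..5 */
	for (int i = 0; i < l; i++) {
		int left_idx = base + i;		/* 6..10 */
		int right_idx = base + 2 * l - 1 - i;	/* 15..11, converging on n */
		printf("pair: left=%d right=%d\n", left_idx, right_idx);
	}
	return 0;
}
```

Together the two loops cover all 16 indices exactly once, ending at the faulting subpage.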
|
/mm/kasan/
common.c
    99  void *base = task_stack_page(task);                        in kasan_unpoison_task_stack() local
   101  kasan_unpoison(base, THREAD_SIZE, false);                  in kasan_unpoison_task_stack()
   112  void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));   in kasan_unpoison_task_stack_below() local
   114  kasan_unpoison(base, watermark - base, false);             in kasan_unpoison_task_stack_below()
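kasan_unpoison_task_stack_below() finds the stack base by masking the watermark address down to THREAD_SIZE alignment, then unpoisons only the span below the watermark. The arithmetic, with an invented THREAD_SIZE and address:

```c
#include <stdio.h>

#define THREAD_SIZE (16 * 1024UL)	/* illustrative stack size */

int main(void)
{
	/* since the stack is THREAD_SIZE-aligned, clearing the low bits of
	 * any in-stack address yields the stack's base, as at line 112 */
	unsigned long watermark = 0xffffc90000123a80UL;
	unsigned long base = watermark & ~(THREAD_SIZE - 1);

	printf("base=%#lx, unpoison %lu bytes up to the watermark\n",
	       base, watermark - base);
	return 0;
}
```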
|