/mm/
Occurrences of the identifier "limit" under mm/, grouped by file:
numa_emulation.c
    in split_nodes_interleave():
        132  u64 start, limit, end;   (local)
        141  limit = pi->blk[phys_blk].end;
        153  if (end > limit) {
        154  end = limit;
        173  if (limit - end - mem_hole_size(end, limit) < size)
        174  end = limit;
        178  min(end, limit) - start);
    in split_nodes_size_interleave_uniform():
        279  u64 start, limit, end;   (local)
        289  limit = pi->blk[phys_blk].end;
        309  if ((limit - end - mem_hole_size(end, limit) < size)
    [all …]
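These hits show the clamping pattern used when carving emulated NUMA nodes out of a physical block: never let a node cross the block's end, and absorb a too-small remainder rather than leave a stub node. A minimal userspace sketch of that logic, with mem_hole_size() reduced to a hypothetical stub:

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* Hypothetical stub: the kernel's mem_hole_size() returns the bytes of
 * memory holes between two addresses; assume a hole-free block here. */
static u64 mem_hole_size(u64 start, u64 end)
{
	(void)start;
	(void)end;
	return 0;
}

/* Carve one emulated node of roughly `size` bytes starting at `start`
 * from a physical block ending at `limit`, mirroring the hits above. */
static u64 carve_node_end(u64 start, u64 size, u64 limit)
{
	u64 end = start + size;

	if (end > limit)
		end = limit;	/* never cross the physical block */

	/* If the remainder (minus holes) cannot hold another node,
	 * extend this node to the block end instead. */
	if (limit - end - mem_hole_size(end, limit) < size)
		end = limit;

	return end;
}

int main(void)
{
	/* carving 0x4000-byte nodes from a block ending at 0x9000 */
	printf("end = %#llx\n",
	       (unsigned long long)carve_node_end(0x6000, 0x4000, 0x9000));
	return 0;
}
```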
|
cma.c
    in cma_alloc_mem():
        415  if (base < highmem && limit > highmem) {
        417  limit, nid, true);
        418  limit = highmem;
    in __cma_declare_contiguous_nid():
        430  phys_addr_t size, phys_addr_t limit,   (argument)
        440  __func__, &size, &base, &limit, &alignment);
        465  limit &= ~(alignment - 1);
        480  if (limit == 0 || limit > memblock_end)
        481  limit = memblock_end;
        483  if (base + size > limit) {
        485  &size, &base, &limit);
    [all …]
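__cma_declare_contiguous_nid() sanitizes the caller-supplied limit before reserving the region: align it down, treat zero or an out-of-range value as "end of memory", then check that base + size still fits below it. A standalone sketch of that validation, with memblock_end standing in for memblock_end_of_DRAM():

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

/* Sanitize `*limit` as the cma.c hits above do; `alignment` must be a
 * power of two, and `memblock_end` stands in for memblock_end_of_DRAM(). */
static bool cma_limit_ok(phys_addr_t base, phys_addr_t size,
			 phys_addr_t *limit, phys_addr_t alignment,
			 phys_addr_t memblock_end)
{
	*limit &= ~(alignment - 1);		/* align the limit down */

	if (*limit == 0 || *limit > memblock_end)
		*limit = memblock_end;		/* 0 means "no limit" */

	return base + size <= *limit;		/* region must fit below it */
}

int main(void)
{
	phys_addr_t limit = 0x80000000;	/* example caller-supplied limit */
	bool ok = cma_limit_ok(0x10000000, 0x08000000, &limit,
			       0x400000, 0x40000000);

	printf("limit=%#llx ok=%d\n", (unsigned long long)limit, ok);
	return 0;
}
```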
|
page-writeback.c
    in node_dirty_ok():
        483  return nr_pages <= limit;
    in pos_ratio_polynom():
        981  unsigned long limit)   (argument)
        987  (limit - setpoint) | 1);
    in wb_position_ratio():
        1076 unsigned long limit = dtc->limit = hard_dirty_limit(dtc_dom(dtc), dtc->thresh);   (local)
        1087 if (unlikely(dtc->dirty >= limit))
        1095 setpoint = (freerun + limit) / 2;
    in update_dirty_limit():
        1293 if (limit < thresh) {
        1294 limit = thresh;
        1304 if (limit > thresh) {
        1305 limit -= (limit - thresh) >> 5;
    [all …]
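pos_ratio_polynom() is the cubic feedback curve that scales writeback throttling: 1.0 at the setpoint, dropping to 0 at the hard limit (the `| 1` in the hit merely keeps the divisor nonzero). A floating-point model of the curve; the kernel computes it in fixed point:

```c
#include <stdio.h>

/* Floating-point model of the cubic position ratio:
 *   pos_ratio = 1 + ((setpoint - dirty) / (limit - setpoint))^3
 * clamped to [0, 2].  The kernel does this in fixed point. */
static double pos_ratio(double setpoint, double dirty, double limit)
{
	double x = (setpoint - dirty) / (limit - setpoint);
	double r = 1.0 + x * x * x;

	if (r < 0.0)
		r = 0.0;
	if (r > 2.0)
		r = 2.0;
	return r;
}

int main(void)
{
	/* setpoint midway between freerun and limit, as in wb_position_ratio() */
	double freerun = 1000, limit = 3000;
	double setpoint = (freerun + limit) / 2;

	for (double dirty = freerun; dirty <= limit; dirty += 500)
		printf("dirty=%5.0f  pos_ratio=%.3f\n",
		       dirty, pos_ratio(setpoint, dirty, limit));
	return 0;
}
```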
|
memblock.c
    in memblock_reserved_kern_size():
        1858 if (r->base > limit)
        1861 if (r->base + r->size > limit)
        1862 size = limit - r->base;
    in __find_max_addr():
        1913 if (limit <= r->size) {
        1914 max_addr = r->base + limit;
        1917 limit -= r->size;
    in memblock_enforce_memory_limit():
        1927 if (!limit)
        1930 max_addr = __find_max_addr(limit);
    in memblock_mem_limit_remove_map():
        1980 if (!limit)
        1983 max_addr = __find_max_addr(limit);
    [all …]
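__find_max_addr() turns a cumulative byte budget ("mem=1G" style) into the physical address where that budget runs out, walking the memory regions and subtracting each one's size. A sketch over a plain array of regions; the kernel iterates memblock with for_each_mem_range():

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

struct region {
	phys_addr_t base;
	phys_addr_t size;
};

#define NO_LIMIT ((phys_addr_t)-1)	/* stand-in for PHYS_ADDR_MAX */

/* Return the address at which the first `limit` bytes of memory end,
 * mirroring __find_max_addr() above. */
static phys_addr_t find_max_addr(const struct region *r, int nr,
				 phys_addr_t limit)
{
	for (int i = 0; i < nr; i++) {
		if (limit <= r[i].size)
			return r[i].base + limit;
		limit -= r[i].size;
	}
	return NO_LIMIT;	/* limit exceeds all memory: no cap */
}

int main(void)
{
	/* two 512 MiB banks with a hole between them */
	const struct region mem[] = {
		{ 0x00000000,  0x20000000 },
		{ 0x100000000, 0x20000000 },
	};

	/* a 1 GiB budget spills into the second bank */
	printf("max_addr = %#llx\n",
	       (unsigned long long)find_max_addr(mem, 2, 0x40000000));
	return 0;
}
```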
|
hugetlb_cgroup.c
    in hugetlb_cgroup_init():
        106  unsigned long limit;   (local)
        125  limit = round_down(PAGE_COUNTER_MAX,
        128  VM_BUG_ON(page_counter_set_max(fault, limit));
        129  VM_BUG_ON(page_counter_set_max(rsvd, limit));
    in hugetlb_cgroup_read_u64_max():
        554  unsigned long limit;   (local)
        561  limit = round_down(PAGE_COUNTER_MAX,
        577  if (val == limit)
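Both functions round the counter maximum down to a whole number of huge pages, so the advertised limit is always hugepage-aligned. The round_down() arithmetic in isolation, with example stand-in values (the real code rounds PAGE_COUNTER_MAX to the hstate's pages-per-huge-page):

```c
#include <stdio.h>

/* round_down() for power-of-two multiples, as in include/linux/math.h */
#define round_down(x, y) ((x) & ~((__typeof__(x))((y) - 1)))

int main(void)
{
	/* stand-ins: PAGE_COUNTER_MAX and a 2 MiB huge page on 4 KiB pages */
	unsigned long page_counter_max = ~0UL >> 1;
	unsigned long pages_per_hpage = 512;

	unsigned long limit = round_down(page_counter_max, pages_per_hpage);

	printf("limit = %lu (a multiple of %lu pages)\n",
	       limit, pages_per_hpage);
	return 0;
}
```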
|
readahead.c
    in page_cache_ra_order():
        466  pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;   (local)
        478  limit = min(limit, index + ra->size - 1);
        497  while (index <= limit) {
        504  while (order > min_order && index + (1UL << order) - 1 > limit)
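Here `limit` is the last valid page index of the file: readahead first clamps it to the end of the requested window, then shrinks the folio order until a folio starting at `index` ends at or before it. The order-shrinking step on its own:

```c
#include <stdio.h>

typedef unsigned long pgoff_t;

/* Largest order >= min_order such that a folio of 2^order pages
 * starting at `index` still ends at or before `limit`. */
static unsigned int fit_order(pgoff_t index, pgoff_t limit,
			      unsigned int order, unsigned int min_order)
{
	while (order > min_order && index + (1UL << order) - 1 > limit)
		order--;
	return order;
}

int main(void)
{
	pgoff_t limit = 100;	/* e.g. the file's last byte falls in page 100 */

	/* a 16-page folio at index 96 would cross EOF; a 4-page one fits */
	printf("order at index 96: %u\n", fit_order(96, limit, 4, 0));
	return 0;
}
```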
|
sparse.c
    in sparse_early_usemaps_alloc_pgdat_section():
        325  unsigned long goal, limit;   (local)
        338  limit = goal + (1UL << PA_SECTION_SHIFT);
        341  usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
        342  if (!usage && limit) {
        343  limit = MEMBLOCK_ALLOC_ACCESSIBLE;
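The shape here is a constrained allocation with a fallback: first try within one section of `goal`, so the usemap lands near its pgdat, then retry with the limit lifted if that fails. A hedged sketch of the retry shape; alloc_in_range() is a hypothetical allocator standing in for memblock_alloc_try_nid():

```c
#include <stdlib.h>

#define NO_LIMIT 0UL	/* stand-in for MEMBLOCK_ALLOC_ACCESSIBLE (0) */

/* Hypothetical allocator: a real one would only return memory in
 * [goal, limit); this stub only illustrates the call shape. */
static void *alloc_in_range(size_t size, unsigned long goal,
			    unsigned long limit)
{
	(void)goal;
	(void)limit;
	return malloc(size);
}

/* Try the preferred window first, then fall back to "anywhere",
 * as the sparse.c hits above do. */
static void *alloc_usage(size_t size, unsigned long goal, unsigned long span)
{
	unsigned long limit = goal + span;
	void *usage = alloc_in_range(size, goal, limit);

	if (!usage && limit) {
		limit = NO_LIMIT;
		usage = alloc_in_range(size, goal, limit);
	}
	return usage;
}

int main(void)
{
	return alloc_usage(64, 0x1000, 0x1000) ? 0 : 1;
}
```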
|
compaction.c
    in fast_isolate_freepages():
        1506 unsigned int limit = max(1U, freelist_scan_limit(cc) >> 1);   (local)
        1524 limit = pageblock_nr_pages >> 1;
        1582 limit >>= 1;
        1585 if (order_scanned >= limit)
        1627 if (order_scanned >= limit)
        1628 limit = max(1U, limit >> 1);
    in fast_find_migrateblock():
        1925 unsigned int limit = freelist_scan_limit(cc);   (local)
        1981 order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit;
        1996 if (nr_scanned++ >= limit) {
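In both fast paths `limit` is a scan budget rather than an address: it caps how many candidates the search may inspect, and it is halved after fruitless scans (never below one) so repeated misses make the fast path progressively cheaper. The backoff in isolation:

```c
#include <stdio.h>

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	/* initial budget, e.g. from freelist_scan_limit() */
	unsigned int limit = 64;

	/* each failed search halves the budget, floored at 1 */
	for (int miss = 0; miss < 8; miss++) {
		printf("budget after %d misses: %u\n", miss, limit);
		limit = max_u(1U, limit >> 1);
	}
	return 0;
}
```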
|
util.c
    in __account_locked_vm():
        509  unsigned long locked_vm, limit;   (local)
        517  limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        518  if (locked_vm + pages > limit)
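__account_locked_vm() converts RLIMIT_MEMLOCK from bytes to pages and refuses any request that would exceed it. The same check is easy to reproduce in userspace with getrlimit(2):

```c
#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

/* Would locking `pages` more pages stay within RLIMIT_MEMLOCK?
 * Mirrors the bytes-to-pages conversion in the hits above. */
static int may_lock(unsigned long locked_vm, unsigned long pages)
{
	struct rlimit rlim;
	unsigned long limit;

	if (getrlimit(RLIMIT_MEMLOCK, &rlim) != 0)
		return 0;

	limit = rlim.rlim_cur / (unsigned long)sysconf(_SC_PAGESIZE);
	return locked_vm + pages <= limit;
}

int main(void)
{
	printf("can lock 16 more pages: %s\n",
	       may_lock(0, 16) ? "yes" : "no");
	return 0;
}
```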
|
nommu.c
    in do_munmap():
        1441 static int limit;   (local)
        1442 if (limit < 5) {
        1446 limit++;
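This `limit` is not a memory bound at all: it is a static counter that rate-limits a diagnostic, so the bad-unmap warning in do_munmap() prints at most five times per boot. The idiom on its own:

```c
#include <stdio.h>

static void warn_bad_unmap(void)
{
	static int limit;	/* persists across calls: warnings emitted so far */

	if (limit < 5) {
		fprintf(stderr, "munmap of memory not mmapped\n");
		limit++;
	}
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		warn_bad_unmap();	/* only the first five print */
	return 0;
}
```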
|
Kconfig
    58   not yet full, or the cgroup zswap limit has not been reached,
    180  This option sets the upper limit on the number of physical pages
    719  latency. This option sets the upper limit of scale factor to limit
    1027 This config adds a new upper limit of default page block
    1061 arch) when the RLIMIT_STACK hard limit is unlimited.
|
memcontrol.c
    in mem_cgroup_margin():
        1309 unsigned long limit;   (local)
        1312 limit = READ_ONCE(memcg->memory.max);
        1313 if (count < limit)
        1314 margin = limit - count;
        1318 limit = READ_ONCE(memcg->memsw.max);
        1319 if (count < limit)
        1320 margin = min(margin, limit - count);
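mem_cgroup_margin() computes how many pages can still be charged: the headroom below memory.max, further clamped by the memory+swap counter when swap accounting is active. Its arithmetic as a standalone function, assuming swap accounting (do_memsw_account()) is enabled:

```c
#include <stdio.h>

/* Pages still chargeable: the smaller headroom of the two counters,
 * or 0 once either usage has reached its limit. */
static unsigned long mem_margin(unsigned long mem_count, unsigned long mem_max,
				unsigned long memsw_count,
				unsigned long memsw_max)
{
	unsigned long margin = 0;

	if (mem_count < mem_max)
		margin = mem_max - mem_count;

	if (memsw_count < memsw_max) {
		unsigned long memsw_margin = memsw_max - memsw_count;

		if (memsw_margin < margin)
			margin = memsw_margin;
	} else {
		margin = 0;
	}
	return margin;
}

int main(void)
{
	/* memory headroom is 100 pages, but memory+swap allows only 50 */
	printf("margin = %lu pages\n", mem_margin(900, 1000, 1950, 2000));
	return 0;
}
```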
|
slab.h
        463  unsigned int limit;   (member)
|
slab_common.c
    in cache_show():
        1114 sinfo.limit, sinfo.batchcount, sinfo.shared);
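The `limit` member above is the per-CPU array-cache size that cache_show() prints in the tunables column of /proc/slabinfo. A small reader for that column; the "name ... : tunables <limit> <batchcount> <sharedfactor> : ..." layout assumed here is the conventional slabinfo format:

```c
#include <stdio.h>
#include <string.h>

/* Print limit/batchcount/shared for each cache in /proc/slabinfo,
 * assuming the conventional ": tunables <limit> <batch> <shared>" column. */
int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/slabinfo", "r");

	if (!f) {
		perror("/proc/slabinfo");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		char name[64];
		unsigned int limit, batch, shared;
		char *tun = strstr(line, ": tunables");

		if (!tun || sscanf(line, "%63s", name) != 1)
			continue;
		if (sscanf(tun, ": tunables %u %u %u",
			   &limit, &batch, &shared) == 3)
			printf("%-24s limit=%u batchcount=%u shared=%u\n",
			       name, limit, batch, shared);
	}
	fclose(f);
	return 0;
}
```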
|