Cross-reference of the identifier "max" (columns: file | line | code | referenced in):

| /mm/damon/ |
| sysfs-common.c | 20 | unsigned long max) | damon_sysfs_ul_range_alloc() (argument) |
| | 29 | range->max = max; | damon_sysfs_ul_range_alloc() |
| | 65 | return sysfs_emit(buf, "%lu\n", range->max); | max_show() |
| | 73 | unsigned long max; | max_store() (local) |
| | 76 | err = kstrtoul(buf, 0, &max); | max_store() |
| | 80 | range->max = max; | max_store() |
| | 93 | __ATTR_RW_MODE(max, 0600); | |
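The max_store() fragments above follow the standard sysfs store pattern: parse the buffer with kstrtoul() and update the field only on success. Below is a minimal userspace sketch of that parse-and-store step; the struct and function names are hypothetical stand-ins, and strtoul() with base 0 plays the role of kstrtoul().

```c
/* Userspace sketch of the max_store() parse-and-store logic above.
 * Names here are hypothetical; the real handler also receives the
 * kobject and attribute and returns the consumed byte count. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct ul_range {
	unsigned long min;
	unsigned long max;
};

static int range_max_store(struct ul_range *range, const char *buf)
{
	char *end;
	unsigned long max;

	errno = 0;
	max = strtoul(buf, &end, 0);	/* base 0, like kstrtoul(buf, 0, ...) */
	if (errno || end == buf)
		return -EINVAL;

	range->max = max;		/* update only after a clean parse */
	return 0;
}

int main(void)
{
	struct ul_range r = { .min = 0, .max = 0 };

	if (!range_max_store(&r, "4096"))
		printf("max = %lu\n", r.max);	/* prints: max = 4096 */
	return 0;
}
```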
|
| sysfs-common.h | 16 | unsigned long max; | (member) |
| | 21 | unsigned long max); | |
|
| sysfs-schemes.c | 582 | return sysfs_emit(buf, "%lu\n", filter->sz_range.max); | max_show() |
| | 590 | int err = kstrtoul(buf, 0, &filter->sz_range.max); | max_store() |
| | 645 | __ATTR_RW_MODE(max, 0600); | |
| | 2478 | sysfs_filter->sz_range.max) { | damon_sysfs_add_scheme_filters() |
| | 2615 | .max_sz_region = access_pattern->sz->max, | damon_sysfs_mk_scheme() |
| | 2617 | .max_nr_accesses = access_pattern->nr_accesses->max, | damon_sysfs_mk_scheme() |
| | 2619 | .max_age_region = access_pattern->age->max, | damon_sysfs_mk_scheme() |
|
| /mm/ |
| readahead.c | 368 | static unsigned long get_init_ra_size(unsigned long size, unsigned long max) | get_init_ra_size() (argument) |
| | 372 | if (newsize <= max / 32) | get_init_ra_size() |
| | 374 | else if (newsize <= max / 4) | get_init_ra_size() |
| | 377 | newsize = max; | get_init_ra_size() |
| | 387 | unsigned long max) | get_next_ra_size() (argument) |
| | 391 | if (cur < max / 16) | get_next_ra_size() |
| | 393 | if (cur <= max / 2) | get_next_ra_size() |
| | 395 | return max; | get_next_ra_size() |
| | 482 | new_order = max(new_order, min_order); | page_cache_ra_order() |
| | 659 | ra->size = max(ra->size, get_next_ra_size(ra, max_pages)); | page_cache_async_ra() |
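The get_init_ra_size()/get_next_ra_size() fragments outline the readahead ramp-up heuristic: round the initial request up to a power of two, grow fast (x4) while the window is small relative to max, slower (x2) in the middle, and saturate at max. A standalone sketch follows; the x4/x2 growth factors match current kernel sources, but only the comparisons against max/32, max/4, max/16, and max/2 are visible in this listing.

```c
/* Standalone approximation of the readahead window sizing whose
 * comparisons appear above. */
#include <stdio.h>

static unsigned long roundup_pow_of_two_ul(unsigned long n)
{
	unsigned long r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

/* Pick the size of the very first readahead window. */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two_ul(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;
	return newsize;
}

/* Grow an existing window: x4 while small, x2 while moderate, cap at max. */
static unsigned long get_next_ra_size(unsigned long cur, unsigned long max)
{
	if (cur < max / 16)
		return 4 * cur;
	if (cur <= max / 2)
		return 2 * cur;
	return max;
}

int main(void)
{
	unsigned long ra = get_init_ra_size(3, 256);

	while (ra < 256) {
		printf("%lu -> ", ra);
		ra = get_next_ra_size(ra, 256);
	}
	printf("%lu\n", ra);	/* 16 -> 32 -> 64 -> 128 -> 256 */
	return 0;
}
```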
|
| percpu-internal.h | 209 | max(pcpu_stats.nr_max_alloc, pcpu_stats.nr_cur_alloc); | pcpu_stats_area_alloc() |
| | 213 | max(pcpu_stats.max_alloc_size, size); | pcpu_stats_area_alloc() |
| | 216 | chunk->max_alloc_size = max(chunk->max_alloc_size, size); | pcpu_stats_area_alloc() |
| | 246 | max(pcpu_stats.nr_max_chunks, pcpu_stats.nr_chunks); | pcpu_stats_chunk_alloc() |
|
| memcontrol.c | 1605 | unsigned long max = READ_ONCE(memcg->memory.max); | mem_cgroup_get_max() (local) |
| | 1610 | unsigned long swap = READ_ONCE(memcg->memsw.max) - max; | mem_cgroup_get_max() |
| | 1616 | max += min(READ_ONCE(memcg->swap.max), | mem_cgroup_get_max() |
| | 1619 | return max; | mem_cgroup_get_max() |
| | 4408 | unsigned long max; | memory_max_write() (local) |
| | 4416 | xchg(&memcg->memory.max, max); | memory_max_write() |
| | 5311 | unsigned long max; | swap_max_write() (local) |
| | 5319 | xchg(&memcg->swap.max, max); | swap_max_write() |
| | 5402 | if (max == 0) { | obj_cgroup_may_zswap() |
| | 5410 | if (pages < max) | obj_cgroup_may_zswap() |
| | [all …] | | |
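The mem_cgroup_get_max() fragments combine the memory limit with swap headroom: with memsw (v1) accounting the extra headroom is memsw.max - memory.max, otherwise (v2) swap.max is added, in both cases capped by the swap actually available. A hedged sketch with simplified field and helper names:

```c
/* Sketch of the limit arithmetic visible in mem_cgroup_get_max() above.
 * Field names are simplified stand-ins for the memcg internals. */
#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

struct memcg_limits {
	unsigned long memory_max;	/* memory.max, in pages */
	unsigned long memsw_max;	/* memory+swap limit (v1 memsw) */
	unsigned long swap_max;		/* swap.max (v2) */
};

static unsigned long memcg_get_max(const struct memcg_limits *m,
				   int memsw_account,
				   unsigned long total_swap_pages)
{
	unsigned long max = m->memory_max;

	if (memsw_account) {
		/* memsw covers memory+swap, so the headroom is the diff */
		unsigned long swap = m->memsw_max - max;

		max += min_ul(swap, total_swap_pages);
	} else {
		max += min_ul(m->swap_max, total_swap_pages);
	}
	return max;
}

int main(void)
{
	struct memcg_limits m = {
		.memory_max = 1024, .memsw_max = 1536, .swap_max = 2048,
	};

	printf("v1: %lu pages\n", memcg_get_max(&m, 1, 4096)); /* 1536 */
	printf("v2: %lu pages\n", memcg_get_max(&m, 0, 4096)); /* 3072 */
	return 0;
}
```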
|
| page_counter.c | 143 | if (new > c->max) { | page_counter_try_charge() |
| | 219 | old = xchg(&counter->max, nr_pages); | page_counter_set_max() |
| | 224 | counter->max = old; | page_counter_set_max() |
| | 272 | int page_counter_memparse(const char *buf, const char *max, | page_counter_memparse() (argument) |
| | 278 | if (!strcmp(buf, max)) { | page_counter_memparse() |
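Lines 219 and 224 show the lockless limit-lowering protocol in page_counter_set_max(): publish the new max with xchg(), re-read the usage, and if a concurrent charge slipped past the new limit, restore the old max and retry. A userspace sketch of the same pattern using C11 atomics, with the retry details simplified:

```c
/* Userspace sketch of the xchg()-then-recheck pattern visible at
 * page_counter.c:219/224.  Mirrors page_counter_set_max() in spirit;
 * the kernel version has more surrounding detail. */
#include <stdatomic.h>
#include <stdio.h>

struct counter {
	atomic_ulong usage;
	atomic_ulong max;
};

static int counter_set_max(struct counter *c, unsigned long nr)
{
	for (;;) {
		unsigned long usage = atomic_load(&c->usage);
		unsigned long old;

		if (usage > nr)
			return -1;	/* -EBUSY in the kernel */

		old = atomic_exchange(&c->max, nr);

		/* Done if usage stayed within the snapshot, or if we
		 * only raised the limit; otherwise undo and retry. */
		if (atomic_load(&c->usage) <= usage || nr >= old)
			return 0;

		atomic_store(&c->max, old);
	}
}

int main(void)
{
	struct counter c = { .usage = 100, .max = 1000 };

	printf("set 500: %d\n", counter_set_max(&c, 500));	/* 0 */
	printf("set 50:  %d\n", counter_set_max(&c, 50));	/* -1 */
	return 0;
}
```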
|
| memblock.c | 144 | .max = INIT_PHYSMEM_REGIONS, | |
| | 319 | end = max(start, end); | memblock_find_in_range_node() |
| | 391 | memblock.reserved.max); | memblock_discard() |
| | 401 | memblock.memory.max); | memblock_discard() |
| | 487 | type->name, type->max, type->max * 2); | memblock_double_array() |
| | 504 | type->max <<= 1; | memblock_double_array() |
| | 582 | BUG_ON(type->cnt >= type->max); | memblock_insert_region() |
| | 640 | if (type->cnt * 2 + 1 <= type->max) | memblock_add_range() |
| | 824 | while (type->cnt + 2 > type->max) | memblock_isolate_range() |
| | 1258 | max(m_start, r_start); | __next_mem_range() |
| | [all …] | | |
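memblock grows its fixed region arrays by doubling (type->max <<= 1 at line 504), and callers such as memblock_add_range() check capacity up front so an insertion cannot overflow mid-operation. A generic sketch of that grow-by-doubling step, using malloc() where memblock allocates from early-boot memory:

```c
/* Generic sketch of the doubling pattern behind memblock_double_array().
 * memblock keeps extra invariants and allocates via memblock itself;
 * this only shows the doubling step. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct region {
	unsigned long base, size;
};

struct region_array {
	struct region *regions;
	unsigned long cnt;	/* regions in use */
	unsigned long max;	/* allocated slots */
};

static int double_array(struct region_array *type)
{
	unsigned long new_max = type->max << 1;
	struct region *new = calloc(new_max, sizeof(*new));

	if (!new)
		return -1;
	memcpy(new, type->regions, type->cnt * sizeof(*new));
	free(type->regions);
	type->regions = new;
	type->max = new_max;
	return 0;
}

static int insert_region(struct region_array *type, unsigned long base,
			 unsigned long size)
{
	/* Like memblock_add_range(): make room before inserting. */
	while (type->cnt + 1 > type->max)
		if (double_array(type))
			return -1;
	type->regions[type->cnt++] = (struct region){ base, size };
	return 0;
}

int main(void)
{
	struct region_array a = { calloc(2, sizeof(struct region)), 0, 2 };

	for (unsigned long i = 0; i < 5; i++)
		insert_region(&a, i * 0x1000, 0x1000);
	printf("cnt=%lu max=%lu\n", a.cnt, a.max);	/* cnt=5 max=8 */
	free(a.regions);
	return 0;
}
```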
|
| hugetlb_cgroup.c | 533 | return (u64)counter->max * PAGE_SIZE; | hugetlb_cgroup_read_u64() |
| | 535 | return (u64)rsvd_counter->max * PAGE_SIZE; | hugetlb_cgroup_read_u64() |
| | 576 | val = (u64)counter->max; | hugetlb_cgroup_read_u64_max() |
| | 593 | const char *max) | hugetlb_cgroup_write() (argument) |
| | 604 | ret = page_counter_memparse(buf, max, &nr_pages); | hugetlb_cgroup_write() |
| | 685 | long max; | __hugetlb_events_show() (local) |
| | 692 | max = atomic_long_read(&h_cg->events_local[idx][HUGETLB_MAX]); | __hugetlb_events_show() |
| | 694 | max = atomic_long_read(&h_cg->events[idx][HUGETLB_MAX]); | __hugetlb_events_show() |
| | 696 | seq_printf(seq, "max %lu\n", max); | __hugetlb_events_show() |
|
| vma.h | 439 | struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max) | vma_iter_next_range_limit() (argument) |
| | 441 | return mas_next_range(&vmi->mas, max); | vma_iter_next_range_limit() |
| | 445 | unsigned long max, unsigned long size) | vma_iter_area_lowest() (argument) |
| | 447 | return mas_empty_area(&vmi->mas, min, max - 1, size); | vma_iter_area_lowest() |
| | 451 | unsigned long max, unsigned long size) | vma_iter_area_highest() (argument) |
| | 453 | return mas_empty_area_rev(&vmi->mas, min, max - 1, size); | vma_iter_area_highest() |
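The vma_iter_area_lowest()/vma_iter_area_highest() wrappers pass max - 1 because the iterator's limit is exclusive while the maple-tree search bound is inclusive. The toy gap finder below illustrates only the contract (find the lowest free gap of a given size inside [min, max)) using a linear scan over sorted occupied ranges; it is not the maple-tree algorithm.

```c
/* Toy illustration of the vma_iter_area_lowest() contract.  Purely
 * illustrative: real lookups walk the maple tree. */
#include <stdio.h>

struct range {
	unsigned long start, end;	/* occupied: [start, end) */
};

static long area_lowest(const struct range *r, int n, unsigned long min,
			unsigned long max, unsigned long size)
{
	unsigned long gap_start = min;

	for (int i = 0; i <= n; i++) {
		unsigned long gap_end = (i < n && r[i].start < max)
					? r[i].start : max;

		if (gap_end >= gap_start && gap_end - gap_start >= size)
			return (long)gap_start;
		if (i < n && r[i].end > gap_start)
			gap_start = r[i].end;
	}
	return -1;	/* no gap large enough */
}

int main(void)
{
	const struct range vmas[] = {
		{ 0x1000, 0x3000 }, { 0x4000, 0x5000 },
	};
	long gap = area_lowest(vmas, 2, 0x1000, 0x10000, 0x2000);

	printf("%#lx\n", (unsigned long)gap);	/* 0x5000 */
	return 0;
}
```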
|
| memcontrol-v1.c | 1440 | unsigned long max, bool memsw) | mem_cgroup_resize_max() (argument) |
| | 1459 | limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) : | mem_cgroup_resize_max() |
| | 1460 | max <= memcg->memsw.max; | mem_cgroup_resize_max() |
| | 1466 | if (max > counter->max) | mem_cgroup_resize_max() |
| | 1468 | ret = page_counter_set_max(counter, max); | mem_cgroup_resize_max() |
| | 1581 | return (u64)counter->max * PAGE_SIZE; | mem_cgroup_read_u64() |
| | 1603 | static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max) | memcg_update_tcp_max() (argument) |
| | 1609 | ret = page_counter_set_max(&memcg->tcpmem, max); | memcg_update_tcp_max() |
| | 1897 | memory = min(memory, READ_ONCE(mi->memory.max)); | memcg1_stat_format() |
| | 1898 | memsw = min(memsw, READ_ONCE(mi->memsw.max)); | memcg1_stat_format() |
|
| page-writeback.c | 49 | #define MAX_PAUSE max(HZ/5, 1) | |
| | 60 | #define BANDWIDTH_INTERVAL max(HZ/5, 1) | |
| | 170 | unsigned long long max = wb->bdi->max_ratio; | wb_min_max_ratio() (local) |
| | 181 | if (max < 100 * BDI_RATIO_SCALE) { | wb_min_max_ratio() |
| | 182 | max *= this_bw; | wb_min_max_ratio() |
| | 183 | max = div64_ul(max, tot_bw); | wb_min_max_ratio() |
| | 188 | *maxp = max; | wb_min_max_ratio() |
| | 830 | return max(thresh, dom->dirty_limit); | hard_dirty_limit() |
| | 1274 | avg = max(avg, 1LU); | wb_update_write_bandwidth() |
| | 1303 | thresh = max(thresh, dtc->dirty); | update_dirty_limit() |
| | [all …] | | |
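Lines 181-183 scale a per-bdi max_ratio by this writeback context's share of the device's total write bandwidth, skipping the scaling when the ratio is not actually limiting (at or above 100%). A sketch of that proportional split; the BDI_RATIO_SCALE value used below is an assumption for illustration.

```c
/* Sketch of the proportional scaling at page-writeback.c:181-183.
 * BDI_RATIO_SCALE's value here is an assumption for illustration;
 * field names follow the fragments above, the rest is simplified. */
#include <stdio.h>

#define BDI_RATIO_SCALE 10000	/* assumed scale factor */

static unsigned long scaled_max_ratio(unsigned long long max_ratio,
				      unsigned long this_bw,
				      unsigned long tot_bw)
{
	unsigned long long max = max_ratio;

	/* Only scale if the ratio is actually limiting (< 100%). */
	if (max < 100ULL * BDI_RATIO_SCALE && tot_bw) {
		max *= this_bw;
		max /= tot_bw;	/* div64_ul() in the kernel */
	}
	return (unsigned long)max;
}

int main(void)
{
	/* A wb doing 1/4 of the device's writeback gets 1/4 of the ratio. */
	printf("%lu\n", scaled_max_ratio(40UL * BDI_RATIO_SCALE, 25, 100));
	return 0;	/* prints 100000, i.e. 10 * BDI_RATIO_SCALE */
}
```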
|
| mmu_gather.c | 42 | batch->max = MAX_GATHER_BATCH; | tlb_next_batch() |
| | 195 | if (batch->nr >= batch->max - 1) { | __tlb_remove_folio_pages_size() |
| | 200 | VM_BUG_ON_PAGE(batch->nr > batch->max - 1, page); | __tlb_remove_folio_pages_size() |
| | 417 | tlb->local.max = ARRAY_SIZE(tlb->__pages); | __tlb_gather_mmu() |
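mmu_gather accumulates pages into fixed-size batches (the embedded batch's max is ARRAY_SIZE(tlb->__pages), line 417) and acts when a batch fills up. The sketch below shows the generic accumulate-and-flush shape only; the real code chains additional batches rather than flushing synchronously, and its max - 1 check keeps a spare slot.

```c
/* Generic sketch of the batch-and-flush pattern behind mmu_gather:
 * collect pointers into a fixed array and flush when full. */
#include <stdio.h>

#define BATCH_MAX 8

struct gather_batch {
	unsigned int nr;
	unsigned int max;
	void *pages[BATCH_MAX];
};

static void flush_batch(struct gather_batch *b)
{
	/* Stand-in for freeing the pages after the TLB flush. */
	printf("flush %u pages\n", b->nr);
	b->nr = 0;
}

static void batch_add(struct gather_batch *b, void *page)
{
	b->pages[b->nr++] = page;
	if (b->nr >= b->max)
		flush_batch(b);
}

int main(void)
{
	struct gather_batch b = { .nr = 0, .max = BATCH_MAX };
	int dummy[20];

	for (int i = 0; i < 20; i++)
		batch_add(&b, &dummy[i]);
	if (b.nr)
		flush_batch(&b);	/* final partial batch */
	return 0;	/* flush 8 / flush 8 / flush 4 */
}
```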
|
| mm_init.c | 500 | start_pfn = max(start_pfn, zone_movable_pfn[nid]); | find_zone_movable_pfns_for_nodes() |
| | 1292 | *zone_start_pfn = max(*zone_start_pfn, node_start_pfn); | zone_spanned_pages_in_node() |
| | 1711 | *end_pfn = max(*end_pfn, this_end_pfn); | get_pfn_range_for_nid() |
| | 1849 | end_pfn = max(max_zone_pfn[zone], start_pfn); | free_area_init() |
| | 2160 | return max(cpumask_weight(node_cpumask), 1U); | deferred_page_init_max_threads() |
| | 2459 | unsigned long long max = high_limit; | alloc_large_system_hash() (local) |
| | 2497 | if (max == 0) { | alloc_large_system_hash() |
| | 2499 | do_div(max, bucketsize); | alloc_large_system_hash() |
| | 2501 | max = min(max, 0x80000000ULL); | alloc_large_system_hash() |
| | 2505 | if (numentries > max) | alloc_large_system_hash() |
| | [all …] | | |
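In alloc_large_system_hash(), lines 2497-2505 derive an upper bound on hash entries from a byte budget: divide by the bucket size, clamp to 0x80000000, and cap the requested entry count. A sketch of just that arithmetic; how the kernel picks the default byte budget when high_limit is 0 is not shown in the listing, so the fallback parameter below is an assumption.

```c
/* Sketch of the clamping arithmetic visible at mm_init.c:2497-2505.
 * fallback_bytes stands in for the kernel's RAM-derived default,
 * which this listing does not show. */
#include <stdio.h>

static unsigned long cap_hash_entries(unsigned long numentries,
				      unsigned long long high_limit,
				      unsigned long long fallback_bytes,
				      unsigned long bucketsize)
{
	unsigned long long max = high_limit;

	if (max == 0) {
		max = fallback_bytes;		/* assumed default budget */
		max /= bucketsize;		/* do_div(max, bucketsize) */
	}
	if (max > 0x80000000ULL)
		max = 0x80000000ULL;		/* min(max, 0x80000000ULL) */

	if (numentries > max)
		numentries = (unsigned long)max;
	return numentries;
}

int main(void)
{
	/* 1 GiB budget, 64-byte buckets: at most 16M entries. */
	printf("%lu\n", cap_hash_entries(1UL << 25, 0, 1ULL << 30, 64));
	return 0;	/* prints 16777216 */
}
```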
|
| mapping_dirty_helpers.c | 47 | wpwalk->tlbflush_end = max(wpwalk->tlbflush_end, | wp_pte() |
| | 106 | wpwalk->tlbflush_end = max(wpwalk->tlbflush_end, | clean_record_pte() |
| | 111 | cwalk->end = max(cwalk->end, pgoff + 1); | clean_record_pte() |
|
| filemap.c | 491 | pgoff_t max = end_byte >> PAGE_SHIFT; | filemap_range_has_page() (local) |
| | 498 | folio = xas_find(&xas, max); | filemap_range_has_page() |
| | 648 | pgoff_t max = end_byte >> PAGE_SHIFT; | filemap_range_has_writeback() (local) |
| | 655 | xas_for_each(&xas, folio, max) { | filemap_range_has_writeback() |
| | 901 | unsigned int split_order = max(forder, | __filemap_add_folio() |
| | 914 | max(xas_try_split_min_order( | __filemap_add_folio() |
| | 2037 | folio = xas_find(xas, max); | find_get_entry() |
| | 2039 | folio = xas_find_marked(xas, max, mark); | find_get_entry() |
| | 2377 | if (xas.xa_index > max || xa_is_value(folio)) | filemap_get_read_batch() |
| | 3115 | pgoff_t max = (end - 1) >> PAGE_SHIFT; | mapping_seek_hole_data() (local) |
| | [all …] | | |
|
| percpu-stats.c | 40 | max_nr_alloc = max(max_nr_alloc, chunk->nr_alloc); | find_max_nr_alloc() |
| | 112 | max_frag = max(max_frag, -1 * (*p)); | chunk_map_stats() |
|
| swapfile.c | 841 | end = min(si->max, offset + SWAPFILE_CLUSTER); | swap_reclaim_full_clusters() |
| | 1336 | if (offset >= si->max) | _swap_info_get() |
| | 1451 | if (offset >= si->max) | get_swap_device() |
| | 1811 | if (WARN_ON(end_offset > si->max)) | free_swap_and_cache_nr() |
| | 2276 | for (i = prev + 1; i < si->max; i++) { | find_next_to_unuse() |
| | 2284 | if (i == si->max) | find_next_to_unuse() |
| | 2517 | ret = add_swap_extent(sis, 0, sis->max, 0); | setup_swap_extents() |
| | 2786 | p->max = 0; | SYSCALL_DEFINE1() |
| | 3356 | si->max = maxpages; | SYSCALL_DEFINE2() |
| | 3363 | if (si->pages != si->max - 1) { | SYSCALL_DEFINE2() |
| | [all …] | | |
|
| numa_memblks.c | 253 | bi->start = max(bi->start, low); | numa_cleanup_meminfo() |
| | 300 | end = max(bi->end, bj->end); | numa_cleanup_meminfo() |
| | 536 | blk[count - 1]->end = max(blk[count - 1]->end, end); | numa_fill_memblks() |
|
| page_alloc.c | 2810 | high = max(pcp->count, high_min); | nr_pcp_high() |
| | 3236 | batch = max(batch >> order, 2); | nr_pcp_alloc() |
| | 5896 | high = max(high, batch << 2); | zone_highsize() |
| | 6235 | long max = 0; | calculate_totalreserve_pages() (local) |
| | 6240 | if (zone->lowmem_reserve[j] > max) | calculate_totalreserve_pages() |
| | 6241 | max = zone->lowmem_reserve[j]; | calculate_totalreserve_pages() |
| | 6245 | max += high_wmark_pages(zone); | calculate_totalreserve_pages() |
| | 6247 | if (max > managed_pages) | calculate_totalreserve_pages() |
| | 6248 | max = managed_pages; | calculate_totalreserve_pages() |
| | 6250 | pgdat->totalreserve_pages += max; | calculate_totalreserve_pages() |
| | [all …] | | |
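Lines 6235-6250 show calculate_totalreserve_pages(): for each zone, take the largest lowmem_reserve[] entry, add the high watermark, clamp to the zone's managed pages, and accumulate into the node total. A reduced sketch with plain arrays standing in for the zone structures:

```c
/* Sketch of the per-zone reserve computation at page_alloc.c:6235-6250.
 * Zone layout is reduced to a plain struct for illustration. */
#include <stdio.h>

#define MAX_NR_ZONES 4

struct zone_info {
	long lowmem_reserve[MAX_NR_ZONES];
	unsigned long high_wmark;
	unsigned long managed_pages;
};

static unsigned long total_reserve_pages(const struct zone_info *zones,
					 int nr_zones)
{
	unsigned long total = 0;

	for (int i = 0; i < nr_zones; i++) {
		const struct zone_info *z = &zones[i];
		long max = 0;

		/* Largest lowmem_reserve[] entry for this zone. */
		for (int j = 0; j < MAX_NR_ZONES; j++)
			if (z->lowmem_reserve[j] > max)
				max = z->lowmem_reserve[j];

		max += z->high_wmark;
		if ((unsigned long)max > z->managed_pages)
			max = z->managed_pages;	/* clamp to managed pages */
		total += max;
	}
	return total;
}

int main(void)
{
	struct zone_info zones[2] = {
		{ { 0, 256, 512, 0 }, 128, 4096 },
		{ { 0, 0, 0, 0 },     64,  32 },	/* clamped to 32 */
	};

	printf("%lu\n", total_reserve_pages(zones, 2)); /* 640 + 32 = 672 */
	return 0;
}
```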
|
| page_isolation.c | 184 | check_unmovable_start = max(page_to_pfn(page), start_pfn); | set_migratetype_isolate() |
| | 337 | start_pfn = max(ALIGN_DOWN(isolate_pageblock, MAX_ORDER_NR_PAGES), | isolate_single_pageblock() |
|
| compaction.c | 308 | block_pfn = max(block_pfn, zone->zone_start_pfn); | __reset_isolation_pfn() |
| | 1472 | start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn); | fast_isolate_around() |
| | 1506 | unsigned int limit = max(1U, freelist_scan_limit(cc) >> 1); | fast_isolate_freepages() |
| | 1568 | highest = max(pageblock_start_pfn(pfn), | fast_isolate_freepages() |
| | 1628 | limit = max(1U, limit >> 1); | fast_isolate_freepages() |
| | 1735 | block_start_pfn = max(next_pfn, low_pfn); | isolate_freepages() |
| | 2637 | last_migrated_pfn = max(cc->zone->zone_start_pfn, | compact_zone() |
| | 2845 | rc = max(status, rc); | try_to_compact_pages() |
|
| mempool.c | 209 | pool->elements = kmalloc_array_node(max(1, min_nr), sizeof(void *), | mempool_init_node() |
| | 218 | while (pool->curr_nr < max(1, pool->min_nr)) { | mempool_init_node() |
|
| vmscan.c | 2534 | cgroup_size = max(cgroup_size, protection); | apply_proportional_protection() |
| | 2543 | scan = max(scan, SWAP_CLUSTER_MAX); | apply_proportional_protection() |
| | 3416 | *vm_start = max(start, args->vma->vm_start); | get_next_vma() |
| | 3798 | walk->next_addr = max(end, args->vma->vm_start); | walk_pud_range() |
| | 4149 | total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); | lruvec_is_sizable() |
| | 4264 | start = max(addr & PMD_MASK, vma->vm_start); | lru_gen_look_around() |
| | 4823 | size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); | should_run_aging() |
| | 5477 | size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); | lru_gen_seq_show() |
| | 6703 | .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), | try_to_free_mem_cgroup_pages() |
| | 7686 | .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), | node_reclaim() |
| | [all …] | | |
|
| /mm/damon/tests/ |
| sysfs-kunit.h | 26 | static int __damon_sysfs_test_get_any_pid(int min, int max) | __damon_sysfs_test_get_any_pid() (argument) |
| | 31 | for (i = min; i <= max; i++) { | __damon_sysfs_test_get_any_pid() |
|