| /mm/damon/tests/ |
| core-kunit.h |
      20  struct damon_target *t;  in damon_test_regions() local
      27  t = damon_new_target();  in damon_test_regions()
      30  damon_add_region(r, t);  in damon_test_regions()
      36  damon_free_target(t);  in damon_test_regions()
      41  struct damon_target *t;  in nr_damon_targets() local
      53  struct damon_target *t;  in damon_test_target() local
      55  t = damon_new_target();  in damon_test_target()
      58  damon_add_target(c, t);  in damon_test_target()
     145  damon_free_target(t);  in damon_test_split_at()
     177  damon_free_target(t);  in damon_test_merge_two()
    [all …]
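
The core-kunit.h hits above trace a full damon_target lifecycle: a target is allocated, regions are attached to it, and it is freed again. The sketch below is illustrative only, not code from the test file; example_target_lifecycle() and the 0x1000-0x2000 range are invented, while the DAMON helpers are the ones the listing shows (exact signatures can vary between kernel versions).

        #include <linux/damon.h>
        #include <linux/errno.h>

        /* Sketch: create a target, attach one region, free everything. */
        static int example_target_lifecycle(void)
        {
                struct damon_target *t;
                struct damon_region *r;

                t = damon_new_target();
                if (!t)
                        return -ENOMEM;

                /* Hypothetical address range; a region spans [start, end). */
                r = damon_new_region(0x1000, 0x2000);
                if (!r) {
                        damon_free_target(t);
                        return -ENOMEM;
                }
                damon_add_region(r, t);         /* bumps t->nr_regions, see core.c */

                damon_free_target(t);           /* frees the attached regions too */
                return 0;
        }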
|
| vaddr-kunit.h |
      99  damon_for_each_region(r, t) {  in __nth_region_of()
     134  struct damon_target *t;  in damon_do_test_apply_three_regions() local
     138  t = damon_new_target();  in damon_do_test_apply_three_regions()
     141  damon_add_region(r, t);  in damon_do_test_apply_three_regions()
     147  r = __nth_region_of(t, i);  in damon_do_test_apply_three_regions()
     152  damon_destroy_target(t, NULL);  in damon_do_test_apply_three_regions()
     255  damon_add_region(r, t);  in damon_test_split_evenly_fail()
     260  damon_for_each_region(r, t) {  in damon_test_split_evenly_fail()
     265  damon_free_target(t);  in damon_test_split_evenly_fail()
     276  damon_add_region(r, t);  in damon_test_split_evenly_succ()
    [all …]
|
| sysfs-kunit.h |
      17  struct damon_target *t;  in nr_damon_targets() local
      20  damon_for_each_target(t, ctx)  in nr_damon_targets()
|
| /mm/damon/ |
| core.c |
     144  t->nr_regions++;  in damon_add_region()
     150  t->nr_regions--;  in damon_del_region()
     469  t = kmalloc(sizeof(*t), GFP_KERNEL);  in damon_new_target()
     470  if (!t)  in damon_new_target()
     473  t->pid = NULL;  in damon_new_target()
     474  t->nr_regions = 0;  in damon_new_target()
     478  return t;  in damon_new_target()
     502  kfree(t);  in damon_free_target()
    1086  return t;  in damon_nth_target()
    1671  if (ti == t)  in damos_filter_match()
    [all …]
|
| vaddr.c |
     185  mm = damon_get_mm(t);  in damon_va_three_regions()
     250  if (ti == t)  in __damon_va_init_regions()
     272  damon_add_region(r, t);  in __damon_va_init_regions()
     282  struct damon_target *t;  in damon_va_init() local
     286  if (!damon_nr_regions(t))  in damon_va_init()
     297  struct damon_target *t;  in damon_va_update() local
     421  struct damon_target *t;  in damon_va_prepare_access_checks() local
     426  mm = damon_get_mm(t);  in damon_va_prepare_access_checks()
     592  struct damon_target *t;  in damon_va_check_accesses() local
     599  mm = damon_get_mm(t);  in damon_va_check_accesses()
    [all …]
|
| stat.c |
      46  struct damon_target *t;  in damon_stat_set_estimated_memory_bandwidth() local
      50  damon_for_each_target(t, c) {  in damon_stat_set_estimated_memory_bandwidth()
      51  damon_for_each_region(r, t)  in damon_stat_set_estimated_memory_bandwidth()
      78  struct damon_target *t;  in damon_stat_sort_regions() local
      84  damon_for_each_target(t, c) {  in damon_stat_sort_regions()
      86  region_pointers = kmalloc_array(damon_nr_regions(t),  in damon_stat_sort_regions()
      90  damon_for_each_region(r, t) {  in damon_stat_sort_regions()
|
| paddr.c |
      41  struct damon_target *t;  in damon_pa_prepare_access_checks() local
      44  damon_for_each_target(t, ctx) {  in damon_pa_prepare_access_checks()
      45  damon_for_each_region(r, t)  in damon_pa_prepare_access_checks()
      86  struct damon_target *t;  in damon_pa_check_accesses() local
      90  damon_for_each_target(t, ctx) {  in damon_pa_check_accesses()
      91  damon_for_each_region(r, t) {  in damon_pa_check_accesses()
     301  struct damon_target *t, struct damon_region *r,  in damon_pa_apply_scheme() argument
     324  struct damon_target *t, struct damon_region *r,  in damon_pa_scheme_score() argument
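
The paddr.c hits follow DAMON's usual two-level walk: damon_for_each_target() over the context, then damon_for_each_region() over each target. A minimal illustration of that pattern follows; the function name and the byte-counting body are invented for the example, not the real access-check logic, and damon_sz_region() is the helper from include/linux/damon.h in recent kernels.

        #include <linux/damon.h>

        /* Sketch: sum up how many bytes the context currently monitors. */
        static unsigned long example_monitored_bytes(struct damon_ctx *ctx)
        {
                struct damon_target *t;
                struct damon_region *r;
                unsigned long sz = 0;

                damon_for_each_target(t, ctx) {
                        damon_for_each_region(r, t)
                                sz += damon_sz_region(r);      /* r->ar.end - r->ar.start */
                }
                return sz;
        }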
|
| sysfs.c |
    1299  static int damon_sysfs_set_regions(struct damon_target *t,  in damon_sysfs_set_regions() argument
    1322  err = damon_set_regions(t, ranges, sysfs_regions->nr);  in damon_sysfs_set_regions()
    1332  struct damon_target *t = damon_new_target();  in damon_sysfs_add_target() local
    1334  if (!t)  in damon_sysfs_add_target()
    1336  damon_add_target(ctx, t);  in damon_sysfs_add_target()
    1338  t->pid = find_get_pid(sys_target->pid);  in damon_sysfs_add_target()
    1339  if (!t->pid)  in damon_sysfs_add_target()
    1343  return damon_sysfs_set_regions(t, sys_target->regions);  in damon_sysfs_add_target()
    1599  struct damon_target *t, struct damon_region *r,  in damon_sysfs_schemes_tried_regions_upd_one() argument
    1607  ctx, t, r, s, walk_data->total_bytes_only,  in damon_sysfs_schemes_tried_regions_upd_one()
|
| sysfs-common.h |
      49  struct damon_ctx *ctx, struct damon_target *t,
|
| /mm/ |
| vmstat.c |
     351  long t;  in __mod_zone_page_state() local
     382  long t;  in __mod_node_page_state() local
     439  s8 v, t;  in __inc_zone_state() local
     446  if (unlikely(v > t)) {  in __inc_zone_state()
     460  s8 v, t;  in __inc_node_state() local
     495  s8 v, t;  in __dec_zone_state() local
     516  s8 v, t;  in __dec_node_state() local
     565  long n, t, z;  in mod_zone_state() local
     586  if (abs(n) > t) {  in mod_zone_state()
     623  long n, t, z;  in mod_node_state() local
    [all …]
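
In the vmstat.c hits, t is the per-cpu stat_threshold against which the per-cpu delta v is compared; only when the delta overshoots the threshold is it folded into the global counter. The following is a rough paraphrase of the __inc_zone_state() pattern; the field and helper names come from my reading of mm/vmstat.c and may differ between kernel versions.

        #include <linux/mm.h>
        #include <linux/vmstat.h>

        static void example_inc_zone_state(struct zone *zone, enum zone_stat_item item)
        {
                struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
                s8 __percpu *p = pcp->vm_stat_diff + item;
                s8 v, t;

                v = __this_cpu_inc_return(*p);
                t = __this_cpu_read(pcp->stat_threshold);
                if (unlikely(v > t)) {
                        s8 overstep = t >> 1;

                        /* Fold the per-cpu delta into the global counter... */
                        zone_page_state_add(v + overstep, zone, item);
                        /* ...and leave headroom so the next fold comes later. */
                        __this_cpu_write(*p, -overstep);
                }
        }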
|
| memory-failure.c |
     357  pfn, t->comm, task_pid_nr(t));  in kill_proc()
     370  addr_lsb, t);  in kill_proc()
     373  t->comm, task_pid_nr(t), ret);  in kill_proc()
     566  struct task_struct *t;  in find_early_kill_thread() local
     568  for_each_thread(tsk, t) {  in find_early_kill_thread()
     571  return t;  in find_early_kill_thread()
     574  return t;  in find_early_kill_thread()
     629  if (!t)  in collect_procs_anon()
     634  if (vma->vm_mm != t->mm)  in collect_procs_anon()
     663  if (!t)  in collect_procs_file()
    [all …]
|
| oom_kill.c |
     136  struct task_struct *t;  in find_lock_task_mm() local
     140  for_each_thread(p, t) {  in find_lock_task_mm()
     141  task_lock(t);  in find_lock_task_mm()
     142  if (likely(t->mm))  in find_lock_task_mm()
     144  task_unlock(t);  in find_lock_task_mm()
     146  t = NULL;  in find_lock_task_mm()
     150  return t;  in find_lock_task_mm()
     495  struct task_struct *t;  in process_shares_mm() local
     497  for_each_thread(p, t) {  in process_shares_mm()
     498  struct mm_struct *t_mm = READ_ONCE(t->mm);  in process_shares_mm()
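
The find_lock_task_mm() hits above are essentially the whole helper: scan p's thread group for a thread that still owns an mm and return it with task_lock() held. Reassembled below as a sketch from the hits and my recollection of mm/oom_kill.c; the example_ name is mine, and the caller is responsible for task_unlock() on the returned task.

        #include <linux/sched.h>
        #include <linux/sched/signal.h>
        #include <linux/sched/task.h>

        static struct task_struct *example_find_lock_task_mm(struct task_struct *p)
        {
                struct task_struct *t;

                rcu_read_lock();
                for_each_thread(p, t) {
                        task_lock(t);
                        if (likely(t->mm))
                                goto found;     /* returned with task_lock held */
                        task_unlock(t);
                }
                t = NULL;
        found:
                rcu_read_unlock();
                return t;
        }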
|
| page-writeback.c |
     609  static void writeout_period(struct timer_list *t)  in writeout_period() argument
    1578  unsigned long t;  in wb_max_pause() local
    1588  t++;  in wb_max_pause()
    1590  return min_t(unsigned long, t, MAX_PAUSE);  in wb_max_pause()
    1601  long t; /* target pause */  in wb_min_pause() local
    1606  t = max(1, HZ / 100);  in wb_min_pause()
    1615  t += (hi - lo) * (10 * HZ) / 1024;  in wb_min_pause()
    1635  t = min(t, 1 + max_pause / 2);  in wb_min_pause()
    1647  t = max_pause;  in wb_min_pause()
    1657  t = max_pause;  in wb_min_pause()
    [all …]
|
| slub.c |
    1021  if (!t->addr)  in print_track()
    1025  s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);  in print_track()
    6126  struct slab *t;  in __kmem_cache_do_shrink() local
    6605  if (t->max)  in free_loc_track()
    6622  memcpy(l, t->loc, sizeof(struct location) * t->count);  in alloc_loc_track()
    6625  t->max = max;  in alloc_loc_track()
    6626  t->loc = l;  in alloc_loc_track()
    6698  if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))  in add_location()
    6705  t->count++;  in add_location()
    7751  if (!t)  in slab_debug_trace_open()
    [all …]
|
| util.c |
     320  struct task_struct * __maybe_unused t = current;  in vma_is_stack_for_current() local
     322  return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));  in vma_is_stack_for_current()
     779  struct ctl_table t;  in overcommit_policy_handler() local
     795  t = *table;  in overcommit_policy_handler()
     796  t.data = &new_policy;  in overcommit_policy_handler()
     797  ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);  in overcommit_policy_handler()
|
| memcontrol-v1.c |
     432  struct mem_cgroup_threshold_ary *t;  in __mem_cgroup_threshold() local
     438  t = rcu_dereference(memcg->thresholds.primary);  in __mem_cgroup_threshold()
     440  t = rcu_dereference(memcg->memsw_thresholds.primary);  in __mem_cgroup_threshold()
     442  if (!t)  in __mem_cgroup_threshold()
     452  i = t->current_threshold;  in __mem_cgroup_threshold()
     460  for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)  in __mem_cgroup_threshold()
     461  eventfd_signal(t->entries[i].eventfd);  in __mem_cgroup_threshold()
     472  for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)  in __mem_cgroup_threshold()
     473  eventfd_signal(t->entries[i].eventfd);  in __mem_cgroup_threshold()
     476  t->current_threshold = i - 1;  in __mem_cgroup_threshold()
|
| mm_init.c |
    1439  unsigned int order, t;  in zone_init_free_lists() local
    1440  for_each_migratetype_order(order, t) {  in zone_init_free_lists()
    1441  INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);  in zone_init_free_lists()
    2104  unsigned long t;  in deferred_init_maxorder() local
    2109  t = min(mo_pfn, *end_pfn);  in deferred_init_maxorder()
    2110  nr_pages += deferred_init_pages(zone, *start_pfn, t);  in deferred_init_maxorder()
    2122  unsigned long t;  in deferred_init_maxorder() local
    2127  t = min(mo_pfn, epfn);  in deferred_init_maxorder()
    2128  deferred_free_pages(spfn, t - spfn);  in deferred_init_maxorder()
|
| hugetlb.c |
     634  if (iter->from >= t) {  in add_reservation_in_range()
     656  if (last_accounted_offset < t)  in add_reservation_in_range()
     765  VM_BUG_ON(t - f <= 1);  in region_add()
     882  if (rg->from >= t)  in region_del()
     907  del += t - f;  in region_del()
     909  resv, rg, t - f, false);  in region_del()
     912  nrg->from = t;  in region_del()
     940  del += t - rg->from;  in region_del()
     941  rg->from = t;  in region_del()
    1003  if (rg->from >= t)  in region_count()
    [all …]
|
| hugetlb_cgroup.c |
      31  #define MEMFILE_OFFSET(t, m0) (((offsetof(t, m0) << 16) | sizeof_field(t, m0)))  argument
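
MEMFILE_OFFSET() at line 31 packs two facts about a struct member into one integer: the member's offset in the upper bits and its size in the low 16 bits. A hypothetical decoder, only to make the encoding explicit; the EXAMPLE_* names are invented and not part of hugetlb_cgroup.c.

        #include <linux/stddef.h>       /* offsetof(), sizeof_field() */

        #define EXAMPLE_MEMFILE_OFFSET(t, m)    ((offsetof(t, m) << 16) | sizeof_field(t, m))
        #define EXAMPLE_MEMFILE_OFFSET_OF(val)  ((val) >> 16)           /* member offset */
        #define EXAMPLE_MEMFILE_SIZE_OF(val)    ((val) & 0xffff)        /* member size  */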
|
| Kconfig |
     358  userspace. Since that isn't generally a problem on no-MMU systems,
     476  # Don't discard allocated memory used to track "memory" and "reserved" memblocks
     498  # feature. If you are not sure, don't touch it.
     653  invocations for high order memory requests. You shouldn't
     866  madvise(MADV_HUGEPAGE) but it won't risk to increase the
    1036  Don't change if unsure.
|
| ksm.c |
    3109  struct task_struct *t =  in collect_procs_ksm() local
    3111  if (!t)  in collect_procs_ksm()
    3117  if (vma->vm_mm == t->mm) {  in collect_procs_ksm()
    3119  add_to_kill_ksm(t, page, vma, to_kill,  in collect_procs_ksm()
|
| mempolicy.c |
    1600  unsigned long t;  in get_nodes() local
    1602  if (get_bitmap(&t, &nmask[(maxnode - 1) / BITS_PER_LONG], bits))  in get_nodes()
    1609  t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);  in get_nodes()
    1611  if (t)  in get_nodes()
|
| slab_common.c |
    1882  schedule_page_work_fn(struct hrtimer *t)  in schedule_page_work_fn() argument
    1885  container_of(t, struct kfree_rcu_cpu, hrtimer);  in schedule_page_work_fn()
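
schedule_page_work_fn() shows the standard hrtimer-callback idiom: the callback receives only the embedded struct hrtimer *t, and container_of() recovers the surrounding per-cpu structure. A generic sketch of the same idiom; the example_ type and field layout are made up (the real call resolves to struct kfree_rcu_cpu).

        #include <linux/hrtimer.h>
        #include <linux/container_of.h>

        struct example_cpu_state {
                struct hrtimer hrtimer;         /* embedded timer */
                /* ... other per-cpu fields ... */
        };

        static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
        {
                struct example_cpu_state *st =
                        container_of(t, struct example_cpu_state, hrtimer);

                /* use st here, e.g. queue deferred work */
                (void)st;
                return HRTIMER_NORESTART;
        }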
|
| Kconfig.debug |
      28  For architectures which don't enable ARCH_SUPPORTS_DEBUG_PAGEALLOC,
|
| vmalloc.c |
    3350  struct llist_node *t, *llnode;  in delayed_vfree_work() local
    3352  llist_for_each_safe(llnode, t, llist_del_all(&p->list))  in delayed_vfree_work()
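
In delayed_vfree_work(), t is the "next" cursor of llist_for_each_safe(): the whole lock-free list is detached atomically with llist_del_all() and then walked with the safe iterator because each node is consumed during the walk. A generic sketch of the same idiom; struct example_node and example_drain() are invented, and the node here is simply kfree()d rather than vfree()d.

        #include <linux/container_of.h>
        #include <linux/llist.h>
        #include <linux/slab.h>

        struct example_node {
                struct llist_node llnode;
        };

        static void example_drain(struct llist_head *list)
        {
                struct llist_node *llnode, *t;

                /*
                 * Detach everything atomically, then use the _safe variant
                 * because each entry is freed while iterating.
                 */
                llist_for_each_safe(llnode, t, llist_del_all(list))
                        kfree(container_of(llnode, struct example_node, llnode));
        }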
|