/linux/mm/
memcontrol.c
     269  memcg = parent_mem_cgroup(memcg);    in page_cgroup_ino()
     719  memcg = pn->memcg;    in __mod_memcg_lruvec_state()
    1587  .memcg = memcg,    in mem_cgroup_out_of_memory()
    1674  for (; memcg; memcg = parent_mem_cgroup(memcg)) {    in mem_cgroup_get_oom_group()
    3079  objcg->memcg = memcg;    in memcg_online_kmem()
    3086  memcg->kmemcg_id = memcg->id.id;    in memcg_online_kmem()
    3439  pn->memcg = memcg;    in alloc_mem_cgroup_per_node_info()
    4656  if (ug->memcg != memcg) {    in uncharge_folio()
    4661  ug->memcg = memcg;    in uncharge_folio()
    4933  memcg = parent_mem_cgroup(memcg);    in mem_cgroup_id_get_online()
    [all …]
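
Several of these hits (lines 1674 and 4933 here, and memcg1_update_tree() below) use the same upward walk over the cgroup hierarchy. A minimal sketch of that idiom, assuming only parent_mem_cgroup() as shown in the hits; memcg_depth() is a hypothetical illustration, not a kernel function:

    #include <linux/memcontrol.h>

    /* Count how deep @memcg sits in the hierarchy by walking toward the root. */
    static int memcg_depth(struct mem_cgroup *memcg)
    {
            int depth = 0;

            /* Visit @memcg itself, then each ancestor; the root's parent is NULL. */
            for (; memcg; memcg = parent_mem_cgroup(memcg))
                    depth++;

            return depth;
    }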
|
memcontrol-v1.h
      16  if (mem_cgroup_is_root(memcg))    in try_charge()
      19  return try_charge_memcg(memcg, gfp_mask, nr_pages);    in try_charge()
      72  bool memcg1_alloc_events(struct mem_cgroup *memcg);
      73  void memcg1_free_events(struct mem_cgroup *memcg);
      75  void memcg1_memcg_init(struct mem_cgroup *memcg);
      80  WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);    in memcg1_soft_limit_reset()
      89  void memcg1_css_offline(struct mem_cgroup *memcg);
     101  void memcg1_oom_recover(struct mem_cgroup *memcg);
     113  return memcg->tcpmem_active;    in memcg1_tcpmem_active()
     119  page_counter_uncharge(&memcg->tcpmem, nr_pages);    in memcg1_uncharge_skmem()
    [all …]
|
memcontrol-v1.c
     223  for (; memcg; memcg = parent_mem_cgroup(memcg)) {    in memcg1_update_tree()
     530  if (memcg && memcg->move_lock_task == current) {    in __folio_memcg_unlock()
    1191  mc.to = memcg;    in memcg1_can_attach()
    1461  while (memcg) {    in mem_cgroup_threshold()
    1466  memcg = parent_mem_cgroup(memcg);    in mem_cgroup_threshold()
    1861  struct mem_cgroup *memcg = event->memcg;    in memcg_event_remove() local
    1885  struct mem_cgroup *memcg = event->memcg;    in memcg_event_wake() local
    1968  event->memcg = memcg;    in memcg_write_event_control()
    2223  if (memcg && memcg->under_oom)    in memcg1_oom_recover()
    2251  if (!memcg)    in mem_cgroup_oom_synchronize()
    [all …]
|
shrinker.c
      69  pn = memcg->nodeinfo[nid];    in free_shrinker_info()
     102  free_shrinker_info(memcg);    in alloc_shrinker_info()
     155  struct mem_cgroup *memcg;    in expand_shrinker_info() local
     173  } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);    in expand_shrinker_info()
     198  if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {    in set_shrinker_bit()
     344  if (sc->memcg &&    in xchg_nr_deferred()
     347  sc->memcg);    in xchg_nr_deferred()
     361  if (sc->memcg &&    in add_nr_deferred()
     364  sc->memcg);    in add_nr_deferred()
     531  .memcg = memcg,    in shrink_slab_memcg()
    [all …]
|
shrinker_debug.c
      19  struct mem_cgroup *memcg,    in shrinker_count_objects() argument
      30  .memcg = memcg,    in shrinker_count_objects()
      51  struct mem_cgroup *memcg;    in shrinker_debugfs_count_show() local
      66  if (memcg && !mem_cgroup_online(memcg))    in shrinker_debugfs_count_show()
      89  } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);    in shrinker_debugfs_count_show()
     113  struct mem_cgroup *memcg = NULL;    in shrinker_debugfs_scan_write() local
     133  if (!memcg || IS_ERR(memcg))    in shrinker_debugfs_scan_write()
     136  if (!mem_cgroup_online(memcg)) {    in shrinker_debugfs_scan_write()
     137  mem_cgroup_put(memcg);    in shrinker_debugfs_scan_write()
     145  sc.memcg = memcg;    in shrinker_debugfs_scan_write()
    [all …]
|
vmpressure.c
      77  struct mem_cgroup *memcg = vmpressure_to_memcg(vmpr);    in vmpressure_parent() local
      79  memcg = parent_mem_cgroup(memcg);    in vmpressure_parent()
      80  if (!memcg)    in vmpressure_parent()
      82  return memcg_to_vmpressure(memcg);    in vmpressure_parent()
     255  vmpr = memcg_to_vmpressure(memcg);    in vmpressure()
     295  if (!memcg || mem_cgroup_is_root(memcg))    in vmpressure()
     319  WRITE_ONCE(memcg->socket_pressure, jiffies + HZ);    in vmpressure()
     351  vmpressure(gfp, memcg, true, vmpressure_win, 0);    in vmpressure_prio()
     374  int vmpressure_register_event(struct mem_cgroup *memcg,    in vmpressure_register_event() argument
     377  struct vmpressure *vmpr = memcg_to_vmpressure(memcg);    in vmpressure_register_event()
    [all …]
|
list_lru.c
      90  struct mem_cgroup *memcg)    in list_lru_add() argument
     130  struct mem_cgroup *memcg)    in list_lru_del() argument
     182  int nid, struct mem_cgroup *memcg)    in list_lru_count_one() argument
     440  int src_idx = memcg->kmemcg_id;    in memcg_reparent_list_lrus()
     456  css_for_each_descendant_pre(css, &memcg->css) {    in memcg_reparent_list_lrus()
     473  int idx = memcg->kmemcg_id;    in memcg_list_lru_allocated()
     485  struct mem_cgroup *memcg;    in memcg_list_lru_alloc() member
     502  for (i = 0; memcg; memcg = parent_mem_cgroup(memcg), i++) {    in memcg_list_lru_alloc()
     503  if (memcg_list_lru_allocated(memcg, lru))    in memcg_list_lru_alloc()
     506  table[i].memcg = memcg;    in memcg_list_lru_alloc()
    [all …]
|
zswap.c
     702  struct mem_cgroup *memcg;    in zswap_lru_add() local
     726  struct mem_cgroup *memcg;    in zswap_lru_del() local
    1204  struct mem_cgroup *memcg = sc->memcg;    in zswap_shrinker_count() local
    1300  if (memcg && !mem_cgroup_online(memcg))    in shrink_memcg()
    1367  } while (memcg && !mem_cgroup_tryget_online(memcg));    in shrink_worker()
    1370  if (!memcg) {    in shrink_worker()
    1384  mem_cgroup_put(memcg);    in shrink_worker()
    1430  mem_cgroup_put(memcg);    in zswap_store()
    1433  mem_cgroup_put(memcg);    in zswap_store()
    1454  mem_cgroup_put(memcg);    in zswap_store()
    [all …]
|
mmap_lock.c
      72  struct mem_cgroup *memcg;    in get_mm_memcg_path() local
      78  memcg = get_mem_cgroup_from_mm(mm);    in get_mm_memcg_path()
      79  if (memcg == NULL)    in get_mm_memcg_path()
      81  if (memcg->css.cgroup)    in get_mm_memcg_path()
      82  cgroup_path(memcg->css.cgroup, buf, buflen);    in get_mm_memcg_path()
      83  css_put(&memcg->css);    in get_mm_memcg_path()
|
vmscan.c
    2629  if (memcg) {    in get_lruvec()
    2780  if (memcg)    in get_mm_list()
    2820  mm->lru_gen.memcg = memcg;    in lru_gen_add_mm()
    2848  memcg = mm->lru_gen.memcg;    in lru_gen_del_mm()
    2897  if (memcg == mm->lru_gen.memcg)    in lru_gen_migrate_mm()
    4015  } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));    in lru_gen_age_node()
    5106  } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));    in lru_gen_change_state()
    5217  } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));    in lru_gen_seq_start()
    5240  memcg = mem_cgroup_iter(NULL, memcg, NULL);    in lru_gen_seq_next()
    5321  if (memcg)    in lru_gen_seq_show()
    [all …]
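
The repeated `} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));` hits here, and the matching ones in shrinker.c and shrinker_debug.c, are the standard whole-hierarchy iteration. A minimal sketch of the idiom, assuming only mem_cgroup_iter()/mem_cgroup_iter_break() as declared in memcontrol.h; the per-memcg body is deliberately left empty:

    #include <linux/memcontrol.h>

    /*
     * Visit every memory cgroup in the system, root included. The iterator
     * takes a css reference on the group it returns and drops the reference
     * on the previous one, so the loop is safe against concurrent cgroup
     * removal; an early exit must call mem_cgroup_iter_break() to drop the
     * last reference.
     */
    static void visit_all_memcgs(void)
    {
            struct mem_cgroup *memcg = mem_cgroup_iter(NULL, NULL, NULL);

            do {
                    /* ... per-memcg work ... */
            } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
    }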
|
workingset.c
     248  lruvec = mem_cgroup_lruvec(memcg, pgdat);    in lru_gen_eviction()
     268  struct mem_cgroup *memcg;    in lru_gen_test_recent() local
     273  memcg = mem_cgroup_from_id(memcg_id);    in lru_gen_test_recent()
     274  *lruvec = mem_cgroup_lruvec(memcg, pgdat);    in lru_gen_test_recent()
     542  struct mem_cgroup *memcg;    in workingset_refault() local
     563  memcg = folio_memcg(folio);    in workingset_refault()
     565  lruvec = mem_cgroup_lruvec(memcg, pgdat);    in workingset_refault()
     594  struct mem_cgroup *memcg;    in workingset_activation() local
     604  memcg = folio_memcg_rcu(folio);    in workingset_activation()
     605  if (!mem_cgroup_disabled() && !memcg)    in workingset_activation()
    [all …]
|
oom_kill.c
      74  return oc->memcg != NULL;    in is_memcg_oom()
     261  oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;    in constrained_alloc()
     370  mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);    in select_bad_process()
     431  mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);    in dump_tasks()
     449  mem_cgroup_print_oom_context(oc->memcg, victim);    in dump_oom_victim()
     464  mem_cgroup_print_oom_meminfo(oc->memcg);    in dump_header()
    1044  oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);    in oom_kill_process()
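
The select_bad_process() and dump_tasks() hits both funnel through mem_cgroup_scan_tasks(), which applies a callback to every task attached to a memcg hierarchy and stops early when the callback returns nonzero. A hedged sketch of that contract; count_one_task() and count_memcg_tasks() are hypothetical illustrations, not kernel functions:

    #include <linux/memcontrol.h>
    #include <linux/sched.h>

    /* Callback: bump the counter and return 0 so the scan keeps going. */
    static int count_one_task(struct task_struct *task, void *arg)
    {
            (*(unsigned int *)arg)++;
            return 0;
    }

    /* Count the tasks attached to @memcg and its descendants. */
    static unsigned int count_memcg_tasks(struct mem_cgroup *memcg)
    {
            unsigned int nr = 0;

            mem_cgroup_scan_tasks(memcg, count_one_task, &nr);
            return nr;
    }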
|
/linux/include/linux/
memcontrol.h
     752  if (!memcg)    in mem_cgroup_lruvec()
     833  return !memcg || css_tryget(&memcg->css);    in mem_cgroup_tryget()
     838  return !memcg || css_tryget_online(&memcg->css);    in mem_cgroup_tryget_online()
     843  if (memcg)    in mem_cgroup_put()
     869  return memcg ? cgroup_ino(memcg->css.cgroup) : 0;    in mem_cgroup_ino()
     986  if (memcg)    in mod_memcg_page_state()
    1030  if (memcg)    in count_memcg_folio_events()
    1076  } while ((memcg = parent_mem_cgroup(memcg)) &&    in memcg_memory_event()
    1546  memcg = parent_mem_cgroup(memcg);    in parent_lruvec()
    1673  } while ((memcg = parent_mem_cgroup(memcg)));    in mem_cgroup_under_socket_pressure()
    [all …]
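
The mem_cgroup_tryget() and mem_cgroup_put() hits show that both helpers treat a NULL memcg as a successful no-op, which is what lets callers run unchanged when memcg is disabled. A minimal sketch of the resulting pattern; use_memcg_safely() is a hypothetical caller:

    #include <linux/memcontrol.h>

    static void use_memcg_safely(struct mem_cgroup *memcg)
    {
            /* Fails only if @memcg is non-NULL and its css refcount already hit zero. */
            if (!mem_cgroup_tryget(memcg))
                    return;

            /* ... @memcg is pinned here (or NULL, which the helpers tolerate) ... */

            mem_cgroup_put(memcg);  /* NULL-safe css_put() */
    }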
|
vmpressure.h
      33  extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
      35  extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
      39  extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
      41  extern int vmpressure_register_event(struct mem_cgroup *memcg,
      44  extern void vmpressure_unregister_event(struct mem_cgroup *memcg,
      47  static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,    in vmpressure() argument
      49  static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg,    in vmpressure_prio() argument
|
list_lru.h
      70  int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
      72  void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent);
      93  struct mem_cgroup *memcg);
     122  struct mem_cgroup *memcg);
     150  int nid, struct mem_cgroup *memcg);
     156  return list_lru_count_one(lru, sc->nid, sc->memcg);    in list_lru_shrink_count()
     200  int nid, struct mem_cgroup *memcg,
     217  int nid, struct mem_cgroup *memcg,
     228  return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,    in list_lru_shrink_walk()
     236  return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg,    in list_lru_shrink_walk_irq()
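
The list_lru_shrink_count() and list_lru_shrink_walk() wrappers above exist so a memcg-aware shrinker can forward its shrink_control straight to the per-memcg, per-node list. A sketch of the usual wiring, assuming a hypothetical my_lru owned by the subsystem:

    #include <linux/list_lru.h>
    #include <linux/shrinker.h>

    static struct list_lru my_lru;  /* hypothetical subsystem LRU */

    /* ->count_objects(): report only entries charged to sc->memcg on node sc->nid. */
    static unsigned long my_count_objects(struct shrinker *shrink,
                                          struct shrink_control *sc)
    {
            return list_lru_shrink_count(&my_lru, sc);
    }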
|
zswap.h
      33  void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg);
      58  static inline void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg) {}    in zswap_memcg_offline_cleanup() argument
|
swap.h
     415  extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
     621  static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)    in mem_cgroup_swappiness() argument
     628  if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))    in mem_cgroup_swappiness()
     631  return READ_ONCE(memcg->swappiness);    in mem_cgroup_swappiness()
     673  extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
     691  static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)    in mem_cgroup_get_nr_swap_pages() argument
|
mmzone.h
     560  void lru_gen_init_memcg(struct mem_cgroup *memcg);
     561  void lru_gen_exit_memcg(struct mem_cgroup *memcg);
     562  void lru_gen_online_memcg(struct mem_cgroup *memcg);
     563  void lru_gen_offline_memcg(struct mem_cgroup *memcg);
     564  void lru_gen_release_memcg(struct mem_cgroup *memcg);
     565  void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid);
     582  static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)    in lru_gen_init_memcg() argument
     586  static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg)    in lru_gen_exit_memcg() argument
     590  static inline void lru_gen_online_memcg(struct mem_cgroup *memcg)    in lru_gen_online_memcg() argument
     594  static inline void lru_gen_offline_memcg(struct mem_cgroup *memcg)    in lru_gen_offline_memcg() argument
    [all …]
|
/linux/tools/testing/selftests/cgroup/
test_memcontrol.c
     170  char *memcg;    in test_memcg_current_peak() local
     176  if (!memcg)    in test_memcg_current_peak()
     305  free(memcg);    in test_memcg_current_peak()
     619  if (!memcg)    in test_memcg_high()
     683  if (!memcg)    in test_memcg_high_sync()
     744  if (!memcg)    in test_memcg_max()
     838  if (!memcg)    in test_memcg_reclaim()
     950  if (!memcg)    in test_memcg_swap_max_peak()
    1140  if (!memcg)    in test_memcg_oom_events()
    1301  if (!memcg)    in test_memcg_sock()
    [all …]
|
/linux/tools/cgroup/
memcg_slabinfo.py
      42  memcg = container_of(css, 'struct mem_cgroup', 'css')
      43  MEMCGS[css.cgroup.kn.id.value_()] = memcg
     170  memcg = MEMCGS[cgroup_id]
     186  obj_cgroups.add(memcg.objcg.value_())
     188  memcg.objcg_list.address_of_(),
     220  memcg.kmem_caches.address_of_(),
|
/linux/Documentation/admin-guide/cgroup-v1/
memcg_test.rst
       9  Because VM is getting complex (one of reasons is memcg...), memcg's behavior
      10  is complex. This is a document for memcg's internal behavior.
      61  At commit(), the page is associated with the memcg.
     114  But brief explanation of the behavior of memcg around shmem will be
     136  Each memcg has its own vector of LRUs (inactive anon, active anon,
     138  each LRU handled under a single lru_lock for that memcg and node.
     145  9.1 Small limit to memcg.
     248  Besides management of swap is one of complicated parts of memcg,
     275  Out-of-memory caused by memcg's limit will kill tasks under
     276  the memcg. When hierarchy is used, a task under hierarchy
    [all …]
|
/linux/include/linux/sched/
mm.h
     489  set_active_memcg(struct mem_cgroup *memcg)    in set_active_memcg() argument
     495  this_cpu_write(int_active_memcg, memcg);    in set_active_memcg()
     498  current->active_memcg = memcg;    in set_active_memcg()
     505  set_active_memcg(struct mem_cgroup *memcg)    in set_active_memcg() argument
|
/linux/Documentation/translations/zh_CN/mm/
hwpoison.rst
     119  corrupt-filter-memcg
     120  Limit the injection to pages owned by the memgroup, specified by the memcg's inode number.
     130  echo $memcg_ino > /debug/hwpoison/corrupt-filter-memcg
|
/linux/Documentation/admin-guide/mm/
shrinker_debugfs.rst
      14  trigger *count_objects()* and *scan_objects()* callbacks for each memcg and
      59  If the shrinker is not memcg-aware or CONFIG_MEMCG is off, 0 is printed
     112  For a non-memcg-aware shrinker or on a system with no memory
|
/linux/kernel/bpf/
memalloc.c
     207  struct mem_cgroup *memcg = NULL, *old_memcg;    in alloc_bulk() local
     238  memcg = get_memcg(c);    in alloc_bulk()
     239  old_memcg = set_active_memcg(memcg);    in alloc_bulk()
     252  mem_cgroup_put(memcg);    in alloc_bulk()
     997  struct mem_cgroup *memcg, *old_memcg;    in bpf_mem_cache_alloc_flags() local
     999  memcg = get_memcg(c);    in bpf_mem_cache_alloc_flags()
    1000  old_memcg = set_active_memcg(memcg);    in bpf_mem_cache_alloc_flags()
    1005  mem_cgroup_put(memcg);    in bpf_mem_cache_alloc_flags()
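
Both alloc_bulk() and bpf_mem_cache_alloc_flags() above follow the same save/override/restore dance around set_active_memcg() from sched/mm.h. A minimal sketch of that pattern, assuming the caller already holds a reference on the memcg (the bpf code takes one via get_memcg() and drops it with mem_cgroup_put()); charged_alloc() is a hypothetical helper:

    #include <linux/memcontrol.h>
    #include <linux/sched/mm.h>
    #include <linux/slab.h>

    static void *charged_alloc(struct mem_cgroup *memcg, size_t size)
    {
            struct mem_cgroup *old_memcg;
            void *p;

            old_memcg = set_active_memcg(memcg);            /* redirect charging */
            p = kmalloc(size, GFP_KERNEL | __GFP_ACCOUNT);  /* charged to @memcg */
            set_active_memcg(old_memcg);                    /* restore previous */

            return p;
    }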
|