Lines matching refs: memcg
105 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) in memcg_to_vmpressure() argument
107 if (!memcg) in memcg_to_vmpressure()
108 memcg = root_mem_cgroup; in memcg_to_vmpressure()
109 return &memcg->vmpressure; in memcg_to_vmpressure()
191 static void memcg_reparent_objcgs(struct mem_cgroup *memcg, in memcg_reparent_objcgs() argument
196 objcg = rcu_replace_pointer(memcg->objcg, NULL, true); in memcg_reparent_objcgs()
201 list_add(&objcg->list, &memcg->objcg_list); in memcg_reparent_objcgs()
203 list_for_each_entry(iter, &memcg->objcg_list, list) in memcg_reparent_objcgs()
204 WRITE_ONCE(iter->memcg, parent); in memcg_reparent_objcgs()
206 list_splice(&memcg->objcg_list, &parent->objcg_list); in memcg_reparent_objcgs()
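The memcg_reparent_objcgs() lines above show the offline pattern: detach the group's own objcg, repoint every objcg still on the list at the parent, then splice the whole list into the parent. A minimal userspace sketch of that repoint-then-splice step, with plain pointers standing in for the kernel's RCU-protected list primitives (struct group, reparent_objcgs and all names below are invented for illustration):

    #include <stdio.h>

    struct group;
    struct objcg { struct objcg *next; struct group *owner; };
    struct group { struct objcg *objcg_list; };

    /* Repoint every objcg at the parent, then splice the child's list in. */
    static void reparent_objcgs(struct group *child, struct group *parent)
    {
        struct objcg *it, *last = NULL;

        for (it = child->objcg_list; it; it = it->next) {
            it->owner = parent;     /* WRITE_ONCE(iter->memcg, parent) analogue */
            last = it;
        }
        if (last) {                 /* list_splice() analogue */
            last->next = parent->objcg_list;
            parent->objcg_list = child->objcg_list;
            child->objcg_list = NULL;
        }
    }

    int main(void)
    {
        struct group parent = {0}, child = {0};
        struct objcg a = { .owner = &child };
        struct objcg b = { .next = &a, .owner = &child };

        child.objcg_list = &b;
        reparent_objcgs(&child, &parent);
        printf("a reparented: %d, child empty: %d\n",
               a.owner == &parent, child.objcg_list == NULL);
        return 0;
    }

In the kernel the same rewrite is done under a lock with RCU-aware stores, so concurrent readers of iter->memcg observe either the old or the new owner, never a freed one.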
238 struct mem_cgroup *memcg = folio_memcg(folio); in mem_cgroup_css_from_folio() local
240 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) in mem_cgroup_css_from_folio()
241 memcg = root_mem_cgroup; in mem_cgroup_css_from_folio()
243 return &memcg->css; in mem_cgroup_css_from_folio()
261 struct mem_cgroup *memcg; in page_cgroup_ino() local
266 memcg = folio_memcg_check(page_folio(page)); in page_cgroup_ino()
268 while (memcg && !(memcg->css.flags & CSS_ONLINE)) in page_cgroup_ino()
269 memcg = parent_mem_cgroup(memcg); in page_cgroup_ino()
270 if (memcg) in page_cgroup_ino()
271 ino = cgroup_ino(memcg->css.cgroup); in page_cgroup_ino()
563 static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val) in memcg_rstat_updated() argument
572 cgroup_rstat_updated(memcg->css.cgroup, cpu); in memcg_rstat_updated()
573 statc = this_cpu_ptr(memcg->vmstats_percpu); in memcg_rstat_updated()
591 static void do_flush_stats(struct mem_cgroup *memcg) in do_flush_stats() argument
593 if (mem_cgroup_is_root(memcg)) in do_flush_stats()
596 cgroup_rstat_flush(memcg->css.cgroup); in do_flush_stats()
608 void mem_cgroup_flush_stats(struct mem_cgroup *memcg) in mem_cgroup_flush_stats() argument
613 if (!memcg) in mem_cgroup_flush_stats()
614 memcg = root_mem_cgroup; in mem_cgroup_flush_stats()
616 if (memcg_vmstats_needs_flush(memcg->vmstats)) in mem_cgroup_flush_stats()
617 do_flush_stats(memcg); in mem_cgroup_flush_stats()
620 void mem_cgroup_flush_stats_ratelimited(struct mem_cgroup *memcg) in mem_cgroup_flush_stats_ratelimited() argument
624 mem_cgroup_flush_stats(memcg); in mem_cgroup_flush_stats_ratelimited()
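memcg_rstat_updated() accumulates the magnitude of pending updates, and mem_cgroup_flush_stats() only pays for a flush once memcg_vmstats_needs_flush() says enough has queued up. A single-threaded sketch of that threshold-gated batching (FLUSH_THRESHOLD and every name here are hypothetical; the kernel tracks the pending magnitude per CPU and flushes through the cgroup rstat machinery):

    #include <stdio.h>
    #include <stdlib.h>

    #define FLUSH_THRESHOLD 64          /* hypothetical; the kernel derives its own */

    static long state;                  /* value readers see after a flush */
    static long pending;                /* deltas not yet folded into state */
    static unsigned long stats_updates; /* magnitude queued since last flush */

    /* memcg_rstat_updated() analogue: remember how much is outstanding. */
    static void stat_add(long val)
    {
        pending += val;
        stats_updates += labs(val);
    }

    /* mem_cgroup_flush_stats() analogue: flush only when it is worth it. */
    static void flush_if_needed(void)
    {
        if (stats_updates < FLUSH_THRESHOLD)
            return;
        state += pending;
        pending = 0;
        stats_updates = 0;
    }

    int main(void)
    {
        for (int i = 0; i < 100; i++) {
            stat_add(1);
            flush_if_needed();
        }
        printf("state=%ld pending=%ld\n", state, pending);
        return 0;
    }

The point of the threshold is that readers tolerate slightly stale counters in exchange for not walking every CPU on every update.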
637 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx) in memcg_page_state() argument
645 x = READ_ONCE(memcg->vmstats->state[i]); in memcg_page_state()
675 void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx, in __mod_memcg_state() argument
686 __this_cpu_add(memcg->vmstats_percpu->state[i], val); in __mod_memcg_state()
687 memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val)); in __mod_memcg_state()
691 unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx) in memcg_page_state_local() argument
699 x = READ_ONCE(memcg->vmstats->state_local[i]); in memcg_page_state_local()
712 struct mem_cgroup *memcg; in __mod_memcg_lruvec_state() local
719 memcg = pn->memcg; in __mod_memcg_lruvec_state()
741 __this_cpu_add(memcg->vmstats_percpu->state[i], val); in __mod_memcg_lruvec_state()
746 memcg_rstat_updated(memcg, memcg_state_val_in_pages(idx, val)); in __mod_memcg_lruvec_state()
774 struct mem_cgroup *memcg; in __lruvec_stat_mod_folio() local
779 memcg = folio_memcg(folio); in __lruvec_stat_mod_folio()
781 if (!memcg) { in __lruvec_stat_mod_folio()
787 lruvec = mem_cgroup_lruvec(memcg, pgdat); in __lruvec_stat_mod_folio()
796 struct mem_cgroup *memcg; in __mod_lruvec_kmem_state() local
800 memcg = mem_cgroup_from_slab_obj(p); in __mod_lruvec_kmem_state()
808 if (!memcg) { in __mod_lruvec_kmem_state()
811 lruvec = mem_cgroup_lruvec(memcg, pgdat); in __mod_lruvec_kmem_state()
823 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, in __count_memcg_events() argument
835 __this_cpu_add(memcg->vmstats_percpu->events[i], count); in __count_memcg_events()
836 memcg_rstat_updated(memcg, count); in __count_memcg_events()
840 unsigned long memcg_events(struct mem_cgroup *memcg, int event) in memcg_events() argument
847 return READ_ONCE(memcg->vmstats->events[i]); in memcg_events()
850 unsigned long memcg_events_local(struct mem_cgroup *memcg, int event) in memcg_events_local() argument
857 return READ_ONCE(memcg->vmstats->events_local[i]); in memcg_events_local()
895 struct mem_cgroup *memcg; in get_mem_cgroup_from_mm() local
910 memcg = active_memcg(); in get_mem_cgroup_from_mm()
911 if (unlikely(memcg)) { in get_mem_cgroup_from_mm()
913 css_get(&memcg->css); in get_mem_cgroup_from_mm()
914 return memcg; in get_mem_cgroup_from_mm()
923 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); in get_mem_cgroup_from_mm()
924 if (unlikely(!memcg)) in get_mem_cgroup_from_mm()
925 memcg = root_mem_cgroup; in get_mem_cgroup_from_mm()
926 } while (!css_tryget(&memcg->css)); in get_mem_cgroup_from_mm()
928 return memcg; in get_mem_cgroup_from_mm()
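get_mem_cgroup_from_mm() shows the classic RCU lookup loop: read the owner's memcg, fall back to the root if there is none, and retry from the top if css_tryget() finds the candidate already dying. A compilable model of the retry loop using C11 atomics in place of RCU and css refcounting (struct obj, tryget() and owner_obj are stand-ins, not kernel APIs):

    #include <stdatomic.h>
    #include <stdio.h>

    struct obj { atomic_int ref; };

    /* css_tryget() analogue: take a reference only while the count is live. */
    static int tryget(struct obj *o)
    {
        int r = atomic_load(&o->ref);

        while (r > 0)
            if (atomic_compare_exchange_weak(&o->ref, &r, r + 1))
                return 1;
        return 0;
    }

    static struct obj root = { 1 };
    static _Atomic(struct obj *) owner_obj = &root;

    static struct obj *get_obj(void)
    {
        struct obj *o;

        do {                    /* retry if the object died between lookup and get */
            o = atomic_load(&owner_obj);
            if (!o)
                o = &root;      /* NULL falls back to the root group */
        } while (!tryget(o));
        return o;
    }

    int main(void)
    {
        struct obj *o = get_obj();

        printf("ref after get: %d\n", atomic_load(&o->ref));
        return 0;
    }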
937 struct mem_cgroup *memcg; in get_mem_cgroup_from_current() local
944 memcg = mem_cgroup_from_task(current); in get_mem_cgroup_from_current()
945 if (!css_tryget(&memcg->css)) { in get_mem_cgroup_from_current()
950 return memcg; in get_mem_cgroup_from_current()
959 struct mem_cgroup *memcg = folio_memcg(folio); in get_mem_cgroup_from_folio() local
965 if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css))) in get_mem_cgroup_from_folio()
966 memcg = root_mem_cgroup; in get_mem_cgroup_from_folio()
968 return memcg; in get_mem_cgroup_from_folio()
1105 struct mem_cgroup *memcg = dead_memcg; in invalidate_reclaim_iterators() local
1109 __invalidate_reclaim_iterators(memcg, dead_memcg); in invalidate_reclaim_iterators()
1110 last = memcg; in invalidate_reclaim_iterators()
1111 } while ((memcg = parent_mem_cgroup(memcg))); in invalidate_reclaim_iterators()
1137 void mem_cgroup_scan_tasks(struct mem_cgroup *memcg, in mem_cgroup_scan_tasks() argument
1143 BUG_ON(mem_cgroup_is_root(memcg)); in mem_cgroup_scan_tasks()
1145 for_each_mem_cgroup_tree(iter, memcg) { in mem_cgroup_scan_tasks()
1154 mem_cgroup_iter_break(memcg, iter); in mem_cgroup_scan_tasks()
1163 struct mem_cgroup *memcg; in lruvec_memcg_debug() local
1168 memcg = folio_memcg(folio); in lruvec_memcg_debug()
1170 if (!memcg) in lruvec_memcg_debug()
1173 VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio); in lruvec_memcg_debug()
1292 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) in mem_cgroup_margin() argument
1298 count = page_counter_read(&memcg->memory); in mem_cgroup_margin()
1299 limit = READ_ONCE(memcg->memory.max); in mem_cgroup_margin()
1304 count = page_counter_read(&memcg->memsw); in mem_cgroup_margin()
1305 limit = READ_ONCE(memcg->memsw.max); in mem_cgroup_margin()
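mem_cgroup_margin() reads a counter and its limit and reports how much room is left, clamped at zero so a racing overcharge never yields a huge bogus margin; when memsw accounting is on, the same computation is repeated for the memsw counter and the smaller margin wins. The clamp in hypothetical standalone form:

    #include <stdio.h>

    /* mem_cgroup_margin() analogue: pages left below the limit, never negative. */
    static unsigned long margin(unsigned long count, unsigned long limit)
    {
        return count >= limit ? 0 : limit - count;
    }

    int main(void)
    {
        printf("%lu %lu\n", margin(90, 100), margin(120, 100)); /* 10 0 */
        return 0;
    }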
1417 unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item) in memcg_page_state_output() argument
1419 return memcg_page_state(memcg, item) * in memcg_page_state_output()
1423 unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item) in memcg_page_state_local_output() argument
1425 return memcg_page_state_local(memcg, item) * in memcg_page_state_local_output()
1429 static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) in memcg_stat_format() argument
1443 mem_cgroup_flush_stats(memcg); in memcg_stat_format()
1448 size = memcg_page_state_output(memcg, memory_stats[i].idx); in memcg_stat_format()
1452 size += memcg_page_state_output(memcg, in memcg_stat_format()
1460 memcg_events(memcg, PGSCAN_KSWAPD) + in memcg_stat_format()
1461 memcg_events(memcg, PGSCAN_DIRECT) + in memcg_stat_format()
1462 memcg_events(memcg, PGSCAN_KHUGEPAGED)); in memcg_stat_format()
1464 memcg_events(memcg, PGSTEAL_KSWAPD) + in memcg_stat_format()
1465 memcg_events(memcg, PGSTEAL_DIRECT) + in memcg_stat_format()
1466 memcg_events(memcg, PGSTEAL_KHUGEPAGED)); in memcg_stat_format()
1476 memcg_events(memcg, memcg_vm_event_stat[i])); in memcg_stat_format()
1480 static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) in memory_stat_format() argument
1483 memcg_stat_format(memcg, s); in memory_stat_format()
1485 memcg1_stat_format(memcg, s); in memory_stat_format()
1499 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p) in mem_cgroup_print_oom_context() argument
1503 if (memcg) { in mem_cgroup_print_oom_context()
1505 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_context()
1520 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) in mem_cgroup_print_oom_meminfo() argument
1529 K((u64)page_counter_read(&memcg->memory)), in mem_cgroup_print_oom_meminfo()
1530 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt); in mem_cgroup_print_oom_meminfo()
1533 K((u64)page_counter_read(&memcg->swap)), in mem_cgroup_print_oom_meminfo()
1534 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt); in mem_cgroup_print_oom_meminfo()
1538 K((u64)page_counter_read(&memcg->memsw)), in mem_cgroup_print_oom_meminfo()
1539 K((u64)memcg->memsw.max), memcg->memsw.failcnt); in mem_cgroup_print_oom_meminfo()
1541 K((u64)page_counter_read(&memcg->kmem)), in mem_cgroup_print_oom_meminfo()
1542 K((u64)memcg->kmem.max), memcg->kmem.failcnt); in mem_cgroup_print_oom_meminfo()
1547 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_meminfo()
1550 memory_stat_format(memcg, &s); in mem_cgroup_print_oom_meminfo()
1557 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg) in mem_cgroup_get_max() argument
1559 unsigned long max = READ_ONCE(memcg->memory.max); in mem_cgroup_get_max()
1562 if (mem_cgroup_swappiness(memcg)) { in mem_cgroup_get_max()
1564 unsigned long swap = READ_ONCE(memcg->memsw.max) - max; in mem_cgroup_get_max()
1569 if (mem_cgroup_swappiness(memcg)) in mem_cgroup_get_max()
1570 max += min(READ_ONCE(memcg->swap.max), in mem_cgroup_get_max()
1576 unsigned long mem_cgroup_size(struct mem_cgroup *memcg) in mem_cgroup_size() argument
1578 return page_counter_read(&memcg->memory); in mem_cgroup_size()
1581 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, in mem_cgroup_out_of_memory() argument
1587 .memcg = memcg, in mem_cgroup_out_of_memory()
1596 if (mem_cgroup_margin(memcg) >= (1 << order)) in mem_cgroup_out_of_memory()
1614 static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) in mem_cgroup_oom() argument
1621 memcg_memory_event(memcg, MEMCG_OOM); in mem_cgroup_oom()
1623 if (!memcg1_oom_prepare(memcg, &locked)) in mem_cgroup_oom()
1626 ret = mem_cgroup_out_of_memory(memcg, mask, order); in mem_cgroup_oom()
1628 memcg1_oom_finish(memcg, locked); in mem_cgroup_oom()
1647 struct mem_cgroup *memcg; in mem_cgroup_get_oom_group() local
1657 memcg = mem_cgroup_from_task(victim); in mem_cgroup_get_oom_group()
1658 if (mem_cgroup_is_root(memcg)) in mem_cgroup_get_oom_group()
1666 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain))) in mem_cgroup_get_oom_group()
1674 for (; memcg; memcg = parent_mem_cgroup(memcg)) { in mem_cgroup_get_oom_group()
1675 if (READ_ONCE(memcg->oom_group)) in mem_cgroup_get_oom_group()
1676 oom_group = memcg; in mem_cgroup_get_oom_group()
1678 if (memcg == oom_domain) in mem_cgroup_get_oom_group()
1690 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) in mem_cgroup_print_oom_group() argument
1693 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_group()
1732 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) in consume_stock() argument
1746 if (memcg == READ_ONCE(stock->cached) && stock_pages >= nr_pages) { in consume_stock()
1805 static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) in __refill_stock() argument
1811 if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */ in __refill_stock()
1813 css_get(&memcg->css); in __refill_stock()
1814 WRITE_ONCE(stock->cached, memcg); in __refill_stock()
1823 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) in refill_stock() argument
1828 __refill_stock(memcg, nr_pages); in refill_stock()
1853 struct mem_cgroup *memcg; in drain_all_stock() local
1857 memcg = READ_ONCE(stock->cached); in drain_all_stock()
1858 if (memcg && READ_ONCE(stock->nr_pages) && in drain_all_stock()
1859 mem_cgroup_is_descendant(memcg, root_memcg)) in drain_all_stock()
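consume_stock() and refill_stock() implement a per-CPU charge cache: a charge is satisfied locally when the cached memcg matches and enough pre-charged pages remain, and leftover charge is parked back in the cache for the next request; drain_all_stock() walks the caches and flushes any whose cached memcg sits under the hierarchy being drained. A single-CPU model of the cache itself (all names invented; the kernel keeps one stock per CPU and protects it with local locks):

    #include <stdio.h>

    struct group { const char *name; };

    static struct {
        struct group *cached;       /* which group the pre-charge belongs to */
        unsigned int nr_pages;      /* pages already charged, not yet used */
    } stock;

    /* consume_stock() analogue: fast path when the cached group matches. */
    static int consume(struct group *g, unsigned int nr)
    {
        if (stock.cached == g && stock.nr_pages >= nr) {
            stock.nr_pages -= nr;
            return 1;
        }
        return 0;                   /* caller falls back to the slow path */
    }

    /* refill_stock() analogue: park leftover charge for later. */
    static void refill(struct group *g, unsigned int nr)
    {
        if (stock.cached != g) {    /* reset if necessary */
            stock.cached = g;
            stock.nr_pages = 0;
        }
        stock.nr_pages += nr;
    }

    int main(void)
    {
        struct group a = { "a" };

        refill(&a, 32);
        printf("consume 8: %d, left: %u\n", consume(&a, 8), stock.nr_pages);
        return 0;
    }

Note the reset in refill() simply discards the old count here; in the kernel the displaced stock is first returned to its page counters, a step this sketch omits.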
1887 static unsigned long reclaim_high(struct mem_cgroup *memcg, in reclaim_high() argument
1896 if (page_counter_read(&memcg->memory) <= in reclaim_high()
1897 READ_ONCE(memcg->memory.high)) in reclaim_high()
1900 memcg_memory_event(memcg, MEMCG_HIGH); in reclaim_high()
1903 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages, in reclaim_high()
1908 } while ((memcg = parent_mem_cgroup(memcg)) && in reclaim_high()
1909 !mem_cgroup_is_root(memcg)); in reclaim_high()
1916 struct mem_cgroup *memcg; in high_work_func() local
1918 memcg = container_of(work, struct mem_cgroup, high_work); in high_work_func()
1919 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL); in high_work_func()
1993 static u64 mem_find_max_overage(struct mem_cgroup *memcg) in mem_find_max_overage() argument
1998 overage = calculate_overage(page_counter_read(&memcg->memory), in mem_find_max_overage()
1999 READ_ONCE(memcg->memory.high)); in mem_find_max_overage()
2001 } while ((memcg = parent_mem_cgroup(memcg)) && in mem_find_max_overage()
2002 !mem_cgroup_is_root(memcg)); in mem_find_max_overage()
2007 static u64 swap_find_max_overage(struct mem_cgroup *memcg) in swap_find_max_overage() argument
2012 overage = calculate_overage(page_counter_read(&memcg->swap), in swap_find_max_overage()
2013 READ_ONCE(memcg->swap.high)); in swap_find_max_overage()
2015 memcg_memory_event(memcg, MEMCG_SWAP_HIGH); in swap_find_max_overage()
2017 } while ((memcg = parent_mem_cgroup(memcg)) && in swap_find_max_overage()
2018 !mem_cgroup_is_root(memcg)); in swap_find_max_overage()
2027 static unsigned long calculate_high_delay(struct mem_cgroup *memcg, in calculate_high_delay() argument
2071 struct mem_cgroup *memcg; in mem_cgroup_handle_over_high() local
2077 memcg = get_mem_cgroup_from_mm(current->mm); in mem_cgroup_handle_over_high()
2101 nr_reclaimed = reclaim_high(memcg, in mem_cgroup_handle_over_high()
2109 penalty_jiffies = calculate_high_delay(memcg, nr_pages, in mem_cgroup_handle_over_high()
2110 mem_find_max_overage(memcg)); in mem_cgroup_handle_over_high()
2112 penalty_jiffies += calculate_high_delay(memcg, nr_pages, in mem_cgroup_handle_over_high()
2113 swap_find_max_overage(memcg)); in mem_cgroup_handle_over_high()
2154 css_put(&memcg->css); in mem_cgroup_handle_over_high()
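mem_find_max_overage() and swap_find_max_overage() walk the ancestor path and keep the worst fractional excess over memory.high, which calculate_high_delay() then turns into a throttling penalty for the allocating task. A sketch of the fixed-point overage fold (PRECISION_SHIFT and all names are assumptions for illustration, not kernel constants; the kernel's walk also stops before the root cgroup):

    #include <stdio.h>

    #define PRECISION_SHIFT 20      /* hypothetical fixed-point precision */

    struct group { unsigned long usage, high; struct group *parent; };

    /* calculate_overage() analogue: fractional excess above the high mark. */
    static unsigned long long overage(unsigned long usage, unsigned long high)
    {
        if (usage <= high)
            return 0;
        if (!high)
            high = 1;
        return ((unsigned long long)(usage - high) << PRECISION_SHIFT) / high;
    }

    /* mem_find_max_overage() analogue: worst offender up the hierarchy. */
    static unsigned long long max_overage(struct group *g)
    {
        unsigned long long m = 0, o;

        for (; g; g = g->parent) {
            o = overage(g->usage, g->high);
            if (o > m)
                m = o;
        }
        return m;
    }

    int main(void)
    {
        struct group root = { 300, 200, NULL };
        struct group child = { 120, 100, &root };

        /* root is 50% over, child 20% over: the penalty follows the 50%. */
        printf("max overage: %llu\n", max_overage(&child));
        return 0;
    }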
2157 int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask, in try_charge_memcg() argument
2172 if (consume_stock(memcg, nr_pages)) in try_charge_memcg()
2176 page_counter_try_charge(&memcg->memsw, batch, &counter)) { in try_charge_memcg()
2177 if (page_counter_try_charge(&memcg->memory, batch, &counter)) in try_charge_memcg()
2180 page_counter_uncharge(&memcg->memsw, batch); in try_charge_memcg()
2287 page_counter_charge(&memcg->memory, nr_pages); in try_charge_memcg()
2289 page_counter_charge(&memcg->memsw, nr_pages); in try_charge_memcg()
2295 refill_stock(memcg, batch - nr_pages); in try_charge_memcg()
2309 mem_high = page_counter_read(&memcg->memory) > in try_charge_memcg()
2310 READ_ONCE(memcg->memory.high); in try_charge_memcg()
2311 swap_high = page_counter_read(&memcg->swap) > in try_charge_memcg()
2312 READ_ONCE(memcg->swap.high); in try_charge_memcg()
2317 schedule_work(&memcg->high_work); in try_charge_memcg()
2337 } while ((memcg = parent_mem_cgroup(memcg))); in try_charge_memcg()
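try_charge_memcg() first tries the per-CPU stock, then charges the page counters: with memsw accounting it charges memsw first and memory second, unwinding memsw if memory fails, so either both counters move or neither does; on success it charges a whole batch and parks batch - nr_pages back in the stock. The all-or-nothing ancestor charge is the part worth seeing in isolation; a single-threaded model of a hierarchical counter (the kernel's page_counter does this locklessly with atomics and likewise records which level failed):

    #include <stdio.h>

    struct counter { unsigned long usage, max; struct counter *parent; };

    /* page_counter_try_charge() analogue: charge every level or none. */
    static int try_charge(struct counter *c, unsigned long n,
                          struct counter **failed)
    {
        struct counter *it;

        for (it = c; it; it = it->parent) {
            if (it->usage + n > it->max) {
                *failed = it;
                goto undo;
            }
            it->usage += n;
        }
        return 1;
    undo:
        for (struct counter *u = c; u != it; u = u->parent)
            u->usage -= n;          /* unwind the levels already charged */
        return 0;
    }

    int main(void)
    {
        struct counter root = { 0, 100, NULL };
        struct counter child = { 0, 50, &root };
        struct counter *failed = NULL;

        printf("charge 40: %d\n", try_charge(&child, 40, &failed));
        printf("charge 20: %d (hit limit %lu)\n",
               try_charge(&child, 20, &failed), failed->max);
        return 0;
    }

When the charge fails, the kernel reclaims from the failing level and retries, falling through to OOM only after the retries are exhausted.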
2358 void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) in mem_cgroup_cancel_charge() argument
2360 if (mem_cgroup_is_root(memcg)) in mem_cgroup_cancel_charge()
2363 page_counter_uncharge(&memcg->memory, nr_pages); in mem_cgroup_cancel_charge()
2365 page_counter_uncharge(&memcg->memsw, nr_pages); in mem_cgroup_cancel_charge()
2368 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg) in commit_charge() argument
2380 folio->memcg_data = (unsigned long)memcg; in commit_charge()
2388 void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg) in mem_cgroup_commit_charge() argument
2390 css_get(&memcg->css); in mem_cgroup_commit_charge()
2391 commit_charge(folio, memcg); in mem_cgroup_commit_charge()
2392 memcg1_commit_charge(folio, memcg); in mem_cgroup_commit_charge()
2399 struct mem_cgroup *memcg; in __mod_objcg_mlstate() local
2403 memcg = obj_cgroup_memcg(objcg); in __mod_objcg_mlstate()
2404 lruvec = mem_cgroup_lruvec(memcg, pgdat); in __mod_objcg_mlstate()
2461 static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg) in __get_obj_cgroup_from_memcg() argument
2465 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) { in __get_obj_cgroup_from_memcg()
2466 objcg = rcu_dereference(memcg->objcg); in __get_obj_cgroup_from_memcg()
2476 struct mem_cgroup *memcg; in current_objcg_update() local
2511 memcg = mem_cgroup_from_task(current); in current_objcg_update()
2512 objcg = __get_obj_cgroup_from_memcg(memcg); in current_objcg_update()
2527 struct mem_cgroup *memcg; in current_obj_cgroup() local
2531 memcg = current->active_memcg; in current_obj_cgroup()
2532 if (unlikely(memcg)) in current_obj_cgroup()
2545 memcg = this_cpu_read(int_active_memcg); in current_obj_cgroup()
2546 if (unlikely(memcg)) in current_obj_cgroup()
2553 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) { in current_obj_cgroup()
2560 objcg = rcu_dereference_check(memcg->objcg, 1); in current_obj_cgroup()
2579 struct mem_cgroup *memcg; in get_obj_cgroup_from_folio() local
2582 memcg = __folio_memcg(folio); in get_obj_cgroup_from_folio()
2583 if (memcg) in get_obj_cgroup_from_folio()
2584 objcg = __get_obj_cgroup_from_memcg(memcg); in get_obj_cgroup_from_folio()
2600 struct mem_cgroup *memcg; in obj_cgroup_uncharge_pages() local
2602 memcg = get_mem_cgroup_from_objcg(objcg); in obj_cgroup_uncharge_pages()
2604 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages); in obj_cgroup_uncharge_pages()
2605 memcg1_account_kmem(memcg, -nr_pages); in obj_cgroup_uncharge_pages()
2606 refill_stock(memcg, nr_pages); in obj_cgroup_uncharge_pages()
2608 css_put(&memcg->css); in obj_cgroup_uncharge_pages()
2622 struct mem_cgroup *memcg; in obj_cgroup_charge_pages() local
2625 memcg = get_mem_cgroup_from_objcg(objcg); in obj_cgroup_charge_pages()
2627 ret = try_charge_memcg(memcg, gfp, nr_pages); in obj_cgroup_charge_pages()
2631 mod_memcg_state(memcg, MEMCG_KMEM, nr_pages); in obj_cgroup_charge_pages()
2632 memcg1_account_kmem(memcg, nr_pages); in obj_cgroup_charge_pages()
2634 css_put(&memcg->css); in obj_cgroup_charge_pages()
2781 struct mem_cgroup *memcg; in drain_obj_stock() local
2783 memcg = get_mem_cgroup_from_objcg(old); in drain_obj_stock()
2785 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages); in drain_obj_stock()
2786 memcg1_account_kmem(memcg, -nr_pages); in drain_obj_stock()
2787 __refill_stock(memcg, nr_pages); in drain_obj_stock()
2789 css_put(&memcg->css); in drain_obj_stock()
2837 struct mem_cgroup *memcg; in obj_stock_flush_required() local
2840 memcg = obj_cgroup_memcg(objcg); in obj_stock_flush_required()
2841 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg)) in obj_stock_flush_required()
2969 struct mem_cgroup *memcg; in __memcg_slab_post_alloc_hook() local
2971 memcg = get_mem_cgroup_from_objcg(objcg); in __memcg_slab_post_alloc_hook()
2972 ret = memcg_list_lru_alloc(memcg, lru, flags); in __memcg_slab_post_alloc_hook()
2973 css_put(&memcg->css); in __memcg_slab_post_alloc_hook()
3043 unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) in mem_cgroup_usage() argument
3047 if (mem_cgroup_is_root(memcg)) { in mem_cgroup_usage()
3058 val = page_counter_read(&memcg->memory); in mem_cgroup_usage()
3060 val = page_counter_read(&memcg->memsw); in mem_cgroup_usage()
3065 static int memcg_online_kmem(struct mem_cgroup *memcg) in memcg_online_kmem() argument
3072 if (unlikely(mem_cgroup_is_root(memcg))) in memcg_online_kmem()
3079 objcg->memcg = memcg; in memcg_online_kmem()
3080 rcu_assign_pointer(memcg->objcg, objcg); in memcg_online_kmem()
3082 memcg->orig_objcg = objcg; in memcg_online_kmem()
3086 memcg->kmemcg_id = memcg->id.id; in memcg_online_kmem()
3091 static void memcg_offline_kmem(struct mem_cgroup *memcg) in memcg_offline_kmem() argument
3098 if (unlikely(mem_cgroup_is_root(memcg))) in memcg_offline_kmem()
3101 parent = parent_mem_cgroup(memcg); in memcg_offline_kmem()
3105 memcg_reparent_objcgs(memcg, parent); in memcg_offline_kmem()
3113 memcg_reparent_list_lrus(memcg, parent); in memcg_offline_kmem()
3120 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) in memcg_wb_domain_init() argument
3122 return wb_domain_init(&memcg->cgwb_domain, gfp); in memcg_wb_domain_init()
3125 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) in memcg_wb_domain_exit() argument
3127 wb_domain_exit(&memcg->cgwb_domain); in memcg_wb_domain_exit()
3130 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) in memcg_wb_domain_size_changed() argument
3132 wb_domain_size_changed(&memcg->cgwb_domain); in memcg_wb_domain_size_changed()
3137 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_wb_domain() local
3139 if (!memcg->css.parent) in mem_cgroup_wb_domain()
3142 return &memcg->cgwb_domain; in mem_cgroup_wb_domain()
3167 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_wb_stats() local
3170 mem_cgroup_flush_stats_ratelimited(memcg); in mem_cgroup_wb_stats()
3172 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY); in mem_cgroup_wb_stats()
3173 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK); in mem_cgroup_wb_stats()
3174 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) + in mem_cgroup_wb_stats()
3175 memcg_page_state(memcg, NR_ACTIVE_FILE); in mem_cgroup_wb_stats()
3178 while ((parent = parent_mem_cgroup(memcg))) { in mem_cgroup_wb_stats()
3179 unsigned long ceiling = min(READ_ONCE(memcg->memory.max), in mem_cgroup_wb_stats()
3180 READ_ONCE(memcg->memory.high)); in mem_cgroup_wb_stats()
3181 unsigned long used = page_counter_read(&memcg->memory); in mem_cgroup_wb_stats()
3184 memcg = parent; in mem_cgroup_wb_stats()
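mem_cgroup_wb_stats() derives the writeback headroom hierarchically: at each level the ceiling is min(memory.max, memory.high), and the headroom reported to the flusher is the tightest ceiling-minus-usage gap on the path to the root. A sketch of that min-fold (the names and the MIN macro are illustrative):

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    struct group { unsigned long usage, max, high; struct group *parent; };

    /* mem_cgroup_wb_stats() headroom analogue: the tightest gap wins. */
    static unsigned long wb_headroom(struct group *g)
    {
        unsigned long headroom = ~0UL;

        for (; g; g = g->parent) {
            unsigned long ceiling = MIN(g->max, g->high);

            headroom = MIN(headroom,
                           g->usage < ceiling ? ceiling - g->usage : 0);
        }
        return headroom;
    }

    int main(void)
    {
        struct group root = { 80, 200, 100, NULL };  /* 20 pages of room */
        struct group child = { 10, 50, 50, &root };  /* 40 pages of room */

        printf("headroom: %lu\n", wb_headroom(&child)); /* -> 20 */
        return 0;
    }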
3235 struct mem_cgroup *memcg = folio_memcg(folio); in mem_cgroup_track_foreign_dirty_slowpath() local
3250 frn = &memcg->cgwb_frn[i]; in mem_cgroup_track_foreign_dirty_slowpath()
3277 frn = &memcg->cgwb_frn[oldest]; in mem_cgroup_track_foreign_dirty_slowpath()
3287 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_flush_foreign() local
3293 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i]; in mem_cgroup_flush_foreign()
3314 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) in memcg_wb_domain_init() argument
3319 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) in memcg_wb_domain_exit() argument
3323 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) in memcg_wb_domain_size_changed() argument
3356 static void mem_cgroup_id_remove(struct mem_cgroup *memcg) in mem_cgroup_id_remove() argument
3358 if (memcg->id.id > 0) { in mem_cgroup_id_remove()
3359 xa_erase(&mem_cgroup_ids, memcg->id.id); in mem_cgroup_id_remove()
3360 memcg->id.id = 0; in mem_cgroup_id_remove()
3364 void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg, in mem_cgroup_id_get_many() argument
3367 refcount_add(n, &memcg->id.ref); in mem_cgroup_id_get_many()
3370 void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) in mem_cgroup_id_put_many() argument
3372 if (refcount_sub_and_test(n, &memcg->id.ref)) { in mem_cgroup_id_put_many()
3373 mem_cgroup_id_remove(memcg); in mem_cgroup_id_put_many()
3376 css_put(&memcg->css); in mem_cgroup_id_put_many()
3380 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) in mem_cgroup_id_put() argument
3382 mem_cgroup_id_put_many(memcg, 1); in mem_cgroup_id_put()
3402 struct mem_cgroup *memcg; in mem_cgroup_get_from_ino() local
3410 memcg = container_of(css, struct mem_cgroup, css); in mem_cgroup_get_from_ino()
3412 memcg = ERR_PTR(-ENOENT); in mem_cgroup_get_from_ino()
3416 return memcg; in mem_cgroup_get_from_ino()
3420 static bool alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) in alloc_mem_cgroup_per_node_info() argument
3439 pn->memcg = memcg; in alloc_mem_cgroup_per_node_info()
3441 memcg->nodeinfo[node] = pn; in alloc_mem_cgroup_per_node_info()
3449 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) in free_mem_cgroup_per_node_info() argument
3451 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; in free_mem_cgroup_per_node_info()
3461 static void __mem_cgroup_free(struct mem_cgroup *memcg) in __mem_cgroup_free() argument
3465 obj_cgroup_put(memcg->orig_objcg); in __mem_cgroup_free()
3468 free_mem_cgroup_per_node_info(memcg, node); in __mem_cgroup_free()
3469 memcg1_free_events(memcg); in __mem_cgroup_free()
3470 kfree(memcg->vmstats); in __mem_cgroup_free()
3471 free_percpu(memcg->vmstats_percpu); in __mem_cgroup_free()
3472 kfree(memcg); in __mem_cgroup_free()
3475 static void mem_cgroup_free(struct mem_cgroup *memcg) in mem_cgroup_free() argument
3477 lru_gen_exit_memcg(memcg); in mem_cgroup_free()
3478 memcg_wb_domain_exit(memcg); in mem_cgroup_free()
3479 __mem_cgroup_free(memcg); in mem_cgroup_free()
3485 struct mem_cgroup *memcg; in mem_cgroup_alloc() local
3490 memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL); in mem_cgroup_alloc()
3491 if (!memcg) in mem_cgroup_alloc()
3494 error = xa_alloc(&mem_cgroup_ids, &memcg->id.id, NULL, in mem_cgroup_alloc()
3500 memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), in mem_cgroup_alloc()
3502 if (!memcg->vmstats) in mem_cgroup_alloc()
3505 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu, in mem_cgroup_alloc()
3507 if (!memcg->vmstats_percpu) in mem_cgroup_alloc()
3510 if (!memcg1_alloc_events(memcg)) in mem_cgroup_alloc()
3516 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); in mem_cgroup_alloc()
3518 statc->vmstats = memcg->vmstats; in mem_cgroup_alloc()
3522 if (!alloc_mem_cgroup_per_node_info(memcg, node)) in mem_cgroup_alloc()
3525 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) in mem_cgroup_alloc()
3528 INIT_WORK(&memcg->high_work, high_work_func); in mem_cgroup_alloc()
3529 vmpressure_init(&memcg->vmpressure); in mem_cgroup_alloc()
3530 INIT_LIST_HEAD(&memcg->memory_peaks); in mem_cgroup_alloc()
3531 INIT_LIST_HEAD(&memcg->swap_peaks); in mem_cgroup_alloc()
3532 spin_lock_init(&memcg->peaks_lock); in mem_cgroup_alloc()
3533 memcg->socket_pressure = jiffies; in mem_cgroup_alloc()
3534 memcg1_memcg_init(memcg); in mem_cgroup_alloc()
3535 memcg->kmemcg_id = -1; in mem_cgroup_alloc()
3536 INIT_LIST_HEAD(&memcg->objcg_list); in mem_cgroup_alloc()
3538 INIT_LIST_HEAD(&memcg->cgwb_list); in mem_cgroup_alloc()
3540 memcg->cgwb_frn[i].done = in mem_cgroup_alloc()
3544 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); in mem_cgroup_alloc()
3545 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); in mem_cgroup_alloc()
3546 memcg->deferred_split_queue.split_queue_len = 0; in mem_cgroup_alloc()
3548 lru_gen_init_memcg(memcg); in mem_cgroup_alloc()
3549 return memcg; in mem_cgroup_alloc()
3551 mem_cgroup_id_remove(memcg); in mem_cgroup_alloc()
3552 __mem_cgroup_free(memcg); in mem_cgroup_alloc()
3560 struct mem_cgroup *memcg, *old_memcg; in mem_cgroup_css_alloc() local
3563 memcg = mem_cgroup_alloc(parent); in mem_cgroup_css_alloc()
3565 if (IS_ERR(memcg)) in mem_cgroup_css_alloc()
3566 return ERR_CAST(memcg); in mem_cgroup_css_alloc()
3568 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_alloc()
3569 memcg1_soft_limit_reset(memcg); in mem_cgroup_css_alloc()
3571 memcg->zswap_max = PAGE_COUNTER_MAX; in mem_cgroup_css_alloc()
3572 WRITE_ONCE(memcg->zswap_writeback, true); in mem_cgroup_css_alloc()
3574 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_alloc()
3576 WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent)); in mem_cgroup_css_alloc()
3578 page_counter_init(&memcg->memory, &parent->memory, true); in mem_cgroup_css_alloc()
3579 page_counter_init(&memcg->swap, &parent->swap, false); in mem_cgroup_css_alloc()
3581 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable)); in mem_cgroup_css_alloc()
3582 page_counter_init(&memcg->kmem, &parent->kmem, false); in mem_cgroup_css_alloc()
3583 page_counter_init(&memcg->tcpmem, &parent->tcpmem, false); in mem_cgroup_css_alloc()
3588 page_counter_init(&memcg->memory, NULL, true); in mem_cgroup_css_alloc()
3589 page_counter_init(&memcg->swap, NULL, false); in mem_cgroup_css_alloc()
3591 page_counter_init(&memcg->kmem, NULL, false); in mem_cgroup_css_alloc()
3592 page_counter_init(&memcg->tcpmem, NULL, false); in mem_cgroup_css_alloc()
3594 root_mem_cgroup = memcg; in mem_cgroup_css_alloc()
3595 return &memcg->css; in mem_cgroup_css_alloc()
3604 return &memcg->css; in mem_cgroup_css_alloc()
3609 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_online() local
3611 if (memcg_online_kmem(memcg)) in mem_cgroup_css_online()
3619 if (alloc_shrinker_info(memcg)) in mem_cgroup_css_online()
3622 if (unlikely(mem_cgroup_is_root(memcg)) && !mem_cgroup_disabled()) in mem_cgroup_css_online()
3625 lru_gen_online_memcg(memcg); in mem_cgroup_css_online()
3628 refcount_set(&memcg->id.ref, 1); in mem_cgroup_css_online()
3641 xa_store(&mem_cgroup_ids, memcg->id.id, memcg, GFP_KERNEL); in mem_cgroup_css_online()
3645 memcg_offline_kmem(memcg); in mem_cgroup_css_online()
3647 mem_cgroup_id_remove(memcg); in mem_cgroup_css_online()
3653 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_offline() local
3655 memcg1_css_offline(memcg); in mem_cgroup_css_offline()
3657 page_counter_set_min(&memcg->memory, 0); in mem_cgroup_css_offline()
3658 page_counter_set_low(&memcg->memory, 0); in mem_cgroup_css_offline()
3660 zswap_memcg_offline_cleanup(memcg); in mem_cgroup_css_offline()
3662 memcg_offline_kmem(memcg); in mem_cgroup_css_offline()
3663 reparent_shrinker_deferred(memcg); in mem_cgroup_css_offline()
3664 wb_memcg_offline(memcg); in mem_cgroup_css_offline()
3665 lru_gen_offline_memcg(memcg); in mem_cgroup_css_offline()
3667 drain_all_stock(memcg); in mem_cgroup_css_offline()
3669 mem_cgroup_id_put(memcg); in mem_cgroup_css_offline()
3674 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_released() local
3676 invalidate_reclaim_iterators(memcg); in mem_cgroup_css_released()
3677 lru_gen_release_memcg(memcg); in mem_cgroup_css_released()
3682 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_free() local
3687 wb_wait_for_completion(&memcg->cgwb_frn[i].done); in mem_cgroup_css_free()
3692 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg1_tcpmem_active(memcg)) in mem_cgroup_css_free()
3698 vmpressure_cleanup(&memcg->vmpressure); in mem_cgroup_css_free()
3699 cancel_work_sync(&memcg->high_work); in mem_cgroup_css_free()
3700 memcg1_remove_from_trees(memcg); in mem_cgroup_css_free()
3701 free_shrinker_info(memcg); in mem_cgroup_css_free()
3702 mem_cgroup_free(memcg); in mem_cgroup_css_free()
3720 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_reset() local
3722 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
3723 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
3725 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
3726 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
3728 page_counter_set_min(&memcg->memory, 0); in mem_cgroup_css_reset()
3729 page_counter_set_low(&memcg->memory, 0); in mem_cgroup_css_reset()
3730 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
3731 memcg1_soft_limit_reset(memcg); in mem_cgroup_css_reset()
3732 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
3733 memcg_wb_domain_size_changed(memcg); in mem_cgroup_css_reset()
3738 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_rstat_flush() local
3739 struct mem_cgroup *parent = parent_mem_cgroup(memcg); in mem_cgroup_css_rstat_flush()
3744 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); in mem_cgroup_css_rstat_flush()
3752 delta = memcg->vmstats->state_pending[i]; in mem_cgroup_css_rstat_flush()
3754 memcg->vmstats->state_pending[i] = 0; in mem_cgroup_css_rstat_flush()
3767 memcg->vmstats->state_local[i] += delta_cpu; in mem_cgroup_css_rstat_flush()
3770 memcg->vmstats->state[i] += delta; in mem_cgroup_css_rstat_flush()
3777 delta = memcg->vmstats->events_pending[i]; in mem_cgroup_css_rstat_flush()
3779 memcg->vmstats->events_pending[i] = 0; in mem_cgroup_css_rstat_flush()
3790 memcg->vmstats->events_local[i] += delta_cpu; in mem_cgroup_css_rstat_flush()
3793 memcg->vmstats->events[i] += delta; in mem_cgroup_css_rstat_flush()
3800 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid]; in mem_cgroup_css_rstat_flush()
3835 if (atomic64_read(&memcg->vmstats->stats_updates)) in mem_cgroup_css_rstat_flush()
3836 atomic64_set(&memcg->vmstats->stats_updates, 0); in mem_cgroup_css_rstat_flush()
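mem_cgroup_css_rstat_flush() runs bottom-up: for each counter it folds in this cgroup's per-CPU delta plus whatever its already-flushed children pushed into state_pending, adds only the per-CPU part to state_local (the non-hierarchical view), adds the full delta to state (the hierarchical view), and forwards the delta to the parent's pending slot. A one-counter model of that propagation (flush_one() and struct group are invented; cpu_delta stands for the per-CPU fold the kernel does across all CPUs):

    #include <stdio.h>

    struct group { long state, state_local, pending; struct group *parent; };

    /* css_rstat_flush() analogue for a single counter. */
    static void flush_one(struct group *g, long cpu_delta)
    {
        long delta = g->pending + cpu_delta;

        g->pending = 0;
        g->state_local += cpu_delta;    /* this group's own activity */
        g->state += delta;              /* hierarchical total */
        if (g->parent)
            g->parent->pending += delta;
    }

    int main(void)
    {
        struct group root = {0};
        struct group child = { .parent = &root };

        flush_one(&child, 5);           /* children flush before parents */
        flush_one(&root, 2);
        printf("child=%ld root=%ld root_local=%ld\n",
               child.state, root.state, root.state_local);  /* 5 7 2 */
        return 0;
    }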
3919 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in memory_current_read() local
3921 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; in memory_current_read()
3943 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf)); in memory_peak_show() local
3945 return peak_show(sf, v, &memcg->memory); in memory_peak_show()
3958 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in peak_release() local
3965 spin_lock(&memcg->peaks_lock); in peak_release()
3967 spin_unlock(&memcg->peaks_lock); in peak_release()
3976 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in peak_write() local
3979 spin_lock(&memcg->peaks_lock); in peak_write()
3993 spin_unlock(&memcg->peaks_lock); in peak_write()
4001 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_peak_write() local
4003 return peak_write(of, buf, nbytes, off, &memcg->memory, in memory_peak_write()
4004 &memcg->memory_peaks); in memory_peak_write()
4018 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_min_write() local
4027 page_counter_set_min(&memcg->memory, min); in memory_min_write()
4041 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_low_write() local
4050 page_counter_set_low(&memcg->memory, low); in memory_low_write()
4064 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_high_write() local
4075 page_counter_set_high(&memcg->memory, high); in memory_high_write()
4078 unsigned long nr_pages = page_counter_read(&memcg->memory); in memory_high_write()
4088 drain_all_stock(memcg); in memory_high_write()
4093 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, in memory_high_write()
4100 memcg_wb_domain_size_changed(memcg); in memory_high_write()
4113 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_max_write() local
4124 xchg(&memcg->memory.max, max); in memory_max_write()
4127 unsigned long nr_pages = page_counter_read(&memcg->memory); in memory_max_write()
4136 drain_all_stock(memcg); in memory_max_write()
4142 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, in memory_max_write()
4148 memcg_memory_event(memcg, MEMCG_OOM); in memory_max_write()
4149 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) in memory_max_write()
4153 memcg_wb_domain_size_changed(memcg); in memory_max_write()
4175 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in memory_events_show() local
4177 __memory_events_show(m, memcg->memory_events); in memory_events_show()
4183 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in memory_events_local_show() local
4185 __memory_events_show(m, memcg->memory_events_local); in memory_events_local_show()
4191 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in memory_stat_show() local
4198 memory_stat_format(memcg, &s); in memory_stat_show()
4215 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in memory_numa_stat_show() local
4217 mem_cgroup_flush_stats(memcg); in memory_numa_stat_show()
4230 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); in memory_numa_stat_show()
4244 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in memory_oom_group_show() local
4246 seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group)); in memory_oom_group_show()
4254 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_oom_group_write() local
4268 WRITE_ONCE(memcg->oom_group, oom_group); in memory_oom_group_write()
4286 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_reclaim() local
4335 reclaimed = try_to_free_mem_cgroup_pages(memcg, in memory_reclaim()
4453 struct mem_cgroup *memcg) in mem_cgroup_calculate_protection() argument
4464 page_counter_calculate_protection(&root->memory, &memcg->memory, recursive_protection); in mem_cgroup_calculate_protection()
4467 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg, in charge_memcg() argument
4472 ret = try_charge(memcg, gfp, folio_nr_pages(folio)); in charge_memcg()
4476 mem_cgroup_commit_charge(folio, memcg); in charge_memcg()
4483 struct mem_cgroup *memcg; in __mem_cgroup_charge() local
4486 memcg = get_mem_cgroup_from_mm(mm); in __mem_cgroup_charge()
4487 ret = charge_memcg(folio, memcg, gfp); in __mem_cgroup_charge()
4488 css_put(&memcg->css); in __mem_cgroup_charge()
4510 int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp, in mem_cgroup_hugetlb_try_charge() argument
4517 if (mem_cgroup_disabled() || !memcg || in mem_cgroup_hugetlb_try_charge()
4522 if (try_charge(memcg, gfp, nr_pages)) in mem_cgroup_hugetlb_try_charge()
4543 struct mem_cgroup *memcg; in mem_cgroup_swapin_charge_folio() local
4552 memcg = mem_cgroup_from_id(id); in mem_cgroup_swapin_charge_folio()
4553 if (!memcg || !css_tryget_online(&memcg->css)) in mem_cgroup_swapin_charge_folio()
4554 memcg = get_mem_cgroup_from_mm(mm); in mem_cgroup_swapin_charge_folio()
4557 ret = charge_memcg(folio, memcg, gfp); in mem_cgroup_swapin_charge_folio()
4559 css_put(&memcg->css); in mem_cgroup_swapin_charge_folio()
4598 struct mem_cgroup *memcg; member
4613 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory); in uncharge_batch()
4615 page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory); in uncharge_batch()
4617 mod_memcg_state(ug->memcg, MEMCG_KMEM, -ug->nr_kmem); in uncharge_batch()
4618 memcg1_account_kmem(ug->memcg, -ug->nr_kmem); in uncharge_batch()
4620 memcg1_oom_recover(ug->memcg); in uncharge_batch()
4623 memcg1_uncharge_batch(ug->memcg, ug->pgpgout, ug->nr_memory, ug->nid); in uncharge_batch()
4626 css_put(&ug->memcg->css); in uncharge_batch()
4632 struct mem_cgroup *memcg; in uncharge_folio() local
4648 memcg = get_mem_cgroup_from_objcg(objcg); in uncharge_folio()
4650 memcg = __folio_memcg(folio); in uncharge_folio()
4653 if (!memcg) in uncharge_folio()
4656 if (ug->memcg != memcg) { in uncharge_folio()
4657 if (ug->memcg) { in uncharge_folio()
4661 ug->memcg = memcg; in uncharge_folio()
4665 css_get(&memcg->css); in uncharge_folio()
4678 if (!mem_cgroup_is_root(memcg)) in uncharge_folio()
4686 css_put(&memcg->css); in uncharge_folio()
4710 if (ug.memcg) in __mem_cgroup_uncharge_folios()
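uncharge_folio() and uncharge_batch() amortize uncharging over a batch of folios: consecutive folios owned by the same memcg are accumulated in a uncharge_gather, and the counters are only touched when the owner changes or the batch ends. A sketch of that gather-and-flush shape (printf stands in for the page counter and statistics updates):

    #include <stdio.h>

    struct group { const char *name; };
    struct gather { struct group *g; unsigned long nr; };

    /* uncharge_batch() analogue: settle everything gathered for one group. */
    static void flush_gather(struct gather *ug)
    {
        if (ug->g)
            printf("uncharge %lu pages from %s\n", ug->nr, ug->g->name);
        ug->g = NULL;
        ug->nr = 0;
    }

    /* uncharge_folio() analogue: keep batching while the owner is stable. */
    static void gather_one(struct gather *ug, struct group *g, unsigned long nr)
    {
        if (ug->g != g)
            flush_gather(ug);
        ug->g = g;
        ug->nr += nr;
    }

    int main(void)
    {
        struct group a = { "a" }, b = { "b" };
        struct gather ug = { 0 };

        gather_one(&ug, &a, 1);
        gather_one(&ug, &a, 4);
        gather_one(&ug, &b, 2);
        flush_gather(&ug);              /* final flush, as after the loop */
        return 0;
    }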
4726 struct mem_cgroup *memcg; in mem_cgroup_replace_folio() local
4741 memcg = folio_memcg(old); in mem_cgroup_replace_folio()
4742 VM_WARN_ON_ONCE_FOLIO(!memcg, old); in mem_cgroup_replace_folio()
4743 if (!memcg) in mem_cgroup_replace_folio()
4747 if (!mem_cgroup_is_root(memcg)) { in mem_cgroup_replace_folio()
4748 page_counter_charge(&memcg->memory, nr_pages); in mem_cgroup_replace_folio()
4750 page_counter_charge(&memcg->memsw, nr_pages); in mem_cgroup_replace_folio()
4753 css_get(&memcg->css); in mem_cgroup_replace_folio()
4754 commit_charge(new, memcg); in mem_cgroup_replace_folio()
4755 memcg1_commit_charge(new, memcg); in mem_cgroup_replace_folio()
4771 struct mem_cgroup *memcg; in mem_cgroup_migrate() local
4782 memcg = folio_memcg(old); in mem_cgroup_migrate()
4788 VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(old) && !memcg, old); in mem_cgroup_migrate()
4789 if (!memcg) in mem_cgroup_migrate()
4793 commit_charge(new, memcg); in mem_cgroup_migrate()
4805 struct mem_cgroup *memcg; in mem_cgroup_sk_alloc() local
4815 memcg = mem_cgroup_from_task(current); in mem_cgroup_sk_alloc()
4816 if (mem_cgroup_is_root(memcg)) in mem_cgroup_sk_alloc()
4818 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg1_tcpmem_active(memcg)) in mem_cgroup_sk_alloc()
4820 if (css_tryget(&memcg->css)) in mem_cgroup_sk_alloc()
4821 sk->sk_memcg = memcg; in mem_cgroup_sk_alloc()
4841 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages, in mem_cgroup_charge_skmem() argument
4845 return memcg1_charge_skmem(memcg, nr_pages, gfp_mask); in mem_cgroup_charge_skmem()
4847 if (try_charge(memcg, gfp_mask, nr_pages) == 0) { in mem_cgroup_charge_skmem()
4848 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); in mem_cgroup_charge_skmem()
4860 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) in mem_cgroup_uncharge_skmem() argument
4863 memcg1_uncharge_skmem(memcg, nr_pages); in mem_cgroup_uncharge_skmem()
4867 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); in mem_cgroup_uncharge_skmem()
4869 refill_stock(memcg, nr_pages); in mem_cgroup_uncharge_skmem()
4922 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) in mem_cgroup_id_get_online() argument
4924 while (!refcount_inc_not_zero(&memcg->id.ref)) { in mem_cgroup_id_get_online()
4929 if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) { in mem_cgroup_id_get_online()
4933 memcg = parent_mem_cgroup(memcg); in mem_cgroup_id_get_online()
4934 if (!memcg) in mem_cgroup_id_get_online()
4935 memcg = root_mem_cgroup; in mem_cgroup_id_get_online()
4937 return memcg; in mem_cgroup_id_get_online()
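mem_cgroup_id_get_online() pins an ID-holding ancestor: if the folio's own memcg has already dropped its ID reference (it is offline), the walk climbs toward the root until refcount_inc_not_zero() succeeds, so the swap record always points at a live ID. A model with C11 atomics (the kernel's refcount_t saturates instead of wrapping; this sketch assumes, like the kernel, that the root's reference never drops to zero):

    #include <stdatomic.h>
    #include <stdio.h>

    struct group { atomic_int id_ref; struct group *parent; };

    /* refcount_inc_not_zero() analogue. */
    static int get_not_zero(struct group *g)
    {
        int r = atomic_load(&g->id_ref);

        while (r > 0)
            if (atomic_compare_exchange_weak(&g->id_ref, &r, r + 1))
                return 1;
        return 0;
    }

    /* mem_cgroup_id_get_online() analogue: climb until a live ID is found. */
    static struct group *id_get_online(struct group *g, struct group *root)
    {
        while (!get_not_zero(g))
            g = g->parent ? g->parent : root;   /* root is always live */
        return g;
    }

    int main(void)
    {
        struct group root = { 1, NULL };
        struct group dead = { 0, &root };   /* offlined: ID ref already 0 */

        printf("landed on root: %d\n", id_get_online(&dead, &root) == &root);
        return 0;
    }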
4949 struct mem_cgroup *memcg, *swap_memcg; in mem_cgroup_swapout() local
4962 memcg = folio_memcg(folio); in mem_cgroup_swapout()
4964 VM_WARN_ON_ONCE_FOLIO(!memcg, folio); in mem_cgroup_swapout()
4965 if (!memcg) in mem_cgroup_swapout()
4973 swap_memcg = mem_cgroup_id_get_online(memcg); in mem_cgroup_swapout()
4986 if (!mem_cgroup_is_root(memcg)) in mem_cgroup_swapout()
4987 page_counter_uncharge(&memcg->memory, nr_entries); in mem_cgroup_swapout()
4989 if (memcg != swap_memcg) { in mem_cgroup_swapout()
4992 page_counter_uncharge(&memcg->memsw, nr_entries); in mem_cgroup_swapout()
4995 memcg1_swapout(folio, memcg); in mem_cgroup_swapout()
4996 css_put(&memcg->css); in mem_cgroup_swapout()
5012 struct mem_cgroup *memcg; in __mem_cgroup_try_charge_swap() local
5018 memcg = folio_memcg(folio); in __mem_cgroup_try_charge_swap()
5020 VM_WARN_ON_ONCE_FOLIO(!memcg, folio); in __mem_cgroup_try_charge_swap()
5021 if (!memcg) in __mem_cgroup_try_charge_swap()
5025 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); in __mem_cgroup_try_charge_swap()
5029 memcg = mem_cgroup_id_get_online(memcg); in __mem_cgroup_try_charge_swap()
5031 if (!mem_cgroup_is_root(memcg) && in __mem_cgroup_try_charge_swap()
5032 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { in __mem_cgroup_try_charge_swap()
5033 memcg_memory_event(memcg, MEMCG_SWAP_MAX); in __mem_cgroup_try_charge_swap()
5034 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); in __mem_cgroup_try_charge_swap()
5035 mem_cgroup_id_put(memcg); in __mem_cgroup_try_charge_swap()
5041 mem_cgroup_id_get_many(memcg, nr_pages - 1); in __mem_cgroup_try_charge_swap()
5042 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages); in __mem_cgroup_try_charge_swap()
5044 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); in __mem_cgroup_try_charge_swap()
5056 struct mem_cgroup *memcg; in __mem_cgroup_uncharge_swap() local
5061 memcg = mem_cgroup_from_id(id); in __mem_cgroup_uncharge_swap()
5062 if (memcg) { in __mem_cgroup_uncharge_swap()
5063 if (!mem_cgroup_is_root(memcg)) { in __mem_cgroup_uncharge_swap()
5065 page_counter_uncharge(&memcg->memsw, nr_pages); in __mem_cgroup_uncharge_swap()
5067 page_counter_uncharge(&memcg->swap, nr_pages); in __mem_cgroup_uncharge_swap()
5069 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); in __mem_cgroup_uncharge_swap()
5070 mem_cgroup_id_put_many(memcg, nr_pages); in __mem_cgroup_uncharge_swap()
5075 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) in mem_cgroup_get_nr_swap_pages() argument
5081 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) in mem_cgroup_get_nr_swap_pages()
5083 READ_ONCE(memcg->swap.max) - in mem_cgroup_get_nr_swap_pages()
5084 page_counter_read(&memcg->swap)); in mem_cgroup_get_nr_swap_pages()
5090 struct mem_cgroup *memcg; in mem_cgroup_swap_full() local
5099 memcg = folio_memcg(folio); in mem_cgroup_swap_full()
5100 if (!memcg) in mem_cgroup_swap_full()
5103 for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) { in mem_cgroup_swap_full()
5104 unsigned long usage = page_counter_read(&memcg->swap); in mem_cgroup_swap_full()
5106 if (usage * 2 >= READ_ONCE(memcg->swap.high) || in mem_cgroup_swap_full()
5107 usage * 2 >= READ_ONCE(memcg->swap.max)) in mem_cgroup_swap_full()
5130 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in swap_current_read() local
5132 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; in swap_current_read()
5137 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf)); in swap_peak_show() local
5139 return peak_show(sf, v, &memcg->swap); in swap_peak_show()
5145 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in swap_peak_write() local
5147 return peak_write(of, buf, nbytes, off, &memcg->swap, in swap_peak_write()
5148 &memcg->swap_peaks); in swap_peak_write()
5160 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in swap_high_write() local
5169 page_counter_set_high(&memcg->swap, high); in swap_high_write()
5183 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in swap_max_write() local
5192 xchg(&memcg->swap.max, max); in swap_max_write()
5199 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in swap_events_show() local
5202 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); in swap_events_show()
5204 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); in swap_events_show()
5206 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); in swap_events_show()
5261 struct mem_cgroup *memcg, *original_memcg; in obj_cgroup_may_zswap() local
5268 for (memcg = original_memcg; !mem_cgroup_is_root(memcg); in obj_cgroup_may_zswap()
5269 memcg = parent_mem_cgroup(memcg)) { in obj_cgroup_may_zswap()
5270 unsigned long max = READ_ONCE(memcg->zswap_max); in obj_cgroup_may_zswap()
5284 do_flush_stats(memcg); in obj_cgroup_may_zswap()
5285 pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE; in obj_cgroup_may_zswap()
5305 struct mem_cgroup *memcg; in obj_cgroup_charge_zswap() local
5317 memcg = obj_cgroup_memcg(objcg); in obj_cgroup_charge_zswap()
5318 mod_memcg_state(memcg, MEMCG_ZSWAP_B, size); in obj_cgroup_charge_zswap()
5319 mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1); in obj_cgroup_charge_zswap()
5332 struct mem_cgroup *memcg; in obj_cgroup_uncharge_zswap() local
5340 memcg = obj_cgroup_memcg(objcg); in obj_cgroup_uncharge_zswap()
5341 mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size); in obj_cgroup_uncharge_zswap()
5342 mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1); in obj_cgroup_uncharge_zswap()
5346 bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg) in mem_cgroup_zswap_writeback_enabled() argument
5352 for (; memcg; memcg = parent_mem_cgroup(memcg)) in mem_cgroup_zswap_writeback_enabled()
5353 if (!READ_ONCE(memcg->zswap_writeback)) in mem_cgroup_zswap_writeback_enabled()
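mem_cgroup_zswap_writeback_enabled() is a hierarchical AND: writeback is permitted only if no cgroup on the path to the root has disabled it. In sketch form (names invented):

    #include <stdio.h>

    struct group { int zswap_writeback; struct group *parent; };

    /* Any ancestor that disables writeback disables the whole subtree. */
    static int writeback_enabled(struct group *g)
    {
        for (; g; g = g->parent)
            if (!g->zswap_writeback)
                return 0;
        return 1;
    }

    int main(void)
    {
        struct group root = { 0, NULL };        /* disabled at the root */
        struct group child = { 1, &root };

        printf("child writeback: %d\n", writeback_enabled(&child)); /* 0 */
        return 0;
    }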
5362 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in zswap_current_read() local
5364 mem_cgroup_flush_stats(memcg); in zswap_current_read()
5365 return memcg_page_state(memcg, MEMCG_ZSWAP_B); in zswap_current_read()
5377 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in zswap_max_write() local
5386 xchg(&memcg->zswap_max, max); in zswap_max_write()
5393 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in zswap_writeback_show() local
5395 seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback)); in zswap_writeback_show()
5402 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in zswap_writeback_write() local
5412 WRITE_ONCE(memcg->zswap_writeback, zswap_writeback); in zswap_writeback_write()