Lines Matching refs:memcg

136 struct mem_cgroup *memcg; member
150 int (*register_event)(struct mem_cgroup *memcg,
157 void (*unregister_event)(struct mem_cgroup *memcg,
169 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
170 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
241 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) in memcg_to_vmpressure() argument
243 if (!memcg) in memcg_to_vmpressure()
244 memcg = root_mem_cgroup; in memcg_to_vmpressure()
245 return &memcg->vmpressure; in memcg_to_vmpressure()
259 struct mem_cgroup *memcg; in obj_cgroup_release() local
289 memcg = obj_cgroup_memcg(objcg); in obj_cgroup_release()
291 __memcg_kmem_uncharge(memcg, nr_pages); in obj_cgroup_release()
293 mem_cgroup_put(memcg); in obj_cgroup_release()
319 static void memcg_reparent_objcgs(struct mem_cgroup *memcg, in memcg_reparent_objcgs() argument
324 objcg = rcu_replace_pointer(memcg->objcg, NULL, true); in memcg_reparent_objcgs()
329 xchg(&objcg->memcg, parent); in memcg_reparent_objcgs()
334 list_for_each_entry(iter, &memcg->objcg_list, list) { in memcg_reparent_objcgs()
336 xchg(&iter->memcg, parent); in memcg_reparent_objcgs()
337 css_put(&memcg->css); in memcg_reparent_objcgs()
339 list_splice(&memcg->objcg_list, &parent->objcg_list); in memcg_reparent_objcgs()
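
The matches above sample memcg_reparent_objcgs() but skip its locking and the handling of the freshly detached objcg. A hedged reconstruction of the whole flow follows; the css_set_lock, the css_get() on the parent, and the final percpu_ref_kill() are assumptions based on contemporaneous mainline code, not on the matched lines:

    static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
                                      struct mem_cgroup *parent)
    {
        struct obj_cgroup *objcg, *iter;

        /* Detach the active objcg so new lookups no longer find this memcg. */
        objcg = rcu_replace_pointer(memcg->objcg, NULL, true);

        spin_lock_irq(&css_set_lock);                   /* assumed lock */

        /* Re-point the active objcg at the parent and park it there. */
        xchg(&objcg->memcg, parent);
        css_get(&parent->css);
        list_add(&objcg->list, &parent->objcg_list);

        /*
         * Objcgs already reparented into this memcg each hold a css ref
         * on it; move that ref to the parent as they are re-pointed.
         */
        list_for_each_entry(iter, &memcg->objcg_list, list) {
            css_get(&parent->css);
            xchg(&iter->memcg, parent);
            css_put(&memcg->css);
        }
        list_splice(&memcg->objcg_list, &parent->objcg_list);

        spin_unlock_irq(&css_set_lock);

        percpu_ref_kill(&objcg->refcnt);                /* assumed */
    }
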
406 static int memcg_expand_one_shrinker_map(struct mem_cgroup *memcg, in memcg_expand_one_shrinker_map() argument
416 mem_cgroup_nodeinfo(memcg, nid)->shrinker_map, true); in memcg_expand_one_shrinker_map()
429 rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, new); in memcg_expand_one_shrinker_map()
436 static void memcg_free_shrinker_maps(struct mem_cgroup *memcg) in memcg_free_shrinker_maps() argument
442 if (mem_cgroup_is_root(memcg)) in memcg_free_shrinker_maps()
446 pn = mem_cgroup_nodeinfo(memcg, nid); in memcg_free_shrinker_maps()
454 static int memcg_alloc_shrinker_maps(struct mem_cgroup *memcg) in memcg_alloc_shrinker_maps() argument
459 if (mem_cgroup_is_root(memcg)) in memcg_alloc_shrinker_maps()
467 memcg_free_shrinker_maps(memcg); in memcg_alloc_shrinker_maps()
471 rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map); in memcg_alloc_shrinker_maps()
481 struct mem_cgroup *memcg; in memcg_expand_shrinker_maps() local
492 for_each_mem_cgroup(memcg) { in memcg_expand_shrinker_maps()
493 if (mem_cgroup_is_root(memcg)) in memcg_expand_shrinker_maps()
495 ret = memcg_expand_one_shrinker_map(memcg, size, old_size); in memcg_expand_shrinker_maps()
497 mem_cgroup_iter_break(NULL, memcg); in memcg_expand_shrinker_maps()
508 void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id) in memcg_set_shrinker_bit() argument
510 if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) { in memcg_set_shrinker_bit()
514 map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map); in memcg_set_shrinker_bit()
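
Only the guard and the rcu_dereference() of memcg_set_shrinker_bit() match; a hedged completion of the body (the barrier comment reflects the usual pairing with shrink_slab() and is an assumption):

    void memcg_set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
    {
        if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
            struct memcg_shrinker_map *map;

            rcu_read_lock();
            map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map);
            /* Pairs with the barrier in shrink_slab() (assumed). */
            smp_mb__before_atomic();
            set_bit(shrinker_id, map->map);
            rcu_read_unlock();
        }
    }
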
535 struct mem_cgroup *memcg; in mem_cgroup_css_from_page() local
537 memcg = page->mem_cgroup; in mem_cgroup_css_from_page()
539 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) in mem_cgroup_css_from_page()
540 memcg = root_mem_cgroup; in mem_cgroup_css_from_page()
542 return &memcg->css; in mem_cgroup_css_from_page()
560 struct mem_cgroup *memcg; in page_cgroup_ino() local
564 memcg = page->mem_cgroup; in page_cgroup_ino()
572 if ((unsigned long) memcg & 0x1UL) in page_cgroup_ino()
573 memcg = NULL; in page_cgroup_ino()
575 while (memcg && !(memcg->css.flags & CSS_ONLINE)) in page_cgroup_ino()
576 memcg = parent_mem_cgroup(memcg); in page_cgroup_ino()
577 if (memcg) in page_cgroup_ino()
578 ino = cgroup_ino(memcg->css.cgroup); in page_cgroup_ino()
584 mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page) in mem_cgroup_page_nodeinfo() argument
588 return memcg->nodeinfo[nid]; in mem_cgroup_page_nodeinfo()
668 static unsigned long soft_limit_excess(struct mem_cgroup *memcg) in soft_limit_excess() argument
670 unsigned long nr_pages = page_counter_read(&memcg->memory); in soft_limit_excess()
671 unsigned long soft_limit = READ_ONCE(memcg->soft_limit); in soft_limit_excess()
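
Only two lines of soft_limit_excess() match; the elided remainder simply clamps the difference at zero. A sketch of the full helper:

    static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
    {
        unsigned long nr_pages = page_counter_read(&memcg->memory);
        unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
        unsigned long excess = 0;

        /* Excess is how far usage sits above the soft limit, else 0. */
        if (nr_pages > soft_limit)
            excess = nr_pages - soft_limit;

        return excess;
    }
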
680 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page) in mem_cgroup_update_tree() argument
693 for (; memcg; memcg = parent_mem_cgroup(memcg)) { in mem_cgroup_update_tree()
694 mz = mem_cgroup_page_nodeinfo(memcg, page); in mem_cgroup_update_tree()
695 excess = soft_limit_excess(memcg); in mem_cgroup_update_tree()
717 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg) in mem_cgroup_remove_from_trees() argument
724 mz = mem_cgroup_nodeinfo(memcg, nid); in mem_cgroup_remove_from_trees()
749 if (!soft_limit_excess(mz->memcg) || in __mem_cgroup_largest_soft_limit_node()
750 !css_tryget(&mz->memcg->css)) in __mem_cgroup_largest_soft_limit_node()
773 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val) in __mod_memcg_state() argument
783 x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]); in __mod_memcg_state()
791 __this_cpu_add(memcg->vmstats_local->stat[idx], x); in __mod_memcg_state()
792 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) in __mod_memcg_state()
796 __this_cpu_write(memcg->vmstats_percpu->stat[idx], x); in __mod_memcg_state()
804 parent = parent_mem_cgroup(pn->memcg); in parent_nodeinfo()
814 struct mem_cgroup *memcg; in __mod_memcg_lruvec_state() local
818 memcg = pn->memcg; in __mod_memcg_lruvec_state()
821 __mod_memcg_state(memcg, idx, val); in __mod_memcg_lruvec_state()
865 struct mem_cgroup *memcg; in __mod_lruvec_slab_state() local
869 memcg = mem_cgroup_from_obj(p); in __mod_lruvec_slab_state()
877 if (!memcg) { in __mod_lruvec_slab_state()
880 lruvec = mem_cgroup_lruvec(memcg, pgdat); in __mod_lruvec_slab_state()
888 struct mem_cgroup *memcg; in mod_memcg_obj_state() local
891 memcg = mem_cgroup_from_obj(p); in mod_memcg_obj_state()
892 if (memcg) in mod_memcg_obj_state()
893 mod_memcg_state(memcg, idx, val); in mod_memcg_obj_state()
903 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx, in __count_memcg_events() argument
911 x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]); in __count_memcg_events()
919 __this_cpu_add(memcg->vmstats_local->events[idx], x); in __count_memcg_events()
920 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) in __count_memcg_events()
924 __this_cpu_write(memcg->vmstats_percpu->events[idx], x); in __count_memcg_events()
927 static unsigned long memcg_events(struct mem_cgroup *memcg, int event) in memcg_events() argument
929 return atomic_long_read(&memcg->vmevents[event]); in memcg_events()
932 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event) in memcg_events_local() argument
938 x += per_cpu(memcg->vmstats_local->events[event], cpu); in memcg_events_local()
942 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, in mem_cgroup_charge_statistics() argument
948 __count_memcg_events(memcg, PGPGIN, 1); in mem_cgroup_charge_statistics()
950 __count_memcg_events(memcg, PGPGOUT, 1); in mem_cgroup_charge_statistics()
954 __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages); in mem_cgroup_charge_statistics()
957 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg, in mem_cgroup_event_ratelimit() argument
962 val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events); in mem_cgroup_event_ratelimit()
963 next = __this_cpu_read(memcg->vmstats_percpu->targets[target]); in mem_cgroup_event_ratelimit()
976 __this_cpu_write(memcg->vmstats_percpu->targets[target], next); in mem_cgroup_event_ratelimit()
986 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page) in memcg_check_events() argument
989 if (unlikely(mem_cgroup_event_ratelimit(memcg, in memcg_check_events()
993 do_softlimit = mem_cgroup_event_ratelimit(memcg, in memcg_check_events()
995 mem_cgroup_threshold(memcg); in memcg_check_events()
997 mem_cgroup_update_tree(memcg, page); in memcg_check_events()
1025 struct mem_cgroup *memcg; in get_mem_cgroup_from_mm() local
1038 memcg = root_mem_cgroup; in get_mem_cgroup_from_mm()
1040 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); in get_mem_cgroup_from_mm()
1041 if (unlikely(!memcg)) in get_mem_cgroup_from_mm()
1042 memcg = root_mem_cgroup; in get_mem_cgroup_from_mm()
1044 } while (!css_tryget(&memcg->css)); in get_mem_cgroup_from_mm()
1046 return memcg; in get_mem_cgroup_from_mm()
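
The matched lines of get_mem_cgroup_from_mm() omit its locking: the lookup runs under rcu_read_lock() and retries until css_tryget() pins a live memcg. A hedged sketch of the loop (branch ordering assumed):

    struct mem_cgroup *memcg;

    rcu_read_lock();
    do {
        if (unlikely(!mm)) {
            /* No mm (e.g. a kernel thread): charge the root. */
            memcg = root_mem_cgroup;
        } else {
            memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
            if (unlikely(!memcg))
                memcg = root_mem_cgroup;
        }
    } while (!css_tryget(&memcg->css));   /* retry if this memcg is dying */
    rcu_read_unlock();
    return memcg;
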
1059 struct mem_cgroup *memcg = page->mem_cgroup; in get_mem_cgroup_from_page() local
1066 if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css))) in get_mem_cgroup_from_page()
1067 memcg = root_mem_cgroup; in get_mem_cgroup_from_page()
1069 return memcg; in get_mem_cgroup_from_page()
1083 struct mem_cgroup *memcg; in get_active_memcg() local
1086 memcg = active_memcg(); in get_active_memcg()
1088 if (memcg && WARN_ON_ONCE(!css_tryget(&memcg->css))) in get_active_memcg()
1089 memcg = root_mem_cgroup; in get_active_memcg()
1092 return memcg; in get_active_memcg()
1145 struct mem_cgroup *memcg = NULL; in mem_cgroup_iter() local
1212 memcg = mem_cgroup_from_css(css); in mem_cgroup_iter()
1220 memcg = NULL; in mem_cgroup_iter()
1229 (void)cmpxchg(&iter->position, pos, memcg); in mem_cgroup_iter()
1234 if (!memcg) in mem_cgroup_iter()
1246 return memcg; in mem_cgroup_iter()
1279 struct mem_cgroup *memcg = dead_memcg; in invalidate_reclaim_iterators() local
1283 __invalidate_reclaim_iterators(memcg, dead_memcg); in invalidate_reclaim_iterators()
1284 last = memcg; in invalidate_reclaim_iterators()
1285 } while ((memcg = parent_mem_cgroup(memcg))); in invalidate_reclaim_iterators()
1311 int mem_cgroup_scan_tasks(struct mem_cgroup *memcg, in mem_cgroup_scan_tasks() argument
1317 BUG_ON(memcg == root_mem_cgroup); in mem_cgroup_scan_tasks()
1319 for_each_mem_cgroup_tree(iter, memcg) { in mem_cgroup_scan_tasks()
1328 mem_cgroup_iter_break(memcg, iter); in mem_cgroup_scan_tasks()
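
for_each_mem_cgroup_tree(), used by mem_cgroup_scan_tasks() above and by the OOM lock helpers further down, is a thin pre-order wrapper over mem_cgroup_iter(); breaking out early requires mem_cgroup_iter_break() to drop the iterator's css reference. Its conventional definition in memcontrol.c:

    /* Pre-order walk of the subtree rooted at @root (root included). */
    #define for_each_mem_cgroup_tree(iter, root)            \
        for (iter = mem_cgroup_iter(root, NULL, NULL);      \
             iter != NULL;                                  \
             iter = mem_cgroup_iter(root, iter, NULL))

    /* for_each_mem_cgroup(iter) is the same walk with root == NULL, i.e. all memcgs. */
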
1346 struct mem_cgroup *memcg; in mem_cgroup_page_lruvec() local
1354 memcg = page->mem_cgroup; in mem_cgroup_page_lruvec()
1359 if (!memcg) in mem_cgroup_page_lruvec()
1360 memcg = root_mem_cgroup; in mem_cgroup_page_lruvec()
1362 mz = mem_cgroup_page_nodeinfo(memcg, page); in mem_cgroup_page_lruvec()
1390 struct mem_cgroup *memcg = NULL; in do_traversal_all_lruvec() local
1393 memcg = mem_cgroup_iter(NULL, NULL, NULL); in do_traversal_all_lruvec()
1395 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); in do_traversal_all_lruvec()
1399 memcg = mem_cgroup_iter(NULL, memcg, NULL); in do_traversal_all_lruvec()
1400 } while (memcg); in do_traversal_all_lruvec()
1453 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) in mem_cgroup_margin() argument
1459 count = page_counter_read(&memcg->memory); in mem_cgroup_margin()
1460 limit = READ_ONCE(memcg->memory.max); in mem_cgroup_margin()
1465 count = page_counter_read(&memcg->memsw); in mem_cgroup_margin()
1466 limit = READ_ONCE(memcg->memsw.max); in mem_cgroup_margin()
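
mem_cgroup_margin() reports how many pages can still be charged before hitting the hard limit; when memory+swap accounting is active, the tighter of the two counters wins. A hedged reconstruction (the do_memsw_account() guard is assumed):

    static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
    {
        unsigned long margin = 0;
        unsigned long count;
        unsigned long limit;

        count = page_counter_read(&memcg->memory);
        limit = READ_ONCE(memcg->memory.max);
        if (count < limit)
            margin = limit - count;

        if (do_memsw_account()) {               /* assumed guard */
            count = page_counter_read(&memcg->memsw);
            limit = READ_ONCE(memcg->memsw.max);
            if (count < limit)
                margin = min(margin, limit - count);
            else
                margin = 0;
        }

        return margin;
    }
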
1483 static bool mem_cgroup_under_move(struct mem_cgroup *memcg) in mem_cgroup_under_move() argument
1498 ret = mem_cgroup_is_descendant(from, memcg) || in mem_cgroup_under_move()
1499 mem_cgroup_is_descendant(to, memcg); in mem_cgroup_under_move()
1505 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg) in mem_cgroup_wait_acct_move() argument
1508 if (mem_cgroup_under_move(memcg)) { in mem_cgroup_wait_acct_move()
1585 static char *memory_stat_format(struct mem_cgroup *memcg) in memory_stat_format() argument
1608 size = memcg_page_state(memcg, memory_stats[i].idx); in memory_stat_format()
1613 size = memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B) + in memory_stat_format()
1614 memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B); in memory_stat_format()
1622 memcg_events(memcg, PGFAULT)); in memory_stat_format()
1624 memcg_events(memcg, PGMAJFAULT)); in memory_stat_format()
1626 memcg_events(memcg, PGREFILL)); in memory_stat_format()
1628 memcg_events(memcg, PGSCAN_KSWAPD) + in memory_stat_format()
1629 memcg_events(memcg, PGSCAN_DIRECT)); in memory_stat_format()
1631 memcg_events(memcg, PGSTEAL_KSWAPD) + in memory_stat_format()
1632 memcg_events(memcg, PGSTEAL_DIRECT)); in memory_stat_format()
1634 memcg_events(memcg, PGACTIVATE)); in memory_stat_format()
1636 memcg_events(memcg, PGDEACTIVATE)); in memory_stat_format()
1638 memcg_events(memcg, PGLAZYFREE)); in memory_stat_format()
1640 memcg_events(memcg, PGLAZYFREED)); in memory_stat_format()
1644 memcg_events(memcg, THP_FAULT_ALLOC)); in memory_stat_format()
1646 memcg_events(memcg, THP_COLLAPSE_ALLOC)); in memory_stat_format()
1665 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p) in mem_cgroup_print_oom_context() argument
1669 if (memcg) { in mem_cgroup_print_oom_context()
1671 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_context()
1686 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) in mem_cgroup_print_oom_meminfo() argument
1691 K((u64)page_counter_read(&memcg->memory)), in mem_cgroup_print_oom_meminfo()
1692 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt); in mem_cgroup_print_oom_meminfo()
1695 K((u64)page_counter_read(&memcg->swap)), in mem_cgroup_print_oom_meminfo()
1696 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt); in mem_cgroup_print_oom_meminfo()
1699 K((u64)page_counter_read(&memcg->memsw)), in mem_cgroup_print_oom_meminfo()
1700 K((u64)memcg->memsw.max), memcg->memsw.failcnt); in mem_cgroup_print_oom_meminfo()
1702 K((u64)page_counter_read(&memcg->kmem)), in mem_cgroup_print_oom_meminfo()
1703 K((u64)memcg->kmem.max), memcg->kmem.failcnt); in mem_cgroup_print_oom_meminfo()
1707 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_meminfo()
1709 buf = memory_stat_format(memcg); in mem_cgroup_print_oom_meminfo()
1719 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg) in mem_cgroup_get_max() argument
1721 unsigned long max = READ_ONCE(memcg->memory.max); in mem_cgroup_get_max()
1724 if (mem_cgroup_swappiness(memcg)) in mem_cgroup_get_max()
1725 max += min(READ_ONCE(memcg->swap.max), in mem_cgroup_get_max()
1728 if (mem_cgroup_swappiness(memcg)) { in mem_cgroup_get_max()
1730 unsigned long swap = READ_ONCE(memcg->memsw.max) - max; in mem_cgroup_get_max()
1738 unsigned long mem_cgroup_size(struct mem_cgroup *memcg) in mem_cgroup_size() argument
1740 return page_counter_read(&memcg->memory); in mem_cgroup_size()
1743 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, in mem_cgroup_out_of_memory() argument
1749 .memcg = memcg, in mem_cgroup_out_of_memory()
1758 if (mem_cgroup_margin(memcg) >= (1 << order)) in mem_cgroup_out_of_memory()
1834 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg) in mem_cgroup_oom_trylock() argument
1840 for_each_mem_cgroup_tree(iter, memcg) { in mem_cgroup_oom_trylock()
1847 mem_cgroup_iter_break(memcg, iter); in mem_cgroup_oom_trylock()
1858 for_each_mem_cgroup_tree(iter, memcg) { in mem_cgroup_oom_trylock()
1860 mem_cgroup_iter_break(memcg, iter); in mem_cgroup_oom_trylock()
1873 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg) in mem_cgroup_oom_unlock() argument
1879 for_each_mem_cgroup_tree(iter, memcg) in mem_cgroup_oom_unlock()
1884 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg) in mem_cgroup_mark_under_oom() argument
1889 for_each_mem_cgroup_tree(iter, memcg) in mem_cgroup_mark_under_oom()
1894 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg) in mem_cgroup_unmark_under_oom() argument
1903 for_each_mem_cgroup_tree(iter, memcg) in mem_cgroup_unmark_under_oom()
1912 struct mem_cgroup *memcg; member
1924 oom_wait_memcg = oom_wait_info->memcg; in memcg_oom_wake_function()
1932 static void memcg_oom_recover(struct mem_cgroup *memcg) in memcg_oom_recover() argument
1942 if (memcg && memcg->under_oom) in memcg_oom_recover()
1943 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg); in memcg_oom_recover()
1953 static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) in mem_cgroup_oom() argument
1961 memcg_memory_event(memcg, MEMCG_OOM); in mem_cgroup_oom()
1981 if (memcg->oom_kill_disable) { in mem_cgroup_oom()
1984 css_get(&memcg->css); in mem_cgroup_oom()
1985 current->memcg_in_oom = memcg; in mem_cgroup_oom()
1992 mem_cgroup_mark_under_oom(memcg); in mem_cgroup_oom()
1994 locked = mem_cgroup_oom_trylock(memcg); in mem_cgroup_oom()
1997 mem_cgroup_oom_notify(memcg); in mem_cgroup_oom()
1999 mem_cgroup_unmark_under_oom(memcg); in mem_cgroup_oom()
2000 if (mem_cgroup_out_of_memory(memcg, mask, order)) in mem_cgroup_oom()
2006 mem_cgroup_oom_unlock(memcg); in mem_cgroup_oom()
2030 struct mem_cgroup *memcg = current->memcg_in_oom; in mem_cgroup_oom_synchronize() local
2035 if (!memcg) in mem_cgroup_oom_synchronize()
2041 owait.memcg = memcg; in mem_cgroup_oom_synchronize()
2048 mem_cgroup_mark_under_oom(memcg); in mem_cgroup_oom_synchronize()
2050 locked = mem_cgroup_oom_trylock(memcg); in mem_cgroup_oom_synchronize()
2053 mem_cgroup_oom_notify(memcg); in mem_cgroup_oom_synchronize()
2055 if (locked && !memcg->oom_kill_disable) { in mem_cgroup_oom_synchronize()
2056 mem_cgroup_unmark_under_oom(memcg); in mem_cgroup_oom_synchronize()
2058 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask, in mem_cgroup_oom_synchronize()
2062 mem_cgroup_unmark_under_oom(memcg); in mem_cgroup_oom_synchronize()
2067 mem_cgroup_oom_unlock(memcg); in mem_cgroup_oom_synchronize()
2073 memcg_oom_recover(memcg); in mem_cgroup_oom_synchronize()
2077 css_put(&memcg->css); in mem_cgroup_oom_synchronize()
2095 struct mem_cgroup *memcg; in mem_cgroup_get_oom_group() local
2105 memcg = mem_cgroup_from_task(victim); in mem_cgroup_get_oom_group()
2106 if (memcg == root_mem_cgroup) in mem_cgroup_get_oom_group()
2114 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain))) in mem_cgroup_get_oom_group()
2122 for (; memcg; memcg = parent_mem_cgroup(memcg)) { in mem_cgroup_get_oom_group()
2123 if (memcg->oom_group) in mem_cgroup_get_oom_group()
2124 oom_group = memcg; in mem_cgroup_get_oom_group()
2126 if (memcg == oom_domain) in mem_cgroup_get_oom_group()
2138 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg) in mem_cgroup_print_oom_group() argument
2141 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_group()
2159 struct mem_cgroup *memcg; in lock_page_memcg() local
2178 memcg = head->mem_cgroup; in lock_page_memcg()
2179 if (unlikely(!memcg)) in lock_page_memcg()
2182 if (atomic_read(&memcg->moving_account) <= 0) in lock_page_memcg()
2183 return memcg; in lock_page_memcg()
2185 spin_lock_irqsave(&memcg->move_lock, flags); in lock_page_memcg()
2186 if (memcg != head->mem_cgroup) { in lock_page_memcg()
2187 spin_unlock_irqrestore(&memcg->move_lock, flags); in lock_page_memcg()
2196 memcg->move_lock_task = current; in lock_page_memcg()
2197 memcg->move_lock_flags = flags; in lock_page_memcg()
2199 return memcg; in lock_page_memcg()
2209 void __unlock_page_memcg(struct mem_cgroup *memcg) in __unlock_page_memcg() argument
2211 if (memcg && memcg->move_lock_task == current) { in __unlock_page_memcg()
2212 unsigned long flags = memcg->move_lock_flags; in __unlock_page_memcg()
2214 memcg->move_lock_task = NULL; in __unlock_page_memcg()
2215 memcg->move_lock_flags = 0; in __unlock_page_memcg()
2217 spin_unlock_irqrestore(&memcg->move_lock, flags); in __unlock_page_memcg()
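
Typical caller pattern for this pair: lock_page_memcg() pins the page->mem_cgroup binding against concurrent charge moving (spinning on move_lock only while an account move is in flight), the caller updates per-memcg page state, and the returned memcg is handed back to __unlock_page_memcg(). A minimal sketch; the state update is illustrative only:

    struct mem_cgroup *memcg;

    memcg = lock_page_memcg(page);      /* may take memcg->move_lock */
    /* ... update page state attributed to memcg, e.g. dirty counters ... */
    __unlock_page_memcg(memcg);

unlock_page_memcg(page) is the convenience form that derives the memcg from the page itself.
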
2278 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) in consume_stock() argument
2290 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { in consume_stock()
2344 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) in refill_stock() argument
2352 if (stock->cached != memcg) { /* reset if necessary */ in refill_stock()
2354 css_get(&memcg->css); in refill_stock()
2355 stock->cached = memcg; in refill_stock()
2385 struct mem_cgroup *memcg; in drain_all_stock() local
2389 memcg = stock->cached; in drain_all_stock()
2390 if (memcg && stock->nr_pages && in drain_all_stock()
2391 mem_cgroup_is_descendant(memcg, root_memcg)) in drain_all_stock()
2412 struct mem_cgroup *memcg, *mi; in memcg_hotplug_cpu_dead() local
2417 for_each_mem_cgroup(memcg) { in memcg_hotplug_cpu_dead()
2424 x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0); in memcg_hotplug_cpu_dead()
2426 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) in memcg_hotplug_cpu_dead()
2427 atomic_long_add(x, &memcg->vmstats[i]); in memcg_hotplug_cpu_dead()
2435 pn = mem_cgroup_nodeinfo(memcg, nid); in memcg_hotplug_cpu_dead()
2447 x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0); in memcg_hotplug_cpu_dead()
2449 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) in memcg_hotplug_cpu_dead()
2450 atomic_long_add(x, &memcg->vmevents[i]); in memcg_hotplug_cpu_dead()
2457 static unsigned long reclaim_high(struct mem_cgroup *memcg, in reclaim_high() argument
2466 if (page_counter_read(&memcg->memory) <= in reclaim_high()
2467 READ_ONCE(memcg->memory.high)) in reclaim_high()
2470 memcg_memory_event(memcg, MEMCG_HIGH); in reclaim_high()
2473 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages, in reclaim_high()
2476 } while ((memcg = parent_mem_cgroup(memcg)) && in reclaim_high()
2477 !mem_cgroup_is_root(memcg)); in reclaim_high()
2484 struct mem_cgroup *memcg; in high_work_func() local
2486 memcg = container_of(work, struct mem_cgroup, high_work); in high_work_func()
2487 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL); in high_work_func()
2561 static u64 mem_find_max_overage(struct mem_cgroup *memcg) in mem_find_max_overage() argument
2566 overage = calculate_overage(page_counter_read(&memcg->memory), in mem_find_max_overage()
2567 READ_ONCE(memcg->memory.high)); in mem_find_max_overage()
2569 } while ((memcg = parent_mem_cgroup(memcg)) && in mem_find_max_overage()
2570 !mem_cgroup_is_root(memcg)); in mem_find_max_overage()
2575 static u64 swap_find_max_overage(struct mem_cgroup *memcg) in swap_find_max_overage() argument
2580 overage = calculate_overage(page_counter_read(&memcg->swap), in swap_find_max_overage()
2581 READ_ONCE(memcg->swap.high)); in swap_find_max_overage()
2583 memcg_memory_event(memcg, MEMCG_SWAP_HIGH); in swap_find_max_overage()
2585 } while ((memcg = parent_mem_cgroup(memcg)) && in swap_find_max_overage()
2586 !mem_cgroup_is_root(memcg)); in swap_find_max_overage()
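
Both *_find_max_overage() helpers fold per-ancestor results through calculate_overage(), which takes no memcg argument and therefore does not appear in the matches. A hedged sketch of that helper (MEMCG_DELAY_PRECISION_SHIFT is the fixed-point shift later consumed by calculate_high_delay()):

    static u64 calculate_overage(unsigned long usage, unsigned long high)
    {
        u64 overage;

        if (usage <= high)
            return 0;

        /* Treat a zero threshold as one page to avoid dividing by zero. */
        high = max(high, 1UL);

        overage = usage - high;
        overage <<= MEMCG_DELAY_PRECISION_SHIFT;
        return div64_u64(overage, high);
    }
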
2595 static unsigned long calculate_high_delay(struct mem_cgroup *memcg, in calculate_high_delay() argument
2638 struct mem_cgroup *memcg; in mem_cgroup_handle_over_high() local
2644 memcg = get_mem_cgroup_from_mm(current->mm); in mem_cgroup_handle_over_high()
2657 nr_reclaimed = reclaim_high(memcg, in mem_cgroup_handle_over_high()
2665 penalty_jiffies = calculate_high_delay(memcg, nr_pages, in mem_cgroup_handle_over_high()
2666 mem_find_max_overage(memcg)); in mem_cgroup_handle_over_high()
2668 penalty_jiffies += calculate_high_delay(memcg, nr_pages, in mem_cgroup_handle_over_high()
2669 swap_find_max_overage(memcg)); in mem_cgroup_handle_over_high()
2707 css_put(&memcg->css); in mem_cgroup_handle_over_high()
2710 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, in try_charge() argument
2724 if (mem_cgroup_is_root(memcg)) in try_charge()
2727 if (consume_stock(memcg, nr_pages)) in try_charge()
2731 page_counter_try_charge(&memcg->memsw, batch, &counter)) { in try_charge()
2732 if (page_counter_try_charge(&memcg->memory, batch, &counter)) in try_charge()
2735 page_counter_uncharge(&memcg->memsw, batch); in try_charge()
2841 page_counter_charge(&memcg->memory, nr_pages); in try_charge()
2843 page_counter_charge(&memcg->memsw, nr_pages); in try_charge()
2849 refill_stock(memcg, batch - nr_pages); in try_charge()
2863 mem_high = page_counter_read(&memcg->memory) > in try_charge()
2864 READ_ONCE(memcg->memory.high); in try_charge()
2865 swap_high = page_counter_read(&memcg->swap) > in try_charge()
2866 READ_ONCE(memcg->swap.high); in try_charge()
2871 schedule_work(&memcg->high_work); in try_charge()
2891 } while ((memcg = parent_mem_cgroup(memcg))); in try_charge()
2897 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) in cancel_charge() argument
2899 if (mem_cgroup_is_root(memcg)) in cancel_charge()
2902 page_counter_uncharge(&memcg->memory, nr_pages); in cancel_charge()
2904 page_counter_uncharge(&memcg->memsw, nr_pages); in cancel_charge()
2908 static void commit_charge(struct page *page, struct mem_cgroup *memcg) in commit_charge() argument
2919 page->mem_cgroup = memcg; in commit_charge()
3001 struct mem_cgroup *memcg; in get_obj_cgroup_from_current() local
3008 memcg = active_memcg(); in get_obj_cgroup_from_current()
3010 memcg = mem_cgroup_from_task(current); in get_obj_cgroup_from_current()
3012 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) { in get_obj_cgroup_from_current()
3013 objcg = rcu_dereference(memcg->objcg); in get_obj_cgroup_from_current()
3074 int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp, in __memcg_kmem_charge() argument
3080 ret = try_charge(memcg, gfp, nr_pages); in __memcg_kmem_charge()
3085 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { in __memcg_kmem_charge()
3093 page_counter_charge(&memcg->kmem, nr_pages); in __memcg_kmem_charge()
3096 cancel_charge(memcg, nr_pages); in __memcg_kmem_charge()
3107 void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages) in __memcg_kmem_uncharge() argument
3110 page_counter_uncharge(&memcg->kmem, nr_pages); in __memcg_kmem_uncharge()
3112 refill_stock(memcg, nr_pages); in __memcg_kmem_uncharge()
3125 struct mem_cgroup *memcg; in __memcg_kmem_charge_page() local
3128 memcg = get_mem_cgroup_from_current(); in __memcg_kmem_charge_page()
3129 if (memcg && !mem_cgroup_is_root(memcg)) { in __memcg_kmem_charge_page()
3130 ret = __memcg_kmem_charge(memcg, gfp, 1 << order); in __memcg_kmem_charge_page()
3132 page->mem_cgroup = memcg; in __memcg_kmem_charge_page()
3136 css_put(&memcg->css); in __memcg_kmem_charge_page()
3148 struct mem_cgroup *memcg = page->mem_cgroup; in __memcg_kmem_uncharge_page() local
3151 if (!memcg) in __memcg_kmem_uncharge_page()
3154 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); in __memcg_kmem_uncharge_page()
3155 __memcg_kmem_uncharge(memcg, nr_pages); in __memcg_kmem_uncharge_page()
3157 css_put(&memcg->css); in __memcg_kmem_uncharge_page()
3195 struct mem_cgroup *memcg; in drain_obj_stock() local
3199 memcg = obj_cgroup_memcg(old); in drain_obj_stock()
3200 if (unlikely(!css_tryget(&memcg->css))) in drain_obj_stock()
3204 __memcg_kmem_uncharge(memcg, nr_pages); in drain_obj_stock()
3205 css_put(&memcg->css); in drain_obj_stock()
3229 struct mem_cgroup *memcg; in obj_stock_flush_required() local
3232 memcg = obj_cgroup_memcg(stock->cached_objcg); in obj_stock_flush_required()
3233 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg)) in obj_stock_flush_required()
3264 struct mem_cgroup *memcg; in obj_cgroup_charge() local
3283 memcg = obj_cgroup_memcg(objcg); in obj_cgroup_charge()
3284 if (unlikely(!css_tryget(&memcg->css))) in obj_cgroup_charge()
3294 ret = __memcg_kmem_charge(memcg, gfp, nr_pages); in obj_cgroup_charge()
3298 css_put(&memcg->css); in obj_cgroup_charge()
3314 struct mem_cgroup *memcg = head->mem_cgroup; in split_page_memcg() local
3318 if (mem_cgroup_disabled() || !memcg) in split_page_memcg()
3322 head[i].mem_cgroup = memcg; in split_page_memcg()
3326 css_get_many(&memcg->css, nr - 1); in split_page_memcg()
3369 static int mem_cgroup_resize_max(struct mem_cgroup *memcg, in mem_cgroup_resize_max() argument
3376 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory; in mem_cgroup_resize_max()
3389 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) : in mem_cgroup_resize_max()
3390 max <= memcg->memsw.max; in mem_cgroup_resize_max()
3405 drain_all_stock(memcg); in mem_cgroup_resize_max()
3410 if (!try_to_free_mem_cgroup_pages(memcg, 1, in mem_cgroup_resize_max()
3418 memcg_oom_recover(memcg); in mem_cgroup_resize_max()
3462 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, in mem_cgroup_soft_limit_reclaim()
3477 excess = soft_limit_excess(mz->memcg); in mem_cgroup_soft_limit_reclaim()
3489 css_put(&mz->memcg->css); in mem_cgroup_soft_limit_reclaim()
3502 css_put(&next_mz->memcg->css); in mem_cgroup_soft_limit_reclaim()
3512 static inline bool memcg_has_children(struct mem_cgroup *memcg) in memcg_has_children() argument
3517 ret = css_next_child(NULL, &memcg->css); in memcg_has_children()
3527 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) in mem_cgroup_force_empty() argument
3534 drain_all_stock(memcg); in mem_cgroup_force_empty()
3537 while (nr_retries && page_counter_read(&memcg->memory)) { in mem_cgroup_force_empty()
3543 progress = try_to_free_mem_cgroup_pages(memcg, 1, in mem_cgroup_force_empty()
3560 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in mem_cgroup_force_empty_write() local
3562 if (mem_cgroup_is_root(memcg)) in mem_cgroup_force_empty_write()
3564 return mem_cgroup_force_empty(memcg) ?: nbytes; in mem_cgroup_force_empty_write()
3577 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_hierarchy_write() local
3578 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent); in mem_cgroup_hierarchy_write()
3580 if (memcg->use_hierarchy == val) in mem_cgroup_hierarchy_write()
3593 if (!memcg_has_children(memcg)) in mem_cgroup_hierarchy_write()
3594 memcg->use_hierarchy = val; in mem_cgroup_hierarchy_write()
3603 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) in mem_cgroup_usage() argument
3607 if (mem_cgroup_is_root(memcg)) { in mem_cgroup_usage()
3608 val = memcg_page_state(memcg, NR_FILE_PAGES) + in mem_cgroup_usage()
3609 memcg_page_state(memcg, NR_ANON_MAPPED); in mem_cgroup_usage()
3611 val += memcg_page_state(memcg, MEMCG_SWAP); in mem_cgroup_usage()
3614 val = page_counter_read(&memcg->memory); in mem_cgroup_usage()
3616 val = page_counter_read(&memcg->memsw); in mem_cgroup_usage()
3632 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_read_u64() local
3637 counter = &memcg->memory; in mem_cgroup_read_u64()
3640 counter = &memcg->memsw; in mem_cgroup_read_u64()
3643 counter = &memcg->kmem; in mem_cgroup_read_u64()
3646 counter = &memcg->tcpmem; in mem_cgroup_read_u64()
3654 if (counter == &memcg->memory) in mem_cgroup_read_u64()
3655 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE; in mem_cgroup_read_u64()
3656 if (counter == &memcg->memsw) in mem_cgroup_read_u64()
3657 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE; in mem_cgroup_read_u64()
3666 return (u64)memcg->soft_limit * PAGE_SIZE; in mem_cgroup_read_u64()
3672 static void memcg_flush_percpu_vmstats(struct mem_cgroup *memcg) in memcg_flush_percpu_vmstats() argument
3680 stat[i] += per_cpu(memcg->vmstats_percpu->stat[i], cpu); in memcg_flush_percpu_vmstats()
3682 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) in memcg_flush_percpu_vmstats()
3687 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; in memcg_flush_percpu_vmstats()
3704 static void memcg_flush_percpu_vmevents(struct mem_cgroup *memcg) in memcg_flush_percpu_vmevents() argument
3715 events[i] += per_cpu(memcg->vmstats_percpu->events[i], in memcg_flush_percpu_vmevents()
3718 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) in memcg_flush_percpu_vmevents()
3724 static int memcg_online_kmem(struct mem_cgroup *memcg) in memcg_online_kmem() argument
3732 BUG_ON(memcg->kmemcg_id >= 0); in memcg_online_kmem()
3733 BUG_ON(memcg->kmem_state); in memcg_online_kmem()
3744 objcg->memcg = memcg; in memcg_online_kmem()
3745 rcu_assign_pointer(memcg->objcg, objcg); in memcg_online_kmem()
3755 memcg->kmemcg_id = memcg_id; in memcg_online_kmem()
3756 memcg->kmem_state = KMEM_ONLINE; in memcg_online_kmem()
3761 static void memcg_offline_kmem(struct mem_cgroup *memcg) in memcg_offline_kmem() argument
3767 if (memcg->kmem_state != KMEM_ONLINE) in memcg_offline_kmem()
3770 memcg->kmem_state = KMEM_ALLOCATED; in memcg_offline_kmem()
3772 parent = parent_mem_cgroup(memcg); in memcg_offline_kmem()
3776 memcg_reparent_objcgs(memcg, parent); in memcg_offline_kmem()
3778 kmemcg_id = memcg->kmemcg_id; in memcg_offline_kmem()
3790 css_for_each_descendant_pre(css, &memcg->css) { in memcg_offline_kmem()
3794 if (!memcg->use_hierarchy) in memcg_offline_kmem()
3804 static void memcg_free_kmem(struct mem_cgroup *memcg) in memcg_free_kmem() argument
3807 if (unlikely(memcg->kmem_state == KMEM_ONLINE)) in memcg_free_kmem()
3808 memcg_offline_kmem(memcg); in memcg_free_kmem()
3811 static int memcg_online_kmem(struct mem_cgroup *memcg) in memcg_online_kmem() argument
3815 static void memcg_offline_kmem(struct mem_cgroup *memcg) in memcg_offline_kmem() argument
3818 static void memcg_free_kmem(struct mem_cgroup *memcg) in memcg_free_kmem() argument
3823 static int memcg_update_kmem_max(struct mem_cgroup *memcg, in memcg_update_kmem_max() argument
3829 ret = page_counter_set_max(&memcg->kmem, max); in memcg_update_kmem_max()
3834 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max) in memcg_update_tcp_max() argument
3840 ret = page_counter_set_max(&memcg->tcpmem, max); in memcg_update_tcp_max()
3844 if (!memcg->tcpmem_active) { in memcg_update_tcp_max()
3862 memcg->tcpmem_active = true; in memcg_update_tcp_max()
3876 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in mem_cgroup_write() local
3887 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ in mem_cgroup_write()
3893 ret = mem_cgroup_resize_max(memcg, nr_pages, false); in mem_cgroup_write()
3896 ret = mem_cgroup_resize_max(memcg, nr_pages, true); in mem_cgroup_write()
3902 ret = memcg_update_kmem_max(memcg, nr_pages); in mem_cgroup_write()
3905 ret = memcg_update_tcp_max(memcg, nr_pages); in mem_cgroup_write()
3910 memcg->soft_limit = nr_pages; in mem_cgroup_write()
3920 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in mem_cgroup_reset() local
3925 counter = &memcg->memory; in mem_cgroup_reset()
3928 counter = &memcg->memsw; in mem_cgroup_reset()
3931 counter = &memcg->kmem; in mem_cgroup_reset()
3934 counter = &memcg->tcpmem; in mem_cgroup_reset()
3964 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_move_charge_write() local
3975 memcg->move_charge_at_immigrate = val; in mem_cgroup_move_charge_write()
3992 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, in mem_cgroup_node_nr_lru_pages() argument
3995 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); in mem_cgroup_node_nr_lru_pages()
4012 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, in mem_cgroup_nr_lru_pages() argument
4023 nr += memcg_page_state(memcg, NR_LRU_BASE + lru); in mem_cgroup_nr_lru_pages()
4025 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru); in mem_cgroup_nr_lru_pages()
4045 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in memcg_numa_stat_show() local
4049 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, in memcg_numa_stat_show()
4053 mem_cgroup_node_nr_lru_pages(memcg, nid, in memcg_numa_stat_show()
4061 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, in memcg_numa_stat_show()
4065 mem_cgroup_node_nr_lru_pages(memcg, nid, in memcg_numa_stat_show()
4110 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in memcg_stat_show() local
4122 nr = memcg_page_state_local(memcg, memcg1_stats[i]); in memcg_stat_show()
4132 memcg_events_local(memcg, memcg1_events[i])); in memcg_stat_show()
4136 memcg_page_state_local(memcg, NR_LRU_BASE + i) * in memcg_stat_show()
4141 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { in memcg_stat_show()
4156 nr = memcg_page_state(memcg, memcg1_stats[i]); in memcg_stat_show()
4168 (u64)memcg_events(memcg, memcg1_events[i])); in memcg_stat_show()
4172 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) * in memcg_stat_show()
4183 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id); in memcg_stat_show()
4199 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_swappiness_read() local
4201 return mem_cgroup_swappiness(memcg); in mem_cgroup_swappiness_read()
4207 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_swappiness_write() local
4213 memcg->swappiness = val; in mem_cgroup_swappiness_write()
4220 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) in __mem_cgroup_threshold() argument
4228 t = rcu_dereference(memcg->thresholds.primary); in __mem_cgroup_threshold()
4230 t = rcu_dereference(memcg->memsw_thresholds.primary); in __mem_cgroup_threshold()
4235 usage = mem_cgroup_usage(memcg, swap); in __mem_cgroup_threshold()
4271 static void mem_cgroup_threshold(struct mem_cgroup *memcg) in mem_cgroup_threshold() argument
4273 while (memcg) { in mem_cgroup_threshold()
4274 __mem_cgroup_threshold(memcg, false); in mem_cgroup_threshold()
4276 __mem_cgroup_threshold(memcg, true); in mem_cgroup_threshold()
4278 memcg = parent_mem_cgroup(memcg); in mem_cgroup_threshold()
4296 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) in mem_cgroup_oom_notify_cb() argument
4302 list_for_each_entry(ev, &memcg->oom_notify, list) in mem_cgroup_oom_notify_cb()
4309 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) in mem_cgroup_oom_notify() argument
4313 for_each_mem_cgroup_tree(iter, memcg) in mem_cgroup_oom_notify()
4317 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, in __mem_cgroup_usage_register_event() argument
4330 mutex_lock(&memcg->thresholds_lock); in __mem_cgroup_usage_register_event()
4333 thresholds = &memcg->thresholds; in __mem_cgroup_usage_register_event()
4334 usage = mem_cgroup_usage(memcg, false); in __mem_cgroup_usage_register_event()
4336 thresholds = &memcg->memsw_thresholds; in __mem_cgroup_usage_register_event()
4337 usage = mem_cgroup_usage(memcg, true); in __mem_cgroup_usage_register_event()
4343 __mem_cgroup_threshold(memcg, type == _MEMSWAP); in __mem_cgroup_usage_register_event()
4392 mutex_unlock(&memcg->thresholds_lock); in __mem_cgroup_usage_register_event()
4397 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, in mem_cgroup_usage_register_event() argument
4400 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); in mem_cgroup_usage_register_event()
4403 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, in memsw_cgroup_usage_register_event() argument
4406 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); in memsw_cgroup_usage_register_event()
4409 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, in __mem_cgroup_usage_unregister_event() argument
4417 mutex_lock(&memcg->thresholds_lock); in __mem_cgroup_usage_unregister_event()
4420 thresholds = &memcg->thresholds; in __mem_cgroup_usage_unregister_event()
4421 usage = mem_cgroup_usage(memcg, false); in __mem_cgroup_usage_unregister_event()
4423 thresholds = &memcg->memsw_thresholds; in __mem_cgroup_usage_unregister_event()
4424 usage = mem_cgroup_usage(memcg, true); in __mem_cgroup_usage_unregister_event()
4432 __mem_cgroup_threshold(memcg, type == _MEMSWAP); in __mem_cgroup_usage_unregister_event()
4491 mutex_unlock(&memcg->thresholds_lock); in __mem_cgroup_usage_unregister_event()
4494 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, in mem_cgroup_usage_unregister_event() argument
4497 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); in mem_cgroup_usage_unregister_event()
4500 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, in memsw_cgroup_usage_unregister_event() argument
4503 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); in memsw_cgroup_usage_unregister_event()
4506 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, in mem_cgroup_oom_register_event() argument
4518 list_add(&event->list, &memcg->oom_notify); in mem_cgroup_oom_register_event()
4521 if (memcg->under_oom) in mem_cgroup_oom_register_event()
4528 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, in mem_cgroup_oom_unregister_event() argument
4535 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { in mem_cgroup_oom_unregister_event()
4547 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf); in mem_cgroup_oom_control_read() local
4549 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); in mem_cgroup_oom_control_read()
4550 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); in mem_cgroup_oom_control_read()
4552 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); in mem_cgroup_oom_control_read()
4559 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_oom_control_write() local
4565 memcg->oom_kill_disable = val; in mem_cgroup_oom_control_write()
4567 memcg_oom_recover(memcg); in mem_cgroup_oom_control_write()
4576 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) in memcg_wb_domain_init() argument
4578 return wb_domain_init(&memcg->cgwb_domain, gfp); in memcg_wb_domain_init()
4581 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) in memcg_wb_domain_exit() argument
4583 wb_domain_exit(&memcg->cgwb_domain); in memcg_wb_domain_exit()
4586 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) in memcg_wb_domain_size_changed() argument
4588 wb_domain_size_changed(&memcg->cgwb_domain); in memcg_wb_domain_size_changed()
4593 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_wb_domain() local
4595 if (!memcg->css.parent) in mem_cgroup_wb_domain()
4598 return &memcg->cgwb_domain; in mem_cgroup_wb_domain()
4605 static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx) in memcg_exact_page_state() argument
4607 long x = atomic_long_read(&memcg->vmstats[idx]); in memcg_exact_page_state()
4611 x += per_cpu_ptr(memcg->vmstats_percpu, cpu)->stat[idx]; in memcg_exact_page_state()
4639 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_wb_stats() local
4642 *pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY); in mem_cgroup_wb_stats()
4644 *pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK); in mem_cgroup_wb_stats()
4645 *pfilepages = memcg_exact_page_state(memcg, NR_INACTIVE_FILE) + in mem_cgroup_wb_stats()
4646 memcg_exact_page_state(memcg, NR_ACTIVE_FILE); in mem_cgroup_wb_stats()
4649 while ((parent = parent_mem_cgroup(memcg))) { in mem_cgroup_wb_stats()
4650 unsigned long ceiling = min(READ_ONCE(memcg->memory.max), in mem_cgroup_wb_stats()
4651 READ_ONCE(memcg->memory.high)); in mem_cgroup_wb_stats()
4652 unsigned long used = page_counter_read(&memcg->memory); in mem_cgroup_wb_stats()
4655 memcg = parent; in mem_cgroup_wb_stats()
4706 struct mem_cgroup *memcg = page->mem_cgroup; in mem_cgroup_track_foreign_dirty_slowpath() local
4721 frn = &memcg->cgwb_frn[i]; in mem_cgroup_track_foreign_dirty_slowpath()
4748 frn = &memcg->cgwb_frn[oldest]; in mem_cgroup_track_foreign_dirty_slowpath()
4758 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_flush_foreign() local
4764 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i]; in mem_cgroup_flush_foreign()
4785 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp) in memcg_wb_domain_init() argument
4790 static void memcg_wb_domain_exit(struct mem_cgroup *memcg) in memcg_wb_domain_exit() argument
4794 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg) in memcg_wb_domain_size_changed() argument
4822 struct mem_cgroup *memcg = event->memcg; in memcg_event_remove() local
4826 event->unregister_event(memcg, event->eventfd); in memcg_event_remove()
4833 css_put(&memcg->css); in memcg_event_remove()
4846 struct mem_cgroup *memcg = event->memcg; in memcg_event_wake() local
4859 spin_lock(&memcg->event_list_lock); in memcg_event_wake()
4868 spin_unlock(&memcg->event_list_lock); in memcg_event_wake()
4896 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in memcg_write_event_control() local
4923 event->memcg = memcg; in memcg_write_event_control()
5005 ret = event->register_event(memcg, event->eventfd, buf); in memcg_write_event_control()
5011 spin_lock(&memcg->event_list_lock); in memcg_write_event_control()
5012 list_add(&event->list, &memcg->event_list); in memcg_write_event_control()
5013 spin_unlock(&memcg->event_list_lock); in memcg_write_event_control()
5189 static void mem_cgroup_id_remove(struct mem_cgroup *memcg) in mem_cgroup_id_remove() argument
5191 if (memcg->id.id > 0) { in mem_cgroup_id_remove()
5192 trace_android_vh_mem_cgroup_id_remove(memcg); in mem_cgroup_id_remove()
5193 idr_remove(&mem_cgroup_idr, memcg->id.id); in mem_cgroup_id_remove()
5194 memcg->id.id = 0; in mem_cgroup_id_remove()
5198 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg, in mem_cgroup_id_get_many() argument
5201 refcount_add(n, &memcg->id.ref); in mem_cgroup_id_get_many()
5204 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) in mem_cgroup_id_put_many() argument
5206 if (refcount_sub_and_test(n, &memcg->id.ref)) { in mem_cgroup_id_put_many()
5207 mem_cgroup_id_remove(memcg); in mem_cgroup_id_put_many()
5210 css_put(&memcg->css); in mem_cgroup_id_put_many()
5214 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) in mem_cgroup_id_put() argument
5216 mem_cgroup_id_put_many(memcg, 1); in mem_cgroup_id_put()
5232 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) in alloc_mem_cgroup_per_node_info() argument
5268 pn->memcg = memcg; in alloc_mem_cgroup_per_node_info()
5270 memcg->nodeinfo[node] = pn; in alloc_mem_cgroup_per_node_info()
5274 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node) in free_mem_cgroup_per_node_info() argument
5276 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node]; in free_mem_cgroup_per_node_info()
5286 static void __mem_cgroup_free(struct mem_cgroup *memcg) in __mem_cgroup_free() argument
5290 trace_android_vh_mem_cgroup_free(memcg); in __mem_cgroup_free()
5292 free_mem_cgroup_per_node_info(memcg, node); in __mem_cgroup_free()
5293 free_percpu(memcg->vmstats_percpu); in __mem_cgroup_free()
5294 free_percpu(memcg->vmstats_local); in __mem_cgroup_free()
5295 kfree(memcg); in __mem_cgroup_free()
5298 static void mem_cgroup_free(struct mem_cgroup *memcg) in mem_cgroup_free() argument
5300 memcg_wb_domain_exit(memcg); in mem_cgroup_free()
5305 memcg_flush_percpu_vmstats(memcg); in mem_cgroup_free()
5306 memcg_flush_percpu_vmevents(memcg); in mem_cgroup_free()
5307 __mem_cgroup_free(memcg); in mem_cgroup_free()
5312 struct mem_cgroup *memcg; in mem_cgroup_alloc() local
5321 memcg = kzalloc(size, GFP_KERNEL); in mem_cgroup_alloc()
5322 if (!memcg) in mem_cgroup_alloc()
5325 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL, in mem_cgroup_alloc()
5328 if (memcg->id.id < 0) { in mem_cgroup_alloc()
5329 error = memcg->id.id; in mem_cgroup_alloc()
5333 memcg->vmstats_local = alloc_percpu_gfp(struct memcg_vmstats_percpu, in mem_cgroup_alloc()
5335 if (!memcg->vmstats_local) in mem_cgroup_alloc()
5338 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu, in mem_cgroup_alloc()
5340 if (!memcg->vmstats_percpu) in mem_cgroup_alloc()
5344 if (alloc_mem_cgroup_per_node_info(memcg, node)) in mem_cgroup_alloc()
5347 if (memcg_wb_domain_init(memcg, GFP_KERNEL)) in mem_cgroup_alloc()
5350 INIT_WORK(&memcg->high_work, high_work_func); in mem_cgroup_alloc()
5351 INIT_LIST_HEAD(&memcg->oom_notify); in mem_cgroup_alloc()
5352 mutex_init(&memcg->thresholds_lock); in mem_cgroup_alloc()
5353 spin_lock_init(&memcg->move_lock); in mem_cgroup_alloc()
5354 vmpressure_init(&memcg->vmpressure); in mem_cgroup_alloc()
5355 INIT_LIST_HEAD(&memcg->event_list); in mem_cgroup_alloc()
5356 spin_lock_init(&memcg->event_list_lock); in mem_cgroup_alloc()
5357 memcg->socket_pressure = jiffies; in mem_cgroup_alloc()
5359 memcg->kmemcg_id = -1; in mem_cgroup_alloc()
5360 INIT_LIST_HEAD(&memcg->objcg_list); in mem_cgroup_alloc()
5363 INIT_LIST_HEAD(&memcg->cgwb_list); in mem_cgroup_alloc()
5365 memcg->cgwb_frn[i].done = in mem_cgroup_alloc()
5369 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); in mem_cgroup_alloc()
5370 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); in mem_cgroup_alloc()
5371 memcg->deferred_split_queue.split_queue_len = 0; in mem_cgroup_alloc()
5373 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); in mem_cgroup_alloc()
5374 trace_android_vh_mem_cgroup_alloc(memcg); in mem_cgroup_alloc()
5375 return memcg; in mem_cgroup_alloc()
5377 mem_cgroup_id_remove(memcg); in mem_cgroup_alloc()
5378 __mem_cgroup_free(memcg); in mem_cgroup_alloc()
5386 struct mem_cgroup *memcg, *old_memcg; in mem_cgroup_css_alloc() local
5390 memcg = mem_cgroup_alloc(); in mem_cgroup_css_alloc()
5392 if (IS_ERR(memcg)) in mem_cgroup_css_alloc()
5393 return ERR_CAST(memcg); in mem_cgroup_css_alloc()
5395 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_alloc()
5396 memcg->soft_limit = PAGE_COUNTER_MAX; in mem_cgroup_css_alloc()
5397 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_alloc()
5399 memcg->swappiness = mem_cgroup_swappiness(parent); in mem_cgroup_css_alloc()
5400 memcg->oom_kill_disable = parent->oom_kill_disable; in mem_cgroup_css_alloc()
5403 page_counter_init(&memcg->memory, NULL); in mem_cgroup_css_alloc()
5404 page_counter_init(&memcg->swap, NULL); in mem_cgroup_css_alloc()
5405 page_counter_init(&memcg->kmem, NULL); in mem_cgroup_css_alloc()
5406 page_counter_init(&memcg->tcpmem, NULL); in mem_cgroup_css_alloc()
5408 memcg->use_hierarchy = true; in mem_cgroup_css_alloc()
5409 page_counter_init(&memcg->memory, &parent->memory); in mem_cgroup_css_alloc()
5410 page_counter_init(&memcg->swap, &parent->swap); in mem_cgroup_css_alloc()
5411 page_counter_init(&memcg->kmem, &parent->kmem); in mem_cgroup_css_alloc()
5412 page_counter_init(&memcg->tcpmem, &parent->tcpmem); in mem_cgroup_css_alloc()
5414 page_counter_init(&memcg->memory, &root_mem_cgroup->memory); in mem_cgroup_css_alloc()
5415 page_counter_init(&memcg->swap, &root_mem_cgroup->swap); in mem_cgroup_css_alloc()
5416 page_counter_init(&memcg->kmem, &root_mem_cgroup->kmem); in mem_cgroup_css_alloc()
5417 page_counter_init(&memcg->tcpmem, &root_mem_cgroup->tcpmem); in mem_cgroup_css_alloc()
5429 root_mem_cgroup = memcg; in mem_cgroup_css_alloc()
5430 return &memcg->css; in mem_cgroup_css_alloc()
5433 error = memcg_online_kmem(memcg); in mem_cgroup_css_alloc()
5440 return &memcg->css; in mem_cgroup_css_alloc()
5442 mem_cgroup_id_remove(memcg); in mem_cgroup_css_alloc()
5443 mem_cgroup_free(memcg); in mem_cgroup_css_alloc()
5449 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_online() local
5456 if (memcg_alloc_shrinker_maps(memcg)) { in mem_cgroup_css_online()
5457 mem_cgroup_id_remove(memcg); in mem_cgroup_css_online()
5462 refcount_set(&memcg->id.ref, 1); in mem_cgroup_css_online()
5464 trace_android_vh_mem_cgroup_css_online(css, memcg); in mem_cgroup_css_online()
5470 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_offline() local
5473 trace_android_vh_mem_cgroup_css_offline(css, memcg); in mem_cgroup_css_offline()
5479 spin_lock(&memcg->event_list_lock); in mem_cgroup_css_offline()
5480 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { in mem_cgroup_css_offline()
5484 spin_unlock(&memcg->event_list_lock); in mem_cgroup_css_offline()
5486 page_counter_set_min(&memcg->memory, 0); in mem_cgroup_css_offline()
5487 page_counter_set_low(&memcg->memory, 0); in mem_cgroup_css_offline()
5489 memcg_offline_kmem(memcg); in mem_cgroup_css_offline()
5490 wb_memcg_offline(memcg); in mem_cgroup_css_offline()
5492 drain_all_stock(memcg); in mem_cgroup_css_offline()
5494 mem_cgroup_id_put(memcg); in mem_cgroup_css_offline()
5499 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_released() local
5501 invalidate_reclaim_iterators(memcg); in mem_cgroup_css_released()
5506 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_free() local
5511 wb_wait_for_completion(&memcg->cgwb_frn[i].done); in mem_cgroup_css_free()
5516 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active) in mem_cgroup_css_free()
5519 vmpressure_cleanup(&memcg->vmpressure); in mem_cgroup_css_free()
5520 cancel_work_sync(&memcg->high_work); in mem_cgroup_css_free()
5521 mem_cgroup_remove_from_trees(memcg); in mem_cgroup_css_free()
5522 memcg_free_shrinker_maps(memcg); in mem_cgroup_css_free()
5523 memcg_free_kmem(memcg); in mem_cgroup_css_free()
5524 mem_cgroup_free(memcg); in mem_cgroup_css_free()
5542 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_reset() local
5544 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5545 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5546 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5547 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5548 page_counter_set_min(&memcg->memory, 0); in mem_cgroup_css_reset()
5549 page_counter_set_low(&memcg->memory, 0); in mem_cgroup_css_reset()
5550 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5551 memcg->soft_limit = PAGE_COUNTER_MAX; in mem_cgroup_css_reset()
5552 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
5553 memcg_wb_domain_size_changed(memcg); in mem_cgroup_css_reset()
6025 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */ in mem_cgroup_can_attach() local
6046 memcg = mem_cgroup_from_css(css); in mem_cgroup_can_attach()
6056 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); in mem_cgroup_can_attach()
6062 VM_BUG_ON(from == memcg); in mem_cgroup_can_attach()
6078 mc.to = memcg; in mem_cgroup_can_attach()
6300 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in memory_current_read() local
6302 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; in memory_current_read()
6314 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_min_write() local
6323 page_counter_set_min(&memcg->memory, min); in memory_min_write()
6337 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_low_write() local
6346 page_counter_set_low(&memcg->memory, low); in memory_low_write()
6360 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_high_write() local
6371 page_counter_set_high(&memcg->memory, high); in memory_high_write()
6374 unsigned long nr_pages = page_counter_read(&memcg->memory); in memory_high_write()
6384 drain_all_stock(memcg); in memory_high_write()
6389 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, in memory_high_write()
6396 memcg_wb_domain_size_changed(memcg); in memory_high_write()
6409 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_max_write() local
6420 xchg(&memcg->memory.max, max); in memory_max_write()
6423 unsigned long nr_pages = page_counter_read(&memcg->memory); in memory_max_write()
6432 drain_all_stock(memcg); in memory_max_write()
6438 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, in memory_max_write()
6444 memcg_memory_event(memcg, MEMCG_OOM); in memory_max_write()
6445 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0)) in memory_max_write()
6449 memcg_wb_domain_size_changed(memcg); in memory_max_write()
6465 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in memory_events_show() local
6467 __memory_events_show(m, memcg->memory_events); in memory_events_show()
6473 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in memory_events_local_show() local
6475 __memory_events_show(m, memcg->memory_events_local); in memory_events_local_show()
6481 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in memory_stat_show() local
6484 buf = memory_stat_format(memcg); in memory_stat_show()
6496 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in memory_numa_stat_show() local
6509 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid)); in memory_numa_stat_show()
6523 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in memory_oom_group_show() local
6525 seq_printf(m, "%d\n", memcg->oom_group); in memory_oom_group_show()
6533 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_oom_group_write() local
6547 memcg->oom_group = oom_group; in memory_oom_group_write()
6754 struct mem_cgroup *memcg) in mem_cgroup_calculate_protection() argument
6772 if (memcg == root) in mem_cgroup_calculate_protection()
6775 usage = page_counter_read(&memcg->memory); in mem_cgroup_calculate_protection()
6779 parent = parent_mem_cgroup(memcg); in mem_cgroup_calculate_protection()
6785 memcg->memory.emin = READ_ONCE(memcg->memory.min); in mem_cgroup_calculate_protection()
6786 memcg->memory.elow = READ_ONCE(memcg->memory.low); in mem_cgroup_calculate_protection()
6792 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage, in mem_cgroup_calculate_protection()
6793 READ_ONCE(memcg->memory.min), in mem_cgroup_calculate_protection()
6797 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage, in mem_cgroup_calculate_protection()
6798 READ_ONCE(memcg->memory.low), in mem_cgroup_calculate_protection()
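Lines 6785-6798 compute the effective protection values. The core rule, stripped of the kernel's recursive-protection extension: a child's claim is capped by its own usage, and when siblings together claim more than the parent effectively has, each claim is scaled down proportionally. A hedged stand-alone version with a worked example:

    #include <stdio.h>

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
        return a < b ? a : b;
    }

    /* Simplified effective_protection(): omits the pass that hands out
     * a parent's unclaimed protection (the memory_recursiveprot case). */
    static unsigned long effective_protection(unsigned long usage,
                                              unsigned long setting,
                                              unsigned long parent_effective,
                                              unsigned long siblings_protected)
    {
        unsigned long protected = min_ul(usage, setting);

        if (siblings_protected > parent_effective)
            return protected * parent_effective / siblings_protected;
        return protected;
    }

    int main(void)
    {
        /* Parent effectively protects 400 pages; two children claim 300
         * and 200 (both fully used), over-committing by 100. */
        printf("child A: %lu\n",
               effective_protection(300, 300, 400, 500));    /* 240 */
        printf("child B: %lu\n",
               effective_protection(200, 200, 400, 500));    /* 160 */
        return 0;
    }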
6818 struct mem_cgroup *memcg = NULL; in __mem_cgroup_charge() local
6838 memcg = mem_cgroup_from_id(id); in __mem_cgroup_charge()
6839 if (memcg && !css_tryget_online(&memcg->css)) in __mem_cgroup_charge()
6840 memcg = NULL; in __mem_cgroup_charge()
6844 if (!memcg) in __mem_cgroup_charge()
6845 memcg = get_mem_cgroup_from_mm(mm); in __mem_cgroup_charge()
6847 ret = try_charge(memcg, gfp_mask, nr_pages); in __mem_cgroup_charge()
6851 css_get(&memcg->css); in __mem_cgroup_charge()
6852 commit_charge(page, memcg); in __mem_cgroup_charge()
6855 mem_cgroup_charge_statistics(memcg, page, nr_pages); in __mem_cgroup_charge()
6856 memcg_check_events(memcg, page); in __mem_cgroup_charge()
6882 css_put(&memcg->css); in __mem_cgroup_charge()
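The charge path above is a fixed sequence: resolve the memcg (swap id first, then the mm), try_charge against the counters, take a css reference, commit the page-to-memcg binding, then account statistics and events. A toy model of that ordering, with stub types rather than kernel API:

    #include <stdbool.h>
    #include <stdio.h>

    struct counter { long usage, max; };
    struct group   { struct counter memory; long refs; };
    struct page    { struct group *memcg; };

    static bool try_charge(struct group *g, long n)
    {
        if (g->memory.usage + n > g->memory.max)
            return false;          /* the kernel would reclaim and retry here */
        g->memory.usage += n;
        return true;
    }

    static int charge(struct page *p, struct group *g, long n)
    {
        if (!try_charge(g, n))
            return -1;
        g->refs++;                 /* css_get(): the page now pins the memcg */
        p->memcg = g;              /* commit_charge(): the binding itself */
        /* ...statistics and threshold events would be accounted here */
        return 0;
    }

    int main(void)
    {
        struct group g = { .memory = { 0, 100 }, .refs = 0 };
        struct page p = { 0 };

        printf("charge: %d, usage: %ld\n", charge(&p, &g, 10), g.memory.usage);
        return 0;
    }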
6888 struct mem_cgroup *memcg; member
6904 if (!mem_cgroup_is_root(ug->memcg)) { in uncharge_batch()
6905 page_counter_uncharge(&ug->memcg->memory, ug->nr_pages); in uncharge_batch()
6907 page_counter_uncharge(&ug->memcg->memsw, ug->nr_pages); in uncharge_batch()
6909 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem); in uncharge_batch()
6910 memcg_oom_recover(ug->memcg); in uncharge_batch()
6914 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); in uncharge_batch()
6915 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages); in uncharge_batch()
6916 memcg_check_events(ug->memcg, ug->dummy_page); in uncharge_batch()
6920 css_put(&ug->memcg->css); in uncharge_batch()
6938 if (ug->memcg != page->mem_cgroup) { in uncharge_page()
6939 if (ug->memcg) { in uncharge_page()
6943 ug->memcg = page->mem_cgroup; in uncharge_page()
6946 css_get(&ug->memcg->css); in uncharge_page()
6961 css_put(&ug->memcg->css); in uncharge_page()
6985 if (ug.memcg) in uncharge_list()
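uncharge_page() batches: consecutive pages are accumulated into one struct as long as they belong to the same memcg, and uncharge_batch() flushes the totals in a single set of counter updates when the owner changes or the list ends. A toy version of that pattern:

    #include <stdio.h>

    struct group { long usage; };
    struct batch { struct group *g; long nr; };

    static void flush(struct batch *b)
    {
        if (!b->g)
            return;
        b->g->usage -= b->nr;      /* one counter update for the whole run */
        printf("flushed %ld pages\n", b->nr);
        b->g = NULL;
        b->nr = 0;
    }

    static void uncharge_one(struct batch *b, struct group *owner)
    {
        if (b->g != owner) {       /* owner changed: flush the old run */
            flush(b);
            b->g = owner;
        }
        b->nr++;
    }

    int main(void)
    {
        struct group a = { 10 }, c = { 5 };
        struct batch b = { 0 };
        struct group *pages[] = { &a, &a, &a, &c, &c };

        for (int i = 0; i < 5; i++)
            uncharge_one(&b, pages[i]);
        flush(&b);                 /* final flush, as uncharge_list() does */
        printf("a: %ld, c: %ld\n", a.usage, c.usage);
        return 0;
    }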
7033 struct mem_cgroup *memcg; in mem_cgroup_migrate() local
7051 memcg = oldpage->mem_cgroup; in mem_cgroup_migrate()
7052 if (!memcg) in mem_cgroup_migrate()
7058 page_counter_charge(&memcg->memory, nr_pages); in mem_cgroup_migrate()
7060 page_counter_charge(&memcg->memsw, nr_pages); in mem_cgroup_migrate()
7062 css_get(&memcg->css); in mem_cgroup_migrate()
7063 commit_charge(newpage, memcg); in mem_cgroup_migrate()
7066 mem_cgroup_charge_statistics(memcg, newpage, nr_pages); in mem_cgroup_migrate()
7067 memcg_check_events(memcg, newpage); in mem_cgroup_migrate()
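mem_cgroup_migrate() charges the counters for the new page up front (7058-7060) and re-commits the old page's memcg on it; the old page keeps its charge until it is freed through the normal uncharge path. A toy sketch of that hand-off, under those assumptions:

    #include <stdio.h>

    struct group { long usage, refs; };
    struct page  { struct group *memcg; };

    static void migrate(struct page *oldp, struct page *newp)
    {
        struct group *g = oldp->memcg;

        if (!g || newp->memcg)
            return;                /* nothing to do or already charged */
        g->usage += 1;             /* page_counter_charge() for the new page */
        g->refs  += 1;             /* css_get() */
        newp->memcg = g;           /* commit_charge() */
    }

    int main(void)
    {
        struct group g = { 1, 1 };
        struct page oldp = { &g }, newp = { 0 };

        migrate(&oldp, &newp);
        printf("usage %ld refs %ld\n", g.usage, g.refs);    /* 2 2 */
        return 0;
    }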
7076 struct mem_cgroup *memcg; in mem_cgroup_sk_alloc() local
7086 memcg = mem_cgroup_from_task(current); in mem_cgroup_sk_alloc()
7087 if (memcg == root_mem_cgroup) in mem_cgroup_sk_alloc()
7089 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active) in mem_cgroup_sk_alloc()
7091 if (css_tryget(&memcg->css)) in mem_cgroup_sk_alloc()
7092 sk->sk_memcg = memcg; in mem_cgroup_sk_alloc()
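mem_cgroup_sk_alloc() only tags a socket when accounting is worthwhile: never for the root group (7087), and on cgroup v1 only when tcpmem accounting was explicitly activated (7089); the socket then pins the memcg via css_tryget() for its lifetime. A toy of those gates:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct group { bool is_root, tcpmem_active; long refs; };
    struct sock  { struct group *sk_memcg; };

    static void sk_alloc_memcg(struct sock *sk, struct group *g, bool unified)
    {
        if (g->is_root)
            return;                /* root is never accounted */
        if (!unified && !g->tcpmem_active)
            return;                /* v1: opt-in only */
        g->refs++;                 /* css_tryget() */
        sk->sk_memcg = g;
    }

    int main(void)
    {
        struct group g = { false, true, 0 };
        struct sock sk = { NULL };

        sk_alloc_memcg(&sk, &g, false);
        printf("pinned: %d\n", sk.sk_memcg != NULL);
        return 0;
    }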
7111 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) in mem_cgroup_charge_skmem() argument
7118 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { in mem_cgroup_charge_skmem()
7119 memcg->tcpmem_pressure = 0; in mem_cgroup_charge_skmem()
7122 page_counter_charge(&memcg->tcpmem, nr_pages); in mem_cgroup_charge_skmem()
7123 memcg->tcpmem_pressure = 1; in mem_cgroup_charge_skmem()
7131 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); in mem_cgroup_charge_skmem()
7133 if (try_charge(memcg, gfp_mask, nr_pages) == 0) in mem_cgroup_charge_skmem()
7136 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages); in mem_cgroup_charge_skmem()
7145 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) in mem_cgroup_uncharge_skmem() argument
7148 page_counter_uncharge(&memcg->tcpmem, nr_pages); in mem_cgroup_uncharge_skmem()
7152 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); in mem_cgroup_uncharge_skmem()
7154 refill_stock(memcg, nr_pages); in mem_cgroup_uncharge_skmem()
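Lines 7118-7136 encode the socket-memory contract: a failed charge is still forced through (on v1 by charging tcpmem past its limit and setting tcpmem_pressure, on v2 by retrying try_charge with __GFP_NOFAIL), and the false return tells the network layer to enter memory pressure. A toy predicate with those semantics:

    #include <stdbool.h>
    #include <stdio.h>

    struct counter { long usage, max; };

    /* Returns true if the charge fit; false means the charge was forced
     * through anyway and the caller should throttle (protocol pressure). */
    static bool charge_skmem(struct counter *c, long n)
    {
        if (c->usage + n <= c->max) {
            c->usage += n;
            return true;
        }
        c->usage += n;             /* forced, like __GFP_NOFAIL */
        return false;
    }

    int main(void)
    {
        struct counter tcp = { 95, 100 };

        printf("%d\n", charge_skmem(&tcp, 3));      /* 1: fits */
        printf("%d\n", charge_skmem(&tcp, 10));     /* 0: forced over max */
        printf("usage %ld > max %ld\n", tcp.usage, tcp.max);
        return 0;
    }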
7209 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) in mem_cgroup_id_get_online() argument
7211 while (!refcount_inc_not_zero(&memcg->id.ref)) { in mem_cgroup_id_get_online()
7216 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) { in mem_cgroup_id_get_online()
7220 memcg = parent_mem_cgroup(memcg); in mem_cgroup_id_get_online()
7221 if (!memcg) in mem_cgroup_id_get_online()
7222 memcg = root_mem_cgroup; in mem_cgroup_id_get_online()
7224 return memcg; in mem_cgroup_id_get_online()
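mem_cgroup_id_get_online() climbs toward the root until it finds a memcg whose id refcount can still be taken, i.e. one that has not been offlined; the root always qualifies. A toy walk over a parent chain:

    #include <stdbool.h>
    #include <stdio.h>

    struct group {
        const char *name;
        bool online;
        struct group *parent;
    };

    static struct group *get_online(struct group *g, struct group *root)
    {
        while (!g->online) {       /* refcount_inc_not_zero() failed */
            g = g->parent;
            if (!g)
                g = root;          /* fall back to the root group */
        }
        return g;
    }

    int main(void)
    {
        struct group root = { "root", true,  NULL };
        struct group mid  = { "mid",  false, &root };
        struct group leaf = { "leaf", false, &mid };

        printf("%s\n", get_online(&leaf, &root)->name);    /* "root" */
        return 0;
    }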
7236 struct mem_cgroup *memcg, *swap_memcg; in mem_cgroup_swapout() local
7249 memcg = page->mem_cgroup; in mem_cgroup_swapout()
7252 if (!memcg) in mem_cgroup_swapout()
7260 swap_memcg = mem_cgroup_id_get_online(memcg); in mem_cgroup_swapout()
7272 if (!mem_cgroup_is_root(memcg)) in mem_cgroup_swapout()
7273 page_counter_uncharge(&memcg->memory, nr_entries); in mem_cgroup_swapout()
7275 if (!cgroup_memory_noswap && memcg != swap_memcg) { in mem_cgroup_swapout()
7278 page_counter_uncharge(&memcg->memsw, nr_entries); in mem_cgroup_swapout()
7288 mem_cgroup_charge_statistics(memcg, page, -nr_entries); in mem_cgroup_swapout()
7289 memcg_check_events(memcg, page); in mem_cgroup_swapout()
7291 css_put(&memcg->css); in mem_cgroup_swapout()
7307 struct mem_cgroup *memcg; in __mem_cgroup_try_charge_swap() local
7313 memcg = page->mem_cgroup; in __mem_cgroup_try_charge_swap()
7316 if (!memcg) in __mem_cgroup_try_charge_swap()
7320 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); in __mem_cgroup_try_charge_swap()
7324 memcg = mem_cgroup_id_get_online(memcg); in __mem_cgroup_try_charge_swap()
7326 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) && in __mem_cgroup_try_charge_swap()
7327 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { in __mem_cgroup_try_charge_swap()
7328 memcg_memory_event(memcg, MEMCG_SWAP_MAX); in __mem_cgroup_try_charge_swap()
7329 memcg_memory_event(memcg, MEMCG_SWAP_FAIL); in __mem_cgroup_try_charge_swap()
7330 mem_cgroup_id_put(memcg); in __mem_cgroup_try_charge_swap()
7336 mem_cgroup_id_get_many(memcg, nr_pages - 1); in __mem_cgroup_try_charge_swap()
7337 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages); in __mem_cgroup_try_charge_swap()
7339 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); in __mem_cgroup_try_charge_swap()
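__mem_cgroup_try_charge_swap() charges the swap counter per entry, raising MEMCG_SWAP_MAX and MEMCG_SWAP_FAIL events on failure (7328-7329), and records the memcg id in the swap cgroup map (7337) so the charge can be found again at swap-in. A toy of the bookkeeping:

    #include <stdio.h>

    #define NR_ENTRIES 8

    struct group { long swap_usage, swap_max, ev_max, ev_fail; };

    static unsigned short swap_map[NR_ENTRIES];    /* entry -> memcg id */

    static int charge_swap(struct group *g, unsigned short id, int entry)
    {
        if (g->swap_usage + 1 > g->swap_max) {
            g->ev_max++;           /* MEMCG_SWAP_MAX */
            g->ev_fail++;          /* MEMCG_SWAP_FAIL */
            return -1;
        }
        g->swap_usage++;
        swap_map[entry] = id;      /* swap_cgroup_record() */
        return 0;
    }

    int main(void)
    {
        struct group g = { 0, 1, 0, 0 };

        printf("%d %d\n", charge_swap(&g, 7, 0), charge_swap(&g, 7, 1));
        printf("fail events: %ld\n", g.ev_fail);
        return 0;
    }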
7351 struct mem_cgroup *memcg; in __mem_cgroup_uncharge_swap() local
7356 memcg = mem_cgroup_from_id(id); in __mem_cgroup_uncharge_swap()
7357 if (memcg) { in __mem_cgroup_uncharge_swap()
7358 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) { in __mem_cgroup_uncharge_swap()
7360 page_counter_uncharge(&memcg->swap, nr_pages); in __mem_cgroup_uncharge_swap()
7362 page_counter_uncharge(&memcg->memsw, nr_pages); in __mem_cgroup_uncharge_swap()
7364 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); in __mem_cgroup_uncharge_swap()
7365 mem_cgroup_id_put_many(memcg, nr_pages); in __mem_cgroup_uncharge_swap()
7370 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg) in mem_cgroup_get_nr_swap_pages() argument
7376 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) in mem_cgroup_get_nr_swap_pages()
7378 READ_ONCE(memcg->swap.max) - in mem_cgroup_get_nr_swap_pages()
7379 page_counter_read(&memcg->swap)); in mem_cgroup_get_nr_swap_pages()
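mem_cgroup_get_nr_swap_pages() starts from the globally available swap and, walking toward the root, takes the tightest remaining headroom, min(swap.max - usage) over the ancestors, so one constrained ancestor caps the whole subtree. A stand-alone sketch (unlike the kernel, the toy also visits the root, which here has an effectively unlimited max):

    #include <stdio.h>

    struct group {
        long swap_usage, swap_max;
        struct group *parent;
    };

    static long nr_swap_pages(struct group *g, long global_free)
    {
        long nr = global_free;     /* start from free swap overall */

        for (; g; g = g->parent) {
            long room = g->swap_max - g->swap_usage;

            if (room < nr)
                nr = room;
        }
        return nr;
    }

    int main(void)
    {
        struct group root = { 0, 1000000, NULL };
        struct group mid  = { 90, 100, &root };    /* tight ancestor */
        struct group leaf = { 1, 500, &mid };

        printf("%ld\n", nr_swap_pages(&leaf, 4096));    /* 10 */
        return 0;
    }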
7385 struct mem_cgroup *memcg; in mem_cgroup_swap_full() local
7394 memcg = page->mem_cgroup; in mem_cgroup_swap_full()
7395 if (!memcg) in mem_cgroup_swap_full()
7398 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) { in mem_cgroup_swap_full()
7399 unsigned long usage = page_counter_read(&memcg->swap); in mem_cgroup_swap_full()
7401 if (usage * 2 >= READ_ONCE(memcg->swap.high) || in mem_cgroup_swap_full()
7402 usage * 2 >= READ_ONCE(memcg->swap.max)) in mem_cgroup_swap_full()
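mem_cgroup_swap_full() declares a group's swap "full" once usage exceeds half of either swap.high or swap.max, which is exactly what the usage * 2 comparisons at 7401-7402 implement. As a predicate:

    #include <stdbool.h>
    #include <stdio.h>

    static bool swap_full(unsigned long usage, unsigned long high,
                          unsigned long max)
    {
        return usage * 2 >= high || usage * 2 >= max;
    }

    int main(void)
    {
        printf("%d\n", swap_full(49, 1000, 100));    /* 0: under half */
        printf("%d\n", swap_full(50, 1000, 100));    /* 1: half of max */
        return 0;
    }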
7422 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in swap_current_read() local
7424 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; in swap_current_read()
7436 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in swap_high_write() local
7445 page_counter_set_high(&memcg->swap, high); in swap_high_write()
7459 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in swap_max_write() local
7468 xchg(&memcg->swap.max, max); in swap_max_write()
7475 struct mem_cgroup *memcg = mem_cgroup_from_seq(m); in swap_events_show() local
7478 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); in swap_events_show()
7480 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); in swap_events_show()
7482 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); in swap_events_show()