Lines Matching refs:memcg

154 struct mem_cgroup *memcg; /* Back pointer, we cannot */ member
221 struct mem_cgroup *memcg; member
235 int (*register_event)(struct mem_cgroup *memcg,
242 void (*unregister_event)(struct mem_cgroup *memcg,
254 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
255 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
358 bool memcg_kmem_is_active(struct mem_cgroup *memcg) in memcg_kmem_is_active() argument
360 return memcg->kmem_acct_active; in memcg_kmem_is_active()
430 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg) in memcg_to_vmpressure() argument
432 if (!memcg) in memcg_to_vmpressure()
433 memcg = root_mem_cgroup; in memcg_to_vmpressure()
434 return &memcg->vmpressure; in memcg_to_vmpressure()
442 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) in mem_cgroup_is_root() argument
444 return (memcg == root_mem_cgroup); in mem_cgroup_is_root()
453 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg) in mem_cgroup_id() argument
455 return memcg->css.id; in mem_cgroup_id()
478 struct mem_cgroup *memcg; in sock_update_memcg() local
492 BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg)); in sock_update_memcg()
493 css_get(&sk->sk_cgrp->memcg->css); in sock_update_memcg()
498 memcg = mem_cgroup_from_task(current); in sock_update_memcg()
499 cg_proto = sk->sk_prot->proto_cgroup(memcg); in sock_update_memcg()
500 if (!mem_cgroup_is_root(memcg) && in sock_update_memcg()
502 css_tryget_online(&memcg->css)) { in sock_update_memcg()
513 struct mem_cgroup *memcg; in sock_release_memcg() local
514 WARN_ON(!sk->sk_cgrp->memcg); in sock_release_memcg()
515 memcg = sk->sk_cgrp->memcg; in sock_release_memcg()
516 css_put(&sk->sk_cgrp->memcg->css); in sock_release_memcg()
520 struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg) in tcp_proto_cgroup() argument
522 if (!memcg || mem_cgroup_is_root(memcg)) in tcp_proto_cgroup()
525 return &memcg->tcp_mem; in tcp_proto_cgroup()
586 mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone) in mem_cgroup_zone_zoneinfo() argument
591 return &memcg->nodeinfo[nid]->zoneinfo[zid]; in mem_cgroup_zone_zoneinfo()
594 struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg) in mem_cgroup_css() argument
596 return &memcg->css; in mem_cgroup_css()
600 mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page) in mem_cgroup_page_zoneinfo() argument
605 return &memcg->nodeinfo[nid]->zoneinfo[zid]; in mem_cgroup_page_zoneinfo()
674 static unsigned long soft_limit_excess(struct mem_cgroup *memcg) in soft_limit_excess() argument
676 unsigned long nr_pages = page_counter_read(&memcg->memory); in soft_limit_excess()
677 unsigned long soft_limit = READ_ONCE(memcg->soft_limit); in soft_limit_excess()
686 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page) in mem_cgroup_update_tree() argument
697 for (; memcg; memcg = parent_mem_cgroup(memcg)) { in mem_cgroup_update_tree()
698 mz = mem_cgroup_page_zoneinfo(memcg, page); in mem_cgroup_update_tree()
699 excess = soft_limit_excess(memcg); in mem_cgroup_update_tree()
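
The two entries above show the soft-limit bookkeeping: soft_limit_excess() reports how far a group's usage is above its soft limit, and mem_cgroup_update_tree() re-sorts the group in the per-zone tree by that excess for every ancestor. A minimal userspace sketch of the excess arithmetic, with a hypothetical *_model name (the kernel reads the counters via page_counter_read() and READ_ONCE()):

#include <stdio.h>

static unsigned long soft_limit_excess_model(unsigned long usage_pages,
                                             unsigned long soft_limit_pages)
{
        /* Excess is usage above the soft limit, clamped at zero. */
        return usage_pages > soft_limit_pages ?
               usage_pages - soft_limit_pages : 0;
}

int main(void)
{
        /* 300 pages charged against a 256-page soft limit -> 44 pages over. */
        printf("excess = %lu pages\n", soft_limit_excess_model(300, 256));
        return 0;
}
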
721 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg) in mem_cgroup_remove_from_trees() argument
729 mz = &memcg->nodeinfo[nid]->zoneinfo[zid]; in mem_cgroup_remove_from_trees()
755 if (!soft_limit_excess(mz->memcg) || in __mem_cgroup_largest_soft_limit_node()
756 !css_tryget_online(&mz->memcg->css)) in __mem_cgroup_largest_soft_limit_node()
792 static long mem_cgroup_read_stat(struct mem_cgroup *memcg, in mem_cgroup_read_stat() argument
800 val += per_cpu(memcg->stat->count[idx], cpu); in mem_cgroup_read_stat()
802 spin_lock(&memcg->pcp_counter_lock); in mem_cgroup_read_stat()
803 val += memcg->nocpu_base.count[idx]; in mem_cgroup_read_stat()
804 spin_unlock(&memcg->pcp_counter_lock); in mem_cgroup_read_stat()
810 static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg, in mem_cgroup_read_events() argument
818 val += per_cpu(memcg->stat->events[idx], cpu); in mem_cgroup_read_events()
820 spin_lock(&memcg->pcp_counter_lock); in mem_cgroup_read_events()
821 val += memcg->nocpu_base.events[idx]; in mem_cgroup_read_events()
822 spin_unlock(&memcg->pcp_counter_lock); in mem_cgroup_read_events()
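
mem_cgroup_read_stat() and mem_cgroup_read_events() follow the same pattern: sum the lockless per-CPU deltas, then fold in nocpu_base, the residue saved away from CPUs that have gone offline, under pcp_counter_lock. A simplified model under assumed conditions (fixed CPU count, a pthread mutex standing in for the spinlock):

#include <stdio.h>
#include <pthread.h>

#define NR_CPUS_MODEL 4

struct stat_model {
        long percpu[NR_CPUS_MODEL];     /* lockless per-CPU deltas */
        long nocpu_base;                /* residue from offlined CPUs */
        pthread_mutex_t lock;           /* stands in for pcp_counter_lock */
};

static long read_stat_model(struct stat_model *s)
{
        long val = 0;

        for (int cpu = 0; cpu < NR_CPUS_MODEL; cpu++)
                val += s->percpu[cpu];  /* racy reads are tolerated */
        pthread_mutex_lock(&s->lock);   /* dead-CPU residue needs the lock */
        val += s->nocpu_base;
        pthread_mutex_unlock(&s->lock);
        return val;
}

int main(void)
{
        struct stat_model s = { { 3, 1, 0, 2 }, 5, PTHREAD_MUTEX_INITIALIZER };

        printf("%ld\n", read_stat_model(&s));   /* 3+1+0+2+5 = 11 */
        return 0;
}
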
828 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, in mem_cgroup_charge_statistics() argument
837 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS], in mem_cgroup_charge_statistics()
840 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE], in mem_cgroup_charge_statistics()
844 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], in mem_cgroup_charge_statistics()
849 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]); in mem_cgroup_charge_statistics()
851 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]); in mem_cgroup_charge_statistics()
855 __this_cpu_add(memcg->stat->nr_page_events, nr_pages); in mem_cgroup_charge_statistics()
866 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, in mem_cgroup_node_nr_lru_pages() argument
882 mz = &memcg->nodeinfo[nid]->zoneinfo[zid]; in mem_cgroup_node_nr_lru_pages()
889 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg, in mem_cgroup_nr_lru_pages() argument
896 nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask); in mem_cgroup_nr_lru_pages()
900 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg, in mem_cgroup_event_ratelimit() argument
905 val = __this_cpu_read(memcg->stat->nr_page_events); in mem_cgroup_event_ratelimit()
906 next = __this_cpu_read(memcg->stat->targets[target]); in mem_cgroup_event_ratelimit()
922 __this_cpu_write(memcg->stat->targets[target], next); in mem_cgroup_event_ratelimit()
932 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page) in memcg_check_events() argument
935 if (unlikely(mem_cgroup_event_ratelimit(memcg, in memcg_check_events()
940 do_softlimit = mem_cgroup_event_ratelimit(memcg, in memcg_check_events()
943 do_numainfo = mem_cgroup_event_ratelimit(memcg, in memcg_check_events()
946 mem_cgroup_threshold(memcg); in memcg_check_events()
948 mem_cgroup_update_tree(memcg, page); in memcg_check_events()
951 atomic_inc(&memcg->numainfo_events); in memcg_check_events()
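
mem_cgroup_event_ratelimit() is a wrap-safe counter threshold, the same idiom as time_after(): once the running nr_page_events counter passes the stored per-target value, the target is advanced by a per-event-type stride and the check fires. memcg_check_events() uses it to gate threshold, soft-limit, and numainfo work. A sketch of the idiom (the stride value here is illustrative, not the kernel's constant):

#include <stdbool.h>
#include <stdio.h>

/* Fire roughly once per 'stride' events, tolerant of counter wraparound. */
static bool event_ratelimit_model(unsigned long val, unsigned long *target,
                                  unsigned long stride)
{
        if ((long)(*target - val) < 0) {  /* same trick as time_after() */
                *target = val + stride;
                return true;
        }
        return false;
}

int main(void)
{
        unsigned long events = 0, target = 0;

        for (int i = 0; i < 300; i++)
                if (event_ratelimit_model(++events, &target, 128))
                        printf("fired at %lu events\n", events);
        return 0;
}
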
971 struct mem_cgroup *memcg = NULL; in get_mem_cgroup_from_mm() local
981 memcg = root_mem_cgroup; in get_mem_cgroup_from_mm()
983 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); in get_mem_cgroup_from_mm()
984 if (unlikely(!memcg)) in get_mem_cgroup_from_mm()
985 memcg = root_mem_cgroup; in get_mem_cgroup_from_mm()
987 } while (!css_tryget_online(&memcg->css)); in get_mem_cgroup_from_mm()
989 return memcg; in get_mem_cgroup_from_mm()
1015 struct mem_cgroup *memcg = NULL; in mem_cgroup_iter() local
1076 memcg = mem_cgroup_from_css(css); in mem_cgroup_iter()
1087 if (smp_load_acquire(&memcg->initialized)) in mem_cgroup_iter()
1093 memcg = NULL; in mem_cgroup_iter()
1097 if (cmpxchg(&iter->position, pos, memcg) == pos) { in mem_cgroup_iter()
1098 if (memcg) in mem_cgroup_iter()
1099 css_get(&memcg->css); in mem_cgroup_iter()
1111 if (!memcg) in mem_cgroup_iter()
1123 return memcg; in mem_cgroup_iter()
1157 struct mem_cgroup *memcg; in __mem_cgroup_count_vm_event() local
1160 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); in __mem_cgroup_count_vm_event()
1161 if (unlikely(!memcg)) in __mem_cgroup_count_vm_event()
1166 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]); in __mem_cgroup_count_vm_event()
1169 this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]); in __mem_cgroup_count_vm_event()
1189 struct mem_cgroup *memcg) in mem_cgroup_zone_lruvec() argument
1199 mz = mem_cgroup_zone_zoneinfo(memcg, zone); in mem_cgroup_zone_lruvec()
1224 struct mem_cgroup *memcg; in mem_cgroup_page_lruvec() local
1232 memcg = page->mem_cgroup; in mem_cgroup_page_lruvec()
1237 if (!memcg) in mem_cgroup_page_lruvec()
1238 memcg = root_mem_cgroup; in mem_cgroup_page_lruvec()
1240 mz = mem_cgroup_page_zoneinfo(memcg, page); in mem_cgroup_page_lruvec()
1277 bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, struct mem_cgroup *root) in mem_cgroup_is_descendant() argument
1279 if (root == memcg) in mem_cgroup_is_descendant()
1283 return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup); in mem_cgroup_is_descendant()
1286 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg) in task_in_mem_cgroup() argument
1307 ret = mem_cgroup_is_descendant(task_memcg, memcg); in task_in_mem_cgroup()
1334 struct mem_cgroup *memcg; in mem_cgroup_lruvec_online() local
1340 memcg = mz->memcg; in mem_cgroup_lruvec_online()
1342 return !!(memcg->css.flags & CSS_ONLINE); in mem_cgroup_lruvec_online()
1355 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) in mem_cgroup_margin() argument
1361 count = page_counter_read(&memcg->memory); in mem_cgroup_margin()
1362 limit = READ_ONCE(memcg->memory.limit); in mem_cgroup_margin()
1367 count = page_counter_read(&memcg->memsw); in mem_cgroup_margin()
1368 limit = READ_ONCE(memcg->memsw.limit); in mem_cgroup_margin()
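
mem_cgroup_margin() returns how many pages can still be charged before hitting the hard limit; with swap accounting enabled it is the smaller of the memory and memsw margins, since either counter can be the one that fails first. In sketch form (hypothetical *_model helpers):

#include <stdio.h>

/* Margin of a single counter: pages still chargeable under 'limit'. */
static unsigned long margin_model(unsigned long count, unsigned long limit)
{
        return count < limit ? limit - count : 0;
}

/* With swap accounting enabled the effective margin is the minimum. */
static unsigned long effective_margin(unsigned long mem_count,
                                      unsigned long mem_limit,
                                      unsigned long memsw_count,
                                      unsigned long memsw_limit)
{
        unsigned long m = margin_model(mem_count, mem_limit);
        unsigned long s = margin_model(memsw_count, memsw_limit);

        return m < s ? m : s;
}

int main(void)
{
        /* memory: 100/512 used; mem+swap: 600/640 -> memsw is tighter. */
        printf("margin = %lu pages\n", effective_margin(100, 512, 600, 640));
        return 0;
}
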
1376 int mem_cgroup_swappiness(struct mem_cgroup *memcg) in mem_cgroup_swappiness() argument
1379 if (mem_cgroup_disabled() || !memcg->css.parent) in mem_cgroup_swappiness()
1382 return memcg->swappiness; in mem_cgroup_swappiness()
1392 static bool mem_cgroup_under_move(struct mem_cgroup *memcg) in mem_cgroup_under_move() argument
1407 ret = mem_cgroup_is_descendant(from, memcg) || in mem_cgroup_under_move()
1408 mem_cgroup_is_descendant(to, memcg); in mem_cgroup_under_move()
1414 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg) in mem_cgroup_wait_acct_move() argument
1417 if (mem_cgroup_under_move(memcg)) { in mem_cgroup_wait_acct_move()
1439 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) in mem_cgroup_print_oom_info() argument
1457 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_info()
1463 K((u64)page_counter_read(&memcg->memory)), in mem_cgroup_print_oom_info()
1464 K((u64)memcg->memory.limit), memcg->memory.failcnt); in mem_cgroup_print_oom_info()
1466 K((u64)page_counter_read(&memcg->memsw)), in mem_cgroup_print_oom_info()
1467 K((u64)memcg->memsw.limit), memcg->memsw.failcnt); in mem_cgroup_print_oom_info()
1469 K((u64)page_counter_read(&memcg->kmem)), in mem_cgroup_print_oom_info()
1470 K((u64)memcg->kmem.limit), memcg->kmem.failcnt); in mem_cgroup_print_oom_info()
1472 for_each_mem_cgroup_tree(iter, memcg) { in mem_cgroup_print_oom_info()
1497 static int mem_cgroup_count_children(struct mem_cgroup *memcg) in mem_cgroup_count_children() argument
1502 for_each_mem_cgroup_tree(iter, memcg) in mem_cgroup_count_children()
1510 static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg) in mem_cgroup_get_limit() argument
1514 limit = memcg->memory.limit; in mem_cgroup_get_limit()
1515 if (mem_cgroup_swappiness(memcg)) { in mem_cgroup_get_limit()
1518 memsw_limit = memcg->memsw.limit; in mem_cgroup_get_limit()
1524 static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, in mem_cgroup_out_of_memory() argument
1543 check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL, memcg); in mem_cgroup_out_of_memory()
1544 totalpages = mem_cgroup_get_limit(memcg) ? : 1; in mem_cgroup_out_of_memory()
1545 for_each_mem_cgroup_tree(iter, memcg) { in mem_cgroup_out_of_memory()
1564 mem_cgroup_iter_break(memcg, iter); in mem_cgroup_out_of_memory()
1571 points = oom_badness(task, memcg, NULL, totalpages); in mem_cgroup_out_of_memory()
1591 oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg, in mem_cgroup_out_of_memory()
1607 static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg, in test_mem_cgroup_node_reclaimable() argument
1610 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE)) in test_mem_cgroup_node_reclaimable()
1614 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON)) in test_mem_cgroup_node_reclaimable()
1626 static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg) in mem_cgroup_may_update_nodemask() argument
1633 if (!atomic_read(&memcg->numainfo_events)) in mem_cgroup_may_update_nodemask()
1635 if (atomic_inc_return(&memcg->numainfo_updating) > 1) in mem_cgroup_may_update_nodemask()
1639 memcg->scan_nodes = node_states[N_MEMORY]; in mem_cgroup_may_update_nodemask()
1643 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false)) in mem_cgroup_may_update_nodemask()
1644 node_clear(nid, memcg->scan_nodes); in mem_cgroup_may_update_nodemask()
1647 atomic_set(&memcg->numainfo_events, 0); in mem_cgroup_may_update_nodemask()
1648 atomic_set(&memcg->numainfo_updating, 0); in mem_cgroup_may_update_nodemask()
1663 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) in mem_cgroup_select_victim_node() argument
1667 mem_cgroup_may_update_nodemask(memcg); in mem_cgroup_select_victim_node()
1668 node = memcg->last_scanned_node; in mem_cgroup_select_victim_node()
1670 node = next_node(node, memcg->scan_nodes); in mem_cgroup_select_victim_node()
1672 node = first_node(memcg->scan_nodes); in mem_cgroup_select_victim_node()
1682 memcg->last_scanned_node = node; in mem_cgroup_select_victim_node()
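
mem_cgroup_select_victim_node() round-robins target reclaim over the nodes recorded in scan_nodes: pick the node after last_scanned_node, wrap to the first set node, and remember the choice. mem_cgroup_may_update_nodemask() rebuilds scan_nodes only after enough numainfo events, clearing nodes with nothing reclaimable. The rotation, modeled on a plain bitmask with an assumed small node count:

#include <stdio.h>

#define MAX_NODES 8                     /* stands in for MAX_NUMNODES */

/* Next set bit strictly after 'node', or MAX_NODES if none. */
static int next_node_model(int node, unsigned int mask)
{
        for (int n = node + 1; n < MAX_NODES; n++)
                if (mask & (1u << n))
                        return n;
        return MAX_NODES;
}

static int select_victim_node_model(unsigned int scan_nodes, int *last)
{
        int node = next_node_model(*last, scan_nodes);

        if (node == MAX_NODES)          /* wrap to the first set node */
                node = next_node_model(-1, scan_nodes);
        *last = node;
        return node;
}

int main(void)
{
        unsigned int mask = (1u << 1) | (1u << 4);      /* nodes 1 and 4 */
        int last = MAX_NODES;

        for (int i = 0; i < 4; i++)
                printf("%d ", select_victim_node_model(mask, &last));
        printf("\n");                   /* prints: 1 4 1 4 */
        return 0;
}
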
1686 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) in mem_cgroup_select_victim_node() argument
1755 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg) in mem_cgroup_oom_trylock() argument
1761 for_each_mem_cgroup_tree(iter, memcg) { in mem_cgroup_oom_trylock()
1768 mem_cgroup_iter_break(memcg, iter); in mem_cgroup_oom_trylock()
1779 for_each_mem_cgroup_tree(iter, memcg) { in mem_cgroup_oom_trylock()
1781 mem_cgroup_iter_break(memcg, iter); in mem_cgroup_oom_trylock()
1794 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg) in mem_cgroup_oom_unlock() argument
1800 for_each_mem_cgroup_tree(iter, memcg) in mem_cgroup_oom_unlock()
1805 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg) in mem_cgroup_mark_under_oom() argument
1809 for_each_mem_cgroup_tree(iter, memcg) in mem_cgroup_mark_under_oom()
1813 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg) in mem_cgroup_unmark_under_oom() argument
1822 for_each_mem_cgroup_tree(iter, memcg) in mem_cgroup_unmark_under_oom()
1829 struct mem_cgroup *memcg; member
1841 oom_wait_memcg = oom_wait_info->memcg; in memcg_oom_wake_function()
1849 static void memcg_wakeup_oom(struct mem_cgroup *memcg) in memcg_wakeup_oom() argument
1851 atomic_inc(&memcg->oom_wakeups); in memcg_wakeup_oom()
1853 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg); in memcg_wakeup_oom()
1856 static void memcg_oom_recover(struct mem_cgroup *memcg) in memcg_oom_recover() argument
1858 if (memcg && atomic_read(&memcg->under_oom)) in memcg_oom_recover()
1859 memcg_wakeup_oom(memcg); in memcg_oom_recover()
1862 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) in mem_cgroup_oom() argument
1880 css_get(&memcg->css); in mem_cgroup_oom()
1881 current->memcg_oom.memcg = memcg; in mem_cgroup_oom()
1905 struct mem_cgroup *memcg = current->memcg_oom.memcg; in mem_cgroup_oom_synchronize() local
1910 if (!memcg) in mem_cgroup_oom_synchronize()
1916 owait.memcg = memcg; in mem_cgroup_oom_synchronize()
1923 mem_cgroup_mark_under_oom(memcg); in mem_cgroup_oom_synchronize()
1925 locked = mem_cgroup_oom_trylock(memcg); in mem_cgroup_oom_synchronize()
1928 mem_cgroup_oom_notify(memcg); in mem_cgroup_oom_synchronize()
1930 if (locked && !memcg->oom_kill_disable) { in mem_cgroup_oom_synchronize()
1931 mem_cgroup_unmark_under_oom(memcg); in mem_cgroup_oom_synchronize()
1933 mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask, in mem_cgroup_oom_synchronize()
1937 mem_cgroup_unmark_under_oom(memcg); in mem_cgroup_oom_synchronize()
1942 mem_cgroup_oom_unlock(memcg); in mem_cgroup_oom_synchronize()
1948 memcg_oom_recover(memcg); in mem_cgroup_oom_synchronize()
1951 current->memcg_oom.memcg = NULL; in mem_cgroup_oom_synchronize()
1952 css_put(&memcg->css); in mem_cgroup_oom_synchronize()
1971 struct mem_cgroup *memcg; in mem_cgroup_begin_page_stat() local
1991 memcg = page->mem_cgroup; in mem_cgroup_begin_page_stat()
1992 if (unlikely(!memcg)) in mem_cgroup_begin_page_stat()
1995 if (atomic_read(&memcg->moving_account) <= 0) in mem_cgroup_begin_page_stat()
1996 return memcg; in mem_cgroup_begin_page_stat()
1998 spin_lock_irqsave(&memcg->move_lock, flags); in mem_cgroup_begin_page_stat()
1999 if (memcg != page->mem_cgroup) { in mem_cgroup_begin_page_stat()
2000 spin_unlock_irqrestore(&memcg->move_lock, flags); in mem_cgroup_begin_page_stat()
2009 memcg->move_lock_task = current; in mem_cgroup_begin_page_stat()
2010 memcg->move_lock_flags = flags; in mem_cgroup_begin_page_stat()
2012 return memcg; in mem_cgroup_begin_page_stat()
2019 void mem_cgroup_end_page_stat(struct mem_cgroup *memcg) in mem_cgroup_end_page_stat() argument
2021 if (memcg && memcg->move_lock_task == current) { in mem_cgroup_end_page_stat()
2022 unsigned long flags = memcg->move_lock_flags; in mem_cgroup_end_page_stat()
2024 memcg->move_lock_task = NULL; in mem_cgroup_end_page_stat()
2025 memcg->move_lock_flags = 0; in mem_cgroup_end_page_stat()
2027 spin_unlock_irqrestore(&memcg->move_lock, flags); in mem_cgroup_end_page_stat()
2041 void mem_cgroup_update_page_stat(struct mem_cgroup *memcg, in mem_cgroup_update_page_stat() argument
2046 if (memcg) in mem_cgroup_update_page_stat()
2047 this_cpu_add(memcg->stat->count[idx], val); in mem_cgroup_update_page_stat()
2076 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) in consume_stock() argument
2085 if (memcg == stock->cached && stock->nr_pages >= nr_pages) { in consume_stock()
2125 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) in refill_stock() argument
2129 if (stock->cached != memcg) { /* reset if necessary */ in refill_stock()
2131 stock->cached = memcg; in refill_stock()
2153 struct mem_cgroup *memcg; in drain_all_stock() local
2155 memcg = stock->cached; in drain_all_stock()
2156 if (!memcg || !stock->nr_pages) in drain_all_stock()
2158 if (!mem_cgroup_is_descendant(memcg, root_memcg)) in drain_all_stock()
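
consume_stock() and refill_stock() implement a per-CPU cache of precharged pages: a charge first tries to take pages from the local CPU's stock if it belongs to the same memcg, and surplus precharge is parked back in the stock, so most charges never touch the shared page counters; drain_all_stock() flushes every stock belonging to a hierarchy. A single-CPU model of the fast path, with a dummy struct standing in for struct mem_cgroup:

#include <stdbool.h>
#include <stddef.h>

struct memcg_model { int id; };         /* stand-in for struct mem_cgroup */

struct stock_model {
        struct memcg_model *cached;     /* group the precharge belongs to */
        unsigned int nr_pages;          /* precharged pages on this CPU */
};

static bool consume_stock_model(struct stock_model *stock,
                                struct memcg_model *memcg,
                                unsigned int nr_pages)
{
        if (stock->cached == memcg && stock->nr_pages >= nr_pages) {
                stock->nr_pages -= nr_pages;    /* hit: no counter traffic */
                return true;
        }
        return false;                   /* miss: caller charges the counters */
}

static void refill_stock_model(struct stock_model *stock,
                               struct memcg_model *memcg,
                               unsigned int nr_pages)
{
        if (stock->cached != memcg) {   /* reset if necessary */
                /* the kernel drains the old group's stock back here */
                stock->cached = memcg;
                stock->nr_pages = 0;
        }
        stock->nr_pages += nr_pages;
}

int main(void)
{
        struct memcg_model a = { 1 };
        struct stock_model stock = { NULL, 0 };

        refill_stock_model(&stock, &a, 32);     /* park 32 precharged pages */
        return consume_stock_model(&stock, &a, 8) ? 0 : 1;
}
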
2176 static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu) in mem_cgroup_drain_pcp_counter() argument
2180 spin_lock(&memcg->pcp_counter_lock); in mem_cgroup_drain_pcp_counter()
2182 long x = per_cpu(memcg->stat->count[i], cpu); in mem_cgroup_drain_pcp_counter()
2184 per_cpu(memcg->stat->count[i], cpu) = 0; in mem_cgroup_drain_pcp_counter()
2185 memcg->nocpu_base.count[i] += x; in mem_cgroup_drain_pcp_counter()
2188 unsigned long x = per_cpu(memcg->stat->events[i], cpu); in mem_cgroup_drain_pcp_counter()
2190 per_cpu(memcg->stat->events[i], cpu) = 0; in mem_cgroup_drain_pcp_counter()
2191 memcg->nocpu_base.events[i] += x; in mem_cgroup_drain_pcp_counter()
2193 spin_unlock(&memcg->pcp_counter_lock); in mem_cgroup_drain_pcp_counter()
2218 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, in try_charge() argument
2230 if (mem_cgroup_is_root(memcg)) in try_charge()
2233 if (consume_stock(memcg, nr_pages)) in try_charge()
2237 !page_counter_try_charge(&memcg->memsw, batch, &counter)) { in try_charge()
2238 if (!page_counter_try_charge(&memcg->memory, batch, &counter)) in try_charge()
2241 page_counter_uncharge(&memcg->memsw, batch); in try_charge()
2323 css_get_many(&memcg->css, batch); in try_charge()
2325 refill_stock(memcg, batch - nr_pages); in try_charge()
2333 if (page_counter_read(&memcg->memory) <= memcg->high) in try_charge()
2335 mem_cgroup_events(memcg, MEMCG_HIGH, 1); in try_charge()
2336 try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true); in try_charge()
2337 } while ((memcg = parent_mem_cgroup(memcg))); in try_charge()
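
try_charge() is the heart of the charge path visible above: root groups are exempt, the per-CPU stock is tried first, then the memsw and memory counters, with reclaim on failure; on success css references are taken for the whole batch and the surplus is parked in the stock. The final loop walks up the hierarchy and, wherever usage exceeds the 'high' watermark, counts a MEMCG_HIGH event and reclaims. A sketch of that watermark walk only, under a toy reclaim stand-in:

#include <stdio.h>

struct memcg_model {
        unsigned long usage;            /* page_counter_read(&memcg->memory) */
        unsigned long high;             /* the memcg->high watermark */
        struct memcg_model *parent;     /* NULL at the root */
};

/* Stand-in for try_to_free_mem_cgroup_pages(): shrink usage a little. */
static void reclaim_model(struct memcg_model *memcg, unsigned long nr_pages)
{
        memcg->usage -= nr_pages < memcg->usage ? nr_pages : memcg->usage;
}

/* After a successful charge, push over-high ancestors back down
 * (the kernel also counts a MEMCG_HIGH event per offender). */
static void check_high_model(struct memcg_model *memcg, unsigned long nr_pages)
{
        do {
                if (memcg->usage > memcg->high)
                        reclaim_model(memcg, nr_pages);
        } while ((memcg = memcg->parent));
}

int main(void)
{
        struct memcg_model root = { 900, 1000, NULL };
        struct memcg_model child = { 500, 400, &root };

        check_high_model(&child, 64);   /* child is 100 pages over high */
        printf("child usage now %lu\n", child.usage);   /* 436 */
        return 0;
}
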
2342 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) in cancel_charge() argument
2344 if (mem_cgroup_is_root(memcg)) in cancel_charge()
2347 page_counter_uncharge(&memcg->memory, nr_pages); in cancel_charge()
2349 page_counter_uncharge(&memcg->memsw, nr_pages); in cancel_charge()
2351 css_put_many(&memcg->css, nr_pages); in cancel_charge()
2366 struct mem_cgroup *memcg; in try_get_mem_cgroup_from_page() local
2372 memcg = page->mem_cgroup; in try_get_mem_cgroup_from_page()
2373 if (memcg) { in try_get_mem_cgroup_from_page()
2374 if (!css_tryget_online(&memcg->css)) in try_get_mem_cgroup_from_page()
2375 memcg = NULL; in try_get_mem_cgroup_from_page()
2380 memcg = mem_cgroup_from_id(id); in try_get_mem_cgroup_from_page()
2381 if (memcg && !css_tryget_online(&memcg->css)) in try_get_mem_cgroup_from_page()
2382 memcg = NULL; in try_get_mem_cgroup_from_page()
2385 return memcg; in try_get_mem_cgroup_from_page()
2419 static void commit_charge(struct page *page, struct mem_cgroup *memcg, in commit_charge() argument
2447 page->mem_cgroup = memcg; in commit_charge()
2454 int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, in memcg_charge_kmem() argument
2460 ret = page_counter_try_charge(&memcg->kmem, nr_pages, &counter); in memcg_charge_kmem()
2464 ret = try_charge(memcg, gfp, nr_pages); in memcg_charge_kmem()
2481 page_counter_charge(&memcg->memory, nr_pages); in memcg_charge_kmem()
2483 page_counter_charge(&memcg->memsw, nr_pages); in memcg_charge_kmem()
2484 css_get_many(&memcg->css, nr_pages); in memcg_charge_kmem()
2487 page_counter_uncharge(&memcg->kmem, nr_pages); in memcg_charge_kmem()
2492 void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages) in memcg_uncharge_kmem() argument
2494 page_counter_uncharge(&memcg->memory, nr_pages); in memcg_uncharge_kmem()
2496 page_counter_uncharge(&memcg->memsw, nr_pages); in memcg_uncharge_kmem()
2498 page_counter_uncharge(&memcg->kmem, nr_pages); in memcg_uncharge_kmem()
2500 css_put_many(&memcg->css, nr_pages); in memcg_uncharge_kmem()
2508 int memcg_cache_id(struct mem_cgroup *memcg) in memcg_cache_id() argument
2510 return memcg ? memcg->kmemcg_id : -1; in memcg_cache_id()
2559 struct mem_cgroup *memcg; member
2568 struct mem_cgroup *memcg = cw->memcg; in memcg_kmem_cache_create_func() local
2571 memcg_create_kmem_cache(memcg, cachep); in memcg_kmem_cache_create_func()
2573 css_put(&memcg->css); in memcg_kmem_cache_create_func()
2580 static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, in __memcg_schedule_kmem_cache_create() argument
2589 css_get(&memcg->css); in __memcg_schedule_kmem_cache_create()
2591 cw->memcg = memcg; in __memcg_schedule_kmem_cache_create()
2598 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, in memcg_schedule_kmem_cache_create() argument
2613 __memcg_schedule_kmem_cache_create(memcg, cachep); in memcg_schedule_kmem_cache_create()
2632 struct mem_cgroup *memcg; in __memcg_kmem_get_cache() local
2641 memcg = get_mem_cgroup_from_mm(current->mm); in __memcg_kmem_get_cache()
2642 kmemcg_id = READ_ONCE(memcg->kmemcg_id); in __memcg_kmem_get_cache()
2662 memcg_schedule_kmem_cache_create(memcg, cachep); in __memcg_kmem_get_cache()
2664 css_put(&memcg->css); in __memcg_kmem_get_cache()
2671 css_put(&cachep->memcg_params.memcg->css); in __memcg_kmem_put_cache()
2691 struct mem_cgroup *memcg; in __memcg_kmem_newpage_charge() local
2696 memcg = get_mem_cgroup_from_mm(current->mm); in __memcg_kmem_newpage_charge()
2698 if (!memcg_kmem_is_active(memcg)) { in __memcg_kmem_newpage_charge()
2699 css_put(&memcg->css); in __memcg_kmem_newpage_charge()
2703 ret = memcg_charge_kmem(memcg, gfp, 1 << order); in __memcg_kmem_newpage_charge()
2705 *_memcg = memcg; in __memcg_kmem_newpage_charge()
2707 css_put(&memcg->css); in __memcg_kmem_newpage_charge()
2711 void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, in __memcg_kmem_commit_charge() argument
2714 VM_BUG_ON(mem_cgroup_is_root(memcg)); in __memcg_kmem_commit_charge()
2718 memcg_uncharge_kmem(memcg, 1 << order); in __memcg_kmem_commit_charge()
2721 page->mem_cgroup = memcg; in __memcg_kmem_commit_charge()
2726 struct mem_cgroup *memcg = page->mem_cgroup; in __memcg_kmem_uncharge_pages() local
2728 if (!memcg) in __memcg_kmem_uncharge_pages()
2731 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); in __memcg_kmem_uncharge_pages()
2733 memcg_uncharge_kmem(memcg, 1 << order); in __memcg_kmem_uncharge_pages()
2739 struct mem_cgroup *memcg = NULL; in __mem_cgroup_from_kmem() local
2747 memcg = cachep->memcg_params.memcg; in __mem_cgroup_from_kmem()
2750 memcg = page->mem_cgroup; in __mem_cgroup_from_kmem()
2752 return memcg; in __mem_cgroup_from_kmem()
2780 static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg, in mem_cgroup_swap_statistics() argument
2784 this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val); in mem_cgroup_swap_statistics()
2826 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, in mem_cgroup_resize_limit() argument
2841 mem_cgroup_count_children(memcg); in mem_cgroup_resize_limit()
2843 oldusage = page_counter_read(&memcg->memory); in mem_cgroup_resize_limit()
2852 if (limit > memcg->memsw.limit) { in mem_cgroup_resize_limit()
2857 if (limit > memcg->memory.limit) in mem_cgroup_resize_limit()
2859 ret = page_counter_limit(&memcg->memory, limit); in mem_cgroup_resize_limit()
2865 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true); in mem_cgroup_resize_limit()
2867 curusage = page_counter_read(&memcg->memory); in mem_cgroup_resize_limit()
2876 memcg_oom_recover(memcg); in mem_cgroup_resize_limit()
2881 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, in mem_cgroup_resize_memsw_limit() argument
2892 mem_cgroup_count_children(memcg); in mem_cgroup_resize_memsw_limit()
2894 oldusage = page_counter_read(&memcg->memsw); in mem_cgroup_resize_memsw_limit()
2903 if (limit < memcg->memory.limit) { in mem_cgroup_resize_memsw_limit()
2908 if (limit > memcg->memsw.limit) in mem_cgroup_resize_memsw_limit()
2910 ret = page_counter_limit(&memcg->memsw, limit); in mem_cgroup_resize_memsw_limit()
2916 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false); in mem_cgroup_resize_memsw_limit()
2918 curusage = page_counter_read(&memcg->memsw); in mem_cgroup_resize_memsw_limit()
2927 memcg_oom_recover(memcg); in mem_cgroup_resize_memsw_limit()
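
Both resize functions share one retry loop: enforce the limit ordering (memory.limit may not exceed memsw.limit), attempt page_counter_limit(), and if the new limit is below current usage, reclaim and retry, giving up after a bounded number of attempts in which usage fails to drop. Schematically, with hypothetical stand-ins and the usage-progress check reduced to a comment:

#include <stdio.h>
#include <stdbool.h>

/* Stand-in for page_counter_limit(): refuse a limit below current usage. */
static bool set_limit_model(unsigned long *limit, unsigned long new_limit,
                            unsigned long usage)
{
        if (new_limit < usage)
                return false;           /* kernel returns -EBUSY */
        *limit = new_limit;
        return true;
}

static int resize_model(unsigned long *limit, unsigned long *usage,
                        unsigned long new_limit, int retries)
{
        while (retries-- > 0) {
                if (set_limit_model(limit, new_limit, *usage))
                        return 0;
                /* kernel: try_to_free_mem_cgroup_pages(memcg, 1, ...);
                 * it also gives up early if usage stops shrinking. */
                if (*usage)
                        *usage -= 1;    /* model one page reclaimed */
        }
        return -1;                      /* still too high: -EBUSY */
}

int main(void)
{
        unsigned long limit = 1024, usage = 520;

        printf("%d (limit=%lu)\n",
               resize_model(&limit, &usage, 512, 16), limit);
        return 0;
}
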
2962 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone, in mem_cgroup_soft_limit_reclaim()
2977 excess = soft_limit_excess(mz->memcg); in mem_cgroup_soft_limit_reclaim()
2989 css_put(&mz->memcg->css); in mem_cgroup_soft_limit_reclaim()
3002 css_put(&next_mz->memcg->css); in mem_cgroup_soft_limit_reclaim()
3012 static inline bool memcg_has_children(struct mem_cgroup *memcg) in memcg_has_children() argument
3025 ret = css_next_child(NULL, &memcg->css); in memcg_has_children()
3036 static int mem_cgroup_force_empty(struct mem_cgroup *memcg) in mem_cgroup_force_empty() argument
3043 while (nr_retries && page_counter_read(&memcg->memory)) { in mem_cgroup_force_empty()
3049 progress = try_to_free_mem_cgroup_pages(memcg, 1, in mem_cgroup_force_empty()
3066 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in mem_cgroup_force_empty_write() local
3068 if (mem_cgroup_is_root(memcg)) in mem_cgroup_force_empty_write()
3070 return mem_cgroup_force_empty(memcg) ?: nbytes; in mem_cgroup_force_empty_write()
3083 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_hierarchy_write() local
3084 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent); in mem_cgroup_hierarchy_write()
3088 if (memcg->use_hierarchy == val) in mem_cgroup_hierarchy_write()
3101 if (!memcg_has_children(memcg)) in mem_cgroup_hierarchy_write()
3102 memcg->use_hierarchy = val; in mem_cgroup_hierarchy_write()
3114 static unsigned long tree_stat(struct mem_cgroup *memcg, in tree_stat() argument
3121 for_each_mem_cgroup_tree(iter, memcg) in tree_stat()
3129 static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) in mem_cgroup_usage() argument
3133 if (mem_cgroup_is_root(memcg)) { in mem_cgroup_usage()
3134 val = tree_stat(memcg, MEM_CGROUP_STAT_CACHE); in mem_cgroup_usage()
3135 val += tree_stat(memcg, MEM_CGROUP_STAT_RSS); in mem_cgroup_usage()
3137 val += tree_stat(memcg, MEM_CGROUP_STAT_SWAP); in mem_cgroup_usage()
3140 val = page_counter_read(&memcg->memory); in mem_cgroup_usage()
3142 val = page_counter_read(&memcg->memsw); in mem_cgroup_usage()
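
mem_cgroup_usage() shows why the root group is special: root is never charged (try_charge() bails out for it), so its usage must be reconstructed by summing cache, rss, and optionally swap statistics over the whole tree, while any other group simply reads its page counter. In outline, with assumed model types:

#include <stdio.h>

enum { STAT_CACHE, STAT_RSS, STAT_SWAP, NR_STATS };

struct memcg_model {
        int is_root;
        unsigned long counter;             /* page_counter_read() stand-in */
        unsigned long tree_stat[NR_STATS]; /* hierarchy-wide stat sums */
};

static unsigned long usage_model(const struct memcg_model *memcg, int swap)
{
        if (memcg->is_root) {
                /* root is never charged: rebuild usage from statistics */
                unsigned long val = memcg->tree_stat[STAT_CACHE] +
                                    memcg->tree_stat[STAT_RSS];
                if (swap)
                        val += memcg->tree_stat[STAT_SWAP];
                return val;
        }
        return memcg->counter;          /* everyone else: read the counter */
}

int main(void)
{
        struct memcg_model root = { 1, 0, { 100, 250, 30 } };

        printf("%lu %lu\n", usage_model(&root, 0), usage_model(&root, 1));
        return 0;
}
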
3158 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_read_u64() local
3163 counter = &memcg->memory; in mem_cgroup_read_u64()
3166 counter = &memcg->memsw; in mem_cgroup_read_u64()
3169 counter = &memcg->kmem; in mem_cgroup_read_u64()
3177 if (counter == &memcg->memory) in mem_cgroup_read_u64()
3178 return mem_cgroup_usage(memcg, false); in mem_cgroup_read_u64()
3179 if (counter == &memcg->memsw) in mem_cgroup_read_u64()
3180 return mem_cgroup_usage(memcg, true); in mem_cgroup_read_u64()
3189 return (u64)memcg->soft_limit * PAGE_SIZE; in mem_cgroup_read_u64()
3196 static int memcg_activate_kmem(struct mem_cgroup *memcg, in memcg_activate_kmem() argument
3202 BUG_ON(memcg->kmemcg_id >= 0); in memcg_activate_kmem()
3203 BUG_ON(memcg->kmem_acct_activated); in memcg_activate_kmem()
3204 BUG_ON(memcg->kmem_acct_active); in memcg_activate_kmem()
3219 if (cgroup_has_tasks(memcg->css.cgroup) || in memcg_activate_kmem()
3220 (memcg->use_hierarchy && memcg_has_children(memcg))) in memcg_activate_kmem()
3236 err = page_counter_limit(&memcg->kmem, nr_pages); in memcg_activate_kmem()
3246 memcg->kmemcg_id = memcg_id; in memcg_activate_kmem()
3247 memcg->kmem_acct_activated = true; in memcg_activate_kmem()
3248 memcg->kmem_acct_active = true; in memcg_activate_kmem()
3253 static int memcg_update_kmem_limit(struct mem_cgroup *memcg, in memcg_update_kmem_limit() argument
3259 if (!memcg_kmem_is_active(memcg)) in memcg_update_kmem_limit()
3260 ret = memcg_activate_kmem(memcg, limit); in memcg_update_kmem_limit()
3262 ret = page_counter_limit(&memcg->kmem, limit); in memcg_update_kmem_limit()
3267 static int memcg_propagate_kmem(struct mem_cgroup *memcg) in memcg_propagate_kmem() argument
3270 struct mem_cgroup *parent = parent_mem_cgroup(memcg); in memcg_propagate_kmem()
3281 ret = memcg_activate_kmem(memcg, PAGE_COUNTER_MAX); in memcg_propagate_kmem()
3286 static int memcg_update_kmem_limit(struct mem_cgroup *memcg, in memcg_update_kmem_limit() argument
3300 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in mem_cgroup_write() local
3311 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ in mem_cgroup_write()
3317 ret = mem_cgroup_resize_limit(memcg, nr_pages); in mem_cgroup_write()
3320 ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages); in mem_cgroup_write()
3323 ret = memcg_update_kmem_limit(memcg, nr_pages); in mem_cgroup_write()
3328 memcg->soft_limit = nr_pages; in mem_cgroup_write()
3338 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in mem_cgroup_reset() local
3343 counter = &memcg->memory; in mem_cgroup_reset()
3346 counter = &memcg->memsw; in mem_cgroup_reset()
3349 counter = &memcg->kmem; in mem_cgroup_reset()
3379 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_move_charge_write() local
3390 memcg->move_charge_at_immigrate = val; in mem_cgroup_move_charge_write()
3418 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); in memcg_numa_stat_show() local
3421 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask); in memcg_numa_stat_show()
3424 nr = mem_cgroup_node_nr_lru_pages(memcg, nid, in memcg_numa_stat_show()
3435 for_each_mem_cgroup_tree(iter, memcg) in memcg_numa_stat_show()
3440 for_each_mem_cgroup_tree(iter, memcg) in memcg_numa_stat_show()
3454 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); in memcg_stat_show() local
3469 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE); in memcg_stat_show()
3474 mem_cgroup_read_events(memcg, i)); in memcg_stat_show()
3478 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE); in memcg_stat_show()
3482 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { in memcg_stat_show()
3497 for_each_mem_cgroup_tree(mi, memcg) in memcg_stat_show()
3505 for_each_mem_cgroup_tree(mi, memcg) in memcg_stat_show()
3514 for_each_mem_cgroup_tree(mi, memcg) in memcg_stat_show()
3529 mz = &memcg->nodeinfo[nid]->zoneinfo[zid]; in memcg_stat_show()
3550 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_swappiness_read() local
3552 return mem_cgroup_swappiness(memcg); in mem_cgroup_swappiness_read()
3558 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_swappiness_write() local
3564 memcg->swappiness = val; in mem_cgroup_swappiness_write()
3571 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) in __mem_cgroup_threshold() argument
3579 t = rcu_dereference(memcg->thresholds.primary); in __mem_cgroup_threshold()
3581 t = rcu_dereference(memcg->memsw_thresholds.primary); in __mem_cgroup_threshold()
3586 usage = mem_cgroup_usage(memcg, swap); in __mem_cgroup_threshold()
3622 static void mem_cgroup_threshold(struct mem_cgroup *memcg) in mem_cgroup_threshold() argument
3624 while (memcg) { in mem_cgroup_threshold()
3625 __mem_cgroup_threshold(memcg, false); in mem_cgroup_threshold()
3627 __mem_cgroup_threshold(memcg, true); in mem_cgroup_threshold()
3629 memcg = parent_mem_cgroup(memcg); in mem_cgroup_threshold()
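
__mem_cgroup_threshold() keeps registered thresholds in an array sorted by value, plus the index of the last threshold known to be at or below usage; each check walks down from that index signaling thresholds fallen back below, then up for newly crossed ones, and stores the new index, so an unchanged usage touches only one or two entries. mem_cgroup_threshold() repeats this for every ancestor. A compact model of the sorted-array walk, printing instead of eventfd_signal():

#include <stdio.h>

struct thresholds_model {
        unsigned long *entries;         /* sorted ascending threshold values */
        int size;
        int current;                    /* index of last threshold <= usage */
};

static void threshold_check_model(struct thresholds_model *t,
                                  unsigned long usage)
{
        int i;

        /* Walk down: signal thresholds we have fallen back below. */
        for (i = t->current; i >= 0 && t->entries[i] > usage; i--)
                printf("signal %lu (down)\n", t->entries[i]);
        /* Walk up: signal thresholds newly crossed upward. */
        for (i++; i < t->size && t->entries[i] <= usage; i++)
                printf("signal %lu (up)\n", t->entries[i]);
        t->current = i - 1;             /* remember for the next check */
}

int main(void)
{
        unsigned long v[] = { 100, 200, 300 };
        struct thresholds_model t = { v, 3, -1 };

        threshold_check_model(&t, 250); /* signals 100 and 200 going up */
        threshold_check_model(&t, 50);  /* signals 200 and 100 going down */
        return 0;
}
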
3647 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg) in mem_cgroup_oom_notify_cb() argument
3653 list_for_each_entry(ev, &memcg->oom_notify, list) in mem_cgroup_oom_notify_cb()
3660 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg) in mem_cgroup_oom_notify() argument
3664 for_each_mem_cgroup_tree(iter, memcg) in mem_cgroup_oom_notify()
3668 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, in __mem_cgroup_usage_register_event() argument
3682 mutex_lock(&memcg->thresholds_lock); in __mem_cgroup_usage_register_event()
3685 thresholds = &memcg->thresholds; in __mem_cgroup_usage_register_event()
3686 usage = mem_cgroup_usage(memcg, false); in __mem_cgroup_usage_register_event()
3688 thresholds = &memcg->memsw_thresholds; in __mem_cgroup_usage_register_event()
3689 usage = mem_cgroup_usage(memcg, true); in __mem_cgroup_usage_register_event()
3695 __mem_cgroup_threshold(memcg, type == _MEMSWAP); in __mem_cgroup_usage_register_event()
3746 mutex_unlock(&memcg->thresholds_lock); in __mem_cgroup_usage_register_event()
3751 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg, in mem_cgroup_usage_register_event() argument
3754 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM); in mem_cgroup_usage_register_event()
3757 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg, in memsw_cgroup_usage_register_event() argument
3760 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP); in memsw_cgroup_usage_register_event()
3763 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, in __mem_cgroup_usage_unregister_event() argument
3771 mutex_lock(&memcg->thresholds_lock); in __mem_cgroup_usage_unregister_event()
3774 thresholds = &memcg->thresholds; in __mem_cgroup_usage_unregister_event()
3775 usage = mem_cgroup_usage(memcg, false); in __mem_cgroup_usage_unregister_event()
3777 thresholds = &memcg->memsw_thresholds; in __mem_cgroup_usage_unregister_event()
3778 usage = mem_cgroup_usage(memcg, true); in __mem_cgroup_usage_unregister_event()
3786 __mem_cgroup_threshold(memcg, type == _MEMSWAP); in __mem_cgroup_usage_unregister_event()
3839 mutex_unlock(&memcg->thresholds_lock); in __mem_cgroup_usage_unregister_event()
3842 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, in mem_cgroup_usage_unregister_event() argument
3845 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM); in mem_cgroup_usage_unregister_event()
3848 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg, in memsw_cgroup_usage_unregister_event() argument
3851 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP); in memsw_cgroup_usage_unregister_event()
3854 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg, in mem_cgroup_oom_register_event() argument
3866 list_add(&event->list, &memcg->oom_notify); in mem_cgroup_oom_register_event()
3869 if (atomic_read(&memcg->under_oom)) in mem_cgroup_oom_register_event()
3876 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg, in mem_cgroup_oom_unregister_event() argument
3883 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { in mem_cgroup_oom_unregister_event()
3895 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf)); in mem_cgroup_oom_control_read() local
3897 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); in mem_cgroup_oom_control_read()
3898 seq_printf(sf, "under_oom %d\n", (bool)atomic_read(&memcg->under_oom)); in mem_cgroup_oom_control_read()
3905 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_oom_control_write() local
3911 memcg->oom_kill_disable = val; in mem_cgroup_oom_control_write()
3913 memcg_oom_recover(memcg); in mem_cgroup_oom_control_write()
3919 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) in memcg_init_kmem() argument
3923 ret = memcg_propagate_kmem(memcg); in memcg_init_kmem()
3927 return mem_cgroup_sockets_init(memcg, ss); in memcg_init_kmem()
3930 static void memcg_deactivate_kmem(struct mem_cgroup *memcg) in memcg_deactivate_kmem() argument
3936 if (!memcg->kmem_acct_active) in memcg_deactivate_kmem()
3945 memcg->kmem_acct_active = false; in memcg_deactivate_kmem()
3947 memcg_deactivate_kmem_caches(memcg); in memcg_deactivate_kmem()
3949 kmemcg_id = memcg->kmemcg_id; in memcg_deactivate_kmem()
3952 parent = parent_mem_cgroup(memcg); in memcg_deactivate_kmem()
3964 css_for_each_descendant_pre(css, &memcg->css) { in memcg_deactivate_kmem()
3968 if (!memcg->use_hierarchy) in memcg_deactivate_kmem()
3976 static void memcg_destroy_kmem(struct mem_cgroup *memcg) in memcg_destroy_kmem() argument
3978 if (memcg->kmem_acct_activated) { in memcg_destroy_kmem()
3979 memcg_destroy_kmem_caches(memcg); in memcg_destroy_kmem()
3981 WARN_ON(page_counter_read(&memcg->kmem)); in memcg_destroy_kmem()
3983 mem_cgroup_sockets_destroy(memcg); in memcg_destroy_kmem()
3986 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) in memcg_init_kmem() argument
3991 static void memcg_deactivate_kmem(struct mem_cgroup *memcg) in memcg_deactivate_kmem() argument
3995 static void memcg_destroy_kmem(struct mem_cgroup *memcg) in memcg_destroy_kmem() argument
4022 struct mem_cgroup *memcg = event->memcg; in memcg_event_remove() local
4026 event->unregister_event(memcg, event->eventfd); in memcg_event_remove()
4033 css_put(&memcg->css); in memcg_event_remove()
4046 struct mem_cgroup *memcg = event->memcg; in memcg_event_wake() local
4059 spin_lock(&memcg->event_list_lock); in memcg_event_wake()
4068 spin_unlock(&memcg->event_list_lock); in memcg_event_wake()
4096 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in memcg_write_event_control() local
4122 event->memcg = memcg; in memcg_write_event_control()
4194 ret = event->register_event(memcg, event->eventfd, buf); in memcg_write_event_control()
4200 spin_lock(&memcg->event_list_lock); in memcg_write_event_control()
4201 list_add(&event->list, &memcg->event_list); in memcg_write_event_control()
4202 spin_unlock(&memcg->event_list_lock); in memcg_write_event_control()
4334 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) in alloc_mem_cgroup_per_zone_info() argument
4358 mz->memcg = memcg; in alloc_mem_cgroup_per_zone_info()
4360 memcg->nodeinfo[node] = pn; in alloc_mem_cgroup_per_zone_info()
4364 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) in free_mem_cgroup_per_zone_info() argument
4366 kfree(memcg->nodeinfo[node]); in free_mem_cgroup_per_zone_info()
4371 struct mem_cgroup *memcg; in mem_cgroup_alloc() local
4377 memcg = kzalloc(size, GFP_KERNEL); in mem_cgroup_alloc()
4378 if (!memcg) in mem_cgroup_alloc()
4381 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu); in mem_cgroup_alloc()
4382 if (!memcg->stat) in mem_cgroup_alloc()
4384 spin_lock_init(&memcg->pcp_counter_lock); in mem_cgroup_alloc()
4385 return memcg; in mem_cgroup_alloc()
4388 kfree(memcg); in mem_cgroup_alloc()
4403 static void __mem_cgroup_free(struct mem_cgroup *memcg) in __mem_cgroup_free() argument
4407 mem_cgroup_remove_from_trees(memcg); in __mem_cgroup_free()
4410 free_mem_cgroup_per_zone_info(memcg, node); in __mem_cgroup_free()
4412 free_percpu(memcg->stat); in __mem_cgroup_free()
4413 kfree(memcg); in __mem_cgroup_free()
4419 struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) in parent_mem_cgroup() argument
4421 if (!memcg->memory.parent) in parent_mem_cgroup()
4423 return mem_cgroup_from_counter(memcg->memory.parent, memory); in parent_mem_cgroup()
4430 struct mem_cgroup *memcg; in mem_cgroup_css_alloc() local
4434 memcg = mem_cgroup_alloc(); in mem_cgroup_css_alloc()
4435 if (!memcg) in mem_cgroup_css_alloc()
4439 if (alloc_mem_cgroup_per_zone_info(memcg, node)) in mem_cgroup_css_alloc()
4444 root_mem_cgroup = memcg; in mem_cgroup_css_alloc()
4445 page_counter_init(&memcg->memory, NULL); in mem_cgroup_css_alloc()
4446 memcg->high = PAGE_COUNTER_MAX; in mem_cgroup_css_alloc()
4447 memcg->soft_limit = PAGE_COUNTER_MAX; in mem_cgroup_css_alloc()
4448 page_counter_init(&memcg->memsw, NULL); in mem_cgroup_css_alloc()
4449 page_counter_init(&memcg->kmem, NULL); in mem_cgroup_css_alloc()
4452 memcg->last_scanned_node = MAX_NUMNODES; in mem_cgroup_css_alloc()
4453 INIT_LIST_HEAD(&memcg->oom_notify); in mem_cgroup_css_alloc()
4454 memcg->move_charge_at_immigrate = 0; in mem_cgroup_css_alloc()
4455 mutex_init(&memcg->thresholds_lock); in mem_cgroup_css_alloc()
4456 spin_lock_init(&memcg->move_lock); in mem_cgroup_css_alloc()
4457 vmpressure_init(&memcg->vmpressure); in mem_cgroup_css_alloc()
4458 INIT_LIST_HEAD(&memcg->event_list); in mem_cgroup_css_alloc()
4459 spin_lock_init(&memcg->event_list_lock); in mem_cgroup_css_alloc()
4461 memcg->kmemcg_id = -1; in mem_cgroup_css_alloc()
4464 return &memcg->css; in mem_cgroup_css_alloc()
4467 __mem_cgroup_free(memcg); in mem_cgroup_css_alloc()
4474 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_online() local
4486 memcg->use_hierarchy = parent->use_hierarchy; in mem_cgroup_css_online()
4487 memcg->oom_kill_disable = parent->oom_kill_disable; in mem_cgroup_css_online()
4488 memcg->swappiness = mem_cgroup_swappiness(parent); in mem_cgroup_css_online()
4491 page_counter_init(&memcg->memory, &parent->memory); in mem_cgroup_css_online()
4492 memcg->high = PAGE_COUNTER_MAX; in mem_cgroup_css_online()
4493 memcg->soft_limit = PAGE_COUNTER_MAX; in mem_cgroup_css_online()
4494 page_counter_init(&memcg->memsw, &parent->memsw); in mem_cgroup_css_online()
4495 page_counter_init(&memcg->kmem, &parent->kmem); in mem_cgroup_css_online()
4502 page_counter_init(&memcg->memory, NULL); in mem_cgroup_css_online()
4503 memcg->high = PAGE_COUNTER_MAX; in mem_cgroup_css_online()
4504 memcg->soft_limit = PAGE_COUNTER_MAX; in mem_cgroup_css_online()
4505 page_counter_init(&memcg->memsw, NULL); in mem_cgroup_css_online()
4506 page_counter_init(&memcg->kmem, NULL); in mem_cgroup_css_online()
4517 ret = memcg_init_kmem(memcg, &memory_cgrp_subsys); in mem_cgroup_css_online()
4526 smp_store_release(&memcg->initialized, 1); in mem_cgroup_css_online()
4533 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_offline() local
4541 spin_lock(&memcg->event_list_lock); in mem_cgroup_css_offline()
4542 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { in mem_cgroup_css_offline()
4546 spin_unlock(&memcg->event_list_lock); in mem_cgroup_css_offline()
4548 vmpressure_cleanup(&memcg->vmpressure); in mem_cgroup_css_offline()
4550 memcg_deactivate_kmem(memcg); in mem_cgroup_css_offline()
4555 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_free() local
4557 memcg_destroy_kmem(memcg); in mem_cgroup_css_free()
4558 __mem_cgroup_free(memcg); in mem_cgroup_css_free()
4576 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_css_reset() local
4578 mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
4579 mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
4580 memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
4581 memcg->low = 0; in mem_cgroup_css_reset()
4582 memcg->high = PAGE_COUNTER_MAX; in mem_cgroup_css_reset()
4583 memcg->soft_limit = PAGE_COUNTER_MAX; in mem_cgroup_css_reset()
5006 struct mem_cgroup *memcg = mem_cgroup_from_css(css); in mem_cgroup_can_attach() local
5014 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); in mem_cgroup_can_attach()
5019 VM_BUG_ON(from == memcg); in mem_cgroup_can_attach()
5034 mc.to = memcg; in mem_cgroup_can_attach()
5247 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); in memory_low_show() local
5248 unsigned long low = READ_ONCE(memcg->low); in memory_low_show()
5261 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_low_write() local
5270 memcg->low = low; in memory_low_write()
5277 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); in memory_high_show() local
5278 unsigned long high = READ_ONCE(memcg->high); in memory_high_show()
5291 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_high_write() local
5301 memcg->high = high; in memory_high_write()
5303 nr_pages = page_counter_read(&memcg->memory); in memory_high_write()
5305 try_to_free_mem_cgroup_pages(memcg, nr_pages - high, in memory_high_write()
5313 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); in memory_max_show() local
5314 unsigned long max = READ_ONCE(memcg->memory.limit); in memory_max_show()
5327 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); in memory_max_write() local
5336 err = mem_cgroup_resize_limit(memcg, max); in memory_max_write()
5345 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); in memory_events_show() local
5347 seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW)); in memory_events_show()
5348 seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH)); in memory_events_show()
5349 seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX)); in memory_events_show()
5350 seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM)); in memory_events_show()
5407 void mem_cgroup_events(struct mem_cgroup *memcg, in mem_cgroup_events() argument
5411 this_cpu_add(memcg->stat->events[idx], nr); in mem_cgroup_events()
5422 bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg) in mem_cgroup_low() argument
5433 if (memcg == root_mem_cgroup) in mem_cgroup_low()
5436 if (page_counter_read(&memcg->memory) >= memcg->low) in mem_cgroup_low()
5439 while (memcg != root) { in mem_cgroup_low()
5440 memcg = parent_mem_cgroup(memcg); in mem_cgroup_low()
5442 if (memcg == root_mem_cgroup) in mem_cgroup_low()
5445 if (page_counter_read(&memcg->memory) >= memcg->low) in mem_cgroup_low()
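
mem_cgroup_low() answers "is this group, and every ancestor up to the reclaim root, below its low protection limit?": the global root has no configurable range and never counts as protected, and the walk fails as soon as any level's usage reaches its 'low' setting. A sketch with a hypothetical parent-linked model:

#include <stdbool.h>
#include <stdio.h>

struct memcg_model {
        unsigned long usage, low;
        struct memcg_model *parent;     /* NULL above the top level */
};

/* True only if 'memcg' and every ancestor up to (excluding) the reclaim
 * root are below their 'low'; the global root is never protected. */
static bool low_model(struct memcg_model *global_root,
                      struct memcg_model *root, struct memcg_model *memcg)
{
        if (memcg == global_root)
                return false;
        if (memcg->usage >= memcg->low)
                return false;
        while (memcg != root) {
                memcg = memcg->parent;
                if (memcg == global_root)
                        break;
                if (memcg->usage >= memcg->low)
                        return false;
        }
        return true;
}

int main(void)
{
        struct memcg_model g = { 0, 0, NULL };          /* global root */
        struct memcg_model a = { 80, 100, &g };         /* under its low */
        struct memcg_model b = { 10, 50, &a };          /* under its low */

        printf("%d\n", low_model(&g, &g, &b));          /* 1: protected */
        return 0;
}
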
5471 struct mem_cgroup *memcg = NULL; in mem_cgroup_try_charge() local
5496 memcg = try_get_mem_cgroup_from_page(page); in mem_cgroup_try_charge()
5497 if (!memcg) in mem_cgroup_try_charge()
5498 memcg = get_mem_cgroup_from_mm(mm); in mem_cgroup_try_charge()
5500 ret = try_charge(memcg, gfp_mask, nr_pages); in mem_cgroup_try_charge()
5502 css_put(&memcg->css); in mem_cgroup_try_charge()
5505 memcg = root_mem_cgroup; in mem_cgroup_try_charge()
5509 *memcgp = memcg; in mem_cgroup_try_charge()
5529 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, in mem_cgroup_commit_charge() argument
5544 if (!memcg) in mem_cgroup_commit_charge()
5547 commit_charge(page, memcg, lrucare); in mem_cgroup_commit_charge()
5555 mem_cgroup_charge_statistics(memcg, page, nr_pages); in mem_cgroup_commit_charge()
5556 memcg_check_events(memcg, page); in mem_cgroup_commit_charge()
5577 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg) in mem_cgroup_cancel_charge() argument
5588 if (!memcg) in mem_cgroup_cancel_charge()
5596 cancel_charge(memcg, nr_pages); in mem_cgroup_cancel_charge()
5599 static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout, in uncharge_batch() argument
5606 if (!mem_cgroup_is_root(memcg)) { in uncharge_batch()
5607 page_counter_uncharge(&memcg->memory, nr_pages); in uncharge_batch()
5609 page_counter_uncharge(&memcg->memsw, nr_pages); in uncharge_batch()
5610 memcg_oom_recover(memcg); in uncharge_batch()
5614 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon); in uncharge_batch()
5615 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file); in uncharge_batch()
5616 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge); in uncharge_batch()
5617 __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout); in uncharge_batch()
5618 __this_cpu_add(memcg->stat->nr_page_events, nr_pages); in uncharge_batch()
5619 memcg_check_events(memcg, dummy_page); in uncharge_batch()
5622 if (!mem_cgroup_is_root(memcg)) in uncharge_batch()
5623 css_put_many(&memcg->css, nr_pages); in uncharge_batch()
5628 struct mem_cgroup *memcg = NULL; in uncharge_list() local
5655 if (memcg != page->mem_cgroup) { in uncharge_list()
5656 if (memcg) { in uncharge_list()
5657 uncharge_batch(memcg, pgpgout, nr_anon, nr_file, in uncharge_list()
5661 memcg = page->mem_cgroup; in uncharge_list()
5680 if (memcg) in uncharge_list()
5681 uncharge_batch(memcg, pgpgout, nr_anon, nr_file, in uncharge_list()
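
uncharge_list() batches the release work: it walks the page list accumulating counts while consecutive pages belong to the same memcg, and flushes through uncharge_batch() whenever ownership changes, plus once at the end, so counters and statistics are touched once per run rather than per page. The flush-on-owner-change pattern in miniature (illustrative model types, anon/file/huge splits collapsed into one count):

#include <stdio.h>

struct page_model { int memcg_id; };

static void flush_batch(int memcg_id, unsigned long nr)
{
        /* kernel: uncharge_batch(memcg, pgpgout, nr_anon, nr_file, ...) */
        printf("uncharge %lu pages from memcg %d\n", nr, memcg_id);
}

/* Accumulate per owner, flushing whenever the owning group changes. */
static void uncharge_list_model(struct page_model *pages, int n)
{
        int cur = -1;                   /* -1: no batch open yet */
        unsigned long nr = 0;

        for (int i = 0; i < n; i++) {
                if (pages[i].memcg_id != cur) {
                        if (nr)
                                flush_batch(cur, nr);
                        cur = pages[i].memcg_id;
                        nr = 0;
                }
                nr++;
        }
        if (nr)
                flush_batch(cur, nr);
}

int main(void)
{
        struct page_model pages[] = { {1}, {1}, {2}, {2}, {2}, {1} };

        uncharge_list_model(pages, 6);  /* flushes 2, then 3, then 1 */
        return 0;
}
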
5734 struct mem_cgroup *memcg; in mem_cgroup_migrate() local
5758 memcg = oldpage->mem_cgroup; in mem_cgroup_migrate()
5759 if (!memcg) in mem_cgroup_migrate()
5770 commit_charge(newpage, memcg, lrucare); in mem_cgroup_migrate()
5822 struct mem_cgroup *memcg; in mem_cgroup_swapout() local
5831 memcg = page->mem_cgroup; in mem_cgroup_swapout()
5834 if (!memcg) in mem_cgroup_swapout()
5837 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); in mem_cgroup_swapout()
5839 mem_cgroup_swap_statistics(memcg, true); in mem_cgroup_swapout()
5843 if (!mem_cgroup_is_root(memcg)) in mem_cgroup_swapout()
5844 page_counter_uncharge(&memcg->memory, 1); in mem_cgroup_swapout()
5847 mem_cgroup_charge_statistics(memcg, page, -1); in mem_cgroup_swapout()
5848 memcg_check_events(memcg, page); in mem_cgroup_swapout()
5859 struct mem_cgroup *memcg; in mem_cgroup_uncharge_swap() local
5867 memcg = mem_cgroup_from_id(id); in mem_cgroup_uncharge_swap()
5868 if (memcg) { in mem_cgroup_uncharge_swap()
5869 if (!mem_cgroup_is_root(memcg)) in mem_cgroup_uncharge_swap()
5870 page_counter_uncharge(&memcg->memsw, 1); in mem_cgroup_uncharge_swap()
5871 mem_cgroup_swap_statistics(memcg, false); in mem_cgroup_uncharge_swap()
5872 css_put(&memcg->css); in mem_cgroup_uncharge_swap()