Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, synced 2025-01-23 08:35:19 -05:00
mm: memcg: group cgroup v1 memcg related declarations
Group all cgroup v1-related declarations at the end of memcontrol.h and
mm/memcontrol-v1.h, with the intention of putting them all under a config
option later on. This should make things easier to follow and maintain, too.

Link: https://lkml.kernel.org/r/20240625005906.106920-13-roman.gushchin@linux.dev
Signed-off-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent 34926e10bb
commit 6f1173d684
2 changed files with 123 additions and 110 deletions
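The end state this refactoring prepares for is a single build-time switch around the grouped block. A minimal sketch of that shape, assuming the option name CONFIG_MEMCG_V1 introduced by a later patch in this series (the option itself is not part of this commit):

/*
 * Sketch only: this commit merely groups the declarations; the
 * CONFIG_MEMCG_V1 option assumed below was added separately.
 */

/* Cgroup v1-related declarations */
#if defined(CONFIG_MEMCG) && defined(CONFIG_MEMCG_V1)
bool mem_cgroup_oom_synchronize(bool wait);
void folio_memcg_lock(struct folio *folio);
void folio_memcg_unlock(struct folio *folio);
#else /* !CONFIG_MEMCG_V1 */
static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}
static inline void folio_memcg_lock(struct folio *folio)
{
}
static inline void folio_memcg_unlock(struct folio *folio)
{
}
#endif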
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -950,39 +950,13 @@ static inline void mem_cgroup_exit_user_fault(void)
 	current->in_user_fault = 0;
 }
 
-static inline bool task_in_memcg_oom(struct task_struct *p)
-{
-	return p->memcg_in_oom;
-}
-
-bool mem_cgroup_oom_synchronize(bool wait);
 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
 					    struct mem_cgroup *oom_domain);
 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
 
-void folio_memcg_lock(struct folio *folio);
-void folio_memcg_unlock(struct folio *folio);
-
 void __mod_memcg_state(struct mem_cgroup *memcg, enum memcg_stat_item idx,
 		       int val);
 
-/* try to stablize folio_memcg() for all the pages in a memcg */
-static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
-{
-	rcu_read_lock();
-
-	if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
-		return true;
-
-	rcu_read_unlock();
-	return false;
-}
-
-static inline void mem_cgroup_unlock_pages(void)
-{
-	rcu_read_unlock();
-}
-
 /* idx can be of type enum memcg_stat_item or node_stat_item */
 static inline void mod_memcg_state(struct mem_cgroup *memcg,
 				   enum memcg_stat_item idx, int val)
@@ -1109,10 +1083,6 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
 
 void split_page_memcg(struct page *head, int old_order, int new_order);
 
-unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
-					gfp_t gfp_mask,
-					unsigned long *total_scanned);
-
 #else /* CONFIG_MEMCG */
 
 #define MEM_CGROUP_ID_SHIFT	0
@@ -1423,26 +1393,6 @@ mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
 {
 }
 
-static inline void folio_memcg_lock(struct folio *folio)
-{
-}
-
-static inline void folio_memcg_unlock(struct folio *folio)
-{
-}
-
-static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
-{
-	/* to match folio_memcg_rcu() */
-	rcu_read_lock();
-	return true;
-}
-
-static inline void mem_cgroup_unlock_pages(void)
-{
-	rcu_read_unlock();
-}
-
 static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
 {
 }
@@ -1455,16 +1405,6 @@ static inline void mem_cgroup_exit_user_fault(void)
 {
 }
 
-static inline bool task_in_memcg_oom(struct task_struct *p)
-{
-	return false;
-}
-
-static inline bool mem_cgroup_oom_synchronize(bool wait)
-{
-	return false;
-}
-
 static inline struct mem_cgroup *mem_cgroup_get_oom_group(
 	struct task_struct *victim, struct mem_cgroup *oom_domain)
 {
@@ -1558,14 +1498,6 @@ void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
 static inline void split_page_memcg(struct page *head, int old_order, int new_order)
 {
 }
-
-static inline
-unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
-					gfp_t gfp_mask,
-					unsigned long *total_scanned)
-{
-	return 0;
-}
 #endif /* CONFIG_MEMCG */
 
 /*
@@ -1916,4 +1848,80 @@ static inline bool mem_cgroup_zswap_writeback_enabled(struct mem_cgroup *memcg)
 }
 #endif
 
+
+/* Cgroup v1-related declarations */
+
+#ifdef CONFIG_MEMCG
+unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
+					gfp_t gfp_mask,
+					unsigned long *total_scanned);
+
+bool mem_cgroup_oom_synchronize(bool wait);
+
+static inline bool task_in_memcg_oom(struct task_struct *p)
+{
+	return p->memcg_in_oom;
+}
+
+void folio_memcg_lock(struct folio *folio);
+void folio_memcg_unlock(struct folio *folio);
+
+/* try to stablize folio_memcg() for all the pages in a memcg */
+static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
+{
+	rcu_read_lock();
+
+	if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
+		return true;
+
+	rcu_read_unlock();
+	return false;
+}
+
+static inline void mem_cgroup_unlock_pages(void)
+{
+	rcu_read_unlock();
+}
+
+#else /* CONFIG_MEMCG */
+static inline
+unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order,
+					gfp_t gfp_mask,
+					unsigned long *total_scanned)
+{
+	return 0;
+}
+
+static inline void folio_memcg_lock(struct folio *folio)
+{
+}
+
+static inline void folio_memcg_unlock(struct folio *folio)
+{
+}
+
+static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
+{
+	/* to match folio_memcg_rcu() */
+	rcu_read_lock();
+	return true;
+}
+
+static inline void mem_cgroup_unlock_pages(void)
+{
+	rcu_read_unlock();
+}
+
+static inline bool task_in_memcg_oom(struct task_struct *p)
+{
+	return false;
+}
+
+static inline bool mem_cgroup_oom_synchronize(bool wait)
+{
+	return false;
+}
+
+#endif /* CONFIG_MEMCG */
+
 #endif /* _LINUX_MEMCONTROL_H */
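For context on the trylock pair moved above: callers use it to keep folio_memcg() stable while cgroup v1 charge moving may be in flight; it fails (without holding the RCU read lock) only when memcg->moving_account is raised. A hypothetical caller, sketched for illustration only (walk_folios_in_memcg is not a real kernel function):

/* Hypothetical caller, for illustration only. */
static void walk_folios_in_memcg(struct mem_cgroup *memcg)
{
	/* Fails only while v1 charge moving is in progress. */
	if (!mem_cgroup_trylock_pages(memcg))
		return;		/* caller may retry later */

	/*
	 * The RCU read side is held here, so folio_memcg() results
	 * for folios charged to this memcg remain stable.
	 */

	mem_cgroup_unlock_pages();	/* drops rcu_read_lock() */
}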
--- a/mm/memcontrol-v1.h
+++ b/mm/memcontrol-v1.h
@@ -5,15 +5,9 @@
 
 #include <linux/cgroup-defs.h>
 
-void memcg1_remove_from_trees(struct mem_cgroup *memcg);
-
-static inline void memcg1_soft_limit_reset(struct mem_cgroup *memcg)
-{
-	WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
-}
+/* Cgroup v1 and v2 common declarations */
 
 void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, int nr_pages);
-void memcg1_check_events(struct mem_cgroup *memcg, int nid);
 int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
 		     unsigned int nr_pages);
 
@@ -29,30 +23,6 @@ static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
 void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n);
 void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n);
 
-bool memcg1_wait_acct_move(struct mem_cgroup *memcg);
-struct cgroup_taskset;
-int memcg1_can_attach(struct cgroup_taskset *tset);
-void memcg1_cancel_attach(struct cgroup_taskset *tset);
-void memcg1_move_task(void);
-
-/*
- * Per memcg event counter is incremented at every pagein/pageout. With THP,
- * it will be incremented by the number of pages. This counter is used
- * to trigger some periodic events. This is straightforward and better
- * than using jiffies etc. to handle periodic memcg event.
- */
-enum mem_cgroup_events_target {
-	MEM_CGROUP_TARGET_THRESH,
-	MEM_CGROUP_TARGET_SOFTLIMIT,
-	MEM_CGROUP_NTARGETS,
-};
-
-/* Whether legacy memory+swap accounting is active */
-static bool do_memsw_account(void)
-{
-	return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
-}
-
 /*
  * Iteration constructs for visiting all cgroups (under a tree). If
  * loops are exited prematurely (break), mem_cgroup_iter_break() must
@@ -68,24 +38,28 @@ static bool do_memsw_account(void)
 	     iter != NULL;				\
 	     iter = mem_cgroup_iter(NULL, iter, NULL))
 
-void memcg1_css_offline(struct mem_cgroup *memcg);
+/* Whether legacy memory+swap accounting is active */
+static bool do_memsw_account(void)
+{
+	return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
+}
 
-/* for encoding cft->private value on file */
-enum res_type {
-	_MEM,
-	_MEMSWAP,
-	_KMEM,
-	_TCP,
+/*
+ * Per memcg event counter is incremented at every pagein/pageout. With THP,
+ * it will be incremented by the number of pages. This counter is used
+ * to trigger some periodic events. This is straightforward and better
+ * than using jiffies etc. to handle periodic memcg event.
+ */
+enum mem_cgroup_events_target {
+	MEM_CGROUP_TARGET_THRESH,
+	MEM_CGROUP_TARGET_SOFTLIMIT,
+	MEM_CGROUP_NTARGETS,
 };
 
 bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
 				enum mem_cgroup_events_target target);
 unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap);
 
-bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked);
-void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked);
-void memcg1_oom_recover(struct mem_cgroup *memcg);
-
 void drain_all_stock(struct mem_cgroup *root_memcg);
 
 unsigned long memcg_events(struct mem_cgroup *memcg, int event);
@@ -95,6 +69,37 @@ unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item);
 unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item);
 int memory_stat_show(struct seq_file *m, void *v);
 
+/* Cgroup v1-specific declarations */
+
+void memcg1_remove_from_trees(struct mem_cgroup *memcg);
+
+static inline void memcg1_soft_limit_reset(struct mem_cgroup *memcg)
+{
+	WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
+}
+
+bool memcg1_wait_acct_move(struct mem_cgroup *memcg);
+
+struct cgroup_taskset;
+int memcg1_can_attach(struct cgroup_taskset *tset);
+void memcg1_cancel_attach(struct cgroup_taskset *tset);
+void memcg1_move_task(void);
+void memcg1_css_offline(struct mem_cgroup *memcg);
+
+/* for encoding cft->private value on file */
+enum res_type {
+	_MEM,
+	_MEMSWAP,
+	_KMEM,
+	_TCP,
+};
+
+bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked);
+void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked);
+void memcg1_oom_recover(struct mem_cgroup *memcg);
+
+void memcg1_check_events(struct mem_cgroup *memcg, int nid);
+
 void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s);
 
 extern struct cftype memsw_files[];
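The enum res_type values moved into the v1-specific section above are packed into cft->private together with a per-resource attribute. The encoding macros below are quoted from mm/memcontrol.c for reference (RES_LIMIT is one of the companion enum res constants defined alongside them there):

/* From mm/memcontrol.c: pack/unpack cft->private. */
#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

/* e.g. memory.memsw.limit_in_bytes is wired up roughly as: */
/*	.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),	*/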