From: Andrew Morton

Cc: "Eric W. Biederman"
Cc: Balbir Singh
Cc: David Rientjes
Cc: Herbert Poetzl
Cc: KAMEZAWA Hiroyuki
Cc: Kirill Korotaev
Cc: Nick Piggin
Cc: Paul Menage
Cc: Pavel Emelianov
Cc: Peter Zijlstra
Cc: Vaidyanathan Srinivasan
Cc: YAMAMOTO Takashi
Signed-off-by: Andrew Morton
---

 mm/memcontrol.c |   22 +++++++++-------------
 1 file changed, 9 insertions(+), 13 deletions(-)

diff -puN mm/memcontrol.c~memory-cgroup-enhancements-add-status-accounting-function-for-memory-cgroup-uninlining mm/memcontrol.c
--- a/mm/memcontrol.c~memory-cgroup-enhancements-add-status-accounting-function-for-memory-cgroup-uninlining
+++ a/mm/memcontrol.c
@@ -59,7 +59,7 @@ struct mem_cgroup_stat {
 /*
  * modifies value with disabling preempt.
  */
-static inline void __mem_cgroup_stat_add(struct mem_cgroup_stat *stat,
+static void __mem_cgroup_stat_add(struct mem_cgroup_stat *stat,
 		enum mem_cgroup_stat_index idx, int val)
 {
 	int cpu = get_cpu();
@@ -71,14 +71,14 @@ static inline void __mem_cgroup_stat_add
 /*
  * For accounting under irq disable, no need for increment preempt count.
  */
-static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat *stat,
+static void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat *stat,
 		enum mem_cgroup_stat_index idx, int val)
 {
 	int cpu = smp_processor_id();
 	stat->cpustat[cpu].count[idx] += val;
 }
 
-static inline s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
+static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
 		enum mem_cgroup_stat_index idx)
 {
 	int cpu;
@@ -162,8 +162,8 @@ enum charge_type {
 /*
  * Always modified under lru lock. Then, not necessary to preempt_disable()
  */
-static inline void
-mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags, bool charge)
+static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
+					bool charge)
 {
 	int val = (charge)? 1 : -1;
 	struct mem_cgroup_stat *stat = &mem->stat;
@@ -177,9 +177,6 @@ mem_cgroup_charge_statistics(struct mem_
 }
 
 
-
-
-
 static struct mem_cgroup init_mem_cgroup;
 
 static inline
@@ -254,8 +251,8 @@ static void __always_inline unlock_page_
  * This can fail if the page has been tied to a page_cgroup.
  * If success, returns 0.
  */
-static inline int
-page_cgroup_assign_new_page_cgroup(struct page *page, struct page_cgroup *pc)
+static int page_cgroup_assign_new_page_cgroup(struct page *page,
+					struct page_cgroup *pc)
 {
 	int ret = 0;
 
@@ -277,8 +274,8 @@ page_cgroup_assign_new_page_cgroup(struc
  * clear_page_cgroup(page, pc) == pc
  */
 
-static inline struct page_cgroup *
-clear_page_cgroup(struct page *page, struct page_cgroup *pc)
+static struct page_cgroup *clear_page_cgroup(struct page *page,
+					struct page_cgroup *pc)
 {
 	struct page_cgroup *ret;
 	/* lock and clear */
@@ -290,7 +287,6 @@ clear_page_cgroup(struct page *page, str
 	return ret;
 }
 
-
 static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
 {
 	if (active) {
_