hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9H66Y
CVE: NA
--------------------------------
When swapping in a page in async mode, if the swap entry belongs to a memcg and that memcg is bound to a hpool, we should try to allocate the page from the hpool first. If the swapin is triggered directly by a page fault, there is no need to allocate from the hpool proactively: the faulting task belongs to the memcg, so the page will be allocated from the hpool naturally.
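For the async path, the intended allocation order can be sketched as below (a simplified view of the memcg_alloc_page_vma() helper introduced by this patch; RCU locking and the !CONFIG_DYNAMIC_HUGETLB fallback are omitted):

	memcg = get_mem_cgroup_from_swap(entry);	/* via lookup_swap_cgroup_id() */
	if (memcg) {
		/* try the dhugetlb pool bound to the owning memcg first */
		page = alloc_page_from_dhugetlb_pool(memcg, gfp_mask, 0, 0);
		css_put(&memcg->css);
	}
	if (!page)
		/* fall back to the normal per-VMA allocation */
		page = alloc_page_vma(gfp_mask, vma, addr);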
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
---
 include/linux/memcontrol.h | 10 +++++++++
 mm/memcontrol.c            | 44 ++++++++++++++++++++++++++++++++++++++
 mm/swap_state.c            |  2 +-
 3 files changed, 55 insertions(+), 1 deletion(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 450300805b21..bac4aade25b4 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -808,6 +808,9 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
 		page_counter_read(&memcg->memory);
 }
 
+struct page *memcg_alloc_page_vma(swp_entry_t entry, gfp_t gfp_mask,
+				  struct vm_area_struct *vma, unsigned long addr);
+
 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);
 
 void mem_cgroup_uncharge(struct page *page);
@@ -1411,6 +1414,13 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
 	return false;
 }
 
+static inline struct page *memcg_alloc_page_vma(swp_entry_t entry, gfp_t gfp_mask,
+						struct vm_area_struct *vma,
+						unsigned long addr)
+{
+	return alloc_page_vma(gfp_mask, vma, addr);
+}
+
 static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
 				    gfp_t gfp_mask)
 {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f7cdcdfa81b1..3195991c98ae 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -7954,6 +7954,50 @@ void mem_cgroup_calculate_protection(struct mem_cgroup *root,
 			atomic_long_read(&parent->memory.children_low_usage)));
 }
 
+#ifdef CONFIG_DYNAMIC_HUGETLB
+static struct mem_cgroup *get_mem_cgroup_from_swap(swp_entry_t entry)
+{
+	struct mem_cgroup *memcg;
+	unsigned short id;
+
+	if (mem_cgroup_disabled())
+		return NULL;
+
+	id = lookup_swap_cgroup_id(entry);
+
+	rcu_read_lock();
+	memcg = mem_cgroup_from_id(id);
+	if (memcg && !css_tryget_online(&memcg->css))
+		memcg = NULL;
+	rcu_read_unlock();
+
+	return memcg;
+}
+
+struct page *memcg_alloc_page_vma(swp_entry_t entry, gfp_t gfp_mask,
+				  struct vm_area_struct *vma, unsigned long addr)
+{
+	struct mem_cgroup *memcg;
+	struct page *page = NULL;
+
+	memcg = get_mem_cgroup_from_swap(entry);
+	if (memcg) {
+		page = alloc_page_from_dhugetlb_pool(memcg, gfp_mask, 0, 0);
+		css_put(&memcg->css);
+	}
+	if (!page)
+		page = alloc_page_vma(gfp_mask, vma, addr);
+
+	return page;
+}
+#else
+struct page *memcg_alloc_page_vma(swp_entry_t entry, gfp_t gfp_mask,
+				  struct vm_area_struct *vma, unsigned long addr)
+{
+	return alloc_page_vma(gfp_mask, vma, addr);
+}
+#endif
+
 /**
  * mem_cgroup_charge - charge a newly allocated page to a cgroup
  * @page: page to charge
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 69d71c4be7b8..f0929da6225a 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -491,7 +491,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
 		 * cause any racers to loop around until we add it to cache.
 		 */
-		page = alloc_page_vma(gfp_mask, vma, addr);
+		page = memcg_alloc_page_vma(entry, gfp_mask, vma, addr);
 		if (!page)
 			return NULL;