From: Shakeel Butt <shakeelb@google.com>
mainline inclusion
from mainline-v5.11-rc5
commit 5c447d274f3746fbed6e695e7b9a2d7bd8b31b71
category: bugfix
bugzilla: 47675
CVE: NA
-------------------------------------------------
Currently the kernel does not correctly update the NUMA stats for NR_FILE_PAGES and NR_SHMEM on THP migration. Fix that.
For NR_FILE_DIRTY and NR_ZONE_WRITE_PENDING there is no need to handle THP migration yet, since the kernel does not have write support for file THP, but to be more future proof this patch adds THP support for those stats as well.
Link: https://lkml.kernel.org/r/20210108155813.2914586-2-shakeelb@google.com
Fixes: e71769ae52609 ("mm: enable thp migration for shmem thp")
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Yang Shi <shy828301@gmail.com>
Reviewed-by: Roman Gushchin <guro@fb.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/migrate.c | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)
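Illustrative note (not part of the patch): the crux of the fix is that every counter touched on migration must move by the number of base pages in the migrated page, i.e. hpage_nr_pages(page): 1 for a regular page, HPAGE_PMD_NR (512 with 4KB base pages and 2MB THPs) for a PMD-sized THP. The old __dec_node_state()/__inc_node_state() calls always moved one page worth of stat. The standalone sketch below uses hypothetical stand-in counters, not the real vmstat machinery, to show how a per-1 update drifts on THP migration while a per-nr update keeps both nodes consistent.

#include <stdio.h>

/* Hypothetical stand-ins for the per-node NR_FILE_PAGES counters. */
static long node_file_pages[2] = { 512, 0 };	/* node 0 holds one 2MB THP */

/* Buggy accounting: always moves a single page worth of stat. */
static void migrate_stat_old(int from, int to)
{
	node_file_pages[from] -= 1;
	node_file_pages[to]   += 1;
}

/*
 * Fixed accounting: moves the full subpage count, as the patch does with
 * __mod_node_page_state(..., -nr) / __mod_node_page_state(..., nr).
 */
static void migrate_stat_new(int from, int to, int nr)
{
	node_file_pages[from] -= nr;
	node_file_pages[to]   += nr;
}

int main(void)
{
	int nr = 512;	/* hpage_nr_pages() for a 2MB THP with 4KB base pages */

	migrate_stat_old(0, 1);
	printf("old: node0=%ld node1=%ld (source stays off by %d)\n",
	       node_file_pages[0], node_file_pages[1], nr - 1);

	/* Reset and redo the migration with the fixed accounting. */
	node_file_pages[0] = 512;
	node_file_pages[1] = 0;
	migrate_stat_new(0, 1, nr);
	printf("new: node0=%ld node1=%ld\n",
	       node_file_pages[0], node_file_pages[1]);
	return 0;
}

With the fix, migrating a shmem THP between nodes shifts the full 2MB worth of NR_FILE_PAGES/NR_SHMEM from the source node to the destination node instead of a single 4KB page's worth.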
diff --git a/mm/migrate.c b/mm/migrate.c
index 22b08aea06975..4a810183277ac 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -443,6 +443,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	int dirty;
 	int expected_count = 1 + extra_count;
 	void **pslot;
+	int nr = hpage_nr_pages(page);
 
 	/*
 	 * Device public or private pages have an extra refcount as they are
@@ -506,7 +507,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 */
 	newpage->index = page->index;
 	newpage->mapping = page->mapping;
-	page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */
+	page_ref_add(newpage, nr); /* add cache reference */
 	if (PageSwapBacked(page)) {
 		__SetPageSwapBacked(newpage);
 		if (PageSwapCache(page)) {
@@ -529,7 +530,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 		int i;
 		int index = page_index(page);
 
-		for (i = 1; i < HPAGE_PMD_NR; i++) {
+		for (i = 1; i < nr; i++) {
 			pslot = radix_tree_lookup_slot(&mapping->i_pages,
 						       index + i);
 			radix_tree_replace_slot(&mapping->i_pages, pslot,
@@ -542,7 +543,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 * to one less reference.
 	 * We know this isn't the last reference.
 	 */
-	page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));
+	page_ref_unfreeze(page, expected_count - nr);
 
 	xa_unlock(&mapping->i_pages);
 	/* Leave irq disabled to prevent preemption while updating stats */
@@ -558,17 +559,17 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 * are mapped to swap space.
 	 */
 	if (newzone != oldzone) {
-		__dec_node_state(oldzone->zone_pgdat, NR_FILE_PAGES);
-		__inc_node_state(newzone->zone_pgdat, NR_FILE_PAGES);
+		__mod_node_page_state(oldzone->zone_pgdat, NR_FILE_PAGES, -nr);
+		__mod_node_page_state(newzone->zone_pgdat, NR_FILE_PAGES, nr);
 		if (PageSwapBacked(page) && !PageSwapCache(page)) {
-			__dec_node_state(oldzone->zone_pgdat, NR_SHMEM);
-			__inc_node_state(newzone->zone_pgdat, NR_SHMEM);
+			__mod_node_page_state(oldzone->zone_pgdat, NR_SHMEM, -nr);
+			__mod_node_page_state(newzone->zone_pgdat, NR_SHMEM, nr);
 		}
 		if (dirty && mapping_cap_account_dirty(mapping)) {
-			__dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
-			__dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
-			__inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
-			__inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
+			__mod_node_page_state(oldzone->zone_pgdat, NR_FILE_DIRTY, -nr);
+			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
+			__mod_node_page_state(newzone->zone_pgdat, NR_FILE_DIRTY, nr);
+			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
 		}
 	}
 	local_irq_enable();