hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I9K8D1
CVE: NA
--------------------------------
do_migrate_range() first isolates the pages and then migrates them. If a page fails to be isolated, isolation is unlikely to succeed again shortly afterwards no matter how many times it is retried. And since migrate_pages() already retries the migration of each page up to 10 times internally, there is no need for the caller to retry the migration either.

So just call do_migrate_range() once. This reduces the amount of migration-failure logging.
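
To illustrate, the resulting merge flow can be modelled by the simplified
userspace sketch below. This is an illustration only: struct split_page_model,
migrate_once() and try_merge() are made-up stand-ins for the real split-page
bookkeeping, do_migrate_range() and hpool_merge_page().

#include <stdbool.h>
#include <stdio.h>

#define NR_SUBPAGES 8

/* Illustrative stand-in for a split huge page and its sub-page state. */
struct split_page_model {
	bool in_use[NR_SUBPAGES];	/* models PagePool() on each sub-page */
};

/* Stand-in for do_migrate_range(): assume migration frees the busy pages. */
static void migrate_once(struct split_page_model *sp)
{
	for (int i = 0; i < NR_SUBPAGES; i++)
		sp->in_use[i] = false;
}

/*
 * Model of the new control flow: migration is attempted at most once per
 * split page (tried_migrate); if sub-pages are still busy after that, the
 * merge is simply skipped (can_merge == false) instead of being retried.
 */
static int try_merge(struct split_page_model *sp, bool need_migrate)
{
	bool tried_migrate = false;
	bool can_merge;

merge:
	can_merge = true;
	for (int i = 0; i < NR_SUBPAGES; i++) {
		if (sp->in_use[i]) {
			can_merge = false;
			if (!need_migrate || tried_migrate)
				break;		/* no retry loop any more */
			tried_migrate = true;
			migrate_once(sp);
			goto merge;		/* re-scan the sub-pages once */
		}
	}

	if (!can_merge)
		return -1;	/* caller just moves on to the next split page */

	printf("merged\n");
	return 0;
}

int main(void)
{
	struct split_page_model sp = { .in_use = { [2] = true } };

	return try_merge(&sp, true) ? 1 : 0;
}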
Fixes: cdbeee51d044 ("mm/dynamic_hugetlb: add migration function")
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
---
 mm/dynamic_hugetlb.c | 33 +++++++++++++++++----------------
 1 file changed, 17 insertions(+), 16 deletions(-)
diff --git a/mm/dynamic_hugetlb.c b/mm/dynamic_hugetlb.c
index 802a36081c1b..dc4cf48a332e 100644
--- a/mm/dynamic_hugetlb.c
+++ b/mm/dynamic_hugetlb.c
@@ -231,9 +231,6 @@ static void reclaim_pages_from_percpu_pool(struct dhugetlb_pool *hpool,
 	}
 }
 
-/* We only try 5 times to reclaim pages */
-#define HPOOL_RECLAIM_RETRIES	5
-
 static int hpool_merge_page(struct dhugetlb_pool *hpool, int hpages_pool_idx, bool force_merge)
 {
 	struct huge_pages_pool *hpages_pool, *src_hpages_pool;
@@ -242,7 +239,8 @@ static int hpool_merge_page(struct dhugetlb_pool *hpool, int hpages_pool_idx, bo
 	struct page *page, *next, *p;
 	struct percpu_pages_pool *percpu_pool;
 	bool need_migrate = false, need_initial = false;
-	int i, try;
+	bool tried_migrate, can_merge;
+	int i;
 	LIST_HEAD(wait_page_list);
 
 	lockdep_assert_held(&hpool->lock);
@@ -269,9 +267,11 @@ static int hpool_merge_page(struct dhugetlb_pool *hpool, int hpages_pool_idx, bo
 			return -ENOMEM;
 
 	list_for_each_entry_safe(split_page, split_next, &hpages_pool->hugepage_splitlists, head_pages) {
-		try = 0;
+		tried_migrate = false;
 
 merge:
+		can_merge = true;
+
 		/*
 		 * If we are merging 4K page to 2M page, we need to get
 		 * lock of percpu pool sequentially and clear percpu pool.
@@ -290,8 +290,14 @@ static int hpool_merge_page(struct dhugetlb_pool *hpool, int hpages_pool_idx, bo
 		for (i = 0; i < nr_pages; i+= block_size) {
 			p = pfn_to_page(split_page->start_pfn + i);
 			if (PagePool(p)) {
-				if (!need_migrate)
-					goto next;
+				/*
+				 * Some pages are still in use, so we can't merge.
+				 * If migration is not needed or has already been
+				 * tried, skip merging these pages.
+				 */
+				can_merge = false;
+				if (!need_migrate || tried_migrate)
+					break;
 				else
 					goto migrate;
 			}
@@ -303,6 +309,9 @@ static int hpool_merge_page(struct dhugetlb_pool *hpool, int hpages_pool_idx, bo
 		 */
 		dhugetlb_percpu_pool_unlock_all(hpool);
 
+		if (!can_merge)
+			continue;
+
 		list_del(&split_page->head_pages);
 		hpages_pool->split_normal_pages--;
 		for (i = 0; i < nr_pages; i+= block_size) {
@@ -333,16 +342,8 @@ static int hpool_merge_page(struct dhugetlb_pool *hpool, int hpages_pool_idx, bo
 		add_new_page_to_pool(hpool, page, hpages_pool_idx);
 		trace_dynamic_hugetlb_split_merge(hpool, page, DHUGETLB_MERGE, page_size(page));
 		return 0;
-next:
-		if (hpages_pool_idx == HUGE_PAGES_POOL_2M)
-			/* Unlock percpu pool before try next */
-			dhugetlb_percpu_pool_unlock_all(hpool);
-
-		continue;
 migrate:
-		/* page migration only used for HUGE_PAGES_POOL_2M */
-		if (try++ >= HPOOL_RECLAIM_RETRIES)
-			goto next;
+		tried_migrate = true;
 
 		/* Isolate free page first. */
 		INIT_LIST_HEAD(&wait_page_list);