From: Ma Wupeng <mawupeng1@huawei.com>
hulk inclusion
category: cleanup
bugzilla: https://gitee.com/openeuler/kernel/issues/I9K8D1
CVE: NA
--------------------------------
Use helper functions to take and release the dynamic hugetlb related locks
(each percpu_pool lock plus the hpool lock) instead of open-coding the
lock/unlock loops at every call site.

No functional change.
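The helpers encode the existing lock ordering: every percpu_pool lock is
taken before hpool->lock, and everything is released in the reverse order.
As an illustration, call sites now pair up as in the sketch below
(reclaim_all_pools() is a hypothetical caller used only for illustration,
not part of this patch):

	/* Sketch only: quiesce every pool before touching shared state. */
	static void reclaim_all_pools(struct dhugetlb_pool *hpool)
	{
		dhugetlb_lock_all(hpool);	/* percpu locks first, then hpool->lock */
		/* ... merge pages or read counters with allocation blocked ... */
		dhugetlb_unlock_all(hpool);	/* hpool->lock first, then percpu locks */
	}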
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 mm/dynamic_hugetlb.c | 79 ++++++++++++++++++++++++++++++++------------
 1 file changed, 58 insertions(+), 21 deletions(-)
diff --git a/mm/dynamic_hugetlb.c b/mm/dynamic_hugetlb.c
index 8199ef893f4a..802a36081c1b 100644
--- a/mm/dynamic_hugetlb.c
+++ b/mm/dynamic_hugetlb.c
@@ -19,6 +19,53 @@ bool enable_dhugetlb = false;
 DEFINE_STATIC_KEY_FALSE(dhugetlb_enabled_key);
 
+/*
+ * Take all percpu pool locks to prevent any page allocation from the percpu pools.
+ *
+ * Before taking the percpu_pool locks, make sure the hpool lock is released.
+ */
+static inline void dhugetlb_percpu_pool_lock_all(struct dhugetlb_pool *hpool)
+{
+	int i;
+
+	for (i = 0; i < NR_PERCPU_POOL; i++)
+		spin_lock(&hpool->percpu_pool[i].lock);
+}
+
+static inline void dhugetlb_percpu_pool_unlock_all(struct dhugetlb_pool *hpool)
+{
+	int i;
+
+	for (i = NR_PERCPU_POOL - 1; i >= 0; i--)
+		spin_unlock(&hpool->percpu_pool[i].lock);
+}
+
+/*
+ * Lock all pools before reading or writing any percpu_pool.
+ *
+ * Each percpu_pool lock blocks pages from being allocated/freed by others.
+ * The hpool lock blocks pages from being allocated/freed via the percpu pools.
+ *
+ * We need to lock all in the following situations:
+ * a) when merging pages, we have to make sure no one can allocate a page
+ *    from any pool.
+ * b) when getting an accurate page count.
+ * hpool->lock and all percpu_pool locks must be released before calling this.
+ */
+static inline void dhugetlb_lock_all(struct dhugetlb_pool *hpool)
+{
+	dhugetlb_percpu_pool_lock_all(hpool);
+	spin_lock(&hpool->lock);
+}
+
+static inline void dhugetlb_unlock_all(struct dhugetlb_pool *hpool)
+{
+	lockdep_assert_held(&hpool->lock);
+
+	spin_unlock(&hpool->lock);
+	dhugetlb_percpu_pool_unlock_all(hpool);
+}
+
 #define hugepage_index(pfn)	((pfn) >> (PUD_SHIFT - PAGE_SHIFT))
 static void add_new_page_to_pool(struct dhugetlb_pool *hpool, struct page *page, int hpages_pool_idx)
 {
@@ -231,9 +278,7 @@ static int hpool_merge_page(struct dhugetlb_pool *hpool, int hpages_pool_idx, bo
 	 */
 		if (hpages_pool_idx == HUGE_PAGES_POOL_2M) {
 			spin_unlock(&hpool->lock);
-			for (i = 0; i < NR_PERCPU_POOL; i++)
-				spin_lock(&hpool->percpu_pool[i].lock);
-			spin_lock(&hpool->lock);
+			dhugetlb_lock_all(hpool);
 			for (i = 0; i < NR_PERCPU_POOL; i++) {
 				percpu_pool = &hpool->percpu_pool[i];
 				reclaim_pages_from_percpu_pool(hpool, percpu_pool,
@@ -251,14 +296,12 @@ static int hpool_merge_page(struct dhugetlb_pool *hpool, int hpages_pool_idx, bo
 					goto migrate;
 			}
 		}
-		if (hpages_pool_idx == HUGE_PAGES_POOL_2M) {
+		if (hpages_pool_idx == HUGE_PAGES_POOL_2M)
 			/*
 			 * All target 4K page are in src_hpages_pool, we
 			 * can unlock percpu pool.
 			 */
-			for (i = 0; i < NR_PERCPU_POOL; i++)
-				spin_unlock(&hpool->percpu_pool[i].lock);
-		}
+			dhugetlb_percpu_pool_unlock_all(hpool);
 
 		list_del(&split_page->head_pages);
 		hpages_pool->split_normal_pages--;
@@ -291,11 +334,10 @@ static int hpool_merge_page(struct dhugetlb_pool *hpool, int hpages_pool_idx, bo
 		trace_dynamic_hugetlb_split_merge(hpool, page, DHUGETLB_MERGE, page_size(page));
 		return 0;
 next:
-		if (hpages_pool_idx == HUGE_PAGES_POOL_2M) {
+		if (hpages_pool_idx == HUGE_PAGES_POOL_2M)
 			/* Unlock percpu pool before try next */
-			for (i = 0; i < NR_PERCPU_POOL; i++)
-				spin_unlock(&hpool->percpu_pool[i].lock);
-		}
+			dhugetlb_percpu_pool_unlock_all(hpool);
+
 		continue;
 migrate:
 		/* page migration only used for HUGE_PAGES_POOL_2M */
@@ -313,9 +355,7 @@ static int hpool_merge_page(struct dhugetlb_pool *hpool, int hpages_pool_idx, bo
 	}
 
 	/* Unlock and try migration. */
-	for (i = 0; i < NR_PERCPU_POOL; i++)
-		spin_unlock(&hpool->percpu_pool[i].lock);
-	spin_unlock(&hpool->lock);
+	dhugetlb_unlock_all(hpool);
 
 	for (i = 0; i < nr_pages; i += block_size) {
 		p = pfn_to_page(split_page->start_pfn + i);
@@ -652,8 +692,9 @@ void link_hpool(struct hugetlbfs_inode_info *p, struct hstate *h)
 		p->hpool = find_hpool_by_task(current);
 		if (!get_hpool_unless_zero(p->hpool))
 			p->hpool = NULL;
-	} else
+	} else {
 		p->hpool = NULL;
+	}
 }
 
 void unlink_hpool(struct hugetlbfs_inode_info *p)
@@ -1137,9 +1178,7 @@ int hugetlb_pool_info_show(struct seq_file *m, void *v)
 	if (!get_hpool_unless_zero(hpool))
 		return 0;
 
-	for (i = 0; i < NR_PERCPU_POOL; i++)
-		spin_lock(&hpool->percpu_pool[i].lock);
-	spin_lock(&hpool->lock);
+	dhugetlb_lock_all(hpool);
 
 	free_pages = hpool->hpages_pool[HUGE_PAGES_POOL_4K].free_normal_pages;
 	for (i = 0; i < NR_PERCPU_POOL; i++) {
@@ -1175,9 +1214,7 @@ int hugetlb_pool_info_show(struct seq_file *m, void *v)
 		   free_pages, used_pages);
 
-	spin_unlock(&hpool->lock);
-	for (i = NR_PERCPU_POOL - 1; i >= 0; i--)
-		spin_unlock(&hpool->percpu_pool[i].lock);
+	dhugetlb_unlock_all(hpool);
 	put_hpool(hpool);
 	return 0;
 }
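
Usage note (an editorial sketch restating what the hunk above already does
in hugetlb_pool_info_show(), with the statistics gathering elided; this is
not additional patch content): readers that need a consistent snapshot
across all pools follow the same lock-all/unlock-all pattern.

	if (!get_hpool_unless_zero(hpool))
		return 0;

	dhugetlb_lock_all(hpool);
	/* ... sum free_normal_pages over hpages_pool and each percpu_pool ... */
	dhugetlb_unlock_all(hpool);

	put_hpool(hpool);
	return 0;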