
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I6MH03
CVE: NA

--------------------------------

When memory is fragmented, update_reserve_pages() may call
migrate_pages() to collect contiguous memory. That function can sleep,
so we should use a mutex instead of a spin lock. Use KABI_EXTEND to
avoid breaking KABI.

Fixes: 0c06a1c068ab ("mm/dynamic_hugetlb: add interface to configure the count of hugepages")
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
---
 include/linux/dynamic_hugetlb.h | 14 +++++++++++++-
 mm/dynamic_hugetlb.c            |  6 +++---
 2 files changed, 16 insertions(+), 4 deletions(-)

diff --git a/include/linux/dynamic_hugetlb.h b/include/linux/dynamic_hugetlb.h
index 476a9014a83a..eff31669e210 100644
--- a/include/linux/dynamic_hugetlb.h
+++ b/include/linux/dynamic_hugetlb.h
@@ -66,7 +66,7 @@ enum huge_pages_pool_type {
 struct dhugetlb_pool {
 	int nid;
 	spinlock_t lock;
-	spinlock_t reserved_lock;
+	KABI_DEPRECATE(spinlock_t, reserved_lock)
 	atomic_t refcnt;
 	unsigned long normal_pages_disabled;
 
@@ -74,6 +74,18 @@ struct dhugetlb_pool {
 	unsigned long total_huge_pages;
 	struct huge_pages_pool hpages_pool[HUGE_PAGES_POOL_MAX];
 
+	/* The dhugetlb_pool structure is used only by the core kernel; it
+	 * is accessed only by the memory cgroup and hugetlb core code, so
+	 * changes made to the dhugetlb_pool structure should not affect
+	 * third-party kernel modules.
+	 */
+	KABI_EXTEND(struct mutex reserved_lock)
+
+	/*
+	 * percpu_pool[] should only be used by the dynamic hugetlb core.
+	 * External kernel modules should not use it.
+	 */
 	struct percpu_pages_pool percpu_pool[0];
 };
 
diff --git a/mm/dynamic_hugetlb.c b/mm/dynamic_hugetlb.c
index 86541dc54f45..2e0142576358 100644
--- a/mm/dynamic_hugetlb.c
+++ b/mm/dynamic_hugetlb.c
@@ -887,7 +887,7 @@ static int hugetlb_pool_create(struct mem_cgroup *memcg, unsigned long nid)
 		return -ENOMEM;
 
 	spin_lock_init(&hpool->lock);
-	spin_lock_init(&hpool->reserved_lock);
+	mutex_init(&hpool->reserved_lock);
 	hpool->nid = nid;
 	atomic_set(&hpool->refcnt, 1);
 
@@ -1000,7 +1000,7 @@ static ssize_t update_reserved_pages(struct mem_cgroup *memcg, char *buf, int hp
 	if (!get_hpool_unless_zero(hpool))
 		return -EINVAL;
 
-	spin_lock(&hpool->reserved_lock);
+	mutex_lock(&hpool->reserved_lock);
 	spin_lock(&hpool->lock);
 	hpages_pool = &hpool->hpages_pool[hpages_pool_idx];
 	if (nr_pages > hpages_pool->nr_huge_pages) {
@@ -1036,7 +1036,7 @@ static ssize_t update_reserved_pages(struct mem_cgroup *memcg, char *buf, int hp
 		hpages_pool->free_normal_pages += delta;
 	}
 	spin_unlock(&hpool->lock);
-	spin_unlock(&hpool->reserved_lock);
+	mutex_unlock(&hpool->reserved_lock);
 	put_hpool(hpool);
 	return 0;
 }
-- 
2.25.1
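
For readers outside the kernel tree, here is a minimal sketch of the locking
pattern this patch adopts: a sleepable mutex as the outer reservation lock,
with a spinlock retained for the short, non-sleeping critical section inside
it. The lock and function names below are hypothetical stand-ins for
hpool->reserved_lock and hpool->lock, not the patch's actual symbols.

#include <linux/mutex.h>
#include <linux/spinlock.h>

/* Hypothetical locks mirroring reserved_lock (outer) and lock (inner). */
static DEFINE_MUTEX(reserve_mutex);   /* outer lock: may sleep while held */
static DEFINE_SPINLOCK(pool_lock);    /* inner lock: short atomic sections */

static void adjust_reserve(void)
{
	/*
	 * The reservation path may call functions such as migrate_pages(),
	 * which can sleep. Sleeping while holding a spinlock is forbidden
	 * (it would trigger "BUG: scheduling while atomic"), so the outer
	 * lock must be a mutex.
	 */
	mutex_lock(&reserve_mutex);

	/* Counter updates are short and non-sleeping: a spinlock is fine. */
	spin_lock(&pool_lock);
	/* ... move pages between pools, update counters ... */
	spin_unlock(&pool_lock);

	mutex_unlock(&reserve_mutex);
}

This is also why the patch only converts reserved_lock and leaves hpool->lock
as a spinlock: the inner section never sleeps, while the outer one may.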