
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/ICLL1L

--------------------------------

Memory retained in the Per-CPU Pages (PCP) caches can prevent hugepage
allocations from succeeding despite sufficient free system memory. This
occurs because hugepage allocations don't actively trigger PCP
draining.

Reproduction:
- Allocate a page and free it via put_page() so that it is released to
  the PCP list
- Observe that the hugepage reservation fails

Solution: Actively drain the PCP lists during hugetlb memory
allocation.

Verification: This issue can be reproduced easily in the movable zone
with the following steps:

w/o this patch:
# numactl -m 2 dd if=/dev/urandom of=/dev/shm/testfile bs=4k count=64
# rm -f /dev/shm/testfile
# sync
# echo 3 > /proc/sys/vm/drop_caches
# echo 2048 > /sys/devices/system/node/node2/hugepages/hugepages-2048kB/nr_hugepages
# cat /sys/devices/system/node/node2/hugepages/hugepages-2048kB/nr_hugepages
2029

w/ this patch:
# numactl -m 2 dd if=/dev/urandom of=/dev/shm/testfile bs=4k count=64
# rm -f /dev/shm/testfile
# sync
# echo 3 > /proc/sys/vm/drop_caches
# echo 2048 > /sys/devices/system/node/node2/hugepages/hugepages-2048kB/nr_hugepages
# cat /sys/devices/system/node/node2/hugepages/hugepages-2048kB/nr_hugepages
2044

Signed-off-by: Wupeng Ma <mawupeng1@huawei.com>
---
 mm/hugetlb.c | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ec0ab09e1b56..28b8d7a6acf1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3012,6 +3012,21 @@ static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
 	return 1;
 }
 
+static void hugetlb_drain_movable_pcp(struct hstate *h, int nid)
+{
+	pg_data_t *pgdat = NODE_DATA(nid);
+	struct zone *zone;
+
+	/*
+	 * Only ZONE_MOVABLE needs to be drained, as it is the only
+	 * zone that can be used exclusively by hugetlb.
+	 */
+	zone = &pgdat->node_zones[ZONE_MOVABLE];
+
+	if (zone_managed_pages(zone))
+		drain_all_pages(zone);
+}
+
 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
 			      nodemask_t *nodes_allowed)
@@ -3094,6 +3109,13 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
 		break;
 	}
 
+	/*
+	 * Drain the movable zone's PCP lists to increase the success
+	 * rate of hugetlb memory allocation.
+	 */
+	if (count > persistent_huge_pages(h))
+		hugetlb_drain_movable_pcp(h, nid);
+
 	while (count > persistent_huge_pages(h)) {
 		/*
 		 * If this allocation races such that we no longer need the
--
2.43.0
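
A note on the reproduction step: the PCP retention can also be
triggered directly from a minimal test module rather than through the
page cache as in the verification above. The sketch below is
illustrative only and is not part of this patch; the pcp_repro module
and function names are hypothetical.

#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static int __init pcp_repro_init(void)
{
	struct page *page;

	/* Allocate a single order-0 movable page. */
	page = alloc_pages(GFP_HIGHUSER_MOVABLE, 0);
	if (!page)
		return -ENOMEM;

	/*
	 * Dropping the last reference on an order-0 page sends it to
	 * the current CPU's PCP list instead of straight back to the
	 * buddy allocator, so it stays invisible to a subsequent
	 * hugepage reservation until the PCP lists are drained.
	 */
	put_page(page);

	return 0;
}

static void __exit pcp_repro_exit(void)
{
}

module_init(pcp_repro_init);
module_exit(pcp_repro_exit);
MODULE_LICENSE("GPL");

Memory parked on the PCP lists this way is exactly what
drain_all_pages() flushes back to the buddy allocator, which is why
draining before the hugepage allocation loop raises the success rate.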