
From: Baolin Wang <baolin.wang@linux.alibaba.com>

next inclusion
from next-20240510
commit 6b0ed7b3c77547d2308983a26db11a0d14a60ace
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9OCYO
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...

--------------------------------

Patch series "support multi-size THP numa balancing", v2.

This patchset tries to support mTHP numa balancing. As a simple solution
to start, the NUMA balancing algorithm for mTHP will follow the THP
strategy as the basic support. Please find details in each patch.

This patch (of 2):

To support large folio's numa balancing, factor out the numa mapping
rebuilding into a new helper as a preparation.

Link: https://lkml.kernel.org/r/cover.1712132950.git.baolin.wang@linux.alibaba.com
Link: https://lkml.kernel.org/r/cover.1711683069.git.baolin.wang@linux.alibaba.com
Link: https://lkml.kernel.org/r/8bc2586bdd8dbbe6d83c09b77b360ec8fcac3736.171168306...
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: "Huang, Ying" <ying.huang@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
---
 mm/memory.c | 22 +++++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index dcb94f8eefb2..33edf6a888b3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5043,6 +5043,20 @@ int numa_migrate_prep(struct folio *folio, struct vm_area_struct *vma,
 	return mpol_misplaced(folio, vma, addr);
 }
 
+static void numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma,
+					bool writable)
+{
+	pte_t pte, old_pte;
+
+	old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
+	pte = pte_modify(old_pte, vma->vm_page_prot);
+	pte = pte_mkyoung(pte);
+	if (writable)
+		pte = pte_mkwrite(pte, vma);
+	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
+	update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
+}
+
 static vm_fault_t do_numa_page(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
@@ -5148,13 +5162,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 	 * Make it present again, depending on how arch implements
 	 * non-accessible ptes, some can allow access by kernel mode.
 	 */
-	old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
-	pte = pte_modify(old_pte, vma->vm_page_prot);
-	pte = pte_mkyoung(pte);
-	if (writable)
-		pte = pte_mkwrite(pte, vma);
-	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
-	update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
+	numa_rebuild_single_mapping(vmf, vma, writable);
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 	goto out;
 }
--
2.25.1