hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8YLWW
--------------------------------
Add pmd/pte update and TLB flush helper functions for updating the page table. This refactoring patch allows each architecture to supply its own special logic, in preparation for arm64 following the necessary break-before-make sequence when it updates page tables.
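As an illustration of the intended override mechanism (not part of this patch): an architecture can pre-define one of these helpers in its own headers, which suppresses the generic #ifndef default added below. The helper name matches this patch, but the break-before-make body here is only an assumed sketch, not the actual arm64 implementation:

#define vmemmap_update_pte vmemmap_update_pte
static inline void vmemmap_update_pte(unsigned long addr,
				      pte_t *ptep, pte_t pte)
{
	/*
	 * Assumed break-before-make sketch: invalidate the old entry and
	 * flush it from the TLB before installing the new mapping.
	 */
	pte_clear(&init_mm, addr, ptep);
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	set_pte_at(&init_mm, addr, ptep, pte);
}

With such a #define in place, the corresponding #ifndef guard in mm/sparse-vmemmap.c leaves the architecture's version in effect instead of the generic one.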
Signed-off-by: Nanyong Sun <sunnanyong@huawei.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
---
 mm/sparse-vmemmap.c | 45 ++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 38 insertions(+), 7 deletions(-)
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 6803c89c5d21..bf5bb7872e9c 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -54,6 +54,37 @@ struct vmemmap_remap_walk {
 	struct list_head *vmemmap_pages;
 };
 
+#ifndef vmemmap_update_pmd
+static inline void vmemmap_update_pmd(unsigned long addr,
+				       pmd_t *pmdp, pte_t *ptep)
+{
+	pmd_populate_kernel(&init_mm, pmdp, ptep);
+}
+#endif
+
+#ifndef vmemmap_update_pte
+static inline void vmemmap_update_pte(unsigned long addr,
+				       pte_t *ptep, pte_t pte)
+{
+	set_pte_at(&init_mm, addr, ptep, pte);
+}
+#endif
+
+#ifndef vmemmap_flush_tlb_all
+static inline void vmemmap_flush_tlb_all(void)
+{
+	flush_tlb_all();
+}
+#endif
+
+#ifndef vmemmap_flush_tlb_range
+static inline void vmemmap_flush_tlb_range(unsigned long start,
+					   unsigned long end)
+{
+	flush_tlb_kernel_range(start, end);
+}
+#endif
+
 static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
 {
 	pmd_t __pmd;
@@ -88,8 +119,8 @@ static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
 		/* Make pte visible before pmd. See comment in __pte_alloc(). */
 		smp_wmb();
-		pmd_populate_kernel(&init_mm, pmd, pgtable);
-		flush_tlb_kernel_range(start, start + PMD_SIZE);
+		vmemmap_update_pmd(start, pmd, pgtable);
+		vmemmap_flush_tlb_range(start, start + PMD_SIZE);
 	} else {
 		pte_free_kernel(&init_mm, pgtable);
 	}
@@ -221,7 +252,7 @@ static int vmemmap_remap_range(unsigned long start, unsigned long end,
 		return ret;
 	} while (pgd++, addr = next, addr != end);
 
-	flush_tlb_kernel_range(start, end);
+	vmemmap_flush_tlb_range(start, end);
 
 	return 0;
 }
@@ -269,15 +300,15 @@ static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
 		/*
 		 * Makes sure that preceding stores to the page contents from
-		 * vmemmap_remap_free() become visible before the set_pte_at()
-		 * write.
+		 * vmemmap_remap_free() become visible before the
+		 * vmemmap_update_pte() write.
 		 */
 		smp_wmb();
 	}
 
 	entry = mk_pte(walk->reuse_page, pgprot);
 	list_add_tail(&page->lru, walk->vmemmap_pages);
-	set_pte_at(&init_mm, addr, pte, entry);
+	vmemmap_update_pte(addr, pte, entry);
 }
 
 /*
@@ -315,7 +346,7 @@ static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
 	copy_page(to, (void *)walk->reuse_addr);
 	reset_struct_pages(to);
 
-	set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
+	vmemmap_update_pte(addr, pte, mk_pte(page, pgprot));
 }
 
/**