ascend inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7YF5R
---------------------------------------------
Add hugetlb_insert_hugepage_pte[_by_pa] to insert hugepages into a
process page table. The by_pa version behaves like remap_pfn_range():
it makes the PTE special, so it can be used for reserved physical
memory.
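A minimal usage sketch follows (illustrative only, not part of this
patch; the caller, its arguments, and the PAGE_SHARED protection are
assumptions, and addresses must be PMD-aligned):

    #include <linux/hugetlb.h>
    #include <linux/mm.h>

    /*
     * Hypothetical driver-side caller: assumes "addr" is a PMD-aligned
     * user address in "mm", "hpage" is a 2MB hugetlb page the driver
     * already holds, and "phys" is a reserved, 2MB-aligned physical
     * address.
     */
    static int example_map_hugepages(struct mm_struct *mm, unsigned long addr,
                                     struct page *hpage, unsigned long phys)
    {
            int ret;

            /* Map a hugepage that is backed by a struct page. */
            ret = hugetlb_insert_hugepage_pte(mm, addr, PAGE_SHARED, hpage);
            if (ret)
                    return ret;

            /*
             * Map reserved physical memory; the PTE is made special,
             * as with remap_pfn_range().
             */
            return hugetlb_insert_hugepage_pte_by_pa(mm, addr + PMD_SIZE,
                                                     PAGE_SHARED, phys);
    }

Both helpers return 0 on success, -EINVAL if no PMD-sized hstate
exists, and -ENXIO if the page-table allocation fails; only PMD-sized
(2MB with 4K pages) mappings are supported.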
Signed-off-by: Wang Wensheng <wangwensheng4@huawei.com>
---
 arch/arm64/Kconfig      |  9 ++++++
 include/linux/hugetlb.h | 32 +++++++++++++++++++
 mm/Kconfig              |  5 +++
 mm/hugetlb.c            | 70 +++++++++++++++++++++++++++++++++++++++++
 4 files changed, 116 insertions(+)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 792688beb757..4648b5b23552 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -2360,3 +2360,12 @@ source "drivers/acpi/Kconfig"
 
 source "arch/arm64/kvm/Kconfig"
 
+menuconfig ASCEND_FEATURES
+	bool "Support Ascend Features"
+	depends on ARM64
+	select HUGETLB_INSERT_PAGE
+	help
+	  The Ascend chip uses the Hisilicon DaVinci architecture, mainly
+	  focused on the AI and machine learning area, and contains many
+	  external features. Enable this config to enable a selective list
+	  of these features. If unsure, say N.
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 6d041aa9f0fe..e28c53198c5c 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -264,6 +264,26 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
 bool is_hugetlb_entry_migration(pte_t pte);
 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
 
+#ifdef CONFIG_HUGETLB_INSERT_PAGE
+int hugetlb_insert_hugepage_pte(struct mm_struct *mm, unsigned long addr,
+				pgprot_t prot, struct page *hpage);
+int hugetlb_insert_hugepage_pte_by_pa(struct mm_struct *mm,
+				      unsigned long vir_addr,
+				      pgprot_t prot, unsigned long phy_addr);
+#else /* CONFIG_HUGETLB_INSERT_PAGE */
+static inline int hugetlb_insert_hugepage_pte(struct mm_struct *mm, unsigned long addr,
+					      pgprot_t prot, struct page *hpage)
+{
+	return -EPERM;
+}
+static inline int hugetlb_insert_hugepage_pte_by_pa(struct mm_struct *mm,
+						    unsigned long vir_addr,
+						    pgprot_t prot, unsigned long phy_addr)
+{
+	return -EPERM;
+}
+#endif /* CONFIG_HUGETLB_INSERT_PAGE */
+
 #else /* !CONFIG_HUGETLB_PAGE */
 
 static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
@@ -470,6 +490,18 @@ static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
 static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }
+static inline int hugetlb_insert_hugepage_pte(struct mm_struct *mm, unsigned long addr,
+					      pgprot_t prot, struct page *hpage)
+{
+	return -EPERM;
+}
+static inline int hugetlb_insert_hugepage_pte_by_pa(struct mm_struct *mm,
+						    unsigned long vir_addr,
+						    pgprot_t prot, unsigned long phy_addr)
+{
+	return -EPERM;
+}
+
 #endif /* !CONFIG_HUGETLB_PAGE */
 
 /*
  * hugepages at page global directory. If arch support
diff --git a/mm/Kconfig b/mm/Kconfig
index 84f9cf32390c..fdd4a037e084 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -1239,6 +1239,11 @@ config ETMEM
 	  high-performance storage media to release memory space and reduce
 	  memory costs.
 
+config HUGETLB_INSERT_PAGE
+	bool
+	help
+	  This allows a driver to insert a hugetlb mapping into user address space.
+
 source "mm/damon/Kconfig"
 
 endmenu
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 04db457a32ab..36265751440b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -7726,3 +7726,73 @@ static void __init hugetlb_cma_check(void)
 }
 
 #endif /* CONFIG_CMA */
+
+#ifdef CONFIG_HUGETLB_INSERT_PAGE
+static pte_t *hugetlb_huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
+				     unsigned long size)
+{
+	pgd_t *pgdp;
+	p4d_t *p4dp;
+	pud_t *pudp;
+	pte_t *ptep = NULL;
+
+	pgdp = pgd_offset(mm, addr);
+	p4dp = p4d_offset(pgdp, addr);
+	pudp = pud_alloc(mm, p4dp, addr);
+	if (!pudp)
+		return NULL;
+
+	ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
+
+	return ptep;
+}
+
+static int __hugetlb_insert_hugepage(struct mm_struct *mm, unsigned long addr,
+				     pgprot_t prot, unsigned long pfn)
+{
+	int ret = 0;
+	pte_t *ptep, entry;
+	struct hstate *h;
+	spinlock_t *ptl;
+
+	h = size_to_hstate(PMD_SIZE);
+	if (!h)
+		return -EINVAL;
+
+	ptep = hugetlb_huge_pte_alloc(mm, addr, huge_page_size(h));
+	if (!ptep)
+		return -ENXIO;
+
+	if (WARN_ON(ptep && !pte_none(*ptep) && !pmd_huge(*(pmd_t *)ptep)))
+		return -ENXIO;
+
+	entry = pfn_pte(pfn, prot);
+	entry = huge_pte_mkdirty(entry);
+	if (!(pgprot_val(prot) & PTE_RDONLY))
+		entry = huge_pte_mkwrite(entry);
+	entry = pte_mkyoung(entry);
+	entry = pte_mkhuge(entry);
+	entry = pte_mkspecial(entry);
+
+	ptl = huge_pte_lockptr(h, mm, ptep);
+	spin_lock(ptl);
+	set_huge_pte_at(mm, addr, ptep, entry);
+	spin_unlock(ptl);
+
+	return ret;
+}
+
+int hugetlb_insert_hugepage_pte(struct mm_struct *mm, unsigned long addr,
+				pgprot_t prot, struct page *hpage)
+{
+	return __hugetlb_insert_hugepage(mm, addr, prot, page_to_pfn(hpage));
+}
+EXPORT_SYMBOL_GPL(hugetlb_insert_hugepage_pte);
+
+int hugetlb_insert_hugepage_pte_by_pa(struct mm_struct *mm, unsigned long addr,
+				      pgprot_t prot, unsigned long phy_addr)
+{
+	return __hugetlb_insert_hugepage(mm, addr, prot, phy_addr >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL_GPL(hugetlb_insert_hugepage_pte_by_pa);
+#endif /* CONFIG_HUGETLB_INSERT_PAGE */