From: Nicholas Piggin <npiggin@gmail.com>

ascend inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4EUVI
CVE: NA

https://lwn.net/ml/linux-kernel/20200825145753.529284-6-npiggin@gmail.com/

--------------
This changes the awkward approach where architectures provide init functions to determine which levels they can provide large mappings for, to one where the arch is queried for each call.
This removes code and indirection, and allows constant-folding of dead code for unsupported levels.
This also adds a prot argument to the arch query. This is unused currently but could help with some architectures (e.g., some powerpc processors can't map uncacheable memory with large pages).
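As an illustration of the new hook (not part of this patch; the flag and config names below are placeholders, not real kernel symbols), an architecture could key off the prot bits roughly like this to refuse large pages for uncacheable ranges:

	/*
	 * Hypothetical sketch only: reject large pages for a
	 * non-cacheable protection, otherwise fall back to a
	 * compile-time capability check.  _PAGE_EXAMPLE_NO_CACHE and
	 * CONFIG_EXAMPLE_HUGE_VMAP are placeholders.
	 */
	bool arch_vmap_pmd_supported(pgprot_t prot)
	{
		if (pgprot_val(prot) & _PAGE_EXAMPLE_NO_CACHE)
			return false;
		return IS_ENABLED(CONFIG_EXAMPLE_HUGE_VMAP);
	}

The generic code checks the max_page_shift limit before calling the arch hook, which is how the existing "nohugeiomap" parameter keeps working without any arch involvement.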
Cc: linuxppc-dev@lists.ozlabs.org
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: linux-arm-kernel@lists.infradead.org
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: x86@kernel.org
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Rui Xiang <rui.xiang@huawei.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Reviewed-by: Zefan Li <lizefan@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm64/include/asm/vmalloc.h   |  8 +++
 arch/arm64/mm/mmu.c                | 10 ++--
 arch/powerpc/include/asm/vmalloc.h |  8 +++
 arch/x86/include/asm/vmalloc.h     |  8 +++
 arch/x86/mm/ioremap.c              | 10 ++--
 include/linux/io.h                 |  9 ---
 include/linux/vmalloc.h            |  6 ++
 init/main.c                        |  1 -
 mm/ioremap.c                       | 91 ++++++++++++------------------
 9 files changed, 77 insertions(+), 74 deletions(-)
diff --git a/arch/arm64/include/asm/vmalloc.h b/arch/arm64/include/asm/vmalloc.h
index 2ca708ab9b20b..597b40405319d 100644
--- a/arch/arm64/include/asm/vmalloc.h
+++ b/arch/arm64/include/asm/vmalloc.h
@@ -1,4 +1,12 @@
 #ifndef _ASM_ARM64_VMALLOC_H
 #define _ASM_ARM64_VMALLOC_H
 
+#include <asm/page.h>
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+bool arch_vmap_p4d_supported(pgprot_t prot);
+bool arch_vmap_pud_supported(pgprot_t prot);
+bool arch_vmap_pmd_supported(pgprot_t prot);
+#endif
+
 #endif /* _ASM_ARM64_VMALLOC_H */
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 437ee4ab41a19..cc44645a997bc 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -921,12 +921,12 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
 	return dt_virt;
 }
 
-int __init arch_ioremap_p4d_supported(void)
+bool arch_vmap_p4d_supported(pgprot_t prot)
 {
-	return 0;
+	return false;
 }
 
-int __init arch_ioremap_pud_supported(void)
+bool arch_vmap_pud_supported(pgprot_t prot)
 {
 	/*
 	 * Only 4k granule supports level 1 block mappings.
@@ -936,9 +936,9 @@ int __init arch_ioremap_pud_supported(void)
 	       !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
 }
 
-int __init arch_ioremap_pmd_supported(void)
+bool arch_vmap_pmd_supported(pgprot_t prot)
 {
-	/* See arch_ioremap_pud_supported() */
+	/* See arch_vmap_pud_supported() */
 	return !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
 }
 
diff --git a/arch/powerpc/include/asm/vmalloc.h b/arch/powerpc/include/asm/vmalloc.h
index b992dfaaa1618..105abb73f075c 100644
--- a/arch/powerpc/include/asm/vmalloc.h
+++ b/arch/powerpc/include/asm/vmalloc.h
@@ -1,4 +1,12 @@
 #ifndef _ASM_POWERPC_VMALLOC_H
 #define _ASM_POWERPC_VMALLOC_H
 
+#include <asm/page.h>
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+bool arch_vmap_p4d_supported(pgprot_t prot);
+bool arch_vmap_pud_supported(pgprot_t prot);
+bool arch_vmap_pmd_supported(pgprot_t prot);
+#endif
+
 #endif /* _ASM_POWERPC_VMALLOC_H */
diff --git a/arch/x86/include/asm/vmalloc.h b/arch/x86/include/asm/vmalloc.h
index ba6295fa58762..17864fb62e4b3 100644
--- a/arch/x86/include/asm/vmalloc.h
+++ b/arch/x86/include/asm/vmalloc.h
@@ -1,4 +1,12 @@
 #ifndef _ASM_X86_VMALLOC_H
 #define _ASM_X86_VMALLOC_H
 
+#include <asm/page.h>
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+bool arch_vmap_p4d_supported(pgprot_t prot);
+bool arch_vmap_pud_supported(pgprot_t prot);
+bool arch_vmap_pmd_supported(pgprot_t prot);
+#endif
+
 #endif /* _ASM_X86_VMALLOC_H */
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 6ce8a2b593e09..944de9aaa0cd5 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -431,21 +431,21 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
 
-int __init arch_ioremap_p4d_supported(void)
+bool arch_vmap_p4d_supported(pgprot_t prot)
 {
-	return 0;
+	return false;
 }
 
-int __init arch_ioremap_pud_supported(void)
+bool arch_vmap_pud_supported(pgprot_t prot)
 {
 #ifdef CONFIG_X86_64
 	return boot_cpu_has(X86_FEATURE_GBPAGES);
 #else
-	return 0;
+	return false;
 #endif
 }
 
-int __init arch_ioremap_pmd_supported(void)
+bool arch_vmap_pmd_supported(pgprot_t prot)
 {
 	return boot_cpu_has(X86_FEATURE_PSE);
 }
diff --git a/include/linux/io.h b/include/linux/io.h
index 58514cebfce6e..890c1913969da 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -43,15 +43,6 @@ static inline int ioremap_page_range(unsigned long addr, unsigned long end,
 }
 #endif
 
-#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-void __init ioremap_huge_init(void);
-int arch_ioremap_p4d_supported(void);
-int arch_ioremap_pud_supported(void);
-int arch_ioremap_pmd_supported(void);
-#else
-static inline void ioremap_huge_init(void) { }
-#endif
-
 /*
  * Managed iomap interface
  */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 086013413a1c3..6cbbebc97ea89 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -59,6 +59,12 @@ struct vmap_area {
 	struct vm_struct *vm;
 };
 
+#ifndef CONFIG_HAVE_ARCH_HUGE_VMAP
+static inline bool arch_vmap_p4d_supported(pgprot_t prot) { return false; }
+static inline bool arch_vmap_pud_supported(pgprot_t prot) { return false; }
+static inline bool arch_vmap_pmd_supported(pgprot_t prot) { return false; }
+#endif
+
 /*
  * Highlevel APIs for driver use
  */
diff --git a/init/main.c b/init/main.c
index 664f8a3d6c073..4e041fc2a689c 100644
--- a/init/main.c
+++ b/init/main.c
@@ -521,7 +521,6 @@ static void __init mm_init(void)
 	kmem_cache_init();
 	pgtable_init();
 	vmalloc_init();
-	ioremap_huge_init();
 	/* Should be run before the first non-init thread is created */
 	init_espfix_bsp();
 	/* Should be run after espfix64 is set up. */
diff --git a/mm/ioremap.c b/mm/ioremap.c
index 0297d2e840327..e4c5044b1ab21 100644
--- a/mm/ioremap.c
+++ b/mm/ioremap.c
@@ -15,49 +15,16 @@
 #include <asm/pgtable.h>
 
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-static int __read_mostly ioremap_p4d_capable;
-static int __read_mostly ioremap_pud_capable;
-static int __read_mostly ioremap_pmd_capable;
-static int __read_mostly ioremap_huge_disabled;
+static unsigned int __ro_after_init iomap_max_page_shift = BITS_PER_LONG - 1;
 
 static int __init set_nohugeiomap(char *str)
 {
-	ioremap_huge_disabled = 1;
+	iomap_max_page_shift = PAGE_SHIFT;
 	return 0;
 }
 early_param("nohugeiomap", set_nohugeiomap);
-
-void __init ioremap_huge_init(void)
-{
-	if (!ioremap_huge_disabled) {
-		if (arch_ioremap_p4d_supported())
-			ioremap_p4d_capable = 1;
-		if (arch_ioremap_pud_supported())
-			ioremap_pud_capable = 1;
-		if (arch_ioremap_pmd_supported())
-			ioremap_pmd_capable = 1;
-	}
-}
-
-static inline int ioremap_p4d_enabled(void)
-{
-	return ioremap_p4d_capable;
-}
-
-static inline int ioremap_pud_enabled(void)
-{
-	return ioremap_pud_capable;
-}
-
-static inline int ioremap_pmd_enabled(void)
-{
-	return ioremap_pmd_capable;
-}
-
-#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
-static inline int ioremap_p4d_enabled(void) { return 0; }
-static inline int ioremap_pud_enabled(void) { return 0; }
-static inline int ioremap_pmd_enabled(void) { return 0; }
+#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+static const unsigned int iomap_max_page_shift = PAGE_SHIFT;
 #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
 
 static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
@@ -79,9 +46,13 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
 }
 
 static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
-			phys_addr_t phys_addr, pgprot_t prot)
+			phys_addr_t phys_addr, pgprot_t prot,
+			unsigned int max_page_shift)
 {
-	if (!ioremap_pmd_enabled())
+	if (max_page_shift < PMD_SHIFT)
+		return 0;
+
+	if (!arch_vmap_pmd_supported(prot))
 		return 0;
 
 	if ((end - addr) != PMD_SIZE)
@@ -97,7 +68,8 @@ static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
 }
 
 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
-		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
+		unsigned long end, phys_addr_t phys_addr, pgprot_t prot,
+		unsigned int max_page_shift)
 {
 	pmd_t *pmd;
 	unsigned long next;
@@ -108,7 +80,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
 	do {
 		next = pmd_addr_end(addr, end);
 
-		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot))
+		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot, max_page_shift))
 			continue;
 
 		if (vmap_pte_range(pmd, addr, next, phys_addr, prot))
@@ -118,9 +90,13 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
 }
 
 static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
-			phys_addr_t phys_addr, pgprot_t prot)
+			phys_addr_t phys_addr, pgprot_t prot,
+			unsigned int max_page_shift)
 {
-	if (!ioremap_pud_enabled())
+	if (max_page_shift < PUD_SHIFT)
+		return 0;
+
+	if (!arch_vmap_pud_supported(prot))
 		return 0;
 
 	if ((end - addr) != PUD_SIZE)
@@ -136,7 +112,8 @@ static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
 }
 
 static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
-		phys_addr_t phys_addr, pgprot_t prot)
+		phys_addr_t phys_addr, pgprot_t prot,
+		unsigned int max_page_shift)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -147,19 +124,23 @@ static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
 	do {
 		next = pud_addr_end(addr, end);
 
-		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot))
+		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot, max_page_shift))
 			continue;
 
-		if (vmap_pmd_range(pud, addr, next, phys_addr, prot))
+		if (vmap_pmd_range(pud, addr, next, phys_addr, prot, max_page_shift))
 			return -ENOMEM;
 	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
 	return 0;
 }
 
 static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
-			phys_addr_t phys_addr, pgprot_t prot)
+			phys_addr_t phys_addr, pgprot_t prot,
+			unsigned int max_page_shift)
 {
-	if (!ioremap_p4d_enabled())
+	if (max_page_shift < P4D_SHIFT)
+		return 0;
+
+	if (!arch_vmap_p4d_supported(prot))
 		return 0;
 
 	if ((end - addr) != P4D_SIZE)
@@ -175,7 +156,8 @@ static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
 }
 
 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
-		phys_addr_t phys_addr, pgprot_t prot)
+		phys_addr_t phys_addr, pgprot_t prot,
+		unsigned int max_page_shift)
 {
 	p4d_t *p4d;
 	unsigned long next;
@@ -186,17 +168,18 @@ static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
 	do {
 		next = p4d_addr_end(addr, end);
 
-		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot))
+		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot, max_page_shift))
 			continue;
 
-		if (vmap_pud_range(p4d, addr, next, phys_addr, prot))
+		if (vmap_pud_range(p4d, addr, next, phys_addr, prot, max_page_shift))
 			return -ENOMEM;
 	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
 	return 0;
 }
 
 static int vmap_range(unsigned long addr, unsigned long end,
-		phys_addr_t phys_addr, pgprot_t prot)
+		phys_addr_t phys_addr, pgprot_t prot,
+		unsigned int max_page_shift)
 {
 	pgd_t *pgd;
 	unsigned long start;
@@ -210,7 +193,7 @@ static int vmap_range(unsigned long addr, unsigned long end,
 	pgd = pgd_offset_k(addr);
 	do {
 		next = pgd_addr_end(addr, end);
-		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot);
+		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot, max_page_shift);
 		if (err)
 			break;
 	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
@@ -223,5 +206,5 @@ static int vmap_range(unsigned long addr, unsigned long end,
 int ioremap_page_range(unsigned long addr, unsigned long end,
 		       phys_addr_t phys_addr, pgprot_t prot)
 {
-	return vmap_range(addr, end, phys_addr, prot);
+	return vmap_range(addr, end, phys_addr, prot, iomap_max_page_shift);
 }