From: Linxu Fang <fanglinxu@huawei.com>
euler inclusion
category: feature
bugzilla: 46792
CVE: NA
----------------------------------------
This is an improved version of Steve Capper's RFC patch that adds huge page support for ARM short-descriptor (2-level) page tables, see: https://lists.linaro.org/pipermail/linaro-kernel/2013-October/008031.html
Signed-off-by: Linxu Fang <fanglinxu@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: zhangyi (F) <yi.zhang@huawei.com>
Signed-off-by: Jing Xiangfeng <jingxiangfeng@huawei.com>
Signed-off-by: Chen Jun <chenjun102@huawei.com>
---
 arch/arm/Kconfig                            |   2 +-
 arch/arm/include/asm/hugetlb-2level.h       | 148 +++++++++++++++++++
 arch/arm/include/asm/hugetlb.h              |   6 +-
 arch/arm/include/asm/pgtable-2level-hwdef.h |   2 +
 arch/arm/include/asm/pgtable-2level.h       | 154 +++++++++++++++++++-
 arch/arm/include/asm/pgtable-3level.h       |   2 +-
 arch/arm/include/asm/pgtable.h              |  14 ++
 arch/arm/kernel/head.S                      |   4 +
 arch/arm/mm/fault.c                         |  26 ++--
 arch/arm/mm/fsr-2level.c                    |   4 +-
 arch/arm/mm/hugetlbpage.c                   |   2 +-
 arch/arm/mm/mmu.c                           |  25 ++++
 12 files changed, 369 insertions(+), 20 deletions(-)
 create mode 100644 arch/arm/include/asm/hugetlb-2level.h
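
Note for reviewers (below the fold, not part of the commit message): with short
descriptors a huge page is built from 1 MiB section descriptors, and because each
first-level "pmd" slot Linux manages covers 2 MiB, set_pmd_at() in this patch
always writes a pair of consecutive section entries. Below is a minimal
stand-alone C sketch of that pairing; write_huge_section() is a hypothetical
helper and the constants are simplified stand-ins for the kernel's real PMD_*
definitions, not the patch's actual code.

	/*
	 * Illustrative sketch only: mirrors the pmdp[0]/pmdp[1] pairing
	 * done by set_pmd_at() for a section-mapped huge page.
	 */
	#include <stdint.h>
	#include <stdio.h>

	typedef uint32_t pmdval_t;

	#define SECTION_SIZE	0x00100000u	/* one section descriptor maps 1 MiB */
	#define SECTION_MASK	(~(SECTION_SIZE - 1))
	#define PMD_TYPE_SECT	0x00000002u	/* bits [1:0] = 0b10: section entry */

	/* Write one 2 MiB huge mapping as two consecutive 1 MiB sections. */
	static void write_huge_section(pmdval_t pmdp[2], uint32_t phys, pmdval_t prot)
	{
		pmdp[0] = (phys & SECTION_MASK) | prot | PMD_TYPE_SECT;
		pmdp[1] = pmdp[0] + SECTION_SIZE;	/* second half of the 2 MiB page */
	}

	int main(void)
	{
		pmdval_t pmdp[2];

		write_huge_section(pmdp, 0x40200000u, 0);
		printf("pmd[0]=%08x pmd[1]=%08x\n", pmdp[0], pmdp[1]);
		return 0;
	}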
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 002e0cf025f5..3efce886d7a0 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1545,7 +1545,7 @@ config HW_PERF_EVENTS
 config SYS_SUPPORTS_HUGETLBFS
 	def_bool y
-	depends on ARM_LPAE
+	depends on ARM_LPAE || (!CPU_USE_DOMAINS && !MEMORY_FAILURE && !CPU_SW_DOMAIN_PAN)
 
 config HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	def_bool y
diff --git a/arch/arm/include/asm/hugetlb-2level.h b/arch/arm/include/asm/hugetlb-2level.h
new file mode 100644
index 000000000000..ce746dae2497
--- /dev/null
+++ b/arch/arm/include/asm/hugetlb-2level.h
@@ -0,0 +1,148 @@
+/*
+ * arch/arm/include/asm/hugetlb-2level.h
+ *
+ * Copyright (C) 2014 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_ARM_HUGETLB_2LEVEL_H
+#define _ASM_ARM_HUGETLB_2LEVEL_H
+
+#define __HAVE_ARCH_HUGE_PTEP_GET
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+	pmd_t pmd = *((pmd_t *)ptep);
+	pte_t retval;
+
+	if (!pmd_val(pmd))
+		return __pte(0);
+
+	retval = __pte((pteval_t) (pmd_val(pmd) & HPAGE_MASK)
+			| arm_hugepteprotval);
+
+	if (pmd_exec(pmd))
+		retval = pte_mkexec(retval);
+	else
+		retval = pte_mknexec(retval);
+
+	if (pmd_young(pmd))
+		retval = pte_mkyoung(retval);
+	else
+		retval = pte_mkold(retval);
+
+	if (pmd_dirty(pmd))
+		retval = pte_mkdirty(retval);
+	else
+		retval = pte_mkclean(retval);
+
+	if (pmd_write(pmd))
+		retval = pte_mkwrite(retval);
+	else
+		retval = pte_wrprotect(retval);
+
+	if (pmd & PMD_SECT_BUFFERABLE)
+		retval |= PMD_SECT_BUFFERABLE;
+	else
+		retval &= ~PMD_SECT_BUFFERABLE;
+
+	if (pmd & PMD_SECT_CACHEABLE)
+		retval |= PMD_SECT_CACHEABLE;
+	else
+		retval &= ~PMD_SECT_CACHEABLE;
+
+	if (pmd & PMD_SECT_TEX(1))
+		retval |= L_PTE_MT_DEV_SHARED;
+	else
+		retval &= ~L_PTE_MT_DEV_SHARED;
+
+	if (pmd & PMD_SECT_S)
+		retval |= L_PTE_SHARED;
+	else
+		retval &= ~(L_PTE_SHARED);
+
+	if (pmd_protnone(pmd))
+		retval = pte_mkprotnone(retval);
+	else
+		retval = pte_rmprotnone(retval);
+
+	return retval;
+}
+
+#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+				   pte_t *ptep, pte_t pte)
+{
+	pmdval_t pmdval = (pmdval_t) pte_val(pte);
+	pmd_t *pmdp = (pmd_t *) ptep;
+
+	/* take the target address bits from the pte only */
+	pmdval &= HPAGE_MASK;
+
+	/*
+	 * now use pmd_modify to translate the permission bits from the pte
+	 * and set the memory type information.
+	 */
+	pmdval = pmd_val(pmd_modify(__pmd(pmdval), __pgprot(pte_val(pte))));
+
+	__sync_icache_dcache(pte);
+
+	set_pmd_at(mm, addr, pmdp, __pmd(pmdval));
+}
+
+static inline pte_t pte_mkhuge(pte_t pte) { return pte; }
+
+#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+					 unsigned long addr, pte_t *ptep)
+{
+	pmd_t *pmdp = (pmd_t *)ptep;
+
+	pmd_clear(pmdp);
+	flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
+}
+
+#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+					   unsigned long addr, pte_t *ptep)
+{
+	pmd_t *pmdp = (pmd_t *) ptep;
+
+	set_pmd_at(mm, addr, pmdp, pmd_wrprotect(*pmdp));
+}
+
+#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+					    unsigned long addr, pte_t *ptep)
+{
+	pmd_t *pmdp = (pmd_t *)ptep;
+	pte_t pte = huge_ptep_get(ptep);
+
+	pmd_clear(pmdp);
+
+	return pte;
+}
+
+#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+					     unsigned long addr, pte_t *ptep,
+					     pte_t pte, int dirty)
+{
+	int changed = !pte_same(huge_ptep_get(ptep), pte);
+
+	if (changed) {
+		set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+		flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
+	}
+
+	return changed;
+}
+
+#endif /* _ASM_ARM_HUGETLB_2LEVEL_H */
diff --git a/arch/arm/include/asm/hugetlb.h b/arch/arm/include/asm/hugetlb.h
index d02d6ca88e92..44fd1a5f3a32 100644
--- a/arch/arm/include/asm/hugetlb.h
+++ b/arch/arm/include/asm/hugetlb.h
@@ -11,7 +11,11 @@
 #define _ASM_ARM_HUGETLB_H
 
 #include <asm/page.h>
-#include <asm/hugetlb-3level.h>
+#ifdef CONFIG_ARM_LPAE
+#include <asm/hugetlb-3level.h>
+#else
+#include <asm/hugetlb-2level.h>
+#endif
 #include <asm-generic/hugetlb.h>
 
 static inline void arch_clear_hugepage_flags(struct page *page)
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
index 556937e1790e..b3ea5d00b5c7 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -20,7 +20,9 @@
 #define PMD_PXNTABLE		(_AT(pmdval_t, 1) << 2)     /* v7 */
 #define PMD_BIT4		(_AT(pmdval_t, 1) << 4)
 #define PMD_DOMAIN(x)		(_AT(pmdval_t, (x)) << 5)
+#ifndef PMD_DOMAIN_MASK
 #define PMD_DOMAIN_MASK	PMD_DOMAIN(0x0f)
+#endif
 #define PMD_PROTECTION		(_AT(pmdval_t, 1) << 9)		/* v5 */
 /*
  *   - section
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index baf7d0204eb5..000feaf88248 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -7,6 +7,8 @@
 #ifndef _ASM_PGTABLE_2LEVEL_H
 #define _ASM_PGTABLE_2LEVEL_H
 
+#include <asm/tlbflush.h>
+
 #define __PAGETABLE_PMD_FOLDED 1
 
 /*
@@ -185,13 +187,27 @@
 #define pud_clear(pudp)		do { } while (0)
 #define set_pud(pud,pudp)	do { } while (0)
 
+static inline int pmd_large(pmd_t pmd)
+{
+	if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_FAULT)
+		return pmd_val(pmd);
+
+	return ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_SECT);
+}
+
+static inline int pte_huge(pte_t pte)
+{
+	pmd_t pmd = (pmd_t)pte;
+
+	return pmd_large(pmd);
+}
+
 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 {
 	return (pmd_t *)pud;
 }
 #define pmd_offset pmd_offset
-#define pmd_large(pmd)		(pmd_val(pmd) & 2)
 #define pmd_leaf(pmd)		(pmd_val(pmd) & 2)
 #define pmd_bad(pmd)		(pmd_val(pmd) & 2)
 #define pmd_present(pmd)	(pmd_val(pmd))
@@ -215,6 +231,142 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 
 #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
 
+/*
+ * now follows some of the definitions to allow huge page support, we can't put
+ * these in the hugetlb source files as they are also required for transparent
+ * hugepage support.
+ */
+
+#define HPAGE_SHIFT		PMD_SHIFT
+#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
+
+#define HUGE_LINUX_PTE_COUNT	(PAGE_OFFSET >> HPAGE_SHIFT)
+#define HUGE_LINUX_PTE_SIZE	(HUGE_LINUX_PTE_COUNT * sizeof(pte_t *))
+#define HUGE_LINUX_PTE_INDEX(addr)	(addr >> HPAGE_SHIFT)
+
+/*
+ * We re-purpose the following domain bits in the section descriptor
+ */
+#ifndef PMD_DOMAIN_MASK
+#define PMD_DOMAIN_MASK		(_AT(pmdval_t, 0xF) << 5)
+#endif
+#define PMD_DSECT_PROT_NONE	(_AT(pmdval_t, 1) << 5)
+#define PMD_DSECT_DIRTY		(_AT(pmdval_t, 1) << 6)
+#define PMD_DSECT_AF		(_AT(pmdval_t, 1) << 7)
+
+#define PMD_BIT_FUNC(fn, op) \
+static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
+
+extern pmdval_t arm_hugepmdprotval;
+extern pteval_t arm_hugepteprotval;
+
+#define pmd_mkhuge(pmd)	(__pmd((pmd_val(pmd) & ~PMD_TYPE_MASK) | PMD_TYPE_SECT))
+
+PMD_BIT_FUNC(mkold, &= ~PMD_DSECT_AF);
+PMD_BIT_FUNC(mkdirty, |= PMD_DSECT_DIRTY);
+PMD_BIT_FUNC(mkclean, &= ~PMD_DSECT_DIRTY);
+PMD_BIT_FUNC(mkyoung, |= PMD_DSECT_AF);
+PMD_BIT_FUNC(mkwrite, |= PMD_SECT_AP_WRITE);
+PMD_BIT_FUNC(wrprotect, &= ~PMD_SECT_AP_WRITE);
+PMD_BIT_FUNC(mknotpresent, &= ~PMD_TYPE_MASK);
+PMD_BIT_FUNC(mkexec, &= ~PMD_SECT_XN);
+PMD_BIT_FUNC(mknexec, |= PMD_SECT_XN);
+PMD_BIT_FUNC(mkprotnone, |= PMD_DSECT_PROT_NONE);
+PMD_BIT_FUNC(rmprotnone, &= ~PMD_DSECT_PROT_NONE);
+
+#ifdef CONFIG_NUMA_BALANCING
+#define pmd_protnone(pmd)	(pmd_val(pmd) & PMD_DSECT_PROT_NONE)
+#else
+static inline int pmd_protnone(pmd_t pmd) { return 0; }
+#endif
+#define pmd_young(pmd)		(pmd_val(pmd) & PMD_DSECT_AF)
+#define pmd_write(pmd)		(pmd_val(pmd) & PMD_SECT_AP_WRITE)
+#define pmd_exec(pmd)		(!(pmd_val(pmd) & PMD_SECT_XN))
+#define pmd_dirty(pmd)		(pmd_val(pmd) & PMD_DSECT_DIRTY)
+
+#define __HAVE_ARCH_PMD_WRITE
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+			      pmd_t *pmdp, pmd_t pmd)
+{
+	/*
+	 * we can sometimes be passed a pmd pointing to a level 2 descriptor
+	 * from collapse_huge_page.
+	 */
+	if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE) {
+		pmdp[0] = __pmd(pmd_val(pmd));
+		pmdp[1] = __pmd(pmd_val(pmd) + 256 * sizeof(pte_t));
+	} else {
+		if (pmd_protnone(pmd))
+			pmd_val(pmd) &= ~PMD_TYPE_MASK;
+		else
+			pmd_val(pmd) |= PMD_TYPE_SECT;
+
+		pmdp[0] = __pmd(pmd_val(pmd));
+		pmdp[1] = __pmd(pmd_val(pmd) + SECTION_SIZE);
+	}
+
+	flush_pmd_entry(pmdp);
+}
+
+#define pmd_modify(pmd, prot)						\
+({									\
+	pmd_t pmdret = __pmd((pmd_val(pmd)				\
+		& (PMD_MASK | PMD_DOMAIN_MASK))				\
+		| arm_hugepmdprotval);					\
+	pgprot_t inprot = prot;						\
+	pte_t newprot = __pte(pgprot_val(inprot));			\
+									\
+	if (pte_dirty(newprot))						\
+		pmdret = pmd_mkdirty(pmdret);				\
+	else								\
+		pmdret = pmd_mkclean(pmdret);				\
+									\
+	if (pte_exec(newprot))						\
+		pmdret = pmd_mkexec(pmdret);				\
+	else								\
+		pmdret = pmd_mknexec(pmdret);				\
+									\
+	if (pte_write(newprot))						\
+		pmdret = pmd_mkwrite(pmdret);				\
+	else								\
+		pmdret = pmd_wrprotect(pmdret);				\
+									\
+	if (pte_young(newprot))						\
+		pmdret = pmd_mkyoung(pmdret);				\
+	else								\
+		pmdret = pmd_mkold(pmdret);				\
+									\
+	if (pte_protnone(newprot))					\
+		pmdret = pmd_mkprotnone(pmdret);			\
+	else								\
+		pmdret = pmd_rmprotnone(pmdret);			\
+									\
+	if (pte_val(newprot) & PMD_SECT_BUFFERABLE)			\
+		pmdret |= PMD_SECT_BUFFERABLE;				\
+	else								\
+		pmdret &= ~PMD_SECT_BUFFERABLE;				\
+									\
+	if (pte_val(newprot) & PMD_SECT_CACHEABLE)			\
+		pmdret |= PMD_SECT_CACHEABLE;				\
+	else								\
+		pmdret &= ~PMD_SECT_CACHEABLE;				\
+									\
+	if (pte_val(newprot) & L_PTE_MT_DEV_SHARED)			\
+		pmdret |= PMD_SECT_TEX(1);				\
+	else								\
+		pmdret &= ~(PMD_SECT_TEX(1));				\
+									\
+	if (pte_val(newprot) & L_PTE_SHARED)				\
+		pmdret |= PMD_SECT_S;					\
+	else								\
+		pmdret &= ~PMD_SECT_S;					\
+									\
+	pmdret;								\
+})
+
 /*
  * We don't have huge page support for short descriptors, for the moment
  * define empty stubs for use by pin_page_for_write.
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 2b85d175e999..af53b40d1627 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -55,7 +55,7 @@
  * Hugetlb definitions.
  */
 #define HPAGE_SHIFT		PMD_SHIFT
-#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_SIZE		(_AC(1, ULL) << HPAGE_SHIFT)
 #define HPAGE_MASK		(~(HPAGE_SIZE - 1))
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
 
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index c02f24400369..e3a6ebcfe415 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -196,6 +196,10 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 #define pte_young(pte)		(pte_isset((pte), L_PTE_YOUNG))
 #define pte_exec(pte)		(pte_isclear((pte), L_PTE_XN))
 
+#ifdef CONFIG_NUMA_BALANCING
+#define pte_protnone(pte)	(pte_val(pte) & L_PTE_NONE)
+#endif
+
 #define pte_valid_user(pte)	\
 	(pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte))
@@ -274,6 +278,16 @@ static inline pte_t pte_mknexec(pte_t pte)
 	return set_pte_bit(pte, __pgprot(L_PTE_XN));
 }
 
+static inline pte_t pte_mkprotnone(pte_t pte)
+{
+	return set_pte_bit(pte, __pgprot(L_PTE_NONE));
+}
+
+static inline pte_t pte_rmprotnone(pte_t pte)
+{
+	return clear_pte_bit(pte, __pgprot(L_PTE_NONE));
+}
+
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index f8904227e7fd..dfc95659bb89 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -460,8 +460,12 @@ __enable_mmu:
 #endif
 #ifdef CONFIG_ARM_LPAE
 	mcrr	p15, 0, r4, r5, c2		@ load TTBR0
+#else
+#ifdef CONFIG_SYS_SUPPORTS_HUGETLBFS
+	ldr	r5, =0x55555555
 #else
 	mov	r5, #DACR_INIT
+#endif
 	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
 	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
 #endif
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index efa402025031..9985bab5ee66 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -26,6 +26,10 @@
 
 #ifdef CONFIG_MMU
 
+#if defined(CONFIG_SYS_SUPPORTS_HUGETLBFS) && !defined(CONFIG_ARM_LPAE)
+#include <linux/hugetlb.h>
+#endif
+
 /*
  * This is useful to dump out the page tables associated with
  * 'addr' in mm 'mm'.
@@ -76,6 +80,15 @@ void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
 			break;
 
 		if (pmd_bad(*pmd)) {
+#if !defined(CONFIG_ARM_LPAE) && defined(CONFIG_HUGETLBFS)
+			if (pte_huge((pte_t)*pgd)) {
+				pte_t huge_pte = huge_ptep_get((pte_t *)pgd);
+
+				pr_alert("[%08lx] *ppgd=%08llx", addr,
+					 (long long)pmd_val(huge_pte));
+				break;
+			}
+#endif
 			pr_cont("(bad)");
 			break;
 		}
@@ -462,19 +475,6 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
 }
 #endif					/* CONFIG_MMU */
 
-/*
- * Some section permission faults need to be handled gracefully.
- * They can happen due to a __{get,put}_user during an oops.
- */
-#ifndef CONFIG_ARM_LPAE
-static int
-do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
-{
-	do_bad_area(addr, fsr, regs);
-	return 0;
-}
-#endif /* CONFIG_ARM_LPAE */
-
 /*
  * This abort handler always returns "fault".
  */
diff --git a/arch/arm/mm/fsr-2level.c b/arch/arm/mm/fsr-2level.c
index f2be95197265..f22e33ab0dc6 100644
--- a/arch/arm/mm/fsr-2level.c
+++ b/arch/arm/mm/fsr-2level.c
@@ -17,7 +17,7 @@ static struct fsr_info fsr_info[] = {
 	{ do_bad,		SIGBUS,  0,		"external abort on non-linefetch"  },
 	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"page domain fault"		   },
 	{ do_bad,		SIGBUS,	 0,		"external abort on translation"	   },
-	{ do_sect_fault,	SIGSEGV, SEGV_ACCERR,	"section permission fault"	   },
+	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"section permission fault"	   },
 	{ do_bad,		SIGBUS,	 0,		"external abort on translation"	   },
 	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"page permission fault"		   },
 	/*
@@ -57,7 +57,7 @@ static struct fsr_info ifsr_info[] = {
 	{ do_bad,		SIGBUS,  0,		"unknown 10"			   },
 	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"page domain fault"		   },
 	{ do_bad,		SIGBUS,	 0,		"external abort on translation"	   },
-	{ do_sect_fault,	SIGSEGV, SEGV_ACCERR,	"section permission fault"	   },
+	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"section permission fault"	   },
 	{ do_bad,		SIGBUS,	 0,		"external abort on translation"	   },
 	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"page permission fault"		   },
 	{ do_bad,		SIGBUS,	 0,		"unknown 16"			   },
diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c
index dd7a0277c5c0..5ede7ef5e531 100644
--- a/arch/arm/mm/hugetlbpage.c
+++ b/arch/arm/mm/hugetlbpage.c
@@ -30,5 +30,5 @@ int pud_huge(pud_t pud)
 
 int pmd_huge(pmd_t pmd)
 {
-	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
+	return pmd_large(pmd);
 }
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ab69250a86bc..911ce69f04a0 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -402,6 +402,20 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
 	local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
 }
 
+/*
+ * If the system supports huge pages and we are running with short descriptors,
+ * then compute the pmd and linux pte prot values for a huge page.
+ *
+ * These values are used by both the HugeTLB and THP code.
+ */
+#if defined(CONFIG_SYS_SUPPORTS_HUGETLBFS) && !defined(CONFIG_ARM_LPAE)
+pmdval_t arm_hugepmdprotval;
+EXPORT_SYMBOL(arm_hugepmdprotval);
+
+pteval_t arm_hugepteprotval;
+EXPORT_SYMBOL(arm_hugepteprotval);
+#endif
+
 /*
  * Adjust the PMD section entries according to the CPU in use.
  */
@@ -667,6 +681,17 @@ static void __init build_mem_type_table(void)
 		if (t->prot_sect)
 			t->prot_sect |= PMD_DOMAIN(t->domain);
 	}
+
+#if defined(CONFIG_SYS_SUPPORTS_HUGETLBFS) && !defined(CONFIG_ARM_LPAE)
+	/*
+	 * we assume all huge pages are user pages and that hardware access
+	 * flag updates are disabled (i.e. SCTLR.AFE == 0b).
+	 */
+	arm_hugepteprotval = mem_types[MT_MEMORY_RWX].prot_pte | L_PTE_USER | L_PTE_VALID;
+
+	arm_hugepmdprotval = mem_types[MT_MEMORY_RWX].prot_sect | PMD_SECT_AP_READ
+			     | PMD_SECT_nG;
+#endif
 }
 
 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
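
A closing note on the magic number in the head.S hunk (illustrative, not part of
the patch): the section descriptor's 4-bit domain field (bits [8:5]) is being
repurposed here for the software PROT_NONE/dirty/young flags, so whatever domain
number a huge mapping happens to encode must behave identically. The patch
therefore loads DACR with 0x55555555, i.e. all sixteen 2-bit domain fields set
to "client" (0b01, access permissions checked normally). A quick stand-alone
check of that encoding, assuming the architectural DACR layout:

	#include <stdint.h>
	#include <stdio.h>

	#define DACR_CLIENT	0x1u	/* ARM DACR field value 0b01: client domain */

	int main(void)
	{
		uint32_t dacr = 0;
		int d;

		/* Sixteen domains, one 2-bit field each; make every one a client. */
		for (d = 0; d < 16; d++)
			dacr |= DACR_CLIENT << (2 * d);

		printf("DACR = 0x%08x\n", dacr);	/* prints DACR = 0x55555555 */
		return 0;
	}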