From: Zhou Guanghui <zhouguanghui1@huawei.com>
ascend inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I931MI
CVE: NA
-----------------------------------------------------------------
A range is reserved in the user address space to ensure that the process
cannot allocate mappings within that range through ordinary mmap calls;
the range is usable only when the MAP_SVSP flag is passed explicitly.
Signed-off-by: Zhou Guanghui <zhouguanghui1@huawei.com>
---
 fs/hugetlbfs/inode.c                   | 13 ++++++++
 include/linux/mman.h                   | 43 ++++++++++++++++++++++++++
 include/uapi/asm-generic/mman-common.h |  1 +
 mm/mmap.c                              | 33 ++++++++++++++++++++
 4 files changed, 90 insertions(+)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index f001dfe52a14..f151370afdc8 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -197,6 +197,9 @@ hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr, info.high_limit = arch_get_mmap_end(addr, len, flags); info.align_mask = PAGE_MASK & ~huge_page_mask(h); info.align_offset = 0; + + svsp_mmap_get_area(&info, flags); + return vm_unmapped_area(&info); }
@@ -213,6 +216,9 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr, info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base); info.align_mask = PAGE_MASK & ~huge_page_mask(h); info.align_offset = 0; + + svsp_mmap_get_area(&info, flags); + addr = vm_unmapped_area(&info);
/* @@ -226,6 +232,9 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr, info.flags = 0; info.low_limit = current->mm->mmap_base; info.high_limit = arch_get_mmap_end(addr, len, flags); + + svsp_mmap_get_area(&info, flags); + addr = vm_unmapped_area(&info); }
@@ -255,6 +264,10 @@ generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
if (addr) { addr = ALIGN(addr, huge_page_size(h)); + + if (svsp_mmap_check(addr, len, flags)) + return -ENOMEM; + vma = find_vma(mm, addr); if (mmap_end - len >= addr && (!vma || addr + len <= vm_start_gap(vma))) diff --git a/include/linux/mman.h b/include/linux/mman.h index 40d94411d492..9f0250993ef7 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -8,6 +8,49 @@ #include <linux/atomic.h> #include <uapi/linux/mman.h>
+extern int enable_mmap_svsp; + +#ifdef CONFIG_ASCEND_SVSP +#define SVSP_MMAP_BASE 0x180000000000UL +#define SVSP_MMAP_SIZE 0x080000000000UL + +static inline int svsp_mmap_check(unsigned long addr, unsigned long len, + unsigned long flags) +{ + if (enable_mmap_svsp && (flags & MAP_SVSP) && + (addr < (SVSP_MMAP_BASE + SVSP_MMAP_SIZE)) && + (addr > SVSP_MMAP_BASE)) + return -EINVAL; + else + return 0; +} + +static inline void svsp_mmap_get_area(struct vm_unmapped_area_info *info, + unsigned long flags) +{ + if (enable_mmap_svsp && (flags & MAP_SVSP)) { + info->low_limit = SVSP_MMAP_BASE; + info->high_limit = SVSP_MMAP_BASE + SVSP_MMAP_SIZE; + } else { + info->low_limit = max(info->low_limit, TASK_UNMAPPED_BASE); + info->high_limit = min(info->high_limit, SVSP_MMAP_BASE); + } +} + +#else +#define SVSP_MMAP_BASE 0 +#define SVSP_MMAP_SIZE 0 +static inline int svsp_mmap_check(unsigned long addr, unsigned long len, + unsigned long flags) +{ + return 0; +} + +static inline void svsp_mmap_get_area(struct vm_unmapped_area_info *info, + unsigned long flags) +{ } +#endif + /* * Arrange for legacy / undefined architecture specific flags to be * ignored by mmap handling code. diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h index 14e5498efd7a..096c9018d2d5 100644 --- a/include/uapi/asm-generic/mman-common.h +++ b/include/uapi/asm-generic/mman-common.h @@ -30,6 +30,7 @@ #define MAP_SYNC 0x080000 /* perform synchronous page faults for the mapping */ #define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */
+#define MAP_SVSP 0x400000 #define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be * uninitialized */
diff --git a/mm/mmap.c b/mm/mmap.c index 5d78ebaeed60..94077c5356eb 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1728,6 +1728,10 @@ generic_get_unmapped_area(struct file *filp, unsigned long addr,
if (addr) { addr = PAGE_ALIGN(addr); + + if (svsp_mmap_check(addr, len, flags)) + return -ENOMEM; + vma = find_vma_prev(mm, addr, &prev); if (mmap_end - len >= addr && addr >= mmap_min_addr && (!vma || addr + len <= vm_start_gap(vma)) && @@ -1741,6 +1745,9 @@ generic_get_unmapped_area(struct file *filp, unsigned long addr, info.high_limit = mmap_end; info.align_mask = 0; info.align_offset = 0; + + svsp_mmap_get_area(&info, flags); + return vm_unmapped_area(&info); }
@@ -1781,6 +1788,10 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr, /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); + + if (svsp_mmap_check(addr, len, flags)) + return -ENOMEM; + vma = find_vma_prev(mm, addr, &prev); if (mmap_end - len >= addr && addr >= mmap_min_addr && (!vma || addr + len <= vm_start_gap(vma)) && @@ -1794,6 +1805,9 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr, info.high_limit = arch_get_mmap_base(addr, mm->mmap_base); info.align_mask = 0; info.align_offset = 0; + + svsp_mmap_get_area(&info, flags); + addr = vm_unmapped_area(&info);
/* @@ -1807,6 +1821,9 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr, info.flags = 0; info.low_limit = TASK_UNMAPPED_BASE; info.high_limit = mmap_end; + + svsp_mmap_get_area(&info, flags); + addr = vm_unmapped_area(&info); }
@@ -3946,3 +3963,19 @@ static int __meminit init_reserve_notifier(void) return 0; } subsys_initcall(init_reserve_notifier); + +int enable_mmap_svsp __read_mostly; + +#ifdef CONFIG_ASCEND_SVSP + +static int __init ascend_enable_mmap_svsp(char *s) +{ + enable_mmap_svsp = 1; + + pr_info("Ascend enable svsp mmap features\n"); + + return 1; +} +__setup("enable_mmap_svsp", ascend_enable_mmap_svsp); + +#endif