From: Lijun Fang <fanglijun3@huawei.com>
ascend inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4M24Q
CVE: NA
-------------------------------------------------
DvPP stands for Davinci Video Pre-Processor. Add the new configs ASCEND_FEATURES and ASCEND_DVPP_MMAP to enable the DvPP features for the Ascend platform.
The DvPP can only use a limited range of virtual addresses — for example, the Ascend310/910 can only use a 4 GB range of virtual addresses — so add a new mmap flag named MAP_DVPP to allocate memory for the DvPP processor through the mmap syscall. The new flag is only valid on the Ascend platform.
You should allocate the memory for the DvPP like this:
addr = mmap(NULL, length, PROT_READ, MAP_ANONYMOUS | MAP_DVPP, -1, 0);
Signed-off-by: Lijun Fang <fanglijun3@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 arch/arm64/Kconfig   | 29 ++++++++++++++++++++
 fs/hugetlbfs/inode.c | 16 +++++++++++
 include/linux/mman.h | 64 ++++++++++++++++++++++++++++++++++++++++++++
 mm/mmap.c            | 40 +++++++++++++++++++++++++++
 4 files changed, 149 insertions(+)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 6062a52a084f..e36340d804dd 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -2206,6 +2206,35 @@ config UNWIND_PATCH_PAC_INTO_SCS select UNWIND_TABLES select DYNAMIC_SCS
+menuconfig ASCEND_FEATURES + bool "Support Ascend Features" + depends on ARM64 + help + The Ascend chips use the Hisilicon DaVinci architecture, and mainly + focus on the AI and machine learning areas, and contain many external features. + + Enable this config to enable a selective list of these features. + + If unsure, say Y. + +if ASCEND_FEATURES + +config ASCEND_DVPP_MMAP + bool "Enable support for the DvPP mmap" + default y + help + The DvPP (Davinci Video Pre-Processor) mainly consists of VDEC + (Video Decode), VENC (Video Encode), JPEG D/E (Decode/Encode), PNGD + (PNG Decode) and VPC (Video Process) processors. + + The DvPP can only use a limited range of virtual addresses, just like the + Ascend310/910 can only use a limited range of virtual addresses (default + 4 GB), so add a new mmap flag named MAP_DVPP to allocate the + special memory for the DvPP processor; the new flag is only valid for the Ascend + platform. + +endif + endmenu # "Kernel Features"
menu "Boot options" diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index b0edf5fe8132..ce11ecbafc82 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -196,6 +196,10 @@ hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr, info.high_limit = arch_get_mmap_end(addr, len, flags); info.align_mask = PAGE_MASK & ~huge_page_mask(h); info.align_offset = 0; + + if (enable_mmap_dvpp) + dvpp_mmap_get_area(&info, flags); + return vm_unmapped_area(&info); }
@@ -212,6 +216,10 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr, info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base); info.align_mask = PAGE_MASK & ~huge_page_mask(h); info.align_offset = 0; + + if (enable_mmap_dvpp) + dvpp_mmap_get_area(&info, flags); + addr = vm_unmapped_area(&info);
/* @@ -225,6 +233,10 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr, info.flags = 0; info.low_limit = current->mm->mmap_base; info.high_limit = arch_get_mmap_end(addr, len, flags); + + if (enable_mmap_dvpp) + dvpp_mmap_get_area(&info, flags); + addr = vm_unmapped_area(&info); }
@@ -254,6 +266,10 @@ generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
if (addr) { addr = ALIGN(addr, huge_page_size(h)); + + if (dvpp_mmap_check(addr, len, flags)) + return -ENOMEM; + vma = find_vma(mm, addr); if (mmap_end - len >= addr && (!vma || addr + len <= vm_start_gap(vma))) diff --git a/include/linux/mman.h b/include/linux/mman.h index 40d94411d492..e76d887094f7 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -8,6 +8,70 @@ #include <linux/atomic.h> #include <uapi/linux/mman.h>
+extern int enable_mmap_dvpp; +/* + * Enable MAP_DVPP for the Ascend platform + */ +#ifdef CONFIG_ASCEND_DVPP_MMAP + +#define MAP_DVPP 0x200 + +#define DVPP_MMAP_SIZE (0x100000000UL) +#define DVPP_MMAP_BASE (TASK_SIZE - DVPP_MMAP_SIZE) + +static inline int dvpp_mmap_check(unsigned long addr, unsigned long len, + unsigned long flags) +{ + if (enable_mmap_dvpp && (flags & MAP_DVPP) && + (addr < DVPP_MMAP_BASE + DVPP_MMAP_SIZE) && + (addr > DVPP_MMAP_BASE)) + return -EINVAL; + else + return 0; +} + +static inline void dvpp_mmap_get_area(struct vm_unmapped_area_info *info, + unsigned long flags) +{ + if (flags & MAP_DVPP) { + info->low_limit = DVPP_MMAP_BASE; + info->high_limit = DVPP_MMAP_BASE + DVPP_MMAP_SIZE; + } else { + info->low_limit = max(info->low_limit, TASK_UNMAPPED_BASE); + info->high_limit = min(info->high_limit, DVPP_MMAP_BASE); + } +} + +static inline int dvpp_mmap_zone(unsigned long addr) +{ + if (addr >= DVPP_MMAP_BASE) + return 1; + else + return 0; +} +#else + +#define MAP_DVPP (0) + +static inline int dvpp_mmap_check(unsigned long addr, unsigned long len, + unsigned long flags) +{ + return 0; +} + +static inline void dvpp_mmap_get_area(struct vm_unmapped_area_info *info, + unsigned long flags) +{ +} + +static inline int dvpp_mmap_zone(unsigned long addr) { return 0; } + +#define DVPP_MMAP_BASE (0) + +#define DVPP_MMAP_SIZE (0) + +#endif + /* * Arrange for legacy / undefined architecture specific flags to be * ignored by mmap handling code. diff --git a/mm/mmap.c b/mm/mmap.c index 469f2c0ec43f..f1ce05935c6e 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1709,6 +1709,10 @@ generic_get_unmapped_area(struct file *filp, unsigned long addr,
if (addr) { addr = PAGE_ALIGN(addr); + + if (dvpp_mmap_check(addr, len, flags)) + return -ENOMEM; + vma = find_vma_prev(mm, addr, &prev); if (mmap_end - len >= addr && addr >= mmap_min_addr && (!vma || addr + len <= vm_start_gap(vma)) && @@ -1722,6 +1726,10 @@ generic_get_unmapped_area(struct file *filp, unsigned long addr, info.high_limit = mmap_end; info.align_mask = 0; info.align_offset = 0; + + if (enable_mmap_dvpp) + dvpp_mmap_get_area(&info, flags); + return vm_unmapped_area(&info); }
@@ -1759,6 +1767,10 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr, /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); + + if (dvpp_mmap_check(addr, len, flags)) + return -ENOMEM; + vma = find_vma_prev(mm, addr, &prev); if (mmap_end - len >= addr && addr >= mmap_min_addr && (!vma || addr + len <= vm_start_gap(vma)) && @@ -1772,6 +1784,10 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr, info.high_limit = arch_get_mmap_base(addr, mm->mmap_base); info.align_mask = 0; info.align_offset = 0; + + if (enable_mmap_dvpp) + dvpp_mmap_get_area(&info, flags); + addr = vm_unmapped_area(&info);
/* @@ -1785,6 +1801,10 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr, info.flags = 0; info.low_limit = TASK_UNMAPPED_BASE; info.high_limit = mmap_end; + + if (enable_mmap_dvpp) + dvpp_mmap_get_area(&info, flags); + addr = vm_unmapped_area(&info); }
@@ -3913,3 +3933,23 @@ static int __meminit init_reserve_notifier(void) return 0; } subsys_initcall(init_reserve_notifier); + + +/* + * Enable the MAP_DVPP flag (mmap and hugetlb). + */ +int enable_mmap_dvpp __read_mostly; + +#ifdef CONFIG_ASCEND_DVPP_MMAP + +static int __init ascend_enable_mmap_dvpp(char *s) +{ + enable_mmap_dvpp = 1; + + pr_info("Ascend enable dvpp mmap features\n"); + + return 1; +} +__setup("enable_mmap_dvpp", ascend_enable_mmap_dvpp); + +#endif