From: Tang Yizhou <tangyizhou@huawei.com>
ascend inclusion
category: bugfix
bugzilla: NA
CVE: NA
-------------------------------------------------
The situation below is not allowed:
int *result = mmap(ADDR, sizeof(int), PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Because share pool uses an independent UVA allocation algorithm, it may
produce an address that conflicts with the user-specified address.
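For illustration, a minimal user-space sketch of the calls this patch
rejects on a kernel with share pool enabled (SP_ADDR is a hypothetical
stand-in for an address inside the share pool range; the real range
depends on the kernel configuration):

  #include <errno.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/mman.h>

  /* Hypothetical stand-in for an address inside the share pool range. */
  #define SP_ADDR ((void *)0x100000000000UL)

  int main(void)
  {
          /*
           * A hinted anonymous mapping at a share pool address, made
           * without MAP_SHARE_POOL, is rejected by the
           * sp_check_mmap_addr() check added to arch_get_unmapped_area().
           */
          void *p = mmap(SP_ADDR, sizeof(int), PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
          if (p == MAP_FAILED)
                  printf("mmap: %s\n", strerror(errno));   /* EINVAL */

          /* munmap() of a share pool address fails via sp_check_addr(). */
          if (munmap(SP_ADDR, sizeof(int)) == -1)
                  printf("munmap: %s\n", strerror(errno)); /* EINVAL */

          return 0;
  }

mremap() to or from a share pool address is rejected the same way, while
processes with PF_DOMAIN_CORE set are exempt from all of these checks.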
Signed-off-by: Tang Yizhou <tangyizhou@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/linux/share_pool.h | 14 ++++++++++++++
 mm/mmap.c                  | 12 ++++++++++++
 mm/mremap.c                |  4 ++++
 mm/share_pool.c            | 38 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 68 insertions(+)
diff --git a/include/linux/share_pool.h b/include/linux/share_pool.h
index c03b83beaf63c..9650f257b3ad7 100644
--- a/include/linux/share_pool.h
+++ b/include/linux/share_pool.h
@@ -292,6 +292,9 @@ static inline void sp_free_pages(struct page *page, struct vm_struct *area)
 	__free_pages(page, is_vmalloc_huge(area->flags) ? PMD_SHIFT - PAGE_SHIFT : 0);
 }
 
+extern bool sp_check_addr(unsigned long addr);
+extern bool sp_check_mmap_addr(unsigned long addr, unsigned long flags);
+
 #else
 
 static inline int sp_group_add_task(int pid, int spg_id)
@@ -495,6 +498,17 @@ static inline int sp_node_id(struct vm_area_struct *vma)
 {
 	return numa_node_id();
 }
+
+static inline bool sp_check_addr(unsigned long addr)
+{
+	return false;
+}
+
+static inline bool sp_check_mmap_addr(unsigned long addr, unsigned long flags)
+{
+	return false;
+}
+
 #endif
 
 #endif /* LINUX_SHARE_POOL_H */
diff --git a/mm/mmap.c b/mm/mmap.c
index a17373895bc33..e2b53084f2a71 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2356,6 +2356,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	if (flags & MAP_FIXED)
 		return addr;
 
+	if (sp_check_mmap_addr(addr, flags))
+		return -EINVAL;
+
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
 
@@ -2407,6 +2410,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	if (flags & MAP_FIXED)
 		return addr;
 
+	if (sp_check_mmap_addr(addr, flags))
+		return -EINVAL;
+
 	/* requesting a specific address */
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
@@ -3113,6 +3119,9 @@ int vm_munmap(unsigned long start, size_t len)
 	struct mm_struct *mm = current->mm;
 	LIST_HEAD(uf);
 
+	if (sp_check_addr(start))
+		return -EINVAL;
+
 	if (down_write_killable(&mm->mmap_sem))
 		return -EINTR;
 
@@ -3129,6 +3138,9 @@ int do_vm_munmap(struct task_struct *tsk, unsigned long start, size_t len)
 	struct mm_struct *mm = tsk->mm;
 	LIST_HEAD(uf);
 
+	if (sp_check_addr(start))
+		return -EINVAL;
+
 	if (down_write_killable(&mm->mmap_sem))
 		return -EINTR;
diff --git a/mm/mremap.c b/mm/mremap.c
index 2ac9eaa041d95..238c169dc9694 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -24,6 +24,7 @@
 #include <linux/uaccess.h>
 #include <linux/mm-arch-hooks.h>
 #include <linux/userfaultfd_k.h>
+#include <linux/share_pool.h>
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -534,6 +535,9 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 	if (offset_in_page(addr))
 		return ret;
 
+	if (sp_check_addr(addr) || sp_check_addr(new_addr))
+		return ret;
+
 	old_len = PAGE_ALIGN(old_len);
 	new_len = PAGE_ALIGN(new_len);
diff --git a/mm/share_pool.c b/mm/share_pool.c
index 2d9c0a8916211..eb5eaa3e0d05a 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -58,6 +58,8 @@
 #define byte2mb(size) ((size) >> 20)
 #define page2kb(page_num) ((page_num) << (PAGE_SHIFT - 10))
 
+#define PF_DOMAIN_CORE	0x10000000	/* AOS CORE processes in sched.h */
+
 /* mdc scene hack */
 static int __read_mostly enable_mdc_default_group;
 static const int mdc_default_group_id = 1;
@@ -334,6 +336,14 @@ static inline void check_interrupt_context(void)
 		panic("share_pool: can't be used in interrupt context\n");
 }
 
+static inline bool check_aoscore_process(struct task_struct *tsk)
+{
+	if (tsk->flags & PF_DOMAIN_CORE)
+		return true;
+	else
+		return false;
+}
+
 static unsigned long sp_mmap(struct mm_struct *mm, struct file *file,
 			     struct sp_area *spa, unsigned long *populate);
 static void sp_munmap(struct mm_struct *mm, unsigned long addr, unsigned long size);
@@ -675,6 +685,14 @@ int sp_group_add_task(int pid, int spg_id)
 		goto out;
 	}
 
+	if (check_aoscore_process(tsk)) {
+		up_write(&sp_group_sem);
+		ret = -EACCES;
+		free_new_spg_id(id_newly_generated, spg_id);
+		sp_dump_stack();
+		goto out_put_task;
+	}
+
 	/*
 	 * group_leader: current thread may be exiting in a multithread process
 	 *
@@ -3030,6 +3048,26 @@ void __init proc_sharepool_init(void)
 /*** End of statistical and maintenance functions ***/
+bool sp_check_addr(unsigned long addr)
+{
+	if (enable_ascend_share_pool && is_sharepool_addr(addr) &&
+	    !check_aoscore_process(current)) {
+		sp_dump_stack();
+		return true;
+	} else
+		return false;
+}
+
+bool sp_check_mmap_addr(unsigned long addr, unsigned long flags)
+{
+	if (enable_ascend_share_pool && is_sharepool_addr(addr) &&
+	    !check_aoscore_process(current) && !(flags & MAP_SHARE_POOL)) {
+		sp_dump_stack();
+		return true;
+	} else
+		return false;
+}
+
 vm_fault_t sharepool_no_page(struct mm_struct *mm,
 			     struct vm_area_struct *vma,
 			     struct address_space *mapping, pgoff_t idx,