From: Tang Yizhou <tangyizhou@huawei.com>

ascend inclusion
category: bugfix
bugzilla: NA
CVE: NA
-------------------------------------------------
Once the sp group has been created, the generated id is freed in sp_group_drop(). Before that point, free_sp_group_id() must be called explicitly on every error path.
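To illustrate the ownership rule this patch enforces, here is a standalone sketch: the id belongs to the caller until group creation succeeds, so each early failure must hand it back. All helpers below are hypothetical stand-ins, not the kernel implementation.

	/*
	 * Sketch of the id ownership rule in sp_group_add_task().
	 * alloc_id(), free_id() and lookup_task() are hypothetical
	 * stand-ins for the real allocation/lookup steps.
	 */
	#include <stdbool.h>

	static int alloc_id(void);        /* stand-in for the IDA allocation */
	static void free_id(int id);      /* stand-in for free_sp_group_id() */
	static int lookup_task(int pid);  /* stand-in for the pid lookup */

	static void free_new_id(bool new, int id)
	{
		if (new)                  /* only free ids this call allocated */
			free_id(id);
	}

	int add_task_sketch(int pid, int spg_id)
	{
		bool id_newly_generated = (spg_id < 0);
		int ret;

		if (id_newly_generated)
			spg_id = alloc_id();

		ret = lookup_task(pid);
		if (ret) {
			/* no group exists yet: the caller still owns the id */
			free_new_id(id_newly_generated, spg_id);
			return ret;
		}

		/* group created: from here on, group teardown frees the id */
		return spg_id;
	}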
Signed-off-by: Tang Yizhou <tangyizhou@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/share_pool.c | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/mm/share_pool.c b/mm/share_pool.c
index 6a4da9ac83e14..2d9c0a8916211 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -349,6 +349,12 @@ static void free_sp_group_id(unsigned int spg_id)
 	ida_free(&sp_group_id_ida, spg_id);
 }
 
+static void free_new_spg_id(bool new, int spg_id)
+{
+	if (new)
+		free_sp_group_id(spg_id);
+}
+
 static void free_sp_group(struct sp_group *spg)
 {
 	fput(spg->file);
@@ -665,7 +671,8 @@ int sp_group_add_task(int pid, int spg_id)
 	rcu_read_unlock();
 	if (ret) {
 		up_write(&sp_group_sem);
-		goto out_free_id;
+		free_new_spg_id(id_newly_generated, spg_id);
+		goto out;
 	}
 
 	/*
@@ -682,12 +689,14 @@ int sp_group_add_task(int pid, int spg_id)
 	 */
 	mm = get_task_mm(tsk->group_leader);
 	if (!mm) {
-		ret = -ESRCH;
 		up_write(&sp_group_sem);
+		ret = -ESRCH;
+		free_new_spg_id(id_newly_generated, spg_id);
 		goto out_put_task;
 	} else if (mm->sp_group) {
-		ret = -EEXIST;
 		up_write(&sp_group_sem);
+		ret = -EEXIST;
+		free_new_spg_id(id_newly_generated, spg_id);
 		goto out_put_mm;
 	}
 
@@ -695,6 +704,7 @@ int sp_group_add_task(int pid, int spg_id)
 	if (IS_ERR(spg)) {
 		up_write(&sp_group_sem);
 		ret = PTR_ERR(spg);
+		free_new_spg_id(id_newly_generated, spg_id);
 		goto out_put_mm;
 	}
 
@@ -813,9 +823,7 @@ int sp_group_add_task(int pid, int spg_id)
 	mmput(mm);
 out_put_task:
 	put_task_struct(tsk);
-out_free_id:
-	if (unlikely(ret) && id_newly_generated)
-		free_sp_group_id((unsigned int)spg_id);
+out:
 	return ret == 0 ? spg_id : ret;
 }
 EXPORT_SYMBOL_GPL(sp_group_add_task);
From: Tang Yizhou <tangyizhou@huawei.com>

ascend inclusion
category: bugfix
bugzilla: NA
CVE: NA
-------------------------------------------------
The following situation is not allowed:

	int *result = mmap(ADDR, sizeof(int), PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

Since share pool uses an independent UVA allocation algorithm, it may produce an address that conflicts with a user-specified address, so user mappings at addresses inside the share pool range must be rejected.
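The gate added in this patch boils down to the following condition; the four terms are taken directly from sp_check_mmap_addr() in the diff below, and the wrapper function is only an illustrative restatement:

	/*
	 * Restatement of the sp_check_mmap_addr() gate: an mmap address
	 * hint is rejected only when all four conditions hold.
	 */
	static bool would_reject_hint(unsigned long addr, unsigned long flags)
	{
		return enable_ascend_share_pool &&        /* feature switch is on    */
		       is_sharepool_addr(addr) &&         /* addr is in pool range   */
		       !check_aoscore_process(current) && /* caller is not AOS core  */
		       !(flags & MAP_SHARE_POOL);         /* not the pool's own mmap */
	}

sp_check_addr() is the same gate without the MAP_SHARE_POOL exemption, used on the munmap/mremap paths where no mmap flags are available.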
Signed-off-by: Tang Yizhou <tangyizhou@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/linux/share_pool.h | 14 ++++++++++++++
 mm/mmap.c                  | 12 ++++++++++++
 mm/mremap.c                |  4 ++++
 mm/share_pool.c            | 38 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 68 insertions(+)
diff --git a/include/linux/share_pool.h b/include/linux/share_pool.h
index c03b83beaf63c..9650f257b3ad7 100644
--- a/include/linux/share_pool.h
+++ b/include/linux/share_pool.h
@@ -292,6 +292,9 @@ static inline void sp_free_pages(struct page *page, struct vm_struct *area)
 	__free_pages(page, is_vmalloc_huge(area->flags) ? PMD_SHIFT - PAGE_SHIFT : 0);
 }
 
+extern bool sp_check_addr(unsigned long addr);
+extern bool sp_check_mmap_addr(unsigned long addr, unsigned long flags);
+
 #else
 
 static inline int sp_group_add_task(int pid, int spg_id)
@@ -495,6 +498,17 @@ static inline int sp_node_id(struct vm_area_struct *vma)
 {
 	return numa_node_id();
 }
+
+static inline bool sp_check_addr(unsigned long addr)
+{
+	return false;
+}
+
+static inline bool sp_check_mmap_addr(unsigned long addr, unsigned long flags)
+{
+	return false;
+}
+
 #endif
 
 #endif /* LINUX_SHARE_POOL_H */
diff --git a/mm/mmap.c b/mm/mmap.c
index a17373895bc33..e2b53084f2a71 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2356,6 +2356,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	if (flags & MAP_FIXED)
 		return addr;
 
+	if (sp_check_mmap_addr(addr, flags))
+		return -EINVAL;
+
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
 
@@ -2407,6 +2410,9 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	if (flags & MAP_FIXED)
 		return addr;
 
+	if (sp_check_mmap_addr(addr, flags))
+		return -EINVAL;
+
 	/* requesting a specific address */
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
@@ -3113,6 +3119,9 @@ int vm_munmap(unsigned long start, size_t len)
 	struct mm_struct *mm = current->mm;
 	LIST_HEAD(uf);
 
+	if (sp_check_addr(start))
+		return -EINVAL;
+
 	if (down_write_killable(&mm->mmap_sem))
 		return -EINTR;
 
@@ -3129,6 +3138,9 @@ int do_vm_munmap(struct task_struct *tsk, unsigned long start, size_t len)
 	struct mm_struct *mm = tsk->mm;
 	LIST_HEAD(uf);
 
+	if (sp_check_addr(start))
+		return -EINVAL;
+
 	if (down_write_killable(&mm->mmap_sem))
 		return -EINTR;
diff --git a/mm/mremap.c b/mm/mremap.c
index 2ac9eaa041d95..238c169dc9694 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -24,6 +24,7 @@
 #include <linux/uaccess.h>
 #include <linux/mm-arch-hooks.h>
 #include <linux/userfaultfd_k.h>
+#include <linux/share_pool.h>
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -534,6 +535,9 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 	if (offset_in_page(addr))
 		return ret;
 
+	if (sp_check_addr(addr) || sp_check_addr(new_addr))
+		return ret;
+
 	old_len = PAGE_ALIGN(old_len);
 	new_len = PAGE_ALIGN(new_len);
diff --git a/mm/share_pool.c b/mm/share_pool.c
index 2d9c0a8916211..eb5eaa3e0d05a 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -58,6 +58,8 @@
 #define byte2mb(size)		((size) >> 20)
 #define page2kb(page_num)	((page_num) << (PAGE_SHIFT - 10))
 
+#define PF_DOMAIN_CORE	0x10000000	/* AOS CORE processes in sched.h */
+
 /* mdc scene hack */
 static int __read_mostly enable_mdc_default_group;
 static const int mdc_default_group_id = 1;
@@ -334,6 +336,14 @@ static inline void check_interrupt_context(void)
 		panic("share_pool: can't be used in interrupt context\n");
 }
 
+static inline bool check_aoscore_process(struct task_struct *tsk)
+{
+	if (tsk->flags & PF_DOMAIN_CORE)
+		return true;
+	else
+		return false;
+}
+
 static unsigned long sp_mmap(struct mm_struct *mm, struct file *file,
			     struct sp_area *spa, unsigned long *populate);
 static void sp_munmap(struct mm_struct *mm, unsigned long addr, unsigned long size);
@@ -675,6 +685,14 @@ int sp_group_add_task(int pid, int spg_id)
 		goto out;
 	}
 
+	if (check_aoscore_process(tsk)) {
+		up_write(&sp_group_sem);
+		ret = -EACCES;
+		free_new_spg_id(id_newly_generated, spg_id);
+		sp_dump_stack();
+		goto out_put_task;
+	}
+
 	/*
 	 * group_leader: current thread may be exiting in a multithread process
 	 *
@@ -3030,6 +3048,26 @@ void __init proc_sharepool_init(void)
 
 /*** End of statistical and maintenance functions ***/
 
+bool sp_check_addr(unsigned long addr)
+{
+	if (enable_ascend_share_pool && is_sharepool_addr(addr) &&
+	    !check_aoscore_process(current)) {
+		sp_dump_stack();
+		return true;
+	} else
+		return false;
+}
+
+bool sp_check_mmap_addr(unsigned long addr, unsigned long flags)
+{
+	if (enable_ascend_share_pool && is_sharepool_addr(addr) &&
+	    !check_aoscore_process(current) && !(flags & MAP_SHARE_POOL)) {
+		sp_dump_stack();
+		return true;
+	} else
+		return false;
+}
+
 vm_fault_t sharepool_no_page(struct mm_struct *mm,
 			struct vm_area_struct *vma,
 			struct address_space *mapping, pgoff_t idx,