From: Tang Yizhou <tangyizhou@huawei.com>
ascend inclusion
category: bugfix
bugzilla: 46925
CVE: NA
-------------------------------------------------
__sp_area_drop_locked() already checks whether spa is NULL, so drop the redundant NULL checks at its call sites.
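For context, the callee-side guard that makes the caller checks redundant looks roughly like the sketch below (the actual body of __sp_area_drop_locked() is not part of this diff; per the description it simply returns early on NULL):

    static void __sp_area_drop_locked(struct sp_area *spa)
    {
            if (!spa)
                    return;

            /* drop one reference; free spa once the count reaches zero */
    }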
Reported-by: Cui Bixuan <cuibixuan@huawei.com>
Signed-off-by: Tang Yizhou <tangyizhou@huawei.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/share_pool.c | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/mm/share_pool.c b/mm/share_pool.c
index 4316625defac..2cfac4642e0b 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -443,8 +443,7 @@ static void sp_munmap_task_areas(struct mm_struct *mm, struct list_head *stop)
 		if (&spa->link == stop)
 			break;
 
-		if (prev)
-			__sp_area_drop_locked(prev);
+		__sp_area_drop_locked(prev);
 		prev = spa;
 
 		atomic_inc(&spa->use_count);
@@ -459,8 +458,7 @@ static void sp_munmap_task_areas(struct mm_struct *mm, struct list_head *stop)
 
 		spin_lock(&sp_area_lock);
 	}
-	if (prev)
-		__sp_area_drop_locked(prev);
+	__sp_area_drop_locked(prev);
 
 	spin_unlock(&sp_area_lock);
 }
@@ -607,8 +605,7 @@ int sp_group_add_task(int pid, int spg_id)
 		struct file *file = spa_file(spa);
 		unsigned long addr;
 
-		if (prev)
-			__sp_area_drop_locked(prev);
+		__sp_area_drop_locked(prev);
 		prev = spa;
 
 		atomic_inc(&spa->use_count);
@@ -651,8 +648,7 @@ int sp_group_add_task(int pid, int spg_id)
 
 		spin_lock(&sp_area_lock);
 	}
-	if (prev)
-		__sp_area_drop_locked(prev);
+	__sp_area_drop_locked(prev);
 	spin_unlock(&sp_area_lock);
 
 	if (unlikely(ret)) {
From: Tang Yizhou <tangyizhou@huawei.com>
ascend inclusion
category: bugfix
bugzilla: 46925
CVE: NA
-------------------------------------------------
Passing an alignment of 1 does not cause any bug, thanks to the way __vmalloc_node_range() is implemented.
Still, passing PMD_SIZE explicitly makes the intent clearer and is more readable.
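For reference, the second parameter of __vmalloc_node_range() is the requested alignment; the prototype in this kernel generation is roughly:

    void *__vmalloc_node_range(unsigned long size, unsigned long align,
                               unsigned long start, unsigned long end,
                               gfp_t gfp_mask, pgprot_t prot,
                               unsigned long vm_flags, int node,
                               const void *caller);

so the change below only swaps the align argument from 1 to PMD_SIZE; the other arguments are untouched.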
Reported-by: Xu Qiang <xuqiang36@huawei.com>
Signed-off-by: Tang Yizhou <tangyizhou@huawei.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/share_pool.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/mm/share_pool.c b/mm/share_pool.c
index 2cfac4642e0b..686e8927c348 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -2797,7 +2797,7 @@ void *vmalloc_hugepage(unsigned long size)
 	/* PMD hugepage aligned */
 	size = PMD_ALIGN(size);
 
-	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+	return __vmalloc_node_range(size, PMD_SIZE, VMALLOC_START, VMALLOC_END,
 			GFP_KERNEL, PAGE_KERNEL,
 			VM_HUGE_PAGES, NUMA_NO_NODE,
 			__builtin_return_address(0));
@@ -2820,7 +2820,7 @@ void *vmalloc_hugepage_user(unsigned long size)
 	/* PMD hugepage aligned */
 	size = PMD_ALIGN(size);
 
-	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+	return __vmalloc_node_range(size, PMD_SIZE, VMALLOC_START, VMALLOC_END,
 			GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
 			VM_HUGE_PAGES | VM_USERMAP, NUMA_NO_NODE,
 			__builtin_return_address(0));
@@ -2866,7 +2866,7 @@ void *buff_vzalloc_hugepage_user(unsigned long size)
 	/* PMD hugepage aligned */
 	size = PMD_ALIGN(size);
 
-	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+	return __vmalloc_node_range(size, PMD_SIZE, VMALLOC_START, VMALLOC_END,
 			GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT, PAGE_KERNEL,
 			VM_HUGE_PAGES | VM_USERMAP, NUMA_NO_NODE,
 			__builtin_return_address(0));
From: Tang Yizhou <tangyizhou@huawei.com>
ascend inclusion
category: bugfix
bugzilla: 46925
CVE: NA
-------------------------------------------------
None of the share pool functions should be used in interrupt context. Add a checker function and call it at the beginning of each of them.
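Illustrative only, with a hypothetical caller that is not part of this patch: the share pool interfaces take sp_mutex and may sleep, so a call like the one below from an interrupt handler is a bug; with the checker in place it panics immediately instead of sleeping in atomic context:

    #include <linux/interrupt.h>

    /* hypothetical misuse, for illustration only */
    static irqreturn_t bad_irq_handler(int irq, void *dev_id)
    {
            unsigned long addr = *(unsigned long *)dev_id;

            sp_free(addr);  /* takes sp_mutex; check_interrupt_context() panics here */
            return IRQ_HANDLED;
    }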
Reported-by: Xu Qiang <xuqiang36@huawei.com>
Signed-off-by: Tang Yizhou <tangyizhou@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/share_pool.c | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)
diff --git a/mm/share_pool.c b/mm/share_pool.c
index 686e8927c348..ac02b9d624a8 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -44,6 +44,7 @@
 #include <linux/rmap.h>
 #include <linux/hugetlb.h>
 #include <linux/compaction.h>
+#include <linux/preempt.h>
 
 /* access control mode macros */
 #define AC_NONE			0
@@ -275,6 +276,12 @@ static int spa_dec_usage(enum spa_type type, unsigned long size, bool is_dvpp)
 	return 0;
 }
 
+static inline void check_interrupt_context(void)
+{
+	if (unlikely(in_interrupt()))
+		panic("share_pool: can't be used in interrupt context\n");
+}
+
 static unsigned long sp_mmap(struct mm_struct *mm, struct file *file,
 			     struct sp_area *spa, unsigned long *populate);
 static void sp_munmap(struct mm_struct *mm, unsigned long addr, unsigned long size);
@@ -341,6 +348,8 @@ int sp_group_id_by_pid(int pid)
 	struct sp_group *spg;
 	int spg_id = -ENODEV;
 
+	check_interrupt_context();
+
 	mutex_lock(&sp_mutex);
 	spg = __sp_find_spg(pid, SPG_ID_DEFAULT);
 	if (spg_valid(spg))
@@ -494,6 +503,8 @@ int sp_group_add_task(int pid, int spg_id)
 	struct sp_area *spa, *prev = NULL;
 	struct sp_proc_stat *stat;
 
+	check_interrupt_context();
+
 	/* mdc scene hack */
 	if (enable_mdc_default_group)
 		spg_id = mdc_default_group_id;
@@ -1165,6 +1176,8 @@ int sp_free(unsigned long addr)
 	loff_t offset;
 	int ret = 0;
 
+	check_interrupt_context();
+
 	mutex_lock(&sp_mutex);
 
 	/*
@@ -1278,6 +1291,8 @@ void *sp_alloc(unsigned long size, unsigned long sp_flags, int spg_id)
 	unsigned long mode, offset;
 	unsigned int noreclaim_flag;
 
+	check_interrupt_context();
+
 	/* mdc scene hack */
 	if (enable_mdc_default_group)
 		spg_id = mdc_default_group_id;
@@ -1701,6 +1716,8 @@ void *sp_make_share_k2u(unsigned long kva, unsigned long size,
 	struct sp_proc_stat *stat;
 	int ret = 0, is_hugepage;
 
+	check_interrupt_context();
+
 	if (sp_flags & ~SP_DVPP) {
 		if (printk_ratelimit())
 			pr_err("share pool: k2u sp_flags %lu error\n", sp_flags);
@@ -2017,6 +2034,8 @@ void *sp_make_share_u2k(unsigned long uva, unsigned long size, int pid)
 	};
 	struct vm_struct *area;
 
+	check_interrupt_context();
+
 	rcu_read_lock();
 	tsk = find_task_by_vpid(pid);
 	if (!tsk || (tsk->flags & PF_EXITING))
@@ -2309,6 +2328,8 @@ int sp_unshare(unsigned long va, unsigned long size, int pid, int spg_id)
 {
 	int ret = 0;
 
+	check_interrupt_context();
+
 	if (va < TASK_SIZE) {
 		/* user address */
 		ret = sp_unshare_uva(va, size, pid, spg_id);
@@ -2336,6 +2357,8 @@ int sp_walk_page_range(unsigned long uva, unsigned long size,
 	struct mm_struct *mm;
 	int ret = 0;
 
+	check_interrupt_context();
+
 	if (unlikely(!sp_walk_data)) {
 		if (printk_ratelimit())
 			pr_err("share pool: null pointer when walk page range\n");
@@ -2368,6 +2391,8 @@ void sp_walk_page_free(struct sp_walk_data *sp_walk_data)
 	struct page *page;
 	unsigned int i = 0;
 
+	check_interrupt_context();
+
 	if (!sp_walk_data)
 		return;
 
@@ -2405,6 +2430,8 @@ bool sp_config_dvpp_range(size_t start, size_t size, int device_id, int pid)
 {
 	struct sp_group *spg;
 
+	check_interrupt_context();
+
 	if (device_id < 0 || device_id >= MAX_DEVID || pid < 0 || size <= 0 ||
 	    size > MMAP_SHARE_POOL_16G_SIZE)
 		return false;
From: Tang Yizhou <tangyizhou@huawei.com>
ascend inclusion
category: bugfix
bugzilla: 46925
CVE: NA
-------------------------------------------------
sp_group_add_task() may be called with a valid spg_id provided by the caller. Such an id was not allocated by sp_group_add_task() itself, so it must not be freed on the error paths.
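Condensed sketch of the rule the fix follows (hypothetical helpers; the real logic is in the diff below): an id is released on the error path only when this function generated it itself.

    static int add_task_sketch(int pid, int spg_id)
    {
            bool id_newly_generated = false;
            int ret;

            if (spg_id == SPG_ID_AUTO) {            /* assumed "allocate an id for me" request */
                    spg_id = alloc_group_id();      /* hypothetical allocator */
                    if (spg_id < 0)
                            return spg_id;
                    id_newly_generated = true;
            }

            ret = do_add_task(pid, spg_id);         /* hypothetical work */
            if (ret && id_newly_generated)
                    free_sp_group_id((unsigned int)spg_id);

            return ret;
    }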
Reported-by: Wang Wensheng <wangwensheng4@huawei.com>
Signed-off-by: Tang Yizhou <tangyizhou@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/share_pool.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/mm/share_pool.c b/mm/share_pool.c
index ac02b9d624a8..0978c32704e5 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -500,6 +500,7 @@ int sp_group_add_task(int pid, int spg_id)
 	struct mm_struct *mm;
 	struct sp_group *spg;
 	int ret = 0;
+	bool id_newly_generated = false;
 	struct sp_area *spa, *prev = NULL;
 	struct sp_proc_stat *stat;
 
@@ -538,6 +539,7 @@ int sp_group_add_task(int pid, int spg_id)
 				"generate group id failed\n");
 			return spg_id;
 		}
+		id_newly_generated = true;
 	}
 
 	if (spg_id == SPG_ID_DVPP_PASS_THROUGH) {
@@ -550,6 +552,7 @@ int sp_group_add_task(int pid, int spg_id)
 				"generate group id failed in DVPP pass through\n");
 			return spg_id;
 		}
+		id_newly_generated = true;
 	}
 
 	mutex_lock(&sp_mutex);
@@ -564,7 +567,8 @@ int sp_group_add_task(int pid, int spg_id)
 
 	rcu_read_unlock();
 	if (ret) {
-		free_sp_group_id((unsigned int)spg_id);
+		if (id_newly_generated)
+			free_sp_group_id((unsigned int)spg_id);
 		goto out_unlock;
 	}
 
@@ -581,7 +585,8 @@ int sp_group_add_task(int pid, int spg_id)
 	spg = find_or_alloc_sp_group(spg_id);
 	if (IS_ERR(spg)) {
 		ret = PTR_ERR(spg);
-		free_sp_group_id((unsigned int)spg_id);
+		if (id_newly_generated)
+			free_sp_group_id((unsigned int)spg_id);
 		goto out_put_mm;
 	}
From: Tang Yizhou <tangyizhou@huawei.com>
ascend inclusion
category: bugfix
bugzilla: 46925
CVE: NA
-------------------------------------------------
If vmap() or vmap_hugepage() fails in sp_make_share_u2k(), the reference count of the pages recorded in struct sp_walk_data must be dropped, otherwise the pages are leaked.
A few additional cleanups are included as well.
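Background sketch (not part of the diff): every page recorded in sp_walk_data->pages holds an extra reference taken when the user range was walked, so a failing exit path has to drop them and free the array. The diff below centralizes this in __sp_walk_page_free(); a minimal standalone equivalent would be:

    static void drop_walk_pages(struct page **pages, unsigned int page_count)
    {
            unsigned int i;

            for (i = 0; i < page_count; i++)
                    put_page(pages[i]);
            kvfree(pages);
    }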
Reported-by: Zhou Guanghui <zhouguanghui1@huawei.com>
Signed-off-by: Tang Yizhou <tangyizhou@huawei.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/share_pool.c | 30 +++++++++++++++++++-----------
 1 file changed, 19 insertions(+), 11 deletions(-)
diff --git a/mm/share_pool.c b/mm/share_pool.c
index 0978c32704e5..e326c95104da 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -2019,6 +2019,22 @@ static int __sp_walk_page_range(unsigned long uva, unsigned long size,
 	return ret;
 }
 
+static void __sp_walk_page_free(struct sp_walk_data *data)
+{
+	int i = 0;
+	struct page *page;
+
+	while (i < data->page_count) {
+		page = data->pages[i++];
+		put_page(page);
+	}
+
+	kvfree(data->pages);
+	/* prevent repeated release */
+	data->page_count = 0;
+	data->pages = NULL;
+}
+
 /**
  * Share user memory of a specified process to kernel
  * @uva: the VA of shared user memory
@@ -2078,8 +2094,9 @@ void *sp_make_share_u2k(unsigned long uva, unsigned long size, int pid)
 		if (!p) {
 			if (printk_ratelimit())
 				pr_err("share pool: vmap(huge) in u2k failed\n");
+			__sp_walk_page_free(&sp_walk_data);
 			p = ERR_PTR(-ENOMEM);
-			goto out_free_pages;
+			goto out_put_task;
 		} else {
 			p = p + (uva - sp_walk_data.uva_aligned);
 		}
@@ -2092,7 +2109,6 @@ void *sp_make_share_u2k(unsigned long uva, unsigned long size, int pid)
 	area = find_vm_area(p);
 	area->flags |= VM_USERMAP;
 
-out_free_pages:
 	kvfree(sp_walk_data.pages);
 out_put_task:
 	put_task_struct(tsk);
@@ -2393,20 +2409,12 @@ EXPORT_SYMBOL_GPL(sp_walk_page_range);
 
 void sp_walk_page_free(struct sp_walk_data *sp_walk_data)
 {
-	struct page *page;
-	unsigned int i = 0;
-
 	check_interrupt_context();
 
 	if (!sp_walk_data)
 		return;
 
-	while (i < sp_walk_data->page_count) {
-		page = sp_walk_data->pages[i++];
-		put_page(page);
-	}
-
-	kvfree(sp_walk_data->pages);
+	__sp_walk_page_free(sp_walk_data);
 }
 EXPORT_SYMBOL_GPL(sp_walk_page_free);