From: Tang Yizhou <tangyizhou@huawei.com>

ascend inclusion
category: bugfix
bugzilla: NA
CVE: NA
-------------------------------------------------
Make memory compaction configurable: trigger sp_try_to_compact() after a hugepage allocation attempt.
Signed-off-by: Tang Yizhou <tangyizhou@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/share_pool.c | 1 +
 1 file changed, 1 insertion(+)
diff --git a/mm/share_pool.c b/mm/share_pool.c
index e5a71e25964c1..ea2ea37111b9c 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -3226,6 +3226,7 @@ struct page *sp_alloc_pages(struct vm_struct *area, gfp_t mask,
 		noreclaim_flag = memalloc_noreclaim_save();
 		page = hugetlb_alloc_hugepage(NUMA_NO_NODE, HUGETLB_ALLOC_NONE);
 		memalloc_noreclaim_restore(noreclaim_flag);
+		sp_try_to_compact();
 		return page;
 	} else
 		return alloc_pages_node(node, mask, page_order);
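For context, the knob that decides whether sp_try_to_compact() actually compacts is typically exposed as a sysctl. The sketch below shows one way this could be wired up; the knob name (sharepool_compact_enable), the table and the function body are illustrative assumptions, not the real share pool implementation.

/*
 * Illustrative sketch only: gate sp_try_to_compact() behind a sysctl.
 * All names below are assumptions made for this example.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/sysctl.h>

static int sysctl_sp_compact_enable;	/* 0 = off (default), 1 = on */
static int sp_zero;
static int sp_one = 1;

static struct ctl_table sp_compact_table[] = {
	{
		.procname	= "sharepool_compact_enable",
		.data		= &sysctl_sp_compact_enable,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &sp_zero,
		.extra2		= &sp_one,
	},
	{ }
};

static int __init sp_compact_sysctl_init(void)
{
	/* expose the knob under /proc/sys/kernel/ */
	return register_sysctl("kernel", sp_compact_table) ? 0 : -ENOMEM;
}
late_initcall(sp_compact_sysctl_init);

static void sp_try_to_compact(void)
{
	/* do nothing unless the administrator turned compaction on */
	if (!sysctl_sp_compact_enable)
		return;
	/* ... trigger memory compaction here ... */
}

With a knob like this, the extra compaction work added after hugetlb_alloc_hugepage() stays disabled unless an administrator opts in.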
From: Tang Yizhou <tangyizhou@huawei.com>

ascend inclusion
category: bugfix
bugzilla: NA
CVE: NA
-------------------------------------------------
Downgrade these messages to pr_debug(), since only root can enable pr_debug printing.
Signed-off-by: Tang Yizhou <tangyizhou@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/share_pool.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/mm/share_pool.c b/mm/share_pool.c
index ea2ea37111b9c..ebc2d04c01145 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -1601,7 +1601,7 @@ static int is_vmap_hugepage(unsigned long addr)
 
 	area = find_vm_area((void *)addr);
 	if (unlikely(!area)) {
-		pr_err_ratelimited("share pool: failed to find vm area(%lx)\n", addr);
+		pr_debug("share pool: failed to find vm area(%lx)\n", addr);
 		return -EINVAL;
 	}
 
@@ -1659,7 +1659,7 @@ static unsigned long sp_remap_kva_to_vma(unsigned long kva, struct sp_area *spa,
 
 	ret_addr = sp_mmap(mm, file, spa, &populate);
 	if (IS_ERR_VALUE(ret_addr)) {
-		pr_err("share pool: k2u mmap failed %lx\n", ret_addr);
+		pr_debug("share pool: k2u mmap failed %lx\n", ret_addr);
 		goto put_mm;
 	}
 	BUG_ON(ret_addr != spa->va_start);
@@ -1915,7 +1915,7 @@ void *sp_make_share_k2u(unsigned long kva, unsigned long size,
 
 	if (!vmalloc_area_set_flag(spa, kva_aligned, VM_SHAREPOOL)) {
 		up_read(&spg->rw_lock);
-		pr_err("share pool: %s: the kva %lx is not valid\n", __func__, (unsigned long)kva_aligned);
+		pr_debug("share pool: %s: the kva %lx is not valid\n", __func__, (unsigned long)kva_aligned);
 		goto out_drop_spa;
 	}
 
@@ -1938,7 +1938,7 @@ void *sp_make_share_k2u(unsigned long kva, unsigned long size,
 	} else {
 		/* associate vma and spa */
 		if (!vmalloc_area_clr_flag(spa, kva_aligned, VM_SHAREPOOL))
-			pr_warn("share pool: %s: the kva %lx is not valid\n",
+			pr_debug("share pool: %s: the kva %lx is not valid\n",
 				__func__, (unsigned long)kva_aligned);
 	}
 
@@ -2032,7 +2032,7 @@ static int sp_hugetlb_entry(pte_t *ptep, unsigned long hmask,
 	struct sp_walk_data *sp_walk_data;
 
 	if (unlikely(!pte_present(pte))) {
-		pr_err_ratelimited("share pool: the page of addr %lx unexpectedly not in RAM\n", (unsigned long)addr);
+		pr_debug("share pool: the page of addr %lx unexpectedly not in RAM\n", (unsigned long)addr);
 		return -EFAULT;
 	}
 
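The reasoning behind the downgrade: pr_err()/pr_warn() at these call sites can be triggered by unprivileged callers and flood the kernel log, while pr_debug() is silent by default. With CONFIG_DYNAMIC_DEBUG it only prints after a call site is explicitly enabled through debugfs (root only); without dynamic debug it compiles to a no-op unless DEBUG is defined. A minimal, illustrative usage sketch (the function and message are made up for this example):

#include <linux/errno.h>
#include <linux/printk.h>

/*
 * Illustrative only. This message stays silent until root enables the
 * call site, e.g. through /sys/kernel/debug/dynamic_debug/control when
 * CONFIG_DYNAMIC_DEBUG is set.
 */
static int sp_example_check(unsigned long addr)
{
	if (!addr) {
		pr_debug("share pool: invalid address %lx\n", addr);
		return -EINVAL;
	}
	return 0;
}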
From: Tang Yizhou <tangyizhou@huawei.com>

ascend inclusion
category: bugfix
bugzilla: NA
CVE: NA
-------------------------------------------------
Release sp_stat_sem in the failure branch of sp_init_proc_stat(); otherwise the semaphore is left held when the function returns an error.
Signed-off-by: Tang Yizhou <tangyizhou@huawei.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/share_pool.c | 1 +
 1 file changed, 1 insertion(+)
diff --git a/mm/share_pool.c b/mm/share_pool.c
index ebc2d04c01145..607a5f0097a41 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -135,6 +135,7 @@ static struct sp_proc_stat *sp_init_proc_stat(struct task_struct *tsk,
 		up_write(&sp_stat_sem);
 		return stat;
 	} else {
+		up_write(&sp_stat_sem);
 		/* if enter this branch, that's our mistake */
 		pr_err_ratelimited("share pool: proc stat invalid id %d\n", id);
 		return ERR_PTR(-EBUSY);
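The rule being restored here is that every return taken while sp_stat_sem is write-held must be preceded by up_write(). A simplified, illustrative sketch of the pattern (not the actual sp_init_proc_stat() body):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(example_stat_sem);

/*
 * Illustrative only: both the success and the failure path release the
 * semaphore before returning, so no later writer can block forever on it.
 */
static void *example_init_stat(void *stat)
{
	down_write(&example_stat_sem);
	if (stat) {
		up_write(&example_stat_sem);
		return stat;
	}
	/* failure branch: release the semaphore before bailing out */
	up_write(&example_stat_sem);
	return ERR_PTR(-EBUSY);
}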
From: Tang Yizhou <tangyizhou@huawei.com>

ascend inclusion
category: bugfix
bugzilla: NA
CVE: NA
-------------------------------------------------
Free the group id at the end of sp_group_add_task() on failure. Benefits:
1. The locks are held for a shorter time.
2. It is no longer possible to forget to free the id in other error handling branches.
Signed-off-by: Tang Yizhou <tangyizhou@huawei.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/share_pool.c | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/mm/share_pool.c b/mm/share_pool.c
index 607a5f0097a41..6a4da9ac83e14 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -664,10 +664,8 @@ int sp_group_add_task(int pid, int spg_id)
 
 	rcu_read_unlock();
 	if (ret) {
-		if (id_newly_generated)
-			free_sp_group_id((unsigned int)spg_id);
 		up_write(&sp_group_sem);
-		goto out_unlock;
+		goto out_free_id;
 	}
 
 	/*
@@ -695,10 +693,8 @@ int sp_group_add_task(int pid, int spg_id)
 
 	spg = find_or_alloc_sp_group(spg_id);
 	if (IS_ERR(spg)) {
-		ret = PTR_ERR(spg);
-		if (id_newly_generated)
-			free_sp_group_id((unsigned int)spg_id);
 		up_write(&sp_group_sem);
+		ret = PTR_ERR(spg);
 		goto out_put_mm;
 	}
 
@@ -817,7 +813,9 @@ int sp_group_add_task(int pid, int spg_id)
 	mmput(mm);
 out_put_task:
 	put_task_struct(tsk);
-out_unlock:
+out_free_id:
+	if (unlikely(ret) && id_newly_generated)
+		free_sp_group_id((unsigned int)spg_id);
 	return ret == 0 ? spg_id : ret;
 }
 EXPORT_SYMBOL_GPL(sp_group_add_task);
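This follows the common kernel pattern of funnelling every failure path through one exit label, so the cleanup is written exactly once and runs after the locks are dropped. A simplified, illustrative sketch of the pattern (the helpers are stand-ins, not the real sp_group_add_task() code):

#include <linux/compiler.h>
#include <linux/types.h>

/* Stand-in helpers for the real work done while adding a task. */
static int example_step_one(int id) { return 0; }
static int example_step_two(int id) { return 0; }
static void example_free_id(int id) { }

static int example_add_task(int id, bool id_newly_generated)
{
	int ret;

	ret = example_step_one(id);
	if (ret)
		goto out;	/* no per-branch id cleanup needed */

	ret = example_step_two(id);
	if (ret)
		goto out;
out:
	/* single cleanup point, reached by every failure path */
	if (unlikely(ret) && id_newly_generated)
		example_free_id(id);
	return ret == 0 ? id : ret;
}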
From: Tang Yizhou <tangyizhou@huawei.com>

ascend inclusion
category: bugfix
bugzilla: NA
CVE: NA
-------------------------------------------------
Show the "sp" flag in the VmFlags line of /proc/<pid>/smaps for share pool mappings. Example:

e80000600000-e80000603000 rw-s 00600000 00:05 1025          /sp_group_1 (deleted)
Size:                 12 kB
KernelPageSize:        4 kB
MMUPageSize:           4 kB
Rss:                   0 kB
Pss:                   0 kB
Shared_Clean:          0 kB
Shared_Dirty:          0 kB
Private_Clean:         0 kB
Private_Dirty:         0 kB
Referenced:            0 kB
Anonymous:             0 kB
LazyFree:              0 kB
AnonHugePages:         0 kB
ShmemPmdMapped:        0 kB
Shared_Hugetlb:        0 kB
Private_Hugetlb:       0 kB
Swap:                  0 kB
SwapPss:               0 kB
Locked:                0 kB
THPeligible:            0
VmFlags: rd wr sh mr mw me ms pf io dc de nr dd sp
Signed-off-by: Tang Yizhou <tangyizhou@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 fs/proc/task_mmu.c | 3 +++
 1 file changed, 3 insertions(+)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 0417343481cd4..8b8129d658e04 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -667,6 +667,9 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
 #endif /* CONFIG_ARCH_HAS_PKEYS */
 #ifdef CONFIG_USERSWAP
 		[ilog2(VM_USWAP)] = "us",
+#endif
+#ifdef CONFIG_ASCEND_SHARE_POOL
+		[ilog2(VM_SHARE_POOL)] = "sp",
 #endif
 	};
 	size_t i;
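For background on why a single array entry is enough: show_smap_vma_flags() walks vma->vm_flags bit by bit and prints the two-letter mnemonic registered for each set bit, so any flag with an entry in the mnemonics table automatically appears in VmFlags. A simplified, illustrative sketch of that loop (not the exact upstream code):

#include <linux/bitops.h>
#include <linux/seq_file.h>
#include <linux/types.h>

/*
 * Illustrative only: emit a two-letter name for every vm_flags bit that
 * has a mnemonic registered, e.g. "sp" for VM_SHARE_POOL.
 */
static void example_show_vma_flags(struct seq_file *m, unsigned long vm_flags,
				   const char mnemonics[][2])
{
	size_t i;

	seq_puts(m, "VmFlags:");
	for (i = 0; i < BITS_PER_LONG; i++) {
		if (!mnemonics[i][0])		/* bit has no name registered */
			continue;
		if (vm_flags & (1UL << i)) {
			seq_putc(m, ' ');
			seq_putc(m, mnemonics[i][0]);
			seq_putc(m, mnemonics[i][1]);
		}
	}
	seq_putc(m, '\n');
}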