From: Bixuan Cui <cuibixuan@huawei.com>

ascend inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4EUVI
CVE: NA
-------------------------------------------------
With the MAP_LOCKED flag set, resource contention occurs when multiple tasks map/munmap memory at the same time, causing a performance loss.

Add sysctl_share_pool_map_lock_enable to control whether the mapping is created with MAP_LOCKED in sp_mmap().
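For reference, a minimal userspace sketch of toggling the new knob; it assumes the entry is exposed as /proc/sys/kernel/share_pool_map_lock_enable (based on its registration in kern_table below) and uses 0/1 as in the table comment (0: map_unlock, 1: map_lock):

#include <stdio.h>
#include <stdlib.h>

/*
 * Sketch only: write 0 or 1 to the knob. The /proc/sys/kernel/ path is
 * an assumption derived from the entry being added to kern_table.
 */
int main(int argc, char **argv)
{
	const char *path = "/proc/sys/kernel/share_pool_map_lock_enable";
	const char *val = (argc > 1) ? argv[1] : "0";	/* "0" or "1" */
	FILE *fp = fopen(path, "w");

	if (!fp) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	fprintf(fp, "%s\n", val);	/* 0: map_unlock, 1: map_lock */
	fclose(fp);
	return 0;
}

Writing the value from a shell with echo works the same way.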
Signed-off-by: Bixuan Cui <cuibixuan@huawei.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Reviewed-by: Weilong Chen <chenweilong@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/linux/share_pool.h |  2 ++
 kernel/sysctl.c            | 10 ++++++++++
 mm/share_pool.c            | 12 ++++++++++--
 mm/vmalloc.c               |  2 +-
 4 files changed, 23 insertions(+), 3 deletions(-)
diff --git a/include/linux/share_pool.h b/include/linux/share_pool.h
index fb7237351a995..c3120b7b24948 100644
--- a/include/linux/share_pool.h
+++ b/include/linux/share_pool.h
@@ -38,6 +38,8 @@ extern int sysctl_sp_debug_mode;
 
 extern int enable_ascend_share_pool;
 
+extern int sysctl_share_pool_map_lock_enable;
+
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
 extern bool vmap_allow_huge;
 #endif
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index b88e12d942166..97a24290f0750 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1280,6 +1280,16 @@ static struct ctl_table kern_table[] = {
 		.extra1		= &zero,
 		.extra2		= &one,
 	},
+	{
+		/* 0: map_unlock, 1: map_lock */
+		.procname	= "share_pool_map_lock_enable",
+		.data		= &sysctl_share_pool_map_lock_enable,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
 #endif
 	{ }
 };
diff --git a/mm/share_pool.c b/mm/share_pool.c
index 94e2be2ce96bc..36e33d1de4f8c 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -69,6 +69,8 @@ int sysctl_ac_mode = AC_NONE;
 /* debug mode */
 int sysctl_sp_debug_mode;
 
+int sysctl_share_pool_map_lock_enable;
+
 /* idr of all sp_groups */
 static DEFINE_IDR(sp_group_idr);
@@ -1227,11 +1229,17 @@ static unsigned long sp_mmap(struct mm_struct *mm, struct file *file,
 	unsigned long addr = spa->va_start;
 	unsigned long size = spa_size(spa);
 	unsigned long prot = PROT_READ | PROT_WRITE;
-	unsigned long flags = MAP_FIXED | MAP_SHARED | MAP_LOCKED |
-			      MAP_POPULATE | MAP_SHARE_POOL;
+	unsigned long flags = MAP_FIXED | MAP_SHARED | MAP_POPULATE |
+			      MAP_SHARE_POOL;
 	unsigned long vm_flags = VM_NORESERVE | VM_SHARE_POOL | VM_DONTCOPY;
 	unsigned long pgoff = (addr - MMAP_SHARE_POOL_START) >> PAGE_SHIFT;
 
+	/* Mark the mapped region to be locked. When MAP_LOCKED is enabled,
+	 * multiple tasks contend for resources, causing performance loss.
+	 */
+	if (sysctl_share_pool_map_lock_enable)
+		flags |= MAP_LOCKED;
+
 	atomic_inc(&spa->use_count);
 	addr = __do_mmap(mm, file, addr, size, prot, flags, vm_flags, pgoff,
 			 populate, NULL);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 4b0970d6fc913..da610bc88ae92 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2650,7 +2650,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 			pgprot_t prot, unsigned long vm_flags, int node,
 			const void *caller)
 {
-	struct vm_struct *area;
+	struct vm_struct *area = NULL;
 	void *addr;
 	unsigned long real_size = size;
 	unsigned long real_align = align;
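For context on the flag being made optional: MAP_LOCKED asks mmap() to populate the mapping and lock its pages at map time, roughly like following the mmap() with mlock(). A minimal userspace illustration of that behavior (an ordinary anonymous mapping, not the share-pool path; assumes RLIMIT_MEMLOCK permits the size):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4 * 1024 * 1024;	/* 4 MiB */
	/*
	 * MAP_LOCKED makes the kernel populate and lock the pages up front,
	 * which is the per-mapping work the new sysctl lets sp_mmap() skip.
	 */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_LOCKED, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0, len);	/* pages are already resident and locked */
	munmap(p, len);
	return 0;
}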