ascend inclusion
category: Feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7YF5R
---------------------------------------------
Use the mmap_write_[un]lock helpers. Since the PER_VMA_LOCK series was merged, mmap_write_unlock() is no longer equivalent to a bare up_write(&mm->mmap_lock), so convert the open-coded lock operations in share_pool to the helpers.
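For reference, a simplified sketch of what these helpers look like in mainline after the per-VMA-lock work, paraphrased from include/linux/mmap_lock.h (tracing hooks omitted; exact code varies by kernel version):

  static inline void mmap_write_lock(struct mm_struct *mm)
  {
  	/* simplified sketch: mainline also calls mmap_lock tracing hooks */
  	down_write(&mm->mmap_lock);
  }

  static inline void mmap_write_unlock(struct mm_struct *mm)
  {
  	/* end the per-VMA write-lock cycle before dropping mmap_lock */
  	vma_end_write_all(mm);
  	up_write(&mm->mmap_lock);
  }

An open-coded up_write(&mm->mmap_lock) skips vma_end_write_all(), so VMAs write-locked during that cycle would still appear locked to per-VMA-lock readers until the next write cycle ends. Using the helpers keeps share_pool correct however the lock internals evolve.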
Signed-off-by: Wang Wensheng <wangwensheng4@huawei.com>
---
 mm/share_pool.c | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/mm/share_pool.c b/mm/share_pool.c
index 1ca5b867147d..66f14fd38d25 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -1777,9 +1777,9 @@ static void sp_munmap(struct mm_struct *mm, unsigned long addr,
 {
 	int err;
 
-	down_write(&mm->mmap_lock);
+	mmap_write_lock(mm);
 	if (unlikely(!mmget_not_zero(mm))) {
-		up_write(&mm->mmap_lock);
+		mmap_write_unlock(mm);
 		pr_warn("munmap: target mm is exiting\n");
 		return;
 	}
@@ -1789,7 +1789,7 @@ static void sp_munmap(struct mm_struct *mm, unsigned long addr,
 	if (err)
 		pr_err("failed to unmap VA %pK when sp munmap, %d\n",
 		       (void *)addr, err);
 
-	up_write(&mm->mmap_lock);
+	mmap_write_unlock(mm);
 	mmput_async(mm);
 }
@@ -2090,9 +2090,9 @@ static int sp_map_spa_to_mm(struct mm_struct *mm, struct sp_area *spa,
 	unsigned long mmap_addr;
 	unsigned long populate = 0;
 
-	down_write(&mm->mmap_lock);
+	mmap_write_lock(mm);
 	if (unlikely(!mmget_not_zero(mm))) {
-		up_write(&mm->mmap_lock);
+		mmap_write_unlock(mm);
 		pr_warn("sp_map: target mm is exiting\n");
 		return SP_SKIP_ERR;
 	}
@@ -2100,19 +2100,19 @@ static int sp_map_spa_to_mm(struct mm_struct *mm, struct sp_area *spa,
 	/* when success, mmap_addr == spa->va_start */
 	mmap_addr = sp_mmap(mm, spa_file(spa), spa, &populate, prot);
 	if (IS_ERR_VALUE(mmap_addr)) {
-		up_write(&mm->mmap_lock);
+		mmap_write_unlock(mm);
 		mmput_async(mm);
 		pr_err("%s, sp mmap failed %ld\n", str, mmap_addr);
 		return (int)mmap_addr;
 	}
 
 	if (spa->type == SPA_TYPE_ALLOC) {
-		up_write(&mm->mmap_lock);
+		mmap_write_unlock(mm);
 		ret = sp_alloc_populate(mm, spa, populate, ac);
 		if (ret) {
-			down_write(&mm->mmap_lock);
+			mmap_write_lock(mm);
 			do_munmap(mm, mmap_addr, spa_size(spa), NULL);
-			up_write(&mm->mmap_lock);
+			mmap_write_unlock(mm);
 		}
 	} else {
 		ret = sp_k2u_populate(mm, spa);
@@ -2120,7 +2120,7 @@ static int sp_map_spa_to_mm(struct mm_struct *mm, struct sp_area *spa,
 			do_munmap(mm, mmap_addr, spa_size(spa), NULL);
 			pr_info("k2u populate failed, %d\n", ret);
 		}
-		up_write(&mm->mmap_lock);
+		mmap_write_unlock(mm);
 	}
 	mmput_async(mm);
 
@@ -2735,11 +2735,11 @@ void *mg_sp_make_share_u2k(unsigned long uva, unsigned long size, int tgid)
 		return ERR_PTR(-EPERM);
 	}
 
-	down_write(&mm->mmap_lock);
+	mmap_write_lock(mm);
 	ret = __sp_walk_page_range(uva, size, mm, &sp_walk_data);
 	if (ret) {
 		pr_err_ratelimited("walk page range failed %d\n", ret);
-		up_write(&mm->mmap_lock);
+		mmap_write_unlock(mm);
 		return ERR_PTR(ret);
 	}
 
@@ -2749,7 +2749,7 @@ void *mg_sp_make_share_u2k(unsigned long uva, unsigned long size, int tgid)
 	else
 		p = vmap(sp_walk_data.pages, sp_walk_data.page_count,
 			 VM_MAP, PAGE_KERNEL);
-	up_write(&mm->mmap_lock);
+	mmap_write_unlock(mm);
 
 	if (!p) {
 		pr_err("vmap(huge) in u2k failed\n");
@@ -2892,9 +2892,9 @@ int mg_sp_walk_page_range(unsigned long uva, unsigned long size,
 		return -ESRCH;
 	}
 
-	down_write(&mm->mmap_lock);
+	mmap_write_lock(mm);
 	ret = __sp_walk_page_range(uva, size, mm, sp_walk_data);
-	up_write(&mm->mmap_lock);
+	mmap_write_unlock(mm);
 
 	mmput(mm);
 	put_task_struct(tsk);