
From: Liu Shixin <liushixin2@huawei.com>

stable inclusion
from stable-v5.10.150
commit 45c33966759ea1b4040c08dacda99ef623c0ca29
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I62WRY
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=...

--------------------------------

commit 958f32ce832ba781ac20e11bb2d12a9352ea28fc upstream.

The vma_lock and hugetlb_fault_mutex are dropped before handling a
userfault and reacquired after handle_userfault() returns, but
reacquiring the vma_lock can lead to a UAF [1,2] due to the following
race:

hugetlb_fault
  hugetlb_no_page
    /*unlock vma_lock */
    hugetlb_handle_userfault
      handle_userfault
        /* unlock mm->mmap_lock*/
                                           vm_mmap_pgoff
                                             do_mmap
                                               mmap_region
                                                 munmap_vma_range
                                                   /* clean old vma */
    /* lock vma_lock again  <--- UAF */
  /* unlock vma_lock */

Since the vma_lock is unlocked immediately after
hugetlb_handle_userfault() anyway, drop the unneeded lock and unlock in
hugetlb_handle_userfault() to fix the issue.

[1] https://lore.kernel.org/linux-mm/000000000000d5e00a05e834962e@google.com/
[2] https://lore.kernel.org/linux-mm/20220921014457.1668-1-liuzixian4@huawei.com...

Link: https://lkml.kernel.org/r/20220923042113.137273-1-liushixin2@huawei.com
Fixes: 1a1aad8a9b7b ("userfaultfd: hugetlbfs: add userfaultfd hugetlb hook")
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reported-by: syzbot+193f9cee8638750b23cf@syzkaller.appspotmail.com
Reported-by: Liu Zixian <liuzixian4@huawei.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: <stable@vger.kernel.org> [4.14+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Conflicts:
	mm/hugetlb.c

Signed-off-by: Liu Shixin <liushixin2@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Chen Wandun <chenwandun@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 mm/hugetlb.c | 25 ++++++++++++++-----------
 1 file changed, 14 insertions(+), 11 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 312ecc15a4e4..5b0d2264b99b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4683,6 +4683,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 	spinlock_t *ptl;
 	unsigned long haddr = address & huge_page_mask(h);
 	bool new_page = false;
+	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 
 	/*
 	 * Currently, we are forced to kill the process in the event the
@@ -4692,7 +4693,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
 		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
 			   current->pid);
-		return ret;
+		goto out;
 	}
 
 	/*
@@ -4711,7 +4712,6 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 	 * Check for page in userfault range
 	 */
 	if (userfaultfd_missing(vma)) {
-		u32 hash;
 		struct vm_fault vmf = {
 			.vma = vma,
 			.address = haddr,
@@ -4726,17 +4726,14 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 		};
 
 		/*
-		 * hugetlb_fault_mutex and i_mmap_rwsem must be
-		 * dropped before handling userfault. Reacquire
-		 * after handling fault to make calling code simpler.
+		 * vma_lock and hugetlb_fault_mutex must be dropped
+		 * before handling userfault. Also mmap_lock will
+		 * be dropped during handling userfault, any vma
+		 * operation should be careful from here.
 		 */
-		hash = hugetlb_fault_mutex_hash(mapping, idx);
 		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 		i_mmap_unlock_read(mapping);
-		ret = handle_userfault(&vmf, VM_UFFD_MISSING);
-		i_mmap_lock_read(mapping);
-		mutex_lock(&hugetlb_fault_mutex_table[hash]);
-		goto out;
+		return handle_userfault(&vmf, VM_UFFD_MISSING);
 	}
 
 	page = alloc_huge_page(vma, haddr, 0);
@@ -4843,6 +4840,8 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 
 	unlock_page(page);
 out:
+	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+	i_mmap_unlock_read(mapping);
 	return ret;
 
 backout:
@@ -4942,7 +4941,11 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		if (sp_check_vm_share_pool(vma->vm_flags))
 			ret = sharepool_no_page(mm, vma, mapping, idx, address, ptep, flags);
 		else
-			ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
+			/*
+			 * hugetlb_no_page will drop vma lock and hugetlb fault
+			 * mutex internally, which make us return immediately.
+			 */
+			return hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
 		goto out_mutex;
 	}
 
-- 
2.20.1
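
For readers without the tree handy, below is a minimal user-space sketch
of the locking shape this patch changes. It is NOT kernel code: fake_vma,
fault_callout() and the single pthread mutex standing in for
hugetlb_fault_mutex/i_mmap_rwsem are all invented for illustration. The
pre-fix shape relocks through a pointer that may have gone stale while
every lock was dropped; the post-fix shape returns straight from the
callout and never touches the object again.

#include <pthread.h>

struct fake_vma {
	pthread_mutex_t lock;	/* stands in for the dropped locks */
};

/*
 * Stands in for handle_userfault(): all locks are dropped while we
 * are in here, so a racing thread may munmap and free the fake_vma.
 */
static int fault_callout(void)
{
	/* ...racing thread: munmap frees the old vma here... */
	return 0;
}

/* Pre-fix shape: relock through a possibly dangling pointer. */
static int no_page_buggy(struct fake_vma *vma)
{
	int ret;

	pthread_mutex_lock(&vma->lock);
	pthread_mutex_unlock(&vma->lock);	/* drop before the callout */
	ret = fault_callout();
	pthread_mutex_lock(&vma->lock);		/* <-- UAF if vma was freed */
	pthread_mutex_unlock(&vma->lock);
	return ret;
}

/* Post-fix shape: return the callout's result directly. */
static int no_page_fixed(struct fake_vma *vma)
{
	pthread_mutex_lock(&vma->lock);
	pthread_mutex_unlock(&vma->lock);	/* drop before the callout */
	return fault_callout();			/* vma never touched again */
}

int main(void)
{
	struct fake_vma vma;

	pthread_mutex_init(&vma.lock, NULL);
	no_page_buggy(&vma);	/* safe here only because nothing frees vma */
	no_page_fixed(&vma);
	pthread_mutex_destroy(&vma.lock);
	return 0;
}

This mirrors what the diff does in hugetlb_no_page(): the mutex hash is
computed up front, the userfault path returns handle_userfault()'s result
without relocking, the out: label drops both locks for every other path,
and hugetlb_fault() passes the return value through unchanged.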