From: Xiongfeng Wang <wangxiongfeng2@huawei.com>
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4AHP2
CVE: NA
-------------------------------------------------
When userswap is enabled, the memory pointed to by 'pages' is not freed on the abnormal (error) branch in do_mmap(). To fix the issue while keeping do_mmap() mostly unchanged, rename do_mmap() to __do_mmap() and move the allocation and freeing of that memory out of __do_mmap(). When __do_mmap() returns an error value, jump to the error label to free the memory.
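To illustrate the ownership pattern the fix relies on, here is a minimal, self-contained userspace sketch (illustration only, not the kernel code; every name in it is made up): the wrapper allocates the buffer, and a single cleanup label that every branch reaches, including the error branch, releases it.

  #include <stdio.h>
  #include <stdlib.h>

  /* Stand-in for __do_mmap(): may fail, never owns the buffer. */
  static int core_operation(int fail)
  {
  	return fail ? -1 : 0;
  }

  /* Stand-in for the uswap wrapper: allocates 'pages', frees it on
   * every path, including the error branch that used to leak. */
  static int wrapper(int fail)
  {
  	int ret;
  	void **pages = calloc(16, sizeof(*pages));

  	if (!pages)
  		return -1;

  	ret = core_operation(fail);
  	if (ret)
  		goto out;	/* previously this branch leaked 'pages' */

  	/* ... use 'pages' on the success path ... */
  out:
  	free(pages);		/* single cleanup point for all branches */
  	return ret;
  }

  int main(void)
  {
  	printf("success path: %d\n", wrapper(0));
  	printf("error path:   %d\n", wrapper(1));
  	return 0;
  }

This mirrors the structure of the patch: __do_mmap() no longer allocates or frees the 'pages' array; the userswap wrapper does both, so the error branch cannot leak it.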
Signed-off-by: Xiongfeng Wang <wangxiongfeng2@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/mmap.c | 404 +++++++++++++++++++++++++++---------------------------
 1 file changed, 204 insertions(+), 200 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 378e1869ac7a0..69848726063c7 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1384,172 +1384,6 @@ static unsigned long __mmap_region(struct mm_struct *mm,
 				unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
 				struct list_head *uf);
-#ifdef CONFIG_USERSWAP
-/*
- * Check if pages between 'addr ~ addr+len' can be user swapped. If so, get
- * the reference of the pages and return the pages through input parameters
- * 'ppages'.
- */
-int pages_can_be_swapped(struct mm_struct *mm, unsigned long addr,
-			 unsigned long len, struct page ***ppages)
-{
-	struct vm_area_struct *vma;
-	struct page *page = NULL;
-	struct page **pages = NULL;
-	unsigned long addr_start, addr_end;
-	unsigned long ret;
-	int i, page_num = 0;
-
-	pages = kmalloc(sizeof(struct page *) * (len / PAGE_SIZE), GFP_KERNEL);
-	if (!pages)
-		return -ENOMEM;
-
-	addr_start = addr;
-	addr_end = addr + len;
-	while (addr < addr_end) {
-		vma = find_vma(mm, addr);
-		if (!vma || !vma_is_anonymous(vma) ||
-		    (vma->vm_flags & VM_LOCKED) || vma->vm_file
-		    || (vma->vm_flags & VM_STACK) || (vma->vm_flags & (VM_IO | VM_PFNMAP))) {
-			ret = -EINVAL;
-			goto out;
-		}
-		if (!(vma->vm_flags & VM_UFFD_MISSING)) {
-			ret = -EAGAIN;
-			goto out;
-		}
-get_again:
-		/* follow_page will inc page ref, dec the ref after we remap the page */
-		page = follow_page(vma, addr, FOLL_GET);
-		if (IS_ERR_OR_NULL(page)) {
-			ret = -ENODEV;
-			goto out;
-		}
-		pages[page_num] = page;
-		page_num++;
-		if (!PageAnon(page) || !PageSwapBacked(page) || PageHuge(page) || PageSwapCache(page)) {
-			ret = -EINVAL;
-			goto out;
-		} else if (PageTransCompound(page)) {
-			if (trylock_page(page)) {
-				if (!split_huge_page(page)) {
-					put_page(page);
-					page_num--;
-					unlock_page(page);
-					goto get_again;
-				} else {
-					unlock_page(page);
-					ret = -EINVAL;
-					goto out;
-				}
-			} else {
-				ret = -EINVAL;
-				goto out;
-			}
-		}
-		if (page_mapcount(page) > 1 || page_mapcount(page) + 1 != page_count(page)) {
-			ret = -EBUSY;
-			goto out;
-		}
-		addr += PAGE_SIZE;
-	}
-
-	*ppages = pages;
-	return 0;
-
-out:
-	for (i = 0; i < page_num; i++)
-		put_page(pages[i]);
-	if (pages)
-		kfree(pages);
-	*ppages = NULL;
-	return ret;
-}
-
-/*
- * In uswap situation, we use the bit 0 of the returned address to indicate
- * whether the pages are dirty.
- */
-#define USWAP_PAGES_DIRTY 1
-
-/* unmap the pages between 'addr ~ addr+len' and remap them to a new address */
-unsigned long do_user_swap(struct mm_struct *mm, unsigned long addr_start,
-			   unsigned long len, struct page **pages, unsigned long new_addr)
-{
-	struct vm_area_struct *vma;
-	struct page *page;
-	pmd_t *pmd;
-	pte_t *pte, old_pte;
-	spinlock_t *ptl;
-	unsigned long addr, addr_end;
-	bool pages_dirty = false;
-	int i, err;
-
-	addr_end = addr_start + len;
-	lru_add_drain();
-	mmu_notifier_invalidate_range_start(mm, addr_start, addr_end);
-	addr = addr_start;
-	i = 0;
-	while (addr < addr_end) {
-		page = pages[i];
-		vma = find_vma(mm, addr);
-		if (!vma) {
-			mmu_notifier_invalidate_range_end(mm, addr_start, addr_end);
-			WARN_ON("find_vma failed\n");
-			return -EINVAL;
-		}
-		pmd = mm_find_pmd(mm, addr);
-		if (!pmd) {
-			mmu_notifier_invalidate_range_end(mm, addr_start, addr_end);
-			WARN_ON("mm_find_pmd failed, addr:%llx\n");
-			return -ENXIO;
-		}
-		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
-		flush_cache_page(vma, addr, pte_pfn(*pte));
-		old_pte = ptep_clear_flush(vma, addr, pte);
-		if (pte_dirty(old_pte) || PageDirty(page))
-			pages_dirty = true;
-		set_pte(pte, swp_entry_to_pte(swp_entry(SWP_USERSWAP_ENTRY, page_to_pfn(page))));
-		dec_mm_counter(mm, MM_ANONPAGES);
-		page_remove_rmap(page, false);
-		put_page(page);
-
-		pte_unmap_unlock(pte, ptl);
-		vma->vm_flags |= VM_USWAP;
-		page->mapping = NULL;
-		addr += PAGE_SIZE;
-		i++;
-	}
-	mmu_notifier_invalidate_range_end(mm, addr_start, addr_end);
-
-	addr_start = new_addr;
-	addr_end = new_addr + len;
-	addr = addr_start;
-	vma = find_vma(mm, addr);
-	i = 0;
-	while (addr < addr_end) {
-		page = pages[i];
-		if (addr > vma->vm_end - 1)
-			vma = find_vma(mm, addr);
-		err = vm_insert_page(vma, addr, page);
-		if (err) {
-			pr_err("vm_insert_page failed:%d\n", err);
-		}
-		i++;
-		addr += PAGE_SIZE;
-	}
-	vma->vm_flags |= VM_USWAP;
-
-	if (pages_dirty)
-		new_addr = new_addr | USWAP_PAGES_DIRTY;
-
-	return new_addr;
-}
-#endif
-
-/*
- * The caller must hold down_write(&current->mm->mmap_sem).
- */
 unsigned long __do_mmap(struct mm_struct *mm, struct file *file, unsigned long addr,
 			unsigned long len, unsigned long prot, unsigned long flags,
@@ -1557,12 +1391,6 @@ unsigned long __do_mmap(struct mm_struct *mm, struct file *file,
 			unsigned long *populate, struct list_head *uf)
 {
 	int pkey = 0;
-#ifdef CONFIG_USERSWAP
-	struct page **pages = NULL;
-	unsigned long addr_start = addr;
-	int i, page_num = 0;
-	unsigned long ret;
-#endif
 	*populate = 0;
@@ -1579,17 +1407,6 @@ unsigned long __do_mmap(struct mm_struct *mm, struct file *file,
 		if (!(file && path_noexec(&file->f_path)))
 			prot |= PROT_EXEC;
-#ifdef CONFIG_USERSWAP
-	if (enable_userswap && (flags & MAP_REPLACE)) {
-		if (offset_in_page(addr) || (len % PAGE_SIZE))
-			return -EINVAL;
-		page_num = len / PAGE_SIZE;
-		ret = pages_can_be_swapped(mm, addr, len, &pages);
-		if (ret)
-			return ret;
-	}
-#endif
-
 	/* force arch specific MAP_FIXED handling in get_unmapped_area */
 	if (flags & MAP_FIXED_NOREPLACE)
 		flags |= MAP_FIXED;
@@ -1766,25 +1583,203 @@ unsigned long __do_mmap(struct mm_struct *mm, struct file *file,
 	if (flags & MAP_CHECKNODE)
 		set_vm_checknode(&vm_flags, flags);
-#ifdef CONFIG_USERSWAP
-	/* mark the vma as special to avoid merging with other vmas */
-	if (enable_userswap && (flags & MAP_REPLACE))
-		vm_flags |= VM_SPECIAL;
-#endif
-
 	addr = __mmap_region(mm, file, addr, len, vm_flags, pgoff, uf);
 	if (!IS_ERR_VALUE(addr) &&
 	    ((vm_flags & VM_LOCKED) ||
 	     (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
 		*populate = len;
-#ifndef CONFIG_USERSWAP
 	return addr;
-#else
-	if (!enable_userswap || !(flags & MAP_REPLACE))
-		return addr;
+}
+#ifdef CONFIG_USERSWAP
+/*
+ * Check if pages between 'addr ~ addr+len' can be user swapped. If so, get
+ * the reference of the pages and return the pages through input parameters
+ * 'ppages'.
+ */
+int pages_can_be_swapped(struct mm_struct *mm, unsigned long addr,
+			 unsigned long len, struct page ***ppages)
+{
+	struct vm_area_struct *vma;
+	struct page *page = NULL;
+	struct page **pages = NULL;
+	unsigned long addr_start, addr_end;
+	unsigned long ret;
+	int i, page_num = 0;
+
+	pages = kmalloc(sizeof(struct page *) * (len / PAGE_SIZE), GFP_KERNEL);
+	if (!pages)
+		return -ENOMEM;
+
+	addr_start = addr;
+	addr_end = addr + len;
+	while (addr < addr_end) {
+		vma = find_vma(mm, addr);
+		if (!vma || !vma_is_anonymous(vma) ||
+		    (vma->vm_flags & VM_LOCKED) || vma->vm_file
+		    || (vma->vm_flags & VM_STACK) || (vma->vm_flags & (VM_IO | VM_PFNMAP))) {
+			ret = -EINVAL;
+			goto out;
+		}
+		if (!(vma->vm_flags & VM_UFFD_MISSING)) {
+			ret = -EAGAIN;
+			goto out;
+		}
+get_again:
+		/* follow_page will inc page ref, dec the ref after we remap the page */
+		page = follow_page(vma, addr, FOLL_GET);
+		if (IS_ERR_OR_NULL(page)) {
+			ret = -ENODEV;
+			goto out;
+		}
+		pages[page_num] = page;
+		page_num++;
+		if (!PageAnon(page) || !PageSwapBacked(page) || PageHuge(page) || PageSwapCache(page)) {
+			ret = -EINVAL;
+			goto out;
+		} else if (PageTransCompound(page)) {
+			if (trylock_page(page)) {
+				if (!split_huge_page(page)) {
+					put_page(page);
+					page_num--;
+					unlock_page(page);
+					goto get_again;
+				} else {
+					unlock_page(page);
+					ret = -EINVAL;
+					goto out;
+				}
+			} else {
+				ret = -EINVAL;
+				goto out;
+			}
+		}
+		if (page_mapcount(page) > 1 || page_mapcount(page) + 1 != page_count(page)) {
+			ret = -EBUSY;
+			goto out;
+		}
+		addr += PAGE_SIZE;
+	}
+
+	*ppages = pages;
+	return 0;
+
+out:
+	for (i = 0; i < page_num; i++)
+		put_page(pages[i]);
+	if (pages)
+		kfree(pages);
+	*ppages = NULL;
+	return ret;
+}
+
+/*
+ * In uswap situation, we use the bit 0 of the returned address to indicate
+ * whether the pages are dirty.
+ */
+#define USWAP_PAGES_DIRTY 1
+
+/* unmap the pages between 'addr ~ addr+len' and remap them to a new address */
+unsigned long do_user_swap(struct mm_struct *mm, unsigned long addr_start,
+			   unsigned long len, struct page **pages, unsigned long new_addr)
+{
+	struct vm_area_struct *vma;
+	struct page *page;
+	pmd_t *pmd;
+	pte_t *pte, old_pte;
+	spinlock_t *ptl;
+	unsigned long addr, addr_end;
+	bool pages_dirty = false;
+	int i, err;
+
+	addr_end = addr_start + len;
+	lru_add_drain();
+	mmu_notifier_invalidate_range_start(mm, addr_start, addr_end);
+	addr = addr_start;
+	i = 0;
+	while (addr < addr_end) {
+		page = pages[i];
+		vma = find_vma(mm, addr);
+		if (!vma) {
+			mmu_notifier_invalidate_range_end(mm, addr_start, addr_end);
+			WARN_ON("find_vma failed\n");
+			return -EINVAL;
+		}
+		pmd = mm_find_pmd(mm, addr);
+		if (!pmd) {
+			mmu_notifier_invalidate_range_end(mm, addr_start, addr_end);
+			WARN_ON("mm_find_pmd failed, addr:%llx\n");
+			return -ENXIO;
+		}
+		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+		flush_cache_page(vma, addr, pte_pfn(*pte));
+		old_pte = ptep_clear_flush(vma, addr, pte);
+		if (pte_dirty(old_pte) || PageDirty(page))
+			pages_dirty = true;
+		set_pte(pte, swp_entry_to_pte(swp_entry(SWP_USERSWAP_ENTRY, page_to_pfn(page))));
+		dec_mm_counter(mm, MM_ANONPAGES);
+		page_remove_rmap(page, false);
+		put_page(page);
+
+		pte_unmap_unlock(pte, ptl);
+		vma->vm_flags |= VM_USWAP;
+		page->mapping = NULL;
+		addr += PAGE_SIZE;
+		i++;
+	}
+	mmu_notifier_invalidate_range_end(mm, addr_start, addr_end);
+
+	addr_start = new_addr;
+	addr_end = new_addr + len;
+	addr = addr_start;
+	vma = find_vma(mm, addr);
+	i = 0;
+	while (addr < addr_end) {
+		page = pages[i];
+		if (addr > vma->vm_end - 1)
+			vma = find_vma(mm, addr);
+		err = vm_insert_page(vma, addr, page);
+		if (err) {
+			pr_err("vm_insert_page failed:%d\n", err);
+		}
+		i++;
+		addr += PAGE_SIZE;
+	}
+	vma->vm_flags |= VM_USWAP;
+
+	if (pages_dirty)
+		new_addr = new_addr | USWAP_PAGES_DIRTY;
+
+	return new_addr;
+}
+
+static inline
+unsigned long do_uswap_mmap(struct file *file, unsigned long addr,
+			    unsigned long len, unsigned long prot,
+			    unsigned long flags, vm_flags_t vm_flags,
+			    unsigned long pgoff, unsigned long *populate,
+			    struct list_head *uf)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long addr_start = addr;
+	struct page **pages = NULL;
+	unsigned long ret;
+	int i, page_num = 0;
+
+	if (!len || offset_in_page(addr) || (len % PAGE_SIZE))
+		return -EINVAL;
+
+	page_num = len / PAGE_SIZE;
+	ret = pages_can_be_swapped(mm, addr, len, &pages);
+	if (ret)
+		return ret;
+
+	/* mark the vma as special to avoid merging with other vmas */
+	vm_flags |= VM_SPECIAL;
+
+	addr = __do_mmap(current->mm, file, addr, len, prot, flags, vm_flags,
+			 pgoff, populate, uf);
 	if (IS_ERR_VALUE(addr)) {
-		pr_info("mmap_region failed, return addr:%lx\n", addr);
 		ret = addr;
 		goto out;
 	}
@@ -1794,23 +1789,32 @@ unsigned long __do_mmap(struct mm_struct *mm, struct file *file,
 	/* follow_page() above increased the reference*/
 	for (i = 0; i < page_num; i++)
 		put_page(pages[i]);
+
 	if (pages)
 		kfree(pages);
+
 	return ret;
-#endif
 }
+#endif
 /*
  * The caller must hold down_write(&current->mm->mmap_sem).
  */
-unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len,
-			unsigned long prot, unsigned long flags, vm_flags_t vm_flags,
-			unsigned long pgoff, unsigned long *populate, struct list_head *uf)
+unsigned long do_mmap(struct file *file, unsigned long addr,
+		      unsigned long len, unsigned long prot,
+		      unsigned long flags, vm_flags_t vm_flags,
+		      unsigned long pgoff, unsigned long *populate,
+		      struct list_head *uf)
 {
-	return __do_mmap(current->mm, file, addr, len, prot, flags, vm_flags, pgoff, populate, uf);
+#ifdef CONFIG_USERSWAP
+	if (enable_userswap && (flags & MAP_REPLACE))
+		return do_uswap_mmap(file, addr, len, prot, flags, vm_flags,
+				     pgoff, populate, uf);
+#endif
+	return __do_mmap(current->mm, file, addr, len, prot, flags, vm_flags,
+			 pgoff, populate, uf);
 }
-
 unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
 			      unsigned long prot, unsigned long flags,
 			      unsigned long fd, unsigned long pgoff)
From: Kefeng Wang <wangkefeng.wang@huawei.com>
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4AHP2
CVE: NA
-------------------------------------------------
Fix some format issues in mm/mmap.c.
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Xiongfeng Wang <wangxiongfeng2@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/mmap.c | 85 ++++++++++++++++++++++++++-----------------------------
 1 file changed, 40 insertions(+), 45 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 69848726063c7..cff48fde7beaa 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1431,7 +1431,6 @@ unsigned long __do_mmap(struct mm_struct *mm, struct file *file,
 	if (mm->map_count > sysctl_max_map_count)
 		return -ENOMEM;
-
 #ifdef CONFIG_ASCEND_AUTO_TUNING_HUGEPAGE
 	/* only notify flags with MAP_HUGETLB */
 	if (flags & MAP_HUGETLB && mmap_notifier_enable)
@@ -1597,13 +1596,13 @@ unsigned long __do_mmap(struct mm_struct *mm, struct file *file,
  * the reference of the pages and return the pages through input parameters
  * 'ppages'.
  */
-int pages_can_be_swapped(struct mm_struct *mm, unsigned long addr,
-			 unsigned long len, struct page ***ppages)
+static int pages_can_be_swapped(struct mm_struct *mm, unsigned long addr,
+				unsigned long len, struct page ***ppages)
 {
 	struct vm_area_struct *vma;
 	struct page *page = NULL;
 	struct page **pages = NULL;
-	unsigned long addr_start, addr_end;
+	unsigned long addr_end = addr + len;
 	unsigned long ret;
 	int i, page_num = 0;
@@ -1611,13 +1610,11 @@ int pages_can_be_swapped(struct mm_struct *mm, unsigned long addr,
 	if (!pages)
 		return -ENOMEM;
-	addr_start = addr;
-	addr_end = addr + len;
 	while (addr < addr_end) {
 		vma = find_vma(mm, addr);
-		if (!vma || !vma_is_anonymous(vma) ||
-		    (vma->vm_flags & VM_LOCKED) || vma->vm_file
-		    || (vma->vm_flags & VM_STACK) || (vma->vm_flags & (VM_IO | VM_PFNMAP))) {
+		if (!vma || !vma_is_anonymous(vma) || vma->vm_file ||
+		    (vma->vm_flags & VM_LOCKED) || (vma->vm_flags & VM_STACK) ||
+		    (vma->vm_flags & (VM_IO | VM_PFNMAP))) {
 			ret = -EINVAL;
 			goto out;
 		}
@@ -1632,9 +1629,9 @@ int pages_can_be_swapped(struct mm_struct *mm, unsigned long addr,
 			ret = -ENODEV;
 			goto out;
 		}
-		pages[page_num] = page;
-		page_num++;
-		if (!PageAnon(page) || !PageSwapBacked(page) || PageHuge(page) || PageSwapCache(page)) {
+		pages[page_num++] = page;
+		if (!PageAnon(page) || !PageSwapBacked(page) ||
+		    PageHuge(page) || PageSwapCache(page)) {
 			ret = -EINVAL;
 			goto out;
 		} else if (PageTransCompound(page)) {
@@ -1654,7 +1651,8 @@ int pages_can_be_swapped(struct mm_struct *mm, unsigned long addr,
 				goto out;
 			}
 		}
-		if (page_mapcount(page) > 1 || page_mapcount(page) + 1 != page_count(page)) {
+		if (page_mapcount(page) > 1 ||
+		    page_mapcount(page) + 1 != page_count(page)) {
 			ret = -EBUSY;
 			goto out;
 		}
@@ -1680,8 +1678,9 @@ int pages_can_be_swapped(struct mm_struct *mm, unsigned long addr,
 #define USWAP_PAGES_DIRTY 1
 /* unmap the pages between 'addr ~ addr+len' and remap them to a new address */
-unsigned long do_user_swap(struct mm_struct *mm, unsigned long addr_start,
-			   unsigned long len, struct page **pages, unsigned long new_addr)
+static unsigned long
+do_user_swap(struct mm_struct *mm, unsigned long addr_start, unsigned long len,
+	     struct page **pages, unsigned long new_addr)
 {
 	struct vm_area_struct *vma;
 	struct page *page;
@@ -1690,25 +1689,24 @@ unsigned long do_user_swap(struct mm_struct *mm, unsigned long addr_start,
 	spinlock_t *ptl;
 	unsigned long addr, addr_end;
 	bool pages_dirty = false;
-	int i, err;
+	int i = 0;
+	addr = addr_start;
 	addr_end = addr_start + len;
 	lru_add_drain();
 	mmu_notifier_invalidate_range_start(mm, addr_start, addr_end);
-	addr = addr_start;
-	i = 0;
 	while (addr < addr_end) {
 		page = pages[i];
 		vma = find_vma(mm, addr);
 		if (!vma) {
-			mmu_notifier_invalidate_range_end(mm, addr_start, addr_end);
-			WARN_ON("find_vma failed\n");
+			mmu_notifier_invalidate_range_end(mm, addr_start,
+							  addr_end);
 			return -EINVAL;
 		}
 		pmd = mm_find_pmd(mm, addr);
 		if (!pmd) {
-			mmu_notifier_invalidate_range_end(mm, addr_start, addr_end);
-			WARN_ON("mm_find_pmd failed, addr:%llx\n");
+			mmu_notifier_invalidate_range_end(mm, addr_start,
+							  addr_end);
 			return -ENXIO;
 		}
 		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
@@ -1716,7 +1714,8 @@ unsigned long do_user_swap(struct mm_struct *mm, unsigned long addr_start,
 		old_pte = ptep_clear_flush(vma, addr, pte);
 		if (pte_dirty(old_pte) || PageDirty(page))
 			pages_dirty = true;
-		set_pte(pte, swp_entry_to_pte(swp_entry(SWP_USERSWAP_ENTRY, page_to_pfn(page))));
+		set_pte(pte, swp_entry_to_pte(swp_entry(SWP_USERSWAP_ENTRY,
+							page_to_pfn(page))));
 		dec_mm_counter(mm, MM_ANONPAGES);
 		page_remove_rmap(page, false);
 		put_page(page);
@@ -1729,20 +1728,19 @@ unsigned long do_user_swap(struct mm_struct *mm, unsigned long addr_start,
 	}
 	mmu_notifier_invalidate_range_end(mm, addr_start, addr_end);
-	addr_start = new_addr;
-	addr_end = new_addr + len;
-	addr = addr_start;
+	addr = new_addr;
 	vma = find_vma(mm, addr);
 	i = 0;
-	while (addr < addr_end) {
-		page = pages[i];
+	while (addr < new_addr + len) {
 		if (addr > vma->vm_end - 1)
 			vma = find_vma(mm, addr);
-		err = vm_insert_page(vma, addr, page);
-		if (err) {
-			pr_err("vm_insert_page failed:%d\n", err);
-		}
-		i++;
+		if (!vma)
+			return -ENODEV;
+
+		page = pages[i++];
+		if (vm_insert_page(vma, addr, page))
+			return -EFAULT;
+
 		addr += PAGE_SIZE;
 	}
 	vma->vm_flags |= VM_USWAP;
@@ -1753,23 +1751,21 @@ unsigned long do_user_swap(struct mm_struct *mm, unsigned long addr_start,
 	return new_addr;
 }
-static inline
-unsigned long do_uswap_mmap(struct file *file, unsigned long addr,
-			    unsigned long len, unsigned long prot,
-			    unsigned long flags, vm_flags_t vm_flags,
-			    unsigned long pgoff, unsigned long *populate,
-			    struct list_head *uf)
+static inline unsigned long
+do_uswap_mmap(struct file *file, unsigned long addr, unsigned long len,
+	      unsigned long prot, unsigned long flags, vm_flags_t vm_flags,
+	      unsigned long pgoff, unsigned long *populate,
+	      struct list_head *uf)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long addr_start = addr;
+	unsigned long old_addr = addr;
 	struct page **pages = NULL;
 	unsigned long ret;
-	int i, page_num = 0;
+	int i;
 	if (!len || offset_in_page(addr) || (len % PAGE_SIZE))
 		return -EINVAL;
-	page_num = len / PAGE_SIZE;
 	ret = pages_can_be_swapped(mm, addr, len, &pages);
 	if (ret)
 		return ret;
@@ -1784,12 +1780,11 @@ unsigned long do_uswap_mmap(struct file *file, unsigned long addr,
 		goto out;
 	}
-	ret = do_user_swap(mm, addr_start, len, pages, addr);
+	ret = do_user_swap(mm, old_addr, len, pages, addr);
 out:
 	/* follow_page() above increased the reference*/
-	for (i = 0; i < page_num; i++)
+	for (i = 0; i < len / PAGE_SIZE; i++)
 		put_page(pages[i]);
-
 	if (pages)
 		kfree(pages);