From: Kefeng Wang <wangkefeng.wang@huawei.com>
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4AHP2
CVE: NA
-------------------------------------------------
Fix some format issues in mm/mmap.c.
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Xiongfeng Wang <wangxiongfeng2@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/mmap.c | 85 ++++++++++++++++++++++++++-----------------------------
 1 file changed, 40 insertions(+), 45 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c index 69848726063c7..cff48fde7beaa 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1431,7 +1431,6 @@ unsigned long __do_mmap(struct mm_struct *mm, struct file *file, if (mm->map_count > sysctl_max_map_count) return -ENOMEM;
- #ifdef CONFIG_ASCEND_AUTO_TUNING_HUGEPAGE /* only notify flags with MAP_HUGETLB */ if (flags & MAP_HUGETLB && mmap_notifier_enable) @@ -1597,13 +1596,13 @@ unsigned long __do_mmap(struct mm_struct *mm, struct file *file, * the reference of the pages and return the pages through input parameters * 'ppages'. */ -int pages_can_be_swapped(struct mm_struct *mm, unsigned long addr, - unsigned long len, struct page ***ppages) +static int pages_can_be_swapped(struct mm_struct *mm, unsigned long addr, + unsigned long len, struct page ***ppages) { struct vm_area_struct *vma; struct page *page = NULL; struct page **pages = NULL; - unsigned long addr_start, addr_end; + unsigned long addr_end = addr + len; unsigned long ret; int i, page_num = 0;
@@ -1611,13 +1610,11 @@ int pages_can_be_swapped(struct mm_struct *mm, unsigned long addr, if (!pages) return -ENOMEM;
- addr_start = addr; - addr_end = addr + len; while (addr < addr_end) { vma = find_vma(mm, addr); - if (!vma || !vma_is_anonymous(vma) || - (vma->vm_flags & VM_LOCKED) || vma->vm_file - || (vma->vm_flags & VM_STACK) || (vma->vm_flags & (VM_IO | VM_PFNMAP))) { + if (!vma || !vma_is_anonymous(vma) || vma->vm_file || + (vma->vm_flags & VM_LOCKED) || (vma->vm_flags & VM_STACK) || + (vma->vm_flags & (VM_IO | VM_PFNMAP))) { ret = -EINVAL; goto out; } @@ -1632,9 +1629,9 @@ int pages_can_be_swapped(struct mm_struct *mm, unsigned long addr, ret = -ENODEV; goto out; } - pages[page_num] = page; - page_num++; - if (!PageAnon(page) || !PageSwapBacked(page) || PageHuge(page) || PageSwapCache(page)) { + pages[page_num++] = page; + if (!PageAnon(page) || !PageSwapBacked(page) || + PageHuge(page) || PageSwapCache(page)) { ret = -EINVAL; goto out; } else if (PageTransCompound(page)) { @@ -1654,7 +1651,8 @@ int pages_can_be_swapped(struct mm_struct *mm, unsigned long addr, goto out; } } - if (page_mapcount(page) > 1 || page_mapcount(page) + 1 != page_count(page)) { + if (page_mapcount(page) > 1 || + page_mapcount(page) + 1 != page_count(page)) { ret = -EBUSY; goto out; } @@ -1680,8 +1678,9 @@ int pages_can_be_swapped(struct mm_struct *mm, unsigned long addr, #define USWAP_PAGES_DIRTY 1
/* unmap the pages between 'addr ~ addr+len' and remap them to a new address */ -unsigned long do_user_swap(struct mm_struct *mm, unsigned long addr_start, - unsigned long len, struct page **pages, unsigned long new_addr) +static unsigned long +do_user_swap(struct mm_struct *mm, unsigned long addr_start, unsigned long len, + struct page **pages, unsigned long new_addr) { struct vm_area_struct *vma; struct page *page; @@ -1690,25 +1689,24 @@ unsigned long do_user_swap(struct mm_struct *mm, unsigned long addr_start, spinlock_t *ptl; unsigned long addr, addr_end; bool pages_dirty = false; - int i, err; + int i = 0;
+ addr = addr_start; addr_end = addr_start + len; lru_add_drain(); mmu_notifier_invalidate_range_start(mm, addr_start, addr_end); - addr = addr_start; - i = 0; while (addr < addr_end) { page = pages[i]; vma = find_vma(mm, addr); if (!vma) { - mmu_notifier_invalidate_range_end(mm, addr_start, addr_end); - WARN_ON("find_vma failed\n"); + mmu_notifier_invalidate_range_end(mm, addr_start, + addr_end); return -EINVAL; } pmd = mm_find_pmd(mm, addr); if (!pmd) { - mmu_notifier_invalidate_range_end(mm, addr_start, addr_end); - WARN_ON("mm_find_pmd failed, addr:%llx\n"); + mmu_notifier_invalidate_range_end(mm, addr_start, + addr_end); return -ENXIO; } pte = pte_offset_map_lock(mm, pmd, addr, &ptl); @@ -1716,7 +1714,8 @@ unsigned long do_user_swap(struct mm_struct *mm, unsigned long addr_start, old_pte = ptep_clear_flush(vma, addr, pte); if (pte_dirty(old_pte) || PageDirty(page)) pages_dirty = true; - set_pte(pte, swp_entry_to_pte(swp_entry(SWP_USERSWAP_ENTRY, page_to_pfn(page)))); + set_pte(pte, swp_entry_to_pte(swp_entry(SWP_USERSWAP_ENTRY, + page_to_pfn(page)))); dec_mm_counter(mm, MM_ANONPAGES); page_remove_rmap(page, false); put_page(page); @@ -1729,20 +1728,19 @@ unsigned long do_user_swap(struct mm_struct *mm, unsigned long addr_start, } mmu_notifier_invalidate_range_end(mm, addr_start, addr_end);
- addr_start = new_addr; - addr_end = new_addr + len; - addr = addr_start; + addr = new_addr; vma = find_vma(mm, addr); i = 0; - while (addr < addr_end) { - page = pages[i]; + while (addr < new_addr + len) { if (addr > vma->vm_end - 1) vma = find_vma(mm, addr); - err = vm_insert_page(vma, addr, page); - if (err) { - pr_err("vm_insert_page failed:%d\n", err); - } - i++; + if (!vma) + return -ENODEV; + + page = pages[i++]; + if (vm_insert_page(vma, addr, page)) + return -EFAULT; + addr += PAGE_SIZE; } vma->vm_flags |= VM_USWAP; @@ -1753,23 +1751,21 @@ unsigned long do_user_swap(struct mm_struct *mm, unsigned long addr_start, return new_addr; }
-static inline -unsigned long do_uswap_mmap(struct file *file, unsigned long addr, - unsigned long len, unsigned long prot, - unsigned long flags, vm_flags_t vm_flags, - unsigned long pgoff, unsigned long *populate, - struct list_head *uf) +static inline unsigned long +do_uswap_mmap(struct file *file, unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, vm_flags_t vm_flags, + unsigned long pgoff, unsigned long *populate, + struct list_head *uf) { struct mm_struct *mm = current->mm; - unsigned long addr_start = addr; + unsigned long old_addr = addr; struct page **pages = NULL; unsigned long ret; - int i, page_num = 0; + int i;
if (!len || offset_in_page(addr) || (len % PAGE_SIZE)) return -EINVAL;
- page_num = len / PAGE_SIZE; ret = pages_can_be_swapped(mm, addr, len, &pages); if (ret) return ret; @@ -1784,12 +1780,11 @@ unsigned long do_uswap_mmap(struct file *file, unsigned long addr, goto out; }
- ret = do_user_swap(mm, addr_start, len, pages, addr); + ret = do_user_swap(mm, old_addr, len, pages, addr); out: /* follow_page() above increased the reference*/ - for (i = 0; i < page_num; i++) + for (i = 0; i < len / PAGE_SIZE; i++) put_page(pages[i]); - if (pages) kfree(pages);