From: Ma Wupeng <mawupeng1@huawei.com>
euleros inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7WLVX

---------------------------------------------
Iterate all vmas with find_vma_intersection() rather than walking all
pages.
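
For illustration, the new walk follows this pattern (a simplified
sketch with error handling trimmed; the actual patch open-codes the
min/max clamping with ternaries):

	mmap_read_lock(current->mm);
	do {
		/* First vma overlapping [start, end); holes are skipped. */
		vma = find_vma_intersection(current->mm, start, end);
		if (!vma)
			break;
		if (vma_is_peer_shared(vma))
			/* Clamp the unmap range to the vma boundaries. */
			gmem_unmap_vma_pages(vma, max(start, vma->vm_start),
					     min(end, vma->vm_end), page_size);
		start = vma->vm_end;	/* jump over the whole vma at once */
	} while (start < end);
	mmap_read_unlock(current->mm);
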
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 mm/gmem.c | 98 +++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 62 insertions(+), 36 deletions(-)

diff --git a/mm/gmem.c b/mm/gmem.c
index 90a5b5fda284..ca2213307919 100644
--- a/mm/gmem.c
+++ b/mm/gmem.c
@@ -676,50 +676,25 @@ static int hmadvise_do_prefetch(gm_dev_t *dev, unsigned long addr, size_t size)
 	return res;
 }
 
-static int hmadvise_do_eagerfree(unsigned long addr, size_t size)
+static int gmem_unmap_vma_pages(struct vm_area_struct *vma, unsigned long start,
+				unsigned long end, int page_size)
 {
-	int page_size = HPAGE_SIZE;
-	struct vm_area_struct *vma;
-	int ret = GM_RET_SUCCESS;
-	unsigned long start, end;
-	gm_mapping_t *gm_mapping;
 	struct gm_fault_t gmf = {
 		.mm = current->mm,
 		.size = page_size,
 		.copy = false,
 	};
-	unsigned long old_start;
+	gm_mapping_t *gm_mapping;
 	vm_object_t *obj;
+	int ret;
 
-	/* overflow */
-	if (check_add_overflow(addr, size, &end))
-		return -EINVAL;
-
-	old_start = addr;
-
-	/* Align addr by rounding inward to avoid excessive page release. */
-	end = round_down(end, page_size);
-	start = round_up(addr, page_size);
-	if (start >= end)
-		return ret;
-
-	/* Check to see whether len was rounded up from small -ve to zero */
-	if (old_start && !start)
+	obj = vma->vm_obj;
+	if (!obj) {
+		pr_err("gmem: peer-shared vma should have vm_object\n");
 		return -EINVAL;
+	}
 
-	mmap_read_lock(current->mm);
-	do {
-		vma = find_vma(current->mm, start);
-		if (!vma || !vma_is_peer_shared(vma)) {
-			pr_info_ratelimited("gmem: not peer-shared vma, skip dontneed\n");
-			continue;
-		}
-		obj = vma->vm_obj;
-		if (!obj) {
-			pr_err("gmem: peer-shared vma should have vm_object\n");
-			mmap_read_unlock(current->mm);
-			return -EINVAL;
-		}
+	for (; start < end; start += page_size) {
 		xa_lock(obj->logical_page_table);
 		gm_mapping = vm_object_lookup(obj, start);
 		if (!gm_mapping) {
@@ -738,14 +713,65 @@ static int hmadvise_do_eagerfree(unsigned long addr, size_t size)
 			gmf.dev = gm_mapping->dev;
 			ret = gm_mapping->dev->mmu->peer_unmap(&gmf);
 			if (ret) {
-				pr_err("gmem: peer_unmap failed. ret %d\n", ret);
+				pr_err("gmem: peer_unmap failed. ret %d\n",
+				       ret);
 				mutex_unlock(&gm_mapping->lock);
 				continue;
 			}
 		}
 		gm_mapping_flags_set(gm_mapping, GM_PAGE_NOMAP);
 		mutex_unlock(&gm_mapping->lock);
-	} while (start += page_size, start != end);
+	}
+
+	return 0;
+}
+
+static int hmadvise_do_eagerfree(unsigned long addr, size_t size)
+{
+	unsigned long start, end, i_start, i_end;
+	int page_size = HPAGE_SIZE;
+	struct vm_area_struct *vma;
+	int ret = GM_RET_SUCCESS;
+	unsigned long old_start;
+
+	/* overflow */
+	if (check_add_overflow(addr, size, &end))
+		return -EINVAL;
+
+	old_start = addr;
+
+	/* Align addr by rounding inward to avoid excessive page release. */
+	end = round_down(end, page_size);
+	start = round_up(addr, page_size);
+	if (start >= end)
+		return ret;
+
+	/* Check to see whether len was rounded up from small -ve to zero */
+	if (old_start && !start)
+		return -EINVAL;
+
+	mmap_read_lock(current->mm);
+	do {
+		vma = find_vma_intersection(current->mm, start, end);
+		if (!vma) {
+			pr_info("gmem: there is no valid vma\n");
+			break;
+		}
+
+		if (!vma_is_peer_shared(vma)) {
+			pr_debug("gmem: not peer-shared vma, skip dontneed\n");
+			start = vma->vm_end;
+			continue;
+		}
+
+		i_start = start > vma->vm_start ? start : vma->vm_start;
+		i_end = end < vma->vm_end ? end : vma->vm_end;
+		ret = gmem_unmap_vma_pages(vma, i_start, i_end, page_size);
+		if (ret)
+			break;
+
+		start = vma->vm_end;
+	} while (start < end);
 
 	mmap_read_unlock(current->mm);
 	return ret;
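
For reference, the inward rounding kept in hmadvise_do_eagerfree()
works out as follows (hypothetical numbers, assuming HPAGE_SIZE is
2 MiB):

	/*
	 * addr  = 0x200001, size = 0x400000  =>  addr + size = 0x600001
	 * end   = round_down(0x600001, HPAGE_SIZE) = 0x600000
	 * start = round_up(0x200001, HPAGE_SIZE)   = 0x400000
	 *
	 * Only the huge page at 0x400000, fully inside [addr, addr + size),
	 * is released; the partially covered pages at either end stay
	 * mapped.
	 */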