From: Ma Wupeng <mawupeng1@huawei.com>
Two bug fixes for gmem.
Ma Wupeng (2):
  mm: gmem: Return false if hnid is bigger than MAX_NUMNODES
  mm: gmem: Iterate all vmas via find_vma_intersection
 include/linux/gmem.h |  7 ++--
 mm/gmem.c            | 98 ++++++++++++++++++++++++++++----------------
 2 files changed, 66 insertions(+), 39 deletions(-)
From: Ma Wupeng <mawupeng1@huawei.com>
euleros inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I7WLVX
---------------------------------------------
Return false if hnid is bigger than MAX_NUMNODES, so that an out-of-range node id is never passed to node_isset().
Fixes: 46a7894b5e4c ("mm: gmem: Introduce GMEM")
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 include/linux/gmem.h | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/include/linux/gmem.h b/include/linux/gmem.h
index e198180b8085..1786f8676376 100644
--- a/include/linux/gmem.h
+++ b/include/linux/gmem.h
@@ -327,13 +327,14 @@ extern struct hnode *hnodes[];
 static inline bool is_hnode(int node)
 {
-	return !node_isset(node, node_possible_map)
-		&& node_isset(node, hnode_map);
+	return (node < MAX_NUMNODES) && !node_isset(node, node_possible_map) &&
+		node_isset(node, hnode_map);
 }
 
 static inline bool is_hnode_allowed(int node)
 {
-	return is_hnode(node) && node_isset(node, current->mems_allowed);
+	return (node < MAX_NUMNODES) && is_hnode(node) &&
+		node_isset(node, current->mems_allowed);
 }
 
static inline struct hnode *get_hnode(unsigned int hnid)
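For context (not part of the patch): the fix relies on C's short-circuit evaluation, so the (node < MAX_NUMNODES) test guarantees node_isset() never sees an out-of-range bit index; the same pattern is applied in both is_hnode() and is_hnode_allowed(). A minimal userspace sketch, with a single unsigned long standing in for the kernel's nodemask_t/hnode_map:

	/* Userspace illustration only; MAX_NUMNODES and the one-word map
	 * are stand-ins for the kernel's nodemask machinery. */
	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_NUMNODES 8
	static unsigned long hnode_map;		/* one bit per hnode */

	static bool is_hnode(int node)
	{
		/* The bounds check must come first: without it, a node id
		 * >= MAX_NUMNODES would test a bit outside the map. */
		return (node < MAX_NUMNODES) && (hnode_map & (1UL << node));
	}

	int main(void)
	{
		hnode_map = 1UL << 2;		/* pretend node 2 is an hnode */
		printf("%d %d\n", is_hnode(2), is_hnode(64));	/* prints: 1 0 */
		return 0;
	}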
From: Ma Wupeng <mawupeng1@huawei.com>
euleros inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7WLVX
---------------------------------------------
Iterate over all vmas via find_vma_intersection() rather than walking the range page by page, so address ranges not covered by any vma are skipped instead of being probed one huge page at a time.
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 mm/gmem.c | 98 +++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 62 insertions(+), 36 deletions(-)
diff --git a/mm/gmem.c b/mm/gmem.c
index 90a5b5fda284..ca2213307919 100644
--- a/mm/gmem.c
+++ b/mm/gmem.c
@@ -676,50 +676,25 @@ static int hmadvise_do_prefetch(gm_dev_t *dev, unsigned long addr, size_t size)
 	return res;
 }
 
-static int hmadvise_do_eagerfree(unsigned long addr, size_t size)
+static int gmem_unmap_vma_pages(struct vm_area_struct *vma, unsigned long start,
+				unsigned long end, int page_size)
 {
-	int page_size = HPAGE_SIZE;
-	struct vm_area_struct *vma;
-	int ret = GM_RET_SUCCESS;
-	unsigned long start, end;
-	gm_mapping_t *gm_mapping;
 	struct gm_fault_t gmf = {
 		.mm = current->mm,
 		.size = page_size,
 		.copy = false,
 	};
-	unsigned long old_start;
+	gm_mapping_t *gm_mapping;
 	vm_object_t *obj;
+	int ret;
 
-	/* overflow */
-	if (check_add_overflow(addr, size, &end))
-		return -EINVAL;
-
-	old_start = addr;
-
-	/* Align addr by rounding inward to avoid excessive page release. */
-	end = round_down(end, page_size);
-	start = round_up(addr, page_size);
-	if (start >= end)
-		return ret;
-
-	/* Check to see whether len was rounded up from small -ve to zero */
-	if (old_start && !start)
+	obj = vma->vm_obj;
+	if (!obj) {
+		pr_err("gmem: peer-shared vma should have vm_object\n");
 		return -EINVAL;
+	}
 
-	mmap_read_lock(current->mm);
-	do {
-		vma = find_vma(current->mm, start);
-		if (!vma || !vma_is_peer_shared(vma)) {
-			pr_info_ratelimited("gmem: not peer-shared vma, skip dontneed\n");
-			continue;
-		}
-		obj = vma->vm_obj;
-		if (!obj) {
-			pr_err("gmem: peer-shared vma should have vm_object\n");
-			mmap_read_unlock(current->mm);
-			return -EINVAL;
-		}
+	for (; start < end; start += page_size) {
 		xa_lock(obj->logical_page_table);
 		gm_mapping = vm_object_lookup(obj, start);
 		if (!gm_mapping) {
@@ -738,14 +713,65 @@ static int hmadvise_do_eagerfree(unsigned long addr, size_t size)
 			gmf.dev = gm_mapping->dev;
 			ret = gm_mapping->dev->mmu->peer_unmap(&gmf);
 			if (ret) {
-				pr_err("gmem: peer_unmap failed. ret %d\n", ret);
+				pr_err("gmem: peer_unmap failed. ret %d\n",
+				       ret);
 				mutex_unlock(&gm_mapping->lock);
 				continue;
 			}
 		}
 		gm_mapping_flags_set(gm_mapping, GM_PAGE_NOMAP);
 		mutex_unlock(&gm_mapping->lock);
-	} while (start += page_size, start != end);
+	}
+
+	return 0;
+}
+
+static int hmadvise_do_eagerfree(unsigned long addr, size_t size)
+{
+	unsigned long start, end, i_start, i_end;
+	int page_size = HPAGE_SIZE;
+	struct vm_area_struct *vma;
+	int ret = GM_RET_SUCCESS;
+	unsigned long old_start;
+
+	/* overflow */
+	if (check_add_overflow(addr, size, &end))
+		return -EINVAL;
+
+	old_start = addr;
+
+	/* Align addr by rounding inward to avoid excessive page release. */
+	end = round_down(end, page_size);
+	start = round_up(addr, page_size);
+	if (start >= end)
+		return ret;
+
+	/* Check to see whether len was rounded up from small -ve to zero */
+	if (old_start && !start)
+		return -EINVAL;
+
+	mmap_read_lock(current->mm);
+	do {
+		vma = find_vma_intersection(current->mm, start, end);
+		if (!vma) {
+			pr_info("gmem: there is no valid vma\n");
+			break;
+		}
+
+		if (!vma_is_peer_shared(vma)) {
+			pr_debug("gmem: not peer-shared vma, skip dontneed\n");
+			start = vma->vm_end;
+			continue;
+		}
+
+		i_start = start > vma->vm_start ? start : vma->vm_start;
+		i_end = end < vma->vm_end ? end : vma->vm_end;
+		ret = gmem_unmap_vma_pages(vma, i_start, i_end, page_size);
+		if (ret)
+			break;
+
+		start = vma->vm_end;
+	} while (start < end);
 	mmap_read_unlock(current->mm);
 	return ret;
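As a side note (not part of the patch), the new loop shape can be illustrated in userspace: walk only the vmas that intersect [start, end), clamp the work to each intersection, and advance to vma->vm_end so holes are never probed. The vma array and the find_vma_intersection() stand-in below are simplified assumptions; the kernel helper also takes an mm and is backed by the maple tree.

	/* Userspace sketch of the vma-by-vma iteration pattern. */
	#include <stdio.h>

	struct vma { unsigned long vm_start, vm_end; };

	static struct vma vmas[] = {	/* sorted, non-overlapping, with a hole */
		{ 0x200000, 0x400000 },
		{ 0x800000, 0xa00000 },
	};

	/* first vma with vm_start < end and vm_end > start, or NULL */
	static struct vma *find_vma_intersection(unsigned long start, unsigned long end)
	{
		for (unsigned int i = 0; i < sizeof(vmas) / sizeof(vmas[0]); i++)
			if (vmas[i].vm_start < end && vmas[i].vm_end > start)
				return &vmas[i];
		return NULL;
	}

	int main(void)
	{
		unsigned long start = 0x300000, end = 0x900000, i_start, i_end;
		struct vma *vma;

		while (start < end) {
			vma = find_vma_intersection(start, end);
			if (!vma)
				break;			/* nothing left to unmap */
			i_start = start > vma->vm_start ? start : vma->vm_start;
			i_end = end < vma->vm_end ? end : vma->vm_end;
			printf("unmap [%#lx, %#lx)\n", i_start, i_end);
			start = vma->vm_end;		/* jump over any hole */
		}
		return 0;
	}

With these example ranges it prints "unmap [0x300000, 0x400000)" and "unmap [0x800000, 0x900000)", skipping the gap between the two vmas, which is the behavioural difference from the old page-by-page walk.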
FeedBack: The patch(es) you sent to the kernel@openeuler.org mailing list have been converted to a pull request successfully!
Pull request link: https://gitee.com/openeuler/kernel/pulls/2493
Mailing list address: https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/Q...