From: Ma Wupeng <mawupeng1@huawei.com>
Cleanup for gmem.
Ma Wupeng (4):
  mm: gmem: Stop using gm_fault during alloc/free
  mm: gmem: Remove unused gm_mappings_free
  mm: gmem: Clean up gmem_handle_evict_page
  mm: gmem: Clean up gm flag helpers

 drivers/remote_pager/msg_handler_origin.c | 40 ++++++-------
 include/linux/gmem.h                      | 69 +++++------------------
 mm/gmem.c                                 | 31 ++--------
 mm/huge_memory.c                          |  3 +-
 mm/mmap.c                                 | 16 +-----
 mm/vm_object.c                            |  2 +-
 6 files changed, 46 insertions(+), 115 deletions(-)
From: Ma Wupeng <mawupeng1@huawei.com>

euleros inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7WLVX
---------------------------------------------
Stop using struct gm_fault_t during alloc/free, since these paths are not related to fault handling.
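Concretely, the peer VA callbacks in struct gm_mmu now take the mm and the VA range explicitly instead of a fault descriptor:

  gm_ret_t (*peer_va_alloc_fixed)(struct mm_struct *mm, unsigned long va,
                                  unsigned long size, unsigned long prot);
  gm_ret_t (*peer_va_free)(struct mm_struct *mm, unsigned long va,
                           unsigned long size);

The callers in mm/mmap.c pass mm, the start address and the length directly, so no temporary gm_fault_t needs to be filled in.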
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 drivers/remote_pager/msg_handler_origin.c | 17 ++++++++---------
 include/linux/gmem.h                      |  6 ++++--
 mm/mmap.c                                 | 16 ++--------------
 3 files changed, 14 insertions(+), 25 deletions(-)
diff --git a/drivers/remote_pager/msg_handler_origin.c b/drivers/remote_pager/msg_handler_origin.c
index 25a772eef031..59801560fe55 100644
--- a/drivers/remote_pager/msg_handler_origin.c
+++ b/drivers/remote_pager/msg_handler_origin.c
@@ -207,18 +207,18 @@ gm_ret_t gmem_unmap(struct gm_fault_t *gmf)
 	return GM_RET_SUCCESS;
 }
 
-gm_ret_t gmem_alloc(struct gm_fault_t *gmf)
+gm_ret_t gmem_alloc(struct mm_struct *mm, unsigned long va, unsigned long size,
+		    unsigned long prot)
 {
 	int ret = 0;
 	struct wait_station *ws;
 	struct comm_msg_rsp *rsp;
-	struct mm_struct *mm = gmf->mm;
 	struct svm_proc *proc = search_svm_proc_by_mm(mm);
 	struct gm_pager_msg_rq req = {
 		.peer_pid = proc->peer_pid,
-		.va = gmf->va,
-		.size = gmf->size,
-		.prot = gmf->prot,
+		.va = va,
+		.size = size,
+		.prot = prot,
 	};
 
 	if (!proc) {
@@ -243,17 +243,16 @@ gm_ret_t gmem_alloc(struct gm_fault_t *gmf)
 	return GM_RET_SUCCESS;
 }
 
-gm_ret_t gmem_free(struct gm_fault_t *gmf)
+gm_ret_t gmem_free(struct mm_struct *mm, unsigned long va, unsigned long size)
 {
 	int ret = 0;
 	struct wait_station *ws;
 	struct comm_msg_rsp *rsp;
-	struct mm_struct *mm = gmf->mm;
 	struct svm_proc *proc = search_svm_proc_by_mm(mm);
 	struct gm_pager_msg_rq req = {
 		.peer_pid = proc->peer_pid,
-		.va = gmf->va,
-		.size = gmf->size,
+		.va = va,
+		.size = size,
 	};
 
 	if (!proc) {
diff --git a/include/linux/gmem.h b/include/linux/gmem.h
index 128d9c4d88fd..6fd4580166f6 100644
--- a/include/linux/gmem.h
+++ b/include/linux/gmem.h
@@ -133,8 +133,10 @@ struct gm_mmu {
 	unsigned long cookie;
 
 	/* Synchronize VMA in a peer OS to interact with the host OS */
-	gm_ret_t (*peer_va_alloc_fixed)(struct gm_fault_t *gmf);
-	gm_ret_t (*peer_va_free)(struct gm_fault_t *gmf);
+	gm_ret_t (*peer_va_alloc_fixed)(struct mm_struct *mm, unsigned long va,
+					unsigned long size, unsigned long prot);
+	gm_ret_t (*peer_va_free)(struct mm_struct *mm, unsigned long va,
+				 unsigned long size);
 
 	/* Create physical mappings on peer host.
 	 * If copy is set, copy data [dma_addr, dma_addr + size] to peer host
diff --git a/mm/mmap.c b/mm/mmap.c
index a28b17c3cc83..91cad54a353f 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2473,11 +2473,7 @@ static void munmap_in_peer_devices(struct mm_struct *mm,
 		if (!ctx->dev->mmu->peer_va_free)
 			continue;
 
-		gmf.va = start;
-		gmf.size = end - start;
-		gmf.dev = ctx->dev;
-
-		ret = ctx->dev->mmu->peer_va_free(&gmf);
+		ret = ctx->dev->mmu->peer_va_free(mm, start, end - start);
 		if (ret != GM_RET_SUCCESS)
 			pr_debug("gmem: free_vma(start:%lx, len:%lx) ret %d\n",
 				 start, end - start, ret);
@@ -2733,12 +2729,6 @@ static int alloc_va_in_peer_devices(struct mm_struct *mm,
 {
 	gm_context_t *ctx, *tmp;
 	gm_ret_t ret;
-	struct gm_fault_t gmf = {
-		.mm = mm,
-		.va = addr,
-		.size = len,
-		.prot = vm_flags,
-	};
 
 	pr_debug("gmem: start mmap, as %p\n", mm->gm_as);
 	if (!mm->gm_as)
@@ -2761,10 +2751,8 @@ static int alloc_va_in_peer_devices(struct mm_struct *mm,
 			continue;
 		}
 
-		gmf.dev = ctx->dev;
-
 		pr_debug("gmem: call vma_alloc\n");
-		ret = ctx->dev->mmu->peer_va_alloc_fixed(&gmf);
+		ret = ctx->dev->mmu->peer_va_alloc_fixed(mm, addr, len, vm_flags);
 		if (ret != GM_RET_SUCCESS) {
 			pr_debug("gmem: alloc_vma ret %d\n", ret);
 			return ret;
From: Ma Wupeng <mawupeng1@huawei.com>

euleros inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7WLVX
---------------------------------------------
Remove unused gm_mappings_free.
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 include/linux/gmem.h |  1 -
 mm/gmem.c            | 19 -------------------
 2 files changed, 20 deletions(-)
diff --git a/include/linux/gmem.h b/include/linux/gmem.h
index 6fd4580166f6..000fe446f1e3 100644
--- a/include/linux/gmem.h
+++ b/include/linux/gmem.h
@@ -338,7 +338,6 @@ vm_fault_t gm_host_fault_locked(struct vm_fault *vmf, enum page_entry_size pe_si
 extern gm_ret_t gm_dev_register_physmem(gm_dev_t *dev, gm_pa_t begin, gm_pa_t end);
 extern void gm_dev_unregister_physmem(gm_dev_t *dev, unsigned int nid);
 extern gm_mapping_t *gm_mappings_alloc(unsigned int nid, unsigned int order);
-extern void gm_mappings_free(gm_mapping_t *mapping, unsigned int order);
 extern gm_ret_t gm_as_create(gm_va_t begin, gm_va_t end, gm_as_alloc_t policy, gm_va_t cache_quantum, gm_as_t **new_as);
 extern gm_ret_t gm_as_destroy(gm_as_t *as);
 extern gm_ret_t gm_as_attach(gm_as_t *as, gm_dev_t *dev, gm_mmu_mode_t mode, bool activate, gm_context_t **out_ctx);
diff --git a/mm/gmem.c b/mm/gmem.c
index 73ce5b2235a4..a8c52cc87fb8 100644
--- a/mm/gmem.c
+++ b/mm/gmem.c
@@ -433,25 +433,6 @@ gm_mapping_t *gm_mappings_alloc(unsigned int nid, unsigned int order)
 }
 EXPORT_SYMBOL_GPL(gm_mappings_alloc);
 
-void gm_mappings_free(gm_mapping_t *mapping, unsigned int order)
-{
-	gm_mapping_t *entry;
-	struct hnode *node = get_hnode(mapping->node_id);
-	XA_STATE(xas, &node->pages, 0);
-
-	/* TODO: support order > 0 */
-	if (order != 0)
-		return;
-
-	xas_for_each(&xas, entry, ULONG_MAX) {
-		if (entry == mapping) {
-			xas_set_mark(&xas, XA_MARK_0);
-			break;
-		}
-	}
-}
-EXPORT_SYMBOL_GPL(gm_mappings_free);
-
 /* GMEM Virtual Address Space API */
 gm_ret_t gm_as_create(gm_va_t begin, gm_va_t end, gm_as_alloc_t policy, gm_va_t cache_quantum,
 		gm_as_t **new_as)
From: Ma Wupeng <mawupeng1@huawei.com>

euleros inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7WLVX
---------------------------------------------
Rename the local variable gm_page in gmem_handle_evict_page() to gm_mapping to match its type (gm_mapping_t).
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 drivers/remote_pager/msg_handler_origin.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/drivers/remote_pager/msg_handler_origin.c b/drivers/remote_pager/msg_handler_origin.c
index 59801560fe55..d56d94842fc5 100644
--- a/drivers/remote_pager/msg_handler_origin.c
+++ b/drivers/remote_pager/msg_handler_origin.c
@@ -289,7 +289,7 @@ int gmem_handle_evict_page(struct rpg_kmsg_message *msg)
 	struct vm_area_struct *vma;
 	struct page *page;
 	dma_addr_t dma_addr;
-	gm_mapping_t *gm_page;
+	gm_mapping_t *gm_mapping;
 	struct device *dma_dev;
 	struct gm_fault_t gmf;
 	struct svm_proc *proc;
@@ -338,22 +338,22 @@ int gmem_handle_evict_page(struct rpg_kmsg_message *msg)
 		goto put_mm;
 	}
 
-	gm_page = vm_object_lookup(vma->vm_obj, addr);
-	if (!gm_page) {
+	gm_mapping = vm_object_lookup(vma->vm_obj, addr);
+	if (!gm_mapping) {
 		pr_err("evictim gm_page is NULL\n");
 		ret = -EINVAL;
 		goto put_mm;
 	}
 
-	mutex_lock(&gm_page->lock);
-	if (gm_mapping_willneed(gm_page)) {
+	mutex_lock(&gm_mapping->lock);
+	if (gm_mapping_willneed(gm_mapping)) {
 		pr_info("gmem: racing with prefetch or willneed so cancel evict\n");
-		clear_gm_mapping_willneed(gm_page);
+		clear_gm_mapping_willneed(gm_mapping);
 		ret = -EINVAL;
 		goto unlock;
 	}
 
-	if (!gm_mapping_device(gm_page)) {
+	if (!gm_mapping_device(gm_mapping)) {
 		pr_info("gmem: page is not in device\n");
 		ret = -EINVAL;
 		goto unlock;
@@ -372,9 +372,9 @@ int gmem_handle_evict_page(struct rpg_kmsg_message *msg)
 		goto unlock;
 	}
 
-	dma_dev = gm_page->dev->dma_dev;
+	dma_dev = gm_mapping->dev->dma_dev;
 	dma_addr = dma_map_page(dma_dev, page, 0, size, DMA_BIDIRECTIONAL);
-	gmf.dev = gm_page->dev;
+	gmf.dev = gm_mapping->dev;
 	gmf.dma_addr = dma_addr;
 
 	ret = gmem_unmap(&gmf);
@@ -385,10 +385,10 @@ int gmem_handle_evict_page(struct rpg_kmsg_message *msg)
 		goto unlock;
 	}
 
-	set_gm_mapping_host(gm_page, page);
+	set_gm_mapping_host(gm_mapping, page);
 
 unlock:
-	mutex_unlock(&gm_page->lock);
+	mutex_unlock(&gm_mapping->lock);
 put_mm:
 	mmput(mm);
 put_task:
From: Ma Wupeng <mawupeng1@huawei.com>

euleros inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7WLVX
---------------------------------------------
Unify the gm_mapping flag helpers into gm_mapping_flags_set()/gm_mapping_flags_clear(): replace the per-flag setters (set_gm_mapping_host, set_gm_mapping_device, set_gm_mapping_nomap, set/clear_gm_mapping_willneed, set/clear_gm_mapping_pinned) with the two generic helpers. The now-unused node_id field and HOST_NODE_ID definition are removed as well.
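The unified setter clears the mutually exclusive placement bits before recording a new one, while plain status bits are simply ORed in. A minimal standalone sketch of that behaviour (user-space C; the GM_PAGE_NOMAP/GM_PAGE_WILLNEED values and the GM_PAGE_TYPE_MASK definition below are illustrative assumptions, the real definitions live in include/linux/gmem.h):

  #include <stdio.h>

  #define GM_PAGE_CPU       0x10
  #define GM_PAGE_DEVICE    0x20
  #define GM_PAGE_NOMAP     0x40   /* assumed value for this sketch */
  #define GM_PAGE_WILLNEED  0x80   /* assumed value for this sketch */
  /* assumed: the mask covers the mutually exclusive placement bits */
  #define GM_PAGE_TYPE_MASK (GM_PAGE_CPU | GM_PAGE_DEVICE | GM_PAGE_NOMAP)

  struct gm_mapping { unsigned int flag; };

  static void gm_mapping_flags_set(struct gm_mapping *m, int flags)
  {
          /* a placement bit replaces whatever placement was recorded before */
          if (flags & GM_PAGE_TYPE_MASK)
                  m->flag &= ~GM_PAGE_TYPE_MASK;
          /* status bits such as WILLNEED are simply accumulated */
          m->flag |= flags;
  }

  int main(void)
  {
          struct gm_mapping m = { .flag = GM_PAGE_DEVICE | GM_PAGE_WILLNEED };

          gm_mapping_flags_set(&m, GM_PAGE_CPU);
          /* DEVICE is gone, CPU is set, WILLNEED survives: prints 0x90 */
          printf("flag = %#x\n", m.flag);
          return 0;
  }

This spares callers from clearing the previous placement bit by hand, which each of the old set_gm_mapping_host()/set_gm_mapping_device()/set_gm_mapping_nomap() helpers did individually.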
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 drivers/remote_pager/msg_handler_origin.c |  5 +-
 include/linux/gmem.h                      | 62 ++++------------------
 mm/gmem.c                                 | 12 ++---
 mm/huge_memory.c                          |  3 +-
 mm/vm_object.c                            |  2 +-
 5 files changed, 23 insertions(+), 61 deletions(-)
diff --git a/drivers/remote_pager/msg_handler_origin.c b/drivers/remote_pager/msg_handler_origin.c
index d56d94842fc5..29bd352e7d0f 100644
--- a/drivers/remote_pager/msg_handler_origin.c
+++ b/drivers/remote_pager/msg_handler_origin.c
@@ -348,7 +348,7 @@ int gmem_handle_evict_page(struct rpg_kmsg_message *msg)
 	mutex_lock(&gm_mapping->lock);
 	if (gm_mapping_willneed(gm_mapping)) {
 		pr_info("gmem: racing with prefetch or willneed so cancel evict\n");
-		clear_gm_mapping_willneed(gm_mapping);
+		gm_mapping_flags_clear(gm_mapping, GM_PAGE_WILLNEED);
 		ret = -EINVAL;
 		goto unlock;
 	}
@@ -385,7 +385,8 @@ int gmem_handle_evict_page(struct rpg_kmsg_message *msg)
 		goto unlock;
 	}
 
-	set_gm_mapping_host(gm_mapping, page);
+	gm_mapping_flags_set(gm_mapping, GM_PAGE_CPU);
+	gm_mapping->page = page;
 
 unlock:
 	mutex_unlock(&gm_mapping->lock);
diff --git a/include/linux/gmem.h b/include/linux/gmem.h
index 000fe446f1e3..e198180b8085 100644
--- a/include/linux/gmem.h
+++ b/include/linux/gmem.h
@@ -221,8 +221,6 @@ struct gm_dev {
 	gm_mapping_t *gm_mapping;
 };
 
-#define HOST_NODE_ID (-1)
-
 #define GM_PAGE_DIRTY 0x8 /* Whether the page is dirty */
 #define GM_PAGE_CPU 0x10 /* Determines whether page is a pointer or a pfn number. */
 #define GM_PAGE_DEVICE 0x20
@@ -234,15 +232,6 @@ struct gm_dev {
 
 /* Records the status of a page-size physical page */
 struct gm_mapping {
-	/*
-	 * The node index may have three definitions:
-	 * 1. a common CPU node
-	 * 2. a hetero-node, e.g. GPU (that not necessarily supports CC ld/st)
-	 * 3. a network ip (another OS that may have multiple hNUMA nodes), dynamically attached by dsm_attach
-	 * Among these definitions, #1 and #2 in combination defines an h-NUMA topology
-	 */
-	unsigned int node_id;
-
 	unsigned int flag;
 
 	union {
@@ -254,29 +243,27 @@ struct gm_mapping {
 	struct mutex lock;
 };
 
-static inline bool gm_mapping_cpu(gm_mapping_t *gm_mapping)
+static inline void gm_mapping_flags_set(gm_mapping_t *gm_mapping, int flags)
 {
-	return !!(gm_mapping->flag & GM_PAGE_CPU);
+	if (flags & GM_PAGE_TYPE_MASK)
+		gm_mapping->flag &= ~GM_PAGE_TYPE_MASK;
+
+	gm_mapping->flag |= flags;
 }
 
-static inline void set_gm_mapping_host(gm_mapping_t *gm_mapping, struct page *page)
+static inline void gm_mapping_flags_clear(gm_mapping_t *gm_mapping, int flags)
 {
-	gm_mapping->node_id = HOST_NODE_ID;
-	gm_mapping->flag &= ~GM_PAGE_TYPE_MASK;
-	gm_mapping->flag |= GM_PAGE_CPU;
-	gm_mapping->page = page;
+	gm_mapping->flag &= ~flags;
 }
 
-static inline bool gm_mapping_device(gm_mapping_t *gm_mapping)
+static inline bool gm_mapping_cpu(gm_mapping_t *gm_mapping)
 {
-	return !!(gm_mapping->flag & GM_PAGE_DEVICE);
+	return !!(gm_mapping->flag & GM_PAGE_CPU);
 }
 
-static inline void set_gm_mapping_device(gm_mapping_t *gm_mapping, gm_dev_t *dev)
+static inline bool gm_mapping_device(gm_mapping_t *gm_mapping)
 {
-	gm_mapping->flag &= ~GM_PAGE_TYPE_MASK;
-	gm_mapping->flag |= GM_PAGE_DEVICE;
-	gm_mapping->dev = dev;
+	return !!(gm_mapping->flag & GM_PAGE_DEVICE);
 }
 
 static inline bool gm_mapping_nomap(gm_mapping_t *gm_mapping)
@@ -284,38 +271,11 @@ static inline bool gm_mapping_nomap(gm_mapping_t *gm_mapping)
 	return !!(gm_mapping->flag & GM_PAGE_NOMAP);
 }
 
-static inline void set_gm_mapping_nomap(gm_mapping_t *gm_mapping)
-{
-	gm_mapping->flag &= ~GM_PAGE_TYPE_MASK;
-	gm_mapping->flag |= GM_PAGE_NOMAP;
-	gm_mapping->page = NULL;
-}
-
-static inline void set_gm_mapping_willneed(gm_mapping_t *gm_mapping)
-{
-	gm_mapping->flag |= GM_PAGE_WILLNEED;
-}
-
-static inline void clear_gm_mapping_willneed(gm_mapping_t *gm_mapping)
-{
-	gm_mapping->flag &= ~GM_PAGE_WILLNEED;
-}
-
 static inline bool gm_mapping_willneed(gm_mapping_t *gm_mapping)
 {
 	return !!(gm_mapping->flag & GM_PAGE_WILLNEED);
 }
 
-static inline void set_gm_mapping_pinned(gm_mapping_t *gm_mapping)
-{
-	gm_mapping->flag |= GM_PAGE_PINNED;
-}
-
-static inline void clear_gm_mapping_pinned(gm_mapping_t *gm_mapping)
-{
-	gm_mapping->flag &= ~GM_PAGE_PINNED;
-}
-
 static inline bool gm_mapping_pinned(gm_mapping_t *gm_mapping)
 {
 	return !!(gm_mapping->flag & GM_PAGE_PINNED);
diff --git a/mm/gmem.c b/mm/gmem.c
index a8c52cc87fb8..b6a36bec8668 100644
--- a/mm/gmem.c
+++ b/mm/gmem.c
@@ -254,7 +254,7 @@ gm_ret_t gm_dev_fault(struct mm_struct *mm, gm_va_t addr, gm_dev_t *dev, int beh
 	page = gm_mapping->page;
 	if (!page) {
 		pr_err("gmem: host gm_mapping page is NULL. Set nomap\n");
-		set_gm_mapping_nomap(gm_mapping);
+		gm_mapping_flags_set(gm_mapping, GM_PAGE_NOMAP);
 		goto unlock;
 	}
 	get_page(page);
@@ -274,13 +274,13 @@ gm_ret_t gm_dev_fault(struct mm_struct *mm, gm_va_t addr, gm_dev_t *dev, int beh
 		 * gmem page is migrating due to overcommit.
 		 * update page to willneed and this will stop page evicting
 		 */
-		set_gm_mapping_willneed(gm_mapping);
+		gm_mapping_flags_set(gm_mapping, GM_PAGE_WILLNEED);
 		gmem_state_counter(NR_PAGE_MIGRATING, 1);
 		ret = GM_RET_SUCCESS;
 	} else {
 		pr_err("gmem: peer map failed\n");
 		if (page) {
-			set_gm_mapping_nomap(gm_mapping);
+			gm_mapping_flags_set(gm_mapping, GM_PAGE_NOMAP);
 			put_page(page);
 		}
 	}
@@ -292,7 +292,8 @@ gm_ret_t gm_dev_fault(struct mm_struct *mm, gm_va_t addr, gm_dev_t *dev, int beh
 		put_page(page);
 	}
 
-	set_gm_mapping_device(gm_mapping, dev);
+	gm_mapping_flags_set(gm_mapping, GM_PAGE_DEVICE);
+	gm_mapping->dev = dev;
 unlock:
 	mutex_unlock(&gm_mapping->lock);
 mmap_unlock:
@@ -368,7 +369,6 @@ gm_ret_t gm_dev_register_physmem(gm_dev_t *dev, gm_pa_t begin, gm_pa_t end)
 		goto deinit_hnode;
 
 	for (i = 0; i < page_num; i++, addr += PAGE_SIZE) {
-		mapping[i].node_id = hnode->id;
 		mapping[i].pfn = addr >> PAGE_SHIFT;
 		mapping[i].flag = 0;
 	}
@@ -698,7 +698,7 @@ static int hmadvise_do_eagerfree(unsigned long addr, size_t size)
 				continue;
 			}
 		}
-		set_gm_mapping_nomap(gm_mapping);
+		gm_mapping_flags_set(gm_mapping, GM_PAGE_NOMAP);
 		mutex_unlock(&gm_mapping->lock);
 	} while (start += page_size, start != end);
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 7f90f8fb6b0c..aac116da2552 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -742,7 +742,8 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 	count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
 #ifdef CONFIG_GMEM
 	if (vma_is_peer_shared(vma)) {
-		set_gm_mapping_host(gm_mapping, page);
+		gm_mapping_flags_set(gm_mapping, GM_PAGE_CPU);
+		gm_mapping->page = page;
 		mutex_unlock(&gm_mapping->lock);
 	}
 #endif
diff --git a/mm/vm_object.c b/mm/vm_object.c
index ac1a115e4ee1..6030a0d01595 100644
--- a/mm/vm_object.c
+++ b/mm/vm_object.c
@@ -51,7 +51,7 @@ gm_mapping_t *alloc_gm_mapping(void)
 	if (!gm_mapping)
 		return NULL;
 
-	set_gm_mapping_nomap(gm_mapping);
+	gm_mapping_flags_set(gm_mapping, GM_PAGE_NOMAP);
 	mutex_init(&gm_mapping->lock);
 
 	return gm_mapping;
FeedBack: The patch(es) which you have sent to kernel@openeuler.org mailing list has been converted to a pull request successfully! Pull request link: https://gitee.com/openeuler/kernel/pulls/1982 Mailing list address: https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/E...