From: Ma Wupeng <mawupeng1@huawei.com>
euleros inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7WLVX

---------------------------------------------
Unify the dedicated gm_mapping flag helpers (set_gm_mapping_host/device/nomap
and the set/clear helpers for willneed and pinned) into a single
gm_mapping_flags_set()/gm_mapping_flags_clear() pair that takes the flag
explicitly. gm_mapping_flags_set() clears GM_PAGE_TYPE_MASK before setting a
type flag, so the page-type states (CPU, device, nomap) stay mutually
exclusive, as with the old helpers. The page/dev pointer assignments that some
helpers bundled in move to the call sites, and the now-unused node_id field
and HOST_NODE_ID definition are removed.
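To make the new semantics concrete, below is a minimal userspace sketch of the
helpers (illustration only, not part of the patch). The
GM_PAGE_DIRTY/CPU/DEVICE values match the header; the GM_PAGE_NOMAP and
GM_PAGE_WILLNEED values and the composition of GM_PAGE_TYPE_MASK are assumed
here, since this patch does not touch their definitions:

	#include <assert.h>

	#define GM_PAGE_DIRTY		0x8	/* value from include/linux/gmem.h */
	#define GM_PAGE_CPU		0x10	/* value from include/linux/gmem.h */
	#define GM_PAGE_DEVICE		0x20	/* value from include/linux/gmem.h */
	#define GM_PAGE_NOMAP		0x40	/* assumed value for illustration */
	#define GM_PAGE_WILLNEED	0x80	/* assumed value for illustration */

	/* Assumed composition: the mask covers the mutually exclusive type bits. */
	#define GM_PAGE_TYPE_MASK	(GM_PAGE_CPU | GM_PAGE_DEVICE | GM_PAGE_NOMAP)

	struct gm_mapping {
		unsigned int flag;
	};

	/* Same logic as the new helper: setting a type bit replaces the old type. */
	static void gm_mapping_flags_set(struct gm_mapping *gm_mapping, int flags)
	{
		if (flags & GM_PAGE_TYPE_MASK)
			gm_mapping->flag &= ~GM_PAGE_TYPE_MASK;

		gm_mapping->flag |= flags;
	}

	static void gm_mapping_flags_clear(struct gm_mapping *gm_mapping, int flags)
	{
		gm_mapping->flag &= ~flags;
	}

	int main(void)
	{
		struct gm_mapping m = { .flag = 0 };

		/* A CPU page marked willneed... */
		gm_mapping_flags_set(&m, GM_PAGE_CPU);
		gm_mapping_flags_set(&m, GM_PAGE_WILLNEED);

		/* ...retyped as nomap: the CPU bit goes away, willneed survives. */
		gm_mapping_flags_set(&m, GM_PAGE_NOMAP);
		assert(!(m.flag & GM_PAGE_CPU));
		assert(m.flag & GM_PAGE_NOMAP);
		assert(m.flag & GM_PAGE_WILLNEED);

		/* Status bits are dropped one by one, as the evict path now does. */
		gm_mapping_flags_clear(&m, GM_PAGE_WILLNEED);
		assert(!(m.flag & GM_PAGE_WILLNEED));

		return 0;
	}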
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 drivers/remote_pager/msg_handler_origin.c |  5 +-
 include/linux/gmem.h                      | 62 ++++------------------
 mm/gmem.c                                 | 12 ++---
 mm/huge_memory.c                          |  3 +-
 mm/vm_object.c                            |  2 +-
 5 files changed, 23 insertions(+), 61 deletions(-)
diff --git a/drivers/remote_pager/msg_handler_origin.c b/drivers/remote_pager/msg_handler_origin.c
index d56d94842fc5..29bd352e7d0f 100644
--- a/drivers/remote_pager/msg_handler_origin.c
+++ b/drivers/remote_pager/msg_handler_origin.c
@@ -348,7 +348,7 @@ int gmem_handle_evict_page(struct rpg_kmsg_message *msg)
 	mutex_lock(&gm_mapping->lock);
 	if (gm_mapping_willneed(gm_mapping)) {
 		pr_info("gmem: racing with prefetch or willneed so cancel evict\n");
-		clear_gm_mapping_willneed(gm_mapping);
+		gm_mapping_flags_clear(gm_mapping, GM_PAGE_WILLNEED);
 		ret = -EINVAL;
 		goto unlock;
 	}
@@ -385,7 +385,8 @@ int gmem_handle_evict_page(struct rpg_kmsg_message *msg)
 		goto unlock;
 	}
 
-	set_gm_mapping_host(gm_mapping, page);
+	gm_mapping_flags_set(gm_mapping, GM_PAGE_CPU);
+	gm_mapping->page = page;
 
 unlock:
 	mutex_unlock(&gm_mapping->lock);
diff --git a/include/linux/gmem.h b/include/linux/gmem.h
index 000fe446f1e3..e198180b8085 100644
--- a/include/linux/gmem.h
+++ b/include/linux/gmem.h
@@ -221,8 +221,6 @@ struct gm_dev {
 	gm_mapping_t *gm_mapping;
 };
 
-#define HOST_NODE_ID (-1)
-
 #define GM_PAGE_DIRTY	0x8 /* Whether the page is dirty */
 #define GM_PAGE_CPU	0x10 /* Determines whether page is a pointer or a pfn number. */
 #define GM_PAGE_DEVICE	0x20
@@ -234,15 +232,6 @@ struct gm_dev {
 
 /* Records the status of a page-size physical page */
 struct gm_mapping {
-	/*
-	 * The node index may have three definitions:
-	 * 1. a common CPU node
-	 * 2. a hetero-node, e.g. GPU (that not necessarily supports CC ld/st)
-	 * 3. a network ip (another OS that may have multiple hNUMA nodes), dynamically attached by dsm_attach
-	 * Among these definitions, #1 and #2 in combination defines an h-NUMA topology
-	 */
-	unsigned int node_id;
-
 	unsigned int flag;
 
 	union {
@@ -254,29 +243,27 @@ struct gm_mapping {
 	struct mutex lock;
 };
 
-static inline bool gm_mapping_cpu(gm_mapping_t *gm_mapping)
+static inline void gm_mapping_flags_set(gm_mapping_t *gm_mapping, int flags)
 {
-	return !!(gm_mapping->flag & GM_PAGE_CPU);
+	if (flags & GM_PAGE_TYPE_MASK)
+		gm_mapping->flag &= ~GM_PAGE_TYPE_MASK;
+
+	gm_mapping->flag |= flags;
 }
 
-static inline void set_gm_mapping_host(gm_mapping_t *gm_mapping, struct page *page)
+static inline void gm_mapping_flags_clear(gm_mapping_t *gm_mapping, int flags)
 {
-	gm_mapping->node_id = HOST_NODE_ID;
-	gm_mapping->flag &= ~GM_PAGE_TYPE_MASK;
-	gm_mapping->flag |= GM_PAGE_CPU;
-	gm_mapping->page = page;
+	gm_mapping->flag &= ~flags;
 }
 
-static inline bool gm_mapping_device(gm_mapping_t *gm_mapping)
+static inline bool gm_mapping_cpu(gm_mapping_t *gm_mapping)
 {
-	return !!(gm_mapping->flag & GM_PAGE_DEVICE);
+	return !!(gm_mapping->flag & GM_PAGE_CPU);
 }
 
-static inline void set_gm_mapping_device(gm_mapping_t *gm_mapping, gm_dev_t *dev)
+static inline bool gm_mapping_device(gm_mapping_t *gm_mapping)
 {
-	gm_mapping->flag &= ~GM_PAGE_TYPE_MASK;
-	gm_mapping->flag |= GM_PAGE_DEVICE;
-	gm_mapping->dev = dev;
+	return !!(gm_mapping->flag & GM_PAGE_DEVICE);
 }
 
 static inline bool gm_mapping_nomap(gm_mapping_t *gm_mapping)
@@ -284,38 +271,11 @@ static inline bool gm_mapping_nomap(gm_mapping_t *gm_mapping)
 	return !!(gm_mapping->flag & GM_PAGE_NOMAP);
 }
 
-static inline void set_gm_mapping_nomap(gm_mapping_t *gm_mapping)
-{
-	gm_mapping->flag &= ~GM_PAGE_TYPE_MASK;
-	gm_mapping->flag |= GM_PAGE_NOMAP;
-	gm_mapping->page = NULL;
-}
-
-static inline void set_gm_mapping_willneed(gm_mapping_t *gm_mapping)
-{
-	gm_mapping->flag |= GM_PAGE_WILLNEED;
-}
-
-static inline void clear_gm_mapping_willneed(gm_mapping_t *gm_mapping)
-{
-	gm_mapping->flag &= ~GM_PAGE_WILLNEED;
-}
-
 static inline bool gm_mapping_willneed(gm_mapping_t *gm_mapping)
 {
 	return !!(gm_mapping->flag & GM_PAGE_WILLNEED);
 }
 
-static inline void set_gm_mapping_pinned(gm_mapping_t *gm_mapping)
-{
-	gm_mapping->flag |= GM_PAGE_PINNED;
-}
-
-static inline void clear_gm_mapping_pinned(gm_mapping_t *gm_mapping)
-{
-	gm_mapping->flag &= ~GM_PAGE_PINNED;
-}
-
 static inline bool gm_mapping_pinned(gm_mapping_t *gm_mapping)
 {
 	return !!(gm_mapping->flag & GM_PAGE_PINNED);
diff --git a/mm/gmem.c b/mm/gmem.c
index a8c52cc87fb8..b6a36bec8668 100644
--- a/mm/gmem.c
+++ b/mm/gmem.c
@@ -254,7 +254,7 @@ gm_ret_t gm_dev_fault(struct mm_struct *mm, gm_va_t addr, gm_dev_t *dev, int beh
 		page = gm_mapping->page;
 		if (!page) {
 			pr_err("gmem: host gm_mapping page is NULL. Set nomap\n");
-			set_gm_mapping_nomap(gm_mapping);
+			gm_mapping_flags_set(gm_mapping, GM_PAGE_NOMAP);
 			goto unlock;
 		}
 		get_page(page);
@@ -274,13 +274,13 @@ gm_ret_t gm_dev_fault(struct mm_struct *mm, gm_va_t addr, gm_dev_t *dev, int beh
 			 * gmem page is migrating due to overcommit.
 			 * update page to willneed and this will stop page evicting
 			 */
-			set_gm_mapping_willneed(gm_mapping);
+			gm_mapping_flags_set(gm_mapping, GM_PAGE_WILLNEED);
 			gmem_state_counter(NR_PAGE_MIGRATING, 1);
 			ret = GM_RET_SUCCESS;
 		} else {
 			pr_err("gmem: peer map failed\n");
 			if (page) {
-				set_gm_mapping_nomap(gm_mapping);
+				gm_mapping_flags_set(gm_mapping, GM_PAGE_NOMAP);
 				put_page(page);
 			}
 		}
@@ -292,7 +292,8 @@ gm_ret_t gm_dev_fault(struct mm_struct *mm, gm_va_t addr, gm_dev_t *dev, int beh
 		put_page(page);
 	}
 
-	set_gm_mapping_device(gm_mapping, dev);
+	gm_mapping_flags_set(gm_mapping, GM_PAGE_DEVICE);
+	gm_mapping->dev = dev;
 unlock:
 	mutex_unlock(&gm_mapping->lock);
 mmap_unlock:
@@ -368,7 +369,6 @@ gm_ret_t gm_dev_register_physmem(gm_dev_t *dev, gm_pa_t begin, gm_pa_t end)
 		goto deinit_hnode;
 
 	for (i = 0; i < page_num; i++, addr += PAGE_SIZE) {
-		mapping[i].node_id = hnode->id;
 		mapping[i].pfn = addr >> PAGE_SHIFT;
 		mapping[i].flag = 0;
 	}
@@ -698,7 +698,7 @@ static int hmadvise_do_eagerfree(unsigned long addr, size_t size)
 				continue;
 			}
 		}
-		set_gm_mapping_nomap(gm_mapping);
+		gm_mapping_flags_set(gm_mapping, GM_PAGE_NOMAP);
 		mutex_unlock(&gm_mapping->lock);
 	} while (start += page_size, start != end);
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 7f90f8fb6b0c..aac116da2552 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -742,7 +742,8 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 		count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);
 #ifdef CONFIG_GMEM
 		if (vma_is_peer_shared(vma)) {
-			set_gm_mapping_host(gm_mapping, page);
+			gm_mapping_flags_set(gm_mapping, GM_PAGE_CPU);
+			gm_mapping->page = page;
 			mutex_unlock(&gm_mapping->lock);
 		}
 #endif
diff --git a/mm/vm_object.c b/mm/vm_object.c
index ac1a115e4ee1..6030a0d01595 100644
--- a/mm/vm_object.c
+++ b/mm/vm_object.c
@@ -51,7 +51,7 @@ gm_mapping_t *alloc_gm_mapping(void)
 	if (!gm_mapping)
 		return NULL;
 
-	set_gm_mapping_nomap(gm_mapping);
+	gm_mapping_flags_set(gm_mapping, GM_PAGE_NOMAP);
 	mutex_init(&gm_mapping->lock);
 
 	return gm_mapping;
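
A note on the conversion pattern: set_gm_mapping_host() and
set_gm_mapping_device() bundled a flag update with a pointer assignment, so
each of their call sites becomes two statements, for example:

	gm_mapping_flags_set(gm_mapping, GM_PAGE_CPU);
	gm_mapping->page = page;

Every converted call site above already runs under
mutex_lock(&gm_mapping->lock), so splitting the helper into two statements
does not open a window where the flag and the pointer disagree.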