From: Peng Wu <wupeng58@huawei.com>
ascend inclusion
category: feature
bugzilla: NA
CVE: NA
-------------------------------------------
Add a function for getting the node id, which can be used to allocate share pool memory on a specified memory node.
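For illustration, a caller would encode the target device id into the upper bits of sp_flags. The helpers below are hypothetical (not added by this patch) and only show the bit layout that sp_alloc_area() decodes:

  #include <linux/share_pool.h>

  /* hypothetical helper: pack a Da-vinci device id (= memory node) into sp_flags */
  static inline unsigned long sp_flags_set_node(unsigned long sp_flags, int node_id)
  {
  	return sp_flags | (((unsigned long)node_id & DEVICE_ID_MASK) << DEVICE_ID_SHIFT);
  }

  /* hypothetical helper: the same extraction sp_alloc_area() performs */
  static inline int sp_flags_get_node(unsigned long sp_flags)
  {
  	return (sp_flags >> DEVICE_ID_SHIFT) & DEVICE_ID_MASK;
  }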
Signed-off-by: Peng Wu <wupeng58@huawei.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Reviewed-by: chenweilong <chenweilong@huawei.com>
Reviewed-by: Tang Yizhou <tangyizhou@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/linux/share_pool.h | 15 +++++++++---
 mm/share_pool.c            | 47 +++++++++++++++++++++++++++++++++-----
 2 files changed, 53 insertions(+), 9 deletions(-)

diff --git a/include/linux/share_pool.h b/include/linux/share_pool.h
index 164efeb81889d..b0b2750e7bbe1 100644
--- a/include/linux/share_pool.h
+++ b/include/linux/share_pool.h
@@ -10,6 +10,10 @@
 #define SP_HUGEPAGE		(1 << 0)
 #define SP_HUGEPAGE_ONLY	(1 << 1)
 #define SP_DVPP			(1 << 2)
+#define DEVICE_ID_MASK		0x3ff
+#define DEVICE_ID_SHIFT		32
+#define SP_FLAG_MASK		(SP_HUGEPAGE | SP_HUGEPAGE_ONLY | SP_DVPP | \
+				 (_AC(DEVICE_ID_MASK, UL) << DEVICE_ID_SHIFT))
 
 #define SPG_ID_NONE	-1	/* not associated with sp_group, only for specified thread */
 #define SPG_ID_DEFAULT	0	/* use the spg id of current thread */
@@ -22,7 +26,7 @@
 #define SPG_ID_DVPP_PASS_THROUGH_MAX	899999
 #define SPG_ID_DVPP_PASS_THROUGH	900000
 
-#define MAX_DEVID 1	/* the max num of Da-vinci devices */
+#define MAX_DEVID 2	/* the max num of Da-vinci devices */
 
 /* to align the pointer to the (next) PMD boundary */
 #define PMD_ALIGN(addr)		ALIGN(addr, PMD_SIZE)
@@ -54,9 +58,9 @@ extern bool vmap_allow_huge;
  * |-------------------- 8T -------------------|---|------ 8T ------------|
  * | Device 0 | Device 1 |...| |
  * |----------------------------------------------------------------------|
- * |- 16G -|- 16G -|- 16G -|- 16G -| | | | |
+ * |------------- 16G -------------| 16G | | |
  * | DVPP GROUP0 | DVPP GROUP1 | ... | ... |...| sp normal memory |
- * | svm | sp | svm | sp | | | | |
+ * | sp | sp | | | | |
  * |----------------------------------------------------------------------|
  *
  * The host SVM feature reserves 8T virtual memory by mmap, and due to the
@@ -181,6 +185,7 @@ extern void sp_proc_stat_drop(struct sp_proc_stat *stat);
 extern void spa_overview_show(struct seq_file *seq);
 extern void spg_overview_show(struct seq_file *seq);
 extern void proc_sharepool_init(void);
+extern int sp_node_id(struct vm_area_struct *vma);
 
 static inline struct task_struct *sp_get_task(struct mm_struct *mm)
 {
@@ -485,6 +490,10 @@ static inline void sp_free_pages(struct page *page, struct vm_struct *area)
 {
 }
 
+static inline int sp_node_id(struct vm_area_struct *vma)
+{
+	return numa_node_id();
+}
 #endif
 
 #endif /* LINUX_SHARE_POOL_H */
diff --git a/mm/share_pool.c b/mm/share_pool.c
index b44af9a7c233e..90930e4a8dfe4 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -241,6 +241,7 @@ struct sp_area {
 	struct mm_struct *mm;		/* owner of k2u(task) */
 	unsigned long kva;		/* shared kva */
 	pid_t applier;			/* the original applier process */
+	int node_id;			/* memory node */
 };
 static DEFINE_SPINLOCK(sp_area_lock);
 static struct rb_root sp_area_root = RB_ROOT;
@@ -863,11 +864,13 @@ static struct sp_area *sp_alloc_area(unsigned long size, unsigned long flags,
 	unsigned long vend = MMAP_SHARE_POOL_16G_START;
 	unsigned long addr;
 	unsigned long size_align = PMD_ALIGN(size); /* va aligned to 2M */
+	int node_id = (flags >> DEVICE_ID_SHIFT) & DEVICE_ID_MASK;
 
 	if ((flags & SP_DVPP)) {
 		if (sp_area_customized == false) {
-			vstart = MMAP_SHARE_POOL_16G_START;
-			vend = MMAP_SHARE_POOL_16G_START + MMAP_SHARE_POOL_16G_SIZE;
+			vstart = MMAP_SHARE_POOL_16G_START +
+				node_id * MMAP_SHARE_POOL_16G_SIZE;
+			vend = vstart + MMAP_SHARE_POOL_16G_SIZE;
 		} else {
 			if (!spg) {
 				pr_err_ratelimited("share pool: don't allow k2u(task) in host svm multiprocess scene\n");
@@ -878,7 +881,7 @@ static struct sp_area *sp_alloc_area(unsigned long size, unsigned long flags,
 		}
 	}
 
-	spa = kmalloc(sizeof(struct sp_area), GFP_KERNEL);
+	spa = __kmalloc_node(sizeof(struct sp_area), GFP_KERNEL, node_id);
 	if (unlikely(!spa)) {
 		pr_err_ratelimited("share pool: alloc spa failed due to lack of memory\n");
 		return ERR_PTR(-ENOMEM);
@@ -973,6 +976,7 @@ static struct sp_area *sp_alloc_area(unsigned long size, unsigned long flags,
 	spa->mm = NULL;
 	spa->kva = 0;   /* NULL pointer */
 	spa->applier = applier;
+	spa->node_id = node_id;
 
 	if (spa_inc_usage(type, size, (flags & SP_DVPP))) {
 		err = ERR_PTR(-EINVAL);
@@ -1379,7 +1383,7 @@ void *sp_alloc(unsigned long size, unsigned long sp_flags, int spg_id)
 		return ERR_PTR(-EINVAL);
 	}
 
-	if (sp_flags & ~(SP_HUGEPAGE_ONLY | SP_HUGEPAGE | SP_DVPP)) {
+	if (sp_flags & (~SP_FLAG_MASK)) {
 		pr_err_ratelimited("share pool: allocation failed, invalid flag %lx\n", sp_flags);
 		return ERR_PTR(-EINVAL);
 	}
@@ -2606,7 +2610,8 @@ EXPORT_SYMBOL_GPL(sp_config_dvpp_range);
 static bool is_sp_normal_addr(unsigned long addr)
 {
 	return addr >= MMAP_SHARE_POOL_START &&
-		addr < MMAP_SHARE_POOL_16G_START + MMAP_SHARE_POOL_16G_SIZE;
+		addr < MMAP_SHARE_POOL_16G_START +
+			MAX_DEVID * MMAP_SHARE_POOL_16G_SIZE;
 }
 
 /**
@@ -2634,6 +2639,26 @@ bool is_sharepool_addr(unsigned long addr)
 }
 EXPORT_SYMBOL_GPL(is_sharepool_addr);
 
+int sp_node_id(struct vm_area_struct *vma)
+{
+	struct sp_area *spa;
+	int node_id = numa_node_id();
+
+	if (!enable_ascend_share_pool)
+		return node_id;
+
+	if (vma) {
+		spa = __find_sp_area(vma->vm_start);
+		if (spa) {
+			node_id = spa->node_id;
+			__sp_area_drop(spa);
+		}
+	}
+
+	return node_id;
+}
+EXPORT_SYMBOL_GPL(sp_node_id);
+
 static int __init mdc_default_group(char *s)
 {
 	enable_mdc_default_group = 1;
@@ -2999,6 +3024,16 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm,
 	unsigned long haddr = address & huge_page_mask(h);
 	bool new_page = false;
 	int err;
+	int node_id;
+	struct sp_area *spa;
+
+	spa = __find_sp_area(vma->vm_start);
+	if (!spa) {
+		pr_err("share pool: vma is invalid, not from sp mmap\n");
+		return ret;
+	}
+	node_id = spa->node_id;
+	__sp_area_drop(spa);
 
 retry:
 	page = find_lock_page(mapping, idx);
@@ -3010,7 +3045,7 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm,
 		page = alloc_huge_page(vma, haddr, 0);
 		if (IS_ERR(page)) {
 			page = alloc_huge_page_node(hstate_file(vma->vm_file),
-				numa_mem_id());
+				node_id);
 			if (!page)
 				page = ERR_PTR(-ENOMEM);
 		}
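Usage sketch for the new interface (alloc_page_for_sp_vma() is a hypothetical consumer, not part of this patch): an allocation path that holds an sp-mapped vma can ask for the preferred node via sp_node_id(), which returns spa->node_id for share pool mappings and falls back to numa_node_id() otherwise:

  #include <linux/gfp.h>
  #include <linux/share_pool.h>

  static struct page *alloc_page_for_sp_vma(struct vm_area_struct *vma, gfp_t gfp)
  {
  	int nid = sp_node_id(vma);	/* spa->node_id, or the local node */

  	return alloc_pages_node(nid, gfp, 0);	/* order-0 page on that node */
  }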
From: Peng Wu <wupeng58@huawei.com>
ascend inclusion
category: feature
bugzilla: NA
CVE: NA
---------------------------------------------
In some scenarios, users need to specify the memory node from which shared memory is allocated.
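In simplified form, the fault-time flow wired up below (shmem_fault_alloc_sketch() is a hypothetical condensation, not the verbatim code): shmem_getpage_gfp() resolves the target node once through shmem_node_id() and hands it down to the page allocation helpers:

  static struct page *shmem_fault_alloc_sketch(struct vm_area_struct *vma,
  					     struct shmem_inode_info *info,
  					     pgoff_t index, gfp_t gfp)
  {
  	int node_id = shmem_node_id(vma);	/* sp_node_id() under the hood */

  	return shmem_alloc_page(gfp, info, index, node_id);
  }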
Signed-off-by: Peng Wu <wupeng58@huawei.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Reviewed-by: chenweilong <chenweilong@huawei.com>
Reviewed-by: Tang Yizhou <tangyizhou@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/shmem.c | 36 ++++++++++++++++++++++++------------
 1 file changed, 24 insertions(+), 12 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index bd20f3c601bd6..f08d5ce17a092 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -84,6 +84,7 @@ static struct vfsmount *shm_mnt;
 #include <asm/pgtable.h>
 
 #include "internal.h"
+#include <linux/share_pool.h>
 
 #define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
 #define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
@@ -1542,8 +1543,13 @@ static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
 	return page;
 }
 
+static int shmem_node_id(struct vm_area_struct *vma)
+{
+	return sp_node_id(vma);
+}
+
 static struct page *shmem_alloc_hugepage(gfp_t gfp,
-		struct shmem_inode_info *info, pgoff_t index)
+		struct shmem_inode_info *info, pgoff_t index, int node_id)
 {
 	struct vm_area_struct pvma;
 	struct inode *inode = &info->vfs_inode;
@@ -1566,7 +1572,7 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp,
 
 	shmem_pseudo_vma_init(&pvma, info, hindex);
 	page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
-			HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
+			HPAGE_PMD_ORDER, &pvma, 0, node_id, true);
 	shmem_pseudo_vma_destroy(&pvma);
 	if (page)
 		prep_transhuge_page(page);
@@ -1574,13 +1580,14 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp,
 }
 
 static struct page *shmem_alloc_page(gfp_t gfp,
-		struct shmem_inode_info *info, pgoff_t index)
+		struct shmem_inode_info *info, pgoff_t index,
+		int node_id)
 {
 	struct vm_area_struct pvma;
 	struct page *page;
 
 	shmem_pseudo_vma_init(&pvma, info, index);
-	page = alloc_page_vma(gfp, &pvma, 0);
+	page = alloc_page_vma_node(gfp, &pvma, 0, node_id);
 	shmem_pseudo_vma_destroy(&pvma);
 
 	return page;
@@ -1588,7 +1595,7 @@ static struct page *shmem_alloc_page(gfp_t gfp,
 
 static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
 		struct inode *inode,
-		pgoff_t index, bool huge)
+		pgoff_t index, bool huge, int node_id)
 {
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	struct page *page;
@@ -1603,9 +1610,9 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
 		goto failed;
 
 	if (huge)
-		page = shmem_alloc_hugepage(gfp, info, index);
+		page = shmem_alloc_hugepage(gfp, info, index, node_id);
 	else
-		page = shmem_alloc_page(gfp, info, index);
+		page = shmem_alloc_page(gfp, info, index, node_id);
 	if (page) {
 		__SetPageLocked(page);
 		__SetPageSwapBacked(page);
@@ -1654,7 +1661,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 	 * limit chance of success by further cpuset and node constraints.
 	 */
 	gfp &= ~GFP_CONSTRAINT_MASK;
-	newpage = shmem_alloc_page(gfp, info, index);
+	newpage = shmem_alloc_page(gfp, info, index, numa_node_id());
 	if (!newpage)
 		return -ENOMEM;
 
@@ -1730,6 +1737,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 	int error;
 	int once = 0;
 	int alloced = 0;
+	int node_id;
 
 	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
 		return -EFBIG;
@@ -1881,11 +1889,15 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 			goto alloc_nohuge;
 	}
 
+	node_id = shmem_node_id(vma);
+
 alloc_huge:
-	page = shmem_alloc_and_acct_page(gfp, inode, index, true);
+	page = shmem_alloc_and_acct_page(gfp, inode, index, true,
+					 node_id);
 	if (IS_ERR(page)) {
-alloc_nohuge:	page = shmem_alloc_and_acct_page(gfp, inode,
-				index, false);
+alloc_nohuge:
+		page = shmem_alloc_and_acct_page(gfp, inode,
+						 index, false, node_id);
 	}
 	if (IS_ERR(page)) {
 		int retry = 5;
@@ -2377,7 +2389,7 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 	}
 
 	if (!*pagep) {
-		page = shmem_alloc_page(gfp, info, pgoff);
+		page = shmem_alloc_page(gfp, info, pgoff, numa_node_id());
 		if (!page)
 			goto out_unacct_blocks;
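Putting the two patches together, an end-to-end caller might look as follows (sp_alloc_from_device() is a hypothetical wrapper; the device id encoded in sp_flags is used directly as the memory node id):

  #include <linux/share_pool.h>

  static void *sp_alloc_from_device(unsigned long size, int spg_id, unsigned long device_id)
  {
  	unsigned long sp_flags = (device_id & DEVICE_ID_MASK) << DEVICE_ID_SHIFT;

  	/* faults on this mapping resolve the node via sp_node_id() == device_id */
  	return sp_alloc(size, sp_flags, spg_id);
  }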