From: Peng Wu <wupeng58@huawei.com>
ascend inclusion
category: feature
bugzilla: NA
CVE: NA
---------------------------------------------
In some scenarios, users need to specify the NUMA node from which shared
memory is allocated. Pass a node id down through the shmem allocation
helpers and derive it from the faulting VMA via sp_node_id(), so that
share pool mappings can have their shmem pages allocated on the requested
node; other paths keep using the local node.
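For reference, a minimal sketch of the node selection this change assumes
(illustrative only: sp_node_id() comes from <linux/share_pool.h> and is not
defined by this patch; vma_is_share_pool() and sp_preferred_node() are
hypothetical helpers used purely to show the expected behaviour):

	/* Illustrative sketch, not part of this patch. */
	static inline int sp_node_id_sketch(struct vm_area_struct *vma)
	{
		/* share pool mappings allocate on the node chosen for the pool */
		if (vma && vma_is_share_pool(vma))	/* hypothetical helper */
			return sp_preferred_node(vma);	/* hypothetical helper */

		/* everything else keeps the old local-node behaviour */
		return numa_node_id();
	}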
Signed-off-by: Peng Wu <wupeng58@huawei.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Reviewed-by: chenweilong <chenweilong@huawei.com>
Reviewed-by: Tang Yizhou <tangyizhou@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/shmem.c | 36 ++++++++++++++++++++++++------------
 1 file changed, 24 insertions(+), 12 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index bd20f3c601bd6..f08d5ce17a092 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -84,6 +84,7 @@ static struct vfsmount *shm_mnt;
 #include <asm/pgtable.h>
 
#include "internal.h" +#include <linux/share_pool.h>
 #define BLOCKS_PER_PAGE (PAGE_SIZE/512)
 #define VM_ACCT(size) (PAGE_ALIGN(size) >> PAGE_SHIFT)
@@ -1542,8 +1543,13 @@ static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
 	return page;
 }
 
+static int shmem_node_id(struct vm_area_struct *vma)
+{
+	return sp_node_id(vma);
+}
+
 static struct page *shmem_alloc_hugepage(gfp_t gfp,
-		struct shmem_inode_info *info, pgoff_t index)
+		struct shmem_inode_info *info, pgoff_t index, int node_id)
 {
 	struct vm_area_struct pvma;
 	struct inode *inode = &info->vfs_inode;
@@ -1566,7 +1572,7 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp,
shmem_pseudo_vma_init(&pvma, info, hindex); page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN, - HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true); + HPAGE_PMD_ORDER, &pvma, 0, node_id, true); shmem_pseudo_vma_destroy(&pvma); if (page) prep_transhuge_page(page); @@ -1574,13 +1580,14 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp, }
 static struct page *shmem_alloc_page(gfp_t gfp,
-			struct shmem_inode_info *info, pgoff_t index)
+			struct shmem_inode_info *info, pgoff_t index,
+			int node_id)
 {
 	struct vm_area_struct pvma;
 	struct page *page;
 
shmem_pseudo_vma_init(&pvma, info, index); - page = alloc_page_vma(gfp, &pvma, 0); + page = alloc_page_vma_node(gfp, &pvma, 0, node_id); shmem_pseudo_vma_destroy(&pvma);
 	return page;
@@ -1588,7 +1595,7 @@ static struct page *shmem_alloc_page(gfp_t gfp,
 static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
 		struct inode *inode,
-		pgoff_t index, bool huge)
+		pgoff_t index, bool huge, int node_id)
 {
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	struct page *page;
@@ -1603,9 +1610,9 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
 		goto failed;
 
 	if (huge)
-		page = shmem_alloc_hugepage(gfp, info, index);
+		page = shmem_alloc_hugepage(gfp, info, index, node_id);
 	else
-		page = shmem_alloc_page(gfp, info, index);
+		page = shmem_alloc_page(gfp, info, index, node_id);
 	if (page) {
 		__SetPageLocked(page);
 		__SetPageSwapBacked(page);
@@ -1654,7 +1661,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 	 * limit chance of success by further cpuset and node constraints.
 	 */
 	gfp &= ~GFP_CONSTRAINT_MASK;
-	newpage = shmem_alloc_page(gfp, info, index);
+	newpage = shmem_alloc_page(gfp, info, index, numa_node_id());
 	if (!newpage)
 		return -ENOMEM;
 
@@ -1730,6 +1737,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 	int error;
 	int once = 0;
 	int alloced = 0;
+	int node_id;
 
 	if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
 		return -EFBIG;
@@ -1881,11 +1889,15 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 		goto alloc_nohuge;
 	}
 
+	node_id = shmem_node_id(vma);
+
 alloc_huge:
-	page = shmem_alloc_and_acct_page(gfp, inode, index, true);
+	page = shmem_alloc_and_acct_page(gfp, inode, index, true,
+					 node_id);
 	if (IS_ERR(page)) {
-alloc_nohuge:	page = shmem_alloc_and_acct_page(gfp, inode,
-				index, false);
+alloc_nohuge:
+		page = shmem_alloc_and_acct_page(gfp, inode,
+					index, false, node_id);
 	}
 	if (IS_ERR(page)) {
 		int retry = 5;
@@ -2377,7 +2389,7 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 	}
 
 	if (!*pagep) {
-		page = shmem_alloc_page(gfp, info, pgoff);
+		page = shmem_alloc_page(gfp, info, pgoff, numa_node_id());
 		if (!page)
 			goto out_unacct_blocks;