ascend inclusion
category: Feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8LNGH
---------------------------------------------
do_mmap()/mmap_region()/__mm_populate()/do_mbind() can only operate on the current process. The share pool now also needs to create memory mappings in other processes, so export new variants that take an explicit mm_struct to select the target process. The existing logic is unchanged; the new interfaces are only used by the share pool.
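
For illustration only (not part of this patch), a share-pool style caller could use the new interfaces roughly as below, mirroring what vm_mmap_pgoff() does for current->mm but against a target mm. sp_mmap_into_task() and its parameters are hypothetical; only __do_mmap_mm() and do_mm_populate() are introduced here.

	#include <linux/err.h>
	#include <linux/mm.h>
	#include <linux/mman.h>
	#include <linux/userfaultfd_k.h>

	static unsigned long sp_mmap_into_task(struct mm_struct *mm, struct file *file,
					       unsigned long addr, unsigned long len)
	{
		unsigned long populate = 0;
		unsigned long mapped;
		LIST_HEAD(uf);

		if (mmap_write_lock_killable(mm))
			return -EINTR;

		/* Create the mapping in the target mm instead of current->mm */
		mapped = __do_mmap_mm(mm, file, addr, len, PROT_READ | PROT_WRITE,
				      MAP_SHARED | MAP_POPULATE, 0, 0, &populate, &uf);

		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);

		/* Pre-fault the new range in the target mm, as mm_populate() does for current */
		if (!IS_ERR_VALUE(mapped) && populate)
			do_mm_populate(mm, mapped, populate, 0);

		return mapped;
	}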
Signed-off-by: Wang Wensheng <wangwensheng4@huawei.com>
---
 include/linux/mempolicy.h | 11 +++++++++++
 include/linux/mm.h        | 11 +++++++++++
 mm/gup.c                  | 23 ++++++++++++++---------
 mm/mempolicy.c            | 12 +++++++++---
 mm/mmap.c                 | 34 +++++++++++++++++++++++++++-------
 5 files changed, 72 insertions(+), 19 deletions(-)
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 6c2754d7bfed..ca62e945e4f7 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -184,6 +184,9 @@ static inline bool mpol_is_preferred_many(struct mempolicy *pol)
 
 extern bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone);
 
+extern long __do_mbind(unsigned long start, unsigned long len,
+		unsigned short mode, unsigned short mode_flags,
+		nodemask_t *nmask, unsigned long flags, struct mm_struct *mm);
 #else
 
 struct mempolicy {};
@@ -294,5 +297,13 @@ static inline bool mpol_is_preferred_many(struct mempolicy *pol)
 	return false;
 }
 
+static inline long __do_mbind(unsigned long start, unsigned long len,
+		unsigned short mode, unsigned short mode_flags,
+		nodemask_t *nmask, unsigned long flags, struct mm_struct *mm)
+{
+	return 0;
+}
+
+
 #endif /* CONFIG_NUMA */
 #endif
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8cf86b56aba5..4fe823ead243 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3288,6 +3288,10 @@ extern unsigned long do_mmap(struct file *file, unsigned long addr,
 	unsigned long len, unsigned long prot, unsigned long flags,
 	vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
 	struct list_head *uf);
+extern unsigned long __do_mmap_mm(struct mm_struct *mm, struct file *file, unsigned long addr,
+	unsigned long len, unsigned long prot,
+	unsigned long flags, vm_flags_t vm_flags, unsigned long pgoff,
+	unsigned long *populate, struct list_head *uf);
 extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
 			 unsigned long start, size_t len, struct list_head *uf,
 			 bool unlock);
@@ -3301,6 +3305,8 @@ extern int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
 			 unsigned long start, size_t len, struct list_head *uf,
 			 bool unlock);
 extern int __mm_populate(unsigned long addr, unsigned long len, int ignore_errors);
+extern int do_mm_populate(struct mm_struct *mm, unsigned long start, unsigned long len,
+			  int ignore_errors);
 static inline void mm_populate(unsigned long addr, unsigned long len)
 {
 	/* Ignore errors */
@@ -3308,6 +3314,11 @@ static inline void mm_populate(unsigned long addr, unsigned long len)
 }
 #else
 static inline void mm_populate(unsigned long addr, unsigned long len) {}
+static inline int do_mm_populate(struct mm_struct *mm, unsigned long start, unsigned long len,
+				 int ignore_errors)
+{
+	return -EPERM;
+}
 #endif
 
 /* These take the mm semaphore themselves */
diff --git a/mm/gup.c b/mm/gup.c
index 2f8a2d89fde1..813add0d3a74 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1727,16 +1727,9 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
 	return ret;
 }
 
-/*
- * __mm_populate - populate and/or mlock pages within a range of address space.
- *
- * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
- * flags. VMAs must be already marked with the desired vm_flags, and
- * mmap_lock must not be held.
- */
-int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
+int do_mm_populate(struct mm_struct *mm, unsigned long start, unsigned long len,
+		   int ignore_errors)
 {
-	struct mm_struct *mm = current->mm;
 	unsigned long end, nstart, nend;
 	struct vm_area_struct *vma = NULL;
 	int locked = 0;
@@ -1787,6 +1780,18 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
 	mmap_read_unlock(mm);
 	return ret;	/* 0 or negative error code */
 }
+
+/*
+ * __mm_populate - populate and/or mlock pages within a range of address space.
+ *
+ * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
+ * flags. VMAs must be already marked with the desired vm_flags, and
+ * mmap_lock must not be held.
+ */
+int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
+{
+	return do_mm_populate(current->mm, start, len, ignore_errors);
+}
 #else /* CONFIG_MMU */
 static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
 		unsigned long nr_pages, struct page **pages,
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4b8447f8175b..b23a239de750 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1262,11 +1262,10 @@ static struct folio *new_folio(struct folio *src, unsigned long start)
 }
 #endif
 
-static long do_mbind(unsigned long start, unsigned long len,
+long __do_mbind(unsigned long start, unsigned long len,
 		     unsigned short mode, unsigned short mode_flags,
-		     nodemask_t *nmask, unsigned long flags)
+		     nodemask_t *nmask, unsigned long flags, struct mm_struct *mm)
 {
-	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma, *prev;
 	struct vma_iterator vmi;
 	struct mempolicy *new;
@@ -1377,6 +1376,13 @@ static long do_mbind(unsigned long start, unsigned long len,
 	return err;
 }
 
+static long do_mbind(unsigned long start, unsigned long len,
+		     unsigned short mode, unsigned short mode_flags,
+		     nodemask_t *nmask, unsigned long flags)
+{
+	return __do_mbind(start, len, mode, mode_flags, nmask, flags, current->mm);
+}
+
 /*
  * User space interface with variable sized bitmaps for nodelists.
  */
diff --git a/mm/mmap.c b/mm/mmap.c
index 9e018d8dd7d6..df2624e48119 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1197,16 +1197,19 @@ static inline bool file_mmap_ok(struct file *file, struct inode *inode,
 	return true;
 }
 
+static unsigned long __mmap_region(struct mm_struct *mm,
+			struct file *file, unsigned long addr,
+			unsigned long len, vm_flags_t vm_flags,
+			unsigned long pgoff, struct list_head *uf);
 /*
  * The caller must write-lock current->mm->mmap_lock.
  */
-unsigned long do_mmap(struct file *file, unsigned long addr,
+unsigned long __do_mmap_mm(struct mm_struct *mm, struct file *file, unsigned long addr,
 			unsigned long len, unsigned long prot,
 			unsigned long flags, vm_flags_t vm_flags,
 			unsigned long pgoff, unsigned long *populate,
 			struct list_head *uf)
 {
-	struct mm_struct *mm = current->mm;
 	int pkey = 0;
 
 	*populate = 0;
@@ -1371,7 +1374,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
 			vm_flags |= VM_NORESERVE;
 	}
 
-	addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
+	addr = __mmap_region(mm, file, addr, len, vm_flags, pgoff, uf);
 	if (!IS_ERR_VALUE(addr) &&
 	    ((vm_flags & VM_LOCKED) ||
 	     (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
@@ -1379,6 +1382,16 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
 	return addr;
 }
 
+unsigned long do_mmap(struct file *file, unsigned long addr,
+			unsigned long len, unsigned long prot,
+			unsigned long flags, vm_flags_t vm_flags,
+			unsigned long pgoff, unsigned long *populate,
+			struct list_head *uf)
+{
+	return __do_mmap_mm(current->mm, file, addr, len, prot, flags,
+			vm_flags, pgoff, populate, uf);
+}
+
 unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
 			      unsigned long prot, unsigned long flags,
 			      unsigned long fd, unsigned long pgoff)
@@ -2659,11 +2672,11 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 	return do_vmi_munmap(&vmi, mm, start, len, uf, false);
 }
 
-unsigned long mmap_region(struct file *file, unsigned long addr,
-		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
-		struct list_head *uf)
+static unsigned long __mmap_region(struct mm_struct *mm,
+		struct file *file, unsigned long addr,
+		unsigned long len, vm_flags_t vm_flags,
+		unsigned long pgoff, struct list_head *uf)
 {
-	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma = NULL;
 	struct vm_area_struct *next, *prev, *merge;
 	pgoff_t pglen = len >> PAGE_SHIFT;
@@ -2915,6 +2928,13 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	return error;
 }
 
+unsigned long mmap_region(struct file *file, unsigned long addr,
+		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
+		struct list_head *uf)
+{
+	return __mmap_region(current->mm, file, addr, len, vm_flags, pgoff, uf);
+}
+
 static int __vm_munmap(unsigned long start, size_t len, bool unlock)
 {
 	int ret;