
From: Nikita Panov <panov.nikita@huawei.com> Acked-by: Artem Kuzin <artem.kuzin@huawei.com> Acked-by: Alexander Grubnikov <alexander.grubnikov@huawei.com> Acked-by: Ilya Hanov <ilya.hanov@huawei-partners.com> Acked-by: Denis Darvish <darvish.denis@huawei.com> Signed-off-by: Nikita Panov <panov.nikita@huawei.com> --- include/linux/mm.h | 4 ++++ mm/memory.c | 30 ++++++++++++++++++++++++++---- 2 files changed, 30 insertions(+), 4 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index adef4b5a0307..9da5e64c23d9 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3787,6 +3787,10 @@ extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, extern int apply_to_existing_page_range(struct mm_struct *mm, unsigned long address, unsigned long size, pte_fn_t fn, void *data); +#ifdef CONFIG_KERNEL_REPLICATION +int apply_to_page_range_replicas(struct mm_struct *mm, unsigned long addr, + unsigned long size, pte_fn_t fn, void *data); +#endif /* CONFIG_KERNEL_REPLICATION */ #ifdef CONFIG_PAGE_POISONING extern void __kernel_poison_pages(struct page *page, int numpages); diff --git a/mm/memory.c b/mm/memory.c index c846dde12de9..0c4f3b1956d4 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3014,7 +3014,7 @@ static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd, return err; } -static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr, +static int __apply_to_page_range(struct mm_struct *mm, pgd_t *pgtable, unsigned long addr, unsigned long size, pte_fn_t fn, void *data, bool create) { @@ -3027,7 +3027,7 @@ static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr, if (WARN_ON(addr >= end)) return -EINVAL; - pgd = pgd_offset(mm, addr); + pgd = pgd_offset_pgd(pgtable, addr); do { next = pgd_addr_end(addr, end); if (pgd_none(*pgd) && !create) @@ -3058,10 +3058,32 @@ static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr, int apply_to_page_range(struct mm_struct *mm, 
unsigned long addr, unsigned long size, pte_fn_t fn, void *data) { - return __apply_to_page_range(mm, addr, size, fn, data, true); + return __apply_to_page_range(mm, mm->pgd, addr, size, fn, data, true); } EXPORT_SYMBOL_GPL(apply_to_page_range); +#ifdef CONFIG_KERNEL_REPLICATION +/* + * Same as apply_to_page_range(), but taking into account per-NUMA node + * replicas. + */ +int apply_to_page_range_replicas(struct mm_struct *mm, unsigned long addr, + unsigned long size, pte_fn_t fn, void *data) +{ + int nid; + int ret = 0; + + for_each_memory_node(nid) { + ret = __apply_to_page_range(mm, per_node_pgd(mm, nid), + addr, size, fn, data, true); + if (ret) + break; + } + + return ret; +} +#endif /* CONFIG_KERNEL_REPLICATION */ + /* * Scan a region of virtual memory, calling a provided function on * each leaf page table where it exists. @@ -3072,7 +3094,7 @@ EXPORT_SYMBOL_GPL(apply_to_page_range); int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr, unsigned long size, pte_fn_t fn, void *data) { - return __apply_to_page_range(mm, addr, size, fn, data, false); + return __apply_to_page_range(mm, mm->pgd, addr, size, fn, data, false); } EXPORT_SYMBOL_GPL(apply_to_existing_page_range); -- 2.34.1