From: Kemeng Shi <shikemeng@huawei.com>
euleros inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7RO5Q
CVE: NA
Reference: https://gitee.com/openeuler/kernel/commit/50d5bf1b6da9f74bf93f9dec601c09d45d...
-------------------------------------------------
Add a /proc/sys/vm/hugepage_nocache_copy switch. Set it to 1 to copy hugepages with the movnti non-temporal SSE2 store instruction if the CPU supports it. Set it to 0 to copy hugepages as usual.
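As the copy_highpages() change below shows, the non-temporal path is only taken when the destination folio sits on a PMEM node. A possible way to exercise the switch at runtime (assuming CONFIG_SYSCTL is enabled; the file is mode 0600 and writes require CAP_SYS_ADMIN):

  echo 1 > /proc/sys/vm/hugepage_nocache_copy    # use non-temporal stores for PMEM hugepage copies
  echo 0 > /proc/sys/vm/hugepage_nocache_copy    # fall back to the normal cached copy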
Signed-off-by: Kemeng Shi <shikemeng@huawei.com>
Reviewed-by: louhongxiang <louhongxiang@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 arch/x86/include/asm/page_64.h |   7 +++
 arch/x86/lib/Makefile          |   1 +
 arch/x86/lib/copy_highpages.c  | 107 +++++++++++++++++++++++++++++++++
 arch/x86/lib/copy_page_64.S    |  73 ++++++++++++++++++++++
 include/linux/highmem.h        |  17 ++++++
 mm/util.c                      |  11 +---
 6 files changed, 207 insertions(+), 9 deletions(-)
 create mode 100644 arch/x86/lib/copy_highpages.c
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index cc6b8e087192..f869dec42f34 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -60,6 +60,13 @@ static inline void clear_page(void *page)
 
 void copy_page(void *to, void *from);
 
+void copy_page_nocache(void *to, void *from);
+void copy_page_nocache_barrir(void);
+
+struct folio;
+#define __HAVE_ARCH_COPY_HUGEPAGES 1
+void copy_highpages(struct folio *dst, struct folio *src);
+
 #ifdef CONFIG_X86_5LEVEL
 /*
  * User space process size. This is the first address outside the user range.
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 01932af64193..f3a8fa45c010 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -73,4 +73,5 @@ endif
 	lib-y += memmove_64.o memset_64.o
 	lib-y += copy_user_64.o copy_user_uncached_64.o
 	lib-y += cmpxchg16b_emu.o
+	lib-y += copy_highpages.o
 endif
diff --git a/arch/x86/lib/copy_highpages.c b/arch/x86/lib/copy_highpages.c
new file mode 100644
index 000000000000..e587af73835f
--- /dev/null
+++ b/arch/x86/lib/copy_highpages.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * accelerate copying page to pmem with non-temporal stores
+ */
+#include <linux/sched.h>
+#include <linux/mmzone.h>
+#include <linux/highmem.h>
+#include <linux/sysctl.h>
+
+DEFINE_STATIC_KEY_FALSE(hugepage_nocache_copy);
+#ifdef CONFIG_SYSCTL
+static void set_hugepage_nocache_copy(bool enabled)
+{
+	if (enabled)
+		static_branch_enable(&hugepage_nocache_copy);
+	else
+		static_branch_disable(&hugepage_nocache_copy);
+}
+
+int sysctl_hugepage_nocache_copy(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	struct ctl_table t;
+	int err;
+	int state;
+
+	if (write && !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	state = static_branch_unlikely(&hugepage_nocache_copy);
+	t = *table;
+	t.data = &state;
+	err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
+	if (err < 0)
+		return err;
+	if (write)
+		set_hugepage_nocache_copy(state);
+	return err;
+}
+
+static struct ctl_table copy_highpages_table[] = {
+	{
+		.procname	= "hugepage_nocache_copy",
+		.data		= NULL,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0600,
+		.proc_handler	= sysctl_hugepage_nocache_copy,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= SYSCTL_ONE,
+	},
+	{}
+};
+
+static struct ctl_table copy_highpages_root_table[] = {
+	{
+		.procname	= "vm",
+		.mode		= 0555,
+		.child		= copy_highpages_table,
+	},
+	{}
+};
+
+static __init int copy_highpages_init(void)
+{
+	return register_sysctl_table(copy_highpages_root_table) ? 0 : -ENOMEM;
+}
+__initcall(copy_highpages_init);
+#endif
+
+static void copy_highpages_nocache(struct folio *dst, struct folio *src)
+{
+	char *vfrom, *vto;
+	int i;
+	int nr = folio_nr_pages(src);
+
+	for (i = 0; i < nr; i++) {
+		cond_resched();
+		vfrom = kmap_atomic(folio_page(src, i));
+		vto = kmap_atomic(folio_page(dst, i));
+		copy_page_nocache(vto, vfrom);
+		kunmap_atomic(vto);
+		kunmap_atomic(vfrom);
+	}
+	copy_page_nocache_barrir();
+}
+
+static void copy_highpages_cache(struct folio *dst, struct folio *src)
+{
+	int i = 0;
+	int nr = folio_nr_pages(src);
+
+	for (;;) {
+		copy_highpage(folio_page(dst, i), folio_page(src, i));
+		if (++i == nr)
+			break;
+		cond_resched();
+	}
+}
+
+void copy_highpages(struct folio *dst, struct folio *src)
+{
+	if (static_branch_unlikely(&hugepage_nocache_copy) &&
+	    get_node_type(page_to_nid(folio_page(dst, 0))) == NODE_TYPE_PMEM)
+		return copy_highpages_nocache(dst, src);
+
+	return copy_highpages_cache(dst, src);
+}
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
index 30ea644bf446..c31a9a79b18e 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -87,3 +87,76 @@ SYM_FUNC_START_LOCAL(copy_page_regs)
 	addq	$2*8, %rsp
 	RET
 SYM_FUNC_END(copy_page_regs)
+
+SYM_FUNC_START(copy_page_nocache)
+	ALTERNATIVE "jmp copy_page", "", X86_FEATURE_XMM2
+	subq	$2*8, %rsp
+	movq	%rbx, (%rsp)
+	movq	%r12, 1*8(%rsp)
+
+	movl	$(4096/64)-5, %ecx
+	.p2align 4
+.LoopNT64:
+	dec	%rcx
+	movq	0x8*0(%rsi), %rax
+	movq	0x8*1(%rsi), %rbx
+	movq	0x8*2(%rsi), %rdx
+	movq	0x8*3(%rsi), %r8
+	movq	0x8*4(%rsi), %r9
+	movq	0x8*5(%rsi), %r10
+	movq	0x8*6(%rsi), %r11
+	movq	0x8*7(%rsi), %r12
+
+	prefetcht0 5*64(%rsi)
+
+	movnti	%rax, 0x8*0(%rdi)
+	movnti	%rbx, 0x8*1(%rdi)
+	movnti	%rdx, 0x8*2(%rdi)
+	movnti	%r8, 0x8*3(%rdi)
+	movnti	%r9, 0x8*4(%rdi)
+	movnti	%r10, 0x8*5(%rdi)
+	movnti	%r11, 0x8*6(%rdi)
+	movnti	%r12, 0x8*7(%rdi)
+
+	leaq	64 (%rsi), %rsi
+	leaq	64 (%rdi), %rdi
+
+	jnz	.LoopNT64
+
+	movl	$5, %ecx
+	.p2align 4
+.LoopNT2:
+	decl	%ecx
+
+	movq	0x8*0(%rsi), %rax
+	movq	0x8*1(%rsi), %rbx
+	movq	0x8*2(%rsi), %rdx
+	movq	0x8*3(%rsi), %r8
+	movq	0x8*4(%rsi), %r9
+	movq	0x8*5(%rsi), %r10
+	movq	0x8*6(%rsi), %r11
+	movq	0x8*7(%rsi), %r12
+
+	movnti	%rax, 0x8*0(%rdi)
+	movnti	%rbx, 0x8*1(%rdi)
+	movnti	%rdx, 0x8*2(%rdi)
+	movnti	%r8, 0x8*3(%rdi)
+	movnti	%r9, 0x8*4(%rdi)
+	movnti	%r10, 0x8*5(%rdi)
+	movnti	%r11, 0x8*6(%rdi)
+	movnti	%r12, 0x8*7(%rdi)
+
+	leaq	64(%rdi), %rdi
+	leaq	64(%rsi), %rsi
+	jnz	.LoopNT2
+
+	movq	(%rsp), %rbx
+	movq	1*8(%rsp), %r12
+	addq	$2*8, %rsp
+	ret
+SYM_FUNC_END(copy_page_nocache)
+
+SYM_FUNC_START(copy_page_nocache_barrir)
+	ALTERNATIVE "", "sfence", X86_FEATURE_XMM2
+	ret
+SYM_FUNC_END(copy_page_nocache_barrir)
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 4de1dbcd3ef6..a8c13f14b82f 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -513,4 +513,21 @@ static inline void put_and_unmap_page(struct page *page, void *addr)
 	put_page(page);
 }
 
+#ifndef __HAVE_ARCH_COPY_HUGEPAGES
+
+static inline void copy_highpages(struct folio *dst, struct folio *src)
+{
+	int i = 0;
+	int nr = folio_nr_pages(src);
+
+	for (;;) {
+		copy_highpage(folio_page(dst, i), folio_page(src, i));
+		if (++i == nr)
+			break;
+		cond_resched();
+	}
+}
+
+#endif /* __HAVE_ARCH_COPY_HUGEPAGES */
+
 #endif /* _LINUX_HIGHMEM_H */
diff --git a/mm/util.c b/mm/util.c
index dd12b9531ac4..6593ad7b84ee 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -23,6 +23,7 @@
 #include <linux/processor.h>
 #include <linux/sizes.h>
 #include <linux/compat.h>
+#include <linux/page-flags.h>
 
 #include <linux/uaccess.h>
 
@@ -792,15 +793,7 @@ EXPORT_SYMBOL(folio_mapping);
  */
 void folio_copy(struct folio *dst, struct folio *src)
 {
-	long i = 0;
-	long nr = folio_nr_pages(src);
-
-	for (;;) {
-		copy_highpage(folio_page(dst, i), folio_page(src, i));
-		if (++i == nr)
-			break;
-		cond_resched();
-	}
+	copy_highpages(dst, src);
 }
 
 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;