From: Jingxian He <hejingxian@huawei.com>
virtcca inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I9CC0X
--------------------------------
Enable swiotlb alloc for cvm share mem:
1. Memory mapped by a cvm guest is secure memory.
2. Qemu/kvm cannot access secure memory.
3. Use the swiotlb buffer as memory shared between the cvm guest
   and qemu/kvm.
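To illustrate the resulting flow (hypothetical driver code, not part of
this patch): once the swiotlb pool has been marked non-secure, an
ordinary DMA allocation in the guest lands in that shared pool, because
the is_swiotlb_for_alloc() override added below forces the swiotlb
allocation path; drivers need no changes:

    /*
     * Illustration only. With CONFIG_CVM_GUEST and cvm_guest=1, this
     * buffer is expected to come from the swiotlb pool that
     * swiotlb_cvm_update_mem_attributes() made shared (NS bit set),
     * so qemu/kvm can read and write it.
     */
    dma_addr_t dma_handle;
    void *buf = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);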
Signed-off-by: Jingxian He <hejingxian@huawei.com>
---
 arch/arm64/Kconfig                  |  8 +++
 arch/arm64/include/asm/Kbuild       |  1 -
 arch/arm64/include/asm/cvm_guest.h  | 46 ++++++++++++++
 arch/arm64/include/asm/set_memory.h |  9 +++
 arch/arm64/kernel/Makefile          |  1 +
 arch/arm64/kernel/cvm_guest.c       | 93 +++++++++++++++++++++++++++++
 arch/arm64/mm/init.c                |  3 +
 arch/arm64/mm/mmu.c                 |  6 +-
 arch/arm64/mm/pageattr.c            |  3 +
 kernel/dma/swiotlb.c                | 15 +++++
 10 files changed, 181 insertions(+), 4 deletions(-)
 create mode 100644 arch/arm64/include/asm/cvm_guest.h
 create mode 100644 arch/arm64/include/asm/set_memory.h
 create mode 100644 arch/arm64/kernel/cvm_guest.c
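Note for testers (not part of the change): the feature is off by
default and only takes effect when the guest kernel is booted with
cvm_guest=1 on its command line (parsed by setup_cvm_guest() below),
e.g. with everything except cvm_guest=1 being placeholder values:

    console=ttyAMA0 root=/dev/vda rw cvm_guest=1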
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a1eab2b7d..496d1d279 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -2279,6 +2279,14 @@ config ARCH_ENABLE_THP_MIGRATION
 	def_bool y
 	depends on TRANSPARENT_HUGEPAGE
 
+config CVM_GUEST
+	bool "Enable cvm guest run"
+	depends on DMA_RESTRICTED_POOL
+	help
+	  Support CVM guest based on S-EL2
+
+	  If unsure, say N.
+
 menu "Power management options"
source "kernel/power/Kconfig" diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild index a3426b61f..b63d86a52 100644 --- a/arch/arm64/include/asm/Kbuild +++ b/arch/arm64/include/asm/Kbuild @@ -2,5 +2,4 @@ generic-y += early_ioremap.h generic-y += mcs_spinlock.h generic-y += qrwlock.h -generic-y += set_memory.h generic-y += user.h diff --git a/arch/arm64/include/asm/cvm_guest.h b/arch/arm64/include/asm/cvm_guest.h new file mode 100644 index 000000000..f16c34edb --- /dev/null +++ b/arch/arm64/include/asm/cvm_guest.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2024. Huawei Technologies Co., Ltd. All rights reserved. + */ +#ifndef __CVM_GUEST_H +#define __CVM_GUEST_H + +#ifdef CONFIG_CVM_GUEST +struct device; + +extern int set_cvm_memory_encrypted(unsigned long addr, int numpages); + +extern int set_cvm_memory_decrypted(unsigned long addr, int numpages); + +extern bool is_cvm_world(void); + +#define is_swiotlb_for_alloc is_swiotlb_for_alloc +static inline bool is_swiotlb_for_alloc(struct device *dev) +{ + /* Force dma alloc by swiotlb in Confidential VMs */ + return is_cvm_world(); +} + +extern void __init swiotlb_cvm_update_mem_attributes(void); + +#else + +static inline int set_cvm_memory_encrypted(unsigned long addr, int numpages) +{ + return 0; +} + +static inline int set_cvm_memory_decrypted(unsigned long addr, int numpages) +{ + return 0; +} + +static inline bool is_cvm_world(void) +{ + return false; +} + +static inline void __init swiotlb_cvm_update_mem_attributes(void) {} + +#endif /* CONFIG_CVM_GUEST */ +#endif /* __CVM_GUEST_H */ diff --git a/arch/arm64/include/asm/set_memory.h b/arch/arm64/include/asm/set_memory.h new file mode 100644 index 000000000..38cecbf44 --- /dev/null +++ b/arch/arm64/include/asm/set_memory.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _ASM_ARM64_SET_MEMORY_H +#define _ASM_ARM64_SET_MEMORY_H + +#include <asm-generic/set_memory.h> +#include <asm/cvm_guest.h> + +#endif /* _ASM_ARM64_SET_MEMORY_H */ diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 312c164db..4c6eb5e78 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -73,6 +73,7 @@ obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o obj-$(CONFIG_ARM64_MTE) += mte.o obj-$(CONFIG_MPAM) += mpam/ +obj-$(CONFIG_CVM_GUEST) += cvm_guest.o
 
 obj-y				+= vdso/ probes/
 obj-$(CONFIG_COMPAT_VDSO)	+= vdso32/
diff --git a/arch/arm64/kernel/cvm_guest.c b/arch/arm64/kernel/cvm_guest.c
new file mode 100644
index 000000000..c1f27992a
--- /dev/null
+++ b/arch/arm64/kernel/cvm_guest.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024. Huawei Technologies Co., Ltd. All rights reserved.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+
+#include <asm/cacheflush.h>
+#include <asm/set_memory.h>
+#include <asm/tlbflush.h>
+
+#define CVM_PTE_NS_BIT	5
+#define CVM_PTE_NS_MASK	(1 << CVM_PTE_NS_BIT)
+
+static bool cvm_guest_enable __read_mostly;
+
+/* please use 'cvm_guest=1' to enable cvm guest feature */
+static int __init setup_cvm_guest(char *str)
+{
+	int ret;
+	unsigned int val;
+
+	if (!str)
+		return 0;
+
+	ret = kstrtouint(str, 10, &val);
+	if (ret) {
+		pr_warn("Unable to parse cvm_guest.\n");
+	} else {
+		if (val)
+			cvm_guest_enable = true;
+	}
+	return ret;
+}
+early_param("cvm_guest", setup_cvm_guest);
+
+bool is_cvm_world(void)
+{
+	return cvm_guest_enable;
+}
+
+static int change_page_range_cvm(pte_t *ptep, unsigned long addr, void *data)
+{
+	bool encrypt = (bool)data;
+	pte_t pte = READ_ONCE(*ptep);
+
+	if (encrypt) {
+		if (!(pte.pte & CVM_PTE_NS_MASK))
+			return 0;
+		pte.pte = pte.pte & (~CVM_PTE_NS_MASK);
+	} else {
+		if (pte.pte & CVM_PTE_NS_MASK)
+			return 0;
+		/* Set NS BIT */
+		pte.pte = pte.pte | CVM_PTE_NS_MASK;
+	}
+	set_pte(ptep, pte);
+
+	return 0;
+}
+
+static int __change_memory_common_cvm(unsigned long start, unsigned long size, bool encrypt)
+{
+	int ret;
+
+	ret = apply_to_page_range(&init_mm, start, size, change_page_range_cvm, (void *)encrypt);
+	flush_tlb_kernel_range(start, start + size);
+	return ret;
+}
+
+static int __set_memory_encrypted(unsigned long addr,
+				  int numpages,
+				  bool encrypt)
+{
+	if (!is_cvm_world())
+		return 0;
+
+	WARN_ON(!__is_lm_address(addr));
+	return __change_memory_common_cvm(addr, PAGE_SIZE * numpages, encrypt);
+}
+
+int set_cvm_memory_encrypted(unsigned long addr, int numpages)
+{
+	return __set_memory_encrypted(addr, numpages, true);
+}
+
+int set_cvm_memory_decrypted(unsigned long addr, int numpages)
+{
+	return __set_memory_encrypted(addr, numpages, false);
+}
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index be67a9c42..afcede934 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -47,6 +47,7 @@
 #include <asm/tlb.h>
 #include <asm/alternative.h>
 #include <asm/cpu_park.h>
+#include <asm/set_memory.h>
#include "internal.h"
@@ -674,6 +675,8 @@ void __init mem_init(void)
 	else
 		swiotlb_force = SWIOTLB_NO_FORCE;
 
+	swiotlb_cvm_update_mem_attributes();
+
 	set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);
 
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 804d5197c..095c192c7 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -38,6 +38,7 @@
 #include <asm/ptdump.h>
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
+#include <asm/set_memory.h>
 
 #define NO_BLOCK_MAPPINGS	BIT(0)
 #define NO_CONT_MAPPINGS	BIT(1)
@@ -494,7 +495,7 @@ static void __init map_mem(pgd_t *pgdp)
 	int flags = 0, eflags = 0;
 	u64 i;
 
-	if (rodata_full || debug_pagealloc_enabled())
+	if (rodata_full || debug_pagealloc_enabled() || is_cvm_world())
 		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
 #ifdef CONFIG_KFENCE
@@ -1513,8 +1514,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
 		return -EINVAL;
 	}
 
-
-	if (rodata_full || debug_pagealloc_enabled())
+	if (rodata_full || debug_pagealloc_enabled() || is_cvm_world())
 		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
 	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 0bc12dbf2..e84a57c4d 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -188,6 +188,9 @@ int set_direct_map_default_noflush(struct page *page)
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
+	if (is_cvm_world())
+		return;
+
 	if (!debug_pagealloc_enabled() && !rodata_full)
 		return;
 
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 901ce32b7..e321c023d 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -848,4 +848,19 @@ bool swiotlb_free(struct device *dev, struct page *page, size_t size)
 	return true;
 }
 
+#ifdef CONFIG_CVM_GUEST
+void __init swiotlb_cvm_update_mem_attributes(void)
+{
+	void *vaddr;
+	unsigned long bytes;
+
+	if (!is_cvm_world() || !io_tlb_start)
+		return;
+	vaddr = phys_to_virt(io_tlb_start);
+	bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
+	set_cvm_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
+	memset(vaddr, 0, bytes);
+}
+#endif
+
 #endif /* CONFIG_DMA_RESTRICTED_POOL */
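
For reviewers, a usage sketch of the new set_memory interface
(illustration only, not part of this patch; error handling elided).
A kernel component that wants to share pages with qemu/kvm outside the
swiotlb pool could flip the NS bit explicitly and restore it before
freeing; both helpers return 0 without touching the page tables when
is_cvm_world() is false, so such code stays safe in non-CVM guests:

    /* Hypothetical caller: share a linear-map allocation with the host. */
    struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
    unsigned long vaddr = (unsigned long)page_address(page);

    /* make the pages accessible to qemu/kvm (set the NS bit) */
    set_cvm_memory_decrypted(vaddr, 1 << order);

    /* ... exchange data with the host ... */

    /* return the pages to the secure world before freeing them */
    set_cvm_memory_encrypted(vaddr, 1 << order);
    /* contents are not preserved across the attribute change, so
     * reinitialize the private view (mirrors the memset in
     * swiotlb_cvm_update_mem_attributes()) */
    memset(page_address(page), 0, PAGE_SIZE << order);
    __free_pages(page, order);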