From: Jingxian He <hejingxian@huawei.com>
virtcca inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IAGYKI
--------------------------------
Enable swiotlb alloc for CVM shared memory:
1. CVM guest mapped memory is secure memory.
2. QEMU/KVM cannot access the secure memory.
3. Use the swiotlb buffer as memory shared by the CVM guest and QEMU/KVM.
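
As an illustration of the intended data path (not part of this patch;
the device and function names below are hypothetical), a guest driver
keeps using the regular DMA API, and streaming mappings bounce through
the decrypted swiotlb pool that QEMU/KVM can access:

	#include <linux/dma-mapping.h>
	#include <linux/sizes.h>
	#include <linux/slab.h>

	static int cvm_dma_example(struct device *dev)
	{
		void *buf = kmalloc(SZ_4K, GFP_KERNEL);	/* secure guest memory */
		dma_addr_t dma;

		if (!buf)
			return -ENOMEM;

		/* Data is copied into the shared (non-secure) swiotlb buffer */
		dma = dma_map_single(dev, buf, SZ_4K, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma)) {
			kfree(buf);
			return -ENOMEM;
		}

		/* ... device DMA to/from the bounce buffer happens here ... */

		dma_unmap_single(dev, dma, SZ_4K, DMA_TO_DEVICE);
		kfree(buf);
		return 0;
	}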
Signed-off-by: Jingxian He <hejingxian@huawei.com>
---
 arch/arm64/Kconfig                         |  8 ++
 arch/arm64/include/asm/set_memory.h        |  1 +
 arch/arm64/include/asm/virtcca_cvm_guest.h | 39 +++++++++
 arch/arm64/kernel/Makefile                 |  1 +
 arch/arm64/kernel/virtcca_cvm_guest.c      | 93 ++++++++++++++++++++++
 arch/arm64/mm/init.c                       |  3 +
 arch/arm64/mm/mmu.c                        |  5 +-
 arch/arm64/mm/pageattr.c                   |  3 +
 kernel/dma/swiotlb.c                       | 16 ++++
 9 files changed, 167 insertions(+), 2 deletions(-)
 create mode 100644 arch/arm64/include/asm/virtcca_cvm_guest.h
 create mode 100644 arch/arm64/kernel/virtcca_cvm_guest.c
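
A note on the mechanism: sharing is implemented by setting the NS bit
(bit 5) in the linear-map PTEs covering the swiotlb buffer. Assuming
the default 64MB swiotlb pool and IO_TLB_SHIFT = 11 (2KB slabs),
swiotlb_cvm_update_mem_attributes() below decrypts
PAGE_ALIGN(32768 << 11) = 64MB, i.e. 16384 4KB pages, and then sets
io_tlb_default_mem.for_alloc so that shared allocations are served
from this pool.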
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a162b83bd..c0fc7b4ca 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -2576,6 +2576,14 @@ config COMPAT
 	def_bool y
 	depends on AARCH32_EL0 || ARM64_ILP32
 
+config HISI_VIRTCCA_GUEST
+	bool "Enable VIRTCCA CVM guest support"
+	depends on DMA_RESTRICTED_POOL
+	help
+	  Support running the kernel as a VIRTCCA CVM guest based on S-EL2.
+
+	  If unsure, say N.
+
 menu "Power management options"
source "kernel/power/Kconfig" diff --git a/arch/arm64/include/asm/set_memory.h b/arch/arm64/include/asm/set_memory.h index 0f740b781..2031b31c0 100644 --- a/arch/arm64/include/asm/set_memory.h +++ b/arch/arm64/include/asm/set_memory.h @@ -4,6 +4,7 @@ #define _ASM_ARM64_SET_MEMORY_H
 #include <asm-generic/set_memory.h>
+#include <asm/virtcca_cvm_guest.h>
 
 bool can_set_direct_map(void);
 #define can_set_direct_map can_set_direct_map
diff --git a/arch/arm64/include/asm/virtcca_cvm_guest.h b/arch/arm64/include/asm/virtcca_cvm_guest.h
new file mode 100644
index 000000000..2a68626a4
--- /dev/null
+++ b/arch/arm64/include/asm/virtcca_cvm_guest.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024. Huawei Technologies Co., Ltd. All rights reserved.
+ */
+#ifndef __VIRTCCA_CVM_GUEST_H
+#define __VIRTCCA_CVM_GUEST_H
+
+#ifdef CONFIG_HISI_VIRTCCA_GUEST
+struct device;
+
+extern int set_cvm_memory_encrypted(unsigned long addr, int numpages);
+
+extern int set_cvm_memory_decrypted(unsigned long addr, int numpages);
+
+extern bool is_virtcca_cvm_world(void);
+
+extern void __init swiotlb_cvm_update_mem_attributes(void);
+
+#else
+
+static inline int set_cvm_memory_encrypted(unsigned long addr, int numpages)
+{
+	return 0;
+}
+
+static inline int set_cvm_memory_decrypted(unsigned long addr, int numpages)
+{
+	return 0;
+}
+
+static inline bool is_virtcca_cvm_world(void)
+{
+	return false;
+}
+
+static inline void __init swiotlb_cvm_update_mem_attributes(void) {}
+
+#endif /* CONFIG_HISI_VIRTCCA_GUEST */
+#endif /* __VIRTCCA_CVM_GUEST_H */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 21ef9c21a..cccf8332e 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -81,6 +81,7 @@ obj-$(CONFIG_COMPAT_VDSO) += vdso32-wrap.o
 obj-$(CONFIG_ARM64_ILP32)		+= vdso-ilp32/
 obj-$(CONFIG_UNWIND_PATCH_PAC_INTO_SCS)	+= patch-scs.o
 obj-$(CONFIG_IPI_AS_NMI)		+= ipi_nmi.o
+obj-$(CONFIG_HISI_VIRTCCA_GUEST)	+= virtcca_cvm_guest.o
 CFLAGS_patch-scs.o += -mbranch-protection=none
 # Force dependency (vdso*-wrap.S includes vdso.so through incbin)
diff --git a/arch/arm64/kernel/virtcca_cvm_guest.c b/arch/arm64/kernel/virtcca_cvm_guest.c
new file mode 100644
index 000000000..1ce458959
--- /dev/null
+++ b/arch/arm64/kernel/virtcca_cvm_guest.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024. Huawei Technologies Co., Ltd. All rights reserved.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+
+#include <asm/cacheflush.h>
+#include <asm/set_memory.h>
+#include <asm/tlbflush.h>
+
+#define CVM_PTE_NS_BIT	5
+#define CVM_PTE_NS_MASK	(1 << CVM_PTE_NS_BIT)
+
+static bool cvm_guest_enable __read_mostly;
+
+/* Use 'virtcca_cvm_guest=1' on the kernel command line to enable the CVM guest feature */
+static int __init setup_cvm_guest(char *str)
+{
+	int ret;
+	unsigned int val;
+
+	if (!str)
+		return 0;
+
+	ret = kstrtouint(str, 10, &val);
+	if (ret) {
+		pr_warn("Unable to parse virtcca_cvm_guest.\n");
+	} else {
+		if (val)
+			cvm_guest_enable = true;
+	}
+	return ret;
+}
+early_param("virtcca_cvm_guest", setup_cvm_guest);
+
+bool is_virtcca_cvm_world(void)
+{
+	return cvm_guest_enable;
+}
+
+static int change_page_range_cvm(pte_t *ptep, unsigned long addr, void *data)
+{
+	bool encrypt = (bool)data;
+	pte_t pte = READ_ONCE(*ptep);
+
+	if (encrypt) {
+		if (!(pte.pte & CVM_PTE_NS_MASK))
+			return 0;
+		pte.pte = pte.pte & (~CVM_PTE_NS_MASK);
+	} else {
+		if (pte.pte & CVM_PTE_NS_MASK)
+			return 0;
+		/* Set the NS bit to share the page with the non-secure world */
+		pte.pte = pte.pte | CVM_PTE_NS_MASK;
+	}
+	set_pte(ptep, pte);
+
+	return 0;
+}
+
+static int __change_memory_common_cvm(unsigned long start, unsigned long size, bool encrypt)
+{
+	int ret;
+
+	ret = apply_to_page_range(&init_mm, start, size, change_page_range_cvm, (void *)encrypt);
+	flush_tlb_kernel_range(start, start + size);
+	return ret;
+}
+
+static int __set_memory_encrypted(unsigned long addr,
+				  int numpages,
+				  bool encrypt)
+{
+	if (!is_virtcca_cvm_world())
+		return 0;
+
+	WARN_ON(!__is_lm_address(addr));
+	return __change_memory_common_cvm(addr, PAGE_SIZE * numpages, encrypt);
+}
+
+int set_cvm_memory_encrypted(unsigned long addr, int numpages)
+{
+	return __set_memory_encrypted(addr, numpages, true);
+}
+
+int set_cvm_memory_decrypted(unsigned long addr, int numpages)
+{
+	return __set_memory_encrypted(addr, numpages, false);
+}
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 8c8d7653b..66a7fff9f 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -45,6 +45,7 @@
 #include <asm/tlb.h>
 #include <asm/alternative.h>
 #include <asm/xen/swiotlb-xen.h>
+#include <asm/set_memory.h>
#include "internal.h"
@@ -610,6 +611,8 @@ void __init mem_init(void)
 	swiotlb_init(swiotlb, SWIOTLB_VERBOSE);
 
+	swiotlb_cvm_update_mem_attributes();
+
 	/* this will put all unused low memory onto the freelists */
 	memblock_free_all();
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 4142a75a4..31f04f19b 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -40,6 +40,7 @@
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
 #include <asm/kfence.h>
+#include <asm/set_memory.h>
 
 #define NO_BLOCK_MAPPINGS	BIT(0)
 #define NO_CONT_MAPPINGS	BIT(1)
@@ -589,7 +590,7 @@ static void __init map_mem(pgd_t *pgdp)
 	early_kfence_pool = arm64_kfence_alloc_pool();
 
-	if (can_set_direct_map())
+	if (can_set_direct_map() || is_virtcca_cvm_world())
 		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
 	/*
@@ -1343,7 +1344,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
 	VM_BUG_ON(!mhp_range_allowed(start, size, true));
 
-	if (can_set_direct_map())
+	if (can_set_direct_map() || is_virtcca_cvm_world())
 		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 0e270a1c5..06e81d1db 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -195,6 +195,9 @@ int set_direct_map_default_noflush(struct page *page)
 #ifdef CONFIG_DEBUG_PAGEALLOC
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
+	if (is_virtcca_cvm_world())
+		return;
+
 	if (!can_set_direct_map())
 		return;
 
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index a7d5fb473..ef444544e 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -1734,4 +1734,20 @@ static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
 }
 
 RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
+
+#ifdef CONFIG_HISI_VIRTCCA_GUEST
+void __init swiotlb_cvm_update_mem_attributes(void)
+{
+	void *vaddr;
+	unsigned long bytes;
+
+	if (!is_virtcca_cvm_world() || !is_swiotlb_allocated())
+		return;
+	vaddr = phys_to_virt(io_tlb_default_mem.defpool.start);
+	bytes = PAGE_ALIGN(io_tlb_default_mem.defpool.nslabs << IO_TLB_SHIFT);
+	set_cvm_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
+	memset(vaddr, 0, bytes);
+	io_tlb_default_mem.for_alloc = true;
+}
+#endif
 #endif /* CONFIG_DMA_RESTRICTED_POOL */
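
Usage note: CONFIG_HISI_VIRTCCA_GUEST=y (which requires
CONFIG_DMA_RESTRICTED_POOL) only builds the support in; the feature
stays off until the guest kernel boots with

	virtcca_cvm_guest=1

on its command line, after which mem_init() calls
swiotlb_cvm_update_mem_attributes() to decrypt the default swiotlb
pool and mark it for_alloc.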