virtCCA feature
Xiangkai Yang (1):
  secure smmu init

 arch/arm64/include/asm/kvm_tmi.h              | 244 +++--
 arch/arm64/include/asm/kvm_tmm.h              |  22 +
 arch/arm64/kvm/tmi.c                          | 128 ++-
 arch/arm64/kvm/virtcca_cvm.c                  |  10 +-
 drivers/iommu/arm/arm-smmu-v3/Makefile        |   1 +
 drivers/iommu/arm/arm-smmu-v3/arm-s-smmu-v3.c | 841 ++++++++++++++++++
 drivers/iommu/arm/arm-smmu-v3/arm-s-smmu-v3.h | 198 +++++
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c   | 168 +++-
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h   |  16 +
 include/linux/iommu.h                         |   4 +
 10 files changed, 1548 insertions(+), 84 deletions(-)
 create mode 100644 drivers/iommu/arm/arm-smmu-v3/arm-s-smmu-v3.c
 create mode 100644 drivers/iommu/arm/arm-smmu-v3/arm-s-smmu-v3.h
virtcca inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IADD42
--------------------------------
virtCCA feature

Signed-off-by: Xiangkai Yang <yangxiangkai@huawei.com>
---
 arch/arm64/include/asm/kvm_tmi.h              | 244 +++--
 arch/arm64/include/asm/kvm_tmm.h              |  22 +
 arch/arm64/kvm/tmi.c                          | 128 ++-
 arch/arm64/kvm/virtcca_cvm.c                  |  10 +-
 drivers/iommu/arm/arm-smmu-v3/Makefile        |   1 +
 drivers/iommu/arm/arm-smmu-v3/arm-s-smmu-v3.c | 841 ++++++++++++++++++
 drivers/iommu/arm/arm-smmu-v3/arm-s-smmu-v3.h | 198 +++++
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c   | 168 +++-
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h   |  16 +
 include/linux/iommu.h                         |   4 +
 10 files changed, 1548 insertions(+), 84 deletions(-)
 create mode 100644 drivers/iommu/arm/arm-smmu-v3/arm-s-smmu-v3.c
 create mode 100644 drivers/iommu/arm/arm-smmu-v3/arm-s-smmu-v3.h
diff --git a/arch/arm64/include/asm/kvm_tmi.h b/arch/arm64/include/asm/kvm_tmi.h index fc4fe9a711..4100b46853 100644 --- a/arch/arm64/include/asm/kvm_tmi.h +++ b/arch/arm64/include/asm/kvm_tmi.h @@ -11,8 +11,6 @@ #include <linux/virtio_ring.h> #include <asm/sysreg.h>
-#define GRANULE_SIZE 4096 - #define NO_NUMA 0 /* numa bitmap */
#define TMM_TTT_LEVEL_2 2 @@ -68,26 +66,59 @@ struct tmi_tec_params { uint64_t ram_size; };
-#define TEC_ENTRY_FLAG_EMUL_MMIO (1UL << 0U) -#define TEC_ENTRY_FLAG_INJECT_SEA (1UL << 1U) -#define TEC_ENTRY_FLAG_TRAP_WFI (1UL << 2U) -#define TEC_ENTRY_FLAG_TRAP_WFE (1UL << 3U) +struct tmi_smmu_ste_params { + uint64_t ns_src; /* non-secure STE source address */ + uint64_t sid; /* stream id */ + uint64_t smmu_id; /* smmu id */ +}; + +struct tmi_smmu_cfg_params { + uint64_t smmu_id; + uint64_t ioaddr; + uint8_t strtab_base_RA_bit : 1; + uint8_t q_base_RA_WA_bit : 1; + uint8_t is_cmd_queue : 1; +}; + +#define TMI_SMMU_CMD_QUEUE 1 +#define TMI_SMMU_EVT_QUEUE 2 +struct tmi_smmu_queue_params { + uint64_t ns_src; /* non-secure queue source address */ + uint64_t smmu_base_addr; /* smmu base address */ + uint64_t size; /* queue size */ + uint64_t smmu_id; /* smmu id */ + uint64_t type; /* cmdq or evtq */ +}; + +#define MAX_DEV_PER_PORT 256 +struct tmi_dev_delegate_params { + /* BDF of PCIe root bus, F=0. BD are used to calculate APB base and port number. */ + uint16_t root_bd; + uint16_t num_dev; /* number of attachable devices */ + uint32_t _reserved; /* padding for 64-bit alignment */ + uint16_t devs[MAX_DEV_PER_PORT]; /* BDF of each attachable device */ +}; + +#define TEC_ENTRY_FLAG_EMUL_MMIO (1UL << 0U) +#define TEC_ENTRY_FLAG_INJECT_SEA (1UL << 1U) +#define TEC_ENTRY_FLAG_TRAP_WFI (1UL << 2U) +#define TEC_ENTRY_FLAG_TRAP_WFE (1UL << 3U)
-#define TMI_EXIT_SYNC 0 -#define TMI_EXIT_IRQ 1 -#define TMI_EXIT_FIQ 2 -#define TMI_EXIT_PSCI 3 -#define TMI_EXIT_HOST_CALL 5 -#define TMI_EXIT_SERROR 6 +#define TMI_EXIT_SYNC 0 +#define TMI_EXIT_IRQ 1 +#define TMI_EXIT_FIQ 2 +#define TMI_EXIT_PSCI 3 +#define TMI_EXIT_HOST_CALL 5 +#define TMI_EXIT_SERROR 6
/* * The number of GPRs (starting from X0) per voluntary exit context. * Per SMCCC. */ - #define TEC_EXIT_NR_GPRS (31U) + #define TEC_EXIT_NR_GPRS (31U)
/* Maximum number of Interrupt Controller List Registers. */ -#define TEC_GIC_NUM_LRS (16U) +#define TEC_GIC_NUM_LRS (16U)
struct tmi_tec_entry { uint64_t flags; @@ -125,45 +156,45 @@ struct tmi_tec_run { /****************************************************************************** * Bit definitions inside the function id as per the SMC calling convention ******************************************************************************/ -#define FUNCID_TYPE_SHIFT 31 -#define FUNCID_CC_SHIFT 30 -#define FUNCID_OEN_SHIFT 24 -#define FUNCID_NUM_SHIFT 0 - -#define FUNCID_TYPE_MASK 0x1 -#define FUNCID_CC_MASK 0x1 -#define FUNCID_OEN_MASK 0x3f -#define FUNCID_NUM_MASK 0xffff - -#define FUNCID_TYPE_WIDTH 1 -#define FUNCID_CC_WIDTH 1 -#define FUNCID_OEN_WIDTH 6 -#define FUNCID_NUM_WIDTH 16 - -#define SMC_64 1 -#define SMC_32 0 -#define SMC_TYPE_FAST 1 -#define SMC_TYPE_STD 0 +#define FUNCID_TYPE_SHIFT 31 +#define FUNCID_CC_SHIFT 30 +#define FUNCID_OEN_SHIFT 24 +#define FUNCID_NUM_SHIFT 0 + +#define FUNCID_TYPE_MASK 0x1 +#define FUNCID_CC_MASK 0x1 +#define FUNCID_OEN_MASK 0x3f +#define FUNCID_NUM_MASK 0xffff + +#define FUNCID_TYPE_WIDTH 1 +#define FUNCID_CC_WIDTH 1 +#define FUNCID_OEN_WIDTH 6 +#define FUNCID_NUM_WIDTH 16 + +#define SMC_64 1 +#define SMC_32 0 +#define SMC_TYPE_FAST 1 +#define SMC_TYPE_STD 0
/***************************************************************************** * Owning entity number definitions inside the function id as per the SMC * calling convention *****************************************************************************/ -#define OEN_ARM_START 0 -#define OEN_ARM_END 0 -#define OEN_CPU_START 1 -#define OEN_CPU_END 1 -#define OEN_SIP_START 2 -#define OEN_SIP_END 2 -#define OEN_OEM_START 3 -#define OEN_OEM_END 3 -#define OEN_STD_START 4 /* Standard Calls */ -#define OEN_STD_END 4 -#define OEN_TAP_START 48 /* Trusted Applications */ -#define OEN_TAP_END 49 -#define OEN_TOS_START 50 /* Trusted OS */ -#define OEN_TOS_END 63 -#define OEN_LIMIT 64 +#define OEN_ARM_START 0 +#define OEN_ARM_END 0 +#define OEN_CPU_START 1 +#define OEN_CPU_END 1 +#define OEN_SIP_START 2 +#define OEN_SIP_END 2 +#define OEN_OEM_START 3 +#define OEN_OEM_END 3 +#define OEN_STD_START 4 /* Standard Calls */ +#define OEN_STD_END 4 +#define OEN_TAP_START 48 /* Trusted Applications */ +#define OEN_TAP_END 49 +#define OEN_TOS_START 50 /* Trusted OS */ +#define OEN_TOS_END 63 +#define OEN_LIMIT 64
/* Get TMI fastcall std FID from function number */ #define TMI_FID(smc_cc, func_num) \ @@ -185,45 +216,75 @@ struct tmi_tec_run { * always invoked by the Normal world, forward by SPMD and handled by the * TMM. */ -#define TMI_FNUM_VERSION_REQ U(0x260) -#define TMI_FNUM_MEM_INFO_SHOW U(0x261) -#define TMI_FNUM_DATA_CREATE U(0x262) -#define TMI_FNUM_DATA_DESTROY U(0x263) -#define TMI_FNUM_CVM_ACTIVATE U(0x264) -#define TMI_FNUM_CVM_CREATE U(0x265) -#define TMI_FNUM_CVM_DESTROY U(0x266) -#define TMI_FNUM_TEC_CREATE U(0x267) -#define TMI_FNUM_TEC_DESTROY U(0x268) -#define TMI_FNUM_TEC_ENTER U(0x269) -#define TMI_FNUM_TTT_CREATE U(0x26A) -#define TMI_FNUM_PSCI_COMPLETE U(0x26B) -#define TMI_FNUM_FEATURES U(0x26C) -#define TMI_FNUM_TTT_MAP_RANGE U(0x26D) -#define TMI_FNUM_TTT_UNMAP_RANGE U(0x26E) -#define TMI_FNUM_INF_TEST U(0x270) +#define TMI_FNUM_VERSION_REQ U(0x260) +#define TMI_FNUM_MEM_INFO_SHOW U(0x261) +#define TMI_FNUM_DATA_CREATE U(0x262) +#define TMI_FNUM_DATA_DESTROY U(0x263) +#define TMI_FNUM_CVM_ACTIVATE U(0x264) +#define TMI_FNUM_CVM_CREATE U(0x265) +#define TMI_FNUM_CVM_DESTROY U(0x266) +#define TMI_FNUM_TEC_CREATE U(0x267) +#define TMI_FNUM_TEC_DESTROY U(0x268) +#define TMI_FNUM_TEC_ENTER U(0x269) +#define TMI_FNUM_TTT_CREATE U(0x26A) +#define TMI_FNUM_PSCI_COMPLETE U(0x26B) +#define TMI_FNUM_FEATURES U(0x26C) +#define TMI_FNUM_TTT_MAP_RANGE U(0x26D) +#define TMI_FNUM_TTT_UNMAP_RANGE U(0x26E) +#define TMI_FNUM_INF_TEST U(0x270) + +#define TMI_FNUM_SMMU_QUEUE_CREATE U(0x277) +#define TMI_FNUM_SMMU_QUEUE_WRITE U(0x278) +#define TMI_FNUM_SMMU_STE_CREATE U(0x279) +#define TMI_FNUM_MMIO_MAP U(0x27A) +#define TMI_FNUM_MMIO_UNMAP U(0x27B) +#define TMI_FNUM_MMIO_WRITE U(0x27C) +#define TMI_FNUM_MMIO_READ U(0x27D) +#define TMI_FNUM_DEV_DELEGATE U(0x27E) +#define TMI_FNUM_DEV_ATTACH U(0x27F) +#define TMI_FNUM_HANDLE_S_EVTQ U(0x280) +#define TMI_FNUM_SMMU_DEVICE_RESET U(0x281) +#define TMI_FNUM_SMMU_WRITE U(0x282) +#define TMI_FNUM_SMMU_READ U(0x283) +#define TMI_FNUM_SMMU_PCIE_CORE_CHECK U(0x284)
/* TMI SMC64 PIDs handled by the SPMD */ -#define TMI_TMM_VERSION_REQ TMI_FID(SMC_64, TMI_FNUM_VERSION_REQ) -#define TMI_TMM_DATA_CREATE TMI_FID(SMC_64, TMI_FNUM_DATA_CREATE) -#define TMI_TMM_DATA_DESTROY TMI_FID(SMC_64, TMI_FNUM_DATA_DESTROY) -#define TMI_TMM_CVM_ACTIVATE TMI_FID(SMC_64, TMI_FNUM_CVM_ACTIVATE) -#define TMI_TMM_CVM_CREATE TMI_FID(SMC_64, TMI_FNUM_CVM_CREATE) -#define TMI_TMM_CVM_DESTROY TMI_FID(SMC_64, TMI_FNUM_CVM_DESTROY) -#define TMI_TMM_TEC_CREATE TMI_FID(SMC_64, TMI_FNUM_TEC_CREATE) -#define TMI_TMM_TEC_DESTROY TMI_FID(SMC_64, TMI_FNUM_TEC_DESTROY) -#define TMI_TMM_TEC_ENTER TMI_FID(SMC_64, TMI_FNUM_TEC_ENTER) -#define TMI_TMM_TTT_CREATE TMI_FID(SMC_64, TMI_FNUM_TTT_CREATE) -#define TMI_TMM_PSCI_COMPLETE TMI_FID(SMC_64, TMI_FNUM_PSCI_COMPLETE) -#define TMI_TMM_FEATURES TMI_FID(SMC_64, TMI_FNUM_FEATURES) -#define TMI_TMM_MEM_INFO_SHOW TMI_FID(SMC_64, TMI_FNUM_MEM_INFO_SHOW) -#define TMI_TMM_TTT_MAP_RANGE TMI_FID(SMC_64, TMI_FNUM_TTT_MAP_RANGE) -#define TMI_TMM_TTT_UNMAP_RANGE TMI_FID(SMC_64, TMI_FNUM_TTT_UNMAP_RANGE) +#define TMI_TMM_VERSION_REQ TMI_FID(SMC_64, TMI_FNUM_VERSION_REQ) +#define TMI_TMM_DATA_CREATE TMI_FID(SMC_64, TMI_FNUM_DATA_CREATE) +#define TMI_TMM_DATA_DESTROY TMI_FID(SMC_64, TMI_FNUM_DATA_DESTROY) +#define TMI_TMM_CVM_ACTIVATE TMI_FID(SMC_64, TMI_FNUM_CVM_ACTIVATE) +#define TMI_TMM_CVM_CREATE TMI_FID(SMC_64, TMI_FNUM_CVM_CREATE) +#define TMI_TMM_CVM_DESTROY TMI_FID(SMC_64, TMI_FNUM_CVM_DESTROY) +#define TMI_TMM_TEC_CREATE TMI_FID(SMC_64, TMI_FNUM_TEC_CREATE) +#define TMI_TMM_TEC_DESTROY TMI_FID(SMC_64, TMI_FNUM_TEC_DESTROY) +#define TMI_TMM_TEC_ENTER TMI_FID(SMC_64, TMI_FNUM_TEC_ENTER) +#define TMI_TMM_TTT_CREATE TMI_FID(SMC_64, TMI_FNUM_TTT_CREATE) +#define TMI_TMM_PSCI_COMPLETE TMI_FID(SMC_64, TMI_FNUM_PSCI_COMPLETE) +#define TMI_TMM_FEATURES TMI_FID(SMC_64, TMI_FNUM_FEATURES) +#define TMI_TMM_MEM_INFO_SHOW TMI_FID(SMC_64, TMI_FNUM_MEM_INFO_SHOW) +#define TMI_TMM_TTT_MAP_RANGE TMI_FID(SMC_64, TMI_FNUM_TTT_MAP_RANGE) +#define TMI_TMM_TTT_UNMAP_RANGE TMI_FID(SMC_64, TMI_FNUM_TTT_UNMAP_RANGE) #define TMI_TMM_INF_TEST TMI_FID(SMC_64, TMI_FNUM_INF_TEST)
+#define TMI_TMM_SMMU_QUEUE_CREATE TMI_FID(SMC_64, TMI_FNUM_SMMU_QUEUE_CREATE) +#define TMI_TMM_SMMU_QUEUE_WRITE TMI_FID(SMC_64, TMI_FNUM_SMMU_QUEUE_WRITE) +#define TMI_TMM_SMMU_STE_CREATE TMI_FID(SMC_64, TMI_FNUM_SMMU_STE_CREATE) +#define TMI_TMM_MMIO_MAP TMI_FID(SMC_64, TMI_FNUM_MMIO_MAP) +#define TMI_TMM_MMIO_UNMAP TMI_FID(SMC_64, TMI_FNUM_MMIO_UNMAP) +#define TMI_TMM_MMIO_WRITE TMI_FID(SMC_64, TMI_FNUM_MMIO_WRITE) +#define TMI_TMM_MMIO_READ TMI_FID(SMC_64, TMI_FNUM_MMIO_READ) +#define TMI_TMM_DEV_DELEGATE TMI_FID(SMC_64, TMI_FNUM_DEV_DELEGATE) +#define TMI_TMM_DEV_ATTACH TMI_FID(SMC_64, TMI_FNUM_DEV_ATTACH) +#define TMI_TMM_HANDLE_S_EVTQ TMI_FID(SMC_64, TMI_FNUM_HANDLE_S_EVTQ) +#define TMI_TMM_SMMU_DEVICE_RESET TMI_FID(SMC_64, TMI_FNUM_SMMU_DEVICE_RESET) +#define TMI_TMM_SMMU_WRITE TMI_FID(SMC_64, TMI_FNUM_SMMU_WRITE) +#define TMI_TMM_SMMU_READ TMI_FID(SMC_64, TMI_FNUM_SMMU_READ) +#define TMI_TMM_SMMU_PCIE_CORE_CHECK TMI_FID(SMC_64, TMI_FNUM_SMMU_PCIE_CORE_CHECK) + #define TMI_ABI_VERSION_GET_MAJOR(_version) ((_version) >> 16) #define TMI_ABI_VERSION_GET_MINOR(_version) ((_version) & 0xFFFF)
-#define TMI_ABI_VERSION_MAJOR U(0x1) +#define TMI_ABI_VERSION_MAJOR U(0x2)
/* KVM_CAP_ARM_TMM on VM fd */ #define KVM_CAP_ARM_TMM_CONFIG_CVM_HOST 0 @@ -321,6 +382,22 @@ u64 tmi_ttt_map_range(u64 rd, u64 map_addr, u64 size, u64 cur_node, u64 target_n u64 tmi_ttt_unmap_range(u64 rd, u64 map_addr, u64 size, u64 node_id); u64 tmi_mem_info_show(u64 mem_info_addr);
+ +u64 tmi_smmu_queue_create(u64 params_ptr); +u64 tmi_smmu_queue_write(uint64_t cmd0, uint64_t cmd1, u64 smmu_id); +u64 tmi_smmu_ste_create(u64 params_ptr); +u64 tmi_mmio_map(u64 rd, u64 map_addr, u64 level, u64 ttte); +u64 tmi_mmio_unmap(u64 rd, u64 map_addr, u64 level); +u64 tmi_mmio_write(u64 addr, u64 val, u64 bits, u64 dev_num); +u64 tmi_mmio_read(u64 addr, u64 bits, u64 dev_num); +u64 tmi_dev_delegate(u64 params); +u64 tmi_dev_attach(u64 vdev, u64 rd, u64 smmu_id); +u64 tmi_handle_s_evtq(u64 smmu_id); +u64 tmi_smmu_device_reset(u64 params); +u64 tmi_smmu_pcie_core_check(u64 smmu_base); +u64 tmi_smmu_write(u64 smmu_base, u64 reg_offset, u64 val, u64 bits); +u64 tmi_smmu_read(u64 smmu_base, u64 reg_offset, u64 bits); + void kvm_cvm_vcpu_put(struct kvm_vcpu *vcpu); int kvm_load_user_data(struct kvm *kvm, unsigned long arg); unsigned long cvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu, @@ -329,5 +406,12 @@ int kvm_cvm_vcpu_set_events(struct kvm_vcpu *vcpu, bool serror_pending, bool ext_dabt_pending); int kvm_init_cvm_vm(struct kvm *kvm); int kvm_enable_virtcca_cvm(struct kvm *kvm); + +bool virtcca_is_available(void); +#else +static inline bool virtcca_is_available(void) +{ + return false; +} #endif #endif diff --git a/arch/arm64/include/asm/kvm_tmm.h b/arch/arm64/include/asm/kvm_tmm.h index 96532ca8fd..2fd6d2bdb9 100644 --- a/arch/arm64/include/asm/kvm_tmm.h +++ b/arch/arm64/include/asm/kvm_tmm.h @@ -7,6 +7,9 @@
#include <uapi/linux/kvm.h>
+#define CVM_MSI_ORIG_IOVA 0x8000000 +#define CVM_MSI_IOVA_OFFSET (-0x1000000) + enum virtcca_cvm_state { CVM_STATE_NONE = 1, CVM_STATE_NEW, @@ -59,6 +62,7 @@ struct virtcca_cvm { u64 ram_size; struct kvm_numa_info numa_info; struct tmi_cvm_params *params; + bool is_mapped; };
/* @@ -70,6 +74,13 @@ struct virtcca_cvm_tec { void *tec_run; };
+struct cvm_ttt_addr { + struct list_head list; + u64 addr; +}; + +struct iommu_group {}; + int kvm_init_tmm(void); int kvm_cvm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap); void kvm_destroy_cvm(struct kvm *kvm); @@ -83,9 +94,20 @@ int cvm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target);
void kvm_cvm_unmap_destroy_range(struct kvm *kvm);
+int kvm_cvm_map_range(struct kvm *kvm); +int cvm_arm_smmu_domain_set_kvm(struct iommu_group *group); +int kvm_cvm_map_unmap_ipa_range(struct kvm *kvm, phys_addr_t ipa_base, phys_addr_t pa, + unsigned long map_size, uint32_t is_map); +int kvm_cvm_map_ipa_mmio(struct kvm *kvm, phys_addr_t ipa_base, + phys_addr_t pa, unsigned long map_size); + #define CVM_TTT_BLOCK_LEVEL 2 #define CVM_TTT_MAX_LEVEL 3
+#define CVM_MAP_IPA_RAM 1 +#define CVM_MAP_IPA_SMMU 2 +#define CVM_MAP_IPA_UNPROTECTED 4 + #define CVM_PAGE_SHIFT 12 #define CVM_PAGE_SIZE BIT(CVM_PAGE_SHIFT) #define CVM_TTT_LEVEL_SHIFT(l) \ diff --git a/arch/arm64/kvm/tmi.c b/arch/arm64/kvm/tmi.c index c1f22139d7..006f658645 100644 --- a/arch/arm64/kvm/tmi.c +++ b/arch/arm64/kvm/tmi.c @@ -138,4 +138,130 @@ u64 tmi_tmm_inf_test(u64 x1, u64 x2, u64 x3, u64 x4, u64 x5) arm_smccc_1_1_smc(TMI_TMM_INF_TEST, x1, vttbr_el2_pa, cvm_params_pa, tec_params_pa, x5, &res); return res.a1; } -EXPORT_SYMBOL_GPL(tmi_tmm_inf_test); \ No newline at end of file +EXPORT_SYMBOL_GPL(tmi_tmm_inf_test); + +u64 tmi_smmu_queue_create(u64 params_ptr) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_SMMU_QUEUE_CREATE, params_ptr, &res); + return res.a1; +} +EXPORT_SYMBOL_GPL(tmi_smmu_queue_create); + + +u64 tmi_smmu_queue_write(uint64_t cmd0, uint64_t cmd1, u64 smmu_id) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_SMMU_QUEUE_WRITE, cmd0, cmd1, smmu_id, &res); + return res.a1; +} +EXPORT_SYMBOL_GPL(tmi_smmu_queue_write); + +u64 tmi_smmu_ste_create(u64 params_ptr) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_SMMU_STE_CREATE, params_ptr, &res); + return res.a1; +} +EXPORT_SYMBOL_GPL(tmi_smmu_ste_create); + +u64 tmi_mmio_map(u64 rd, u64 map_addr, u64 level, u64 ttte) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_MMIO_MAP, rd, map_addr, level, ttte, &res); + return res.a1; +} + +u64 tmi_mmio_unmap(u64 rd, u64 map_addr, u64 level) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_MMIO_UNMAP, rd, map_addr, level, &res); + return res.a1; +} + +u64 tmi_mmio_write(u64 addr, u64 val, u64 bits, u64 dev_num) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_MMIO_WRITE, addr, val, bits, dev_num, &res); + return res.a1; +} +EXPORT_SYMBOL(tmi_mmio_write); + +u64 tmi_mmio_read(u64 addr, u64 bits, u64 dev_num) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_MMIO_READ, addr, bits, dev_num, &res); + return res.a1; +} +EXPORT_SYMBOL(tmi_mmio_read); + +u64 tmi_dev_delegate(u64 params) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_DEV_DELEGATE, params, &res); + return res.a1; +} +EXPORT_SYMBOL(tmi_dev_delegate); + +u64 tmi_dev_attach(u64 vdev, u64 rd, u64 smmu_id) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_DEV_ATTACH, vdev, rd, smmu_id, &res); + return res.a1; +} +EXPORT_SYMBOL(tmi_dev_attach); + +u64 tmi_handle_s_evtq(u64 smmu_id) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_HANDLE_S_EVTQ, smmu_id, &res); + return res.a1; +} +EXPORT_SYMBOL(tmi_handle_s_evtq); + +u64 tmi_smmu_device_reset(u64 params) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_SMMU_DEVICE_RESET, params, &res); + return res.a1; +} +EXPORT_SYMBOL(tmi_smmu_device_reset); + +u64 tmi_smmu_pcie_core_check(u64 smmu_base) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_SMMU_PCIE_CORE_CHECK, smmu_base, &res); + return res.a1; +} +EXPORT_SYMBOL(tmi_smmu_pcie_core_check); + +u64 tmi_smmu_write(u64 smmu_base, u64 reg_offset, u64 val, u64 bits) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_SMMU_WRITE, smmu_base, reg_offset, val, bits, &res); + return res.a1; +} +EXPORT_SYMBOL(tmi_smmu_write); + +u64 tmi_smmu_read(u64 smmu_base, u64 reg_offset, u64 bits) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_SMMU_READ, smmu_base, reg_offset, bits, &res); + return res.a1; +} +EXPORT_SYMBOL(tmi_smmu_read); + diff --git 
a/arch/arm64/kvm/virtcca_cvm.c b/arch/arm64/kvm/virtcca_cvm.c index 367bbf4fa3..73e61528a7 100644 --- a/arch/arm64/kvm/virtcca_cvm.c +++ b/arch/arm64/kvm/virtcca_cvm.c @@ -521,7 +521,7 @@ static int kvm_tmm_config_cvm(struct kvm *kvm, struct kvm_enable_cap *cap) return r; }
-static int kvm_cvm_map_range(struct kvm *kvm) +int kvm_cvm_map_range(struct kvm *kvm) { int ret; u64 curr_numa_set; @@ -887,3 +887,11 @@ int kvm_init_cvm_vm(struct kvm *kvm)
return 0; } + +bool virtcca_is_available(void) +{ + if (static_branch_unlikely(&virtcca_cvm_is_available)) + return true; + return false; +} +EXPORT_SYMBOL(virtcca_is_available); diff --git a/drivers/iommu/arm/arm-smmu-v3/Makefile b/drivers/iommu/arm/arm-smmu-v3/Makefile index 54feb1eccc..e4020cafcc 100644 --- a/drivers/iommu/arm/arm-smmu-v3/Makefile +++ b/drivers/iommu/arm/arm-smmu-v3/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_ARM_SMMU_V3) += arm_smmu_v3.o arm_smmu_v3-objs-y += arm-smmu-v3.o +arm_smmu_v3-objs-$(CONFIG_HISI_VIRTCCA_HOST) += arm-s-smmu-v3.o arm_smmu_v3-objs-$(CONFIG_ARM_SMMU_V3_SVA) += arm-smmu-v3-sva.o arm_smmu_v3-objs := $(arm_smmu_v3-objs-y) diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-s-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-s-smmu-v3.c new file mode 100644 index 0000000000..4e596ff19d --- /dev/null +++ b/drivers/iommu/arm/arm-smmu-v3/arm-s-smmu-v3.c @@ -0,0 +1,841 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, The Linux Foundation. All rights reserved. + */ +#include <linux/crash_dump.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/iopoll.h> +#include <linux/pci.h> +#include <linux/hashtable.h> +#include <asm/kvm_tmi.h> + +#include "arm-smmu-v3.h" +#include "arm-s-smmu-v3.h" +#include "../../dma-iommu.h" + +struct cc_dev_config { + u32 sid; /* BDF number of the device */ + u32 vmid; + u32 root_bd; /* root bus and device number. Multiple sid can have the same root_bd. */ + bool secure; + struct hlist_node node; +}; + +static bool g_smmu_id_map_init; + +static DEFINE_HASHTABLE(g_cc_dev_htable, MAX_CC_DEV_NUM_ORDER); +static DECLARE_BITMAP(g_smmu_id_map, ARM_SMMU_MAX_IDS); + +/* Traverse pcie topology to find the root <bus,device> number + * return -1 if error + * return -1 if not pci device + */ +int get_root_bd(struct device *dev) +{ + struct pci_dev *pdev; + + if (!dev_is_pci(dev)) + return -1; + pdev = to_pci_dev(dev); + if (pdev->bus == NULL) + return -1; + while (pdev->bus->parent != NULL) + pdev = pdev->bus->self; + + return pci_dev_id(pdev) & 0xfff8; +} + +/* if dev is a bridge, get all it's children. + * if dev is a regular device, get itself. 
+ */ +void get_child_devices_rec(struct pci_dev *dev, uint16_t *devs, int max_devs, int *ndev) +{ + struct pci_bus *bus = dev->subordinate; + + if (bus) { /* dev is a bridge */ + struct pci_dev *child; + + list_for_each_entry(child, &bus->devices, bus_list) { + get_child_devices_rec(child, devs, max_devs, ndev); + } + } else { /* dev is a regular device */ + uint16_t bdf = pci_dev_id(dev); + int i; + /* check if bdf is already in devs */ + for (i = 0; i < *ndev; i++) { + if (devs[i] == bdf) + return; + } + /* check overflow */ + if (*ndev >= max_devs) { + WARN_ON(1); + return; + } + devs[*ndev] = bdf; + *ndev = *ndev + 1; + } +} + +/* get all devices which share the same root_bd as dev + * return 0 on failure + */ +int get_sibling_devices(struct device *dev, uint16_t *devs, int max_devs) +{ + int ndev = 0; + struct pci_dev *pdev; + + if (!dev_is_pci(dev)) + return ndev; + + pdev = to_pci_dev(dev); + if (pdev->bus == NULL) + return ndev; + + while (pdev->bus->parent != NULL) + pdev = pdev->bus->self; + + get_child_devices_rec(pdev, devs, max_devs, &ndev); + return ndev; +} + +void set_g_cc_dev(u32 sid, u32 vmid, u32 root_bd, bool secure) +{ + struct cc_dev_config *obj; + + hash_for_each_possible(g_cc_dev_htable, obj, node, sid) { + if (obj->sid == sid) { + obj->vmid = vmid; + obj->root_bd = root_bd; + obj->secure = secure; + return; + } + } + + obj = kzalloc(sizeof(*obj), GFP_KERNEL); + if (!obj) { + WARN_ON(1); + return; + } + + obj->sid = sid; + obj->vmid = vmid; + obj->root_bd = root_bd; + obj->secure = secure; + + hash_add(g_cc_dev_htable, &obj->node, sid); +} + +void free_g_cc_dev_htable(void) +{ + int i; + struct cc_dev_config *obj; + struct hlist_node *tmp; + + if (!virtcca_is_available()) + return; + + hash_for_each_safe(g_cc_dev_htable, i, tmp, obj, node) { + hash_del(&obj->node); + kfree(obj); + } +} + +/* Has the root bus device number switched to secure? 
*/ +bool is_cc_root_bd(u32 root_bd) +{ + int bkt; + struct cc_dev_config *obj; + + hash_for_each(g_cc_dev_htable, bkt, obj, node) { + if (obj->root_bd == root_bd && obj->secure) + return true; + } + + return false; +} + +static bool is_cc_vmid(u32 vmid) +{ + int bkt; + struct cc_dev_config *obj; + + hash_for_each(g_cc_dev_htable, bkt, obj, node) { + if (vmid > 0 && obj->vmid == vmid) + return true; + } + + return false; +} + +bool is_cc_dev(u32 sid) +{ + struct cc_dev_config *obj; + + hash_for_each_possible(g_cc_dev_htable, obj, node, sid) { + if (obj != NULL && obj->sid == sid) + return obj->secure; + } + + return false; +} +EXPORT_SYMBOL(is_cc_dev); + +void s_smmu_cmdq_need_forward(u64 cmd0, u64 cmd1, u64 *forward) +{ + u64 opcode = FIELD_GET(CMDQ_0_OP, cmd0); + + switch (opcode) { + case CMDQ_OP_TLBI_EL2_ALL: + case CMDQ_OP_TLBI_NSNH_ALL: + *forward = 1; + break; + case CMDQ_OP_PREFETCH_CFG: + case CMDQ_OP_CFGI_CD: + case CMDQ_OP_CFGI_STE: + case CMDQ_OP_CFGI_CD_ALL: + *forward = (uint64_t)is_cc_dev(FIELD_GET(CMDQ_CFGI_0_SID, cmd0)); + break; + + case CMDQ_OP_CFGI_ALL: + *forward = 1; + break; + case CMDQ_OP_TLBI_NH_VA: + case CMDQ_OP_TLBI_S2_IPA: + case CMDQ_OP_TLBI_NH_ASID: + case CMDQ_OP_TLBI_S12_VMALL: + *forward = (uint64_t)is_cc_vmid(FIELD_GET(CMDQ_TLBI_0_VMID, cmd0)); + break; + case CMDQ_OP_TLBI_EL2_VA: + case CMDQ_OP_TLBI_EL2_ASID: + *forward = 0; + break; + case CMDQ_OP_ATC_INV: + *forward = (uint64_t)is_cc_dev(FIELD_GET(CMDQ_ATC_0_SID, cmd0)); + break; + case CMDQ_OP_PRI_RESP: + *forward = (uint64_t)is_cc_dev(FIELD_GET(CMDQ_PRI_0_SID, cmd0)); + break; + case CMDQ_OP_RESUME: + *forward = (uint64_t)is_cc_dev(FIELD_GET(CMDQ_RESUME_0_SID, cmd0)); + break; + case CMDQ_OP_CMD_SYNC: + *forward = 0; + break; + default: + *forward = 0; + } +} + +void s_queue_write(struct arm_smmu_device *smmu, u64 *src, size_t n_dwords) +{ + u64 cmd0, cmd1; + u64 forward = 0; + + if (smmu->virtcca_id != ARM_SMMU_INVALID_ID) { + if (n_dwords == 2) { + cmd0 = cpu_to_le64(src[0]); + cmd1 = cpu_to_le64(src[1]); + s_smmu_cmdq_need_forward(cmd0, cmd1, &forward); + + /* need forward queue command to TMM */ + if (forward) { + if (tmi_smmu_queue_write(cmd0, cmd1, smmu->virtcca_id)) + dev_err(smmu->dev, "S_SMMU: s queue write failed\n"); + } + } + } +} + +void arm_s_smmu_cmdq_write_entries(struct arm_smmu_device *smmu, u64 *cmds, int n) +{ + int i; + + if (smmu->virtcca_id != ARM_SMMU_INVALID_ID) { + for (i = 0; i < n; ++i) { + u64 *cmd = &cmds[i * CMDQ_ENT_DWORDS]; + + s_queue_write(smmu, cmd, CMDQ_ENT_DWORDS); + } + } +} + +void arm_s_smmu_init_one_queue(struct arm_smmu_device *smmu, + struct arm_smmu_queue *q, + size_t qsz, const char *name) +{ + if (smmu->virtcca_id != ARM_SMMU_INVALID_ID) { + struct tmi_smmu_queue_params *params_ptr = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); + + if (!params_ptr) { + smmu->virtcca_id = ARM_SMMU_INVALID_ID; + return; + } + + if (!strcmp(name, "cmdq")) { + params_ptr->ns_src = q->base_dma; + params_ptr->smmu_base_addr = smmu->ioaddr; + params_ptr->size = qsz; + params_ptr->smmu_id = smmu->virtcca_id; + params_ptr->type = TMI_SMMU_CMD_QUEUE; + tmi_smmu_queue_create(__pa(params_ptr)); + } + + if (!strcmp(name, "evtq")) { + params_ptr->ns_src = q->base_dma; + params_ptr->smmu_base_addr = smmu->ioaddr; + params_ptr->size = qsz; + params_ptr->smmu_id = smmu->virtcca_id; + params_ptr->type = TMI_SMMU_EVT_QUEUE; + tmi_smmu_queue_create(__pa(params_ptr)); + } + + kfree(params_ptr); + } + +} + +int arm_smmu_write_s_reg_sync(struct arm_smmu_device *smmu, u32 val, u32 cmp_val, + unsigned 
int reg_off, unsigned int ack_off) +{ + u32 reg; + + if (tmi_smmu_write(smmu->ioaddr, reg_off, val, 32)) + return -ENXIO; + + return virtcca_cvm_read_poll_timeout_atomic(tmi_smmu_read, reg, reg == cmp_val, + 1, ARM_SMMU_POLL_TIMEOUT_US, false, + smmu->ioaddr, ack_off, 32); +} + +int arm_smmu_update_s_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr) +{ + int ret; + u32 reg; + + ret = virtcca_cvm_read_poll_timeout_atomic(tmi_smmu_read, reg, !(reg & S_GBPA_UPDATE), + 1, ARM_SMMU_POLL_TIMEOUT_US, false, + smmu->ioaddr, ARM_SMMU_S_GBPA, 32); + if (ret) + return ret; + + reg &= ~clr; + reg |= set; + + ret = tmi_smmu_write(smmu->ioaddr, ARM_SMMU_S_GBPA, reg | S_GBPA_UPDATE, 32); + if (ret) + return ret; + + ret = virtcca_cvm_read_poll_timeout_atomic(tmi_smmu_read, reg, !(reg & S_GBPA_UPDATE), + 1, ARM_SMMU_POLL_TIMEOUT_US, false, + smmu->ioaddr, ARM_SMMU_S_GBPA, 32); + if (ret) + dev_err(smmu->dev, "S_SMMU: S_GBPA not responding to update\n"); + return ret; +} + +irqreturn_t arm_smmu_s_evtq_thread(int irq, void *dev) +{ + struct arm_smmu_device *smmu = dev; + + if (smmu->virtcca_id != ARM_SMMU_INVALID_ID) + tmi_handle_s_evtq(smmu->virtcca_id); + + return IRQ_HANDLED; +} + +irqreturn_t arm_smmu_s_gerror_handler(int irq, void *dev) +{ + u32 gerror, gerrorn, active; + u64 ret; + struct arm_smmu_device *smmu = dev; + + ret = tmi_smmu_read(smmu->ioaddr, ARM_SMMU_S_GERROR, 32); + if (ret >> 32) { + dev_err(smmu->dev, "Get ARM_SMMU_S_GERROR register failed\n"); + return IRQ_NONE; + } + gerror = (u32)ret; + + ret = tmi_smmu_read(smmu->ioaddr, ARM_SMMU_S_GERRORN, 32); + if (ret >> 32) { + dev_err(smmu->dev, "Get ARM_SMMU_S_GERRORN register failed\n"); + return IRQ_NONE; + } + gerrorn = (u32)ret; + + active = gerror ^ gerrorn; + if (!(active & GERROR_ERR_MASK)) + return IRQ_NONE; /* No errors pending */ + + dev_warn(smmu->dev, + "unexpected secure global error reported, this could be serious, active %x\n", + active); + + if (active & GERROR_SFM_ERR) { + dev_err(smmu->dev, "device has entered Service Failure Mode!\n"); + arm_s_smmu_device_disable(smmu); + } + + if (active & GERROR_MSI_GERROR_ABT_ERR) + dev_warn(smmu->dev, "GERROR MSI write aborted\n"); + + if (active & GERROR_MSI_PRIQ_ABT_ERR) + dev_warn(smmu->dev, "PRIQ MSI write aborted\n"); + + if (active & GERROR_MSI_EVTQ_ABT_ERR) + dev_warn(smmu->dev, "EVTQ MSI write aborted\n"); + + if (active & GERROR_MSI_CMDQ_ABT_ERR) + dev_warn(smmu->dev, "CMDQ MSI write aborted\n"); + + if (active & GERROR_PRIQ_ABT_ERR) + dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n"); + + if (active & GERROR_EVTQ_ABT_ERR) + dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n"); + + if (active & GERROR_CMDQ_ERR) + dev_warn(smmu->dev, "CMDQ ERR\n"); + + if (tmi_smmu_write(smmu->ioaddr, ARM_SMMU_S_GERRORN, gerror, 32)) { + dev_err(smmu->dev, "SMMU write ARM_SMMU_S_GERRORN failed\n"); + return IRQ_NONE; + } + + return IRQ_HANDLED; +} + +void arm_smmu_disable_s_irq(struct arm_smmu_device *smmu) +{ + int ret; + + if (smmu->virtcca_id != ARM_SMMU_INVALID_ID) { + ret = arm_smmu_write_s_reg_sync(smmu, 0, 0, + ARM_SMMU_S_IRQ_CTRL, ARM_SMMU_S_IRQ_CTRLACK); + if (ret) { + dev_err(smmu->dev, "S_SMMU: failed to disable secure irqs\n"); + smmu->virtcca_id = ARM_SMMU_INVALID_ID; + } + } +} + +void arm_smmu_enable_s_irq(struct arm_smmu_device *smmu, u32 irqen_flags) +{ + int ret; + + if (smmu->virtcca_id != ARM_SMMU_INVALID_ID) { + ret = arm_smmu_write_s_reg_sync(smmu, irqen_flags, + irqen_flags, ARM_SMMU_S_IRQ_CTRL, ARM_SMMU_S_IRQ_CTRLACK); + if 
(ret) { + dev_err(smmu->dev, "S_SMMU: failed to enable irq for secure evtq\n"); + smmu->virtcca_id = ARM_SMMU_INVALID_ID; + } + } +} + +void arm_s_smmu_setup_unique_irqs(struct arm_smmu_device *smmu) +{ + int irq, ret; + + irq = smmu->s_evtq_irq; + if (smmu->virtcca_id != ARM_SMMU_INVALID_ID && irq) { + ret = devm_request_threaded_irq(smmu->dev, irq, NULL, + arm_smmu_s_evtq_thread, + IRQF_ONESHOT, + "arm-smmu-v3-s_evtq", smmu); + if (ret < 0) { + smmu->virtcca_id = ARM_SMMU_INVALID_ID; + dev_warn(smmu->dev, "S_SMMU: failed to enable s_evtq irq\n"); + } + } + + irq = smmu->s_gerr_irq; + if (smmu->virtcca_id != ARM_SMMU_INVALID_ID && irq) { + ret = devm_request_irq(smmu->dev, irq, arm_smmu_s_gerror_handler, + 0, "arm-smmu-v3-s_gerror", smmu); + if (ret < 0) { + smmu->virtcca_id = ARM_SMMU_INVALID_ID; + dev_warn(smmu->dev, "S_SMMU: failed to enable s_gerror irq\n"); + } + } +} + +void arm_smmu_write_s_msi_msg(struct arm_smmu_device *smmu, phys_addr_t *cfg, + struct msi_msg *msg, phys_addr_t doorbell) +{ + tmi_smmu_write((u64)smmu->ioaddr, cfg[0], doorbell, 64); + tmi_smmu_write((u64)smmu->ioaddr, cfg[1], msg->data, 32); + tmi_smmu_write((u64)smmu->ioaddr, cfg[2], ARM_SMMU_MEMATTR_DEVICE_nGnRE, 32); +} + +void platform_get_s_irq_byname_optional(struct platform_device *pdev, struct arm_smmu_device *smmu) +{ + int irq; + + if (smmu->virtcca_id != ARM_SMMU_INVALID_ID) { + irq = platform_get_irq_byname_optional(pdev, "s_eventq"); + if (irq > 0) + smmu->s_evtq_irq = irq; + + irq = platform_get_irq_byname_optional(pdev, "s_gerror"); + if (irq > 0) + smmu->s_gerr_irq = irq; + } +} + +int arm_smmu_enable_secure(struct iommu_domain *domain) +{ + int ret = 0; + struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + + mutex_lock(&smmu_domain->init_mutex); + if (smmu_domain->smmu) + ret = -EPERM; + else + smmu_domain->secure = true; + mutex_unlock(&smmu_domain->init_mutex); + + return ret; +} + +u32 arm_smmu_tmi_dev_attach(struct arm_smmu_domain *arm_smmu_domain, + struct kvm *kvm) +{ + int ret = -1; + u64 cmd[CMDQ_ENT_DWORDS] = {0}; + unsigned long flags; + int i, j; + struct arm_smmu_master *master; + struct virtcca_cvm *virtcca_cvm = (struct virtcca_cvm *)kvm->arch.virtcca_cvm; + + spin_lock_irqsave(&arm_smmu_domain->devices_lock, flags); + list_for_each_entry(master, &arm_smmu_domain->devices, domain_head) { + if (master && master->num_streams >= 0) { + for (i = 0; i < master->num_streams; ++i) { + u32 sid = master->streams[i].id; + + for (j = 0; j < i; j++) + if (master->streams[j].id == sid) + break; + if (j < i) + continue; + ret = tmi_dev_attach(sid, virtcca_cvm->rd, + arm_smmu_domain->smmu->virtcca_id); + if (ret) { + kvm_err("dev protected failed!\n"); + ret = -ENXIO; + goto out; + } + cmd[0] |= FIELD_PREP(CMDQ_0_OP, CMDQ_OP_CFGI_STE); + cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, sid); + cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_LEAF, true); + tmi_smmu_queue_write(cmd[0], cmd[1], + arm_smmu_domain->smmu->virtcca_id); + } + } + } + +out: + spin_unlock_irqrestore(&arm_smmu_domain->devices_lock, flags); + return ret; +} + +int arm_smmu_secure_dev_ste_create(struct arm_smmu_device *smmu, + struct arm_smmu_master *master, u32 sid) +{ + struct tmi_smmu_ste_params *params_ptr; + struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; + struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT]; + + params_ptr = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); + if (!params_ptr) + return -ENOMEM; + + /* Sync Level 2 STE to TMM */ + params_ptr->ns_src = desc->l2ptr_dma + ((sid & ((1 << STRTAB_SPLIT) - 1)) 
* STE_ENTRY_SIZE); + params_ptr->sid = sid; + params_ptr->smmu_id = smmu->virtcca_id; + + if (tmi_smmu_ste_create(__pa(params_ptr)) != 0) + dev_err(smmu->dev, "failed to create STE level 2"); + + kfree(params_ptr); + + return 0; +} + +int arm_smmu_secure_set_dev(struct arm_smmu_domain *smmu_domain, struct arm_smmu_master *master, + struct device *dev) +{ + int i, j; + u64 ret = 0; + uint16_t root_bd = get_root_bd(dev); + + WARN_ON_ONCE(root_bd < 0); + if (!is_cc_root_bd(root_bd)) { + struct tmi_dev_delegate_params *params = kzalloc( + sizeof(struct tmi_dev_delegate_params), GFP_KERNEL); + + params->root_bd = root_bd; + params->num_dev = get_sibling_devices(dev, params->devs, MAX_DEV_PER_PORT); + WARN_ON_ONCE(params->num_dev == 0); + pr_info("Delegate %d devices as %02x:%02x to secure\n", + params->num_dev, root_bd >> 8, (root_bd & 0xff) >> 3); + ret = tmi_dev_delegate(__pa(params)); + if (!ret) { + for (i = 0; i < params->num_dev; i++) + set_g_cc_dev(params->devs[i], 0, root_bd, true); + } + kfree(params); + } + + if (ret) { + pr_info("S_SMMU: failed to Delegate device to secure\n"); + return ret; + } + + for (i = 0; i < master->num_streams; ++i) { + u32 sid = master->streams[i].id; + + for (j = 0; j < i; j++) + if (master->streams[j].id == sid) + break; + if (j < i) + continue; + WARN_ON_ONCE(!is_cc_dev(sid)); + set_g_cc_dev(sid, smmu_domain->s2_cfg.vmid, root_bd, true); + } + + return ret; +} + +int arm_smmu_id_alloc(void) +{ + int idx; + + do { + idx = find_first_zero_bit(g_smmu_id_map, ARM_SMMU_MAX_IDS); + if (idx == ARM_SMMU_MAX_IDS) + return -ENOSPC; + } while (test_and_set_bit(idx, g_smmu_id_map)); + + return idx; +} + +void arm_smmu_id_free(int idx) +{ + if (idx != ARM_SMMU_INVALID_ID) + clear_bit(idx, g_smmu_id_map); +} + +void arm_smmu_map_init(struct arm_smmu_device *smmu, resource_size_t ioaddr) +{ + if (!virtcca_is_available()) + return; + + if (!g_smmu_id_map_init) { + set_bit(0, g_smmu_id_map); + g_smmu_id_map_init = true; + } + smmu->ioaddr = ioaddr; + + if (tmi_smmu_pcie_core_check(ioaddr)) + smmu->virtcca_id = arm_smmu_id_alloc(); + else + smmu->virtcca_id = ARM_SMMU_INVALID_ID; + + hash_init(g_cc_dev_htable); +} + +int arm_s_smmu_device_disable(struct arm_smmu_device *smmu) +{ + int ret = 0; + + if (smmu->virtcca_id != ARM_SMMU_INVALID_ID) { + ret = arm_smmu_write_s_reg_sync(smmu, 0, 0, ARM_SMMU_S_CR0, ARM_SMMU_S_CR0ACK); + if (ret) { + dev_err(smmu->dev, "S_SMMU: failed to clear s_cr0\n"); + smmu->virtcca_id = ARM_SMMU_INVALID_ID; + return ret; + } + } + return 0; +} + +void arm_s_smmu_device_reset(struct arm_smmu_device *smmu) +{ + int ret; + u64 rv; + u32 reg, enables; + struct tmi_smmu_cfg_params *params_ptr; + + if (smmu->virtcca_id == ARM_SMMU_INVALID_ID) + return; + + rv = tmi_smmu_read(smmu->ioaddr, ARM_SMMU_S_CR0, 32); + if (rv >> 32) { + dev_err(smmu->dev, "S_SMMU: failed to read s_cr0\n"); + smmu->virtcca_id = ARM_SMMU_INVALID_ID; + return; + } + + ret = (int)rv; + if (ret & S_CR0_SMMUEN) { + dev_warn(smmu->dev, "Secure SMMU currently enabled! 
Resetting...\n"); + arm_smmu_update_s_gbpa(smmu, S_GBPA_ABORT, 0); + } + + ret = arm_s_smmu_device_disable(smmu); + if (ret) { + dev_err(smmu->dev, "S_SMMU: failed to disable s smmu\n"); + return; + } + + /* CR1 (table and queue memory attributes) */ + reg = FIELD_PREP(CR1_TABLE_SH, ARM_SMMU_SH_ISH) | + FIELD_PREP(CR1_TABLE_OC, CR1_CACHE_WB) | + FIELD_PREP(CR1_TABLE_IC, CR1_CACHE_WB) | + FIELD_PREP(CR1_QUEUE_SH, ARM_SMMU_SH_ISH) | + FIELD_PREP(CR1_QUEUE_OC, CR1_CACHE_WB) | + FIELD_PREP(CR1_QUEUE_IC, CR1_CACHE_WB); + + ret = tmi_smmu_write(smmu->ioaddr, ARM_SMMU_S_CR1, reg, 32); + if (ret) { + dev_err(smmu->dev, "S_SMMU: failed to write s_cr1\n"); + smmu->virtcca_id = ARM_SMMU_INVALID_ID; + return; + } + + /* CR2 (random crap) */ + reg = CR2_PTM | CR2_RECINVSID; + + if (smmu->features & ARM_SMMU_FEAT_E2H) + reg |= CR2_E2H; + + ret = tmi_smmu_write(smmu->ioaddr, ARM_SMMU_S_CR2, reg, 32); + if (ret) { + dev_err(smmu->dev, "S_SMMU: failed to write s_cr2\n"); + smmu->virtcca_id = ARM_SMMU_INVALID_ID; + return; + } + + params_ptr = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); + if (!params_ptr) { + smmu->virtcca_id = ARM_SMMU_INVALID_ID; + return; + } + + params_ptr->is_cmd_queue = 1; + params_ptr->smmu_id = smmu->virtcca_id; + params_ptr->ioaddr = smmu->ioaddr; + params_ptr->strtab_base_RA_bit = + (smmu->strtab_cfg.strtab_base >> S_STRTAB_BASE_RA_SHIFT) & 0x1; + params_ptr->q_base_RA_WA_bit = + (smmu->cmdq.q.q_base >> S_CMDQ_BASE_RA_SHIFT) & 0x1; + if (tmi_smmu_device_reset(__pa(params_ptr)) != 0) { + dev_err(smmu->dev, "S_SMMU: failed to set s cmd queue regs\n"); + smmu->virtcca_id = ARM_SMMU_INVALID_ID; + return; + } + + enables = CR0_CMDQEN; + ret = arm_smmu_write_s_reg_sync(smmu, enables, enables, ARM_SMMU_S_CR0, + ARM_SMMU_S_CR0ACK); + if (ret) { + dev_err(smmu->dev, "S_SMMU: failed to enable secure command queue\n"); + smmu->virtcca_id = ARM_SMMU_INVALID_ID; + return; + } + + enables |= CR0_EVTQEN; + + /* Secure event queue */ + memset(params_ptr, 0, sizeof(struct tmi_smmu_ste_params)); + params_ptr->is_cmd_queue = 0; + params_ptr->ioaddr = smmu->ioaddr; + params_ptr->smmu_id = smmu->virtcca_id; + params_ptr->q_base_RA_WA_bit = + (smmu->evtq.q.q_base >> S_EVTQ_BASE_WA_SHIFT) & 0x1; + params_ptr->strtab_base_RA_bit = + (smmu->strtab_cfg.strtab_base >> S_STRTAB_BASE_RA_SHIFT) & 0x1; + if (tmi_smmu_device_reset(__pa(params_ptr)) != 0) { + dev_err(smmu->dev, "S_SMMU: failed to set s event queue regs\n"); + smmu->virtcca_id = ARM_SMMU_INVALID_ID; + return; + } + + /* Enable secure eventq */ + ret = arm_smmu_write_s_reg_sync(smmu, enables, enables, ARM_SMMU_S_CR0, + ARM_SMMU_S_CR0ACK); + if (ret) { + dev_err(smmu->dev, "S_SMMU: failed to disable secure event queue\n"); + smmu->virtcca_id = ARM_SMMU_INVALID_ID; + return; + } + + ret = arm_smmu_write_s_reg_sync(smmu, SMMU_S_INIT_INV_ALL, 0, + ARM_SMMU_S_INIT, ARM_SMMU_S_INIT); + if (ret) { + dev_err(smmu->dev, "S_SMMU: failed to write S_INIT\n"); + smmu->virtcca_id = ARM_SMMU_INVALID_ID; + return; + } +} + +void arm_s_smmu_device_enable(struct arm_smmu_device *smmu, + u32 enables, bool bypass, bool disable_bypass) +{ + int ret = 0; + + if (smmu->virtcca_id != ARM_SMMU_INVALID_ID) { + /* Enable the SMMU interface, or ensure bypass */ + if (!bypass || disable_bypass) { + enables |= CR0_SMMUEN; + } else { + ret = arm_smmu_update_s_gbpa(smmu, 0, S_GBPA_ABORT); + if (ret) { + dev_err(smmu->dev, "S_SMMU: failed to update s gbpa!\n"); + smmu->virtcca_id = ARM_SMMU_INVALID_ID; + return; + } + } + /* Mask BIT1 and BIT4 which are RES0 in SMMU_S_CRO */ + ret = 
arm_smmu_write_s_reg_sync(smmu, enables & ~SMMU_S_CR0_RESERVED, + enables & ~SMMU_S_CR0_RESERVED, ARM_SMMU_S_CR0, ARM_SMMU_S_CR0ACK); + dev_info(smmu->dev, "S_SMMU: secure smmu id:%lld init end!\n", smmu->virtcca_id); + + if (ret) { + dev_err(smmu->dev, "S_SMMU: failed to enable s smmu!\n"); + smmu->virtcca_id = ARM_SMMU_INVALID_ID; + } + } +} + +void arm_smmu_s_idr1_support_secure(struct arm_smmu_device *smmu) +{ + u32 ret; + u64 rv; + + if (smmu->virtcca_id != ARM_SMMU_INVALID_ID) { + rv = tmi_smmu_read(smmu->ioaddr, ARM_SMMU_S_IDR1, 32); + if (rv >> 32) { + dev_err(smmu->dev, "S_SMMU: get ARM_SMMU_S_IDR1 register failed!\n"); + smmu->virtcca_id = ARM_SMMU_INVALID_ID; + return; + } + ret = (u32)rv; + if (!(ret & S_IDR1_SECURE_IMPL)) { + dev_err(smmu->dev, "S_SMMU: does not implement secure state!\n"); + smmu->virtcca_id = ARM_SMMU_INVALID_ID; + return; + } + + if (!(ret & S_IDR1_SEL2)) { + dev_err(smmu->dev, "S_SMMU: secure stage2 translation not supported!\n"); + smmu->virtcca_id = ARM_SMMU_INVALID_ID; + return; + } + dev_info(smmu->dev, "S_SMMU: secure smmu id:%lld start init!\n", smmu->virtcca_id); + } +} diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-s-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-s-smmu-v3.h new file mode 100644 index 0000000000..5237439f85 --- /dev/null +++ b/drivers/iommu/arm/arm-smmu-v3/arm-s-smmu-v3.h @@ -0,0 +1,198 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024, The Linux Foundation. All rights reserved. + */ +#ifndef _ARM_S_SMMU_V3_H +#define _ARM_S_SMMU_V3_H +#ifdef CONFIG_HISI_VIRTCCA_HOST + +#include <linux/platform_device.h> + +#define MAX_CC_DEV_NUM_ORDER 8 +#define STE_ENTRY_SIZE 0x40 + +#define ARM_SMMU_MAX_IDS (1 << 5) +#define ARM_SMMU_INVALID_ID 0 + +/* Secure MMIO registers */ +#define ARM_SMMU_S_IDR0 0x8000 +#define S_IDR0_STALL_MODEL GENMASK(25, 24) +#define S_IDR0_ECMDQ (1 << 31) +#define S_IDR0_MSI (1 << 13) + +#define ARM_SMMU_S_IDR1 0x8004 +#define S_IDR1_SECURE_IMPL (1 << 31) +#define S_IDR1_SEL2 (1 << 29) +#define S_IDR1_SIDSIZE GENMASK(5, 0) + +#define ARM_SMMU_S_IDR3 0x800c +#define S_IDR3_SAMS (1 << 6) + +#define ARM_SMMU_S_CR0 0x8020 +#define S_CR0_SIF (1 << 9) +#define S_CR0_NSSTALLD (1 << 5) +#define S_CR0_CMDQEN (1 << 3) +#define S_CR0_EVTQEN (1 << 2) +#define S_CR0_SMMUEN (1 << 0) + +#define ARM_SMMU_S_CR0ACK 0x8024 + +#define ARM_SMMU_S_CR1 0x8028 +#define S_CR1_TABLE_SH GENMASK(11, 10) +#define S_CR1_TABLE_OC GENMASK(9, 8) +#define S_CR1_TABLE_IC GENMASK(7, 6) +#define S_CR1_QUEUE_SH GENMASK(5, 4) +#define S_CR1_QUEUE_OC GENMASK(3, 2) +#define S_CR1_QUEUE_IC GENMASK(1, 0) + +/* S_CR1 cacheability fields don't quite follow the usual TCR-style encoding */ +#define S_CR1_CACHE_NC 0 +#define S_CR1_CACHE_WB 1 +#define S_CR1_CACHE_WT 2 + +#define ARM_SMMU_S_CR2 0x802c +#define S_CR2_PTM (1 << 2) +#define S_CR2_RECINVSID (1 << 1) +#define S_CR2_E2H (1 << 0) + +#define ARM_SMMU_S_INIT U(0x803c) +/* SMMU_S_INIT register fields */ +#define SMMU_S_INIT_INV_ALL (1UL << 0) + +#define ARM_SMMU_S_GBPA 0x8044 +#define S_GBPA_UPDATE (1 << 31) +#define S_GBPA_ABORT (1 << 20) + +#define ARM_SMMU_S_IRQ_CTRL 0x8050 +#define S_IRQ_CTRL_EVTQ_IRQEN (1 << 2) +#define S_IRQ_CTRL_GERROR_IRQEN (1 << 0) + +#define ARM_SMMU_S_IRQ_CTRLACK 0x8054 + +#define ARM_SMMU_S_GERROR 0x8060 +#define S_GERROR_SFM_ERR (1 << 8) +#define S_GERROR_MSI_GERROR_ABT_ERR (1 << 7) +#define S_GERROR_MSI_EVTQ_ABT_ERR (1 << 5) +#define S_GERROR_MSI_CMDQ_ABT_ERR (1 << 4) +#define S_GERROR_EVTQ_ABT_ERR (1 << 2) +#define S_GERROR_CMDQ_ERR (1 << 0) + +#define 
ARM_SMMU_S_GERRORN 0x8064 + +#define ARM_SMMU_S_GERROR_IRQ_CFG0 0x8068 +#define ARM_SMMU_S_GERROR_IRQ_CFG1 0x8070 +#define ARM_SMMU_S_GERROR_IRQ_CFG2 0x8074 + +#define ARM_SMMU_S_STRTAB_BASE 0x8080 +#define S_STRTAB_BASE_RA_SHIFT 62 +#define S_STRTAB_BASE_RA (1UL << S_STRTAB_BASE_RA_SHIFT) +#define S_STRTAB_BASE_ADDR_MASK GENMASK_ULL(51, 6) + +#define ARM_SMMU_S_STRTAB_BASE_CFG 0x8088 +#define S_STRTAB_BASE_CFG_FMT GENMASK(17, 16) +#define S_STRTAB_BASE_CFG_SPLIT GENMASK(10, 6) +#define S_STRTAB_BASE_CFG_LOG2SIZE GENMASK(5, 0) + +#define ARM_SMMU_S_CMDQ_BASE 0x8090 +#define ARM_SMMU_S_CMDQ_PROD 0x8098 +#define ARM_SMMU_S_CMDQ_CONS 0x809c +#define S_CMDQ_BASE_ADDR_MASK GENMASK_ULL(51, 5) +#define S_CMDQ_BASE_RA_SHIFT 62 + +#define ARM_SMMU_S_EVTQ_BASE 0x80a0 +#define ARM_SMMU_S_EVTQ_PROD 0x80a8 +#define ARM_SMMU_S_EVTQ_CONS 0x80ac +#define ARM_SMMU_S_EVTQ_IRQ_CFG0 0x80b0 +#define ARM_SMMU_S_EVTQ_IRQ_CFG1 0x80b8 +#define ARM_SMMU_S_EVTQ_IRQ_CFG2 0x80bc +#define S_EVTQ_BASE_ADDR_MASK GENMASK_ULL(51, 5) +#define S_EVTQ_BASE_WA_SHIFT 62 + +/* + * BIT1 is PRIQEN, BIT4 is ATSCHK in SMMU_CRO + * BIT1 and BIT4 are RES0 in SMMU_S_CRO + */ +#define SMMU_S_CR0_RESERVED 0xFFFFFC12 + +#define virtcca_cvm_read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, \ + delay_before_read, args...) \ +({ \ + u64 __timeout_us = (timeout_us); \ + u64 rv = 0; \ + int result = 0; \ + unsigned long __delay_us = (delay_us); \ + ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \ + if (delay_before_read && __delay_us) \ + udelay(__delay_us); \ + for (;;) { \ + rv = op(args); \ + if (rv >> 32) { \ + result = -ENXIO; \ + break; \ + } \ + (val) = (u32)rv; \ + if (cond) \ + break; \ + if (__timeout_us && \ + ktime_compare(ktime_get(), __timeout) > 0) { \ + rv = op(args); \ + if (rv >> 32) { \ + result = -ENXIO; \ + break; \ + } \ + (val) = (u32)rv; \ + break; \ + } \ + if (__delay_us) \ + udelay(__delay_us); \ + cpu_relax(); \ + } \ + result ? result : ((cond) ? 0 : -ETIMEDOUT); \ +}) + +int get_root_bd(struct device *dev); +void get_child_devices_rec(struct pci_dev *dev, uint16_t *devs, int max_devs, int *ndev); +int get_sibling_devices(struct device *dev, uint16_t *devs, int max_devs); +void set_g_cc_dev(u32 sid, u32 vmid, u32 root_bd, bool secure); +void free_g_cc_dev_htable(void); +/* Has the root bus device number switched to secure? 
*/ +bool is_cc_root_bd(u32 root_bd); +bool is_cc_dev(u32 sid); + +void s_smmu_cmdq_need_forward(u64 cmd0, u64 cmd1, u64 *forward); +void s_queue_write(struct arm_smmu_device *smmu, u64 *src, size_t n_dwords); +void arm_s_smmu_cmdq_write_entries(struct arm_smmu_device *smmu, u64 *cmds, int n); +void arm_s_smmu_init_one_queue(struct arm_smmu_device *smmu, + struct arm_smmu_queue *q, + size_t qsz, const char *name); +int arm_smmu_write_s_reg_sync(struct arm_smmu_device *smmu, u32 val, u32 cmp_val, + unsigned int reg_off, unsigned int ack_off); +int arm_smmu_update_s_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr); + +irqreturn_t arm_smmu_s_evtq_thread(int irq, void *dev); +irqreturn_t arm_smmu_s_gerror_handler(int irq, void *dev); +void arm_smmu_disable_s_irq(struct arm_smmu_device *smmu); +void arm_smmu_enable_s_irq(struct arm_smmu_device *smmu, u32 irqen_flags); +void arm_s_smmu_setup_unique_irqs(struct arm_smmu_device *smmu); +void arm_smmu_write_s_msi_msg(struct arm_smmu_device *smmu, phys_addr_t *cfg, + struct msi_msg *msg, phys_addr_t doorbell); +void platform_get_s_irq_byname_optional(struct platform_device *pdev, struct arm_smmu_device *smmu); +int arm_smmu_enable_secure(struct iommu_domain *domain); +u32 arm_smmu_tmi_dev_attach(struct arm_smmu_domain *arm_smmu_domain, + struct kvm *kvm); +int arm_smmu_secure_dev_ste_create(struct arm_smmu_device *smmu, + struct arm_smmu_master *master, u32 sid); +int arm_smmu_secure_set_dev(struct arm_smmu_domain *smmu_domain, struct arm_smmu_master *master, + struct device *dev); + +int arm_smmu_id_alloc(void); +void arm_smmu_id_free(int idx); +void arm_smmu_map_init(struct arm_smmu_device *smmu, resource_size_t ioaddr); +int arm_s_smmu_device_disable(struct arm_smmu_device *smmu); +void arm_s_smmu_device_reset(struct arm_smmu_device *smmu); +void arm_s_smmu_device_enable(struct arm_smmu_device *smmu, + u32 enables, bool bypass, bool disable_bypass); +void arm_smmu_s_idr1_support_secure(struct arm_smmu_device *smmu); + +#endif +#endif /* _ARM_S_SMMU_V3_H */ diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 7148c336b3..57d884db53 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -30,6 +30,11 @@ #include "arm-smmu-v3.h" #include "../../dma-iommu.h"
+#ifdef CONFIG_HISI_VIRTCCA_HOST +#include <asm/kvm_tmi.h> +#include "arm-s-smmu-v3.h" +#endif + static bool disable_bypass = true; module_param(disable_bypass, bool, 0444); MODULE_PARM_DESC(disable_bypass, @@ -102,6 +107,10 @@ static int arm_smmu_bypass_dev_domain_type(struct device *dev) enum arm_smmu_msi_index { EVTQ_MSI_INDEX, GERROR_MSI_INDEX, +#ifdef CONFIG_HISI_VIRTCCA_HOST + S_EVTQ_MSI_INDEX, + S_GERROR_MSI_INDEX, +#endif PRIQ_MSI_INDEX, ARM_SMMU_MAX_MSIS, }; @@ -117,6 +126,18 @@ static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = { ARM_SMMU_GERROR_IRQ_CFG1, ARM_SMMU_GERROR_IRQ_CFG2, }, +#ifdef CONFIG_HISI_VIRTCCA_HOST + [S_EVTQ_MSI_INDEX] = { + ARM_SMMU_S_EVTQ_IRQ_CFG0, + ARM_SMMU_S_EVTQ_IRQ_CFG1, + ARM_SMMU_S_EVTQ_IRQ_CFG2, + }, + [S_GERROR_MSI_INDEX] = { + ARM_SMMU_S_GERROR_IRQ_CFG0, + ARM_SMMU_S_GERROR_IRQ_CFG1, + ARM_SMMU_S_GERROR_IRQ_CFG2, + }, +#endif [PRIQ_MSI_INDEX] = { ARM_SMMU_PRIQ_IRQ_CFG0, ARM_SMMU_PRIQ_IRQ_CFG1, @@ -995,11 +1016,16 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu, * Dependency ordering from the cmpxchg() loop above. */ arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n); +#ifdef CONFIG_HISI_VIRTCCA_HOST + arm_s_smmu_cmdq_write_entries(smmu, cmds, n); +#endif if (sync) { prod = queue_inc_prod_n(&llq, n); arm_smmu_cmdq_build_sync_cmd(cmd_sync, smmu, &cmdq->q, prod); queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS); - +#ifdef CONFIG_HISI_VIRTCCA_HOST + s_queue_write(smmu, cmd_sync, CMDQ_ENT_DWORDS); +#endif /* * In order to determine completion of our CMD_SYNC, we must * ensure that the queue can't wrap twice without us noticing. @@ -2376,6 +2402,11 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2)) smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if (smmu_domain->secure) + smmu_domain->stage = ARM_SMMU_DOMAIN_S2; +#endif + switch (smmu_domain->stage) { case ARM_SMMU_DOMAIN_S1: ias = (smmu->features & ARM_SMMU_FEAT_VAX) ? 52 : 48; @@ -2596,6 +2627,52 @@ static void arm_smmu_detach_dev(struct arm_smmu_master *master) arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, NULL); }
+#ifdef CONFIG_HISI_VIRTCCA_HOST +static void arm_smmu_secure_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid) +{ + struct arm_smmu_cmdq_ent prefetch_cmd = { + .opcode = CMDQ_OP_PREFETCH_CFG, + .prefetch = { + .sid = sid, + }, + }; + arm_smmu_sync_ste_for_sid(smmu, sid); + + /* It's likely that we'll want to use the new STE soon */ + if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) + arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd); +} + +static int arm_smmu_secure_dev_operator(struct arm_smmu_domain *smmu_domain, + struct arm_smmu_device *smmu, struct arm_smmu_master *master, struct device *dev) +{ + int i, j; + int ret; + + ret = arm_smmu_secure_set_dev(smmu_domain, master, dev); + if (ret) + return ret; + + for (i = 0; i < master->num_streams; ++i) { + u32 sid = master->streams[i].id; + /* Bridged PCI devices may end up with duplicated IDs */ + for (j = 0; j < i; j++) + if (master->streams[j].id == sid) + break; + if (j < i) + continue; + if (arm_smmu_secure_dev_ste_create(smmu, master, sid)) + return -ENOMEM; + + arm_smmu_secure_sync_ste_for_sid(smmu, sid); + } + + dev_info(smmu->dev, "attach confidential dev: %s", dev_name(dev)); + + return ret; +} +#endif + static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) { int ret = 0; @@ -2677,6 +2754,14 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
arm_smmu_install_ste_for_dev(master);
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if (smmu->virtcca_id != ARM_SMMU_INVALID_ID && smmu_domain->secure) + ret = arm_smmu_secure_dev_operator(smmu_domain, smmu, master, dev); + + if (ret) + return ret; +#endif + arm_smmu_enable_ats(master); return 0;
@@ -3311,6 +3396,9 @@ static struct iommu_ops arm_smmu_ops = { .def_domain_type = arm_smmu_def_domain_type, .pgsize_bitmap = -1UL, /* Restricted during device attach */ .owner = THIS_MODULE, + #ifdef CONFIG_HISI_VIRTCCA_HOST + .iommu_enable_secure = arm_smmu_enable_secure, + #endif .default_domain_ops = &(const struct iommu_domain_ops) { .attach_dev = arm_smmu_attach_dev, .map_pages = arm_smmu_map_pages, @@ -3373,6 +3461,11 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->llq.max_n_shift);
q->llq.prod = q->llq.cons = 0; + +#ifdef CONFIG_HISI_VIRTCCA_HOST + arm_s_smmu_init_one_queue(smmu, q, qsz, name); +#endif + return 0; }
@@ -3676,6 +3769,15 @@ static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg) desc->msg.data = msg->data; #endif
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if (smmu->virtcca_id != ARM_SMMU_INVALID_ID && + (desc->msi_index == S_EVTQ_MSI_INDEX || + desc->msi_index == S_GERROR_MSI_INDEX)) { + arm_smmu_write_s_msi_msg(smmu, cfg, msg, doorbell); + return; + } +#endif + writeq_relaxed(doorbell, smmu->base + cfg[0]); writel_relaxed(msg->data, smmu->base + cfg[1]); writel_relaxed(ARM_SMMU_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]); @@ -3690,6 +3792,19 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu) writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0); writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if (smmu->virtcca_id != ARM_SMMU_INVALID_ID) { + if (tmi_smmu_write(smmu->ioaddr, ARM_SMMU_S_GERROR_IRQ_CFG0, 0, 64)) { + smmu->virtcca_id = ARM_SMMU_INVALID_ID; + dev_err(smmu->dev, "S_SMMU: s gerror irq cfg0 failed\n"); + } + if (tmi_smmu_write(smmu->ioaddr, ARM_SMMU_S_EVTQ_IRQ_CFG0, 0, 64)) { + smmu->virtcca_id = ARM_SMMU_INVALID_ID; + dev_err(smmu->dev, "S_SMMU: write s evtq irq cfg0 failed\n"); + } + } +#endif + if (smmu->features & ARM_SMMU_FEAT_PRI) writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0); else @@ -3714,6 +3829,13 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu) smmu->gerr_irq = msi_get_virq(dev, GERROR_MSI_INDEX); smmu->priq.q.irq = msi_get_virq(dev, PRIQ_MSI_INDEX);
+#ifdef CONFIG_HISI_VIRTCCA_HOST + if (smmu->virtcca_id != ARM_SMMU_INVALID_ID) { + smmu->s_evtq_irq = msi_get_virq(dev, S_EVTQ_MSI_INDEX); + smmu->s_gerr_irq = msi_get_virq(dev, S_GERROR_MSI_INDEX); + } +#endif + /* Add callback to free MSIs on teardown */ devm_add_action(dev, arm_smmu_free_msis, dev); } @@ -3802,6 +3924,11 @@ static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu, bool resume dev_warn(smmu->dev, "no priq irq - PRI will be broken\n"); } } + +#ifdef CONFIG_HISI_VIRTCCA_HOST + arm_s_smmu_setup_unique_irqs(smmu); +#endif + }
static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu, bool resume) @@ -3817,6 +3944,10 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu, bool resume) return ret; }
+#ifdef CONFIG_HISI_VIRTCCA_HOST + arm_smmu_disable_s_irq(smmu); +#endif + irq = smmu->combined_irq; if (irq) { /* @@ -3842,6 +3973,10 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu, bool resume) if (ret) dev_warn(smmu->dev, "failed to enable irqs\n");
+#ifdef CONFIG_HISI_VIRTCCA_HOST + arm_smmu_enable_s_irq(smmu, IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN); +#endif + return 0; }
@@ -3853,6 +3988,10 @@ static int arm_smmu_device_disable(struct arm_smmu_device *smmu) if (ret) dev_err(smmu->dev, "failed to clear cr0\n");
+#ifdef CONFIG_HISI_VIRTCCA_HOST + arm_s_smmu_device_disable(smmu); +#endif + return ret; }
@@ -3946,6 +4085,10 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool resume) FIELD_PREP(CR1_QUEUE_IC, CR1_CACHE_WB); writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);
+#ifdef CONFIG_HISI_VIRTCCA_HOST + arm_s_smmu_device_reset(smmu); +#endif + /* CR2 (random crap) */ reg = CR2_PTM | CR2_RECINVSID;
@@ -4038,6 +4181,10 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool resume) if (is_kdump_kernel()) enables &= ~(CR0_EVTQEN | CR0_PRIQEN);
+#ifdef CONFIG_HISI_VIRTCCA_HOST + arm_s_smmu_device_enable(smmu, enables, smmu->bypass, disable_bypass); +#endif + /* Enable the SMMU interface, or ensure bypass */ if (!smmu->bypass || disable_bypass) { enables |= CR0_SMMUEN; @@ -4404,7 +4551,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) }
#ifdef CONFIG_ARM_SMMU_V3_ECMDQ - if (reg & IDR1_ECMDQ) + if ((reg & IDR1_ECMDQ) && !virtcca_is_available()) smmu->features |= ARM_SMMU_FEAT_ECMDQ; #endif
@@ -4440,6 +4587,10 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) if (smmu->sid_bits <= STRTAB_SPLIT) smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;
+#ifdef CONFIG_HISI_VIRTCCA_HOST + arm_smmu_s_idr1_support_secure(smmu); +#endif + /* IDR3 */ reg = readl_relaxed(smmu->base + ARM_SMMU_IDR3); switch (FIELD_GET(IDR3_BBML, reg)) { @@ -4800,6 +4951,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev) } ioaddr = res->start;
+#ifdef CONFIG_HISI_VIRTCCA_HOST + arm_smmu_map_init(smmu, ioaddr); +#endif + /* * Don't map the IMPLEMENTATION DEFINED regions, since they may contain * the PMCG registers which are reserved by the PMU driver. @@ -4834,6 +4989,11 @@ static int arm_smmu_device_probe(struct platform_device *pdev) irq = platform_get_irq_byname_optional(pdev, "gerror"); if (irq > 0) smmu->gerr_irq = irq; + +#ifdef CONFIG_HISI_VIRTCCA_HOST + platform_get_s_irq_byname_optional(pdev, smmu); +#endif + } /* Probe the h/w */ ret = arm_smmu_device_hw_probe(smmu); @@ -4881,6 +5041,10 @@ static void arm_smmu_device_remove(struct platform_device *pdev) arm_smmu_device_disable(smmu); iopf_queue_free(smmu->evtq.iopf); ida_destroy(&smmu->vmid_map); +#ifdef CONFIG_HISI_VIRTCCA_HOST + free_g_cc_dev_htable(); + arm_smmu_id_free(smmu->virtcca_id); +#endif }
static void arm_smmu_device_shutdown(struct platform_device *pdev) diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h index bc34d5a6aa..fe445b9925 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h @@ -13,6 +13,9 @@ #include <linux/kernel.h> #include <linux/mmzone.h> #include <linux/sizes.h> +#ifdef CONFIG_HISI_VIRTCCA_HOST +#include <linux/kvm_host.h> +#endif
/* MMIO registers */ #define ARM_SMMU_IDR0 0x0 @@ -753,6 +756,13 @@ struct arm_smmu_device { struct mutex streams_mutex;
bool bypass; + +#ifdef CONFIG_HISI_VIRTCCA_HOST + int s_evtq_irq; + int s_gerr_irq; + resource_size_t ioaddr; + uint64_t virtcca_id; +#endif };
struct arm_smmu_stream { @@ -805,6 +815,12 @@ struct arm_smmu_domain { spinlock_t devices_lock;
struct list_head mmu_notifiers; + +#ifdef CONFIG_HISI_VIRTCCA_HOST + bool secure; + struct list_head node; + struct kvm *kvm; +#endif };
static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 293219ee6f..16791cf835 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -635,7 +635,11 @@ struct iommu_ops { struct iommu_domain *blocked_domain; struct iommu_domain *default_domain;
+#ifdef CONFIG_HISI_VIRTCCA_HOST + KABI_USE(1, int (*iommu_enable_secure)(struct iommu_domain *domain)) +#else KABI_RESERVE(1) +#endif KABI_RESERVE(2) KABI_RESERVE(3) KABI_RESERVE(4)
On 2024/8/6 20:46, Xiangkai Yang wrote:
> virtcca inclusion
> category: feature
> bugzilla: https://gitee.com/openeuler/kernel/issues/IADD42
>
> virtCCA feature
>
> Signed-off-by: Xiangkai Yang <yangxiangkai@huawei.com>
>
>  arch/arm64/include/asm/kvm_tmi.h              | 244 +++--
>  arch/arm64/include/asm/kvm_tmm.h              |  22 +
>  arch/arm64/kvm/tmi.c                          | 128 ++-
>  arch/arm64/kvm/virtcca_cvm.c                  |  10 +-
>  drivers/iommu/arm/arm-smmu-v3/Makefile        |   1 +
>  drivers/iommu/arm/arm-smmu-v3/arm-s-smmu-v3.c | 841 ++++++++++++++++++
>  drivers/iommu/arm/arm-smmu-v3/arm-s-smmu-v3.h | 198 +++++
>  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c   | 168 +++-
>  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h   |  16 +
>  include/linux/iommu.h                         |   4 +
>  10 files changed, 1548 insertions(+), 84 deletions(-)
What exactly is this, and why is it needed? How does it stay compatible with the existing system? What impact do these intrusive changes have on the existing system?
Over 1600 lines of code changed in a single patch: how is anyone supposed to review that?