
From: Yicong Yang <yangyicong@hisilicon.com> Avoid exceeding the one-page limit on the VHE code section under the CONFIG_FUNCTION_ALIGNMENT_64B configuration. Do not enable LS64 when CONFIG_FUNCTION_ALIGNMENT_64B is enabled. Signed-off-by: Yicong Yang <yangyicong@hisilicon.com> Signed-off-by: Hongye Lin <linhongye@h-partners.com> Signed-off-by: Qi Xi <xiqi2@huawei.com> --- arch/arm64/Kconfig | 4 ++++ arch/arm64/include/asm/el2_setup.h | 2 ++ arch/arm64/kernel/cpufeature.c | 4 ++++ 3 files changed, 10 insertions(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 3829167e97fc..9c8600123457 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -116,6 +116,7 @@ config ARM64 select ARM_GIC_V3 select ARM_GIC_V3_ITS if PCI select ARM_PSCI_FW + select ARM64_LS64 if !FUNCTION_ALIGNMENT_64B select BUILDTIME_TABLE_SORT select CLONE_BACKWARDS select COMMON_CLK @@ -2197,6 +2198,9 @@ config ARM64_TWED help Delayed Trapping of WFE (part of the ARMv8.6 Extensions) +config ARM64_LS64 + bool + endmenu menu "ARMv8.7 architectural features" diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index 1da6a8eb496c..6f65322e2fd0 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -28,6 +28,7 @@ cbz x0, .Lskip_hcrx_\@ mov_q x0, HCRX_HOST_FLAGS +#ifdef CONFIG_ARM64_LS64 /* Enable LS64, LS64_V if supported */ mrs_s x1, SYS_ID_AA64ISAR1_EL1 ubfx x1, x1, #ID_AA64ISAR1_EL1_LS64_SHIFT, #4 @@ -36,6 +37,7 @@ cmp x1, #ID_AA64ISAR1_EL1_LS64_LS64_V b.lt .Lset_hcrx_\@ orr x0, x0, #HCRX_EL2_EnASR +#endif .Lset_hcrx_\@ : msr_s SYS_HCRX_EL2, x0 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 84031be9df2a..021d0980d186 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2169,6 +2169,7 @@ static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap) static bool enable_pseudo_nmi; #endif +#ifdef CONFIG_ARM64_LS64 static bool has_ls64(const
struct arm64_cpu_capabilities *entry, int __unused) { u64 ls64; @@ -2200,6 +2201,7 @@ static void cpu_enable_ls64_v(struct arm64_cpu_capabilities const *cap) { sysreg_clear_set(sctlr_el1, SCTLR_EL1_EnASR, SCTLR_EL1_EnASR); } +#endif #ifdef CONFIG_ARM64_PSEUDO_NMI static int __init early_enable_pseudo_nmi(char *p) @@ -2929,6 +2931,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { ARM64_CPUID_FIELDS(ID_AA64MMFR1_EL1, TWED, IMP) }, #endif +#ifdef CONFIG_ARM64_LS64 { .desc = "LS64", .capability = ARM64_HAS_LS64, @@ -2945,6 +2948,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .cpu_enable = cpu_enable_ls64_v, ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, LS64, LS64_V) }, +#endif {}, }; -- 2.33.0