From: Ma Wupeng <mawupeng1@huawei.com>
Patch 1: move FDT init out of kaslr_early_init() for future use.
Patches 2-8: enable the PBHA feature for arm64.
Patches 9-18: control the usage of the HBM cache for the kernel and tasks precisely.
Patch 19: enable the PBHA feature for arm64 by default.
Changelog since v4:
- update pbha_bit0_update_pgprot to pgprot_pbha_bit0

Changelog since v3:
- update the description for patch #15

Changelog since v2:
- correct the erroneous documentation in patch #18

Changelog since v1:
- fix kABI breakage due to include files
James Morse (7):
  KVM: arm64: Detect and enable PBHA for stage2
  dt-bindings: Rename the description of cpu nodes cpu.yaml
  dt-bindings: arm: Add binding for Page Based Hardware Attributes
  arm64: cpufeature: Enable PBHA bits for stage1
  arm64: mm: Add pgprot_pbha() to allow drivers to request PBHA values
  KVM: arm64: Configure PBHA bits for stage2
  Documentation: arm64: Describe the support and expectations for PBHA
Ma Wupeng (11):
  arm64: cpufeature: Enable PBHA for stage1 early via FDT
  arm64: mm: Detect and enable PBHA bit0 at early startup
  arm64: mm: Update kernel pte entries if pbha bit0 enabled
  arm64: mm: Show PBHA bit 59 as PBHA0 in ptdump
  arm64: mm: Introduce VM_PBHA_BIT0 to enable pbha bit0 for single vma
  arm64: mm: Set PBHA0 bit for VM_PBHA_BIT0
  arm64: mm: Introduce procfs interface to update PBHA0 bit
  arm64: mm: Set flag VM_PBHA_BIT0 for global init task
  arm64: mm: Introduce prctl to control pbha behavior
  arm64: mm: Introduce kernel param pbha
  openeuler: configs: arm64: Enable PBHA by default
Marc Zyngier (1):
  arm64: Extract early FDT mapping from kaslr_early_init()
 .../admin-guide/kernel-parameters.txt         |   8 +
 Documentation/arm64/index.rst                 |   1 +
 Documentation/arm64/pbha.rst                  |  85 +++
 .../devicetree/bindings/arm/cpu.yaml          | 537 ++++++++++++++++
 .../devicetree/bindings/arm/cpus.yaml         | 584 +++--------------
 arch/arm64/Kconfig                            |  20 +
 arch/arm64/configs/openeuler_defconfig        |   1 +
 arch/arm64/include/asm/cpucaps.h              |   3 +
 arch/arm64/include/asm/cpufeature.h           |  15 +
 arch/arm64/include/asm/kvm_arm.h              |   1 +
 arch/arm64/include/asm/kvm_pgtable.h          |   9 +
 arch/arm64/include/asm/mman.h                 |  10 +
 arch/arm64/include/asm/pgtable-hwdef.h        |   6 +
 arch/arm64/include/asm/pgtable.h              |  26 +
 arch/arm64/include/asm/setup.h                |   3 +
 arch/arm64/include/uapi/asm/mman.h            |   1 +
 arch/arm64/kernel/cpufeature.c                | 258 ++++++++
 arch/arm64/kernel/head.S                      |   6 +-
 arch/arm64/kernel/image-vars.h                |   3 +
 arch/arm64/kernel/kaslr.c                     |   7 +-
 arch/arm64/kernel/setup.c                     |  15 +
 arch/arm64/kvm/reset.c                        |  15 +-
 arch/arm64/mm/hugetlbpage.c                   |   2 +
 arch/arm64/mm/mmu.c                           |  14 +-
 arch/arm64/mm/ptdump.c                        |   5 +
 .../firmware/efi/libstub/efi-stub-helper.c    |   3 +
 drivers/firmware/efi/libstub/fdt.c            |  57 ++
 drivers/soc/hisilicon/Makefile                |   1 +
 drivers/soc/hisilicon/pbha.c                  | 204 ++++++
 fs/proc/base.c                                | 103 +++
 fs/proc/task_mmu.c                            |   3 +
 include/linux/mm.h                            |   8 +-
 include/linux/pbha.h                          |  66 ++
 include/uapi/asm-generic/mman-common.h        |   1 +
 include/uapi/linux/prctl.h                    |   2 +
 kernel/sys.c                                  |  10 +
 mm/memory.c                                   |   4 +
 mm/vmalloc.c                                  |   5 +
 38 files changed, 1579 insertions(+), 523 deletions(-)
 create mode 100644 Documentation/arm64/pbha.rst
 create mode 100644 Documentation/devicetree/bindings/arm/cpu.yaml
 create mode 100644 drivers/soc/hisilicon/pbha.c
 create mode 100644 include/linux/pbha.h
From: Marc Zyngier <maz@kernel.org>
mainline inclusion
from mainline-v5.12-rc1
commit f6f0c4362f070cab4a0cec432e82428d702ce0a6
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7ZC0H
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
As we want to parse more options very early in the kernel lifetime, let's always map the FDT early. This is achieved by moving that code out of kaslr_early_init().
No functional change expected.
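As a hypothetical illustration of the pattern this enables (the consumer below is invented; the real first users are kaslr_early_init() and later patches in this series), any early-boot code can now reach the FDT through the new helpers:

	/* Sketch only: runs after early_fdt_map() has been called from head.S. */
	#include <linux/libfdt.h>
	#include <asm/setup.h>

	static void __init some_early_parser(void)
	{
		void *fdt = get_early_fdt_ptr();

		if (!fdt)	/* mapping failed; setup_machine_fdt() will retry */
			return;

		/* ... walk the tree with fdt_path_offset()/fdt_getprop() ... */
	}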
Signed-off-by: Marc Zyngier <maz@kernel.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: David Brazdil <dbrazdil@google.com>
Link: https://lore.kernel.org/r/20210208095732.3267263-13-maz@kernel.org
[will: Ensure KASAN is enabled before running C code]
Signed-off-by: Will Deacon <will@kernel.org>
Conflicts:
	arch/arm64/include/asm/setup.h
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 arch/arm64/include/asm/setup.h |  3 +++
 arch/arm64/kernel/head.S       |  3 ++-
 arch/arm64/kernel/kaslr.c      |  7 +++----
 arch/arm64/kernel/setup.c      | 15 +++++++++++++++
 4 files changed, 23 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/include/asm/setup.h b/arch/arm64/include/asm/setup.h
index 29bcb5bb45a3..fc56198ec28e 100644
--- a/arch/arm64/include/asm/setup.h
+++ b/arch/arm64/include/asm/setup.h
@@ -21,4 +21,7 @@ static inline bool arch_parse_debug_rodata(char *arg)
 }
 #define arch_parse_debug_rodata arch_parse_debug_rodata
 
+void *get_early_fdt_ptr(void);
+void early_fdt_map(u64 dt_phys);
+
 #endif
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 6e3f04b12bcb..b65f95f25ec8 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -452,10 +452,11 @@ SYM_FUNC_START_LOCAL(__primary_switched)
 #ifdef CONFIG_KASAN
 	bl	kasan_early_init
 #endif
+	mov	x0, x21				// pass FDT address in x0
+	bl	early_fdt_map			// Try mapping the FDT early
 #ifdef CONFIG_RANDOMIZE_BASE
 	tst	x23, ~(MIN_KIMG_ALIGN - 1)	// already running randomized?
 	b.ne	0f
-	mov	x0, x21				// pass FDT address in x0
 	bl	kaslr_early_init		// parse FDT for KASLR options
 	cbz	x0, 0f				// KASLR disabled? just proceed
 	orr	x23, x23, x0			// record KASLR offset
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 9181d2856be3..d612ac8ae855 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -19,6 +19,7 @@
 #include <asm/memory.h>
 #include <asm/mmu.h>
 #include <asm/sections.h>
+#include <asm/setup.h>
 
 enum kaslr_status {
 	KASLR_ENABLED,
@@ -79,13 +80,12 @@ static __init const u8 *kaslr_get_cmdline(void *fdt)
  * containing function pointers) to be reinitialized, and zero-initialized
  * .bss variables will be reset to 0.
  */
-u64 __init kaslr_early_init(u64 dt_phys)
+u64 __init kaslr_early_init(void)
 {
 	void *fdt;
 	u64 seed, offset, mask, module_range;
 	const u8 *cmdline, *str;
 	unsigned long raw;
-	int size;
 
 	/*
 	 * Set a reasonable default for module_alloc_base in case
@@ -99,8 +99,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
 	 * and proceed with KASLR disabled. We will make another
 	 * attempt at mapping the FDT in setup_machine()
 	 */
-	early_fixmap_init();
-	fdt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
+	fdt = get_early_fdt_ptr();
 	if (!fdt) {
 		kaslr_status = KASLR_DISABLED_FDT_REMAP;
 		return 0;
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 0d6cd99f73f5..c687866612d9 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -185,6 +185,21 @@ static void __init smp_build_mpidr_hash(void)
 		pr_warn("Large number of MPIDR hash buckets detected\n");
 }
 
+static void *early_fdt_ptr __initdata;
+
+void __init *get_early_fdt_ptr(void)
+{
+	return early_fdt_ptr;
+}
+
+asmlinkage void __init early_fdt_map(u64 dt_phys)
+{
+	int fdt_size;
+
+	early_fixmap_init();
+	early_fdt_ptr = fixmap_remap_fdt(dt_phys, &fdt_size, PAGE_KERNEL);
+}
+
 static void __init setup_machine_fdt(phys_addr_t dt_phys)
 {
 	int size;
From: James Morse <james.morse@arm.com>
maillist inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7ZC0H
Reference: https://lore.kernel.org/all/20211015161416.2196-1-james.morse@arm.com/t/#u
--------------------------------
Page Based Hardware Attributes (PBHA, aka HPDS2) allow a page table entry to specify up to four bits that can be used by the hardware for some implementation defined purpose.
This is a problem for KVM guests as the host may swap guest memory using a different combination of PBHA bits than the guest used when writing the data. Without knowing what the PBHA bits do, it's not possible to know if this will corrupt the guest's data.
The arm-arm doesn't describe how the PBHA bits are combined between stage1 and stage2. Arm's Cortex CPUs appear to all do the same thing: stage2 wins.
Enable PBHA for stage2, where the configured value is zero. This has no effect if PBHA isn't in use. On Cortex cores that have the 'stage2 wins' behaviour, this disables whatever the guest may be doing. For any other core with a sensible combination policy, it should be harmless.
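To make the register arithmetic concrete, here is what the FIELD_PREP(VTCR_EL2_PBHA_MASK, 0xf) in this patch computes, with the macro expanded by hand for illustration:

	/* VTCR_EL2_PBHA_MASK is GENMASK(28, 25), i.e. 0x1e000000 */
	u64 vtcr = 0;

	/* FIELD_PREP() shifts the value to the mask's lowest set bit */
	vtcr |= 0xfULL << 25;	/* sets VTCR_EL2.PBHA0..PBHA3 (bits 28:25) */

With all four VTCR_EL2 bits set, stage2 owns every PBHA bit; since KVM never sets the corresponding stage2 descriptor bits, the effective value is 0, the architecturally safe default.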
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 arch/arm64/include/asm/cpucaps.h     | 1 +
 arch/arm64/include/asm/kvm_arm.h     | 1 +
 arch/arm64/include/asm/kvm_pgtable.h | 9 +++++++++
 arch/arm64/kernel/cpufeature.c       | 9 +++++++++
 arch/arm64/kvm/reset.c               | 9 +++++++++
 5 files changed, 29 insertions(+)
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index c7fe08dce205..f7e15076a12c 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -76,6 +76,7 @@
 #define ARM64_HAS_WFXT				68
 #define ARM64_WORKAROUND_HISILICON_ERRATUM_162100125	69
 #define ARM64_HAS_LDAPR				70
+#define ARM64_HAS_PBHA				71
 
 #define ARM64_NCAPS				80
 
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index cf35d1968c93..05b56370ded5 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -126,6 +126,7 @@
 #define VTCR_EL2_VS_SHIFT	19
 #define VTCR_EL2_VS_8BIT	(0 << VTCR_EL2_VS_SHIFT)
 #define VTCR_EL2_VS_16BIT	(1 << VTCR_EL2_VS_SHIFT)
+#define VTCR_EL2_PBHA_MASK	GENMASK(28, 25)
 
 #define VTCR_EL2_T0SZ(x)	TCR_T0SZ(x)
 
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 8886d43cfb11..f5dff6d40529 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -35,6 +35,10 @@ struct kvm_pgtable {
  * @KVM_PGTABLE_PROT_W:		Write permission.
  * @KVM_PGTABLE_PROT_R:		Read permission.
  * @KVM_PGTABLE_PROT_DEVICE:	Device attributes.
+ * @KVM_PGTABLE_PROT_PBHA0:	Page-Based Hardware Attribute 0.
+ * @KVM_PGTABLE_PROT_PBHA1:	Page-Based Hardware Attribute 1.
+ * @KVM_PGTABLE_PROT_PBHA2:	Page-Based Hardware Attribute 2.
+ * @KVM_PGTABLE_PROT_PBHA3:	Page-Based Hardware Attribute 3.
  */
 enum kvm_pgtable_prot {
 	KVM_PGTABLE_PROT_X	= BIT(0),
@@ -42,6 +46,11 @@ enum kvm_pgtable_prot {
 	KVM_PGTABLE_PROT_R	= BIT(2),
 
 	KVM_PGTABLE_PROT_DEVICE	= BIT(3),
+
+	KVM_PGTABLE_PROT_PBHA0	= BIT(59),
+	KVM_PGTABLE_PROT_PBHA1	= BIT(60),
+	KVM_PGTABLE_PROT_PBHA2	= BIT(61),
+	KVM_PGTABLE_PROT_PBHA3	= BIT(62),
 };
 
 #define PAGE_HYP		(KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 90700ce19e66..6590ea28add3 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2295,6 +2295,15 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.matches = has_cpuid_feature,
 		.min_field_value = 1,
 	},
+	{
+		.capability = ARM64_HAS_PBHA,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.sys_reg = SYS_ID_AA64MMFR1_EL1,
+		.sign = FTR_UNSIGNED,
+		.field_pos = ID_AA64MMFR1_HPD_SHIFT,
+		.matches = has_cpuid_feature,
+		.min_field_value = 2,
+	},
 	{},
 };
 
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 6f85c1821c3f..d151da73f89e 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -474,6 +474,15 @@ int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
 	 */
 	vtcr |= VTCR_EL2_HA;
 
+	/*
+	 * Enable PBHA for stage2 on systems that support it. The configured
+	 * value will always be 0, which is defined as the safe default
+	 * setting. On Cortex cores, enabling PBHA for stage2 effectively
+	 * disables it for stage1.
+	 */
+	if (cpus_have_final_cap(ARM64_HAS_PBHA))
+		vtcr |= FIELD_PREP(VTCR_EL2_PBHA_MASK, 0xf);
+
 	/* Set the vmid bits */
 	vtcr |= (kvm_get_vmid_bits() == 16) ?
 		VTCR_EL2_VS_16BIT :
From: James Morse <james.morse@arm.com>
maillist inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7ZC0H
Reference: https://lore.kernel.org/all/20211015161416.2196-1-james.morse@arm.com/t/#u
--------------------------------
The cpus.yaml file describes the cpu nodes, not the cpus node. Rename it to allow integration properties of all the cpus to be described in the cpus node.
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 Documentation/devicetree/bindings/arm/{cpus.yaml => cpu.yaml} | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
 rename Documentation/devicetree/bindings/arm/{cpus.yaml => cpu.yaml} (99%)
diff --git a/Documentation/devicetree/bindings/arm/cpus.yaml b/Documentation/devicetree/bindings/arm/cpu.yaml
similarity index 99%
rename from Documentation/devicetree/bindings/arm/cpus.yaml
rename to Documentation/devicetree/bindings/arm/cpu.yaml
index 14cd727d3c4b..8ae27f370a12 100644
--- a/Documentation/devicetree/bindings/arm/cpus.yaml
+++ b/Documentation/devicetree/bindings/arm/cpu.yaml
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/arm/cpus.yaml#
+$id: http://devicetree.org/schemas/arm/cpu.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: ARM CPUs bindings
From: James Morse <james.morse@arm.com>
maillist inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7ZC0H
Reference: https://lore.kernel.org/all/20211015161416.2196-1-james.morse@arm.com/t/#u
--------------------------------
ARM CPUs with the FEAT_HPDS2 feature allow an IMPLEMENTATION DEFINED hardware attribute to be encoded in the leaf page table entries and sent with any transaction that makes an access via that entry.
Some designs are using these bits as a hint to the system-cache that it should apply a particular policy to this access. e.g. to prioritise the caching of particular workload data.
The arm-arm doesn't define what these bits mean. Implementations could use this to encrypt, or otherwise corrupt data. Setting an 'incorrect' value may lead to correctness or coherency issues. The arm-arm only defines '0' as a safe default value.
As there are only four bits, it is likely these will be combined and treated as a four-bit value by some hardware. This binding therefore expects values, not individual bits; using values allows firmware to describe that two bits should not be set at the same time.
To allow these hints to be used, add a way of describing which values only have a performance impact, and which can only be used if all mappings use the same PBHA value. This goes in the cpus node binding, as it must be the same for all CPUs.
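The 'decomposed values' rule described above can be captured in a few lines; the sketch below mirrors the decompose_pbha_values() helper that a later patch in this series adds:

	/* For val = 5 (0b0101) this returns a mask with bits 1, 4 and 5 set:
	 * every non-zero value a guest could build from val's bits. */
	static unsigned long decompose_pbha_values(u8 val)
	{
		unsigned long mask = 0;
		int i;

		for (i = 1; i <= 15; i++)
			if ((i & val) == i)
				mask |= 1UL << i;

		return mask;
	}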
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 .../devicetree/bindings/arm/cpus.yaml | 91 +++++++++++++++++++
 1 file changed, 91 insertions(+)
 create mode 100644 Documentation/devicetree/bindings/arm/cpus.yaml
diff --git a/Documentation/devicetree/bindings/arm/cpus.yaml b/Documentation/devicetree/bindings/arm/cpus.yaml
new file mode 100644
index 000000000000..326e393d4de1
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/cpus.yaml
@@ -0,0 +1,91 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/arm/cpus.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: CPUS, a container for CPU subnodes
+
+description: |
+  The device tree allows to describe the layout of CPUs in a system through
+  the "cpus" node, which in turn contains a number of subnodes (ie "cpu")
+  defining properties for every CPU.
+
+  Properties of the CPU integration that are common to all CPUs can be
+  described in the cpus node.
+
+  ARM CPUs with the FEAT_HPDS2 feature allow an IMPLEMENTATION DEFINED
+  hardware attribute to be encoded in the leaf page table entries and
+  sent with any transaction that makes an access via that entry.
+
+  Four bits are used in the page-tables. It is likely the individual bits
+  will be combined and used as a four-bit value. The impact of any
+  particular value is up to the implementation.
+
+  0 is defined as a 'safe default setting' that behaves as if the feature
+  were not implemented. Other values may be unsafe, having coherency or
+  correctness issues leading to data-corruption or deadlock.
+
+  This binding lists the additional values that only have a performance
+  cost (or benefit), and values that can only be used if all mappings have
+  the same PBHA value.
+  For both cases, all affected values should be listed. If setting bit-2
+  requires no aliases, then the values 2, 4, 6 etc should be listed.
+
+  A hypervisor can only control individual bits, and may choose to only
+  enable bits that can only be used to build other performance-only values.
+  e.g. the value 5 is listed, but enabling bit-0 and bit-2 would allow a
+  guest to configure the values 1 or 4 too. If these 'decomposed' values
+  only affected performance, they should also be listed.
+
+  The list does not need to be in numeric order, but a hypervisor may make
+  use of the order when enabling bits.
+
+  The presence of an 'arm,pbha-no-aliases' property indicates that higher
+  exception levels and secure-world firmware do not have a mapping of any
+  memory in the memory node or UEFI memory map, other than those with a
+  reserved-memory entry or EFIReserved memory attribute.
+  Firmware mappings created based on requests from the normal world do not
+  use any of the arm,pbha-no-aliases values, or take the PBHA value to use
+  as an argument.
+
+properties:
+  $nodename:
+    const: cpus
+
+  arm,pbha-performance-only:
+    $ref: /schemas/types.yaml#/definitions/uint8-array
+    description: PBHA values that only affect performance
+    minItems: 1
+    maxItems: 15
+    items:
+      maximum: 15
+
+  arm,pbha-no-aliases:
+    $ref: /schemas/types.yaml#/definitions/uint8-array
+    description: PBHA values that must only be used if all mappings have the
+      same value.
+    minItems: 1
+    maxItems: 15
+    items:
+      maximum: 15
+
+additionalProperties: true
+
+examples:
+  - |
+    / {
+      cpus {
+        arm,pbha-performance-only = /bits/ 8 <0x01 0x05 0x09>;
+        arm,pbha-no-aliases = /bits/ 8 <0x02 0x04 0x06 0x08>;
+
+        cpu@0 {
+          device_type = "cpu";
+          compatible = "arm,cortex-a57";
+          ...
+        };
+
+      };
+    };
+...
From: James Morse <james.morse@arm.com>
maillist inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7ZC0H
Reference: https://lore.kernel.org/all/20211015161416.2196-1-james.morse@arm.com/t/#u
--------------------------------
If the CPUs support HPDS2, and there is a DT description of PBHA values that only affect performance, enable those bits for both TTBR0 and TTBR1.
Enabling PBHA requires the hierarchical-permissions to be disabled. Commit 87143f404f33 ("arm64: mm: use XN table mapping attributes for the linear region") used these, but only as an optimisation.
Only the necessary PBHA bits are enabled to reduce the risk of an unsafe bit/value being used by accident.
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 arch/arm64/Kconfig                     | 13 +++++
 arch/arm64/include/asm/cpucaps.h       |  1 +
 arch/arm64/include/asm/pgtable-hwdef.h |  4 ++
 arch/arm64/kernel/cpufeature.c         | 81 ++++++++++++++++++++++++++
 4 files changed, 99 insertions(+)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 143c2209368d..48bd8258bd34 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1691,6 +1691,19 @@ config ARM64_CNP
 	  at runtime, and does not affect PEs that do not implement
 	  this feature.
 
+config ARM64_PBHA
+	bool "Enable support for Page Based Hardware Attributes (PBHA)"
+	default n
+	help
+	  Page Based Hardware Attributes (PBHA) allow the SoC hardware to
+	  change behaviour depending on which mapping was used to access
+	  a page of memory. e.g. access via one mapping may always result
+	  in the data being cached, whereas access via another mapping of
+	  the same physical memory may never be cached.
+
+	  The behaviour of each PBHA bit is not defined. Say no unless you
+	  are very sure you want this.
+
 endmenu
 
 menu "ARMv8.3 architectural features"
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index f7e15076a12c..5afb9b1e10e8 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -77,6 +77,7 @@
 #define ARM64_WORKAROUND_HISILICON_ERRATUM_162100125	69
 #define ARM64_HAS_LDAPR				70
 #define ARM64_HAS_PBHA				71
+#define ARM64_HAS_PBHA_STAGE1			72
 
 #define ARM64_NCAPS				80
 
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 01a96d07ae74..bb80c1f0405f 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -260,6 +260,10 @@
 #define TCR_TBI1		(UL(1) << 38)
 #define TCR_HA			(UL(1) << 39)
 #define TCR_HD			(UL(1) << 40)
+#define TCR_HPD0		(UL(1) << 41)
+#define TCR_HPD1		(UL(1) << 42)
+#define TCR_HWU0nn_MASK		(UL(0xf) << 43)
+#define TCR_HWU1nn_MASK		(UL(0xf) << 47)
 #define TCR_NFD0		(UL(1) << 53)
 #define TCR_NFD1		(UL(1) << 54)
 #define TCR_E0PD0		(UL(1) << 55)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 6590ea28add3..a55040cddab9 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -70,6 +70,7 @@
 #include <linux/stop_machine.h>
 #include <linux/types.h>
 #include <linux/mm.h>
+#include <linux/of.h>
 #include <linux/cpu.h>
 
 #include <asm/cpu.h>
@@ -110,6 +111,8 @@ EXPORT_SYMBOL(arm64_use_ng_mappings);
 
 DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;
 
+unsigned long __ro_after_init arm64_pbha_perf_only_values;
+
 /*
  * Flag to indicate if we have computed the system wide
  * capabilities based on the boot time active CPUs. This
@@ -1574,6 +1577,71 @@ static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap,
 
 #endif
 
+#ifdef CONFIG_ARM64_PBHA
+static u8 pbha_stage1_enable_bits;
+
+static bool plat_can_use_pbha_stage1(const struct arm64_cpu_capabilities *cap,
+				     int scope)
+{
+	u8 val;
+	struct device_node *cpus;
+	const u8 *perf_only_vals;
+	int num_perf_only_vals, i;
+
+	if (!has_cpuid_feature(cap, scope))
+		return false;
+
+	/*
+	 * Calls with scope == SCOPE_LOCAL_CPU need only testing whether this
+	 * cpu has the feature. A later 'system' scope call will check for a
+	 * firmware description.
+	 */
+	if (scope == SCOPE_LOCAL_CPU)
+		return true;
+
+	cpus = of_find_node_by_path("/cpus");
+	if (!cpus)
+		goto done;
+
+	perf_only_vals = of_get_property(cpus, "arm,pbha-performance-only",
+					 &num_perf_only_vals);
+	if (!perf_only_vals)
+		goto done;
+
+	/* any listed value is usable at stage 1 */
+	for (i = 0 ; i < num_perf_only_vals; i++) {
+		val = perf_only_vals[i];
+		if (val > 0xf)
+			continue;
+
+		pbha_stage1_enable_bits |= val;
+		set_bit(val, &arm64_pbha_perf_only_values);
+	}
+
+done:
+	of_node_put(cpus);
+
+	return !!pbha_stage1_enable_bits;
+}
+
+static void cpu_enable_pbha(struct arm64_cpu_capabilities const *cap)
+{
+	u64 tcr;
+
+	if (!pbha_stage1_enable_bits)
+		return;
+
+	tcr = read_sysreg(tcr_el1);
+	tcr |= FIELD_PREP(TCR_HWU0nn_MASK, pbha_stage1_enable_bits);
+	tcr |= FIELD_PREP(TCR_HWU1nn_MASK, pbha_stage1_enable_bits);
+	tcr |= FIELD_PREP(TCR_HPD0, 1) | FIELD_PREP(TCR_HPD1, 1);
+
+	write_sysreg(tcr, tcr_el1);
+	isb();
+	local_flush_tlb_all();
+}
+#endif /* CONFIG_ARM64_PBHA */
+
 #ifdef CONFIG_ARM64_AMU_EXTN
 
 /*
@@ -2304,6 +2372,19 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.matches = has_cpuid_feature,
 		.min_field_value = 2,
 	},
+#ifdef CONFIG_ARM64_PBHA
+	{
+		.desc = "Page Based Hardware Attributes (PBHA)",
+		.capability = ARM64_HAS_PBHA_STAGE1,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.sys_reg = SYS_ID_AA64MMFR1_EL1,
+		.sign = FTR_UNSIGNED,
+		.field_pos = ID_AA64MMFR1_HPD_SHIFT,
+		.matches = plat_can_use_pbha_stage1,
+		.min_field_value = 2,
+		.cpu_enable = cpu_enable_pbha,
+	},
+#endif
 	{},
 };
From: James Morse <james.morse@arm.com>
maillist inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7ZC0H
Reference: https://lore.kernel.org/all/20211015161416.2196-1-james.morse@arm.com/t/#u
--------------------------------
Add a pgprot_pbha() helper that modifies a pgprot_t to include a pbha value. The value is checked against those that were listed as only affecting performance.
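A minimal sketch of a driver using the helper (the mmap handler and the PBHA value 3 are invented for illustration; the value must come from the device's firmware description, and pgprot_pbha() silently falls back to PBHA=0 if firmware did not list the value as performance-only):

	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
	{
		/* assumption: firmware listed value 3 in arm,pbha-performance-only */
		vma->vm_page_prot = pgprot_pbha(vma->vm_page_prot, 3);

		return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot);
	}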
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 arch/arm64/include/asm/pgtable-hwdef.h |  1 +
 arch/arm64/include/asm/pgtable.h       | 12 ++++++++++++
 arch/arm64/kernel/cpufeature.c         |  1 +
 3 files changed, 14 insertions(+)
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index bb80c1f0405f..3e332d47e889 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -146,6 +146,7 @@
 #define PTE_CONT		(_AT(pteval_t, 1) << 52)	/* Contiguous range */
 #define PTE_PXN			(_AT(pteval_t, 1) << 53)	/* Privileged XN */
 #define PTE_UXN			(_AT(pteval_t, 1) << 54)	/* User XN */
+#define PTE_PBHA_MASK		(_AT(pteval_t, 0xf) << 59)	/* Page Base Hardware Attributes */
 
 #define PTE_ADDR_LOW		(((_AT(pteval_t, 1) << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
 #ifdef CONFIG_ARM64_PA_BITS_52
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index f2843785f2ec..396627467238 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -515,6 +515,18 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, \
 			PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
 
+extern unsigned long arm64_pbha_perf_only_values;
+static inline unsigned long __pbha_check_perf_only(unsigned long pbha_val)
+{
+	if (test_bit(pbha_val, &arm64_pbha_perf_only_values))
+		return FIELD_PREP(PTE_PBHA_MASK, pbha_val);
+	return 0;
+}
+
+#define pgprot_pbha(prot, pbha_val)					\
+	__pgprot_modify(prot, PTE_PBHA_MASK, __pbha_check_perf_only(pbha_val))
+
 #define __HAVE_PHYS_MEM_ACCESS_PROT
 struct file;
 extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index a55040cddab9..ac794c12c72b 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -112,6 +112,7 @@ EXPORT_SYMBOL(arm64_use_ng_mappings);
 DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;
 
 unsigned long __ro_after_init arm64_pbha_perf_only_values;
+EXPORT_SYMBOL(arm64_pbha_perf_only_values);
 
 /*
  * Flag to indicate if we have computed the system wide
From: James Morse <james.morse@arm.com>
maillist inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7ZC0H
Reference: https://lore.kernel.org/all/20211015161416.2196-1-james.morse@arm.com/t/#u
--------------------------------
There are two conflicting use-cases for PBHA at stage2. We could copy the stage1 PBHA bits to stage2; this would ensure the VMM's memory is exactly reproduced for the guest, including the PBHA bits. The problem here is how the VMM's memory is allocated with the PBHA bits set.
The other is allowing the guest to configure PBHA directly. This would allow guest device drivers to map memory with the appropriate PBHA bits. This would only be safe if the guest can be trusted to only generate PBHA values that only affect performance.
The arm-arm doesn't describe how the stage1 and stage2 bits are combined. Arm's implementations appear to all have the same behaviour, according to the TRM: stage2 wins.
For these CPUs, we can allow a guest to use a PBHA bit by disabling it in VTCR_EL2. We just need to know which bits...
The DT describes the values that only affect performance, but if value-5 is safe for use, we can't prevent the guest from using value-1 and value-4. These 'decomposed' values would also need to be listed as only affecting performance.
Add a cpufeature for CPUs that have this 'stage2 wins' behaviour. Decompose each performance-only value (5 -> 5, 4, 1), and check each of these values is listed as only affecting performance. If so, the bits of the original value (5) can be used by the guest at stage1. (by clearing the bits from VTCR_EL2)
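A worked example of the check, assuming firmware listed {5, 4, 1} (names as in the code added below):

	/*
	 * stage2_test_pbha_value(5):
	 *   decompose_pbha_values(5 | safe_bits:0) = {1, 4, 5}
	 *   {1, 4, 5} are all perf-only            -> safe_bits |= 5
	 *
	 * Had firmware listed only {5}:
	 *   1 and 4 are not perf-only              -> 5 is skipped, no bits freed
	 *
	 * kvm_arm_setup_stage2() then programs VTCR_EL2.PBHA with 0xf ^ safe_bits,
	 * leaving the freed bits under guest stage1 control.
	 */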
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 arch/arm64/include/asm/cpucaps.h    |   1 +
 arch/arm64/include/asm/cpufeature.h |   1 +
 arch/arm64/kernel/cpufeature.c      | 105 ++++++++++++++++++++++++++++
 arch/arm64/kernel/image-vars.h      |   3 +
 arch/arm64/kvm/reset.c              |   8 ++-
 5 files changed, 116 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 5afb9b1e10e8..ae9e88c28d43 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -78,6 +78,7 @@
 #define ARM64_HAS_LDAPR				70
 #define ARM64_HAS_PBHA				71
 #define ARM64_HAS_PBHA_STAGE1			72
+#define ARM64_HAS_PBHA_STAGE2			73
 
 #define ARM64_NCAPS				80
 
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index cce2f74ae618..d6227f23d2ef 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -78,6 +78,7 @@ struct arm64_ftr_reg {
 };
 
 extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
+extern unsigned long arm64_pbha_stage2_safe_bits;
 
 int arm64_cpu_ftr_regs_traverse(int (*op)(u32, u64, void *), void *argp);
 
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index ac794c12c72b..4a0a46981db9 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -76,6 +76,7 @@
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
 #include <asm/cpu_ops.h>
+#include <asm/cputype.h>
 #include <asm/fpsimd.h>
 #include <asm/hwcap.h>
 #include <asm/mmu_context.h>
@@ -113,6 +114,7 @@ DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;
 
 unsigned long __ro_after_init arm64_pbha_perf_only_values;
 EXPORT_SYMBOL(arm64_pbha_perf_only_values);
+unsigned long __ro_after_init arm64_pbha_stage2_safe_bits;
 
 /*
  * Flag to indicate if we have computed the system wide
@@ -1578,13 +1580,50 @@ static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap,
 
 #endif
 
+
 #ifdef CONFIG_ARM64_PBHA
 static u8 pbha_stage1_enable_bits;
+static DEFINE_SPINLOCK(pbha_dt_lock);
+
+/* For the value 5, return a bitmap with bits 5, 4, and 1 set. */
+static unsigned long decompose_pbha_values(u8 val)
+{
+	int i;
+	unsigned long mask = 0;
+
+	for (i = 1; i <= 15; i++) {
+		if ((i & val) == i)
+			set_bit(i, &mask);
+	}
+
+	return mask;
+}
+
+/*
+ * The bits of a value are safe if all values that can be built from those
+ * enabled bits are listed as only affecting performance.
+ * e.g. 5 would also need 1 and 4 to be listed.
+ *
+ * When there is a conflict with the bits already enabled, the new value is
+ * skipped.
+ * e.g. if 5 already caused bit-0 and bit-2 to be enabled, adding 3 to the list
+ * would need to test 7 as bit-2 is already enabled. If 7 is not listed, 3 is
+ * skipped and bit-1 is not enabled.
+ */
+static void stage2_test_pbha_value(u8 val)
+{
+	unsigned long mask;
+
+	mask = decompose_pbha_values(val | arm64_pbha_stage2_safe_bits);
+	if ((arm64_pbha_perf_only_values & mask) == mask)
+		arm64_pbha_stage2_safe_bits |= val;
+}
 
 static bool plat_can_use_pbha_stage1(const struct arm64_cpu_capabilities *cap,
 				     int scope)
 {
 	u8 val;
+	static bool dt_check_done;
 	struct device_node *cpus;
 	const u8 *perf_only_vals;
 	int num_perf_only_vals, i;
@@ -1600,6 +1639,10 @@ static bool plat_can_use_pbha_stage1(const struct arm64_cpu_capabilities *cap,
 	if (scope == SCOPE_LOCAL_CPU)
 		return true;
 
+	spin_lock(&pbha_dt_lock);
+	if (dt_check_done)
+		goto out_unlock;
+
 	cpus = of_find_node_by_path("/cpus");
 	if (!cpus)
 		goto done;
@@ -1619,9 +1662,24 @@ static bool plat_can_use_pbha_stage1(const struct arm64_cpu_capabilities *cap,
 		set_bit(val, &arm64_pbha_perf_only_values);
 	}
 
+	/*
+	 * for stage2 the values are collapsed back to 4 bits that can only
+	 * enable values in the arm64_pbha_perf_only_values mask.
+	 */
+	for (i = 0 ; i < num_perf_only_vals; i++) {
+		val = perf_only_vals[i];
+		if (val > 0xf)
+			continue;
+
+		stage2_test_pbha_value(val);
+	}
+
 done:
 	of_node_put(cpus);
+	dt_check_done = true;
 
+out_unlock:
+	spin_unlock(&pbha_dt_lock);
 	return !!pbha_stage1_enable_bits;
 }
 
@@ -1641,6 +1699,47 @@ static void cpu_enable_pbha(struct arm64_cpu_capabilities const *cap)
 	isb();
 	local_flush_tlb_all();
 }
+
+/*
+ * PBHA's behaviour is implementation defined, as is the way it combines
+ * stage1 and stage2 attributes. If the kernel has KVM supported, and booted
+ * at EL2, only these CPUs can allow PBHA in a guest, as KVM knows how the PBHA
+ * bits are combined. This prevents the host being affected by some
+ * implementation defined behaviour from the guest.
+ *
+ * The TRM for these CPUs describe stage2 as overriding stage1.
+ */
+static const struct midr_range pbha_stage2_wins[] = {
+	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
+	{},
+};
+
+static bool plat_can_use_pbha_stage2(const struct arm64_cpu_capabilities *cap,
+				     int scope)
+{
+	/* Booted at EL2? */
+	if (!is_hyp_mode_available() && !is_kernel_in_hyp_mode())
+		return false;
+
+	if (!is_midr_in_range_list(read_cpuid_id(), cap->midr_range_list))
+		return false;
+
+	/*
+	 * Calls with scope == SCOPE_LOCAL_CPU need only testing whether this
+	 * cpu has the feature. A later 'system' scope call will check for a
+	 * firmware description.
+	 */
+	if (scope == SCOPE_LOCAL_CPU)
+		return true;
+
+	if (!__system_matches_cap(ARM64_HAS_PBHA_STAGE1))
+		return false;
+
+	return !!arm64_pbha_stage2_safe_bits;
+}
 #endif /* CONFIG_ARM64_PBHA */
 
 #ifdef CONFIG_ARM64_AMU_EXTN
@@ -2385,6 +2484,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.min_field_value = 2,
 		.cpu_enable = cpu_enable_pbha,
 	},
+	{
+		.capability = ARM64_HAS_PBHA_STAGE2,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = plat_can_use_pbha_stage2,
+		.midr_range_list = pbha_stage2_wins,
+	},
 #endif
 	{},
 };
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 8d3fede89ae1..3a68772a63fb 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -95,6 +95,9 @@ KVM_NVHE_ALIAS(gic_nonsecure_priorities);
 KVM_NVHE_ALIAS(__start___kvm_ex_table);
 KVM_NVHE_ALIAS(__stop___kvm_ex_table);
 
+/* PBHA bits for stage2 */
+KVM_NVHE_ALIAS(arm64_pbha_stage2_safe_bits);
+
 #endif /* CONFIG_KVM */
 
 #endif /* __ARM64_KERNEL_IMAGE_VARS_H */
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index d151da73f89e..225cf4ffa108 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -431,7 +431,7 @@ int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
 {
 	u64 vtcr = VTCR_EL2_FLAGS, mmfr0;
 	u32 parange, phys_shift;
-	u8 lvls;
+	u8 lvls, pbha = 0xf;
 
 	if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
 		return -EINVAL;
@@ -479,9 +479,13 @@ int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
 	 * value will always be 0, which is defined as the safe default
 	 * setting. On Cortex cores, enabling PBHA for stage2 effectively
 	 * disables it for stage1.
+	 * When the HAS_PBHA_STAGE2 feature is supported, clear the 'safe'
+	 * bits to allow the guest's stage1 to use these bits.
 	 */
+	if (cpus_have_final_cap(ARM64_HAS_PBHA_STAGE2))
+		pbha = pbha ^ arm64_pbha_stage2_safe_bits;
 	if (cpus_have_final_cap(ARM64_HAS_PBHA))
-		vtcr |= FIELD_PREP(VTCR_EL2_PBHA_MASK, 0xf);
+		vtcr |= FIELD_PREP(VTCR_EL2_PBHA_MASK, pbha);
 
 	/* Set the vmid bits */
 	vtcr |= (kvm_get_vmid_bits() == 16) ?
From: James Morse <james.morse@arm.com>
maillist inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7ZC0H
Reference: https://lore.kernel.org/all/20211015161416.2196-1-james.morse@arm.com/t/#u
--------------------------------
PBHA isn't defined by the Arm CPU architecture, so may have surprising side-effects.
Document what is, and what is not supported. List the arch code's expectations regarding how PBHA behaves.
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 Documentation/arm64/index.rst |  1 +
 Documentation/arm64/pbha.rst  | 85 +++++++++++++++++++++++++++++++++++
 2 files changed, 86 insertions(+)
 create mode 100644 Documentation/arm64/pbha.rst
diff --git a/Documentation/arm64/index.rst b/Documentation/arm64/index.rst
index 937634c49979..b65de0107c97 100644
--- a/Documentation/arm64/index.rst
+++ b/Documentation/arm64/index.rst
@@ -17,6 +17,7 @@ ARM64 Architecture
     legacy_instructions
     memory
     memory-tagging-extension
+    pbha
     perf
     pointer-authentication
     silicon-errata
diff --git a/Documentation/arm64/pbha.rst b/Documentation/arm64/pbha.rst
new file mode 100644
index 000000000000..d8a3df8982a5
--- /dev/null
+++ b/Documentation/arm64/pbha.rst
@@ -0,0 +1,85 @@
+=======================================================
+Page Based Hardware Attribute support for AArch64 Linux
+=======================================================
+
+Page Based Hardware Attributes (PBHA) allow the OS to trigger IMPLEMENTATION
+DEFINED behaviour associated with a memory access. For example, this may be
+taken as a hint to a System Cache whether it should cache the location that
+has been accessed.
+
+PBHA consists of four bits in the leaf page table entries for a virtual
+address, that are sent with any memory access via that virtual address.
+
+IMPLEMENTATION DEFINED behaviour is not specified by the arm-arm, meaning
+it varies between SoCs. There may be unexpected side effects when PBHA
+bits are used or combined.
+For example, a PBHA bit may be taken as a hint to the Memory Controller that
+it should encrypt/decrypt the memory in DRAM. If the CPU has multiple virtual
+aliases of the address, accesses that are made without this PBHA bit set may
+cause corruption.
+
+
+Use by virtual machines using KVM
+---------------------------------
+
+KVM allows an OS in a virtual machine to configure its own page tables. A
+virtual machine can also configure PBHA bits in its page tables. To prevent
+side effects that could affect the hypervisor, KVM will only allow
+combinations of PBHA bits that only affect performance. Values that cause
+changes to the data are forbidden as the Hypervisor and VMM have aliases of
+the guest memory, and may swap it to/from disk.
+
+The list of bits to allow is built from the firmware list of PBHA bit
+combinations that only affect performance. Because the guest can choose
+not to set all the bits in a value, (e.g. allowing 5 implicitly allows 1
+and 4), the values supported may differ between a host and guest.
+
+PBHA is only supported for a guest if KVM supports the mechanism the CPU uses
+to combine the values from stage1 and stage2 translation. The mechanism is
+not advertised, so which mechanism each CPU uses must also be known by the
+kernel.
+
+
+Use by device drivers
+---------------------
+
+Device drivers should discover the PBHA value to use for a mapping from the
+device's firmware description as these will vary between SoCs. If the value
+is also listed by firmware as only affecting performance, it can be added to
+the pgprot with pgprot_pbha().
+
+Values that require all other aliases to be removed are not supported.
+
+
+Linux's expectations around PBHA
+--------------------------------
+
+'IMPLEMENTATION DEFINED' describes a huge range of possible behaviours.
+Linux expects PBHA to behave in the same way as the read/write allocate
+hints for a memory type. Below is an incomplete list of expectations:
+
+ * PBHA values have the same meaning for all CPUs in the SoC.
+ * Use of the PBHA value does not cause mismatched type, shareability or
+   cacheability, it does not take precedence over the stage2 attributes, or
+   HCR_EL2 controls.
+ * If a PBHA value requires all other aliases to be removed, higher exception
+   levels do not have a concurrent alias. (This includes Secure World).
+ * Break before make is sufficient when changing the PBHA value.
+ * PBHA values used by a page can be changed independently without further
+   side effects.
+ * Save/restoring the page contents via a PBHA=0 mapping does not corrupt the
+   values once a non-zero PBHA mapping is re-created.
+ * The hypervisor may clean+invalidate to the PoC via a PBHA=0 mapping prior
+   to save/restore to cleanup mismatched attributes. This does not corrupt
+   the values after save/restore once a non-zero PBHA mapping is re-created.
+ * Cache maintenance via a PBHA=0 mapping to prevent stale data being visible
+   when mismatched attributes occur is sufficient even if the subsequent
+   mapping has a non-zero PBHA value.
+ * The OS/hypervisor can clean-up a page by removing all non-zero PBHA
+   mappings, then writing new data via PBHA=0 mapping of the same type,
+   shareability and cacheability. After this, only the new data is visible
+   for data accesses.
+ * For instruction-fetch, the same maintenance as would be performed against
+   a PBHA=0 page is sufficient. (which with DIC+IDC, may be none at all).
+ * The behaviour enabled by PBHA should not depend on the size of the access,
+   or whether other SoC hardware under the control of the OS is enabled and
+   configured.
+ * EL2 is able to at least force stage1 PBHA bits to zero.
From: Ma Wupeng <mawupeng1@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7ZC0H
CVE: NA
--------------------------------
PBHA can now be enabled via the FDT at early startup.

Here is an example of the corresponding FDT entry:

cpus {
	arm,pbha-performance-only = <0x01000000 0x00000000 0x00000000 0x00000000>;
};
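An editorial note on the encoding: FDT cells are big-endian, so the four u32 cells above form the 16-byte stream 01 00 00 ... 00, i.e. only PBHA value 1 is listed. A sketch of what the early parser sees:

	/* prop = fdt_getprop(fdt, node, "arm,pbha-performance-only", &size);
	 *   prop[0] == 0x01  -> PBHA value 1 is performance-only
	 *   prop[1..15] == 0 -> zero entries, which contribute no enable bits
	 * so only PBHA bit0 ends up in pbha_stage1_enable_bits. */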
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 arch/arm64/include/asm/cpufeature.h | 14 +++++
 arch/arm64/kernel/cpufeature.c      | 85 ++++++++++++++++++++++++-----
 arch/arm64/kernel/head.S            |  3 +
 3 files changed, 89 insertions(+), 13 deletions(-)
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index d6227f23d2ef..711f64fa1aa0 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -768,6 +768,20 @@ static inline bool system_supports_tlb_range(void)
 		cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
 }
 
+#ifdef CONFIG_ARM64_PBHA
+extern bool pbha_enabled;
+
+static inline bool system_supports_pbha(void)
+{
+	return pbha_enabled;
+}
+#else
+static inline bool system_supports_pbha(void)
+{
+	return false;
+}
+#endif
+
 extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
 
 static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 4a0a46981db9..b7d7da84df98 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -72,6 +72,8 @@
 #include <linux/mm.h>
 #include <linux/of.h>
 #include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/libfdt.h>
 
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
@@ -86,6 +88,7 @@
 #include <asm/traps.h>
 #include <asm/vectors.h>
 #include <asm/virt.h>
+#include <asm/setup.h>
 
 /* Kernel representation of AT_HWCAP and AT_HWCAP2 */
 static unsigned long elf_hwcap __read_mostly;
@@ -1585,6 +1588,8 @@ static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap,
 static u8 pbha_stage1_enable_bits;
 static DEFINE_SPINLOCK(pbha_dt_lock);
 
+bool pbha_enabled;
+
 /* For the value 5, return a bitmap with bits 5, 4, and 1 set. */
 static unsigned long decompose_pbha_values(u8 val)
 {
@@ -1619,11 +1624,26 @@ static void stage2_test_pbha_value(u8 val)
 		arm64_pbha_stage2_safe_bits |= val;
 }
 
+void update_pbha_perf_only_bit(const u8 *bits, int cnt)
+{
+	u8 val;
+	int i;
+
+	/* any listed value is usable at stage 1 */
+	for (i = 0 ; i < cnt; i++) {
+		val = bits[i];
+		if (val > 0xf)
+			continue;
+
+		pbha_stage1_enable_bits |= val;
+		set_bit(val, &arm64_pbha_perf_only_values);
+	}
+}
+
 static bool plat_can_use_pbha_stage1(const struct arm64_cpu_capabilities *cap,
 				     int scope)
 {
 	u8 val;
-	static bool dt_check_done;
 	struct device_node *cpus;
 	const u8 *perf_only_vals;
 	int num_perf_only_vals, i;
@@ -1640,7 +1660,7 @@ static bool plat_can_use_pbha_stage1(const struct arm64_cpu_capabilities *cap,
 		return true;
 
 	spin_lock(&pbha_dt_lock);
-	if (dt_check_done)
+	if (pbha_enabled)
 		goto out_unlock;
 
 	cpus = of_find_node_by_path("/cpus");
@@ -1652,15 +1672,7 @@ static bool plat_can_use_pbha_stage1(const struct arm64_cpu_capabilities *cap,
 	if (!perf_only_vals)
 		goto done;
 
-	/* any listed value is usable at stage 1 */
-	for (i = 0 ; i < num_perf_only_vals; i++) {
-		val = perf_only_vals[i];
-		if (val > 0xf)
-			continue;
-
-		pbha_stage1_enable_bits |= val;
-		set_bit(val, &arm64_pbha_perf_only_values);
-	}
+	update_pbha_perf_only_bit(perf_only_vals, num_perf_only_vals);
 
 	/*
 	 * for stage2 the values are collapsed back to 4 bits that can only
@@ -1676,14 +1688,14 @@ static bool plat_can_use_pbha_stage1(const struct arm64_cpu_capabilities *cap,
 
 done:
 	of_node_put(cpus);
-	dt_check_done = true;
+	pbha_enabled = true;
 
 out_unlock:
 	spin_unlock(&pbha_dt_lock);
 	return !!pbha_stage1_enable_bits;
 }
 
-static void cpu_enable_pbha(struct arm64_cpu_capabilities const *cap)
+static void enable_pbha_inner(void)
 {
 	u64 tcr;
 
@@ -1700,6 +1712,53 @@ static void cpu_enable_pbha(struct arm64_cpu_capabilities const *cap)
 	local_flush_tlb_all();
 }
 
+static void cpu_enable_pbha(struct arm64_cpu_capabilities const *cap)
+{
+	enable_pbha_inner();
+}
+
+static inline bool cpu_has_pbha(void)
+{
+	u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
+	int val = cpuid_feature_extract_unsigned_field(mmfr1,
+						       ID_AA64MMFR1_HPD_SHIFT);
+
+	return val == 2;
+}
+
+void __init early_pbha_init(void)
+{
+	void *fdt;
+	int node;
+	const u8 *prop;
+	int size;
+
+	spin_lock(&pbha_dt_lock);
+
+	fdt = get_early_fdt_ptr();
+	if (!fdt)
+		goto unlock;
+
+	node = fdt_path_offset(fdt, "/cpus");
+	if (node < 0)
+		goto unlock;
+
+	prop = fdt_getprop(fdt, node, "arm,pbha-performance-only", &size);
+	if (!prop)
+		goto unlock;
+
+	if (!cpu_has_pbha())
+		goto unlock;
+
+	update_pbha_perf_only_bit(prop, size);
+	enable_pbha_inner();
+
+	pbha_enabled = true;
+
+unlock:
+	spin_unlock(&pbha_dt_lock);
+}
+
 /*
  * PBHA's behaviour is implementation defined, as is the way it combines
  * stage1 and stage2 attributes. If the kernel has KVM supported, and booted
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index b65f95f25ec8..5bc343fc2a91 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -454,6 +454,9 @@ SYM_FUNC_START_LOCAL(__primary_switched)
 #endif
 	mov	x0, x21				// pass FDT address in x0
 	bl	early_fdt_map			// Try mapping the FDT early
+#ifdef CONFIG_ARM64_PBHA
+	bl	early_pbha_init			// Init PBHA early via FDT
+#endif
 #ifdef CONFIG_RANDOMIZE_BASE
 	tst	x23, ~(MIN_KIMG_ALIGN - 1)	// already running randomized?
 	b.ne	0f
From: Ma Wupeng <mawupeng1@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7ZC0H
--------------------------------
Enable PBHA bit0 via the FDT at startup. PBHA bit 0 currently needs to be initialized at early boot.

Here is an example that enables PBHA bit0 in the FDT:

cpus {
	arm,pbha-performance-only = <0x01000000 0x00000000 0x00000000 0x00000000>;
};

chosen {
	linux,pbha-bit0 = [0/1];
};
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 arch/arm64/Kconfig                 |  6 ++++
 arch/arm64/kernel/cpufeature.c     |  3 ++
 drivers/firmware/efi/libstub/fdt.c | 52 ++++++++++++++++++++++++++++++
 drivers/soc/hisilicon/Makefile     |  1 +
 drivers/soc/hisilicon/pbha.c       | 42 ++++++++++++++++++++++++
 include/linux/pbha.h               | 30 +++++++++++++++++
 6 files changed, 134 insertions(+)
 create mode 100644 drivers/soc/hisilicon/pbha.c
 create mode 100644 include/linux/pbha.h
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 48bd8258bd34..e450ccac2664 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1704,6 +1704,12 @@ config ARM64_PBHA
 	  The behaviour of each PBHA bit is not defined. Say no unless you
 	  are very sure you want this.
 
+	  For PBHA_BIT0, it has the following features:
+	  a) kernel pte entries will have PBHA bit 59 set during page table setup
+	  b) VM_PBHA_BIT0 is introduced for future use
+	  c) user pte entries will have PBHA bit 59 set for vmas with VM_PBHA_BIT0
+	  d) /proc/<pid>/pbha_bit0 is introduced to update a whole user task
+
 endmenu
 
 menu "ARMv8.3 architectural features"
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index b7d7da84df98..97744503704d 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -74,6 +74,7 @@
 #include <linux/cpu.h>
 #include <linux/init.h>
 #include <linux/libfdt.h>
+#include <linux/pbha.h>
 
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
@@ -1755,6 +1756,8 @@ void __init early_pbha_init(void)
 
 	pbha_enabled = true;
 
+	early_pbha_bit0_init();
+
 unlock:
 	spin_unlock(&pbha_dt_lock);
 }
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index d48b0de05b62..d212bd2ad418 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -9,6 +9,7 @@
 
 #include <linux/efi.h>
 #include <linux/libfdt.h>
+#include <linux/pbha.h>
 #include <asm/efi.h>
 
 #include "efistub.h"
@@ -27,6 +28,54 @@ static void fdt_update_cell_size(void *fdt)
 	fdt_setprop_u32(fdt, offset, "#size-cells", EFI_DT_SIZE_CELLS_DEFAULT);
 }
 
+#ifdef CONFIG_ARM64_PBHA
+static efi_status_t fdt_init_hbm_mode(void *fdt, int node)
+{
+	efi_guid_t oem_config_guid = EFI_OEMCONFIG_VARIABLE_GUID;
+	unsigned long size;
+	efi_status_t efi_status;
+	u8 hbm_mode;
+	int status;
+	u8 fdt_val32;
+	u8 arr[16] = { 0x1 };
+
+	efi_status = get_efi_var(L"HBMMode", &oem_config_guid, NULL, &size,
+				 &hbm_mode);
+	if (efi_status != EFI_SUCCESS)
+		goto out;
+
+	if (hbm_mode != HBM_MODE_CACHE)
+		goto out;
+
+	fdt_val32 = 1;
+	status = fdt_setprop_var(fdt, node, "linux,pbha-bit0", fdt_val32);
+	if (status)
+		return EFI_LOAD_ERROR;
+
+	node = fdt_subnode_offset(fdt, 0, "cpus");
+	if (node < 0) {
+		node = fdt_add_subnode(fdt, 0, "cpus");
+		if (node < 0)
+			return EFI_LOAD_ERROR;
+	}
+
+	/* PBHA bit59 (value 1) is needed to enable PBHA bit0 mode. */
+	status = fdt_setprop_var(fdt, node, "arm,pbha-performance-only", arr);
+	if (status) {
+		efi_err("PBHA: arm,pbha-performance-only failed\n");
+		return EFI_LOAD_ERROR;
+	}
+
+out:
+	return EFI_SUCCESS;
+}
+#else
+static inline efi_status_t fdt_init_hbm_mode(void *fdt, int node)
+{
+	return EFI_SUCCESS;
+}
+#endif
+
 static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size,
 			       void *fdt, int new_fdt_size, char *cmdline_ptr,
 			       u64 initrd_addr, u64 initrd_size)
@@ -148,6 +197,9 @@ static efi_status_t update_fdt(void *orig_fdt, unsigned long orig_fdt_size,
 		}
 	}
 
+	if (fdt_init_hbm_mode(fdt, node) != EFI_SUCCESS)
+		goto fdt_set_fail;
+
 	/* Shrink the FDT back to its minimum size: */
 	fdt_pack(fdt);
 
diff --git a/drivers/soc/hisilicon/Makefile b/drivers/soc/hisilicon/Makefile
index cbc01ad5b8fd..e0f966d1ed6d 100644
--- a/drivers/soc/hisilicon/Makefile
+++ b/drivers/soc/hisilicon/Makefile
@@ -3,3 +3,4 @@ obj-$(CONFIG_KUNPENG_HCCS) += kunpeng_hccs.o
 
 obj-$(CONFIG_HISI_HBMDEV) += hisi_hbmdev.o
 obj-$(CONFIG_HISI_HBMCACHE) += hisi_hbmcache.o
+obj-$(CONFIG_ARM64_PBHA) += pbha.o
diff --git a/drivers/soc/hisilicon/pbha.c b/drivers/soc/hisilicon/pbha.c
new file mode 100644
index 000000000000..95bf9de0e9e9
--- /dev/null
+++ b/drivers/soc/hisilicon/pbha.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) Huawei Technologies Co., Ltd. 2023. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "pbha: " fmt
+
+#include <linux/init.h>
+#include <linux/libfdt.h>
+#include <linux/printk.h>
+#include <linux/cpufeature.h>
+
+#include <asm/setup.h>
+
+#define HBM_MODE_CACHE	1
+
+bool __ro_after_init pbha_bit0_enabled;
+
+void __init early_pbha_bit0_init(void)
+{
+	const u8 *prop;
+	void *fdt;
+	int node;
+
+	/* Check whether PBHA is enabled or not. */
+	if (!system_supports_pbha())
+		return;
+
+	fdt = get_early_fdt_ptr();
+	if (!fdt)
+		return;
+
+	node = fdt_path_offset(fdt, "/chosen");
+	if (node < 0)
+		return;
+
+	prop = fdt_getprop(fdt, node, "linux,pbha-bit0", NULL);
+	if (!prop)
+		return;
+	if (*prop == HBM_MODE_CACHE)
+		pbha_bit0_enabled = true;
+}
diff --git a/include/linux/pbha.h b/include/linux/pbha.h
new file mode 100644
index 000000000000..45261d163b1b
--- /dev/null
+++ b/include/linux/pbha.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) Huawei Technologies Co., Ltd. 2023. All rights reserved.
+ */
+#ifndef __LINUX_PBHA_H
+#define __LINUX_PBHA_H
+
+#include <linux/efi.h>
+#include <linux/libfdt.h>
+
+#define EFI_OEMCONFIG_VARIABLE_GUID                                            \
+	EFI_GUID(0x21f3b3c5, 0x946d, 0x41c1, 0x83, 0x8c, 0x19, 0x4e, 0x48,     \
+		 0xaa, 0x41, 0xe2)
+
+#define HBM_MODE_MEMORY 0
+#define HBM_MODE_CACHE  1
+
+#ifdef CONFIG_ARM64_PBHA
+extern bool __ro_after_init pbha_bit0_enabled;
+extern void __init early_pbha_bit0_init(void);
+
+static inline bool system_support_pbha_bit0(void)
+{
+	return pbha_bit0_enabled;
+}
+#else
+static inline bool system_support_pbha_bit0(void) { return false; }
+#endif
+
+#endif
From: Ma Wupeng <mawupeng1@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7ZC0H
--------------------------------
Update kernel pte entries if pbha bit0 is enabled. The bit can then serve as a hint to the hardware for kernel page table entries.
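Illustrative use of the helper this patch routes the kernel mapping paths through (PAGE_KERNEL is the stock kernel pgprot; this only restates the patch's behaviour):

	pgprot_t prot = pgprot_pbha_bit0(PAGE_KERNEL);

	/* If linux,pbha-bit0 was accepted at boot, pgprot_val(prot) now carries
	 * PTE bit 59 (PTE_PBHA0); otherwise prot is returned unchanged. The
	 * patch also teaches pgattr_change_is_safe() to tolerate this bit
	 * changing on live mappings. */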
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 arch/arm64/include/asm/pgtable-hwdef.h |  1 +
 arch/arm64/mm/mmu.c                    | 14 +++++++++++++-
 include/linux/pbha.h                   | 13 ++++++++++++-
 mm/vmalloc.c                           |  5 +++++
 4 files changed, 31 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 3e332d47e889..a5cff5b376f6 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -147,6 +147,7 @@
 #define PTE_PXN			(_AT(pteval_t, 1) << 53)	/* Privileged XN */
 #define PTE_UXN			(_AT(pteval_t, 1) << 54)	/* User XN */
 #define PTE_PBHA_MASK		(_AT(pteval_t, 0xf) << 59)	/* Page Base Hardware Attributes */
+#define PTE_PBHA0		(_AT(pteval_t, 1) << 59)	/* PBHA 59 bit */
 
 #define PTE_ADDR_LOW		(((_AT(pteval_t, 1) << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
 #ifdef CONFIG_ARM64_PA_BITS_52
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 78b9e489d8f6..44c595550ae6 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -23,6 +23,7 @@
 #include <linux/io.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
+#include <linux/pbha.h>
 
 #include <asm/barrier.h>
 #include <asm/cputype.h>
@@ -126,6 +127,10 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
 	 */
 	pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
 
+#ifdef CONFIG_ARM64_PBHA
+	mask |= PTE_PBHA0;
+#endif
+
 	/* creating or taking down mappings is always safe */
 	if (old == 0 || new == 0)
 		return true;
@@ -372,6 +377,8 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
 	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
 		return;
 
+	prot = pgprot_pbha_bit0(prot);
+
 	phys &= PAGE_MASK;
 	addr = virt & PAGE_MASK;
 	end = PAGE_ALIGN(virt + size);
@@ -1152,6 +1159,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 {
 	unsigned long addr = start;
 	unsigned long next;
+	pgprot_t prot;
 	pgd_t *pgdp;
 	p4d_t *p4dp;
 	pud_t *pudp;
@@ -1180,7 +1188,10 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 			if (!p)
 				return -ENOMEM;
 
-			pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
+			prot = __pgprot(PROT_SECT_NORMAL);
+			prot = pgprot_pbha_bit0(prot);
+
+			pmd_set_huge(pmdp, __pa(p), prot);
 		} else
 			vmemmap_verify((pte_t *)pmdp, node, addr, next);
 	} while (addr = next, addr != end);
@@ -1300,6 +1311,7 @@ void __set_fixmap(enum fixed_addresses idx,
 	ptep = fixmap_pte(addr);
 
 	if (pgprot_val(flags)) {
+		flags = pgprot_pbha_bit0(flags);
 		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
 	} else {
 		pte_clear(&init_mm, addr, ptep);
diff --git a/include/linux/pbha.h b/include/linux/pbha.h
index 45261d163b1b..8c5c79bfc42a 100644
--- a/include/linux/pbha.h
+++ b/include/linux/pbha.h
@@ -5,8 +5,10 @@
 #ifndef __LINUX_PBHA_H
 #define __LINUX_PBHA_H
 
-#include <linux/efi.h>
 #include <linux/libfdt.h>
+#include <linux/pgtable.h>
+
+#define PBHA_VAL_BIT0 1UL
 
 #define EFI_OEMCONFIG_VARIABLE_GUID                                            \
 	EFI_GUID(0x21f3b3c5, 0x946d, 0x41c1, 0x83, 0x8c, 0x19, 0x4e, 0x48,     \
@@ -23,8 +25,17 @@ static inline bool system_support_pbha_bit0(void)
 {
 	return pbha_bit0_enabled;
 }
 
+static inline pgprot_t pgprot_pbha_bit0(pgprot_t prot)
+{
+	if (!system_support_pbha_bit0())
+		return prot;
+
+	return pgprot_pbha(prot, PBHA_VAL_BIT0);
+}
 #else
 static inline bool system_support_pbha_bit0(void) { return false; }
+static inline pgprot_t pgprot_pbha_bit0(pgprot_t prot) { return prot; }
 #endif
 
 #endif
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index e27cd716ca95..caba5659d137 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -38,6 +38,7 @@
 #include <linux/uaccess.h>
 #include <linux/hugetlb.h>
 #include <linux/share_pool.h>
+#include <linux/pbha.h>
 #include <asm/io.h>
 #include <asm/tlbflush.h>
 #include <asm/shmparam.h>
@@ -307,6 +308,8 @@ int vmap_range(unsigned long addr, unsigned long end,
 {
 	int err;
 
+	prot = pgprot_pbha_bit0(prot);
+
 	err = vmap_range_noflush(addr, end, phys_addr, prot, max_page_shift);
 	flush_cache_vmap(addr, end);
 
@@ -549,6 +552,8 @@ static int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
 
 	WARN_ON(page_shift < PAGE_SHIFT);
 
+	prot = pgprot_pbha_bit0(prot);
+
 	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
 			page_shift == PAGE_SHIFT)
 		return vmap_small_pages_range_noflush(addr, end, prot, pages);
From: Ma Wupeng <mawupeng1@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7ZC0H
--------------------------------
Show PBHA bit 59 as PBHA0 in ptdump to make debugging kernel page table entries easier.
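With CONFIG_PTDUMP_DEBUGFS enabled, a mapping whose pte has bit 59 set then carries a PBHA0 column in /sys/kernel/debug/kernel_page_tables. A hypothetical excerpt (address range, size, and the other attribute columns are illustrative and depend entirely on the system):

  0xffff800010000000-0xffff800010e00000    14M PTE RW NX SHD AF NG UXN PBHA0 MEM/NORMAL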
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 arch/arm64/mm/ptdump.c | 5 +++++
 1 file changed, 5 insertions(+)
diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c
index 807dc634bbd2..000819979b96 100644
--- a/arch/arm64/mm/ptdump.c
+++ b/arch/arm64/mm/ptdump.c
@@ -151,6 +151,11 @@ static const struct prot_bits pte_bits[] = {
 		.val	= PTE_GP,
 		.set	= "GP",
 		.clear	= "  ",
+	}, {
+		.mask	= PTE_PBHA0,
+		.val	= PTE_PBHA0,
+		.set	= "PBHA0",
+		.clear	= "     ",
 	}, {
 		.mask	= PTE_ATTRINDX_MASK,
 		.val	= PTE_ATTRINDX(MT_DEVICE_nGnRnE),
From: Ma Wupeng <mawupeng1@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7ZC0H
--------------------------------
Introduce VM_PBHA_BIT0 to enable PBHA bit0 for a single VMA, requested from userspace via the new PROT_PBHA_BIT0 mmap/mprotect flag.
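A minimal userspace sketch of requesting PBHA bit0 on a single mapping (not part of this patch; PROT_PBHA_BIT0 is the value added by the hunks below, everything else is illustrative):

  #include <stdio.h>
  #include <sys/mman.h>

  /* arm64-specific value from this patch; not in libc headers */
  #ifndef PROT_PBHA_BIT0
  #define PROT_PBHA_BIT0 0x40
  #endif

  int main(void)
  {
          /* Request PBHA bit0 on a private anonymous mapping. */
          void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_PBHA_BIT0,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

          if (p == MAP_FAILED) {
                  perror("mmap");
                  return 1;
          }

          /* arch_validate_prot() rejects the flag via mprotect() when
           * PBHA bit0 is not supported on this system. */
          if (mprotect(p, 4096, PROT_READ | PROT_PBHA_BIT0))
                  perror("mprotect");

          return 0;
  }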
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 arch/arm64/Kconfig                     |  1 +
 arch/arm64/include/asm/mman.h          | 10 ++++++++++
 arch/arm64/include/uapi/asm/mman.h     |  1 +
 fs/proc/task_mmu.c                     |  3 +++
 include/linux/mm.h                     |  6 ++++++
 include/uapi/asm-generic/mman-common.h |  1 +
 6 files changed, 22 insertions(+)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index e450ccac2664..af3d833544ba 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1694,6 +1694,7 @@ config ARM64_CNP
 config ARM64_PBHA
 	bool "Enable support for Page Based Hardware Attributes (PBHA)"
 	default n
+	select ARCH_USES_HIGH_VMA_FLAGS
 	help
 	  Page Based Hardware Attributes (PBHA) allow the SoC hardware to
 	  change behaviour depending on which mapping was used to access
diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h
index e3e28f7daf62..3c96193953e9 100644
--- a/arch/arm64/include/asm/mman.h
+++ b/arch/arm64/include/asm/mman.h
@@ -5,6 +5,7 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <uapi/asm/mman.h>
+#include <linux/pbha.h>
 static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
 	unsigned long pkey __always_unused)
@@ -17,6 +18,9 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
 	if (system_supports_mte() && (prot & PROT_MTE))
 		ret |= VM_MTE;
+	if (system_support_pbha_bit0() && (prot & PROT_PBHA_BIT0))
+		ret |= VM_PBHA_BIT0;
+
 	return ret;
 }
 #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
@@ -55,6 +59,9 @@ static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
 	if (vm_flags & VM_MTE)
 		prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);
+	if (vm_flags & VM_PBHA_BIT0)
+		prot |= PTE_PBHA0;	/* select PBHA bit 0 */
+
 	return __pgprot(prot);
 }
 #define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
@@ -70,6 +77,9 @@ static inline bool arch_validate_prot(unsigned long prot,
 	if (system_supports_mte())
 		supported |= PROT_MTE;
+	if (system_support_pbha_bit0())
+		supported |= PROT_PBHA_BIT0;
+
 	return (prot & ~supported) == 0;
 }
 #define arch_validate_prot(prot, addr) arch_validate_prot(prot, addr)
diff --git a/arch/arm64/include/uapi/asm/mman.h b/arch/arm64/include/uapi/asm/mman.h
index 1e6482a838e1..af6ffde748f2 100644
--- a/arch/arm64/include/uapi/asm/mman.h
+++ b/arch/arm64/include/uapi/asm/mman.h
@@ -6,5 +6,6 @@
 #define PROT_BTI	0x10		/* BTI guarded page */
 #define PROT_MTE	0x20		/* Normal Tagged mapping */
+#define PROT_PBHA_BIT0	0x40		/* PBHA bit 59 */
 #endif /* ! _UAPI__ASM_MMAN_H */
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 9182d0c6d22c..d54e0e3474cc 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -670,6 +670,9 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
 		[ilog2(VM_MTE)]		= "mt",
 		[ilog2(VM_MTE_ALLOWED)]	= "",
 #endif
+#ifdef CONFIG_ARM64_PBHA
+		[ilog2(VM_PBHA_BIT0)]	= "p0",
+#endif
 #ifdef CONFIG_ARCH_HAS_PKEYS
 		/* These come out via ProtectionKey: */
 		[ilog2(VM_PKEY_BIT0)]	= "",
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 34400f909335..465a47afd2a9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -392,6 +392,12 @@ extern unsigned int kobjsize(const void *objp);
 # define VM_SHARE_POOL	VM_NONE
 #endif
+#if defined(CONFIG_ARM64_PBHA)
+# define VM_PBHA_BIT0	VM_HIGH_ARCH_2	/* Page Based Hardware Attributes bit 0 */
+#else
+# define VM_PBHA_BIT0	VM_NONE
+#endif
+
 #ifndef VM_GROWSUP
 # define VM_GROWSUP	VM_NONE
 #endif
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h
index 66c408ccc6c6..4d23e72a2603 100644
--- a/include/uapi/asm-generic/mman-common.h
+++ b/include/uapi/asm-generic/mman-common.h
@@ -13,6 +13,7 @@
 #define PROT_SEM	0x8		/* page may be used for atomic ops */
 /*			0x10		reserved for arch-specific use */
 /*			0x20		reserved for arch-specific use */
+/*			0x40		reserved for arch-specific use */
 #define PROT_NONE	0x0		/* page can not be accessed */
 #define PROT_GROWSDOWN	0x01000000	/* mprotect flag: extend change to start of growsdown vma */
 #define PROT_GROWSUP	0x02000000	/* mprotect flag: extend change to end of growsup vma */
From: Ma Wupeng <mawupeng1@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7ZC0H
--------------------------------
Set the PBHA0 bit in pte entries for VMAs carrying VM_PBHA_BIT0 during page fault (#PF) handling.
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 arch/arm64/include/asm/pgtable.h |  5 +++++
 arch/arm64/mm/hugetlbpage.c      |  2 ++
 include/linux/pbha.h             | 16 ++++++++++++++++
 mm/memory.c                      |  4 ++++
 4 files changed, 27 insertions(+)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 396627467238..1ca5e427c603 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -527,6 +527,11 @@ static inline unsigned long __pbha_check_perf_only(unsigned long pbha_val)
 #define pgprot_pbha(prot, pbha_val) \
 	__pgprot_modify(prot, PTE_PBHA_MASK, __pbha_check_perf_only(pbha_val))
+static inline pte_t pte_mkpbha(pte_t pte, unsigned long pbha_val)
+{
+	return set_pte_bit(pte, __pgprot(__pbha_check_perf_only(pbha_val)));
+}
+
 #define __HAVE_PHYS_MEM_ACCESS_PROT
 struct file;
 extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 99cd6e718408..4effa2dd0518 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -224,6 +224,8 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 	 */
 	WARN_ON(!pte_present(pte));
+	pte = maybe_mk_pbha_bit0(pte, find_vma(mm, addr));
+
 	if (!pte_cont(pte)) {
 		set_pte_at(mm, addr, ptep, pte);
 		return;
diff --git a/include/linux/pbha.h b/include/linux/pbha.h
index 8c5c79bfc42a..b2b256696af3 100644
--- a/include/linux/pbha.h
+++ b/include/linux/pbha.h
@@ -5,6 +5,7 @@
 #ifndef __LINUX_PBHA_H
 #define __LINUX_PBHA_H
+#include <linux/mm.h>
 #include <linux/libfdt.h>
 #include <linux/pgtable.h>
@@ -33,9 +34,24 @@ static inline pgprot_t pgprot_pbha_bit0(pgprot_t prot)
 	return pgprot_pbha(prot, PBHA_VAL_BIT0);
 }
+
+static inline pte_t maybe_mk_pbha_bit0(pte_t pte, struct vm_area_struct *vma)
+{
+	if (!system_support_pbha_bit0())
+		return pte;
+
+	if (vma->vm_flags & VM_PBHA_BIT0)
+		pte = pte_mkpbha(pte, PBHA_VAL_BIT0);
+
+	return pte;
+}
 #else
 static inline bool system_support_pbha_bit0(void) { return false; }
 static inline pgprot_t pgprot_pbha_bit0(pgprot_t prot) { return prot; }
+static inline pte_t maybe_mk_pbha_bit0(pte_t pte, struct vm_area_struct *vma)
+{
+	return pte;
+}
 #endif
 #endif
diff --git a/mm/memory.c b/mm/memory.c
index 5893c178251a..55d4375d4b27 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -74,6 +74,7 @@
 #include <linux/ptrace.h>
 #include <linux/vmalloc.h>
 #include <linux/userswap.h>
+#include <linux/pbha.h>
 #include <trace/events/kmem.h>
@@ -2969,6 +2970,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 		entry = mk_pte(new_page, vma->vm_page_prot);
 		entry = pte_sw_mkyoung(entry);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+		entry = maybe_mk_pbha_bit0(entry, vma);
 		/*
 		 * Clear the pte entry and flush it first, before updating the
 		 * pte with the new entry. This will avoid a race condition
@@ -3709,6 +3711,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
 	__SetPageUptodate(page);
 	entry = mk_pte(page, vma->vm_page_prot);
+	entry = maybe_mk_pbha_bit0(entry, vma);
 	entry = pte_sw_mkyoung(entry);
 	if (vma->vm_flags & VM_WRITE)
 		entry = pte_mkwrite(pte_mkdirty(entry));
@@ -4016,6 +4019,7 @@ vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page)
 		inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
 		page_add_file_rmap(page, false);
 	}
+	entry = maybe_mk_pbha_bit0(entry, vma);
 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
 	/* no need to invalidate: a not-present page won't be cached */
From: Ma Wupeng <mawupeng1@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7ZC0H
--------------------------------
Introduce /proc/<pid>/pbha_bit0 to update PBHA bit0.
This procfs file accepts the values 0 and 1:
- 0: iterate over all VMAs of this task and clear VM_PBHA_BIT0 on each; new VMAs will no longer inherit VM_PBHA_BIT0; clear pte bit 59 in all pte entries.
- 1: iterate over all VMAs of this task and set VM_PBHA_BIT0 on each; new VMAs will inherit VM_PBHA_BIT0 by default; set pte bit 59 in all pte entries.
A userspace sketch follows the list.
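A sketch of driving the interface from userspace (hypothetical helper, not part of this patch; error handling abbreviated):

  #include <stdio.h>
  #include <sys/types.h>

  /* Set (1) or clear (0) PBHA bit0 for every VMA of the target task. */
  static int set_pbha_bit0(pid_t pid, int val)
  {
          char path[64];
          FILE *f;

          snprintf(path, sizeof(path), "/proc/%d/pbha_bit0", (int)pid);
          f = fopen(path, "w");
          if (!f)
                  return -1;
          fprintf(f, "%d", val);
          return fclose(f);
  }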
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 arch/arm64/include/asm/pgtable.h |   9 ++
 drivers/soc/hisilicon/pbha.c     | 142 +++++++++++++++++++++++++++++++
 fs/proc/base.c                   | 103 ++++++++++++++++++++++
 include/linux/mm.h               |   2 +-
 include/linux/pbha.h             |   3 +
 5 files changed, 258 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 1ca5e427c603..1999bda3be61 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -532,6 +532,15 @@ static inline pte_t pte_mkpbha(pte_t pte, unsigned long pbha_val)
 	return set_pte_bit(pte, __pgprot(__pbha_check_perf_only(pbha_val)));
 }
+#define pmd_mkpbha(pmd, pbha_val)	pte_pmd(pte_mkpbha(pmd_pte(pmd), pbha_val))
+
+static inline pte_t pte_rmpbha(pte_t pte, unsigned long pbha_val)
+{
+	return clear_pte_bit(pte, __pgprot(__pbha_check_perf_only(pbha_val)));
+}
+
+#define pmd_rmpbha(pmd, pbha_val)	pte_pmd(pte_rmpbha(pmd_pte(pmd), pbha_val))
+
 #define __HAVE_PHYS_MEM_ACCESS_PROT
 struct file;
 extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
diff --git a/drivers/soc/hisilicon/pbha.c b/drivers/soc/hisilicon/pbha.c
index 95bf9de0e9e9..d26f2c32cecb 100644
--- a/drivers/soc/hisilicon/pbha.c
+++ b/drivers/soc/hisilicon/pbha.c
@@ -9,6 +9,9 @@
 #include <linux/libfdt.h>
 #include <linux/printk.h>
 #include <linux/cpufeature.h>
+#include <linux/mmu_notifier.h>
+#include <linux/pagewalk.h>
+#include <linux/pbha.h>
 #include <asm/setup.h>
@@ -40,3 +43,142 @@ void __init early_pbha_bit0_init(void)
 	if (*prop == HBM_MODE_CACHE)
 		pbha_bit0_enabled = true;
 }
+
+#define pte_pbha_bit0(pte) \
+	(!!(pte_val(pte) & (PBHA_VAL_BIT0 << PBHA_BITS_SHIFT)))
+
+enum {
+	CLEAR_PBHA_BIT0_FLAG,
+	SET_PBHA_BIT0_FLAG,
+};
+
+static inline void pbha_bit0_update_pte_bits(struct vm_area_struct *vma,
+		unsigned long addr, pte_t *pte, bool set)
+{
+	pte_t ptent = *pte;
+
+	if (pte_present(ptent)) {
+		pte_t old_pte;
+
+		old_pte = ptep_modify_prot_start(vma, addr, pte);
+		if (set)
+			ptent = pte_mkpbha(old_pte, PBHA_VAL_BIT0);
+		else
+			ptent = pte_rmpbha(old_pte, PBHA_VAL_BIT0);
+		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
+	}
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void pbha_bit0_update_pmd_bits(struct vm_area_struct *vma,
+		unsigned long addr, pmd_t *pmdp, bool set)
+{
+	pmd_t pmd = *pmdp;
+
+	if (pmd_present(pmd)) {
+		if (set)
+			pmd = pmd_mkpbha(pmd, PBHA_VAL_BIT0);
+		else
+			pmd = pmd_rmpbha(pmd, PBHA_VAL_BIT0);
+
+		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
+	}
+}
+#else
+static inline void pbha_bit0_update_pmd_bits(struct vm_area_struct *vma,
+		unsigned long addr, pmd_t *pmdp, bool set)
+{
+}
+#endif
+
+static int pbha_bit0_pte_range(pmd_t *pmd, unsigned long addr,
+		unsigned long end, struct mm_walk *walk)
+{
+	int *op = (int *)walk->private;
+	struct vm_area_struct *vma = walk->vma;
+	pte_t *pte, ptent;
+	spinlock_t *ptl;
+	bool set = (*op == SET_PBHA_BIT0_FLAG);
+
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
+		pbha_bit0_update_pmd_bits(vma, addr, pmd, set);
+
+		spin_unlock(ptl);
+		return 0;
+	}
+
+	if (pmd_trans_unstable(pmd))
+		return 0;
+
+	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+	for (; addr != end; pte++, addr += PAGE_SIZE) {
+		ptent = *pte;
+
+		pbha_bit0_update_pte_bits(vma, addr, pte, set);
+	}
+	pte_unmap_unlock(pte - 1, ptl);
+	cond_resched();
+	return 0;
+}
+
+static int pbha_bit0_test_walk(unsigned long start, unsigned long end,
+		struct mm_walk *walk)
+{
+	struct vm_area_struct *vma = walk->vma;
+
+	if (vma->vm_flags & VM_PFNMAP)
+		return 1;
+
+	return 0;
+}
+
+struct mm_walk_ops pbha_bit0_walk_ops = {
+	.pmd_entry = pbha_bit0_pte_range,
+	.test_walk = pbha_bit0_test_walk,
+};
+
+int pbha_bit0_update_vma(struct mm_struct *mm, int val)
+{
+	struct mmu_notifier_range range;
+	struct vm_area_struct *vma;
+	int old_val;
+
+	old_val = (mm->def_flags & VM_PBHA_BIT0) ? 1 : 0;
+	if (val == old_val)
+		return 0;
+
+	if (mmap_write_lock_killable(mm))
+		return -EINTR;
+
+	if (val == SET_PBHA_BIT0_FLAG) {
+		mm->def_flags |= VM_PBHA_BIT0;
+		for (vma = mm->mmap; vma; vma = vma->vm_next) {
+			if (vma->vm_flags & VM_PBHA_BIT0)
+				continue;
+			vma->vm_flags |= VM_PBHA_BIT0;
+			vma_set_page_prot(vma);
+		}
+	} else {
+		mm->def_flags &= ~VM_PBHA_BIT0;
+		for (vma = mm->mmap; vma; vma = vma->vm_next) {
+			if (!(vma->vm_flags & VM_PBHA_BIT0))
+				continue;
+			vma->vm_flags &= ~VM_PBHA_BIT0;
+			vma_set_page_prot(vma);
+		}
+	}
+
+	inc_tlb_flush_pending(mm);
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, 0, -1UL);
+	mmu_notifier_invalidate_range_start(&range);
+	walk_page_range(mm, 0, mm->highest_vm_end, &pbha_bit0_walk_ops,
+			&val);
+	mmu_notifier_invalidate_range_end(&range);
+	flush_tlb_mm(mm);
+	dec_tlb_flush_pending(mm);
+
+	mmap_write_unlock(mm);
+	return 0;
+}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 24c70ff923b8..2a4cc5c796c7 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -98,6 +98,7 @@
 #include <linux/resctrl.h>
 #include <linux/share_pool.h>
 #include <linux/ksm.h>
+#include <linux/pbha.h>
 #include <trace/events/oom.h>
 #include "internal.h"
 #include "fd.h"
@@ -1351,6 +1352,102 @@ static const struct file_operations proc_reliable_operations = {
 };
 #endif
+#ifdef CONFIG_ARM64_PBHA
+static inline int pbha_bit0_check(struct task_struct *task, struct pid *pid)
+{
+	if (!system_support_pbha_bit0())
+		return -EACCES;
+
+	if (is_global_init(task))
+		return -EACCES;
+
+	if (!task->mm || (task->flags & PF_KTHREAD) ||
+	    (task->flags & PF_EXITING))
+		return -EACCES;
+
+	return 0;
+}
+
+static ssize_t pbha_bit0_read(struct file *file, char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	struct task_struct *task = get_proc_task(file_inode(file));
+	struct pid *pid = proc_pid(file_inode(file));
+	char buffer[PROC_NUMBUF];
+	size_t len;
+	short val;
+	int err;
+
+	if (!task)
+		return -ESRCH;
+
+	err = pbha_bit0_check(task, pid);
+	if (err) {
+		put_task_struct(task);
+		return err;
+	}
+
+	val = task->mm->def_flags & VM_PBHA_BIT0 ? 1 : 0;
+	put_task_struct(task);
+	len = snprintf(buffer, sizeof(buffer), "%hd\n", val);
+	return simple_read_from_buffer(buf, count, ppos, buffer, len);
+}
+
+static ssize_t pbha_bit0_write(struct file *file, const char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	struct task_struct *task = get_proc_task(file_inode(file));
+	struct pid *pid = proc_pid(file_inode(file));
+	char buffer[PROC_NUMBUF];
+	struct mm_struct *mm;
+	int val, err;
+
+	if (!task)
+		return -ESRCH;
+
+	err = pbha_bit0_check(task, pid);
+	if (err)
+		goto out;
+
+	memset(buffer, 0, sizeof(buffer));
+	if (count > sizeof(buffer) - 1)
+		count = sizeof(buffer) - 1;
+	if (copy_from_user(buffer, buf, count)) {
+		err = -EFAULT;
+		goto out;
+	}
+
+	err = kstrtoint(strstrip(buffer), 0, &val);
+	if (err)
+		goto out;
+	if (val != 0 && val != 1) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	mm = get_task_mm(task);
+	if (!mm) {
+		err = -ENOENT;
+		goto out;
+	}
+
+	err = pbha_bit0_update_vma(mm, val);
+	if (err)
+		count = -EINTR;
+
+	mmput(mm);
+out:
+	put_task_struct(task);
+	return err < 0 ? err : count;
+}
+
+static const struct file_operations proc_pbha_bit0_ops = {
+	.read	= pbha_bit0_read,
+	.write	= pbha_bit0_write,
+	.llseek	= generic_file_llseek,
+};
+#endif
+
 #ifdef CONFIG_AUDIT
 #define TMPBUFLEN 11
 static ssize_t proc_loginuid_read(struct file * file, char __user * buf,
@@ -3483,6 +3580,9 @@ static const struct pid_entry tgid_base_stuff[] = {
 #ifdef CONFIG_MEMORY_RELIABLE
 	REG("reliable", S_IRUGO|S_IWUSR, proc_reliable_operations),
 #endif
+#ifdef CONFIG_ARM64_PBHA
+	REG("pbha_bit0", 0644, proc_pbha_bit0_ops),
+#endif
 #ifdef CONFIG_AUDIT
 	REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
 	REG("sessionid", S_IRUGO, proc_sessionid_operations),
@@ -3902,6 +4002,9 @@ static const struct pid_entry tid_base_stuff[] = {
 #ifdef CONFIG_MEMORY_RELIABLE
 	REG("reliable", S_IRUGO|S_IWUSR, proc_reliable_operations),
 #endif
+#ifdef CONFIG_ARM64_PBHA
+	REG("pbha_bit0", 0644, proc_pbha_bit0_ops),
+#endif
 #ifdef CONFIG_AUDIT
 	REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
 	REG("sessionid", S_IRUGO, proc_sessionid_operations),
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 465a47afd2a9..a5316ffd4e1f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -457,7 +457,7 @@ static inline bool arch_is_platform_page(u64 paddr)
 #define VM_NO_KHUGEPAGED	(VM_SPECIAL | VM_HUGETLB)
 /* This mask defines which mm->def_flags a process can inherit its parent */
-#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE
+#define VM_INIT_DEF_MASK	(VM_NOHUGEPAGE | VM_PBHA_BIT0)
 /* This mask is used to clear all the VMA flags used by mlock */
 #define VM_LOCKED_CLEAR_MASK	(~(VM_LOCKED | VM_LOCKONFAULT))
diff --git a/include/linux/pbha.h b/include/linux/pbha.h
index b2b256696af3..d34ec0f4e1e8 100644
--- a/include/linux/pbha.h
+++ b/include/linux/pbha.h
@@ -10,6 +10,7 @@
 #include <linux/pgtable.h>
 #define PBHA_VAL_BIT0 1UL
+#define PBHA_BITS_SHIFT 59
 #define EFI_OEMCONFIG_VARIABLE_GUID                                        \
 	EFI_GUID(0x21f3b3c5, 0x946d, 0x41c1, 0x83, 0x8c, 0x19, 0x4e, 0x48, \
@@ -20,7 +21,9 @@
 #ifdef CONFIG_ARM64_PBHA
 extern bool __ro_after_init pbha_bit0_enabled;
+extern struct mm_walk_ops pbha_bit0_walk_ops;
 extern void __init early_pbha_bit0_init(void);
+extern int pbha_bit0_update_vma(struct mm_struct *mm, int val);
 static inline bool system_support_pbha_bit0(void)
 {
From: Ma Wupeng <mawupeng1@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7ZC0H
--------------------------------
Set the flag VM_PBHA_BIT0 on the global init task's VMAs during page fault (#PF) handling.
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 include/linux/pbha.h | 4 ++++
 1 file changed, 4 insertions(+)
diff --git a/include/linux/pbha.h b/include/linux/pbha.h
index d34ec0f4e1e8..f6b764ae3fa7 100644
--- a/include/linux/pbha.h
+++ b/include/linux/pbha.h
@@ -43,6 +43,10 @@ static inline pte_t maybe_mk_pbha_bit0(pte_t pte, struct vm_area_struct *vma)
 	if (!system_support_pbha_bit0())
 		return pte;
+	if (unlikely(is_global_init(current)) &&
+	    !(vma->vm_flags & VM_PBHA_BIT0))
+		vma->vm_flags |= VM_PBHA_BIT0;
+
 	if (vma->vm_flags & VM_PBHA_BIT0)
 		pte = pte_mkpbha(pte, PBHA_VAL_BIT0);
From: Ma Wupeng <mawupeng1@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7ZC0H
--------------------------------
Introduce a prctl, PR_UPDATE_PBHA_BIT0, to let a task control its own PBHA bit0 behavior.
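A userspace usage sketch (PR_UPDATE_PBHA_BIT0 is the value added below; the rest is illustrative):

  #include <stdio.h>
  #include <sys/prctl.h>

  #ifndef PR_UPDATE_PBHA_BIT0
  #define PR_UPDATE_PBHA_BIT0 0x82312f0
  #endif

  int main(void)
  {
          /* arg2 selects set (1) or clear (0); arg3..arg5 must be zero. */
          if (prctl(PR_UPDATE_PBHA_BIT0, 1, 0, 0, 0))
                  perror("prctl(PR_UPDATE_PBHA_BIT0)");

          return 0;
  }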
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 include/uapi/linux/prctl.h |  2 ++
 kernel/sys.c               | 10 ++++++++++
 2 files changed, 12 insertions(+)
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index c9e363906530..00ffc2dbbf2f 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -260,4 +260,6 @@ struct prctl_mm_map {
 #define PR_SET_MEMORY_MERGE		67
 #define PR_GET_MEMORY_MERGE		68
+
+#define PR_UPDATE_PBHA_BIT0		0x82312f0
 #endif /* _LINUX_PRCTL_H */
diff --git a/kernel/sys.c b/kernel/sys.c
index 15efe4667397..68885eabad06 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2550,6 +2550,16 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 		error = !!test_bit(MMF_VM_MERGE_ANY, &me->mm->flags);
 		break;
+#endif
+#ifdef CONFIG_ARM64_PBHA
+	case PR_UPDATE_PBHA_BIT0:
+		if (arg3 || arg4 || arg5)
+			return -EINVAL;
+		if (arg2 != 0 && arg2 != 1)
+			return -EINVAL;
+
+		error = pbha_bit0_update_vma(me->mm, arg2);
+		break;
 #endif
 	default:
 		error = -EINVAL;
From: Ma Wupeng <mawupeng1@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7ZC0H
--------------------------------
Introduce the kernel parameter pbha to control the overall PBHA and PBHA bit0 behavior. PBHA and PBHA bit0 are only initialized if pbha is present on the kernel command line.
For PBHA bit0, it accepts the following arguments (an illustrative command line follows):

enable: both the kernel and user tasks will update PBHA bit0 in their pte entries.
user: only selected user tasks will update PBHA bit0 in their pte entries.
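For instance, a hypothetical boot command line that lets both kernel and user mappings use PBHA bit0 (all parameters other than pbha= are illustrative):

  console=ttyAMA0 root=/dev/vda2 rw pbha=enable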
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 .../admin-guide/kernel-parameters.txt          |  8 +++++++
 .../firmware/efi/libstub/efi-stub-helper.c     |  3 +++
 drivers/firmware/efi/libstub/fdt.c             |  5 +++++
 drivers/soc/hisilicon/pbha.c                   | 22 ++++++++++++++++++-
 include/linux/pbha.h                           |  6 +++--
 5 files changed, 41 insertions(+), 3 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 71567aa7eca9..eab29f749a5f 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -6446,3 +6446,11 @@
 			memory, and other data can't be written using
 			xmon commands.
 			off	xmon is disabled.
+
+	pbha=		[ARM64]
+			Format: { enable | user }
+			Enable PBHA bit0.
+			enable	kernel and user will update PBHA bit0 for
+				their pte entry.
+			user	only selected user tasks will update PBHA
+				bit0 for their pte entry.
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 537db49c31b9..8670654bf561 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -23,6 +23,7 @@
 bool efi_nokaslr = !IS_ENABLED(CONFIG_RANDOMIZE_BASE);
 bool efi_noinitrd;
 int efi_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
 bool efi_novamap = IS_ENABLED(CONFIG_LOONGARCH); /* LoongArch call svam() in kernel */;
+bool efi_pbha;
 static bool efi_nosoftreserve;
 static bool efi_disable_pci_dma = IS_ENABLED(CONFIG_EFI_DISABLE_PCI_DMA);
@@ -234,6 +235,8 @@ efi_status_t efi_parse_options(char const *cmdline)
 			efi_parse_option_graphics(val + strlen("efifb:"));
 		} else if (!strcmp(param, "memmap") && val) {
 			efi_parse_option_memmap(val);
+		} else if (!strcmp(param, "pbha")) {
+			efi_pbha = true;
 		}
 	}
 	efi_bs_call(free_pool, buf);
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index d212bd2ad418..cb8e8e4e3f63 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -29,6 +29,8 @@ static void fdt_update_cell_size(void *fdt)
 }
 #ifdef CONFIG_ARM64_PBHA
+extern bool efi_pbha;
+
 static efi_status_t fdt_init_hbm_mode(void *fdt, int node)
 {
 	efi_guid_t oem_config_guid = EFI_OEMCONFIG_VARIABLE_GUID;
@@ -39,6 +41,9 @@ static efi_status_t fdt_init_hbm_mode(void *fdt, int node)
 	u8 fdt_val32;
 	u8 arr[16] = { 0x1 };
+	if (!efi_pbha)
+		goto out;
+
 	efi_status = get_efi_var(L"HBMMode", &oem_config_guid, NULL, &size,
 				 &hbm_mode);
 	if (efi_status != EFI_SUCCESS)
diff --git a/drivers/soc/hisilicon/pbha.c b/drivers/soc/hisilicon/pbha.c
index d26f2c32cecb..8914ebbca4cf 100644
--- a/drivers/soc/hisilicon/pbha.c
+++ b/drivers/soc/hisilicon/pbha.c
@@ -18,6 +18,8 @@
 #define HBM_MODE_CACHE 1
 bool __ro_after_init pbha_bit0_enabled;
+bool __ro_after_init pbha_bit0_kernel_enabled;
+static bool pbha_enabled_phase_1;
 void __init early_pbha_bit0_init(void)
 {
@@ -41,7 +43,7 @@
 	if (!prop)
 		return;
 	if (*prop == HBM_MODE_CACHE)
-		pbha_bit0_enabled = true;
+		pbha_enabled_phase_1 = true;
 }
 #define pte_pbha_bit0(pte) \
@@ -182,3 +184,21 @@ int pbha_bit0_update_vma(struct mm_struct *mm, int val)
 	mmap_write_unlock(mm);
 	return 0;
 }
+
+static int __init setup_pbha(char *str)
+{
+	if (!pbha_enabled_phase_1)
+		return 0;
+
+	if (strcmp(str, "enable") == 0) {
+		pbha_bit0_enabled = true;
+		pbha_bit0_kernel_enabled = true;
+	} else if (strcmp(str, "user") == 0) {
+		pbha_bit0_enabled = true;
+	}
+
+	pr_info("pbha bit_0 enabled, kernel: %d\n", pbha_bit0_kernel_enabled);
+
+	return 0;
+}
+early_param("pbha", setup_pbha);
diff --git a/include/linux/pbha.h b/include/linux/pbha.h
index f6b764ae3fa7..25cb88aa3d1c 100644
--- a/include/linux/pbha.h
+++ b/include/linux/pbha.h
@@ -21,6 +21,7 @@
 #ifdef CONFIG_ARM64_PBHA
 extern bool __ro_after_init pbha_bit0_enabled;
+extern bool __ro_after_init pbha_bit0_kernel_enabled;
 extern struct mm_walk_ops pbha_bit0_walk_ops;
 extern void __init early_pbha_bit0_init(void);
 extern int pbha_bit0_update_vma(struct mm_struct *mm, int val);
@@ -32,7 +33,7 @@ static inline bool system_support_pbha_bit0(void)
 static inline pgprot_t pgprot_pbha_bit0(pgprot_t prot)
 {
-	if (!system_support_pbha_bit0())
+	if (!system_support_pbha_bit0() || !pbha_bit0_kernel_enabled)
 		return prot;
 	return pgprot_pbha(prot, PBHA_VAL_BIT0);
@@ -43,7 +44,8 @@ static inline pte_t maybe_mk_pbha_bit0(pte_t pte, struct vm_area_struct *vma)
 	if (!system_support_pbha_bit0())
 		return pte;
-	if (unlikely(is_global_init(current)) &&
+	/* global init task will update pbha bit0 iff kernel can do this */
+	if (unlikely(is_global_init(current)) && pbha_bit0_kernel_enabled &&
 	    !(vma->vm_flags & VM_PBHA_BIT0))
 		vma->vm_flags |= VM_PBHA_BIT0;
From: Ma Wupeng <mawupeng1@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7ZC0H
--------------------------------
Enable feature PBHA for arm64 by default.
Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
---
 arch/arm64/configs/openeuler_defconfig | 1 +
 1 file changed, 1 insertion(+)
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index 813927edcdc2..498c470cf5ef 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -478,6 +478,7 @@
 CONFIG_ARM64_VHE=y
 CONFIG_ARM64_PMEM=y
 CONFIG_ARM64_RAS_EXTN=y
 CONFIG_ARM64_CNP=y
+CONFIG_ARM64_PBHA=y
 # end of ARMv8.2 architectural features
#