hulk inclusion category: feature bugzilla: 28055 CVE: NA
------------------------------
The MPAM driver currently force-initializes cache/memory nodes with hard-coded values. Further, we should make it compatible with P2, P4 and P6 machines in the future, so it is desirable to introduce a new method to get the MSCs' info from BIOS/DT.
Note that Arm hasn't reached a final conclusion about the MPAM ACPI description; the BIOS implemented one temporary version according to the v1 spec.
MPAM ACPI Table organized as follow:
*******Address increased by raw********
[DIE0] Memory node: mc[proximity:0]
[DIE1] Memory node: mc[proximity:1]
[DIE0] Memory node: mc[proximity:0]
[DIE2] Memory node: mc[proximity:2]
[DIE3] Memory node: mc[proximity:3]
[DIE0] Cache node: cache[PPTT_ref:$a]
[PARTITION0] Cache node: cache[PPTT_ref:$a]
[PARTITION1] Cache node: cache[PPTT_ref:$a]
[PARTITION2] Cache node: cache[PPTT_ref:$a]
[PARTITION3] Cache node: cache[PPTT_ref:$a]
[PARTITION4] Cache node: cache[PPTT_ref:$a]
[PARTITION5] Cache node: cache[PPTT_ref:$a]
[PARTITION6] Cache node: cache[PPTT_ref:$a]
[PARTITION7] Cache node: cache[PPTT_ref:$a]
[DIE1] Cache node: cache[PPTT_ref:$b]
[PARTITION0] Cache node: cache[PPTT_ref:$b]
[PARTITION1] Cache node: cache[PPTT_ref:$b]
...
We use PPTT_ref to decide which DIE a cache node belongs to, and use proximity to decide the memory node. It is tricky, but still useful.
James Morse (3): ACPI / processor: Add helper to convert acpi_id to a phys_cpuid ACPI / PPTT: Add helper to validate cache nodes from an offset [dead] ACPI / PPTT: Filthy hack to find _a_ backwards reference in the PPTT [ROTTEN]
Wang ShaoBo (3): ACPI / PPTT: cacheinfo: Label caches based on fw_token ACPI 6.x: Add definitions for MPAM table MPAM / ACPI: Refactoring MPAM init process and set MPAM ACPI as entrance
arch/arm64/Kconfig | 2 + arch/arm64/include/asm/acpi.h | 4 + arch/arm64/include/asm/mpam_resource.h | 15 +- arch/arm64/include/asm/mpam_sched.h | 8 +- arch/arm64/kernel/mpam.c | 246 ++++++++++++++++++------ drivers/acpi/arm64/Kconfig | 3 + drivers/acpi/arm64/Makefile | 1 + drivers/acpi/arm64/mpam.c | 249 +++++++++++++++++++++++++ drivers/acpi/pptt.c | 143 ++++++++++++++ drivers/acpi/processor_core.c | 16 ++ include/acpi/actbl2.h | 58 ++++++ include/acpi/processor.h | 1 + include/linux/acpi.h | 8 + 13 files changed, 698 insertions(+), 56 deletions(-) create mode 100644 drivers/acpi/arm64/mpam.c
From: James Morse james.morse@arm.com
hulk inclusion category: feature bugzilla: 28055 CVE: NA
------------------------------
The PPTT parsing code only has access to an acpi_id, we need a hardware property, preferably the corresponding phys_cpuid_t.
acpi_get_cpuid() requires us to have the acpi_handle, which would imply we already have the acpi_device or acpi_processor structure. This call is useful when the CPU may not have been mapped, e.g. when walking the namespace.
The PPTT is parsed after CPUs have been discovered and mapped, add a helper to walk the possible CPUs and test whether the acpi_processor matches our acpi_id.
Signed-off-by: James Morse james.morse@arm.com Signed-off-by: Wang ShaoBo bobo.shaobowang@huawei.com Reviewed-By: Xie XiuQi xiexiuqi@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/acpi/processor_core.c | 16 ++++++++++++++++ include/acpi/processor.h | 1 + 2 files changed, 17 insertions(+)
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index 8c0a54d50d0e..333547bf7845 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -9,7 +9,9 @@ * Yinghai Lu yinghai@kernel.org * Jiang Liu jiang.liu@intel.com */ +#include <linux/percpu.h> #include <linux/export.h> +#include <linux/cpumask.h> #include <linux/acpi.h> #include <acpi/processor.h>
@@ -263,6 +265,20 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id) } EXPORT_SYMBOL_GPL(acpi_get_cpuid);
+phys_cpuid_t acpi_id_to_phys_cpuid(u32 acpi_id) +{ + int cpu; + struct acpi_processor *pr; + + for_each_possible_cpu(cpu) { + pr = per_cpu(processors, cpu); + if (pr && pr->acpi_id == acpi_id) + return pr->phys_id; + } + + return PHYS_CPUID_INVALID; +} + #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base, u64 *phys_addr, int *ioapic_id) diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 1194a4c78d55..9235b41a9d52 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -333,6 +333,7 @@ phys_cpuid_t acpi_get_phys_id(acpi_handle, int type, u32 acpi_id); phys_cpuid_t acpi_map_madt_entry(u32 acpi_id); int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id); int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id); +phys_cpuid_t acpi_id_to_phys_cpuid(u32 acpi_id);
#ifdef CONFIG_ACPI_CPPC_LIB extern int acpi_cppc_processor_probe(struct acpi_processor *pr);
From: James Morse james.morse@arm.com
hulk inclusion category: feature bugzilla: 28055 CVE: NA
------------------------------
The MPAM table holds a PPTT-offset that describes a cache. Add a helper acpi_pptt_validate_cache_node(), which checks this offset really is in the PPTT, on a node boundary, and that node really is a cache.
With this the MPAM probe code can go poking through struct cacheinfo looking for a level with a matching firmware_node.
Signed-off-by: James Morse james.morse@arm.com Signed-off-by: Wang ShaoBo bobo.shaobowang@huawei.com Reviewed-By: Xie XiuQi xiexiuqi@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/acpi/pptt.c | 38 ++++++++++++++++++++++++++++++++++++++ include/linux/acpi.h | 4 ++++ 2 files changed, 42 insertions(+)
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index 705719342f2b..057c7c92d8d6 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -276,6 +276,44 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he
return NULL; } +/** + * acpi_pptt_validate_cache_node() - Given an offset in the table, check this is + * a cache node. + * Used for cross-table pointers. + * + * Return the cache pointer for a valid cache, or NULL. + */ +struct acpi_pptt_cache * +acpi_pptt_validate_cache_node(struct acpi_table_header *table_hdr, u32 offset) +{ + struct acpi_subtable_header *entry, *cache; + unsigned long table_end; + + if ((offset < sizeof(*table_hdr)) || (offset >= table_hdr->length)) + return NULL; + + table_end = (unsigned long)table_hdr + table_hdr->length; + entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, + sizeof(struct acpi_table_pptt)); + + cache = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, offset); + + /* Walk every node to check offset is on a node boundary */ + while ((unsigned long)(entry + 1) < table_end) { + if (entry->length == 0) { + pr_err("Invalid zero length subtable\n"); + break; + } + if ((entry->type == ACPI_PPTT_TYPE_CACHE) && (entry == cache)) + return (struct acpi_pptt_cache *)entry; + + entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry, + entry->length); + } + + return NULL; +} +
static int acpi_find_cache_levels(struct acpi_table_header *table_hdr, u32 acpi_cpu_id) diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 4edd872b4f98..7ebb80874284 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -638,6 +638,10 @@ static inline u64 acpi_arch_get_root_pointer(void) } #endif
+struct acpi_pptt_cache * +acpi_pptt_validate_cache_node(struct acpi_table_header *table_hdr, + u32 offset); + #else /* !CONFIG_ACPI */
#define acpi_disabled 1
From: James Morse james.morse@arm.com
hulk inclusion category: feature bugzilla: 28055 CVE: NA
------------------------------
The alpha MPAM table contains a pointer to the PPTT cache, which it expects to be unique, which isn't guaranteed.
Ideally we'd take a cache-id, but the hardware doesn't have a suitable property, instead arm64 will generate an id from the cpu affinity ids.
To find the cache id we need to find the cacheinfo structure, which we can do if we have a pptt cpu_node (different to the cache node), as this is the fw_token used to match the Processor Container that contains all the CPUs that share this cache.
How can we find the expected-to-be-unique cpu_node from the cache_node? ... add acpi_pptt_find_cache_backwards() to find a PPTT processor node given a PPTT cache node. This is totally broken as many processor nodes may point at the same PPTT cache indicating different instances of the cache. (e.g. all the L1 caches are the same shape, but they aren't the same cache).
This only works if you cooked your PPTT table to look like this.
Signed-off-by: James Morse james.morse@arm.com # ... but its still GPLv2 Signed-off-by: Wang ShaoBo bobo.shaobowang@huawei.com Reviewed-By: Xie XiuQi xiexiuqi@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- drivers/acpi/pptt.c | 50 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/acpi.h | 4 ++++ 2 files changed, 54 insertions(+)
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index 057c7c92d8d6..867b7ce82cc7 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -276,6 +276,56 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he
return NULL; } + + +/* + * acpi_pptt_find_cache_backwards() - Given a PPTT cache find a processor node + * that points to it. This lets us find a cacheinfo node by fw_token, but + * is totally broken as many processor node may point at the same PPTT + * cache indicating different instances of the cache. (e.g. all the L1 + * caches are the same shape, but they aren't the same cache). + * This only works if you cooked your PPTT table to look like this. + */ +struct acpi_pptt_processor * +acpi_pptt_find_cache_backwards(struct acpi_table_header *table_hdr, + struct acpi_pptt_cache *cache) +{ + struct acpi_pptt_processor *cpu_node; + struct acpi_subtable_header *entry; + struct acpi_subtable_header *res; + unsigned long table_end; + u32 proc_sz; + int i; + + table_end = (unsigned long)table_hdr + table_hdr->length; + entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, + sizeof(struct acpi_table_pptt)); + proc_sz = sizeof(struct acpi_pptt_processor *); + + /* find the processor structure which points at with this cpuid */ + while ((unsigned long)entry + proc_sz < table_end) { + if (entry->length == 0) { + pr_warn("Invalid zero length subtable\n"); + break; + } + + cpu_node = (struct acpi_pptt_processor *)entry; + entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry, + entry->length); + + if (cpu_node->header.type != ACPI_PPTT_TYPE_PROCESSOR) + continue; + + for (i = 0; i < cpu_node->number_of_priv_resources; i++) { + res = acpi_get_pptt_resource(table_hdr, cpu_node, i); + if (&cache->header == res) + return cpu_node; + } + } + + return NULL; +} + /** * acpi_validate_cache_node() - Given an offset in the table, check this is * a cache node. diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 7ebb80874284..32c51887074c 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -1323,4 +1323,8 @@ static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level) } #endif
+struct acpi_pptt_processor * +acpi_pptt_find_cache_backwards(struct acpi_table_header *table_hdr, + struct acpi_pptt_cache *cache); + #endif /*_LINUX_ACPI_H*/
hulk inclusion category: feature bugzilla: 28055 CVE: NA
------------------------------
According to James's implementation: http://www.linux-arm.org/git?p=linux- jm.git;a=commit;h=413eb4281b072e1ee60f88b814f2a418358f2155, "ACPI / PPTT: cacheinfo: Label caches based on fw_token".
For the resctrl ABI, a cache node labeled by the min_physid of its leaf cpu node cannot be recognized well; because of this, we instead label each cache node by its numa node id, which can be acquired from the leaf cpu node with min_physid.
But there are also some problems when doing this: with the current MPAM ACPI Description 1.0, we don't have enough information to label those cache nodes when using partition mode; we hope to fix that once a unified description is available.
Signed-off-by: Wang ShaoBo bobo.shaobowang@huawei.com Reviewed-By: Xie XiuQi xiexiuqi@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- arch/arm64/include/asm/acpi.h | 4 +++ drivers/acpi/pptt.c | 55 +++++++++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+)
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h index 0705663ba978..f364115ac2ef 100644 --- a/arch/arm64/include/asm/acpi.h +++ b/arch/arm64/include/asm/acpi.h @@ -113,6 +113,10 @@ static inline u32 get_acpi_id_for_cpu(unsigned int cpu)
static inline void arch_fix_phys_package_id(int num, u32 slot) { } void __init acpi_init_cpus(void); +void acpi_pptt_find_min_physid_cpu_node(struct acpi_table_header *table_hdr, + struct acpi_pptt_processor *cpu_node, + phys_cpuid_t *min_physid, + struct acpi_pptt_processor **min_cpu_node);
#else static inline void acpi_init_cpus(void) { } diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index 867b7ce82cc7..879b9155b7b4 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -426,6 +426,61 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta return found; }
+/** + * acpi_pptt_min_physid_from_cpu_node() - Recursivly find @min_physid for all + * leaf CPUs below @cpu_node. + * @table_hdr: Pointer to the head of the PPTT table + * @cpu_node: The point in the toplogy to start the walk + * @min_physid: The min_physid to update with leaf CPUs. + * @min_cpu_node: The min_cpu_node to update with leaf CPUs. + */ +void acpi_pptt_find_min_physid_cpu_node(struct acpi_table_header *table_hdr, + struct acpi_pptt_processor *cpu_node, + phys_cpuid_t *min_physid, + struct acpi_pptt_processor **min_cpu_node) +{ + bool leaf = true; + u32 acpi_processor_id; + phys_cpuid_t cpu_node_phys_id; + struct acpi_subtable_header *iter; + struct acpi_pptt_processor *iter_node = NULL; + u32 target_node = ACPI_PTR_DIFF(cpu_node, table_hdr); + u32 proc_sz = sizeof(struct acpi_pptt_processor *); + unsigned long table_end = (unsigned long)table_hdr + table_hdr->length; + + /* + * Walk the PPTT, looking for nodes that reference cpu_node + * as parent. + */ + iter = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, + sizeof(struct acpi_table_pptt)); + + while ((unsigned long)iter + proc_sz < table_end) { + iter_node = (struct acpi_pptt_processor *)iter; + + if (iter->type == ACPI_PPTT_TYPE_PROCESSOR && + iter_node->parent == target_node) { + leaf = false; + acpi_pptt_find_min_physid_cpu_node(table_hdr, iter_node, + min_physid, min_cpu_node); + } + + if (iter->length == 0) + return; + iter = ACPI_ADD_PTR(struct acpi_subtable_header, iter, + iter->length); + } + + acpi_processor_id = cpu_node->acpi_processor_id; + cpu_node_phys_id = acpi_id_to_phys_cpuid(acpi_processor_id); + if (!invalid_phys_cpuid(cpu_node_phys_id) && + *min_physid > cpu_node_phys_id && + leaf == true) { + *min_physid = cpu_node_phys_id; + *min_cpu_node = cpu_node; + } +} + /** * update_cache_properties() - Update cacheinfo for the given processor * @this_leaf: Kernel cache info structure being updated
hulk inclusion category: feature bugzilla: 28055 CVE: NA
------------------------------
Add structures for the ACPI MPAM table.
Code was partially borrowed from James's: http://www.linux-arm.org/git?p=linux-jm.git;a=commit;h=b307d07f49d11a b6841153315309a67e3163cac7, "ACPI 6.x: Add definitions for MPAM table [dead]".
Different with James's, we add several fields in acpi_mpam_header structure according to MPAM ACPI Description 1.0.
Signed-off-by: Wang ShaoBo bobo.shaobowang@huawei.com Reviewed-By: Xie XiuQi xiexiuqi@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- include/acpi/actbl2.h | 58 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+)
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h index 26d65f7fb043..20fa5e60e328 100644 --- a/include/acpi/actbl2.h +++ b/include/acpi/actbl2.h @@ -32,6 +32,7 @@ #define ACPI_SIG_MCHI "MCHI" /* Management Controller Host Interface table */ #define ACPI_SIG_MPST "MPST" /* Memory Power State Table */ #define ACPI_SIG_MSCT "MSCT" /* Maximum System Characteristics Table */ +#define ACPI_SIG_MPAM "MPAM" /* Memory Partitioning and Monitoring Table */ #define ACPI_SIG_MSDM "MSDM" /* Microsoft Data Management Table */ #define ACPI_SIG_MTMR "MTMR" /* MID Timer table */ #define ACPI_SIG_NFIT "NFIT" /* NVDIMM Firmware Interface Table */ @@ -905,6 +906,63 @@ struct acpi_msct_proximity { u64 memory_capacity; /* In bytes */ };
+/* + * MPAM - Memory Partitioning and Monitoring table + * + * Conforms to "MPAM ACPI Description 1.0", + * Null 0, 2017. Copyright 2017 ARM Limited or its affiliates. + * + ******************************************************************************/ +struct acpi_table_mpam { + struct acpi_table_header header;/* Common ACPI table header */ +}; + +/* Subtable header for MPAM */ + +struct acpi_mpam_header { + u8 type; + u16 length; + u8 reserved; + u64 base_address; + u32 overflow_interrupt; + u32 overflow_flags; + u32 error_interrupt; + u32 error_interrupt_flags; + u32 not_ready_max; + u32 offset; +}; + +/* Values for subtable type in ACPI_MPAM_NODE_HEADER */ + +enum AcpiMpamType { + ACPI_MPAM_TYPE_SMMU = 0, + ACPI_MPAM_TYPE_CACHE = 1, + ACPI_MPAM_TYPE_MEMORY = 2, + ACPI_MPAM_TYPE_UNKNOWN = 3 +}; + +/* Flags */ +#define ACPI_MPAM_IRQ_FLAGS (1) /* Interrupt mode */ + +/* + * MPAM Subtables + */ +struct acpi_mpam_node_smmu { + struct acpi_mpam_header header; + u32 IORT_ref; +}; + +struct acpi_mpam_node_cache { + struct acpi_mpam_header header; + u32 PPTT_ref; +}; + +struct acpi_mpam_node_memory { + struct acpi_mpam_header header; + u8 proximity_domain; + u8 reserved1[3]; +}; + /******************************************************************************* * * MSDM - Microsoft Data Management table
hulk inclusion category: feature bugzilla: 28055 CVE: NA
------------------------------
Rename mpam_late_init() in arch/arm64/kernel/mpam.c to mpam_init(); traverse each MPAM ACPI cache/memory node and add them to a list. With that, we use the numa node id a cache node belongs to in order to label it, and use proximity_domain to label memory nodes. Once discovery ends, call mpam_init() to initialize everything as before.
Code was partially borrowed from James's: http://www.linux-arm.org/git?p=linux-jm.git;a=commit;h=10fe7d6363ae96b 25f584d4a91f9d0f2fd5faf3b,"ACPI / MPAM: Parse the (draft) MPAM table [dead]"
v3->v5: mpam.c in drivers/acpi/arm64 should not be compiled when MPAM disabled, so we should add CONFIG_ACPI_MPAM macro and make CONFIG_MPAM select it. Not only that, as mpam init procedure is strong correlated to ACPI for Now (follow-up might be dependent on device tree), and CONFIG_ ACPI is not always selected under configuration, we should make CONFIG_ MPAM depends on CONFIG_ACPI before selecting CONFIG_ACPI_MPAM.
Signed-off-by: Wang ShaoBo bobo.shaobowang@huawei.com Reviewed-By: Xie XiuQi xiexiuqi@huawei.com Signed-off-by: Yang Yingliang yangyingliang@huawei.com --- arch/arm64/Kconfig | 2 + arch/arm64/include/asm/mpam_resource.h | 15 +- arch/arm64/include/asm/mpam_sched.h | 8 +- arch/arm64/kernel/mpam.c | 246 ++++++++++++++++++------ drivers/acpi/arm64/Kconfig | 3 + drivers/acpi/arm64/Makefile | 1 + drivers/acpi/arm64/mpam.c | 249 +++++++++++++++++++++++++ 7 files changed, 468 insertions(+), 56 deletions(-) create mode 100644 drivers/acpi/arm64/mpam.c
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 269737b98763..95f8b6b39506 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -770,7 +770,9 @@ config ARM64_ERR_RECOV config MPAM bool "Support Memory Partitioning and Monitoring" default n + depends on ACPI select RESCTRL + select ACPI_MPAM if ACPI help Memory Partitioning and Monitoring. More exactly Memory system performance resource Partitioning and Monitoring diff --git a/arch/arm64/include/asm/mpam_resource.h b/arch/arm64/include/asm/mpam_resource.h index beadd2c64c31..ab90596c9194 100644 --- a/arch/arm64/include/asm/mpam_resource.h +++ b/arch/arm64/include/asm/mpam_resource.h @@ -95,6 +95,8 @@ */
struct mpam_node { + /* for label mpam_node instance*/ + u32 component_id; /* MPAM node header */ u8 type; /* MPAM_SMMU, MPAM_CACHE, MPAM_MC */ u64 addr; @@ -105,8 +107,19 @@ struct mpam_node { /* for debug */ char *cpus_list; char *name; + struct list_head list; };
-int mpam_nodes_init(void); +int __init mpam_force_init(void); + +int __init mpam_nodes_discovery_start(void); + +void __init mpam_nodes_discovery_failed(void); + +int __init mpam_nodes_discovery_complete(void); + +int mpam_create_cache_node(u32 component_id, phys_addr_t hwpage_address); + +int mpam_create_memory_node(u32 component_id, phys_addr_t hwpage_address);
#endif /* _ASM_ARM64_MPAM_RESOURCE_H */ diff --git a/arch/arm64/include/asm/mpam_sched.h b/arch/arm64/include/asm/mpam_sched.h index f0552e6dc9bc..350296157087 100644 --- a/arch/arm64/include/asm/mpam_sched.h +++ b/arch/arm64/include/asm/mpam_sched.h @@ -40,7 +40,13 @@ static inline void mpam_sched_in(void) __mpam_sched_in(); }
-extern int __read_mostly mpam_enabled; +enum mpam_enable_type { + enable_denied = 0, + enable_default, + enable_acpi, +}; + +extern enum mpam_enable_type __read_mostly mpam_enabled;
#else
diff --git a/arch/arm64/kernel/mpam.c b/arch/arm64/kernel/mpam.c index c728b1fe91dd..202e49a1d3f9 100644 --- a/arch/arm64/kernel/mpam.c +++ b/arch/arm64/kernel/mpam.c @@ -62,12 +62,6 @@ int max_name_width, max_data_width; */ bool rdt_alloc_capable;
-char *mpam_types_str[] = { - "MPAM_RESOURCE_SMMU", - "MPAM_RESOURCE_CACHE", - "MPAM_RESOURCE_MC", -}; - /* * Hi1620 2P Base Address Map * @@ -81,35 +75,30 @@ char *mpam_types_str[] = { * AFF2: MPIDR.AFF2 */
-#define MPAM_BASE(suffix, offset) ((suffix) << 24 | (offset) << 16) -#define MPAM_NODE(n, t, suffix, offset) \ - { \ - .name = #n, \ - .type = t, \ - .addr = MPAM_BASE(suffix, (offset)), \ - .cpus_list = "0", \ - } +static inline void mpam_node_assign_val(struct mpam_node *n, + char *name, + u8 type, + phys_addr_t hwpage_address, + u32 component_id) +{ + n->name = name; + n->type = type; + n->addr = hwpage_address; + n->component_id = component_id; + n->cpus_list = "0"; +}
-struct mpam_node mpam_node_all[] = { - MPAM_NODE(L3TALL0, MPAM_RESOURCE_CACHE, 0x000098ULL, 0xB9), - MPAM_NODE(L3TALL1, MPAM_RESOURCE_CACHE, 0x000090ULL, 0xB9), - MPAM_NODE(L3TALL2, MPAM_RESOURCE_CACHE, 0x200098ULL, 0xB9), - MPAM_NODE(L3TALL3, MPAM_RESOURCE_CACHE, 0x200090ULL, 0xB9), +#define MPAM_NODE_NAME_SIZE (10)
- MPAM_NODE(HHAALL0, MPAM_RESOURCE_MC, 0x000098ULL, 0xC1), - MPAM_NODE(HHAALL1, MPAM_RESOURCE_MC, 0x000090ULL, 0xC1), - MPAM_NODE(HHAALL2, MPAM_RESOURCE_MC, 0x200098ULL, 0xC1), - MPAM_NODE(HHAALL3, MPAM_RESOURCE_MC, 0x200090ULL, 0xC1), -}; +struct mpam_node *mpam_nodes_ptr;
-void mpam_nodes_unmap(void) +static int __init mpam_init(void); + +static void mpam_nodes_unmap(void) { - int i; - size_t num_nodes = ARRAY_SIZE(mpam_node_all); struct mpam_node *n;
- for (i = 0; i < num_nodes; i++) { - n = &mpam_node_all[i]; + list_for_each_entry(n, &mpam_nodes_ptr->list, list) { if (n->base) { iounmap(n->base); n->base = NULL; @@ -117,14 +106,12 @@ void mpam_nodes_unmap(void) } }
-int mpam_nodes_init(void) +static int mpam_nodes_init(void) { - int i, ret = 0; - size_t num_nodes = ARRAY_SIZE(mpam_node_all); + int ret = 0; struct mpam_node *n;
- for (i = 0; i < num_nodes; i++) { - n = &mpam_node_all[i]; + list_for_each_entry(n, &mpam_nodes_ptr->list, list) { ret |= cpulist_parse(n->cpus_list, &n->cpu_mask); n->base = ioremap(n->addr, 0x10000); if (!n->base) { @@ -136,6 +123,160 @@ int mpam_nodes_init(void) return ret; }
+static void mpam_nodes_destroy(void) +{ + struct mpam_node *n, *tmp; + + if (!mpam_nodes_ptr) + return; + + list_for_each_entry_safe(n, tmp, &mpam_nodes_ptr->list, list) { + kfree(n->name); + list_del(&n->list); + kfree(n); + } + + list_del(&mpam_nodes_ptr->list); + kfree(mpam_nodes_ptr); + mpam_nodes_ptr = NULL; +} + +int __init mpam_nodes_discovery_start(void) +{ + if (!mpam_enabled) + return -EINVAL; + + mpam_nodes_ptr = kzalloc(sizeof(struct mpam_node), GFP_KERNEL); + if (!mpam_nodes_ptr) + return -ENOMEM; + + INIT_LIST_HEAD(&mpam_nodes_ptr->list); + + return 0; +} + +void __init mpam_nodes_discovery_failed(void) +{ + mpam_nodes_destroy(); +} + +int __init mpam_nodes_discovery_complete(void) +{ + return mpam_init(); +} + +static inline int validate_mpam_node(int type, + int component_id) +{ + int ret = 0; + struct mpam_node *n; + + list_for_each_entry(n, &mpam_nodes_ptr->list, list) { + if (n->component_id == component_id && + n->type == type) { + ret = -EINVAL; + break; + } + } + + return ret; +} + +int mpam_create_cache_node(u32 component_id, + phys_addr_t hwpage_address) +{ + struct mpam_node *new; + char *name; + + if (validate_mpam_node(MPAM_RESOURCE_CACHE, component_id)) + goto skip; + + new = kzalloc(sizeof(struct mpam_node), GFP_KERNEL); + if (!new) + return -ENOMEM; + + name = kzalloc(MPAM_NODE_NAME_SIZE, GFP_KERNEL); + if (!name) { + kfree(new); + return -ENOMEM; + } + snprintf(name, MPAM_NODE_NAME_SIZE, "%s%d", "L3TALL", component_id); + + mpam_node_assign_val(new, + name, + MPAM_RESOURCE_CACHE, + hwpage_address, + component_id); + list_add_tail(&new->list, &mpam_nodes_ptr->list); + +skip: + return 0; +} + +int mpam_create_memory_node(u32 component_id, + phys_addr_t hwpage_address) +{ + struct mpam_node *new; + char *name; + + if (validate_mpam_node(MPAM_RESOURCE_MC, component_id)) + goto skip; + + new = kzalloc(sizeof(struct mpam_node), GFP_KERNEL); + if (!new) + return -ENOMEM; + + name = kzalloc(MPAM_NODE_NAME_SIZE, GFP_KERNEL); + if (!name) { + 
kfree(new); + return -ENOMEM; + } + snprintf(name, MPAM_NODE_NAME_SIZE, "%s%d", "HHAALL", component_id); + + mpam_node_assign_val(new, + name, + MPAM_RESOURCE_MC, + hwpage_address, + component_id); + list_add_tail(&new->list, &mpam_nodes_ptr->list); + +skip: + return 0; + +} + +int __init mpam_force_init(void) +{ + int ret; + + if (mpam_enabled != enable_default) + return 0; + + ret = mpam_nodes_discovery_start(); + if (ret) + return ret; + + ret |= mpam_create_cache_node(0, 0x000098b90000ULL); + ret |= mpam_create_cache_node(1, 0x000090b90000ULL); + ret |= mpam_create_cache_node(2, 0x200098b90000ULL); + ret |= mpam_create_cache_node(3, 0x200090b90000ULL); + ret |= mpam_create_memory_node(0, 0x000098c10000ULL); + ret |= mpam_create_memory_node(1, 0x000090c10000ULL); + ret |= mpam_create_memory_node(2, 0x200098c10000ULL); + ret |= mpam_create_memory_node(3, 0x200090c10000ULL); + if (ret) { + mpam_nodes_discovery_failed(); + pr_err("Failed to force create mpam node\n"); + return -EINVAL; + } + + ret = mpam_nodes_discovery_complete(); + if (!ret) + pr_info("Successfully init mpam by hardcode.\n"); + + return 1; +} + static void cat_wrmsr(struct rdt_domain *d, int partid); static void @@ -1137,16 +1278,14 @@ static void mpam_domains_destroy(struct resctrl_resource *r)
static void mpam_domains_init(struct resctrl_resource *r) { - int i, id = 0; - size_t num_nodes = ARRAY_SIZE(mpam_node_all); + int id = 0; struct mpam_node *n; struct list_head *add_pos = NULL; struct rdt_domain *d; struct raw_resctrl_resource *rr = (struct raw_resctrl_resource *)r->res; u32 val;
- for (i = 0; i < num_nodes; i++) { - n = &mpam_node_all[i]; + list_for_each_entry(n, &mpam_nodes_ptr->list, list) { if (r->rid != n->type) continue;
@@ -1215,25 +1354,22 @@ static void mpam_domains_init(struct resctrl_resource *r) } }
-int __read_mostly mpam_enabled; +enum mpam_enable_type __read_mostly mpam_enabled; static int __init mpam_setup(char *str) { - mpam_enabled = 1; + if (!strcmp(str, "=acpi")) + mpam_enabled = enable_acpi; + else + mpam_enabled = enable_default; return 1; } __setup("mpam", mpam_setup);
-static int __init mpam_late_init(void) +static int __init mpam_init(void) { struct resctrl_resource *r; int state, ret;
- if (!mpam_enabled) - return 0; - - if (!cpus_have_const_cap(ARM64_HAS_MPAM)) - return -ENODEV; - rdt_alloc_capable = 1; rdt_mon_capable = 1;
@@ -1242,7 +1378,7 @@ static int __init mpam_late_init(void) ret = mpam_nodes_init(); if (ret) { pr_err("internal error: bad cpu list\n"); - return ret; + goto out; }
mpam_domains_init(&resctrl_resources_all[MPAM_RESOURCE_CACHE]); @@ -1251,8 +1387,10 @@ static int __init mpam_late_init(void) state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "arm64/mpam:online:", mpam_online_cpu, mpam_offline_cpu); - if (state < 0) - return state; + if (state < 0) { + ret = state; + goto out; + }
register_resctrl_specific_files(res_specific_files, ARRAY_SIZE(res_specific_files));
@@ -1262,7 +1400,7 @@ static int __init mpam_late_init(void) ret = resctrl_group_init(); if (ret) { cpuhp_remove_state(state); - return ret; + goto out; }
for_each_resctrl_resource(r) { @@ -1275,11 +1413,11 @@ static int __init mpam_late_init(void) pr_info("MPAM %s monitoring detected\n", r->name); }
- return 0; +out: + mpam_nodes_destroy(); + return ret; }
-late_initcall(mpam_late_init); - /* * __intel_rdt_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR * diff --git a/drivers/acpi/arm64/Kconfig b/drivers/acpi/arm64/Kconfig index 5a6f80fce0d6..4ef04f0ea94c 100644 --- a/drivers/acpi/arm64/Kconfig +++ b/drivers/acpi/arm64/Kconfig @@ -7,3 +7,6 @@ config ACPI_IORT
config ACPI_GTDT bool + +config ACPI_MPAM + bool diff --git a/drivers/acpi/arm64/Makefile b/drivers/acpi/arm64/Makefile index 1017def2ea12..81408ce40506 100644 --- a/drivers/acpi/arm64/Makefile +++ b/drivers/acpi/arm64/Makefile @@ -1,2 +1,3 @@ obj-$(CONFIG_ACPI_IORT) += iort.o obj-$(CONFIG_ACPI_GTDT) += gtdt.o +obj-$(CONFIG_ACPI_MPAM) += mpam.o diff --git a/drivers/acpi/arm64/mpam.c b/drivers/acpi/arm64/mpam.c new file mode 100644 index 000000000000..1f82dce33e07 --- /dev/null +++ b/drivers/acpi/arm64/mpam.c @@ -0,0 +1,249 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Common code for ARM v8 MPAM ACPI + * + * Copyright (C) 2019-2020 Huawei Technologies Co., Ltd + * + * Author: Wang ShaoBo bobo.shaobowang@huawei.com + * + * Code was partially borrowed from http://www.linux-arm.org/git?p= + * linux-jm.git;a=commit;h=10fe7d6363ae96b25f584d4a91f9d0f2fd5faf3b. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +/* Parse the MPAM ACPI table feeding the discovered nodes into the driver */ +#define pr_fmt(fmt) "ACPI MPAM: " fmt + +#include <linux/acpi.h> +#include <acpi/processor.h> +#include <linux/cpu.h> +#include <linux/cpumask.h> +#include <linux/cacheinfo.h> +#include <linux/string.h> +#include <linux/nodemask.h> +#include <asm/mpam_resource.h> + +/** + * acpi_mpam_label_cache_component_id() - Recursivly find @min_physid + * for all leaf CPUs below @cpu_node, use numa node id of @min_cpu_node + * to label mpam cache node, which be signed by @component_id. 
+ * @table_hdr: Pointer to the head of the PPTT table + * @cpu_node: The point in the toplogy to start the walk + * @component_id: The id labels the structure mpam_node cache + */ +static int +acpi_mpam_label_cache_component_id(struct acpi_table_header *table_hdr, + struct acpi_pptt_processor *cpu_node, + u32 *component_id) +{ + phys_cpuid_t min_physid = PHYS_CPUID_INVALID; + struct acpi_pptt_processor *min_cpu_node = NULL; + u32 logical_cpuid; + u32 acpi_processor_id; + + acpi_pptt_find_min_physid_cpu_node(table_hdr, + cpu_node, + &min_physid, + &min_cpu_node); + WARN_ON_ONCE(invalid_phys_cpuid(min_physid)); + if (min_cpu_node == NULL) + return -EINVAL; + + acpi_processor_id = min_cpu_node->acpi_processor_id; + logical_cpuid = acpi_map_cpuid(min_physid, acpi_processor_id); + if (invalid_logical_cpuid(logical_cpuid) || + !cpu_present(logical_cpuid)) { + pr_err_once("Invalid logical cpuid.\n"); + return -EINVAL; + } + + *component_id = cpu_to_node(logical_cpuid); + + return 0; +} + +/** + * acpi_mpam_label_memory_component_id() - Use proximity_domain id to + * label mpam memory node, which be signed by @component_id. 
+ * @proximity_domain: proximity_domain of ACPI MPAM memory node + * @component_id: The id labels the structure mpam_node memory + */ +static int acpi_mpam_label_memory_component_id(u8 proximity_domain, + u32 *component_id) +{ + u32 nid = (u32)proximity_domain; + + if (nid >= nr_online_nodes) { + pr_err_once("Invalid proximity domain\n"); + return -EINVAL; + } + + *component_id = nid; + return 0; +} + +static int __init acpi_mpam_parse_memory(struct acpi_mpam_header *h) +{ + int ret = 0; + u32 component_id; + struct acpi_mpam_node_memory *node = (struct acpi_mpam_node_memory *)h; + + ret = acpi_mpam_label_memory_component_id(node->proximity_domain, + &component_id); + if (ret) { + pr_err("Failed to label memory component id\n"); + return -EINVAL; + } + + ret = mpam_create_memory_node(component_id, + node->header.base_address); + if (ret) { + pr_err("Failed to create memory node\n"); + return -EINVAL; + } + + return ret; +} + +static int __init acpi_mpam_parse_cache(struct acpi_mpam_header *h, + struct acpi_table_header *pptt) +{ + int ret = 0; + u32 component_id; + struct acpi_pptt_cache *pptt_cache; + struct acpi_pptt_processor *pptt_cpu_node; + struct acpi_mpam_node_cache *node = (struct acpi_mpam_node_cache *)h; + + if (!pptt) { + pr_err("No PPTT table found, MPAM cannot be configured\n"); + return -EINVAL; + } + + pptt_cache = acpi_pptt_validate_cache_node(pptt, node->PPTT_ref); + if (!pptt_cache) { + pr_err("Broken PPTT reference in the MPAM table\n"); + return -EINVAL; + } + + /* + * We actually need a cpu_node, as a pointer to the PPTT cache + * description isn't unique. 
+ */ + pptt_cpu_node = acpi_pptt_find_cache_backwards(pptt, pptt_cache); + + ret = acpi_mpam_label_cache_component_id(pptt, pptt_cpu_node, + &component_id); + + if (ret) { + pr_err("Failed to label cache component id\n"); + return -EINVAL; + } + + ret = mpam_create_cache_node(component_id, + node->header.base_address); + if (ret) { + pr_err("Failed to create cache node\n"); + return -EINVAL; + } + + return ret; +} + +static int __init acpi_mpam_parse_table(struct acpi_table_header *table, + struct acpi_table_header *pptt) +{ + char *table_offset = (char *)(table + 1); + char *table_end = (char *)table + table->length; + struct acpi_mpam_header *node_hdr; + int ret = 0; + + ret = mpam_nodes_discovery_start(); + if (ret) + return ret; + + node_hdr = (struct acpi_mpam_header *)table_offset; + while (table_offset < table_end) { + switch (node_hdr->type) { + + case ACPI_MPAM_TYPE_CACHE: + ret = acpi_mpam_parse_cache(node_hdr, pptt); + break; + case ACPI_MPAM_TYPE_MEMORY: + ret = acpi_mpam_parse_memory(node_hdr); + break; + default: + pr_warn_once("Unknown node type %u offset %ld.", + node_hdr->type, + (table_offset-(char *)table)); + /* fall through */ + case ACPI_MPAM_TYPE_SMMU: + /* not yet supported */ + /* fall through */ + case ACPI_MPAM_TYPE_UNKNOWN: + break; + } + if (ret) + break; + + table_offset += node_hdr->length; + node_hdr = (struct acpi_mpam_header *)table_offset; + } + + if (ret) { + pr_err("discovery failed: %d\n", ret); + mpam_nodes_discovery_failed(); + } else { + ret = mpam_nodes_discovery_complete(); + if (!ret) + pr_info("Successfully init mpam by ACPI.\n"); + } + + return ret; +} + +int __init acpi_mpam_parse(void) +{ + struct acpi_table_header *mpam, *pptt; + acpi_status status; + int ret; + + if (!cpus_have_const_cap(ARM64_HAS_MPAM)) + return 0; + + ret = mpam_force_init(); + if (ret) + return 0; + + if (acpi_disabled) + return 0; + + status = acpi_get_table(ACPI_SIG_MPAM, 0, &mpam); + if (ACPI_FAILURE(status)) + return -ENOENT; + + /* PPTT is 
optional, there may be no mpam cache controls */ + acpi_get_table(ACPI_SIG_PPTT, 0, &pptt); + if (ACPI_FAILURE(status)) + pptt = NULL; + + ret = acpi_mpam_parse_table(mpam, pptt); + acpi_put_table(pptt); + acpi_put_table(mpam); + + return ret; +} + +/* + * We want to run after cacheinfo_sysfs_init() has caused the cacheinfo + * structures to be populated. That runs as a device_initcall. + */ +device_initcall_sync(acpi_mpam_parse);