Kernel

[PATCH OLK-5.10 v5 1/3] x86: pmem: move persistent memory(legacy) code into nvdimm
by Zhuling 21 Jan '22
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4O31I
Move x86's pmem.c into drivers/nvdimm, rename X86_PMEM_LEGACY_DEVICE to
PMEM_LEGACY_DEVICE, and add PMEM_LEGACY to control the build of
nd_e820.o, so that the code can be reused by other architectures.
Signed-off-by: Zhuling <zhuling8(a)huawei.com>
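[Editor's note: the description says the legacy pmem code can now be reused by
other architectures. As a hedged illustration only (not part of this patch), an
architecture-specific Kconfig entry could select the new generic symbols; the
option name below is hypothetical:
# Sketch, assuming a hypothetical arm64 option; not part of this patch.
config ARM64_PMEM_LEGACY
	tristate "Support non-standard NVDIMMs and ADR protected memory"
	depends on BLK_DEV
	# per the Makefile change below, builds drivers/nvdimm/pmem_legacy_device.o
	select PMEM_LEGACY_DEVICE
	# per the Makefile change below, builds drivers/nvdimm/nd_e820.o
	select PMEM_LEGACY
	select LIBNVDIMM
]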
---
arch/x86/Kconfig | 6 ++----
arch/x86/kernel/Makefile | 1 -
drivers/nvdimm/Kconfig | 6 ++++++
drivers/nvdimm/Makefile | 2 ++
arch/x86/kernel/pmem.c => drivers/nvdimm/pmem_legacy_device.c | 0
tools/testing/nvdimm/Kbuild | 2 +-
6 files changed, 11 insertions(+), 6 deletions(-)
rename arch/x86/kernel/pmem.c => drivers/nvdimm/pmem_legacy_device.c (100%)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c77ef59..1d3176a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1667,14 +1667,12 @@ config ILLEGAL_POINTER_VALUE
default 0 if X86_32
default 0xdead000000000000 if X86_64
-config X86_PMEM_LEGACY_DEVICE
- bool
-
config X86_PMEM_LEGACY
tristate "Support non-standard NVDIMMs and ADR protected memory"
depends on PHYS_ADDR_T_64BIT
depends on BLK_DEV
- select X86_PMEM_LEGACY_DEVICE
+ select PMEM_LEGACY
+ select PMEM_LEGACY_DEVICE
select NUMA_KEEP_MEMINFO if NUMA
select LIBNVDIMM
help
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index f0606f8..1e12751 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -130,7 +130,6 @@ obj-$(CONFIG_KVM_GUEST) += kvm.o kvmclock.o
obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch.o
obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= paravirt-spinlocks.o
obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o
-obj-$(CONFIG_X86_PMEM_LEGACY_DEVICE) += pmem.o
obj-$(CONFIG_JAILHOUSE_GUEST) += jailhouse.o
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index b7d1eb3..632b6ed 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -19,6 +19,12 @@ menuconfig LIBNVDIMM
if LIBNVDIMM
+config PMEM_LEGACY
+ tristate
+
+config PMEM_LEGACY_DEVICE
+ bool
+
config BLK_DEV_PMEM
tristate "PMEM: Persistent memory block device support"
default LIBNVDIMM
diff --git a/drivers/nvdimm/Makefile b/drivers/nvdimm/Makefile
index 0407753..2098221 100644
--- a/drivers/nvdimm/Makefile
+++ b/drivers/nvdimm/Makefile
@@ -3,6 +3,8 @@ obj-$(CONFIG_LIBNVDIMM) += libnvdimm.o
obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o
obj-$(CONFIG_ND_BTT) += nd_btt.o
obj-$(CONFIG_ND_BLK) += nd_blk.o
+obj-$(CONFIG_PMEM_LEGACY_DEVICE) += pmem_legacy_device.o
+obj-$(CONFIG_PMEM_LEGACY) += nd_e820.o
obj-$(CONFIG_OF_PMEM) += of_pmem.o
obj-$(CONFIG_VIRTIO_PMEM) += virtio_pmem.o nd_virtio.o
diff --git a/arch/x86/kernel/pmem.c b/drivers/nvdimm/pmem_legacy_device.c
similarity index 100%
rename from arch/x86/kernel/pmem.c
rename to drivers/nvdimm/pmem_legacy_device.c
diff --git a/tools/testing/nvdimm/Kbuild b/tools/testing/nvdimm/Kbuild
index 47f9cc9..77aa117 100644
--- a/tools/testing/nvdimm/Kbuild
+++ b/tools/testing/nvdimm/Kbuild
@@ -28,7 +28,7 @@ obj-$(CONFIG_LIBNVDIMM) += libnvdimm.o
obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o
obj-$(CONFIG_ND_BTT) += nd_btt.o
obj-$(CONFIG_ND_BLK) += nd_blk.o
-obj-$(CONFIG_X86_PMEM_LEGACY) += nd_e820.o
+obj-$(CONFIG_PMEM_LEGACY) += nd_e820.o
obj-$(CONFIG_ACPI_NFIT) += nfit.o
ifeq ($(CONFIG_DAX),m)
obj-$(CONFIG_DAX) += dax.o
--
2.9.5

[PATCH openEuler-1.0-LTS] ipmi_si: Phytium S2500 workaround for MMIO-based IPMI
by Laibin Qiu 20 Jan '22
phytium inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4RK58
CVE: NA
--------------------------------
The system hangs when the Phytium S2500 communicates with some BMCs
after several rounds of transactions, unless the controller's timeout
counter is reset manually by calling into firmware through SMC.
Signed-off-by: Wang Yinfeng <wangyinfeng(a)phytium.com.cn>
Signed-off-by: Chen Baozi <chenbaozi(a)phytium.com.cn> #openEuler_contributor
Signed-off-by: Laibin Qiu <qiulaibin(a)huawei.com>
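[Editor's note: in outline, the change below guards every MMIO read with an SMC
call that asks firmware to reset the controller's timeout counters. A condensed
sketch of that pattern, with details as in the diff that follows:
/* Condensed sketch of the pattern used in the diff below. */
static void ctl_timeout_reset(void)
{
	struct arm_smccc_res res;

	/* SMC fast call CTL_RST_FUNC_ID (0xC2000011): firmware resets the counters. */
	arm_smccc_smc(CTL_RST_FUNC_ID, 0x1, 0x28100208, 0x1, 0, 0, 0, 0, &res);
	arm_smccc_smc(CTL_RST_FUNC_ID, 0x1, 0x2810020C, 0x1, 0, 0, 0, 0, &res);
}

static unsigned char intf_mem_inb_2500(const struct si_sm_io *io,
				       unsigned int offset)
{
	ctl_timeout_reset();	/* reset before every register read */
	return readb(io->addr + offset * io->regspacing);
}
The wrapped accessors are only installed when the ACPI DSDT carries the Phytium
OEM ID ("KPSVVJ"), so other platforms keep the plain readb/readw/readl/readq
paths.]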
---
drivers/char/ipmi/ipmi_si_mem_io.c | 117 ++++++++++++++++++++++++++++-
1 file changed, 113 insertions(+), 4 deletions(-)
diff --git a/drivers/char/ipmi/ipmi_si_mem_io.c b/drivers/char/ipmi/ipmi_si_mem_io.c
index 75583612ab10..73a3d6979eaa 100644
--- a/drivers/char/ipmi/ipmi_si_mem_io.c
+++ b/drivers/char/ipmi/ipmi_si_mem_io.c
@@ -3,6 +3,105 @@
#include <linux/io.h>
#include "ipmi_si.h"
+#ifdef CONFIG_ARM_GIC_PHYTIUM_2500
+#include <linux/arm-smccc.h>
+
+#define CTL_RST_FUNC_ID 0xC2000011
+
+static void ctl_smc(unsigned long arg0, unsigned long arg1,
+ unsigned long arg2, unsigned long arg3)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(arg0, arg1, arg2, arg3, 0, 0, 0, 0, &res);
+ if (res.a0 != 0)
+ pr_err("Error: Firmware call SMC reset Failed: %d, addr: 0x%lx\n",
+ (int)res.a0, arg2);
+}
+
+static void ctl_timeout_reset(void)
+{
+ ctl_smc(CTL_RST_FUNC_ID, 0x1, 0x28100208, 0x1);
+ ctl_smc(CTL_RST_FUNC_ID, 0x1, 0x2810020C, 0x1);
+}
+
+static unsigned char intf_mem_inb_2500(const struct si_sm_io *io,
+ unsigned int offset)
+{
+ ctl_timeout_reset();
+
+ return readb((io->addr)+(offset * io->regspacing));
+}
+
+static unsigned char intf_mem_inw_2500(const struct si_sm_io *io,
+ unsigned int offset)
+{
+ ctl_timeout_reset();
+
+ return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
+ & 0xff;
+}
+
+static unsigned char intf_mem_inl_2500(const struct si_sm_io *io,
+ unsigned int offset)
+{
+ ctl_timeout_reset();
+
+ return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
+ & 0xff;
+}
+
+static unsigned char mem_inq_2500(const struct si_sm_io *io,
+ unsigned int offset)
+{
+ ctl_timeout_reset();
+
+ return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
+ & 0xff;
+}
+#else
+#define intf_mem_inb_2500 intf_mem_inb
+#define intf_mem_inw_2500 intf_mem_inw
+#define intf_mem_inl_2500 intf_mem_inl
+#define mem_inq_2500 mem_inq
+#endif
+
+static bool apply_phytium2500_workaround;
+
+#ifdef CONFIG_ACPI
+struct ipmi_workaround_oem_info {
+ char oem_id[ACPI_OEM_ID_SIZE + 1];
+};
+
+static struct ipmi_workaround_oem_info wa_info[] = {
+ {
+ .oem_id = "KPSVVJ",
+ }
+};
+
+static void ipmi_check_phytium_workaround(void)
+{
+ struct acpi_table_header tbl;
+ int i;
+
+ if (ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_DSDT, 0, &tbl)))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
+ if (strncmp(wa_info[i].oem_id, tbl.oem_id, ACPI_OEM_ID_SIZE))
+ continue;
+
+ apply_phytium2500_workaround = true;
+ break;
+ }
+}
+#else
+static void ipmi_check_phytium_workaround(void)
+{
+ apply_phytium2500_workaround = false;
+}
+#endif
+
static unsigned char intf_mem_inb(const struct si_sm_io *io,
unsigned int offset)
{
@@ -81,26 +180,36 @@ int ipmi_si_mem_setup(struct si_sm_io *io)
if (!addr)
return -ENODEV;
+ ipmi_check_phytium_workaround();
+
/*
* Figure out the actual readb/readw/readl/etc routine to use based
* upon the register size.
*/
switch (io->regsize) {
case 1:
- io->inputb = intf_mem_inb;
+ io->inputb = apply_phytium2500_workaround ?
+ intf_mem_inb_2500 :
+ intf_mem_inb;
io->outputb = intf_mem_outb;
break;
case 2:
- io->inputb = intf_mem_inw;
+ io->inputb = apply_phytium2500_workaround ?
+ intf_mem_inw_2500 :
+ intf_mem_inw;
io->outputb = intf_mem_outw;
break;
case 4:
- io->inputb = intf_mem_inl;
+ io->inputb = apply_phytium2500_workaround ?
+ intf_mem_inl_2500 :
+ intf_mem_inl;
io->outputb = intf_mem_outl;
break;
#ifdef readq
case 8:
- io->inputb = mem_inq;
+ io->inputb = apply_phytium2500_workaround ?
+ mem_inq_2500 :
+ mem_inq;
io->outputb = mem_outq;
break;
#endif
--
2.22.0

20 Jan '22
From: Smita Koralahalli <Smita.KoralahalliChannabasappa(a)amd.com>
mainline inclusion
from mainline-v5.13-rc1
commit da66658638c947cab0fb157289f03698453ff8d5
category: feature
feature: milan cpu
bugzilla: https://gitee.com/openeuler/kernel/issues/I4OBEH?from=project-issue
CVE: NA
--------------------------------
Add PMU events for AMD Zen3 processors as documented in the AMD Processor
Programming Reference for Family 19h and Model 01h [1].
Below are the events which are new on Zen3:
PMCx041 ls_mab_alloc.{all_allocations|hardware_prefetcher_allocations|load_store_allocations}
PMCx043 ls_dmnd_fills_from_sys.ext_cache_local
PMCx044 ls_any_fills_from_sys.{mem_io_remote|ext_cache_remote|mem_io_local|ext_cache_local|int_cache|lcl_l2}
PMCx047 ls_misal_loads.{ma4k|ma64}
PMCx059 ls_sw_pf_dc_fills.ext_cache_local
PMCx05a ls_hw_pf_dc_fills.ext_cache_local
PMCx05f ls_alloc_mab_count
PMCx085 bp_l1_tlb_miss_l2_tlb_miss.coalesced_4k
PMCx0ab de_dis_cops_from_decoder.disp_op_type.{any_integer_dispatch|any_fp_dispatch}
PMCx0cc ex_ret_ind_brch_instr
PMCx18e ic_tag_hit_miss.{all_instruction_cache_accesses|instruction_cache_miss|instruction_cache_hit}
PMCx1c7 ex_ret_msprd_brnch_instr_dir_msmtch
PMCx28f op_cache_hit_miss.{all_op_cache_accesses|op_cache_miss|op_cache_hit}
Section 2.1.17.2 "Performance Measurement" of "PPR for AMD Family 19h,
Model 01h, Revision B1 Processors - 55898 Rev 0.35 - Feb 5, 2021." lists
new metrics. Add them.
Preserve the events for Zen3 if they are measurable and non-zero as taken
from Zen2 directory even if the PPR of Zen3 [1] omits them. Those events
are the following:
PMCx000 fpu_pipe_assignment.{total|total0|total1|total2|total3}
PMCx004 fp_num_mov_elim_scal_op.{optimized|opt_potential|sse_mov_ops_elim|sse_mov_ops}
PMCx02D ls_rdtsc
PMCx040 ls_dc_accesses
PMCx046 ls_tablewalker.{iside|ic_type1|ic_type0|dside|dc_type1|dc_type0}
PMCx061 l2_request_g2.{group1|ls_rd_sized|ls_rd_sized_nc|ic_rd_sized|ic_rd_sized_nc|smc_inval|bus_lock_originator|bus_locks_responses}
PMCx062 l2_latency.l2_cycles_waiting_on_fills
PMCx063 l2_wcb_req.{wcb_write|wcb_close|zero_byte_store|cl_zero}
PMCx06d l2_fill_pending.l2_fill_busy
PMCx080 ic_fw32
PMCx081 ic_fw32_miss
PMCx086 bp_snp_re_sync
PMCx087 ic_fetch_stall.{ic_stall_any|ic_stall_dq_empty|ic_stall_back_pressure}
PMCx08a bp_l1_btb_correct
PMCx08c ic_cache_inval.{l2_invalidating_probe|fill_invalidated}
PMCx099 bp_tlb_rel
PMCx0a9 de_dis_uop_queue_empty_di0
PMCx0c7 ex_ret_brn_resync
PMCx28a ic_oc_mode_switch.{oc_ic_mode_switch|ic_oc_mode_switch}
L3PMCx01 l3_request_g1.caching_l3_cache_accesses
L3PMCx06 l3_comb_clstr_state.{other_l3_miss_typs|request_miss}
[1] Processor Programming Reference (PPR) for AMD Family 19h, Model 01h,
Revision B1 Processors - 55898 Rev 0.35 - Feb 5, 2021.
[2] Processor Programming Reference (PPR) for AMD Family 17h Model 71h,
Revision B0 Processors, 56176 Rev 3.06 - Jul 17, 2019.
[3] Processor Programming Reference (PPR) for AMD Family 17h Models
01h,08h, Revision B2 Processors, 54945 Rev 3.03 - Jun 14, 2019.
All of the PPRs can be found at:
https://bugzilla.kernel.org/show_bug.cgi?id=206537
Reviewed-by: Robert Richter <rrichter(a)amd.com>
Signed-off-by: Smita Koralahalli <Smita.KoralahalliChannabasappa(a)amd.com>
Cc: Alexander Shishkin <alexander.shishkin(a)linux.intel.com>
Cc: Ian Rogers <irogers(a)google.com>
Cc: Ingo Molnar <mingo(a)redhat.com>
Cc: Jiri Olsa <jolsa(a)redhat.com>
Cc: Kim Phillips <kim.phillips(a)amd.com>
Cc: Mark Rutland <mark.rutland(a)arm.com>
Cc: Martin Liška <mliska(a)suse.cz>
Cc: Michael Petlan <mpetlan(a)redhat.com>
Cc: Namhyung Kim <namhyung(a)kernel.org>
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: Vijay Thakkar <vijaythakkar(a)me.com>
Cc: linux-perf-users(a)vger.kernel.org
Link: https://lore.kernel.org/r/20210406215944.113332-5-Smita.KoralahalliChannaba…
Signed-off-by: Arnaldo Carvalho de Melo <acme(a)redhat.com>
Reviewed-by: Yang Jihong <yangjihong1(a)huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai(a)huawei.com>
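[Editor's note: with these tables in place, the new symbolic event names can be
used directly by perf on a Zen3 system. A hedged usage example; the workload
and interval are arbitrary, and the event names are taken from the JSON files
added below:
# Count MAB allocations and local-memory demand fills system-wide for 5 seconds.
perf stat -e ls_mab_alloc.all_allocations,ls_dmnd_fills_from_sys.mem_io_local -a -- sleep 5
This requires a perf binary built from a tree containing these JSON files,
since the pmu-events tables are compiled into perf at build time.]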
---
.../pmu-events/arch/x86/amdzen3/branch.json | 53 +++
.../pmu-events/arch/x86/amdzen3/cache.json | 402 ++++++++++++++++
.../pmu-events/arch/x86/amdzen3/core.json | 137 ++++++
.../arch/x86/amdzen3/data-fabric.json | 98 ++++
.../arch/x86/amdzen3/floating-point.json | 139 ++++++
.../pmu-events/arch/x86/amdzen3/memory.json | 428 ++++++++++++++++++
.../pmu-events/arch/x86/amdzen3/other.json | 103 +++++
.../arch/x86/amdzen3/recommended.json | 214 +++++++++
tools/perf/pmu-events/arch/x86/mapfile.csv | 2 +-
9 files changed, 1575 insertions(+), 1 deletion(-)
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen3/branch.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen3/cache.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen3/core.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen3/data-fabric.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen3/floating-point.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen3/memory.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen3/other.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen3/recommended.json
diff --git a/tools/perf/pmu-events/arch/x86/amdzen3/branch.json b/tools/perf/pmu-events/arch/x86/amdzen3/branch.json
new file mode 100644
index 000000000000..018a7fe94fb9
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen3/branch.json
@@ -0,0 +1,53 @@
+[
+ {
+ "EventName": "bp_l1_btb_correct",
+ "EventCode": "0x8a",
+ "BriefDescription": "L1 Branch Prediction Overrides Existing Prediction (speculative)."
+ },
+ {
+ "EventName": "bp_l2_btb_correct",
+ "EventCode": "0x8b",
+ "BriefDescription": "L2 Branch Prediction Overrides Existing Prediction (speculative)."
+ },
+ {
+ "EventName": "bp_dyn_ind_pred",
+ "EventCode": "0x8e",
+ "BriefDescription": "Dynamic Indirect Predictions.",
+ "PublicDescription": "The number of times a branch used the indirect predictor to make a prediction."
+ },
+ {
+ "EventName": "bp_de_redirect",
+ "EventCode": "0x91",
+ "BriefDescription": "Decode Redirects",
+ "PublicDescription": "The number of times the instruction decoder overrides the predicted target."
+ },
+ {
+ "EventName": "bp_l1_tlb_fetch_hit",
+ "EventCode": "0x94",
+ "BriefDescription": "The number of instruction fetches that hit in the L1 ITLB.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "bp_l1_tlb_fetch_hit.if1g",
+ "EventCode": "0x94",
+ "BriefDescription": "The number of instruction fetches that hit in the L1 ITLB. L1 Instruction TLB hit (1G page size).",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "bp_l1_tlb_fetch_hit.if2m",
+ "EventCode": "0x94",
+ "BriefDescription": "The number of instruction fetches that hit in the L1 ITLB. L1 Instruction TLB hit (2M page size).",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "bp_l1_tlb_fetch_hit.if4k",
+ "EventCode": "0x94",
+ "BriefDescription": "The number of instruction fetches that hit in the L1 ITLB. L1 Instrcution TLB hit (4K or 16K page size).",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "bp_tlb_rel",
+ "EventCode": "0x99",
+ "BriefDescription": "The number of ITLB reload requests."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/amdzen3/cache.json b/tools/perf/pmu-events/arch/x86/amdzen3/cache.json
new file mode 100644
index 000000000000..fa1d7499a2e3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen3/cache.json
@@ -0,0 +1,402 @@
+[
+ {
+ "EventName": "l2_request_g1.rd_blk_l",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Data cache reads (including hardware and software prefetch).",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_request_g1.rd_blk_x",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Data cache stores.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_request_g1.ls_rd_blk_c_s",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Data cache shared reads.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_request_g1.cacheable_ic_read",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Instruction cache reads.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_request_g1.change_to_x",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Data cache state change requests. Request change to writable, check L2 for current state.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "l2_request_g1.prefetch_l2_cmd",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). PrefetchL2Cmd.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "l2_request_g1.l2_hw_pf",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). L2 Prefetcher. All prefetches accepted by L2 pipeline, hit or miss. Types of PF and L2 hit/miss broken out in a separate perfmon event.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "l2_request_g1.group2",
+ "EventCode": "0x60",
+ "BriefDescription": "Miscellaneous events covered in more detail by l2_request_g2 (PMCx061).",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_request_g1.all_no_prefetch",
+ "EventCode": "0x60",
+ "UMask": "0xf9"
+ },
+ {
+ "EventName": "l2_request_g2.group1",
+ "EventCode": "0x61",
+ "BriefDescription": "Miscellaneous events covered in more detail by l2_request_g1 (PMCx060).",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_request_g2.ls_rd_sized",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Data cache read sized.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_request_g2.ls_rd_sized_nc",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Data cache read sized non-cacheable.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_request_g2.ic_rd_sized",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Instruction cache read sized.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_request_g2.ic_rd_sized_nc",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Instruction cache read sized non-cacheable.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "l2_request_g2.smc_inval",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Self-modifying code invalidates.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "l2_request_g2.bus_locks_originator",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Bus locks.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "l2_request_g2.bus_locks_responses",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Bus lock response.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_latency.l2_cycles_waiting_on_fills",
+ "EventCode": "0x62",
+ "BriefDescription": "Total cycles spent waiting for L2 fills to complete from L3 or memory, divided by four. Event counts are for both threads. To calculate average latency, the number of fills from both threads must be used.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_wcb_req.wcb_write",
+ "EventCode": "0x63",
+ "BriefDescription": "LS to L2 WCB write requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) write requests.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_wcb_req.wcb_close",
+ "EventCode": "0x63",
+ "BriefDescription": "LS to L2 WCB close requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) close requests.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_wcb_req.zero_byte_store",
+ "EventCode": "0x63",
+ "BriefDescription": "LS to L2 WCB zero byte store requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) zero byte store requests.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "l2_wcb_req.cl_zero",
+ "EventCode": "0x63",
+ "BriefDescription": "LS to L2 WCB cache line zeroing requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) cache line zeroing requests.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_cs",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache shared read hit in L2",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_x",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache read hit in L2. Modifiable.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_s",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache read hit non-modifiable line in L2.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_x",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache store or state change hit in L2.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_c",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache request miss in L2 (all types). Use l2_cache_misses_from_dc_misses instead.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_fill_hit_x",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache hit modifiable line in L2.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_fill_hit_s",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache hit non-modifiable line in L2.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_fill_miss",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache request miss in L2. Use l2_cache_misses_from_ic_miss instead.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_access_in_l2",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache requests in L2.",
+ "UMask": "0x07"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_dc_miss_in_l2",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache request miss in L2 and Data cache request miss in L2 (all types).",
+ "UMask": "0x09"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_dc_hit_in_l2",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache request hit in L2 and Data cache request hit in L2 (all types).",
+ "UMask": "0xf6"
+ },
+ {
+ "EventName": "l2_fill_pending.l2_fill_busy",
+ "EventCode": "0x6d",
+ "BriefDescription": "Cycles with fill pending from L2. Total cycles spent with one or more fill requests in flight from L2.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_pf_hit_l2",
+ "EventCode": "0x70",
+ "BriefDescription": "L2 prefetch hit in L2. Use l2_cache_hits_from_l2_hwpf instead.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_hit_l3",
+ "EventCode": "0x71",
+ "BriefDescription": "L2 prefetcher hits in L3. Counts all L2 prefetches accepted by the L2 pipeline which miss the L2 cache and hit the L3.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_l3",
+ "EventCode": "0x72",
+ "BriefDescription": "L2 prefetcher misses in L3. Counts all L2 prefetches accepted by the L2 pipeline which miss the L2 and the L3 caches.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "ic_fw32",
+ "EventCode": "0x80",
+ "BriefDescription": "The number of 32B fetch windows transferred from IC pipe to DE instruction decoder (includes non-cacheable and cacheable fill responses)."
+ },
+ {
+ "EventName": "ic_fw32_miss",
+ "EventCode": "0x81",
+ "BriefDescription": "The number of 32B fetch windows tried to read the L1 IC and missed in the full tag."
+ },
+ {
+ "EventName": "ic_cache_fill_l2",
+ "EventCode": "0x82",
+ "BriefDescription": "Instruction Cache Refills from L2. The number of 64 byte instruction cache line was fulfilled from the L2 cache."
+ },
+ {
+ "EventName": "ic_cache_fill_sys",
+ "EventCode": "0x83",
+ "BriefDescription": "Instruction Cache Refills from System. The number of 64 byte instruction cache line fulfilled from system memory or another cache."
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_hit",
+ "EventCode": "0x84",
+ "BriefDescription": "L1 ITLB Miss, L2 ITLB Hit. The number of instruction fetches that miss in the L1 ITLB but hit in the L2 ITLB."
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_miss",
+ "EventCode": "0x85",
+ "BriefDescription": "The number of instruction fetches that miss in both the L1 and L2 TLBs.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_miss.coalesced_4k",
+ "EventCode": "0x85",
+ "BriefDescription": "The number of valid fills into the ITLB originating from the LS Page-Table Walker. Tablewalk requests are issued for L1-ITLB and L2-ITLB misses. Walk for >4K Coalesced page.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_miss.if1g",
+ "EventCode": "0x85",
+ "BriefDescription": "The number of valid fills into the ITLB originating from the LS Page-Table Walker. Tablewalk requests are issued for L1-ITLB and L2-ITLB misses. Walk for 1G page.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_miss.if2m",
+ "EventCode": "0x85",
+ "BriefDescription": "The number of valid fills into the ITLB originating from the LS Page-Table Walker. Tablewalk requests are issued for L1-ITLB and L2-ITLB misses. Walk for 2M page.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_miss.if4k",
+ "EventCode": "0x85",
+ "BriefDescription": "The number of valid fills into the ITLB originating from the LS Page-Table Walker. Tablewalk requests are issued for L1-ITLB and L2-ITLB misses. Walk to 4K page.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "bp_snp_re_sync",
+ "EventCode": "0x86",
+ "BriefDescription": "The number of pipeline restarts caused by invalidating probes that hit on the instruction stream currently being executed. This would happen if the active instruction stream was being modified by another processor in an MP system - typically a highly unlikely event."
+ },
+ {
+ "EventName": "ic_fetch_stall.ic_stall_any",
+ "EventCode": "0x87",
+ "BriefDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle for any reason (nothing valid in pipe ICM1).",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ic_fetch_stall.ic_stall_dq_empty",
+ "EventCode": "0x87",
+ "BriefDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to DQ empty.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ic_fetch_stall.ic_stall_back_pressure",
+ "EventCode": "0x87",
+ "BriefDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to back-pressure.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ic_cache_inval.l2_invalidating_probe",
+ "EventCode": "0x8c",
+ "BriefDescription": "IC line invalidated due to L2 invalidating probe (external or LS). The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ic_cache_inval.fill_invalidated",
+ "EventCode": "0x8c",
+ "BriefDescription": "IC line invalidated due to overwriting fill response. The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ic_tag_hit_miss.all_instruction_cache_accesses",
+ "EventCode": "0x18e",
+ "BriefDescription": "All Instruction Cache Accesses. Counts various IC tag related hit and miss events.",
+ "UMask": "0x1f"
+ },
+ {
+ "EventName": "ic_tag_hit_miss.instruction_cache_miss",
+ "EventCode": "0x18e",
+ "BriefDescription": "Instruction Cache Miss. Counts various IC tag related hit and miss events.",
+ "UMask": "0x18"
+ },
+ {
+ "EventName": "ic_tag_hit_miss.instruction_cache_hit",
+ "EventCode": "0x18e",
+ "BriefDescription": "Instruction Cache Hit. Counts various IC tag related hit and miss events.",
+ "UMask": "0x07"
+ },
+ {
+ "EventName": "ic_oc_mode_switch.oc_ic_mode_switch",
+ "EventCode": "0x28a",
+ "BriefDescription": "OC Mode Switch. OC to IC mode switch.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ic_oc_mode_switch.ic_oc_mode_switch",
+ "EventCode": "0x28a",
+ "BriefDescription": "OC Mode Switch. IC to OC mode switch.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "op_cache_hit_miss.all_op_cache_accesses",
+ "EventCode": "0x28f",
+ "BriefDescription": "All Op Cache accesses. Counts Op Cache micro-tag hit/miss events",
+ "UMask": "0x07"
+ },
+ {
+ "EventName": "op_cache_hit_miss.op_cache_miss",
+ "EventCode": "0x28f",
+ "BriefDescription": "Op Cache Miss. Counts Op Cache micro-tag hit/miss events",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "op_cache_hit_miss.op_cache_hit",
+ "EventCode": "0x28f",
+ "BriefDescription": "Op Cache Hit. Counts Op Cache micro-tag hit/miss events",
+ "UMask": "0x03"
+ },
+ {
+ "EventName": "l3_request_g1.caching_l3_cache_accesses",
+ "EventCode": "0x01",
+ "BriefDescription": "Caching: L3 cache accesses",
+ "UMask": "0x80",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_lookup_state.all_l3_req_typs",
+ "EventCode": "0x04",
+ "BriefDescription": "All L3 Request Types. All L3 cache Requests",
+ "UMask": "0xff",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_comb_clstr_state.other_l3_miss_typs",
+ "EventCode": "0x06",
+ "BriefDescription": "Other L3 Miss Request Types",
+ "UMask": "0xfe",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_comb_clstr_state.request_miss",
+ "EventCode": "0x06",
+ "BriefDescription": "L3 cache misses",
+ "UMask": "0x01",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "xi_sys_fill_latency",
+ "EventCode": "0x90",
+ "BriefDescription": "L3 Cache Miss Latency. Total cycles for all transactions divided by 16. Ignores SliceMask and ThreadMask.",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "xi_ccx_sdp_req1",
+ "EventCode": "0x9a",
+ "BriefDescription": "L3 Misses by Request Type. Ignores SliceID, EnAllSlices, CoreID, EnAllCores and ThreadMask. Requires unit mask 0xFF to engage event for counting.",
+ "UMask": "0xff",
+ "Unit": "L3PMC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/amdzen3/core.json b/tools/perf/pmu-events/arch/x86/amdzen3/core.json
new file mode 100644
index 000000000000..4e27a2be359e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen3/core.json
@@ -0,0 +1,137 @@
+[
+ {
+ "EventName": "ex_ret_instr",
+ "EventCode": "0xc0",
+ "BriefDescription": "Retired Instructions."
+ },
+ {
+ "EventName": "ex_ret_ops",
+ "EventCode": "0xc1",
+ "BriefDescription": "Retired Ops. Use macro_ops_retired instead.",
+ "PublicDescription": "The number of macro-ops retired."
+ },
+ {
+ "EventName": "ex_ret_brn",
+ "EventCode": "0xc2",
+ "BriefDescription": "Retired Branch Instructions.",
+ "PublicDescription": "The number of branch instructions retired. This includes all types of architectural control flow changes, including exceptions and interrupts."
+ },
+ {
+ "EventName": "ex_ret_brn_misp",
+ "EventCode": "0xc3",
+ "BriefDescription": "Retired Branch Instructions Mispredicted.",
+ "PublicDescription": "The number of retired branch instructions, that were mispredicted."
+ },
+ {
+ "EventName": "ex_ret_brn_tkn",
+ "EventCode": "0xc4",
+ "BriefDescription": "Retired Taken Branch Instructions.",
+ "PublicDescription": "The number of taken branches that were retired. This includes all types of architectural control flow changes, including exceptions and interrupts."
+ },
+ {
+ "EventName": "ex_ret_brn_tkn_misp",
+ "EventCode": "0xc5",
+ "BriefDescription": "Retired Taken Branch Instructions Mispredicted.",
+ "PublicDescription": "The number of retired taken branch instructions that were mispredicted."
+ },
+ {
+ "EventName": "ex_ret_brn_far",
+ "EventCode": "0xc6",
+ "BriefDescription": "Retired Far Control Transfers.",
+ "PublicDescription": "The number of far control transfers retired including far call/jump/return, IRET, SYSCALL and SYSRET, plus exceptions and interrupts. Far control transfers are not subject to branch prediction."
+ },
+ {
+ "EventName": "ex_ret_brn_resync",
+ "EventCode": "0xc7",
+ "BriefDescription": "Retired Branch Resyncs.",
+ "PublicDescription": "The number of resync branches. These reflect pipeline restarts due to certain microcode assists and events such as writes to the active instruction stream, among other things. Each occurrence reflects a restart penalty similar to a branch mispredict. This is relatively rare."
+ },
+ {
+ "EventName": "ex_ret_near_ret",
+ "EventCode": "0xc8",
+ "BriefDescription": "Retired Near Returns.",
+ "PublicDescription": "The number of near return instructions (RET or RET Iw) retired."
+ },
+ {
+ "EventName": "ex_ret_near_ret_mispred",
+ "EventCode": "0xc9",
+ "BriefDescription": "Retired Near Returns Mispredicted.",
+ "PublicDescription": "The number of near returns retired that were not correctly predicted by the return address predictor. Each such mispredict incurs the same penalty as a mispredicted conditional branch instruction."
+ },
+ {
+ "EventName": "ex_ret_brn_ind_misp",
+ "EventCode": "0xca",
+ "BriefDescription": "Retired Indirect Branch Instructions Mispredicted.",
+ "PublicDescription": "The number of indirect branches retired that were not correctly predicted. Each such mispredict incurs the same penalty as a mispredicted conditional branch instruction. Note that only EX mispredicts are counted."
+ },
+ {
+ "EventName": "ex_ret_mmx_fp_instr.sse_instr",
+ "EventCode": "0xcb",
+ "BriefDescription": "SSE instructions (SSE, SSE2, SSE3, SSSE3, SSE4A, SSE41, SSE42, AVX).",
+ "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ex_ret_mmx_fp_instr.mmx_instr",
+ "EventCode": "0xcb",
+ "BriefDescription": "MMX instructions.",
+ "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. MMX instructions.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ex_ret_mmx_fp_instr.x87_instr",
+ "EventCode": "0xcb",
+ "BriefDescription": "x87 instructions.",
+ "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. x87 instructions.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ex_ret_ind_brch_instr",
+ "EventCode": "0xcc",
+ "BriefDescription": "Retired Indirect Branch Instructions. The number of indirect branches retired."
+ },
+ {
+ "EventName": "ex_ret_cond",
+ "EventCode": "0xd1",
+ "BriefDescription": "Retired Conditional Branch Instructions."
+ },
+ {
+ "EventName": "ex_div_busy",
+ "EventCode": "0xd3",
+ "BriefDescription": "Div Cycles Busy count."
+ },
+ {
+ "EventName": "ex_div_count",
+ "EventCode": "0xd4",
+ "BriefDescription": "Div Op Count."
+ },
+ {
+ "EventName": "ex_ret_msprd_brnch_instr_dir_msmtch",
+ "EventCode": "0x1c7",
+ "BriefDescription": "Retired Mispredicted Branch Instructions due to Direction Mismatch",
+ "PublicDescription": "The number of retired conditional branch instructions that were not correctly predicted because of a branch direction mismatch."
+ },
+ {
+ "EventName": "ex_tagged_ibs_ops.ibs_count_rollover",
+ "EventCode": "0x1cf",
+ "BriefDescription": "Tagged IBS Ops. Number of times an op could not be tagged by IBS because of a previous tagged op that has not retired.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ex_tagged_ibs_ops.ibs_tagged_ops_ret",
+ "EventCode": "0x1cf",
+ "BriefDescription": "Tagged IBS Ops. Number of Ops tagged by IBS that retired.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ex_tagged_ibs_ops.ibs_tagged_ops",
+ "EventCode": "0x1cf",
+ "BriefDescription": "Tagged IBS Ops. Number of Ops tagged by IBS.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ex_ret_fused_instr",
+ "EventCode": "0x1d0",
+ "BriefDescription": "Counts retired Fused Instructions."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/amdzen3/data-fabric.json b/tools/perf/pmu-events/arch/x86/amdzen3/data-fabric.json
new file mode 100644
index 000000000000..40271df40015
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen3/data-fabric.json
@@ -0,0 +1,98 @@
+[
+ {
+ "EventName": "remote_outbound_data_controller_0",
+ "PublicDescription": "Remote Link Controller Outbound Packet Types: Data (32B): Remote Link Controller 0",
+ "EventCode": "0x7c7",
+ "UMask": "0x02",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_outbound_data_controller_1",
+ "PublicDescription": "Remote Link Controller Outbound Packet Types: Data (32B): Remote Link Controller 1",
+ "EventCode": "0x807",
+ "UMask": "0x02",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_outbound_data_controller_2",
+ "PublicDescription": "Remote Link Controller Outbound Packet Types: Data (32B): Remote Link Controller 2",
+ "EventCode": "0x847",
+ "UMask": "0x02",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_outbound_data_controller_3",
+ "PublicDescription": "Remote Link Controller Outbound Packet Types: Data (32B): Remote Link Controller 3",
+ "EventCode": "0x887",
+ "UMask": "0x02",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_0",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x07",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_1",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x47",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_2",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x87",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_3",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0xc7",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_4",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x107",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_5",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x147",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_6",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x187",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_7",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x1c7",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/amdzen3/floating-point.json b/tools/perf/pmu-events/arch/x86/amdzen3/floating-point.json
new file mode 100644
index 000000000000..98cfcb9c78ec
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen3/floating-point.json
@@ -0,0 +1,139 @@
+[
+ {
+ "EventName": "fpu_pipe_assignment.total",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number of fp uOps.",
+ "PublicDescription": "Total number of fp uOps. The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS.",
+ "UMask": "0x0f"
+ },
+ {
+ "EventName": "fpu_pipe_assignment.total3",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number uOps assigned to pipe 3.",
+ "PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one-cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 3.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "fpu_pipe_assignment.total2",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number uOps assigned to pipe 2.",
+ "PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 2.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "fpu_pipe_assignment.total1",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number uOps assigned to pipe 1.",
+ "PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 1.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "fpu_pipe_assignment.total0",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number of fp uOps on pipe 0.",
+ "PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 0.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.all",
+ "EventCode": "0x03",
+ "BriefDescription": "All FLOPS. This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.mac_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Multiply-Accumulate FLOPs. Each MAC operation is counted as 2 FLOPS. This is a retire-based event. The number of retired SSE/AVX FLOPs. The number of events logged per cycle can vary from 0 to 64. This event requires the use of the MergeEvent since it can count above 15 events per cycle. See 2.1.17.3 [Large Increment per Cycle Events]. It does not provide a useful count without the use of the MergeEvent.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.div_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Divide/square root FLOPs. This is a retire-based event. The number of retired SSE/AVX FLOPs. The number of events logged per cycle can vary from 0 to 64. This event requires the use of the MergeEvent since it can count above 15 events per cycle. See 2.1.17.3 [Large Increment per Cycle Events]. It does not provide a useful count without the use of the MergeEvent.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.mult_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Multiply FLOPs. This is a retire-based event. The number of retired SSE/AVX FLOPs. The number of events logged per cycle can vary from 0 to 64. This event requires the use of the MergeEvent since it can count above 15 events per cycle. See 2.1.17.3 [Large Increment per Cycle Events]. It does not provide a useful count without the use of the MergeEvent.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.add_sub_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Add/subtract FLOPs. This is a retire-based event. The number of retired SSE/AVX FLOPs. The number of events logged per cycle can vary from 0 to 64. This event requires the use of the MergeEvent since it can count above 15 events per cycle. See 2.1.17.3 [Large Increment per Cycle Events]. It does not provide a useful count without the use of the MergeEvent.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.optimized",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of Scalar Ops optimized. This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.opt_potential",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of Ops that are candidates for optimization (have Z-bit either set or pass). This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.sse_mov_ops_elim",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of SSE Move Ops eliminated. This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.sse_mov_ops",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of SSE Move Ops. This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.sse_bot_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "SSE/AVX bottom-executing ops retired. The number of serializing Ops retired.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.sse_ctrl_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "SSE/AVX control word mispredict traps. The number of serializing Ops retired.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.x87_bot_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "x87 bottom-executing ops retired. The number of serializing Ops retired.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.x87_ctrl_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "x87 control word mispredict traps due to mispredictions in RC or PC, or changes in mask bits. The number of serializing Ops retired.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "fp_disp_faults.ymm_spill_fault",
+ "EventCode": "0x0e",
+ "BriefDescription": "Floating Point Dispatch Faults. YMM spill fault.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "fp_disp_faults.ymm_fill_fault",
+ "EventCode": "0x0e",
+ "BriefDescription": "Floating Point Dispatch Faults. YMM fill fault.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "fp_disp_faults.xmm_fill_fault",
+ "EventCode": "0x0e",
+ "BriefDescription": "Floating Point Dispatch Faults. XMM fill fault.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "fp_disp_faults.x87_fill_fault",
+ "EventCode": "0x0e",
+ "BriefDescription": "Floating Point Dispatch Faults. x87 fill fault.",
+ "UMask": "0x01"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/amdzen3/memory.json b/tools/perf/pmu-events/arch/x86/amdzen3/memory.json
new file mode 100644
index 000000000000..a2833955dcd2
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen3/memory.json
@@ -0,0 +1,428 @@
+[
+ {
+ "EventName": "ls_bad_status2.stli_other",
+ "EventCode": "0x24",
+ "BriefDescription": "Non-forwardable conflict; used to reduce STLI's via software. All reasons. Store To Load Interlock (STLI) are loads that were unable to complete because of a possible match with an older store, and the older store could not do STLF for some reason.",
+ "PublicDescription" : "Store-to-load conflicts: A load was unable to complete due to a non-forwardable conflict with an older store. Most commonly, a load's address range partially but not completely overlaps with an uncompleted older store. Software can avoid this problem by using same-size and same-alignment loads and stores when accessing the same data. Vector/SIMD code is particularly susceptible to this problem; software should construct wide vector stores by manipulating vector elements in registers using shuffle/blend/swap instructions prior to storing to memory, instead of using narrow element-by-element stores.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_locks.spec_lock_hi_spec",
+ "EventCode": "0x25",
+ "BriefDescription": "Retired lock instructions. High speculative cacheable lock speculation succeeded.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_locks.spec_lock_lo_spec",
+ "EventCode": "0x25",
+ "BriefDescription": "Retired lock instructions. Low speculative cacheable lock speculation succeeded.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_locks.non_spec_lock",
+ "EventCode": "0x25",
+ "BriefDescription": "Retired lock instructions. Non-speculative lock succeeded.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_locks.bus_lock",
+ "EventCode": "0x25",
+ "BriefDescription": "Retired lock instructions. Comparable to legacy bus lock.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_ret_cl_flush",
+ "EventCode": "0x26",
+ "BriefDescription": "The number of retired CLFLUSH instructions. This is a non-speculative event."
+ },
+ {
+ "EventName": "ls_ret_cpuid",
+ "EventCode": "0x27",
+ "BriefDescription": "The number of CPUID instructions retired."
+ },
+ {
+ "EventName": "ls_dispatch.ld_st_dispatch",
+ "EventCode": "0x29",
+ "BriefDescription": "Load-op-Store Dispatch. Dispatch of a single op that performs a load from and store to the same memory address. Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_dispatch.store_dispatch",
+ "EventCode": "0x29",
+ "BriefDescription": "Dispatch of a single op that performs a memory store. Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_dispatch.ld_dispatch",
+ "EventCode": "0x29",
+ "BriefDescription": "Dispatch of a single op that performs a memory load. Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_smi_rx",
+ "EventCode": "0x2b",
+ "BriefDescription": "Counts the number of SMIs received."
+ },
+ {
+ "EventName": "ls_int_taken",
+ "EventCode": "0x2c",
+ "BriefDescription": "Counts the number of interrupts taken."
+ },
+ {
+ "EventName": "ls_rdtsc",
+ "EventCode": "0x2d",
+ "BriefDescription": "Number of reads of the TSC (RDTSC instructions). The count is speculative."
+ },
+ {
+ "EventName": "ls_stlf",
+ "EventCode": "0x35",
+ "BriefDescription": "Number of STLF hits."
+ },
+ {
+ "EventName": "ls_st_commit_cancel2.st_commit_cancel_wcb_full",
+ "EventCode": "0x37",
+ "BriefDescription": "A non-cacheable store and the non-cacheable commit buffer is full.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_dc_accesses",
+ "EventCode": "0x40",
+ "BriefDescription": "Number of accesses to the dcache for load/store references.",
+ "PublicDescription": "The number of accesses to the data cache for load and store references. This may include certain microcode scratchpad accesses, although these are generally rare. Each increment represents an eight-byte access, although the instruction may only be accessing a portion of that. This event is a speculative event."
+ },
+ {
+ "EventName": "ls_mab_alloc.all_allocations",
+ "EventCode": "0x41",
+ "BriefDescription": "All Allocations. Counts when a LS pipe allocates a MAB entry.",
+ "UMask": "0x7f"
+ },
+ {
+ "EventName": "ls_mab_alloc.hardware_prefetcher_allocations",
+ "EventCode": "0x41",
+ "BriefDescription": "Hardware Prefetcher Allocations. Counts when a LS pipe allocates a MAB entry.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_mab_alloc.load_store_allocations",
+ "EventCode": "0x41",
+ "BriefDescription": "Load Store Allocations. Counts when a LS pipe allocates a MAB entry.",
+ "UMask": "0x3f"
+ },
+ {
+ "EventName": "ls_mab_alloc.dc_prefetcher",
+ "EventCode": "0x41",
+ "BriefDescription": "LS MAB Allocates by Type. DC prefetcher.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_mab_alloc.stores",
+ "EventCode": "0x41",
+ "BriefDescription": "LS MAB Allocates by Type. Stores.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_mab_alloc.loads",
+ "EventCode": "0x41",
+ "BriefDescription": "LS MAB Allocates by Type. Loads.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_dmnd_fills_from_sys.mem_io_remote",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand Data Cache Fills by Data Source. From DRAM or IO connected in different Node.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_dmnd_fills_from_sys.ext_cache_remote",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand Data Cache Fills by Data Source. From CCX Cache in different Node.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ls_dmnd_fills_from_sys.mem_io_local",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand Data Cache Fills by Data Source. From DRAM or IO connected in same node.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_dmnd_fills_from_sys.ext_cache_local",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand Data Cache Fills by Data Source. From cache of different CCX in same node.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_dmnd_fills_from_sys.int_cache",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand Data Cache Fills by Data Source. From L3 or different L2 in same CCX.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_dmnd_fills_from_sys.lcl_l2",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand Data Cache Fills by Data Source. From Local L2 to the core.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.mem_io_remote",
+ "EventCode": "0x44",
+ "BriefDescription": "Any Data Cache Fills by Data Source. From DRAM or IO connected in different Node.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.ext_cache_remote",
+ "EventCode": "0x44",
+ "BriefDescription": "Any Data Cache Fills by Data Source. From CCX Cache in different Node.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.mem_io_local",
+ "EventCode": "0x44",
+ "BriefDescription": "Any Data Cache Fills by Data Source. From DRAM or IO connected in same node.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.ext_cache_local",
+ "EventCode": "0x44",
+ "BriefDescription": "Any Data Cache Fills by Data Source. From cache of different CCX in same node.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.int_cache",
+ "EventCode": "0x44",
+ "BriefDescription": "Any Data Cache Fills by Data Source. From L3 or different L2 in same CCX.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.lcl_l2",
+ "EventCode": "0x44",
+ "BriefDescription": "Any Data Cache Fills by Data Source. From Local L2 to the core.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.all",
+ "EventCode": "0x45",
+ "BriefDescription": "All L1 DTLB Misses or Reloads. Use l1_dtlb_misses instead.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload to a 1G page that also missed in the L2 TLB.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload to a 2M page that also missed in the L2 TLB.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_coalesced_page_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload coalesced page that also missed in the L2 TLB.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload to a 4K page that missed the L2 TLB.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload to a 1G page that hit in the L2 TLB.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload to a 2M page that hit in the L2 TLB.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_coalesced_page_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload to a coalesced page that hit in the L2 TLB.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload to a 4K page that hit in the L2 TLB.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_tablewalker.iside",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks on I-side.",
+ "UMask": "0x0c"
+ },
+ {
+ "EventName": "ls_tablewalker.ic_type1",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks IC Type 1.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_tablewalker.ic_type0",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks IC Type 0.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_tablewalker.dside",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks on D-side.",
+ "UMask": "0x03"
+ },
+ {
+ "EventName": "ls_tablewalker.dc_type1",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks DC Type 1.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_tablewalker.dc_type0",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks DC Type 0.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_misal_loads.ma4k",
+ "EventCode": "0x47",
+ "BriefDescription": "The number of 4KB misaligned (i.e., page crossing) loads.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_misal_loads.ma64",
+ "EventCode": "0x47",
+ "BriefDescription": "The number of 64B misaligned (i.e., cacheline crossing) loads.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_pref_instr_disp",
+ "EventCode": "0x4b",
+ "BriefDescription": "Software Prefetch Instructions Dispatched (Speculative).",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "ls_pref_instr_disp.prefetch_nta",
+ "EventCode": "0x4b",
+ "BriefDescription": "Software Prefetch Instructions Dispatched (Speculative). PrefetchNTA instruction. See docAPM3 PREFETCHlevel.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_pref_instr_disp.prefetch_w",
+ "EventCode": "0x4b",
+ "BriefDescription": "Software Prefetch Instructions Dispatched (Speculative). PrefetchW instruction. See docAPM3 PREFETCHW.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_pref_instr_disp.prefetch",
+ "EventCode": "0x4b",
+ "BriefDescription": "Software Prefetch Instructions Dispatched (Speculative). PrefetchT0, T1 and T2 instructions. See docAPM3 PREFETCHlevel.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_inef_sw_pref.mab_mch_cnt",
+ "EventCode": "0x52",
+ "BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core. Software PREFETCH instruction saw a match on an already-allocated miss request buffer.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_inef_sw_pref.data_pipe_sw_pf_dc_hit",
+ "EventCode": "0x52",
+ "BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core. Software PREFETCH instruction saw a DC hit.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fills.mem_io_remote",
+ "EventCode": "0x59",
+ "BriefDescription": "Software Prefetch Data Cache Fills by Data Source. From DRAM or IO connected in different Node.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fills.ext_cache_remote",
+ "EventCode": "0x59",
+ "BriefDescription": "Software Prefetch Data Cache Fills by Data Source. From CCX Cache in different Node.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fills.mem_io_local",
+ "EventCode": "0x59",
+ "BriefDescription": "Software Prefetch Data Cache Fills by Data Source. From DRAM or IO connected in same node.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fills.ext_cache_local",
+ "EventCode": "0x59",
+ "BriefDescription": "Software Prefetch Data Cache Fills by Data Source. From cache of different CCX in same node.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fills.int_cache",
+ "EventCode": "0x59",
+ "BriefDescription": "Software Prefetch Data Cache Fills by Data Source. From L3 or different L2 in same CCX.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fills.lcl_l2",
+ "EventCode": "0x59",
+ "BriefDescription": "Software Prefetch Data Cache Fills by Data Source. From Local L2 to the core.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fills.mem_io_remote",
+ "EventCode": "0x5a",
+ "BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From DRAM or IO connected in different Node.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fills.ext_cache_remote",
+ "EventCode": "0x5a",
+ "BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From CCX Cache in different Node.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fills.mem_io_local",
+ "EventCode": "0x5a",
+ "BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From DRAM or IO connected in same node.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fills.ext_cache_local",
+ "EventCode": "0x5a",
+ "BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From cache of different CCX in same node.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fills.int_cache",
+ "EventCode": "0x5a",
+ "BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From L3 or different L2 in same CCX.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fills.lcl_l2",
+ "EventCode": "0x5a",
+ "BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From Local L2 to the core.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_alloc_mab_count",
+ "EventCode": "0x5f",
+ "BriefDescription": "Count of Allocated Mabs",
+ "PublicDescription": "This event counts the in-flight L1 data cache misses (allocated Miss Address Buffers) divided by 4 and rounded down each cycle unless used with the MergeEvent functionality. If the MergeEvent is used, it counts the exact number of outstanding L1 data cache misses. See 2.1.17.3 [Large Increment per Cycle Events]."
+ },
+ {
+ "EventName": "ls_not_halted_cyc",
+ "EventCode": "0x76",
+ "BriefDescription": "Cycles not in Halt."
+ },
+ {
+ "EventName": "ls_tlb_flush.all_tlb_flushes",
+ "EventCode": "0x78",
+ "BriefDescription": "All TLB Flushes. Requires unit mask 0xFF to engage event for counting. Use all_tlbs_flushed instead",
+ "UMask": "0xff"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/amdzen3/other.json b/tools/perf/pmu-events/arch/x86/amdzen3/other.json
new file mode 100644
index 000000000000..7da5d0791ea3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen3/other.json
@@ -0,0 +1,103 @@
+[
+ {
+ "EventName": "de_dis_uop_queue_empty_di0",
+ "EventCode": "0xa9",
+ "BriefDescription": "Cycles where the Micro-Op Queue is empty."
+ },
+ {
+ "EventName": "de_dis_cops_from_decoder.disp_op_type.any_integer_dispatch",
+ "EventCode": "0xab",
+ "BriefDescription": "Any Integer dispatch. Types of Oops Dispatched from Decoder.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "de_dis_cops_from_decoder.disp_op_type.any_fp_dispatch",
+ "EventCode": "0xab",
+ "BriefDescription": "Any FP dispatch. Types of Oops Dispatched from Decoder.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.fp_flush_recovery_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a Token Stall. Also counts cycles when the thread is not selected to dispatch but would have been stalled due to a Token Stall. FP Flush recovery stall.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.fp_sch_rsrc_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a Token Stall. Also counts cycles when the thread is not selected to dispatch but would have been stalled due to a Token Stall. FP scheduler resource stall. Applies to ops that use the FP scheduler.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.fp_reg_file_rsrc_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a Token Stall. Also counts cycles when the thread is not selected to dispatch but would have been stalled due to a Token Stall. Floating point register file resource stall. Applies to all FP ops that have a destination register.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.taken_brnch_buffer_rsrc",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a Token Stall. Also counts cycles when the thread is not selected to dispatch but would have been stalled due to a Token Stall. Taken branch buffer resource stall.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.int_sched_misc_token_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. Integer Scheduler miscellaneous resource stall.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.store_queue_rsrc_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a Token Stall. Also counts cycles when the thread is not selected to dispatch but would have been stalled due to a Token Stall. Store Queue resource stall. Applies to all ops with store semantics.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.load_queue_rsrc_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a Token Stall. Also counts cycles when the thread is not selected to dispatch but would have been stalled due to a Token Stall. Load Queue resource stall. Applies to all ops with load semantics.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.int_phy_reg_file_rsrc_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a Token Stall. Also counts cycles when the thread is not selected to dispatch but would have been stalled due to a Token Stall. Integer Physical Register File resource stall. Integer Physical Register File, applies to all ops that have an integer destination register.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls2.retire_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. Insufficient Retire Queue tokens available.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls2.agsq_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. AGSQ Tokens unavailable.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls2.int_sch3_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. No tokens for Integer Scheduler Queue 3 available.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls2.int_sch2_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. No tokens for Integer Scheduler Queue 2 available.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls2.int_sch1_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. No tokens for Integer Scheduler Queue 1 available.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls2.int_sch0_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. No tokens for Integer Scheduler Queue 0 available.",
+ "UMask": "0x01"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/amdzen3/recommended.json b/tools/perf/pmu-events/arch/x86/amdzen3/recommended.json
new file mode 100644
index 000000000000..988cf68ae825
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen3/recommended.json
@@ -0,0 +1,214 @@
+[
+ {
+ "MetricName": "branch_misprediction_ratio",
+ "BriefDescription": "Execution-Time Branch Misprediction Ratio (Non-Speculative)",
+ "MetricExpr": "d_ratio(ex_ret_brn_misp, ex_ret_brn)",
+ "MetricGroup": "branch_prediction",
+ "ScaleUnit": "100%"
+ },
+ {
+ "EventName": "all_data_cache_accesses",
+ "EventCode": "0x29",
+ "BriefDescription": "All L1 Data Cache Accesses",
+ "UMask": "0x07"
+ },
+ {
+ "MetricName": "all_l2_cache_accesses",
+ "BriefDescription": "All L2 Cache Accesses",
+ "MetricExpr": "l2_request_g1.all_no_prefetch + l2_pf_hit_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "EventName": "l2_cache_accesses_from_ic_misses",
+ "EventCode": "0x60",
+ "BriefDescription": "L2 Cache Accesses from L1 Instruction Cache Misses (including prefetch)",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_cache_accesses_from_dc_misses",
+ "EventCode": "0x60",
+ "BriefDescription": "L2 Cache Accesses from L1 Data Cache Misses (including prefetch)",
+ "UMask": "0xe8"
+ },
+ {
+ "MetricName": "l2_cache_accesses_from_l2_hwpf",
+ "BriefDescription": "L2 Cache Accesses from L2 HWPF",
+ "MetricExpr": "l2_pf_hit_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "MetricName": "all_l2_cache_misses",
+ "BriefDescription": "All L2 Cache Misses",
+ "MetricExpr": "l2_cache_req_stat.ic_dc_miss_in_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "EventName": "l2_cache_misses_from_ic_miss",
+ "EventCode": "0x64",
+ "BriefDescription": "L2 Cache Misses from L1 Instruction Cache Misses",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_cache_misses_from_dc_misses",
+ "EventCode": "0x64",
+ "BriefDescription": "L2 Cache Misses from L1 Data Cache Misses",
+ "UMask": "0x08"
+ },
+ {
+ "MetricName": "l2_cache_misses_from_l2_hwpf",
+ "BriefDescription": "L2 Cache Misses from L2 Cache HWPF",
+ "MetricExpr": "l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "MetricName": "all_l2_cache_hits",
+ "BriefDescription": "All L2 Cache Hits",
+ "MetricExpr": "l2_cache_req_stat.ic_dc_hit_in_l2 + l2_pf_hit_l2",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "EventName": "l2_cache_hits_from_ic_misses",
+ "EventCode": "0x64",
+ "BriefDescription": "L2 Cache Hits from L1 Instruction Cache Misses",
+ "UMask": "0x06"
+ },
+ {
+ "EventName": "l2_cache_hits_from_dc_misses",
+ "EventCode": "0x64",
+ "BriefDescription": "L2 Cache Hits from L1 Data Cache Misses",
+ "UMask": "0xf0"
+ },
+ {
+ "EventName": "l2_cache_hits_from_l2_hwpf",
+ "EventCode": "0x70",
+ "BriefDescription": "L2 Cache Hits from L2 Cache HWPF",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "l3_cache_accesses",
+ "EventCode": "0x04",
+ "BriefDescription": "L3 Cache Accesses",
+ "UMask": "0xff",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_misses",
+ "EventCode": "0x04",
+ "BriefDescription": "L3 Misses (includes cacheline state change requests)",
+ "UMask": "0x01",
+ "Unit": "L3PMC"
+ },
+ {
+ "MetricName": "l3_read_miss_latency",
+ "BriefDescription": "Average L3 Read Miss Latency (in core clocks)",
+ "MetricExpr": "(xi_sys_fill_latency * 16) / xi_ccx_sdp_req1",
+ "MetricGroup": "l3_cache",
+ "ScaleUnit": "1core clocks"
+ },
+ {
+ "MetricName": "op_cache_fetch_miss_ratio",
+ "BriefDescription": "Op Cache (64B) Fetch Miss Ratio",
+ "MetricExpr": "d_ratio(op_cache_hit_miss.op_cache_miss, op_cache_hit_miss.all_op_cache_accesses)",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "MetricName": "ic_fetch_miss_ratio",
+ "BriefDescription": "Instruction Cache (32B) Fetch Miss Ratio",
+ "MetricExpr": "d_ratio(ic_tag_hit_miss.instruction_cache_miss, ic_tag_hit_miss.all_instruction_cache_accesses)",
+ "MetricGroup": "l2_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "EventName": "l1_data_cache_fills_from_memory",
+ "EventCode": "0x44",
+ "BriefDescription": "L1 Data Cache Fills: From Memory",
+ "UMask": "0x48"
+ },
+ {
+ "EventName": "l1_data_cache_fills_from_remote_node",
+ "EventCode": "0x44",
+ "BriefDescription": "L1 Data Cache Fills: From Remote Node",
+ "UMask": "0x50"
+ },
+ {
+ "EventName": "l1_data_cache_fills_from_within_same_ccx",
+ "EventCode": "0x44",
+ "BriefDescription": "L1 Data Cache Fills: From within same CCX",
+ "UMask": "0x03"
+ },
+ {
+ "EventName": "l1_data_cache_fills_from_external_ccx_cache",
+ "EventCode": "0x44",
+ "BriefDescription": "L1 Data Cache Fills: From External CCX Cache",
+ "UMask": "0x14"
+ },
+ {
+ "EventName": "l1_data_cache_fills_all",
+ "EventCode": "0x44",
+ "BriefDescription": "L1 Data Cache Fills: All",
+ "UMask": "0xff"
+ },
+ {
+ "MetricName": "l1_itlb_misses",
+ "BriefDescription": "L1 ITLB Misses",
+ "MetricExpr": "bp_l1_tlb_miss_l2_tlb_hit + bp_l1_tlb_miss_l2_tlb_miss",
+ "MetricGroup": "tlb"
+ },
+ {
+ "EventName": "l2_itlb_misses",
+ "EventCode": "0x85",
+ "BriefDescription": "L2 ITLB Misses & Instruction page walks",
+ "UMask": "0x07"
+ },
+ {
+ "EventName": "l1_dtlb_misses",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Misses",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "l2_dtlb_misses",
+ "EventCode": "0x45",
+ "BriefDescription": "L2 DTLB Misses & Data page walks",
+ "UMask": "0xf0"
+ },
+ {
+ "EventName": "all_tlbs_flushed",
+ "EventCode": "0x78",
+ "BriefDescription": "All TLBs Flushed",
+ "UMask": "0xff"
+ },
+ {
+ "MetricName": "macro_ops_dispatched",
+ "BriefDescription": "Macro-ops Dispatched",
+ "MetricExpr": "de_dis_cops_from_decoder.disp_op_type.any_integer_dispatch + de_dis_cops_from_decoder.disp_op_type.any_fp_dispatch",
+ "MetricGroup": "decoder"
+ },
+ {
+ "EventName": "sse_avx_stalls",
+ "EventCode": "0x0e",
+ "BriefDescription": "Mixed SSE/AVX Stalls",
+ "UMask": "0x0e"
+ },
+ {
+ "EventName": "macro_ops_retired",
+ "EventCode": "0xc1",
+ "BriefDescription": "Macro-ops Retired"
+ },
+ {
+ "MetricName": "all_remote_links_outbound",
+ "BriefDescription": "Approximate: Outbound data bytes for all Remote Links for a node (die)",
+ "MetricExpr": "remote_outbound_data_controller_0 + remote_outbound_data_controller_1 + remote_outbound_data_controller_2 + remote_outbound_data_controller_3",
+ "MetricGroup": "data_fabric",
+ "PerPkg": "1",
+ "ScaleUnit": "3e-5MiB"
+ },
+ {
+ "MetricName": "nps1_die_to_dram",
+ "BriefDescription": "Approximate: Combined DRAM B/bytes of all channels on a NPS1 node (die) (may need --metric-no-group)",
+ "MetricExpr": "dram_channel_data_controller_0 + dram_channel_data_controller_1 + dram_channel_data_controller_2 + dram_channel_data_controller_3 + dram_channel_data_controller_4 + dram_channel_data_controller_5 + dram_channel_data_controller_6 + dram_channel_data_controller_7",
+ "MetricGroup": "data_fabric",
+ "PerPkg": "1",
+ "ScaleUnit": "6.1e-5MiB"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
index 2f2a209e87e1..9f97b32533a0 100644
--- a/tools/perf/pmu-events/arch/x86/mapfile.csv
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -38,4 +38,4 @@ GenuineIntel-6-7E,v1,icelake,core
GenuineIntel-6-86,v1,tremontx,core
AuthenticAMD-23-([12][0-9A-F]|[0-9A-F]),v2,amdzen1,core
AuthenticAMD-23-[[:xdigit:]]+,v1,amdzen2,core
-AuthenticAMD-25-[[:xdigit:]]+,v1,amdzen2,core
+AuthenticAMD-25-[[:xdigit:]]+,v1,amdzen3,core
--
2.20.1
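For reference, once these amdzen3 JSON files and the mapfile.csv entry are in
place, the new events and metrics can be exercised from user space with perf.
A minimal sketch follows; the event and metric names are taken from the JSON
above, while the workload path is a placeholder and actual availability
depends on running on Zen3 hardware with a matching perf build:
  # confirm the new event aliases are visible
  perf list | grep -i ls_dmnd_fills_from_sys
  # count demand data cache fills by data source for a workload
  perf stat -e ls_dmnd_fills_from_sys.lcl_l2,ls_dmnd_fills_from_sys.mem_io_local -- ./workload
  # evaluate one of the metrics defined in recommended.json
  perf stat -M branch_misprediction_ratio -- ./workload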
20 Jan '22
Hello, your patch has been submitted to CI for checking.
Follow-up merge progress can be tracked under the following issue:
https://gitee.com/openeuler/kernel/issues/I4RK58
Thanks
BR
Laibin
On 2022/1/20 10:35, Chen Baozi wrote:
> phytium inclusion
> category: bugfix
> bugzilla: NA
> CVE: NA
>
> --------------------------------
>
> The system would hang up when the Phytium S2500 communicates with
> some BMCs after several rounds of transactions, unless we reset
> the controller timeout counter manually by calling firmware through
> SMC.
>
> Signed-off-by: Wang Yinfeng <wangyinfeng(a)phytium.com.cn>
> Signed-off-by: Chen Baozi <chenbaozi(a)phytium.com.cn>
> ---
> drivers/char/ipmi/ipmi_si_mem_io.c | 117 ++++++++++++++++++++++++++++-
> 1 file changed, 113 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/char/ipmi/ipmi_si_mem_io.c b/drivers/char/ipmi/ipmi_si_mem_io.c
> index 75583612ab10..73a3d6979eaa 100644
> --- a/drivers/char/ipmi/ipmi_si_mem_io.c
> +++ b/drivers/char/ipmi/ipmi_si_mem_io.c
> @@ -3,6 +3,105 @@
> #include <linux/io.h>
> #include "ipmi_si.h"
>
> +#ifdef CONFIG_ARM_GIC_PHYTIUM_2500
> +#include <linux/arm-smccc.h>
> +
> +#define CTL_RST_FUNC_ID 0xC2000011
> +
> +static void ctl_smc(unsigned long arg0, unsigned long arg1,
> + unsigned long arg2, unsigned long arg3)
> +{
> + struct arm_smccc_res res;
> +
> + arm_smccc_smc(arg0, arg1, arg2, arg3, 0, 0, 0, 0, &res);
> + if (res.a0 != 0)
> + pr_err("Error: Firmware call SMC reset Failed: %d, addr: 0x%lx\n",
> + (int)res.a0, arg2);
> +}
> +
> +static void ctl_timeout_reset(void)
> +{
> + ctl_smc(CTL_RST_FUNC_ID, 0x1, 0x28100208, 0x1);
> + ctl_smc(CTL_RST_FUNC_ID, 0x1, 0x2810020C, 0x1);
> +}
> +
> +static unsigned char intf_mem_inb_2500(const struct si_sm_io *io,
> + unsigned int offset)
> +{
> + ctl_timeout_reset();
> +
> + return readb((io->addr)+(offset * io->regspacing));
> +}
> +
> +static unsigned char intf_mem_inw_2500(const struct si_sm_io *io,
> + unsigned int offset)
> +{
> + ctl_timeout_reset();
> +
> + return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
> + & 0xff;
> +}
> +
> +static unsigned char intf_mem_inl_2500(const struct si_sm_io *io,
> + unsigned int offset)
> +{
> + ctl_timeout_reset();
> +
> + return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
> + & 0xff;
> +}
> +
> +static unsigned char mem_inq_2500(const struct si_sm_io *io,
> + unsigned int offset)
> +{
> + ctl_timeout_reset();
> +
> + return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
> + & 0xff;
> +}
> +#else
> +#define intf_mem_inb_2500 intf_mem_inb
> +#define intf_mem_inw_2500 intf_mem_inw
> +#define intf_mem_inl_2500 intf_mem_inl
> +#define mem_inq_2500 mem_inq
> +#endif
> +
> +static bool apply_phytium2500_workaround;
> +
> +#ifdef CONFIG_ACPI
> +struct ipmi_workaround_oem_info {
> + char oem_id[ACPI_OEM_ID_SIZE + 1];
> +};
> +
> +static struct ipmi_workaround_oem_info wa_info[] = {
> + {
> + .oem_id = "KPSVVJ",
> + }
> +};
> +
> +static void ipmi_check_phytium_workaround(void)
> +{
> + struct acpi_table_header tbl;
> + int i;
> +
> + if (ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_DSDT, 0, &tbl)))
> + return;
> +
> + for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
> + if (strncmp(wa_info[i].oem_id, tbl.oem_id, ACPI_OEM_ID_SIZE))
> + continue;
> +
> + apply_phytium2500_workaround = true;
> + break;
> + }
> +}
> +#else
> +static void ipmi_check_phytium_workaround(void)
> +{
> + apply_phytium2500_workaround = false;
> +}
> +#endif
> +
> static unsigned char intf_mem_inb(const struct si_sm_io *io,
> unsigned int offset)
> {
> @@ -81,26 +180,36 @@ int ipmi_si_mem_setup(struct si_sm_io *io)
> if (!addr)
> return -ENODEV;
>
> + ipmi_check_phytium_workaround();
> +
> /*
> * Figure out the actual readb/readw/readl/etc routine to use based
> * upon the register size.
> */
> switch (io->regsize) {
> case 1:
> - io->inputb = intf_mem_inb;
> + io->inputb = apply_phytium2500_workaround ?
> + intf_mem_inb_2500 :
> + intf_mem_inb;
> io->outputb = intf_mem_outb;
> break;
> case 2:
> - io->inputb = intf_mem_inw;
> + io->inputb = apply_phytium2500_workaround ?
> + intf_mem_inw_2500 :
> + intf_mem_inw;
> io->outputb = intf_mem_outw;
> break;
> case 4:
> - io->inputb = intf_mem_inl;
> + io->inputb = apply_phytium2500_workaround ?
> + intf_mem_inl_2500 :
> + intf_mem_inl;
> io->outputb = intf_mem_outl;
> break;
> #ifdef readq
> case 8:
> - io->inputb = mem_inq;
> + io->inputb = apply_phytium2500_workaround ?
> + mem_inq_2500 :
> + mem_inq;
> io->outputb = mem_outq;
> break;
> #endif
>
[PATCH openEuler-1.0-LTS] audit: bugfix for infinite loop when flush the hold queue
by Yang Yingliang 20 Jan '22
From: Cui GaoSheng <cuigaosheng1(a)huawei.com>
hulk inclusion
category: bugfix
bugzilla: 186105, https://gitee.com/openeuler/kernel/issues/I4RGWS?from=project-issue
CVE: NA
-----------------------------------------------------------------
When "audit=1" is added to the cmdline and the audit_hold_queue stays
non-empty, flushing the hold queue falls into an infinite loop. Fix this
by stopping the flush of the hold queue when the netlink connection is
abnormal.
Fixes: 3413ddc91e02a ("audit: improve robustness of the audit queue handling")
Signed-off-by: Cui GaoSheng <cuigaosheng1(a)huawei.com>
Reviewed-by: Xiu Jianfeng <xiujianfeng(a)huawei.com>
Reviewed-by: weiyang wang <wangweiyang2(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
kernel/audit.c | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/kernel/audit.c b/kernel/audit.c
index c5e034fe14bbb..3de5ebb945592 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -740,6 +740,8 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
if (!sk) {
if (err_hook)
(*err_hook)(skb);
+ if (queue == &audit_hold_queue)
+ goto out;
continue;
}
@@ -756,6 +758,8 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
(*err_hook)(skb);
if (rc == -EAGAIN)
rc = 0;
+ if (queue == &audit_hold_queue)
+ goto out;
/* continue to drain the queue */
continue;
} else
@@ -767,6 +771,7 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
}
}
+out:
return (rc >= 0 ? 0 : rc);
}
--
2.25.1
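For context, a rough way to set up and observe the situation described above,
assuming the audit userspace tools (auditctl) are installed and grubby is
available for editing the kernel command line; the commands are illustrative
only:
  # boot with auditing enabled on the kernel command line
  grubby --update-kernel=ALL --args="audit=1"
  # after reboot, inspect audit status and backlog counters
  auditctl -s
  # a kauditd stuck flushing the hold queue would show up as a
  # kernel thread spinning on CPU
  top -b -n 1 | grep kauditd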
[PATCH openEuler-1.0-LTS] blk-throttle: enable hierarchical throttle in cgroup v1
by Yang Yingliang 19 Jan '22
From: Yu Kuai <yukuai3(a)huawei.com>
hulk inclusion
category: feature
bugzilla: 186072, https://gitee.com/openeuler/kernel/issues/I4RH0V
CVE: NA
-----------------------------------------------
The blkio subsystem is not under the default hierarchy in cgroup v1,
which means io throttle configurations are only effective on the
current cgroup.
This patch introduces a new feature that enables hierarchical behaviour
for io throttle, so that configurations also take effect on child cgroups.
The feature is disabled by default, and can be enabled by adding
"blkcg_global_limit=1", "blkcg_global_limit=Y" or "blkcg_global_limit=y"
to the boot cmdline.
Signed-off-by: Yu Kuai <yukuai3(a)huawei.com>
Reviewed-by: Hou Tao <houtao1(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
block/blk-throttle.c | 16 +++++++++++++++-
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index ceeb17104aa82..6b3ced8ae1a5f 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -43,6 +43,19 @@ static struct blkcg_policy blkcg_policy_throtl;
/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
+/* True if global limit is enabled in cgroup v1 */
+static bool global_limit;
+
+static int __init setup_global_limit(char *str)
+{
+ if (!strcmp(str, "1") || !strcmp(str, "Y") || !strcmp(str, "y"))
+ global_limit = true;
+
+ return 1;
+}
+
+__setup("blkcg_global_limit=", setup_global_limit);
+
/*
* To implement hierarchical throttling, throtl_grps form a tree and bios
* are dispatched upwards level by level until they reach the top and get
@@ -537,7 +550,8 @@ static void throtl_pd_init(struct blkg_policy_data *pd)
* regardless of the position of the group in the hierarchy.
*/
sq->parent_sq = &td->service_queue;
- if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
+ if ((cgroup_subsys_on_dfl(io_cgrp_subsys) || global_limit) &&
+ blkg->parent)
sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
tg->td = td;
}
--
2.25.1
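As a usage illustration, a minimal sketch of the new switch together with
cgroup v1 blkio throttling; it assumes the blkio controller is mounted at
/sys/fs/cgroup/blkio, and the device number 8:0 and the 1 MiB/s limit are
placeholders for a real disk:
  # enable hierarchical io throttle for cgroup v1 at boot
  grubby --update-kernel=ALL --args="blkcg_global_limit=1"
  # after reboot, create a parent/child hierarchy and limit reads on the parent
  mkdir -p /sys/fs/cgroup/blkio/parent/child
  echo "8:0 1048576" > /sys/fs/cgroup/blkio/parent/blkio.throttle.read_bps_device
  # with the feature enabled, tasks in the child cgroup are also
  # subject to the parent's limit
  echo $$ > /sys/fs/cgroup/blkio/parent/child/cgroup.procs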
[PATCH openEuler-1.0-LTS] xfs: map unwritten blocks in XFS_IOC_{ALLOC,FREE}SP just like fallocate
by Yang Yingliang 19 Jan '22
From: "Darrick J. Wong" <djwong(a)kernel.org>
mainline inclusion
from mainline-v5.16-rc5
commit 983d8e60f50806f90534cc5373d0ce867e5aaf79
category: bugfix
bugzilla: 186083
CVE: CVE-2021-4155
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
The old ALLOCSP/FREESP ioctls in XFS can be used to preallocate space at
the end of files, just like fallocate and RESVSP. Make the behavior
consistent with the other ioctls.
Reported-by: Kirill Tkhai <ktkhai(a)virtuozzo.com>
Signed-off-by: Darrick J. Wong <djwong(a)kernel.org>
Signed-off-by: Darrick J. Wong <darrick.wong(a)oracle.com>
Reviewed-by: Dave Chinner <dchinner(a)redhat.com>
Reviewed-by: Eric Sandeen <sandeen(a)redhat.com>
Signed-off-by: Guo Xuenan <guoxuenan(a)huawei.com>
Reviewed-by: Zhang Yi <yi.zhang(a)huawei.com>
Reviewed-by: Xiu Jianfeng <xiujianfeng(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
fs/xfs/xfs_ioctl.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 263436977c39e..b6d85da7f3508 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -701,7 +701,8 @@ xfs_ioc_space(
flags |= XFS_PREALLOC_CLEAR;
if (bf->l_start > XFS_ISIZE(ip)) {
error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
- bf->l_start - XFS_ISIZE(ip), 0);
+ bf->l_start - XFS_ISIZE(ip),
+ XFS_BMAPI_PREALLOC);
if (error)
goto out_unlock;
}
--
2.25.1
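For illustration, a rough check of the behaviour change using xfs_io and
xfs_bmap. The file path is a placeholder, and the allocsp argument form
(target size followed by an unused 0) is an assumption about the legacy
xfs_io command, which must still be available in the installed xfsprogs:
  # preallocate past EOF via the legacy ALLOCSP ioctl
  xfs_io -f -c "truncate 0" -c "allocsp 1m 0" /mnt/xfs/testfile
  # with this fix the added blocks are mapped as unwritten extents,
  # matching what "falloc 0 1m" (fallocate) would produce
  xfs_bmap -v /mnt/xfs/testfile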
Hi Liu
Thanks for your submission. For the previously submitted AMD Milan (V2)
adaptation patches, we have two comments in total; please confirm them.
1. Under Essential, 08ed77e414ab v5.10-rc1 "perf vendor events amd: Add
recommended events" has not been merged.
2. For 41cd02c6f7f6 "kvm: x86: Expose RDPID in KVM_GET_SUPPORTED_CPUID",
there are two follow-up bugfixes that need to be confirmed:
new fix: 36fa06f9ff39 ("KVM: x86: Add support for RDPID without RDTSCP")
new fix: 8aec21c04caa ("KVM: VMX: Do not advertise RDPID if
ENABLE_RDTSCP control is unsupported")
BR
Laibin
On 2021/12/16 10:27, Jackie Liu wrote:
> Hi, ALL
>
> Attached are the AMD patches added to the KYLIN kernel, rebased onto the
> openEuler branch. I have not figured out the rules for generating the patch
> header, and this script does not seem to fit either, so I left them
> unchanged. Please check.
>
> --
> Jackie Liu
>
> On 2021/12/15 11:44 AM, Zheng Zengkai wrote:
>> #!/bin/sh
>> #
>> # Usage:
>> #
>> # prerequisite:
>> # The patches to add header should be patches from linux mainline.
>> #
>> # command:
>> # add-openeuler-patch-header.sh commit1..commit2 gitee_issue_url
>> #
>> # commit1: mainline start commit id;
>> # commit2: mainline end commit id
>> # gitee_issue_url: for example:
>> https://gitee.com/openeuler/kernel/issues/I4MKP4
>>
>> # Example:
>> # add-openeuler-patch-header.sh afd2ff9b7e1b..8bb7eca972a
>> https://gitee.com/openeuler/kernel/issues/I4MKP4
>> #
>>
>> for p in `ls *.patch`
>> do
>> if [ $p == "0000-cover-letter.patch" ]; then
>> continue
>> fi
>>
>> title=$(sed '/^Subject: \[PATCH .*\]/{N;s/\n / /g}' $p | grep
>> -E "^Subject: \[PATCH .*\]" | cut -d ']' -f 2)
>> commit=$(git log --pretty=oneline $1 | grep -F "$title" | cut
>> -d ' ' -f 1)
>> if [ -n "$multi" ]; then
>> echo "$p" has multi-commits
>> commit="multi"
>> continue
>> fi
>> if [ -z "$commit" ]; then
>> echo "$p" commit is null
>> commit="null"
>> continue
>> fi
>>
>> # find insert line
>> i=5
>> while true
>> do
>> ln=""$i"p"
>> sub=`cat $p | sed -n "$ln"`
>> i=$(($i+1))
>> if [ -z "$sub" ]; then
>> break
>> fi
>> done
>> i=$(($i-1))
>>
>> version=`git name-rev $commit | awk -F '[v~^]' '{print $2}'`
>>
>> sed ""$i"a mainline inclusion\nfrom
>> mainline-v$version\ncommit $commit\ncategory: feature\nbugzilla:
>> $2\nCVE: NA\n\n--------------------------------\n" -i $p
>> done
>>
>
[PATCH openEuler-1.0-LTS V2 00/90] Enabling AMD Milan series processor support
by Laibin Qiu 19 Jan '22
bugzilla: https://gitee.com/openeuler/kernel/issues/I4MKP4
Babu Moger (1):
KVM: SVM: Clear the CR4 register on reset
David Edmondson (1):
KVM: x86: clflushopt should be treated as a no-op by emulation
Fenghua Yu (2):
x86/cpufeatures: Enumerate MOVDIRI instruction
x86/cpufeatures: Enumerate MOVDIR64B instruction
Haiyan Song (2):
perf vendor events intel: Add Icelake V1.00 event file
perf vendor events intel: Add Tremontx event file v1.02
Isaac Vaughn (1):
EDAC/amd64: Add PCI device IDs for family 17h, model 70h
Jan H. Schönherr (1):
x86/mce: Fix use of uninitialized MCE message string
Jim Mattson (1):
kvm: x86: Expose RDPID in KVM_GET_SUPPORTED_CPUID
John Allen (2):
kvm/svm: PKU not currently supported
x86/microcode/AMD: Increase microcode PATCH_MAX_SIZE
Kai Huang (3):
kvm: x86: Move kvm_set_mmio_spte_mask() from x86.c to mmu.c
kvm: x86: Fix reserved bits related calculation errors caused by MKTME
kvm: x86: Fix L1TF mitigation for shadow MMU
Kan Liang (1):
perf vendor events intel: Add uncore_upi JSON support
Kim Phillips (17):
x86/cpu/amd: Call init_amd_zn() om Family 19h processors too
perf/amd/uncore: Prepare L3 thread mask code for Family 19h
perf/amd/uncore: Make L3 thread mask code more readable
perf/amd/uncore: Add support for Family 19h L3 PMU
arch/x86/amd/ibs: Fix re-arming IBS Fetch
perf/x86/amd/ibs: Fix raw sample data accumulation
perf/amd/uncore: Set all slices and threads to restore perf stat -a
behaviour
perf/x86/amd/ibs: Don't include randomized bits in get_ibs_op_count()
perf/amd/uncore: Prepare to scale for more attributes that vary per
family
perf/amd/uncore: Allow F17h user threadmask and slicemask
specification
perf/amd/uncore: Allow F19h user coreid, threadmask, and sliceid
specification
perf vendor events amd: Add L3 cache events for Family 17h
perf vendor events amd: Remove redundant '['
perf vendor events amd: Enable Family 19h users by matching Zen2
events
perf/x86/amd/ibs: Support 27-bit extended Op/cycle counter
tools/power turbostat: Support AMD Family 19h
perf vendor events amd: Add L2 Prefetch events for zen1
Krish Sadhukhan (1):
KVM: SVM: Replace hard-coded value with #define
Like Xu (1):
perf/x86/amd: Don't touch the AMD64_EVENTSEL_HOSTONLY bit inside the
guest
Liu Jingqi (2):
KVM: x86: expose MOVDIRI CPU feature into VM.
KVM: x86: expose MOVDIR64B CPU feature into VM.
Maciej S. Szmigiero (1):
KVM: mmu: Fix SPTE encoding of MMIO generation upper half
Marcel Bocu (1):
x86/amd_nb: Add PCI device IDs for family 17h, model 70h
Martin Liška (1):
perf vendor events amd: perf PMU events for AMD Family 17h
Nathan Chancellor (1):
perf/amd/uncore: Fix sysfs type mismatch
Paolo Bonzini (3):
KVM: x86: only do L1TF workaround on affected processors
KVM: x86: assign two bits to track SPTE kinds
KVM: x86: fix overlap between SPTE_MMIO_MASK and generation
Rasmus Villemoes (1):
build_bug.h: add wrapper for _Static_assert
Sean Christopherson (13):
KVM: nVMX: Allocate and configure VM{READ,WRITE} bitmaps iff
enable_shadow_vmcs
KVM: x86: Add requisite includes to kvm_cache_regs.h
KVM: x86: Add requisite includes to hyperv.h
KVM: x86: Use a u64 when passing the MMIO gen around
KVM: Explicitly define the "memslot update in-progress" bit
KVM: x86: Refactor the MMIO SPTE generation handling
KVM: x86: Rename access permissions cache member in struct
kvm_vcpu_arch
KVM: x86/mmu: Add explicit access mask for MMIO SPTEs
KVM: x86/mmu: Consolidate "is MMIO SPTE" code
KVM: x86/mmu: Apply max PA check for MMIO sptes to 32-bit KVM
KVM: x86/mmu: Set mmio_value to '0' if reserved #PF can't be generated
KVM: Remove the hack to trigger memslot generation wraparound
KVM: Move the memslot update in-progress flag to bit 63
Sebastian Andrzej Siewior (3):
x86/pkeys: Provide *pkru() helpers
x86/fpu: Only write PKRU if it is different from current
x86/pkeys: Don't check if PKRU is zero before writing it
Tom Lendacky (1):
KVM: SVM: Override default MMIO mask if memory encryption is enabled
Vijay Thakkar (3):
perf vendor events amd: Restrict model detection for zen1 based
processors
perf vendor events amd: Add Zen2 events
perf vendor events amd: Update Zen1 events to V2
Woods, Brian (2):
hwmon/k10temp, x86/amd_nb: Consolidate shared device IDs
x86/amd_nb: Add PCI device IDs for family 17h, model 30h
Yazen Ghannam (24):
EDAC/amd64: Drop some family checks for newer systems
x86/amd_nb: Add Family 19h PCI IDs
EDAC/mce_amd: Always load on SMCA systems
x86/MCE/AMD, EDAC/mce_amd: Add new MP5, NBIO, and PCIE SMCA bank types
x86/MCE/AMD, EDAC/mce_amd: Add new McaTypes for CS, PSP, and SMU units
x86/MCE/AMD, EDAC/mce_amd: Add new error descriptions for some SMCA
bank types
x86/MCE/AMD, EDAC/mce_amd: Add new Load Store unit McaType
EDAC/amd64: Use a macro for iterating over Unified Memory Controllers
EDAC/amd64: Support more than two controllers for chip selects
handling
EDAC/amd64: Initialize DIMM info for systems with more than two
channels
EDAC/amd64: Add Family 17h Model 30h PCI IDs
EDAC/amd64: Support more than two Unified Memory Controllers
EDAC/amd64: Set maximum channel layer size depending on family
EDAC/amd64: Recognize x16 symbol size
EDAC/amd64: Adjust printed chip select sizes when interleaved
EDAC/amd64: Find Chip Select memory size using Address Mask
EDAC/amd64: Cache secondary Chip Select registers
EDAC/amd64: Support asymmetric dual-rank DIMMs
EDAC/amd64: Set grain per DIMM
EDAC/amd64: Make struct amd64_family_type global
EDAC/amd64: Gather hardware information early
EDAC/amd64: Save max number of controllers to family type
EDAC/amd64: Add family ops for Family 19h Models 00h-0Fh
EDAC/amd64: Handle three rank interleaving mode
Documentation/virtual/kvm/mmu.txt | 13 +-
arch/x86/events/amd/ibs.c | 93 +-
arch/x86/events/amd/uncore.c | 179 ++--
arch/x86/events/perf_event.h | 3 +-
arch/x86/include/asm/cpufeatures.h | 4 +-
arch/x86/include/asm/kvm_host.h | 10 +-
arch/x86/include/asm/mce.h | 7 +
arch/x86/include/asm/microcode_amd.h | 2 +-
arch/x86/include/asm/msr-index.h | 1 +
arch/x86/include/asm/perf_event.h | 16 +-
arch/x86/include/asm/pgtable.h | 2 +-
arch/x86/include/asm/special_insns.h | 19 +-
arch/x86/kernel/amd_nb.c | 15 +-
arch/x86/kernel/cpu/amd.c | 3 +-
arch/x86/kernel/cpu/mce/amd.c | 28 +-
arch/x86/kernel/cpu/mce/core.c | 4 +-
arch/x86/kvm/cpuid.c | 6 +-
arch/x86/kvm/emulate.c | 8 +-
arch/x86/kvm/hyperv.h | 2 +
arch/x86/kvm/kvm_cache_regs.h | 2 +
arch/x86/kvm/mmu.c | 215 +++--
arch/x86/kvm/mmu.h | 2 +-
arch/x86/kvm/svm.c | 53 +-
arch/x86/kvm/vmx.c | 53 +-
arch/x86/kvm/x86.c | 33 +-
arch/x86/kvm/x86.h | 4 +-
arch/x86/mm/pkeys.c | 7 -
drivers/edac/amd64_edac.c | 609 ++++++++----
drivers/edac/amd64_edac.h | 31 +-
drivers/edac/mce_amd.c | 134 ++-
drivers/hwmon/k10temp.c | 9 +-
include/linux/build_bug.h | 19 +
include/linux/kvm_host.h | 21 +
include/linux/pci_ids.h | 5 +
.../pmu-events/arch/x86/amdzen1/branch.json | 23 +
.../pmu-events/arch/x86/amdzen1/cache.json | 312 ++++++
.../pmu-events/arch/x86/amdzen1/core.json | 125 +++
.../arch/x86/amdzen1/floating-point.json | 224 +++++
.../pmu-events/arch/x86/amdzen1/memory.json | 184 ++++
.../pmu-events/arch/x86/amdzen1/other.json | 56 ++
.../pmu-events/arch/x86/amdzen2/branch.json | 52 +
.../pmu-events/arch/x86/amdzen2/cache.json | 338 +++++++
.../pmu-events/arch/x86/amdzen2/core.json | 130 +++
.../arch/x86/amdzen2/floating-point.json | 140 +++
.../pmu-events/arch/x86/amdzen2/memory.json | 341 +++++++
.../pmu-events/arch/x86/amdzen2/other.json | 115 +++
.../pmu-events/arch/x86/icelake/cache.json | 552 +++++++++++
.../arch/x86/icelake/floating-point.json | 102 ++
.../pmu-events/arch/x86/icelake/frontend.json | 424 +++++++++
.../pmu-events/arch/x86/icelake/memory.json | 410 ++++++++
.../pmu-events/arch/x86/icelake/other.json | 121 +++
.../pmu-events/arch/x86/icelake/pipeline.json | 892 ++++++++++++++++++
.../arch/x86/icelake/virtual-memory.json | 236 +++++
tools/perf/pmu-events/arch/x86/mapfile.csv | 6 +
.../pmu-events/arch/x86/tremontx/cache.json | 111 +++
.../arch/x86/tremontx/frontend.json | 26 +
.../pmu-events/arch/x86/tremontx/memory.json | 26 +
.../pmu-events/arch/x86/tremontx/other.json | 26 +
.../arch/x86/tremontx/pipeline.json | 111 +++
.../arch/x86/tremontx/uncore-memory.json | 73 ++
.../arch/x86/tremontx/uncore-other.json | 431 +++++++++
.../arch/x86/tremontx/uncore-power.json | 11 +
.../arch/x86/tremontx/virtual-memory.json | 86 ++
tools/perf/pmu-events/jevents.c | 2 +
tools/power/x86/turbostat/turbostat.c | 34 +-
virt/kvm/kvm_main.c | 36 +-
66 files changed, 6839 insertions(+), 529 deletions(-)
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen1/branch.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen1/cache.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen1/core.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen1/floating-point.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen1/memory.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen1/other.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen2/branch.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen2/cache.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen2/core.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen2/floating-point.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen2/memory.json
create mode 100644 tools/perf/pmu-events/arch/x86/amdzen2/other.json
create mode 100644 tools/perf/pmu-events/arch/x86/icelake/cache.json
create mode 100644 tools/perf/pmu-events/arch/x86/icelake/floating-point.json
create mode 100644 tools/perf/pmu-events/arch/x86/icelake/frontend.json
create mode 100644 tools/perf/pmu-events/arch/x86/icelake/memory.json
create mode 100644 tools/perf/pmu-events/arch/x86/icelake/other.json
create mode 100644 tools/perf/pmu-events/arch/x86/icelake/pipeline.json
create mode 100644 tools/perf/pmu-events/arch/x86/icelake/virtual-memory.json
create mode 100644 tools/perf/pmu-events/arch/x86/tremontx/cache.json
create mode 100644 tools/perf/pmu-events/arch/x86/tremontx/frontend.json
create mode 100644 tools/perf/pmu-events/arch/x86/tremontx/memory.json
create mode 100644 tools/perf/pmu-events/arch/x86/tremontx/other.json
create mode 100644 tools/perf/pmu-events/arch/x86/tremontx/pipeline.json
create mode 100644 tools/perf/pmu-events/arch/x86/tremontx/uncore-memory.json
create mode 100644 tools/perf/pmu-events/arch/x86/tremontx/uncore-other.json
create mode 100644 tools/perf/pmu-events/arch/x86/tremontx/uncore-power.json
create mode 100644 tools/perf/pmu-events/arch/x86/tremontx/virtual-memory.json
--
2.22.0
openEuler inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4OFDI
CVE: NA
--------------------------------
Chao Liu (2):
change arm64 configs
change x86 configs
arch/arm64/configs/openeuler_defconfig | 288 ++++++++-----
arch/x86/configs/openeuler_defconfig | 544 +++++++++----------------
2 files changed, 376 insertions(+), 456 deletions(-)
--
2.27.0