
From: Kan Liang <kan.liang@linux.intel.com>

mainline inclusion
from mainline-v5.3-rc1
commit c8872d90e0a3651a096860d3241625ccfa1647e0
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I47H3V
CVE: NA

--------------------------------

commit c8872d90e0a3651a096860d3241625ccfa1647e0 upstream.

Backport summary: backport to kernel 4.19.57 for ICX uncore support.

For an uncore box which can only be accessed by MSR, its reference
box->refcnt is updated during CPU hotplug. The uncore boxes need to be
initialized and exited accordingly for the first/last CPU of a socket.

Starting from the Snow Ridge server, a new type of uncore box is
introduced, which can only be accessed by MMIO. The driver needs to
map/unmap the MMIO space for the first/last CPU of a socket.

Extract the box ref/unref and init/exit code for later reuse.

There is no functional change.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: acme@kernel.org
Cc: eranian@google.com
Link: https://lkml.kernel.org/r/1556672028-119221-4-git-send-email-kan.liang@linux...
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Yunying Sun <yunying.sun@intel.com>
Signed-off-by: Jackie Liu <liuyun01@kylinos.cn>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Reviewed-by: Yang Jihong <yangjihong1@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/x86/events/intel/uncore.c | 56 ++++++++++++++++++++++------------
 1 file changed, 37 insertions(+), 19 deletions(-)

diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 85d4364d2167c..972fb36d794c6 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1179,12 +1179,27 @@ static void uncore_change_context(struct intel_uncore_type **uncores,
 		uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
 }
 
-static int uncore_event_cpu_offline(unsigned int cpu)
+static void uncore_box_unref(struct intel_uncore_type **types, int id)
 {
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
+	struct intel_uncore_type *type;
 	struct intel_uncore_pmu *pmu;
 	struct intel_uncore_box *box;
-	int i, pkg, target;
+	int i;
+
+	for (; *types; types++) {
+		type = *types;
+		pmu = type->pmus;
+		for (i = 0; i < type->num_boxes; i++, pmu++) {
+			box = pmu->boxes[id];
+			if (box && atomic_dec_return(&box->refcnt) == 0)
+				uncore_box_exit(box);
+		}
+	}
+}
+
+static int uncore_event_cpu_offline(unsigned int cpu)
+{
+	int die, target;
 
 	/* Check if exiting cpu is used for collecting uncore events */
 	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
@@ -1203,16 +1218,8 @@ static int uncore_event_cpu_offline(unsigned int cpu)
 
 unref:
 	/* Clear the references */
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (box && atomic_dec_return(&box->refcnt) == 0)
-				uncore_box_exit(box);
-		}
-	}
+	die = topology_logical_package_id(cpu);
+	uncore_box_unref(uncore_msr_uncores, die);
 	return 0;
 }
 
@@ -1255,15 +1262,15 @@ static int allocate_boxes(struct intel_uncore_type **types,
 	return -ENOMEM;
 }
 
-static int uncore_event_cpu_online(unsigned int cpu)
+static int uncore_box_ref(struct intel_uncore_type **types,
+			  int id, unsigned int cpu)
 {
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
+	struct intel_uncore_type *type;
 	struct intel_uncore_pmu *pmu;
 	struct intel_uncore_box *box;
-	int i, ret, pkg, target;
+	int i, ret;
 
-	pkg = topology_logical_package_id(cpu);
-	ret = allocate_boxes(types, pkg, cpu);
+	ret = allocate_boxes(types, id, cpu);
 	if (ret)
 		return ret;
 
@@ -1271,11 +1278,22 @@ static int uncore_event_cpu_online(unsigned int cpu)
 		type = *types;
 		pmu = type->pmus;
 		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
+			box = pmu->boxes[id];
 			if (box && atomic_inc_return(&box->refcnt) == 1)
 				uncore_box_init(box);
 		}
 	}
+	return 0;
+}
+
+static int uncore_event_cpu_online(unsigned int cpu)
+{
+	int ret, die, target;
+
+	die = topology_logical_die_id(cpu);
+	ret = uncore_box_ref(uncore_msr_uncores, die, cpu);
+	if (ret)
+		return ret;
 
 	/*
 	 * Check if there is an online cpu in the package
-- 
2.25.1
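
As context for the "later reuse" note in the commit message: a minimal sketch of how the
extracted uncore_box_ref()/uncore_box_unref() helpers could be called a second time for
MMIO-only uncore units from the same CPU hotplug callbacks. This is illustrative only, not
part of this patch: "uncore_mmio_uncores" and these exact call sites are assumptions about
a follow-up change, and the target-CPU migration logic of the real callbacks is omitted.

	/* Illustrative sketch only; reuses the helpers added by this patch. */
	static int uncore_event_cpu_online(unsigned int cpu)
	{
		int die, ret;

		die = topology_logical_die_id(cpu);

		/* MSR-based units: init boxes on the first CPU of a die. */
		ret = uncore_box_ref(uncore_msr_uncores, die, cpu);
		if (ret)
			return ret;

		/* Assumed MMIO-based units: map their MMIO space the same way. */
		return uncore_box_ref(uncore_mmio_uncores, die, cpu);
	}

	static int uncore_event_cpu_offline(unsigned int cpu)
	{
		int die = topology_logical_die_id(cpu);

		/* Drop references; the last CPU of a die exits/unmaps the boxes. */
		uncore_box_unref(uncore_msr_uncores, die);
		uncore_box_unref(uncore_mmio_uncores, die);
		return 0;
	}

The point of the refactoring is visible here: the per-die refcount walk is written once, and
each uncore access type (MSR today, MMIO later) only supplies its own type array.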