mailweb.openeuler.org
kernel@openeuler.org

  • 30 participants
  • 19678 discussions
[PATCH kernel-4.19 3/3] x86/mce: Add Zhaoxin LMCE support
by LeoLiu-oc 25 Mar '21

mainline inclusion
from mainline-5.5
commit 70f0c230031dfef3c9b3e37b2a8c18d3f7186fb2
category: x86/mce

Add support for more Zhaoxin CPUs.

--------------------------------

Newer Zhaoxin CPUs support LMCE compatible with Intel. Add support for that.

[ bp: Export functions and massage. ]

Signed-off-by: Tony W Wang-oc <TonyWWang-oc(a)zhaoxin.com>
Signed-off-by: Borislav Petkov <bp(a)suse.de>
Cc: CooperYan(a)zhaoxin.com
Cc: DavidWang(a)zhaoxin.com
Cc: HerryYang(a)zhaoxin.com
Cc: "H. Peter Anvin" <hpa(a)zytor.com>
Cc: Ingo Molnar <mingo(a)redhat.com>
Cc: linux-edac <linux-edac(a)vger.kernel.org>
Cc: QiyuanWang(a)zhaoxin.com
Cc: Thomas Gleixner <tglx(a)linutronix.de>
Cc: Tony Luck <tony.luck(a)intel.com>
Cc: x86-ml <x86(a)kernel.org>
Link: https://lkml.kernel.org/r/1568787573-1297-5-git-send-email-TonyWWang-oc@zha…
Signed-off-by: LeoLiu-oc <LeoLiu-oc(a)zhaoxin.com>
---
 arch/x86/kernel/cpu/mce/core.c     | 25 +++++++++++++++++++++++--
 arch/x86/kernel/cpu/mce/intel.c    |  4 ++--
 arch/x86/kernel/cpu/mce/internal.h |  4 ++++
 3 files changed, 29 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 71ec7afbabdf..8534b952af76 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -1125,6 +1125,13 @@ static bool __mc_check_crashing_cpu(int cpu)
     u64 mcgstatus;
 
     mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
+
+    if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN ||
+        boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) {
+        if (mcgstatus & MCG_STATUS_LMCES)
+            return false;
+    }
+
     if (mcgstatus & MCG_STATUS_RIPV) {
         mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
         return true;
@@ -1274,9 +1281,11 @@ void do_machine_check(struct pt_regs *regs, long error_code)
 
     /*
      * Check if this MCE is signaled to only this logical processor,
-     * on Intel only.
+     * on Intel, Zhaoxin only.
      */
-    if (m.cpuvendor == X86_VENDOR_INTEL)
+    if (m.cpuvendor == X86_VENDOR_INTEL ||
+        m.cpuvendor == X86_VENDOR_ZHAOXIN ||
+        m.cpuvendor == X86_VENDOR_CENTAUR)
         lmce = m.mcgstatus & MCG_STATUS_LMCES;
 
     /*
@@ -1745,9 +1754,15 @@ static void mce_zhaoxin_feature_init(struct cpuinfo_x86 *c)
     }
 
     intel_init_cmci();
+    intel_init_lmce();
     mce_adjust_timer = cmci_intel_adjust_timer;
 }
 
+static void mce_zhaoxin_feature_clear(struct cpuinfo_x86 *c)
+{
+    intel_clear_lmce();
+}
+
 static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
 {
     switch (c->x86_vendor) {
@@ -1781,6 +1796,12 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
     case X86_VENDOR_INTEL:
         mce_intel_feature_clear(c);
         break;
+
+    case X86_VENDOR_ZHAOXIN:
+    case X86_VENDOR_CENTAUR:
+        mce_zhaoxin_feature_clear(c);
+        break;
+
     default:
         break;
     }
diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c
index 6a220c999a01..f6f3b2675164 100644
--- a/arch/x86/kernel/cpu/mce/intel.c
+++ b/arch/x86/kernel/cpu/mce/intel.c
@@ -445,7 +445,7 @@ void intel_init_cmci(void)
     cmci_recheck();
 }
 
-static void intel_init_lmce(void)
+void intel_init_lmce(void)
 {
     u64 val;
 
@@ -458,7 +458,7 @@ static void intel_init_lmce(void)
         wrmsrl(MSR_IA32_MCG_EXT_CTL, val | MCG_EXT_CTL_LMCE_EN);
 }
 
-static void intel_clear_lmce(void)
+void intel_clear_lmce(void)
 {
     u64 val;
 
diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
index 99d73d18f2c4..22e8aa8c8fe7 100644
--- a/arch/x86/kernel/cpu/mce/internal.h
+++ b/arch/x86/kernel/cpu/mce/internal.h
@@ -53,12 +53,16 @@ bool mce_intel_cmci_poll(void);
 void mce_intel_hcpu_update(unsigned long cpu);
 void cmci_disable_bank(int bank);
 void intel_init_cmci(void);
+void intel_init_lmce(void);
+void intel_clear_lmce(void);
 #else
 # define cmci_intel_adjust_timer mce_adjust_timer_default
 static inline bool mce_intel_cmci_poll(void) { return false; }
 static inline void mce_intel_hcpu_update(unsigned long cpu) { }
 static inline void cmci_disable_bank(int bank) { }
 static inline void intel_init_cmci(void) { }
+static inline void intel_init_lmce(void) { }
+static inline void intel_clear_lmce(void) { }
 #endif
 
 void mce_timer_kick(unsigned long interval);
--
2.20.1
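The hunks above route Zhaoxin/Centaur through the existing Intel LMCE helpers, whose bodies are not shown here. As a read-only userspace sketch of the three pieces of state involved (an illustration using the msr driver and SDM-documented MSR numbers, not code from the patch):

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

/* MCG_CAP (0x179) bit 27 = LMCE supported, IA32_FEATURE_CONTROL (0x3a)
 * bit 20 = firmware opt-in, MCG_EXT_CTL (0x4d0) bit 0 = LMCE enabled by
 * the kernel. Needs root and CONFIG_X86_MSR. */
static int rdmsr_cpu0(uint32_t msr, uint64_t *val)
{
    int fd = open("/dev/cpu/0/msr", O_RDONLY);
    if (fd < 0)
        return -1;
    int ok = pread(fd, val, 8, msr) == 8;
    close(fd);
    return ok ? 0 : -1;
}

int main(void)
{
    uint64_t cap = 0, fc = 0, ext = 0;

    if (rdmsr_cpu0(0x179, &cap) || rdmsr_cpu0(0x3a, &fc))
        return perror("rdmsr"), 1;
    printf("LMCE supported: %s, firmware opt-in: %s\n",
           (cap >> 27) & 1 ? "yes" : "no", (fc >> 20) & 1 ? "yes" : "no");
    if (((cap >> 27) & 1) && !rdmsr_cpu0(0x4d0, &ext))
        printf("LMCE enabled by kernel: %s\n", ext & 1 ? "yes" : "no");
    return 0;
}

Once LMCE is enabled, MCG_STATUS.LMCES tells do_machine_check() that the error was delivered only to the local CPU, which is why the hunks above test that bit before deciding whether the cross-CPU rendezvous is needed.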
[PATCH kernel-4.19 2/3] x86/mce: Add Zhaoxin CMCI support
by LeoLiu-oc 25 Mar '21

mainline inclusion
from mainline-5.5
commit 5a3d56a034be9e8e87a6cb9ed3f2928184db1417
category: x86/mce

Add support for more Zhaoxin CPUs.

--------------------------------

All newer Zhaoxin CPUs support CMCI and are compatible with Intel's
Machine-Check Architecture. Add that support for Zhaoxin CPUs.

[ bp: Massage comments and export intel_init_cmci(). ]

Signed-off-by: Tony W Wang-oc <TonyWWang-oc(a)zhaoxin.com>
Signed-off-by: Borislav Petkov <bp(a)suse.de>
Cc: CooperYan(a)zhaoxin.com
Cc: DavidWang(a)zhaoxin.com
Cc: HerryYang(a)zhaoxin.com
Cc: "H. Peter Anvin" <hpa(a)zytor.com>
Cc: Ingo Molnar <mingo(a)redhat.com>
Cc: linux-edac <linux-edac(a)vger.kernel.org>
Cc: QiyuanWang(a)zhaoxin.com
Cc: Thomas Gleixner <tglx(a)linutronix.de>
Cc: Tony Luck <tony.luck(a)intel.com>
Cc: x86-ml <x86(a)kernel.org>
Link: https://lkml.kernel.org/r/1568787573-1297-4-git-send-email-TonyWWang-oc@zha…
Signed-off-by: LeoLiu-oc <LeoLiu-oc(a)zhaoxin.com>
---
 arch/x86/kernel/cpu/mce/core.c     | 30 +++++++++++++++++++-----------
 arch/x86/kernel/cpu/mce/intel.c    |  7 +++++--
 arch/x86/kernel/cpu/mce/internal.h |  2 ++
 3 files changed, 26 insertions(+), 13 deletions(-)

diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index dce0fbd4cb0f..71ec7afbabdf 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -1726,19 +1726,26 @@ static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
     }
 }
 
-static void mce_centaur_feature_init(struct cpuinfo_x86 *c)
+static void mce_zhaoxin_feature_init(struct cpuinfo_x86 *c)
 {
     struct mca_config *cfg = &mca_cfg;
-
-    /*
-     * All newer Centaur CPUs support MCE broadcasting. Enable
-     * synchronization with a one second timeout.
-     */
-    if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) ||
-        c->x86 > 6) {
-        if (cfg->monarch_timeout < 0)
-            cfg->monarch_timeout = USEC_PER_SEC;
+    /*
+     * These CPUs have MCA bank 8 which reports only one error type called
+     * SVAD (System View Address Decoder). The reporting of that error is
+     * controlled by IA32_MC8.CTL.0.
+     *
+     * If enabled, prefetching on these CPUs will cause SVAD MCE when
+     * virtual machines start and result in a system panic. Always disable
+     * bank 8 SVAD error by default.
+     */
+    if ((c->x86 == 7 && c->x86_model == 0x1b) ||
+        (c->x86_model == 0x19 || c->x86_model == 0x1f)) {
+        if (cfg->banks > 8)
+            mce_banks[8].ctl = 0;
     }
+
+    intel_init_cmci();
+    mce_adjust_timer = cmci_intel_adjust_timer;
 }
 
 static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
@@ -1759,7 +1766,8 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
         break;
 
     case X86_VENDOR_CENTAUR:
-        mce_centaur_feature_init(c);
+    case X86_VENDOR_ZHAOXIN:
+        mce_zhaoxin_feature_init(c);
         break;
 
     default:
diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c
index 693c8cfac75d..6a220c999a01 100644
--- a/arch/x86/kernel/cpu/mce/intel.c
+++ b/arch/x86/kernel/cpu/mce/intel.c
@@ -85,8 +85,11 @@ static int cmci_supported(int *banks)
      * initialization is vendor keyed and this
      * makes sure none of the backdoors are entered otherwise.
      */
-    if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+    if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL &&
+        boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN &&
+        boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR)
         return 0;
+
     if (!boot_cpu_has(X86_FEATURE_APIC) || lapic_get_maxlvt() < 6)
         return 0;
     rdmsrl(MSR_IA32_MCG_CAP, cap);
@@ -423,7 +426,7 @@ void cmci_disable_bank(int bank)
     raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
-static void intel_init_cmci(void)
+void intel_init_cmci(void)
 {
     int banks;
 
diff --git a/arch/x86/kernel/cpu/mce/internal.h b/arch/x86/kernel/cpu/mce/internal.h
index ceb67cd5918f..99d73d18f2c4 100644
--- a/arch/x86/kernel/cpu/mce/internal.h
+++ b/arch/x86/kernel/cpu/mce/internal.h
@@ -52,11 +52,13 @@ unsigned long cmci_intel_adjust_timer(unsigned long interval);
 bool mce_intel_cmci_poll(void);
 void mce_intel_hcpu_update(unsigned long cpu);
 void cmci_disable_bank(int bank);
+void intel_init_cmci(void);
 #else
 # define cmci_intel_adjust_timer mce_adjust_timer_default
 static inline bool mce_intel_cmci_poll(void) { return false; }
 static inline void mce_intel_hcpu_update(unsigned long cpu) { }
 static inline void cmci_disable_bank(int bank) { }
+static inline void intel_init_cmci(void) { }
 #endif
 
 void mce_timer_kick(unsigned long interval);
--
2.20.1
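The exported intel_init_cmci() path ends in cmci_discover(), which programs per-bank CMCI state that is not visible in the hunks above. A read-only userspace sketch of that state (illustration only; MSR numbers and bit layout per the Intel SDM: MCG_CAP(0x179) bit 10 = CMCI supported, bits 7:0 = bank count; MCi_CTL2(0x280+i) bit 30 = CMCI enable, bits 14:0 = threshold):

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

/* Needs root and the msr driver; reads only, never writes. */
int main(void)
{
    int fd = open("/dev/cpu/0/msr", O_RDONLY);
    uint64_t cap, ctl2;
    unsigned int i, banks;

    if (fd < 0 || pread(fd, &cap, 8, 0x179) != 8)
        return perror("msr"), 1;
    banks = cap & 0xff;
    printf("CMCI supported: %s, %u banks\n", (cap >> 10) & 1 ? "yes" : "no", banks);
    for (i = 0; i < banks; i++) {
        if (pread(fd, &ctl2, 8, 0x280 + i) != 8)
            continue;   /* bank has no CTL2 register */
        printf("bank %2u: CMCI %s, threshold %llu\n", i,
               (ctl2 >> 30) & 1 ? "on" : "off",
               (unsigned long long)(ctl2 & 0x7fff));
    }
    close(fd);
    return 0;
}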
[PATCH kernel-4.19 2/2] x86/acpi/cstate: Add Zhaoxin processors support for cache flush policy in C3
by LeoLiu-oc 25 Mar '21

Signed-off-by: LeoLiu-oc <LeoLiu-oc(a)zhaoxin.com>
---
 arch/x86/kernel/acpi/cstate.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 45745ecaa624..5eebe05b00fb 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -63,6 +63,21 @@ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
             c->x86_stepping >= 0x0e))
             flags->bm_check = 1;
     }
+
+    if (c->x86_vendor == X86_VENDOR_ZHAOXIN) {
+        /*
+         * All Zhaoxin CPUs that support C3 share cache.
+         * And caches should not be flushed by software while
+         * entering C3 type state.
+         */
+        flags->bm_check = 1;
+        /*
+         * On all recent Zhaoxin platforms, ARB_DISABLE is a nop.
+         * So, set bm_control to zero to indicate that ARB_DISABLE
+         * is not required while entering C3 type state.
+         */
+        flags->bm_control = 0;
+    }
 }
 EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
--
2.20.1
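The two flags set above (and bm_check in the Centaur patch that follows) are consumed by the ACPI idle code. A self-contained toy illustration of the decision they control; the stub functions stand in for the real kernel primitives in drivers/acpi/processor_idle.c and are named only for this sketch:

#include <stdio.h>

/* Toy stand-ins for kernel primitives; names here are illustrative only. */
struct acpi_processor_flags_sketch { int bm_check; int bm_control; };

static void flush_caches(void) { puts("WBINVD: flush caches in software"); }
static void arb_disable(void)  { puts("ARB_DISABLE: stop bus-master arbitration"); }
static void arb_enable(void)   { puts("ARB_ENABLE"); }
static void enter_c3(void)     { puts("enter C3"); }

/* Conceptual C3 entry path: what bm_check=1 and bm_control=0 let the kernel skip. */
static void c3_entry_sketch(const struct acpi_processor_flags_sketch *f)
{
    if (!f->bm_check)
        flush_caches();   /* needed only if HW does not keep caches coherent in C3 */
    if (f->bm_control)
        arb_disable();    /* needed only where ARB_DISABLE is not a nop */
    enter_c3();
    if (f->bm_control)
        arb_enable();
}

int main(void)
{
    /* Zhaoxin per the patch: no software flush, no ARB_DISABLE dance. */
    struct acpi_processor_flags_sketch zx = { .bm_check = 1, .bm_control = 0 };
    c3_entry_sketch(&zx);
    return 0;
}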
[PATCH kernel-4.19 1/2] x86/power: Optimize C3 entry on Centaur CPUs
by LeoLiu-oc 25 Mar '21

mainline inclusion
from mainline-5.2
commit 987ddbe4870b53623d76ac64044c55a13e368113
category: x86/power

--------------------------------

For new Centaur CPUs the ucode will take care of the preservation of cache
coherence between CPU cores in C-states regardless of how deep the C-states
are. So, it is not necessary to flush the caches in software before entering
C3. This useless operation will cause performance drop for the cores which
share some caches with the idling core.

Signed-off-by: David Wang <davidwang(a)zhaoxin.com>
Reviewed-by: Thomas Gleixner <tglx(a)linutronix.de>
Acked-by: Pavel Machek <pavel(a)ucw.cz>
Cc: Linus Torvalds <torvalds(a)linux-foundation.org>
Cc: Peter Zijlstra <peterz(a)infradead.org>
Cc: brucechang(a)via-alliance.com
Cc: cooperyan(a)zhaoxin.com
Cc: len.brown(a)intel.com
Cc: linux-pm(a)kernel.org
Cc: qiyuanwang(a)zhaoxin.com
Cc: rjw(a)rjwysocki.net
Cc: timguo(a)zhaoxin.com
Link: http://lkml.kernel.org/r/1545900110-2757-1-git-send-email-davidwang@zhaoxin…
[ Tidy up the comment. ]
Signed-off-by: Ingo Molnar <mingo(a)kernel.org>
Signed-off-by: LeoLiu-oc <LeoLiu-oc(a)zhaoxin.com>
---
 arch/x86/kernel/acpi/cstate.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 92539a1c3e31..45745ecaa624 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -51,6 +51,18 @@ void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
     if (c->x86_vendor == X86_VENDOR_INTEL &&
         (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 0x0f)))
         flags->bm_control = 0;
+    /*
+     * For all recent Centaur CPUs, the ucode will make sure that each
+     * core can keep cache coherence with each other while entering C3
+     * type state. So, set bm_check to 1 to indicate that the kernel
+     * doesn't need to execute a cache flush operation (WBINVD) when
+     * entering C3 type state.
+     */
+    if (c->x86_vendor == X86_VENDOR_CENTAUR) {
+        if (c->x86 > 6 || (c->x86 == 6 && c->x86_model == 0x0f &&
+            c->x86_stepping >= 0x0e))
+            flags->bm_check = 1;
+    }
 }
 EXPORT_SYMBOL(acpi_processor_power_init_bm_check);
--
2.20.1
[PATCH kernel-4.19 6/6] x86/cpu: Add detect extended topology for Zhaoxin CPUs
by LeoLiu-oc 25 Mar '21

Detect the extended topology information of Zhaoxin CPUs if available.

The patch is scheduled to be submitted to the kernel mainline in 2021.

Signed-off-by: LeoLiu-oc <LeoLiu-oc(a)zhaoxin.com>
---
 arch/x86/kernel/cpu/centaur.c | 20 +++++++++++++++++++-
 arch/x86/kernel/cpu/zhaoxin.c |  7 ++++++-
 2 files changed, 25 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 8735be464bc1..49b33cc78751 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -115,6 +115,21 @@ static void early_init_centaur(struct cpuinfo_x86 *c)
         set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
         set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
     }
+
+    if (c->cpuid_level >= 0x00000001) {
+        u32 eax, ebx, ecx, edx;
+
+        cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
+        /*
+         * If HTT (EDX[28]) is set EBX[16:23] contain the number of
+         * apicids which are reserved per package. Store the resulting
+         * shift value for the package management code.
+         */
+        if (edx & (1U << 28))
+            c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
+    }
+    if (detect_extended_topology_early(c) < 0)
+        detect_ht_early(c);
 }
 
 static void centaur_detect_vmx_virtcap(struct cpuinfo_x86 *c)
@@ -158,8 +173,11 @@ static void init_centaur(struct cpuinfo_x86 *c)
     clear_cpu_cap(c, 0*32+31);
 #endif
     early_init_centaur(c);
+    detect_extended_topology(c);
     init_intel_cacheinfo(c);
-    detect_num_cpu_cores(c);
+    if (!cpu_has(c, X86_FEATURE_XTOPOLOGY))
+        detect_num_cpu_cores(c);
+
 #ifdef CONFIG_X86_32
     detect_ht(c);
 #endif
diff --git a/arch/x86/kernel/cpu/zhaoxin.c b/arch/x86/kernel/cpu/zhaoxin.c
index 452fd0a6bc61..b6fc969b3e74 100644
--- a/arch/x86/kernel/cpu/zhaoxin.c
+++ b/arch/x86/kernel/cpu/zhaoxin.c
@@ -85,6 +85,8 @@ static void early_init_zhaoxin(struct cpuinfo_x86 *c)
             c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
     }
 
+    if (detect_extended_topology_early(c) < 0)
+        detect_ht_early(c);
 }
 
 static void zhaoxin_detect_vmx_virtcap(struct cpuinfo_x86 *c)
@@ -115,8 +117,11 @@ static void zhaoxin_detect_vmx_virtcap(struct cpuinfo_x86 *c)
 
 static void init_zhaoxin(struct cpuinfo_x86 *c)
 {
     early_init_zhaoxin(c);
+    detect_extended_topology(c);
     init_intel_cacheinfo(c);
-    detect_num_cpu_cores(c);
+    if (!cpu_has(c, X86_FEATURE_XTOPOLOGY))
+        detect_num_cpu_cores(c);
+
 #ifdef CONFIG_X86_32
     detect_ht(c);
 #endif
--
2.20.1
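detect_extended_topology() parses CPUID leaf 0xB. A small userspace sketch of that enumeration (assumes a reasonably recent GCC/Clang <cpuid.h>; field layout per the standard x86 topology leaf, not code from the patch):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx, subleaf;

    for (subleaf = 0; ; subleaf++) {
        if (!__get_cpuid_count(0x0b, subleaf, &eax, &ebx, &ecx, &edx))
            break;
        unsigned int level_type = (ecx >> 8) & 0xff;   /* 1 = SMT, 2 = Core */
        if (!level_type)
            break;
        printf("level %u: type %u, APIC-ID shift %u, logical CPUs %u, x2APIC id %#x\n",
               subleaf, level_type, eax & 0x1f, ebx & 0xffff, edx);
    }
    return 0;
}

If the leaf is absent (the helper above returns nothing useful), the patch falls back to detect_ht_early()/detect_num_cpu_cores(), mirroring the CPUID(1) HTT path kept in the hunks.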
[PATCH kernel-4.19 5/6] x86/cpufeatures: Add Zhaoxin feature bits
by LeoLiu-oc 25 Mar '21

Add Zhaoxin feature bits on Zhaoxin CPUs.

The patch is scheduled to be submitted to the kernel mainline in 2021.

Signed-off-by: LeoLiu-oc <LeoLiu-oc(a)zhaoxin.com>
---
 arch/x86/include/asm/cpufeatures.h | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index f7f9604b10cc..48535113efa6 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -145,8 +145,12 @@
 #define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
 
 /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
+#define X86_FEATURE_SM2 (5*32+0) /* sm2 present*/
+#define X86_FEATURE_SM2_EN (5*32+1) /* sm2 enabled */
 #define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
 #define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
+#define X86_FEATURE_CCS (5*32+4) /* "sm3 sm4" present */
+#define X86_FEATURE_CCS_EN (5*32+5) /* "sm3_en sm4_en" enabled */
 #define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
 #define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
 #define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
@@ -155,6 +159,23 @@
 #define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
 #define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
 #define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
+#define X86_FEATURE_ZX_FMA (5*32+15) /* FMA supported */
+#define X86_FEATURE_PARALLAX (5*32+16) /* Adaptive P-state control present */
+#define X86_FEATURE_PARALLAX_EN (5*32+17) /* Adaptive P-state control enabled */
+#define X86_FEATURE_OVERSTRESS (5*32+18) /* Overstress Feature for auto overclock present */
+#define X86_FEATURE_OVERSTRESS_EN (5*32+19) /* Overstress Feature for auto overclock enabled */
+#define X86_FEATURE_TM3 (5*32+20) /* Thermal Monitor 3 present */
+#define X86_FEATURE_TM3_EN (5*32+21) /* Thermal Monitor 3 enabled */
+#define X86_FEATURE_RNG2 (5*32+22) /* 2nd generation of RNG present */
+#define X86_FEATURE_RNG2_EN (5*32+23) /* 2nd generation of RNG enabled */
+#define X86_FEATURE_SEM (5*32+24) /* SME feature present */
+#define X86_FEATURE_PHE2 (5*32+25) /* SHA384 and SHA 512 present */
+#define X86_FEATURE_PHE2_EN (5*32+26) /* SHA384 and SHA 512 enabled */
+#define X86_FEATURE_XMODX (5*32+27) /* "rsa" XMODEXP and MONTMUL2 instructions are present */
+#define X86_FEATURE_XMODX_EN (5*32+28) /* "rsa_en" XMODEXP and MONTMUL2instructions are enabled */
+#define X86_FEATURE_VEX (5*32+29) /* VEX instructions are present */
+#define X86_FEATURE_VEX_EN (5*32+30) /* VEX instructions are enabled */
+#define X86_FEATURE_STK (5*32+31) /* STK are present */
 
 /* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */
 #define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
--
2.20.1
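These word-5 bits map directly onto EDX of the Centaur/Zhaoxin extended feature leaf. A userspace sketch that reads a few of them, using the bit positions from the hunk above; the leaf is probed via 0xC0000000 first, the same check zhaoxin.c performs (assumes a GCC/Clang <cpuid.h>):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    if (__get_cpuid_max(0xC0000000, 0) < 0xC0000001) {
        puts("no Centaur/Zhaoxin extended feature leaf");
        return 1;
    }
    __cpuid(0xC0000001, eax, ebx, ecx, edx);
    printf("SM2  present/enabled: %u/%u\n", (edx >> 0) & 1, (edx >> 1) & 1);
    printf("CCS  present/enabled: %u/%u\n", (edx >> 4) & 1, (edx >> 5) & 1);
    printf("PHE2 present/enabled: %u/%u\n", (edx >> 25) & 1, (edx >> 26) & 1);
    return 0;
}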
[PATCH kernel-4.19 4/6] x86/cpu/centaur: Add Centaur family >=7 CPUs initialization support
by LeoLiu-oc 25 Mar '21

mainline inclusion
from mainline-5.9
commit 33b4711df4c1b3aec7c267c60fc24abccfadd40c
category: x86/cpu

--------------------------------

Add Centaur family >=7 CPUs specific initialization support.

Signed-off-by: Tony W Wang-oc <TonyWWang-oc(a)zhaoxin.com>
Signed-off-by: Borislav Petkov <bp(a)suse.de>
Link: https://lkml.kernel.org/r/1599562666-31351-3-git-send-email-TonyWWang-oc@zh…
Signed-off-by: LeoLiu-oc <LeoLiu-oc(a)zhaoxin.com>
---
 arch/x86/kernel/cpu/centaur.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index b3be281334e4..8735be464bc1 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -71,6 +71,9 @@ static void init_c3(struct cpuinfo_x86 *c)
         c->x86_cache_alignment = c->x86_clflush_size * 2;
         set_cpu_cap(c, X86_FEATURE_REP_GOOD);
     }
+
+    if (c->x86 >= 7)
+        set_cpu_cap(c, X86_FEATURE_REP_GOOD);
 }
 
 enum {
@@ -101,7 +104,8 @@ static void early_init_centaur(struct cpuinfo_x86 *c)
     if (c->x86 == 5)
         set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
 #endif
-    if (c->x86 == 6 && c->x86_model >= 0xf)
+    if ((c->x86 == 6 && c->x86_model >= 0xf) ||
+        (c->x86 >= 7))
         set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 
 #ifdef CONFIG_X86_64
@@ -235,7 +239,7 @@ static void init_centaur(struct cpuinfo_x86 *c)
         sprintf(c->x86_model_id, "WinChip %s", name);
     }
 #endif
-    if (c->x86 == 6)
+    if (c->x86 == 6 || c->x86 >= 7)
         init_c3(c);
 #ifdef CONFIG_X86_64
     set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
--
2.20.1
[PATCH kernel-4.19 3/6] x86/cpu/centaur: Replace two-condition switch-case with an if statement
by LeoLiu-oc 25 Mar '21

mainline inclusion
from mainline-5.9
commit 8687bdc04128b2bd16faaae11db10128ad0da7b8
category: x86/cpu

--------------------------------

Use normal if statements instead of a two-condition switch-case.

[ bp: Massage commit message. ]

Signed-off-by: Tony W Wang-oc <TonyWWang-oc(a)zhaoxin.com>
Signed-off-by: Borislav Petkov <bp(a)suse.de>
Link: https://lkml.kernel.org/r/1599562666-31351-2-git-send-email-TonyWWang-oc@zh…
Signed-off-by: LeoLiu-oc <LeoLiu-oc(a)zhaoxin.com>
---
 arch/x86/kernel/cpu/centaur.c | 23 ++++++++---------------
 1 file changed, 8 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index b98529e50d6f..b3be281334e4 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -96,18 +96,14 @@ enum {
 
 static void early_init_centaur(struct cpuinfo_x86 *c)
 {
-    switch (c->x86) {
 #ifdef CONFIG_X86_32
-    case 5:
-        /* Emulate MTRRs using Centaur's MCR. */
+    /* Emulate MTRRs using Centaur's MCR. */
+    if (c->x86 == 5)
         set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
-        break;
 #endif
-    case 6:
-        if (c->x86_model >= 0xf)
-            set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
-        break;
-    }
+    if (c->x86 == 6 && c->x86_model >= 0xf)
+        set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+
 #ifdef CONFIG_X86_64
     set_cpu_cap(c, X86_FEATURE_SYSENTER32);
 #endif
@@ -176,9 +172,8 @@ static void init_centaur(struct cpuinfo_x86 *c)
         set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
     }
 
-    switch (c->x86) {
 #ifdef CONFIG_X86_32
-    case 5:
+    if (c->x86 == 5) {
         switch (c->x86_model) {
         case 4:
             name = "C6";
@@ -238,12 +233,10 @@ static void init_centaur(struct cpuinfo_x86 *c)
             c->x86_cache_size = (cc>>24)+(dd>>24);
         }
         sprintf(c->x86_model_id, "WinChip %s", name);
-        break;
+    }
 #endif
-    case 6:
+    if (c->x86 == 6)
         init_c3(c);
-        break;
-    }
 #ifdef CONFIG_X86_64
     set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
 #endif
--
2.20.1
[PATCH kernel-4.19 2/6] x86/cpu: Remove redundant cpu_detect_cache_sizes() call
by LeoLiu-oc 25 Mar '21

mainline inclusion
from mainline-5.6
commit 283bab9809786cf41798512f5c1e97f4b679ba96
category: x86/cpu

--------------------------------

Both functions call init_intel_cacheinfo() which computes L2 and L3 cache
sizes from CPUID(4). But then they also call cpu_detect_cache_sizes() a bit
later which computes ->x86_tlbsize and L2 size from CPUID(80000006).

However, the latter call is not needed because

 - on these CPUs, CPUID(80000006).EBX for ->x86_tlbsize is reserved

 - CPUID(80000006).ECX for the L2 size has the same result as CPUID(4)

Therefore, remove the latter call to simplify the code.

[ bp: Rewrite commit message. ]

Signed-off-by: Tony W Wang-oc <TonyWWang-oc(a)zhaoxin.com>
Signed-off-by: Borislav Petkov <bp(a)suse.de>
Link: https://lkml.kernel.org/r/1579075257-6985-1-git-send-email-TonyWWang-oc@zha….
Signed-off-by: LeoLiu-oc <LeoLiu-oc(a)zhaoxin.com>
---
 arch/x86/kernel/cpu/centaur.c | 2 --
 arch/x86/kernel/cpu/zhaoxin.c | 2 --
 2 files changed, 4 deletions(-)

diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 14433ff5b828..b98529e50d6f 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -71,8 +71,6 @@ static void init_c3(struct cpuinfo_x86 *c)
         c->x86_cache_alignment = c->x86_clflush_size * 2;
         set_cpu_cap(c, X86_FEATURE_REP_GOOD);
     }
-
-    cpu_detect_cache_sizes(c);
 }
 
 enum {
diff --git a/arch/x86/kernel/cpu/zhaoxin.c b/arch/x86/kernel/cpu/zhaoxin.c
index 8e6f2f4b4afe..452fd0a6bc61 100644
--- a/arch/x86/kernel/cpu/zhaoxin.c
+++ b/arch/x86/kernel/cpu/zhaoxin.c
@@ -58,8 +58,6 @@ static void init_zhaoxin_cap(struct cpuinfo_x86 *c)
     if (c->x86 >= 0x6)
         set_cpu_cap(c, X86_FEATURE_REP_GOOD);
-
-    cpu_detect_cache_sizes(c);
 }
 
 static void early_init_zhaoxin(struct cpuinfo_x86 *c)
--
2.20.1
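For reference, this is the CPUID(0x80000006) data the removed cpu_detect_cache_sizes() call parsed; on these CPUs EBX (TLB info) is reserved and ECX duplicates what CPUID(4) already reports. A userspace sketch (<cpuid.h> assumed; standard extended-leaf encoding, not code from the kernel):

#include <stdio.h>
#include <cpuid.h>

/* ECX[31:16] L2 size in KB, ECX[15:12] associativity code, ECX[7:0] line size. */
int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    if (!__get_cpuid(0x80000006, &eax, &ebx, &ecx, &edx))
        return 1;
    printf("L2: %u KB, %u-byte lines, associativity code %u (EBX=%#x)\n",
           ecx >> 16, ecx & 0xff, (ecx >> 12) & 0xf, ebx);
    return 0;
}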
[PATCH kernel-4.19 1/6] x86/cpu: Create Zhaoxin processors architecture support file
by LeoLiu-oc 25 Mar '21

mainline inclusion
from mainline-5.2
commit 761fdd5e3327db6c646a09bab5ad48cd42680cd2
category: x86/cpu

--------------------------------

Add x86 architecture support for new Zhaoxin processors. Carve out
initialization code needed by Zhaoxin processors into a separate compilation
unit. To identify Zhaoxin CPU, add a new vendor type X86_VENDOR_ZHAOXIN for
system recognition.

Signed-off-by: Tony W Wang-oc <TonyWWang-oc(a)zhaoxin.com>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Cc: "hpa(a)zytor.com" <hpa(a)zytor.com>
Cc: "gregkh(a)linuxfoundation.org" <gregkh(a)linuxfoundation.org>
Cc: "rjw(a)rjwysocki.net" <rjw(a)rjwysocki.net>
Cc: "lenb(a)kernel.org" <lenb(a)kernel.org>
Cc: David Wang <DavidWang(a)zhaoxin.com>
Cc: "Cooper Yan(BJ-RD)" <CooperYan(a)zhaoxin.com>
Cc: "Qiyuan Wang(BJ-RD)" <QiyuanWang(a)zhaoxin.com>
Cc: "Herry Yang(BJ-RD)" <HerryYang(a)zhaoxin.com>
Link: https://lkml.kernel.org/r/01042674b2f741b2aed1f797359bdffb@zhaoxin.com
Signed-off-by: LeoLiu-oc <LeoLiu-oc(a)zhaoxin.com>
---
 MAINTAINERS                      |   6 ++
 arch/x86/Kconfig.cpu             |  13 +++
 arch/x86/include/asm/processor.h |   3 +-
 arch/x86/kernel/cpu/Makefile     |   1 +
 arch/x86/kernel/cpu/zhaoxin.c    | 167 +++++++++++++++++++++++++++++++
 5 files changed, 189 insertions(+), 1 deletion(-)
 create mode 100644 arch/x86/kernel/cpu/zhaoxin.c

diff --git a/MAINTAINERS b/MAINTAINERS
index ada8fbdd1d71..210fdd54b496 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -16265,6 +16265,12 @@ Q: https://patchwork.linuxtv.org/project/linux-media/list/
 S: Maintained
 F: drivers/media/dvb-frontends/zd1301_demod*
 
+ZHAOXIN PROCESSOR SUPPORT
+M: Tony W Wang-oc <TonyWWang-oc(a)zhaoxin.com>
+L: linux-kernel(a)vger.kernel.org
+S: Maintained
+F: arch/x86/kernel/cpu/zhaoxin.c
+
 ZPOOL COMPRESSED PAGE STORAGE API
 M: Dan Streetman <ddstreet(a)ieee.org>
 L: linux-mm(a)kvack.org
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 76e274a0fd0a..d1a51794c587 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -480,3 +480,16 @@ config CPU_SUP_UMC_32
       CPU might render the kernel unbootable.
 
       If unsure, say N.
+
+config CPU_SUP_ZHAOXIN
+    default y
+    bool "Support Zhaoxin processors" if PROCESSOR_SELECT
+    help
+      This enables detection, tunings and quirks for Zhaoxin processors
+
+      You need this enabled if you want your kernel to run on a
+      Zhaoxin CPU. Disabling this option on other types of CPUs
+      makes the kernel a tiny bit smaller. Disabling it on a Zhaoxin
+      CPU might render the kernel unbootable.
+
+      If unsure, say N.
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index af99d4137db9..e5b9308c312f 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -156,7 +156,8 @@ enum cpuid_regs_idx {
 #define X86_VENDOR_TRANSMETA 7
 #define X86_VENDOR_NSC 8
 #define X86_VENDOR_HYGON 9
-#define X86_VENDOR_NUM 10
+#define X86_VENDOR_ZHAOXIN 10
+#define X86_VENDOR_NUM 11
 
 #define X86_VENDOR_UNKNOWN 0xff
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index e46d718ba4cc..69bba2b1ef08 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o
 obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
 obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o
 obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
+obj-$(CONFIG_CPU_SUP_ZHAOXIN) += zhaoxin.o
 
 obj-$(CONFIG_INTEL_RDT) += intel_rdt.o intel_rdt_rdtgroup.o intel_rdt_monitor.o
 obj-$(CONFIG_INTEL_RDT) += intel_rdt_ctrlmondata.o intel_rdt_pseudo_lock.o
diff --git a/arch/x86/kernel/cpu/zhaoxin.c b/arch/x86/kernel/cpu/zhaoxin.c
new file mode 100644
index 000000000000..8e6f2f4b4afe
--- /dev/null
+++ b/arch/x86/kernel/cpu/zhaoxin.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/sched.h>
+#include <linux/sched/clock.h>
+
+#include <asm/cpufeature.h>
+
+#include "cpu.h"
+
+#define MSR_ZHAOXIN_FCR57 0x00001257
+
+#define ACE_PRESENT (1 << 6)
+#define ACE_ENABLED (1 << 7)
+#define ACE_FCR     (1 << 7)    /* MSR_ZHAOXIN_FCR */
+
+#define RNG_PRESENT (1 << 2)
+#define RNG_ENABLED (1 << 3)
+#define RNG_ENABLE  (1 << 8)    /* MSR_ZHAOXIN_RNG */
+
+#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW 0x00200000
+#define X86_VMX_FEATURE_PROC_CTLS_VNMI       0x00400000
+#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS   0x80000000
+#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC 0x00000001
+#define X86_VMX_FEATURE_PROC_CTLS2_EPT       0x00000002
+#define X86_VMX_FEATURE_PROC_CTLS2_VPID      0x00000020
+
+static void init_zhaoxin_cap(struct cpuinfo_x86 *c)
+{
+    u32 lo, hi;
+
+    /* Test for Extended Feature Flags presence */
+    if (cpuid_eax(0xC0000000) >= 0xC0000001) {
+        u32 tmp = cpuid_edx(0xC0000001);
+
+        /* Enable ACE unit, if present and disabled */
+        if ((tmp & (ACE_PRESENT | ACE_ENABLED)) == ACE_PRESENT) {
+            rdmsr(MSR_ZHAOXIN_FCR57, lo, hi);
+            /* Enable ACE unit */
+            lo |= ACE_FCR;
+            wrmsr(MSR_ZHAOXIN_FCR57, lo, hi);
+            pr_info("CPU: Enabled ACE h/w crypto\n");
+        }
+
+        /* Enable RNG unit, if present and disabled */
+        if ((tmp & (RNG_PRESENT | RNG_ENABLED)) == RNG_PRESENT) {
+            rdmsr(MSR_ZHAOXIN_FCR57, lo, hi);
+            /* Enable RNG unit */
+            lo |= RNG_ENABLE;
+            wrmsr(MSR_ZHAOXIN_FCR57, lo, hi);
+            pr_info("CPU: Enabled h/w RNG\n");
+        }
+
+        /*
+         * Store Extended Feature Flags as word 5 of the CPU
+         * capability bit array
+         */
+        c->x86_capability[CPUID_C000_0001_EDX] = cpuid_edx(0xC0000001);
+    }
+
+    if (c->x86 >= 0x6)
+        set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+
+    cpu_detect_cache_sizes(c);
+}
+
+static void early_init_zhaoxin(struct cpuinfo_x86 *c)
+{
+    if (c->x86 >= 0x6)
+        set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+#ifdef CONFIG_X86_64
+    set_cpu_cap(c, X86_FEATURE_SYSENTER32);
+#endif
+    if (c->x86_power & (1 << 8)) {
+        set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+        set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
+    }
+
+    if (c->cpuid_level >= 0x00000001) {
+        u32 eax, ebx, ecx, edx;
+
+        cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
+        /*
+         * If HTT (EDX[28]) is set EBX[16:23] contain the number of
+         * apicids which are reserved per package. Store the resulting
+         * shift value for the package management code.
+         */
+        if (edx & (1U << 28))
+            c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
+    }
+
+}
+
+static void zhaoxin_detect_vmx_virtcap(struct cpuinfo_x86 *c)
+{
+    u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;
+
+    rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
+    msr_ctl = vmx_msr_high | vmx_msr_low;
+
+    if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
+        set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
+    if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
+        set_cpu_cap(c, X86_FEATURE_VNMI);
+    if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
+        rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
+              vmx_msr_low, vmx_msr_high);
+        msr_ctl2 = vmx_msr_high | vmx_msr_low;
+        if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
+            (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
+            set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
+        if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
+            set_cpu_cap(c, X86_FEATURE_EPT);
+        if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
+            set_cpu_cap(c, X86_FEATURE_VPID);
+    }
+}
+
+static void init_zhaoxin(struct cpuinfo_x86 *c)
+{
+    early_init_zhaoxin(c);
+    init_intel_cacheinfo(c);
+    detect_num_cpu_cores(c);
+#ifdef CONFIG_X86_32
+    detect_ht(c);
+#endif
+
+    if (c->cpuid_level > 9) {
+        unsigned int eax = cpuid_eax(10);
+
+        /*
+         * Check for version and the number of counters
+         * Version(eax[7:0]) can't be 0;
+         * Counters(eax[15:8]) should be greater than 1;
+         */
+        if ((eax & 0xff) && (((eax >> 8) & 0xff) > 1))
+            set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
+    }
+
+    if (c->x86 >= 0x6)
+        init_zhaoxin_cap(c);
+#ifdef CONFIG_X86_64
+    set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+#endif
+
+    if (cpu_has(c, X86_FEATURE_VMX))
+        zhaoxin_detect_vmx_virtcap(c);
+}
+
+#ifdef CONFIG_X86_32
+static unsigned int
+zhaoxin_size_cache(struct cpuinfo_x86 *c, unsigned int size)
+{
+    return size;
+}
+#endif
+
+static const struct cpu_dev zhaoxin_cpu_dev = {
+    .c_vendor = "zhaoxin",
+    .c_ident = { " Shanghai " },
+    .c_early_init = early_init_zhaoxin,
+    .c_init = init_zhaoxin,
+#ifdef CONFIG_X86_32
+    .legacy_cache_size = zhaoxin_size_cache,
+#endif
+    .c_x86_vendor = X86_VENDOR_ZHAOXIN,
+};
+
+cpu_dev_register(zhaoxin_cpu_dev);
--
2.20.1
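The .c_ident entry " Shanghai " is matched against the 12-byte CPUID vendor string during vendor detection. A userspace sketch of that lookup (the exact padding of the Zhaoxin vendor string is an assumption here; only the substring match matters; assumes a GCC/Clang <cpuid.h>):

#include <stdio.h>
#include <string.h>
#include <cpuid.h>

/* The vendor string comes from CPUID leaf 0, concatenated as EBX, EDX, ECX. */
int main(void)
{
    unsigned int eax, regs[3];
    char vendor[13] = { 0 };

    __get_cpuid(0, &eax, &regs[0], &regs[2], &regs[1]);   /* regs[] = EBX, EDX, ECX */
    memcpy(vendor, regs, 12);
    printf("vendor_id \"%s\" -> %s\n", vendor,
           strstr(vendor, " Shanghai ") ? "X86_VENDOR_ZHAOXIN" : "some other vendor");
    return 0;
}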