From: LeoLiu-oc <LeoLiu-oc@zhaoxin.com>
zhaoxin inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I40QDN
CVE: NA
----------------------------------------------------------------
The kernel driver currently disables the port interrupt when a port is suspended, so hot-plug stops working. To make hot-plug work, keep the plug-in interrupt enabled while the port is in a power-management state. Add pm_request_resume() to resume the port when a plug-in event or a PME signal wakes up a controller that is in D3.

When the AHCI controller frequently enters and leaves D3, the IDENTIFY command may time out while the controller resumes and re-establishes the connection with the device. Delaying 10 ms between controller resume and port resume is effective, giving the link a smooth transition.

When non-power-management requests and power-management requests compete with each other in the queue, block I/O is often found to hang for 120 s while the system disk is suspending or resuming. It is now guaranteed that PM requests enter the queue regardless of whether other non-PM requests are waiting: increase the pm_only counter before checking whether any non-PM blk_queue_enter() calls are in progress, and issue the new blk_pm_request_resume() call when a request is assigned to a queue whose device is suspended.
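The block-layer side of this guarantee is not visible in the hunks below (they touch only the AHCI driver). For reference, the queue-entry kick is expected to look roughly like the mainline block/blk-pm.h helper; this is a non-authoritative sketch, with names taken from mainline rather than from this patch:

    /* Sketch modeled on mainline block/blk-pm.h; not part of this patch's diff. */
    static inline void blk_pm_request_resume(struct request_queue *q)
    {
        /*
         * A request is being added while the queue's device is asleep or
         * on its way to sleep: ask runtime PM to resume it asynchronously.
         */
        if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
                       q->rpm_status == RPM_SUSPENDING))
            pm_request_resume(q->dev);
    }

Non-PM submitters then wait in blk_queue_enter() while pm_only is elevated, whereas PM requests (BLK_MQ_REQ_PREEMPT in kernels of this vintage) bypass the pm_only check, so the resume path can always make progress.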
Signed-off-by: LeoLiu-oc <LeoLiu-oc@zhaoxin.com>
Reviewed-by: Xiongfeng Wang <wangxiongfeng2@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 drivers/ata/ahci.c    | 13 +++++++++++++
 drivers/ata/libahci.c | 15 +++++++++++++++
 2 files changed, 28 insertions(+)
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index ff2add0101fe..8a7140ee88b5 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -869,6 +869,19 @@ static int ahci_pci_device_runtime_resume(struct device *dev)
 	if (rc)
 		return rc;
 	ahci_pci_init_controller(host);
+
+	/* Port resume for Zhaoxin platform */
+	if (pdev->vendor == PCI_VENDOR_ID_ZHAOXIN) {
+		if (pdev->revision == 0x01)
+			ata_msleep(NULL, 10);
+
+		for (rc = 0; rc < host->n_ports; rc++) {
+			struct ata_port *ap = host->ports[rc];
+
+			pm_request_resume(&ap->tdev);
+		}
+	}
+
 	return 0;
 }
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index fec2e9754aed..4514f3f28b7c 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -823,9 +823,15 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
 static void ahci_power_down(struct ata_port *ap)
 {
 	struct ahci_host_priv *hpriv = ap->host->private_data;
+	struct pci_dev *pdev;
 	void __iomem *port_mmio = ahci_port_base(ap);
 	u32 cmd, scontrol;

+	/* Keep the plug-in interrupt enabled while the port is suspended, for Zhaoxin platform */
+	pdev = (ap->dev && dev_is_pci(ap->dev)) ? to_pci_dev(ap->dev) : NULL;
+	if ((pdev && pdev->vendor == PCI_VENDOR_ID_ZHAOXIN) && !ap->link.device->sdev)
+		writel(PORT_IRQ_CONNECT, port_mmio + PORT_IRQ_MASK);
+
 	if (!(hpriv->cap & HOST_CAP_SSS))
 		return;

@@ -1701,6 +1707,7 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
 	struct ata_link *link = NULL;
 	struct ata_queued_cmd *active_qc;
 	struct ata_eh_info *active_ehi;
+	struct pci_dev *pdev;
 	bool fbs_need_dec = false;
 	u32 serror;

@@ -1791,6 +1798,14 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
 		ata_ehi_push_desc(host_ehi, "%s",
 				  irq_stat & PORT_IRQ_CONNECT ?
 				  "connection status changed" : "PHY RDY changed");
+
+		/* On a plug-in interrupt, resume the suspended port, for Zhaoxin platform */
+		pdev = (ap->dev && dev_is_pci(ap->dev)) ? to_pci_dev(ap->dev) : NULL;
+		if ((pdev && pdev->vendor == PCI_VENDOR_ID_ZHAOXIN) &&
+		    (ap->pflags & ATA_PFLAG_SUSPENDED)) {
+			pm_request_resume(&ap->tdev);
+			return;
+		}
 	}

 	/* okay, let's hand over to EH */
From: Peter Zijlstra <peterz@infradead.org>
mainline inclusion
from mainline-v5.11-rc1
commit 7a9f50a05843fee8366bd3a65addbebaa7cf7f07
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4U05V
CVE: NA
-------------------------------------------------------------------------
Get rid of the __call_single_node union and clean up the API a little to avoid external code relying on the structure layout as much.
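As a usage-level sketch of the cleaned-up API (the callback and caller names here are hypothetical, not part of this patch):

    #include <linux/irq_work.h>

    /* Hypothetical handler; irq_work callbacks run from hard-IRQ context. */
    static void demo_irq_work_fn(struct irq_work *work)
    {
        pr_info("irq_work fired\n");
    }

    /* Static initialization now goes through the IRQ_WORK_INIT() helpers
     * instead of open-coding .flags = ATOMIC_INIT(...). */
    static DEFINE_IRQ_WORK(demo_irq_work, demo_irq_work_fn);

    static void demo_kick(void)
    {
        /* Flag inspection goes through accessors, not work->flags. */
        if (!irq_work_is_busy(&demo_irq_work))
            irq_work_queue(&demo_irq_work);
    }

Callers that previously peeked at work->flags directly (as stackmap.c and bpf_trace.c did) switch to irq_work_is_busy(), so the flag encoding can change without touching them.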
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
Reviewed-by: Cheng Jian <cj.chengjian@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 drivers/gpu/drm/i915/i915_request.c |  4 ++--
 include/linux/irq_work.h            | 33 ++++++++++++++++++-----------
 include/linux/irqflags.h            |  4 ++--
 kernel/bpf/stackmap.c               |  2 +-
 kernel/irq_work.c                   | 18 ++++++++--------
 kernel/printk/printk.c              |  6 ++----
 kernel/rcu/tree.c                   |  3 +--
 kernel/time/tick-sched.c            |  6 ++----
 kernel/trace/bpf_trace.c            |  2 +-
 9 files changed, 41 insertions(+), 37 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 896389f93029..79d835bcb8b7 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -197,7 +197,7 @@ __notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk))

 	llist_for_each_entry_safe(cb, cn,
 				  llist_del_all(&rq->execute_cb),
-				  work.llnode)
+				  work.node.llist)
 		fn(&cb->work);
 }

@@ -460,7 +460,7 @@ __await_execution(struct i915_request *rq,
 	 * callback first, then checking the ACTIVE bit, we serialise with
 	 * the completed/retired request.
 	 */
-	if (llist_add(&cb->work.llnode, &signal->execute_cb)) {
+	if (llist_add(&cb->work.node.llist, &signal->execute_cb)) {
 		if (i915_request_is_active(signal) ||
 		    __request_in_flight(signal))
 			__notify_execute_cb_imm(signal);
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 30823780c192..ec2a47a81e42 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -14,28 +14,37 @@
  */

 struct irq_work {
-	union {
-		struct __call_single_node node;
-		struct {
-			struct llist_node llnode;
-			atomic_t flags;
-		};
-	};
+	struct __call_single_node node;
 	void (*func)(struct irq_work *);
 };

+#define __IRQ_WORK_INIT(_func, _flags) (struct irq_work){	\
+	.node = { .u_flags = (_flags), },			\
+	.func = (_func),					\
+}
+
+#define IRQ_WORK_INIT(_func) __IRQ_WORK_INIT(_func, 0)
+#define IRQ_WORK_INIT_LAZY(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_LAZY)
+#define IRQ_WORK_INIT_HARD(_func) __IRQ_WORK_INIT(_func, IRQ_WORK_HARD_IRQ)
+
+#define DEFINE_IRQ_WORK(name, _f)	\
+	struct irq_work name = IRQ_WORK_INIT(_f)
+
 static inline
 void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
 {
-	atomic_set(&work->flags, 0);
-	work->func = func;
+	*work = IRQ_WORK_INIT(func);
 }

-#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = {	\
-	.flags = ATOMIC_INIT(0),				\
-	.func  = (_f)						\
+static inline bool irq_work_is_pending(struct irq_work *work)
+{
+	return atomic_read(&work->node.a_flags) & IRQ_WORK_PENDING;
 }

+static inline bool irq_work_is_busy(struct irq_work *work)
+{
+	return atomic_read(&work->node.a_flags) & IRQ_WORK_BUSY;
+}
+
 bool irq_work_queue(struct irq_work *work);
 bool irq_work_queue_on(struct irq_work *work, int cpu);
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 3ed4e8771b64..fef2d43a7a1d 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -109,12 +109,12 @@ do { \
 # define lockdep_irq_work_enter(__work)					\
 	  do {								\
-		  if (!(atomic_read(&__work->flags) & IRQ_WORK_HARD_IRQ))\
+		  if (!(atomic_read(&__work->node.a_flags) & IRQ_WORK_HARD_IRQ))\
 			current->irq_config = 1;			\
 	  } while (0)
 # define lockdep_irq_work_exit(__work)					\
 	  do {								\
-		  if (!(atomic_read(&__work->flags) & IRQ_WORK_HARD_IRQ))\
+		  if (!(atomic_read(&__work->node.a_flags) & IRQ_WORK_HARD_IRQ))\
 			current->irq_config = 0;			\
 	  } while (0)
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 4b5b390e22ea..92b38a5da61a 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -301,7 +301,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
 	if (irqs_disabled()) {
 		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
 			work = this_cpu_ptr(&up_read_work);
-			if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY) {
+			if (irq_work_is_busy(&work->irq_work)) {
 				/* cannot queue more up_read, fallback */
 				irq_work_busy = true;
 			}
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index eca83965b631..fbff25adb574 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -31,7 +31,7 @@ static bool irq_work_claim(struct irq_work *work)
 {
 	int oflags;

-	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->flags);
+	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);
 	/*
 	 * If the work is already pending, no need to raise the IPI.
 	 * The pairing atomic_fetch_andnot() in irq_work_run() makes sure
@@ -53,12 +53,12 @@ void __weak arch_irq_work_raise(void)
 static void __irq_work_queue_local(struct irq_work *work)
 {
 	/* If the work is "lazy", handle it from next tick if any */
-	if (atomic_read(&work->flags) & IRQ_WORK_LAZY) {
-		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
+	if (atomic_read(&work->node.a_flags) & IRQ_WORK_LAZY) {
+		if (llist_add(&work->node.llist, this_cpu_ptr(&lazy_list)) &&
 		    tick_nohz_tick_stopped())
 			arch_irq_work_raise();
 	} else {
-		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+		if (llist_add(&work->node.llist, this_cpu_ptr(&raised_list)))
 			arch_irq_work_raise();
 	}
 }
@@ -102,7 +102,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
 	if (cpu != smp_processor_id()) {
 		/* Arch remote IPI send/receive backend aren't NMI safe */
 		WARN_ON_ONCE(in_nmi());
-		__smp_call_single_queue(cpu, &work->llnode);
+		__smp_call_single_queue(cpu, &work->node.llist);
 	} else {
 		__irq_work_queue_local(work);
 	}
@@ -142,7 +142,7 @@ void irq_work_single(void *arg)
 	 * to claim that work don't rely on us to handle their data
 	 * while we are in the middle of the func.
 	 */
-	flags = atomic_fetch_andnot(IRQ_WORK_PENDING, &work->flags);
+	flags = atomic_fetch_andnot(IRQ_WORK_PENDING, &work->node.a_flags);

 	lockdep_irq_work_enter(work);
 	work->func(work);
@@ -152,7 +152,7 @@ void irq_work_single(void *arg)
 	 * no-one else claimed it meanwhile.
 	 */
 	flags &= ~IRQ_WORK_PENDING;
-	(void)atomic_cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
+	(void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);
 }

 static void irq_work_run_list(struct llist_head *list)
@@ -166,7 +166,7 @@ static void irq_work_run_list(struct llist_head *list)
 		return;

 	llnode = llist_del_all(list);
-	llist_for_each_entry_safe(work, tmp, llnode, llnode)
+	llist_for_each_entry_safe(work, tmp, llnode, node.llist)
 		irq_work_single(work);
 }

@@ -198,7 +198,7 @@ void irq_work_sync(struct irq_work *work)
 {
 	lockdep_assert_irqs_enabled();

-	while (atomic_read(&work->flags) & IRQ_WORK_BUSY)
+	while (irq_work_is_busy(work))
 		cpu_relax();
 }
 EXPORT_SYMBOL_GPL(irq_work_sync);
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index e237ac1a6533..43f8f2573eac 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -3078,10 +3078,8 @@ static void wake_up_klogd_work_func(struct irq_work *irq_work)
 	wake_up_interruptible(&log_wait);
 }
-static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
-	.func = wake_up_klogd_work_func,
-	.flags = ATOMIC_INIT(IRQ_WORK_LAZY),
-};
+static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) =
+	IRQ_WORK_INIT_LAZY(wake_up_klogd_work_func);
 void wake_up_klogd(void)
 {
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index b74e7ace4376..018a77b64a2e 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1319,8 +1319,6 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	if (IS_ENABLED(CONFIG_IRQ_WORK) && !rdp->rcu_iw_pending &&
 	    rdp->rcu_iw_gp_seq != rnp->gp_seq &&
 	    (rnp->ffmask & rdp->grpmask)) {
-		init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
-		atomic_set(&rdp->rcu_iw.flags, IRQ_WORK_HARD_IRQ);
 		rdp->rcu_iw_pending = true;
 		rdp->rcu_iw_gp_seq = rnp->gp_seq;
 		irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
@@ -4008,6 +4006,7 @@ int rcutree_prepare_cpu(unsigned int cpu)
 	rdp->cpu_no_qs.b.norm = true;
 	rdp->core_needs_qs = false;
 	rdp->rcu_iw_pending = false;
+	rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler);
 	rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
 	trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 49423dffdfc3..2d7899700b7a 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -271,10 +271,8 @@ static void nohz_full_kick_func(struct irq_work *work)
 	/* Empty, the tick restart happens on tick_nohz_irq_exit() */
 }

-static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
-	.func = nohz_full_kick_func,
-	.flags = ATOMIC_INIT(IRQ_WORK_HARD_IRQ),
-};
+static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) =
+	IRQ_WORK_INIT_HARD(nohz_full_kick_func);

 /*
  * Kick this CPU if it's full dynticks in order to force it to
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index ba644760f507..1228e56853e0 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1081,7 +1081,7 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type)
 		return -EINVAL;

 	work = this_cpu_ptr(&send_signal_work);
-	if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY)
+	if (irq_work_is_busy(&work->irq_work))
 		return -EBUSY;

 	/* Add the current task, which is the target of sending signal,
From: "Paul E. McKenney" paulmck@kernel.org
mainline inclusion
from mainline-v5.14-rc1
commit cf868c2af244417ed276ba7f716b980841a71340
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4U05V
CVE: NA
-------------------------------------------------------------------------
Heavy networking load can cause a CPU to execute continuously and indefinitely within ksoftirqd, in which case there will be no voluntary task switches and thus no RCU-tasks quiescent states. This commit therefore causes the existing rcu_softirq_qs() to provide an RCU-tasks quiescent state.
This of course means that __do_softirq() and its callers cannot be invoked from within a tracing trampoline.
Reported-by: Toke Høiland-Jørgensen <toke@redhat.com>
Tested-by: Toke Høiland-Jørgensen <toke@redhat.com>
Reviewed-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
Reviewed-by: Cheng Jian <cj.chengjian@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 kernel/rcu/tree.c | 1 +
 1 file changed, 1 insertion(+)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 1f6c78aa7bfd..b9c45b2d7690 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -236,6 +236,7 @@ void rcu_softirq_qs(void)
 {
 	rcu_qs();
 	rcu_preempt_deferred_qs(current);
+	rcu_tasks_qs(current, false);
 }

 /*
From: Qi Liu <liuqi115@huawei.com>
mainline inclusion
from mainline-v5.11-rc1
commit e72550928ff0
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4UA33
CVE: NA
Reference: https://lore.kernel.org/r/20201208182651.1597945-4-mathieu.poirier@linaro.or...
-----------------------------------------
The ETM device can't keep up with the core pipeline when the CPU core runs at full speed. This may cause overflow within the core and its ETM, a common phenomenon on ETM devices.

On the HiSilicon Hip08 platform, a specific feature is added to control the core pipeline, so the commit rate can be reduced manually to avoid ETM overflow.
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Qi Liu <liuqi115@huawei.com>
[Modified changelog title and Kconfig description]
Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
Link: https://lore.kernel.org/r/20201208182651.1597945-4-mathieu.poirier@linaro.or...
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Qi Liu <liuqi115@huawei.com>
Reviewed-by: Jay Fang <f.fangjian@huawei.com>
Acked-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 drivers/hwtracing/coresight/Kconfig           |  8 ++
 .../coresight/coresight-etm4x-core.c          | 98 +++++++++++++++++++
 drivers/hwtracing/coresight/coresight-etm4x.h |  8 ++
 3 files changed, 114 insertions(+)
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index c1198245461d..7b44ba22cbe1 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -110,6 +110,14 @@ config CORESIGHT_SOURCE_ETM4X
 	  To compile this driver as a module, choose M here: the
 	  module will be called coresight-etm4x.

+config ETM4X_IMPDEF_FEATURE
+	bool "Control implementation defined overflow support in ETM 4.x driver"
+	depends on CORESIGHT_SOURCE_ETM4X
+	help
+	  This control provides implementation define control for CoreSight
+	  ETM 4.x tracer module that can't reduce commit rate automatically.
+	  This avoids overflow between the ETM tracer module and the cpu core.
+
 config CORESIGHT_STM
 	tristate "CoreSight System Trace Macrocell driver"
 	depends on (ARM && !(CPU_32v3 || CPU_32v4 || CPU_32v4T)) || ARM64
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 74d3e2fe43d4..02d0b92cf510 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -3,6 +3,7 @@
  * Copyright (c) 2014, The Linux Foundation. All rights reserved.
  */

+#include <linux/bitops.h>
 #include <linux/kernel.h>
 #include <linux/moduleparam.h>
 #include <linux/init.h>
@@ -28,7 +29,9 @@
 #include <linux/perf_event.h>
 #include <linux/pm_runtime.h>
 #include <linux/property.h>
+
 #include <asm/sections.h>
+#include <asm/sysreg.h>
 #include <asm/local.h>
 #include <asm/virt.h>

@@ -103,6 +106,97 @@ struct etm4_enable_arg {
 	int rc;
 };
+#ifdef CONFIG_ETM4X_IMPDEF_FEATURE
+
+#define HISI_HIP08_AMBA_ID		0x000b6d01
+#define ETM4_AMBA_MASK			0xfffff
+#define HISI_HIP08_CORE_COMMIT_MASK	0x3000
+#define HISI_HIP08_CORE_COMMIT_SHIFT	12
+#define HISI_HIP08_CORE_COMMIT_FULL	0b00
+#define HISI_HIP08_CORE_COMMIT_LVL_1	0b01
+#define HISI_HIP08_CORE_COMMIT_REG	sys_reg(3, 1, 15, 2, 5)
+
+struct etm4_arch_features {
+	void (*arch_callback)(bool enable);
+};
+
+static bool etm4_hisi_match_pid(unsigned int id)
+{
+	return (id & ETM4_AMBA_MASK) == HISI_HIP08_AMBA_ID;
+}
+
+static void etm4_hisi_config_core_commit(bool enable)
+{
+	u8 commit = enable ? HISI_HIP08_CORE_COMMIT_LVL_1 :
+		    HISI_HIP08_CORE_COMMIT_FULL;
+	u64 val;
+
+	/*
+	 * bit 12 and 13 of HISI_HIP08_CORE_COMMIT_REG are used together
+	 * to set core-commit, 2'b00 means cpu is at full speed, 2'b01,
+	 * 2'b10, 2'b11 mean reduce pipeline speed, and 2'b01 means level-1
+	 * speed(minimum value). So bit 12 and 13 should be cleared together.
+	 */
+	val = read_sysreg_s(HISI_HIP08_CORE_COMMIT_REG);
+	val &= ~HISI_HIP08_CORE_COMMIT_MASK;
+	val |= commit << HISI_HIP08_CORE_COMMIT_SHIFT;
+	write_sysreg_s(val, HISI_HIP08_CORE_COMMIT_REG);
+}
+
+static struct etm4_arch_features etm4_features[] = {
+	[ETM4_IMPDEF_HISI_CORE_COMMIT] = {
+		.arch_callback = etm4_hisi_config_core_commit,
+	},
+	{},
+};
+
+static void etm4_enable_arch_specific(struct etmv4_drvdata *drvdata)
+{
+	struct etm4_arch_features *ftr;
+	int bit;
+
+	for_each_set_bit(bit, drvdata->arch_features, ETM4_IMPDEF_FEATURE_MAX) {
+		ftr = &etm4_features[bit];
+
+		if (ftr->arch_callback)
+			ftr->arch_callback(true);
+	}
+}
+
+static void etm4_disable_arch_specific(struct etmv4_drvdata *drvdata)
+{
+	struct etm4_arch_features *ftr;
+	int bit;
+
+	for_each_set_bit(bit, drvdata->arch_features, ETM4_IMPDEF_FEATURE_MAX) {
+		ftr = &etm4_features[bit];
+
+		if (ftr->arch_callback)
+			ftr->arch_callback(false);
+	}
+}
+
+static void etm4_check_arch_features(struct etmv4_drvdata *drvdata,
+				     unsigned int id)
+{
+	if (etm4_hisi_match_pid(id))
+		set_bit(ETM4_IMPDEF_HISI_CORE_COMMIT, drvdata->arch_features);
+}
+#else
+static void etm4_enable_arch_specific(struct etmv4_drvdata *drvdata)
+{
+}
+
+static void etm4_disable_arch_specific(struct etmv4_drvdata *drvdata)
+{
+}
+
+static void etm4_check_arch_features(struct etmv4_drvdata *drvdata,
+				     unsigned int id)
+{
+}
+#endif /* CONFIG_ETM4X_IMPDEF_FEATURE */
+
 static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
 {
 	int i, rc;
@@ -110,6 +204,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
 	struct device *etm_dev = &drvdata->csdev->dev;
 	CS_UNLOCK(drvdata->base);
+	etm4_enable_arch_specific(drvdata);

 	etm4_os_unlock(drvdata);
@@ -480,6 +575,7 @@ static void etm4_disable_hw(void *info)
 	int i;

 	CS_UNLOCK(drvdata->base);
+	etm4_disable_arch_specific(drvdata);

 	if (!drvdata->skip_power_up) {
 		/* power can be removed from the trace unit now */
@@ -1563,6 +1659,8 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
 		drvdata->boot_enable = true;
 	}

+	etm4_check_arch_features(drvdata, id->id);
+
 	return 0;
 }
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index eefc7371c6c4..3dd3e0633328 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -8,6 +8,7 @@

 #include <asm/local.h>
 #include <linux/spinlock.h>
+#include <linux/types.h>
 #include "coresight-priv.h"

 /*
@@ -203,6 +204,11 @@
 /* Interpretation of resource numbers change at ETM v4.3 architecture */
 #define ETM4X_ARCH_4V3	0x43

+enum etm_impdef_type {
+	ETM4_IMPDEF_HISI_CORE_COMMIT,
+	ETM4_IMPDEF_FEATURE_MAX,
+};
+
 /**
  * struct etmv4_config - configuration information related to an ETMv4
  * @mode:	Controls various modes supported by this ETM.
@@ -415,6 +421,7 @@ struct etmv4_save_state {
  * @state_needs_restore: True when there is context to restore after PM exit
  * @skip_power_up: Indicates if an implementation can skip powering up
  *		   the trace unit.
+ * @arch_features: Bitmap of arch features of etmv4 devices.
  */
 struct etmv4_drvdata {
 	void __iomem *base;
@@ -463,6 +470,7 @@ struct etmv4_drvdata {
 	struct etmv4_save_state *save_state;
 	bool state_needs_restore;
 	bool skip_power_up;
+	DECLARE_BITMAP(arch_features, ETM4_IMPDEF_FEATURE_MAX);
 };

 /* Address comparator access types */