Hi,
On 11/3/2023 5:41 PM, Xiongfeng Wang wrote:
> hulk inclusion
> category: feature
> bugzilla: https://gitee.com/openeuler/kernel/issues/I7ZBTV
> CVE: N/A
>
> When NOHZ_FULL is enabled, such as in HPC scenarios, CPUs are divided
> into housekeeping CPUs and non-housekeeping CPUs. Non-housekeeping
> CPUs are NOHZ_FULL CPUs and are often monopolized by a userspace
> process, such as an HPC application process, so any sort of
> interruption is unwanted.
>
> blk_mq_hctx_next_cpu() selects each CPU in 'hctx->cpumask' in turn to
> schedule the work thread blk_mq_run_work_fn(). When 'hctx->cpumask'
> contains both housekeeping and non-housekeeping CPUs, a housekeeping
> CPU that wants to issue an IO request may schedule the worker on a
> non-housekeeping CPU. This can hurt the performance of the userspace
> application running on the non-housekeeping CPUs.
>
> So let's just schedule the worker thread on the current CPU when the
> current CPU is a housekeeping CPU.
>
> Signed-off-by: Xiongfeng Wang <wangxiongfeng2@huawei.com>
> ---
>  block/blk-mq.c                  | 11 ++++++++++-
>  include/linux/sched/isolation.h |  2 ++
>  kernel/sched/isolation.c        |  8 ++++++++
>  3 files changed, 20 insertions(+), 1 deletion(-)
>
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 1d1200afb771..d0dce9ef9499 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -23,6 +23,7 @@
>  #include <linux/sched/sysctl.h>
>  #include <linux/sched/topology.h>
>  #include <linux/sched/signal.h>
> +#include <linux/sched/isolation.h>
>  #include <linux/delay.h>
>  #include <linux/crash_dump.h>
>  #include <linux/prefetch.h>
> @@ -1676,6 +1677,8 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
>  static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
>  					unsigned long msecs)
>  {
> +	int work_cpu;
> +
>  	if (unlikely(blk_mq_hctx_stopped(hctx)))
>  		return;
>
> @@ -1697,7 +1700,13 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
>  	if (!percpu_ref_tryget(&hctx->queue->q_usage_counter))
>  		return;
>
> -	kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
> +	if (enhanced_isolcpus && tick_nohz_full_enabled() &&
> +	    housekeeping_cpu(smp_processor_id(), HK_FLAG_WQ))
> +		work_cpu = smp_processor_id();

smp_processor_id() requires preemption to be disabled, so either
disable preemption around the smp_processor_id() calls or use
raw_smp_processor_id() instead.
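For example, something like the following untested sketch (not part of
the patch) reads the CPU once via raw_smp_processor_id(), which skips
the preemption check; that looks acceptable here because the value is
only used as a hint for where to queue the work:

	int cpu = raw_smp_processor_id();

	if (enhanced_isolcpus && tick_nohz_full_enabled() &&
	    housekeeping_cpu(cpu, HK_FLAG_WQ))
		work_cpu = cpu;
	else
		work_cpu = blk_mq_hctx_next_cpu(hctx);
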
> +	else
> +		work_cpu = blk_mq_hctx_next_cpu(hctx);
> +
> +	kblockd_mod_delayed_work_on(work_cpu, &hctx->run_work,
>  				    msecs_to_jiffies(msecs));

Besides run_work, kblockd_workqueue has other users as well. Why not
update those call sites too?

Or could we create kblockd_workqueue as an unbound workqueue when
enhanced_isolcpus is enabled, and use sysfs to control the CPU
affinity of kblockd_workqueue?
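Roughly what I mean, as an untested sketch against blk_dev_init() in
block/blk-core.c (whether losing WQ_HIGHPRI in the unbound case is
acceptable would need measuring):

	if (enhanced_isolcpus)
		/*
		 * WQ_UNBOUND + WQ_SYSFS exposes
		 * /sys/devices/virtual/workqueue/kblockd/cpumask, so
		 * userspace can restrict kblockd to housekeeping CPUs.
		 */
		kblockd_workqueue = alloc_workqueue("kblockd",
				WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 0);
	else
		kblockd_workqueue = alloc_workqueue("kblockd",
				WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
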
>  	percpu_ref_put(&hctx->queue->q_usage_counter);
>  }
> diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h
> index cc9f393e2a70..2f93081ad7a0 100644
> --- a/include/linux/sched/isolation.h
> +++ b/include/linux/sched/isolation.h
> @@ -18,6 +18,7 @@ enum hk_flags {
>  };
>
>  #ifdef CONFIG_CPU_ISOLATION
> +extern bool enhanced_isolcpus;
>  DECLARE_STATIC_KEY_FALSE(housekeeping_overridden);
>  extern int housekeeping_any_cpu(enum hk_flags flags);
>  extern const struct cpumask *housekeeping_cpumask(enum hk_flags flags);
> @@ -28,6 +29,7 @@ extern void __init housekeeping_init(void);
>
>  #else
>
> +#define enhanced_isolcpus 0
>  static inline int housekeeping_any_cpu(enum hk_flags flags)
>  {
>  	return smp_processor_id();
> diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c
> index 5a6ea03f9882..785ef5201116 100644
> --- a/kernel/sched/isolation.c
> +++ b/kernel/sched/isolation.c
> @@ -198,3 +198,11 @@ static int __init housekeeping_isolcpus_setup(char *str)
>  	return housekeeping_setup(str, flags);
>  }
>  __setup("isolcpus=", housekeeping_isolcpus_setup);
> +
> +bool enhanced_isolcpus;
> +static int __init enhanced_isolcpus_setup(char *str)
> +{
> +	enhanced_isolcpus = true;
> +	return 0;
> +}
> +__setup("enhanced_isolcpus", enhanced_isolcpus_setup);
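
For reference, actually exercising the new branch would presumably need
a command line along these lines (CPU ranges made up):

	nohz_full=2-7 enhanced_isolcpus

since tick_nohz_full_enabled() is only true with nohz_full= set, and
that is also what marks the remaining CPUs as HK_FLAG_WQ housekeeping.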