From: Zheng Yejian <zhengyejian1@huawei.com>
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I6NVPT
--------------------------------
Concurrently enabling/disabling a livepatch and onlining/offlining a CPU
can cause a deadlock:

  __klp_{enable,disable}_patch()      _cpu_{up,down}()
  --------                            --------
  mutex_lock(&text_mutex);            cpus_write_lock();
  cpus_read_lock();                   mutex_lock(&text_mutex);
__klp_{enable,disable}_patch() holds text_mutex and then waits for cpu_hotplug_lock, while _cpu_{up,down}() holds cpu_hotplug_lock and waits for text_mutex, so the two paths deadlock.
Cpu hotplug locking is a "percpu" rw semaphore; however, its write lock and read lock are globally mutually exclusive, that is, cpus_write_lock() on one cpu can block all cpus_read_lock() on other cpus, and vice versa.
A similar locking issue was solved by commit 2d1e38f56622 ("kprobes: Cure hotplug lock ordering issues"), which changed the lock order to be:

  kprobe_mutex -> cpus_rwsem -> jump_label_mutex -> text_mutex
Therefore take cpus_read_lock() before text_mutex to avoid deadlock.
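For illustration only (this snippet is not part of the diff below; fn, data
and cpus stand for the stop_machine arguments), the ordering before and
after this change is roughly:

	/*
	 * Before: arch_klp_code_modify_prepare() takes text_mutex (on x86),
	 * then stop_machine() internally takes cpu_hotplug_lock for reading,
	 * i.e. the ordering that deadlocks against cpu hotplug.
	 */
	arch_klp_code_modify_prepare();
	ret = stop_machine(fn, data, cpus);
	arch_klp_code_modify_post_process();

	/*
	 * After: cpu_hotplug_lock is read-locked first, so text_mutex is
	 * taken under it, matching the kprobes order cpus_rwsem -> text_mutex.
	 */
	cpus_read_lock();
	arch_klp_code_modify_prepare();
	ret = stop_machine_cpuslocked(fn, data, cpus);
	arch_klp_code_modify_post_process();
	cpus_read_unlock();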
Fixes: f5a6746743d8 ("livepatch/x86: support livepatch without ftrace")
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
Reviewed-by: Xu Kuohai <xukuohai@huawei.com>
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
---
 kernel/livepatch/core.c | 29 +++++++++++++++++++++++------
 1 file changed, 23 insertions(+), 6 deletions(-)
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 957f16f6c6c4..40259a2988e9 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -1438,6 +1438,27 @@ static int klp_mem_prepare(struct klp_patch *patch)
 	return 0;
 }
 
+static int klp_stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
+{
+	int ret;
+
+	/*
+	 * Cpu hotplug locking is a "percpu" rw semaphore, however write
+	 * lock and read lock on it are globally mutual exclusive, that is
+	 * cpus_write_lock() on one cpu can block all cpus_read_lock()
+	 * on other cpus, vice versa.
+	 *
+	 * Since cpu hotplug take the cpus_write_lock() before text_mutex,
+	 * here take cpus_read_lock() before text_mutex to avoid deadlock.
+	 */
+	cpus_read_lock();
+	arch_klp_code_modify_prepare();
+	ret = stop_machine_cpuslocked(fn, data, cpus);
+	arch_klp_code_modify_post_process();
+	cpus_read_unlock();
+	return ret;
+}
+
 static int __klp_disable_patch(struct klp_patch *patch)
 {
 	int ret;
@@ -1458,9 +1479,7 @@ static int __klp_disable_patch(struct klp_patch *patch)
 	}
 #endif
 
-	arch_klp_code_modify_prepare();
-	ret = stop_machine(klp_try_disable_patch, &patch_data, cpu_online_mask);
-	arch_klp_code_modify_post_process();
+	ret = klp_stop_machine(klp_try_disable_patch, &patch_data, cpu_online_mask);
 	if (ret)
 		return ret;
 
@@ -1699,9 +1718,7 @@ static int __klp_enable_patch(struct klp_patch *patch)
 	ret = klp_mem_prepare(patch);
 	if (ret)
 		return ret;
-	arch_klp_code_modify_prepare();
-	ret = stop_machine(klp_try_enable_patch, &patch_data, cpu_online_mask);
-	arch_klp_code_modify_post_process();
+	ret = klp_stop_machine(klp_try_enable_patch, &patch_data, cpu_online_mask);
 	if (ret) {
 		klp_mem_recycle(patch);
 		return ret;