From: Xiongfeng Wang <wangxiongfeng2@huawei.com>
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4HYY4?from=project-issue
CVE: NA
-------------------------------------------------
The per-CPU cpc_desc_ptr pointers are initialized in acpi_cppc_processor_probe() when the processor devices are present and added into the system. But when cpu_possible_mask and cpu_present_mask are not equal, only the entries for CPUs in cpu_present_mask are initialized, which causes acpi_get_psd_map() to fail in cppc_cpufreq_init().
To fix this issue, we parse the _PSD method for all possible CPUs to get the P-State topology and modify acpi_get_psd_map() to rely on this information.
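For illustration, a minimal sketch of the failure mode, assuming it sits in drivers/acpi/cppc_acpi.c next to the cpc_desc_ptr per-CPU definition; check_cpc_desc_coverage() is a made-up helper for this note, not part of the patch:

static int check_cpc_desc_coverage(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		/* filled in by acpi_cppc_processor_probe() for present CPUs only */
		struct cpc_desc *cpc = per_cpu(cpc_desc_ptr, cpu);

		if (!cpc) {
			pr_info("CPU%d: possible but not present, cpc_desc_ptr is NULL\n", cpu);
			return -EFAULT;	/* same error acpi_get_psd_map() hits */
		}
	}

	return 0;
}

On a hot-pluggable platform where cpu_possible_mask is larger than cpu_present_mask, the loop above fails, which is exactly what breaks cppc_cpufreq_init().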
Signed-off-by: Xiongfeng Wang <wangxiongfeng@huawei.com>
Reviewed-by: Keqian Zhu <zhukeqian1@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 drivers/acpi/cppc_acpi.c | 93 ++++++++++++++++++++++++++++++++++++++--
 1 file changed, 89 insertions(+), 4 deletions(-)
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 6134f20a13f0c..e71c0e0572bea 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -415,7 +415,7 @@ static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
  *
  *	Return: 0 for success or negative value for err.
  */
-int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
+static int __acpi_get_psd_map(struct cppc_cpudata **all_cpu_data, struct cpc_desc **cpc_pptr)
 {
 	int count_target;
 	int retval = 0;
@@ -441,7 +441,7 @@ int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
 		if (cpumask_test_cpu(i, covered_cpus))
 			continue;

-		cpc_ptr = per_cpu(cpc_desc_ptr, i);
+		cpc_ptr = cpc_pptr[i];
 		if (!cpc_ptr) {
 			retval = -EFAULT;
 			goto err_ret;
@@ -466,7 +466,7 @@ int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
 			if (i == j)
 				continue;

-			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
+			match_cpc_ptr = cpc_pptr[j];
 			if (!match_cpc_ptr) {
 				retval = -EFAULT;
 				goto err_ret;
@@ -499,7 +499,7 @@ int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
 			if (!match_pr)
 				continue;

-			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
+			match_cpc_ptr = cpc_pptr[j];
 			if (!match_cpc_ptr) {
 				retval = -EFAULT;
 				goto err_ret;
@@ -532,6 +532,91 @@ int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
 	free_cpumask_var(covered_cpus);
 	return retval;
 }
+
+static acpi_status acpi_parse_cpc(acpi_handle handle, u32 lvl, void *data,
+				  void **ret_p)
+{
+	struct acpi_device *adev = NULL;
+	struct cpc_desc *cpc_ptr, **cpc_pptr;
+	acpi_status status = AE_OK;
+	const int device_declaration = 1;
+	unsigned long long uid;
+	phys_cpuid_t phys_id;
+	int logical_id, ret;
+	int *parsed_core_num = (int *)ret_p;
+
+	if (acpi_bus_get_device(handle, &adev))
+		return AE_OK;
+
+	if (strcmp(acpi_device_hid(adev), ACPI_PROCESSOR_DEVICE_HID))
+		return AE_OK;
+
+	status = acpi_evaluate_integer(handle, METHOD_NAME__UID, NULL, &uid);
+	if (ACPI_FAILURE(status))
+		return AE_OK;
+	phys_id = acpi_get_phys_id(handle, device_declaration, uid);
+	if (invalid_phys_cpuid(phys_id))
+		return AE_OK;
+	logical_id = acpi_map_cpuid(phys_id, uid);
+	if (logical_id < 0)
+		return AE_OK;
+
+	cpc_pptr = (struct cpc_desc **)data;
+	cpc_ptr = cpc_pptr[logical_id];
+	cpc_ptr->cpu_id = logical_id;
+
+	ret = acpi_get_psd(cpc_ptr, handle);
+	if (ret)
+		return ret;
+
+	(*parsed_core_num)++;
+
+	return AE_OK;
+}
+
+int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
+{
+	struct cpc_desc **cpc_pptr, *cpc_ptr;
+	int parsed_core_num = 0;
+	int i, ret;
+
+	cpc_pptr = kcalloc(num_possible_cpus(), sizeof(void *), GFP_KERNEL);
+	if (!cpc_pptr)
+		return -ENOMEM;
+	for_each_possible_cpu(i) {
+		cpc_pptr[i] = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
+		if (!cpc_pptr[i]) {
+			ret = -ENOMEM;
+			goto out;
+		}
+	}
+
+	/*
+	 * We can not use acpi_get_devices() to walk the processor devices
+	 * because some processor device is not present.
+	 */
+	ret = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+				  ACPI_UINT32_MAX, acpi_parse_cpc, NULL,
+				  cpc_pptr, (void **)&parsed_core_num);
+	if (ret)
+		goto out;
+	if (parsed_core_num != num_possible_cpus()) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = __acpi_get_psd_map(all_cpu_data, cpc_pptr);
+
+out:
+	for_each_possible_cpu(i) {
+		cpc_ptr = cpc_pptr[i];
+		if (cpc_ptr)
+			kfree(cpc_ptr);
+	}
+	kfree(cpc_pptr);
+
+	return ret;
+}
 EXPORT_SYMBOL_GPL(acpi_get_psd_map);

 static int register_pcc_channel(int pcc_ss_idx)
From: Xiongfeng Wang <wangxiongfeng2@huawei.com>
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4HYY4?from=project-issue
CVE: NA
-------------------------------------------------
When I hot-added a CPU, I found that the 'cpufreq' directory was not created under /sys/devices/system/cpu/cpuX/. This is because get_cpu_device() fails in add_cpu_dev_symlink().
cpufreq_add_dev() is the .add_dev callback of the CPU subsys interface. It is called when the CPU device is registered into the system. The call stack is as follows:

register_cpu()
  -> device_register()
    -> device_add()
      -> bus_probe_device()
        -> cpufreq_add_dev()
But get_cpu_device() can only return the CPU device after the device has been registered; before that it returns NULL. Since we already have the CPU device in cpufreq_add_dev(), pass it to add_cpu_dev_symlink().
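As a rough sketch of the idea (simplified from the actual patch; cpufreq_add_dev_sketch() is an illustrative name): the .add_dev callback of the CPU subsys interface already receives the struct device being registered, so it can be forwarded instead of looked up again with get_cpu_device(), which still returns NULL at this point.

static int cpufreq_add_dev_sketch(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct cpufreq_policy *policy;

	/* Create sysfs link on CPU registration */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy)
		add_cpu_dev_symlink(policy, cpu, dev);	/* pass dev, don't re-lookup via get_cpu_device(cpu) */

	return 0;
}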
Signed-off-by: Xiongfeng Wang <wangxiongfeng2@huawei.com>
Reviewed-by: Keqian Zhu <zhukeqian1@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 drivers/cpufreq/cpufreq.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index e35c397b1259f..99ca9c50a88f3 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -965,10 +965,9 @@ static struct kobj_type ktype_cpufreq = {
 	.release	= cpufreq_sysfs_release,
 };

-static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
+static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
+				struct device *dev)
 {
-	struct device *dev = get_cpu_device(cpu);
-
 	if (!dev)
 		return;

@@ -1241,7 +1240,7 @@ static int cpufreq_online(unsigned int cpu)

 		for_each_cpu(j, policy->related_cpus) {
 			per_cpu(cpufreq_cpu_data, j) = policy;
-			add_cpu_dev_symlink(policy, j);
+			add_cpu_dev_symlink(policy, j, get_cpu_device(j));
 		}
 	} else {
 		policy->min = policy->user_policy.min;
@@ -1366,7 +1365,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 	/* Create sysfs link on CPU registration */
 	policy = per_cpu(cpufreq_cpu_data, cpu);
 	if (policy)
-		add_cpu_dev_symlink(policy, cpu);
+		add_cpu_dev_symlink(policy, cpu, dev);

 	return 0;
 }
From: Pavel Begunkov <asml.silence@gmail.com>
mainline inclusion
from mainline-v5.13-rc1
commit f70865db5ff35f5ed0c7e9ef63e7cca3d4947f04
category: bugfix
bugzilla: 185739
CVE: NA
-----------------------------------------------
This is a revert of the revert of "io_uring: wait potential ->release() on resurrect", which adds a helper so that resurrecting the refs does not race with the completion reinit. The original patch was removed because of a strange bug with no clear root cause or link to the patch.
It has been improved: instead of rcu_synchronize(), just use wait_for_completion(), because we are already at 0 refs and it will complete very shortly. Specifically, use the non-interruptible version to ignore any pending signals that may have ended the prior interruptible wait.
This reverts commit cb5e1b81304e089ee3ca948db4d29f71902eb575.
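For context, a minimal sketch of the underlying percpu_ref pattern the helper relies on (a standalone illustrative form with made-up names, not the io_uring code itself): percpu_ref_tryget() fails once the refcount has dropped to zero, and in that case ->release() will signal the completion, so waiting on the completion before percpu_ref_resurrect() guarantees ->release() has finished.

#include <linux/percpu-refcount.h>
#include <linux/completion.h>

static void resurrect_after_release(struct percpu_ref *ref, struct completion *compl)
{
	bool got = percpu_ref_tryget(ref);

	if (!got)
		wait_for_completion(compl);	/* refs hit zero: wait for ->release() to finish */
	percpu_ref_resurrect(ref);
	if (got)
		percpu_ref_put(ref);		/* drop the temporary reference from tryget */
}

This is the same shape as the io_refs_resurrect() helper added by the diff below.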
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/7a080c20f686d026efade810b116b72f88abaff9.161810175...
Signed-off-by: Jens Axboe <axboe@kernel.dk>
conflicts: fs/io_uring.c
Signed-off-by: Ye Bin <yebin10@huawei.com>
Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 fs/io_uring.c | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index da61eeaf64e88..d07388600bbed 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -8669,6 +8669,18 @@ static bool io_register_op_must_quiesce(int op)
 	}
 }

+static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
+{
+	bool got = percpu_ref_tryget(ref);
+
+	/* already at zero, wait for ->release() */
+	if (!got)
+		wait_for_completion(compl);
+	percpu_ref_resurrect(ref);
+	if (got)
+		percpu_ref_put(ref);
+}
+
 static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 			       void __user *arg, unsigned nr_args)
 	__releases(ctx->uring_lock)
@@ -8699,9 +8711,8 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 		ret = wait_for_completion_interruptible(&ctx->ref_comp);
 		mutex_lock(&ctx->uring_lock);
 		if (ret) {
-			percpu_ref_resurrect(&ctx->refs);
-			ret = -EINTR;
-			goto out;
+			io_refs_resurrect(&ctx->refs, &ctx->ref_comp);
+			return ret;
 		}
 	}

@@ -8772,7 +8783,6 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 	if (io_register_op_must_quiesce(opcode)) {
 		/* bring the ctx back to life */
 		percpu_ref_reinit(&ctx->refs);
-out:
 		reinit_completion(&ctx->ref_comp);
 	}
 	return ret;