From: Shuai Xue <xueshuai@linux.alibaba.com>
maillist inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I9GCZS
CVE: NA
Reference: https://lore.kernel.org/lkml/20240204080144.7977-4-xueshuai@linux.alibaba.co...
--------------------------------
Hardware errors can be signaled by an asynchronous interrupt, e.g. when an error is detected by a background scrubber, or by a synchronous exception, e.g. when a CPU tries to access a poisoned cache line. Since commit a70297d22132 ("ACPI: APEI: set memory failure flags as MF_ACTION_REQUIRED on synchronous events"), the MF_ACTION_REQUIRED flag can be used to determine whether a synchronous exception occurred on the ARM64 platform. When a synchronous exception is detected, the kernel should terminate the current process that is accessing the poisoned page. This is done by sending a SIGBUS signal with error code BUS_MCEERR_AR, indicating an action-required machine check error on read.
However, memory failure recovery incorrectly sends a SIGBUS with the wrong error code BUS_MCEERR_AO for synchronous errors in early kill mode, even though MF_ACTION_REQUIRED is set. The main problem is that synchronous errors are queued as memory_failure() work, which is executed in a kernel thread context rather than in the user-space process that encountered the corrupted memory on the ARM64 platform. As a result, when kill_proc() is called to terminate the process, it sends the incorrect SIGBUS error code because the context in which it operates is not the one in which the error was triggered.
To fix this, queue memory_failure() as a task_work so that it runs in the context of the process that is actually consuming the poisoned data; it will then send SIGBUS with si_code BUS_MCEERR_AR.
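For reference, the sketch below illustrates the task_work pattern the fix relies on. It reuses the sync_task_work and memory_failure_cb names from the diff, but the helper queue_sync_memory_failure() and the kzalloc()/kfree() allocation (instead of the ghes_estatus_pool) are simplifications for illustration only, not the actual GHES code:

  #include <linux/kernel.h>
  #include <linux/mm.h>
  #include <linux/slab.h>
  #include <linux/task_work.h>

  struct sync_task_work {
  	struct callback_head twork;
  	u64 pfn;
  	int flags;
  };

  /*
   * Runs via task_work just before the faulting task returns to user
   * space, so memory_failure() is called while current is the task
   * that consumed the poison and kill_proc() reports BUS_MCEERR_AR.
   */
  static void memory_failure_cb(struct callback_head *twork)
  {
  	struct sync_task_work *twcb =
  		container_of(twork, struct sync_task_work, twork);

  	memory_failure(twcb->pfn, twcb->flags);
  	kfree(twcb);
  }

  /*
   * Illustrative helper (not in the patch): called from the error
   * handler while current is still the task that hit the poisoned page.
   */
  static int queue_sync_memory_failure(u64 pfn, int flags)
  {
  	struct sync_task_work *twcb = kzalloc(sizeof(*twcb), GFP_ATOMIC);

  	if (!twcb)
  		return -ENOMEM;

  	twcb->pfn = pfn;
  	twcb->flags = flags;
  	init_task_work(&twcb->twork, memory_failure_cb);
  	return task_work_add(current, &twcb->twork, TWA_RESUME);
  }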
Signed-off-by: Shuai Xue <xueshuai@linux.alibaba.com>
Tested-by: Ma Wupeng <mawupeng1@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Xiaofei Tan <tanxiaofei@huawei.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
conflicts: drivers/acpi/apei/ghes.c
Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
---
 drivers/acpi/apei/ghes.c | 77 +++++++++++++++++++++++-----------------
 include/acpi/ghes.h      |  3 --
 mm/memory-failure.c      | 13 -------
 3 files changed, 44 insertions(+), 49 deletions(-)
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 2136d6072fa0..eab8cb40b601 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -464,28 +464,41 @@ static void ghes_clear_estatus(struct ghes *ghes,
 }
 
 /*
- * Called as task_work before returning to user-space.
- * Ensure any queued work has been done before we return to the context that
- * triggered the notification.
+ * struct sync_task_work - for synchronous RAS event
+ *
+ * @twork: callback_head for task work
+ * @pfn: page frame number of corrupted page
+ * @flags: fine tune action taken
+ *
+ * Structure to pass task work to be handled before
+ * ret_to_user via task_work_add().
  */
-static void ghes_kick_task_work(struct callback_head *head)
+struct sync_task_work {
+	struct callback_head twork;
+	u64 pfn;
+	int flags;
+};
+
+static void memory_failure_cb(struct callback_head *twork)
 {
-	struct acpi_hest_generic_status *estatus;
-	struct ghes_estatus_node *estatus_node;
-	u32 node_len;
+	int ret;
+	struct sync_task_work *twcb =
+		container_of(twork, struct sync_task_work, twork);
 
-	estatus_node = container_of(head, struct ghes_estatus_node, task_work);
-	if (IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
-		memory_failure_queue_kick(estatus_node->task_work_cpu);
+	ret = memory_failure(twcb->pfn, twcb->flags);
+	gen_pool_free(ghes_estatus_pool, (unsigned long)twcb, sizeof(*twcb));
 
-	estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
-	node_len = GHES_ESTATUS_NODE_LEN(cper_estatus_len(estatus));
-	gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len);
+	if (!ret || ret == -EHWPOISON || ret == -EOPNOTSUPP)
+		return;
+
+	pr_err("Sending SIGBUS to current task due to memory error not recovered");
+	force_sig(SIGBUS);
 }
 
 static bool ghes_do_memory_failure(u64 physical_addr, int flags)
 {
 	unsigned long pfn;
+	struct sync_task_work *twcb;
 
 	if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
 		return false;
@@ -498,6 +511,18 @@ static bool ghes_do_memory_failure(u64 physical_addr, int flags)
 		return false;
 	}
 
+	if (flags == MF_ACTION_REQUIRED && current->mm) {
+		twcb = (void *)gen_pool_alloc(ghes_estatus_pool, sizeof(*twcb));
+		if (!twcb)
+			return false;
+
+		twcb->pfn = pfn;
+		twcb->flags = flags;
+		init_task_work(&twcb->twork, memory_failure_cb);
+		task_work_add(current, &twcb->twork, TWA_RESUME);
+		return true;
+	}
+
 	memory_failure_queue(pfn, flags);
 	return true;
 }
@@ -676,7 +701,7 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
 	schedule_work(&entry->work);
 }
 
-static bool ghes_do_proc(struct ghes *ghes,
+static void ghes_do_proc(struct ghes *ghes,
 			 const struct acpi_hest_generic_status *estatus)
 {
 	int sev, sec_sev;
@@ -734,8 +759,6 @@ static bool ghes_do_proc(struct ghes *ghes,
 		pr_err("Sending SIGBUS to current task due to memory error not recovered");
 		force_sig(SIGBUS);
 	}
-
-	return queued;
 }
 
 static void __ghes_print_estatus(const char *pfx,
@@ -1037,9 +1060,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
 	struct ghes_estatus_node *estatus_node;
 	struct acpi_hest_generic *generic;
 	struct acpi_hest_generic_status *estatus;
-	bool task_work_pending;
 	u32 len, node_len;
-	int ret;
 
 	llnode = llist_del_all(&ghes_estatus_llist);
 	/*
@@ -1054,25 +1075,16 @@
 		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
 		len = cper_estatus_len(estatus);
 		node_len = GHES_ESTATUS_NODE_LEN(len);
-		task_work_pending = ghes_do_proc(estatus_node->ghes, estatus);
+
+		ghes_do_proc(estatus_node->ghes, estatus);
+
 		if (!ghes_estatus_cached(estatus)) {
 			generic = estatus_node->generic;
 			if (ghes_print_estatus(NULL, generic, estatus))
 				ghes_estatus_cache_add(generic, estatus);
 		}
-
-		if (task_work_pending && current->mm) {
-			estatus_node->task_work.func = ghes_kick_task_work;
-			estatus_node->task_work_cpu = smp_processor_id();
-			ret = task_work_add(current, &estatus_node->task_work,
-					    TWA_RESUME);
-			if (ret)
-				estatus_node->task_work.func = NULL;
-		}
-
-		if (!estatus_node->task_work.func)
-			gen_pool_free(ghes_estatus_pool,
-				      (unsigned long)estatus_node, node_len);
+		gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
+			      node_len);
 
 		llnode = next;
 	}
@@ -1133,7 +1145,6 @@ static int ghes_in_nmi_queue_one_entry(struct ghes *ghes,
 
 	estatus_node->ghes = ghes;
 	estatus_node->generic = ghes->generic;
-	estatus_node->task_work.func = NULL;
 	estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
 
 	if (__ghes_read_estatus(estatus, buf_paddr, fixmap_idx, len)) {
diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
index be1dd4c1a917..ebd21b05fe6e 100644
--- a/include/acpi/ghes.h
+++ b/include/acpi/ghes.h
@@ -35,9 +35,6 @@ struct ghes_estatus_node {
 	struct llist_node llnode;
 	struct acpi_hest_generic *generic;
 	struct ghes *ghes;
-
-	int task_work_cpu;
-	struct callback_head task_work;
 };
 
 struct ghes_estatus_cache {
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 40f6b4d540b2..d710f6c55080 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2462,19 +2462,6 @@ static void memory_failure_work_func(struct work_struct *work)
 	}
 }
 
-/*
- * Process memory_failure work queued on the specified CPU.
- * Used to avoid return-to-userspace racing with the memory_failure workqueue.
- */
-void memory_failure_queue_kick(int cpu)
-{
-	struct memory_failure_cpu *mf_cpu;
-
-	mf_cpu = &per_cpu(memory_failure_cpu, cpu);
-	cancel_work_sync(&mf_cpu->work);
-	memory_failure_work_func(&mf_cpu->work);
-}
-
 static int __init memory_failure_init(void)
 {
 	struct memory_failure_cpu *mf_cpu;