hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IBFZSL
--------------------------------
When there are two kretprobes on one stack and the stack is unwound
in the top kretprobe handler, there will be an AA deadlock as below:
kretprobe_find_ret_addr <- try to lock hash lock
unwind_stack
kretp_handler
__kretprobe_trampoline_handler <- holding hash lock
1 trampoline_probe_handler
2 kretprobe_trampoline+0 <- unwinding this frame
...
Fix this by skipping the lock when the unwinding task is current: the
hash table entries for the current task can't be changed during
unwinding, so there is no need to lock the hash bucket.
Fixes: 88fef946364b ("kprobes: Add kretprobe_find_ret_addr() for searching return address")
Signed-off-by: Chen Zhongjin <chenzhongjin(a)huawei.com>
---
kernel/kprobes.c | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 5d64d97975ba..9df872b3818a 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1995,7 +1995,11 @@ unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp)
unsigned long flags;
kprobe_opcode_t *correct_ret_addr = NULL;
- kretprobe_hash_lock(tsk, &head, &flags);
+ if (tsk != current)
+ kretprobe_hash_lock(tsk, &head, &flags);
+ else
+ head = &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
+
hlist_for_each_entry(ri, head, hlist) {
if (ri->task != tsk)
continue;
@@ -2006,7 +2010,8 @@ unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp)
break;
}
}
- kretprobe_hash_unlock(tsk, &flags);
+ if (tsk != current)
+ kretprobe_hash_unlock(tsk, &flags);
return (unsigned long)correct_ret_addr;
}
NOKPROBE_SYMBOL(kretprobe_find_ret_addr);
--
2.25.1
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IBFZSL
--------------------------------
When there are two kretprobes on one stack and the stack is unwound
in the top kretprobe handler, there will be an AA deadlock as below:
kretprobe_find_ret_addr <- try to lock hash lock
unwind_stack
kretp_handler
__kretprobe_trampoline_handler <- holding hash lock
1 trampoline_probe_handler
2 kretprobe_trampoline+0 <- unwinding this frame
...
Fix this by skipping the lock when the unwinding task is current: the
hash table entries for the current task can't be changed during
unwinding, so there is no need to lock the hash bucket.
Fixes: b67815b05d67 ("[Backport] kprobes: Add kretprobe_find_ret_addr() for searching return address")
Signed-off-by: Chen Zhongjin <chenzhongjin(a)huawei.com>
---
kernel/kprobes.c | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 5d64d97975ba..9df872b3818a 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1995,7 +1995,11 @@ unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp)
unsigned long flags;
kprobe_opcode_t *correct_ret_addr = NULL;
- kretprobe_hash_lock(tsk, &head, &flags);
+ if (tsk != current)
+ kretprobe_hash_lock(tsk, &head, &flags);
+ else
+ head = &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
+
hlist_for_each_entry(ri, head, hlist) {
if (ri->task != tsk)
continue;
@@ -2006,7 +2010,8 @@ unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp)
break;
}
}
- kretprobe_hash_unlock(tsk, &flags);
+ if (tsk != current)
+ kretprobe_hash_unlock(tsk, &flags);
return (unsigned long)correct_ret_addr;
}
NOKPROBE_SYMBOL(kretprobe_find_ret_addr);
--
2.25.1
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IBFZSL
--------------------------------
When there are two kretprobes on one stack and the stack is unwound
in the top kretprobe handler, there will be an AA deadlock as below:
kretprobe_find_ret_addr <- try to lock hash lock
unwind_stack
kretp_handler
__kretprobe_trampoline_handler <- holding hash lock
1 trampoline_probe_handler
2 kretprobe_trampoline+0 <- unwinding this frame
...
Fix this by skipping the lock when the unwinding task is current: the
hash table entries for the current task can't be changed during
unwinding, so there is no need to lock the hash bucket.
Fixes: b67815b05d67 ("[Backport] kprobes: Add kretprobe_find_ret_addr() for searching return address")
Signed-off-by: Chen Zhongjin <chenzhongjin(a)huawei.com>
---
kernel/kprobes.c | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 5d64d97975ba..9df872b3818a 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1995,7 +1995,11 @@ unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp)
unsigned long flags;
kprobe_opcode_t *correct_ret_addr = NULL;
- kretprobe_hash_lock(tsk, &head, &flags);
+ if (tsk != current)
+ kretprobe_hash_lock(tsk, &head, &flags);
+ else
+ head = &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
+
hlist_for_each_entry(ri, head, hlist) {
if (ri->task != tsk)
continue;
@@ -2006,7 +2010,8 @@ unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp)
break;
}
}
- kretprobe_hash_unlock(tsk, &flags);
+ if (tsk != current)
+ kretprobe_hash_unlock(tsk, &flags);
return (unsigned long)correct_ret_addr;
}
NOKPROBE_SYMBOL(kretprobe_find_ret_addr);
--
2.25.1
From: Oliver Upton <oliver.upton(a)linux.dev>
stable inclusion
from stable-v6.6.66
commit ea6b5d98fea4ee8cb443ea98fda520909e90d30e
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IBEAG3
CVE: CVE-2024-53196
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=ea6b5d98fea4ee8cb443ea98fda520909e90d30e
--------------------------------
[ Upstream commit e735a5da64420a86be370b216c269b5dd8e830e2 ]
Returning an abort to the guest for an unsupported MMIO access is a
documented feature of the KVM UAPI. Nevertheless, it's clear that this
plumbing has seen limited testing, since userspace can trivially cause a
WARN in the MMIO return:
WARNING: CPU: 0 PID: 30558 at arch/arm64/include/asm/kvm_emulate.h:536 kvm_handle_mmio_return+0x46c/0x5c4 arch/arm64/include/asm/kvm_emulate.h:536
Call trace:
kvm_handle_mmio_return+0x46c/0x5c4 arch/arm64/include/asm/kvm_emulate.h:536
kvm_arch_vcpu_ioctl_run+0x98/0x15b4 arch/arm64/kvm/arm.c:1133
kvm_vcpu_ioctl+0x75c/0xa78 virt/kvm/kvm_main.c:4487
__do_sys_ioctl fs/ioctl.c:51 [inline]
__se_sys_ioctl fs/ioctl.c:893 [inline]
__arm64_sys_ioctl+0x14c/0x1c8 fs/ioctl.c:893
__invoke_syscall arch/arm64/kernel/syscall.c:35 [inline]
invoke_syscall+0x98/0x2b8 arch/arm64/kernel/syscall.c:49
el0_svc_common+0x1e0/0x23c arch/arm64/kernel/syscall.c:132
do_el0_svc+0x48/0x58 arch/arm64/kernel/syscall.c:151
el0_svc+0x38/0x68 arch/arm64/kernel/entry-common.c:712
el0t_64_sync_handler+0x90/0xfc arch/arm64/kernel/entry-common.c:730
el0t_64_sync+0x190/0x194 arch/arm64/kernel/entry.S:598
The splat is complaining that KVM is advancing PC while an exception is
pending, i.e. that KVM is retiring the MMIO instruction despite a
pending synchronous external abort. Womp womp.
Fix the glaring UAPI bug by skipping over all the MMIO emulation in
case there is a pending synchronous exception. Note that while userspace
is capable of pending an asynchronous exception (SError, IRQ, or FIQ),
it is still safe to retire the MMIO instruction in this case as (1) they
are by definition asynchronous, and (2) KVM relies on hardware support
for pending/delivering these exceptions instead of the software state
machine for advancing PC.
Cc: stable(a)vger.kernel.org
Fixes: da345174ceca ("KVM: arm/arm64: Allow user injection of external data aborts")
Reported-by: Alexander Potapenko <glider(a)google.com>
Reviewed-by: Marc Zyngier <maz(a)kernel.org>
Link: https://lore.kernel.org/r/20241025203106.3529261-2-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton(a)linux.dev>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Conflicts:
arch/arm64/kvm/mmio.c
[ different return polarity introduced by this commit not merged:
cc81b6dfc3bc8 ("KVM: arm64: Change kvm_handle_mmio_return() return polarity") ]
Signed-off-by: Zhang Kunbo <zhangkunbo(a)huawei.com>
---
arch/arm64/kvm/mmio.c | 32 ++++++++++++++++++++++++++++++--
1 file changed, 30 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kvm/mmio.c b/arch/arm64/kvm/mmio.c
index 40d6d056e261..bf5de05cb7a8 100644
--- a/arch/arm64/kvm/mmio.c
+++ b/arch/arm64/kvm/mmio.c
@@ -73,6 +73,31 @@ unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
return data;
}
+static bool kvm_pending_sync_exception(struct kvm_vcpu *vcpu)
+{
+ if (!vcpu_get_flag(vcpu, PENDING_EXCEPTION))
+ return false;
+
+ if (vcpu_el1_is_32bit(vcpu)) {
+ switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
+ case unpack_vcpu_flag(EXCEPT_AA32_UND):
+ case unpack_vcpu_flag(EXCEPT_AA32_IABT):
+ case unpack_vcpu_flag(EXCEPT_AA32_DABT):
+ return true;
+ default:
+ return false;
+ }
+ } else {
+ switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) {
+ case unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC):
+ case unpack_vcpu_flag(EXCEPT_AA64_EL2_SYNC):
+ return true;
+ default:
+ return false;
+ }
+ }
+}
+
/**
* kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
* or in-kernel IO emulation
@@ -85,8 +110,11 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu)
unsigned int len;
int mask;
- /* Detect an already handled MMIO return */
- if (unlikely(!vcpu->mmio_needed))
+ /*
+ * Detect if the MMIO return was already handled or if userspace aborted
+ * the MMIO access.
+ */
+ if (unlikely(!vcpu->mmio_needed || kvm_pending_sync_exception(vcpu)))
return 0;
vcpu->mmio_needed = 0;
--
2.34.1