From: Huacai Chen <chenhuacai@loongson.cn>
stable inclusion
from stable-v6.5-rc4
commit 03c53eb90c0c61885b2175adf8675fb56df7f8db
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I80YEI
CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=03c53eb90c0c61885b2175adf8675fb56df7f8db
---------------------------
Binutils 2.41 enables linker relaxation by default, but the kernel
module loader doesn't support that, so just disable it. Otherwise we
get such an error when loading modules:
"Unknown relocation type 102"
As an alternative, we could add linker relaxation support in the kernel
module loader. But that involves relatively large complexity that may or
may not bring a similar gain, and we don't really want to include this
linker pass in the kernel.
Reviewed-by: WANG Xuerui <git@xen0n.name>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
---
arch/loongarch/Makefile | 2 ++
1 file changed, 2 insertions(+)
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
index 345dc10576d4..a0f194da592b 100644
--- a/arch/loongarch/Makefile
+++ b/arch/loongarch/Makefile
@@ -55,6 +55,8 @@ LDFLAGS_vmlinux += -G0 -static -n -nostdlib
ifdef CONFIG_AS_HAS_EXPLICIT_RELOCS
cflags-y += -mexplicit-relocs
KBUILD_CFLAGS_KERNEL += -mdirect-extern-access
+KBUILD_AFLAGS_MODULE += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)
+KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)
else
cflags-y += $(call cc-option,-mno-explicit-relocs)
KBUILD_AFLAGS_KERNEL += -Wa,-mla-global-with-pcrel
--
2.33.0
From: Jens Axboe <axboe@kernel.dk>
stable inclusion
from stable-v5.10.188
commit 810e401b34c4c4c244d8b93b9947ea5b3d4d49f8
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I7KXLN
CVE: CVE-2023-21400
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=810e401b34c4c4c244d8b93b9947ea5b3d4d49f8
--------------------------------
No direct upstream commit exists for this issue. It was fixed in
5.18 as part of a larger rework of the completion side.
io_commit_cqring() writes the CQ ring tail to make it visible, but it
also kicks off any deferred work we have. A ring setup with IOPOLL
does not need any locking around the CQ ring updates, as we're always
under the ctx uring_lock. But if we have deferred work that needs
processing, then io_queue_deferred() assumes that the completion_lock
is held, as it is for !IOPOLL.
Add a lockdep assertion to check and document this fact, and have
io_iopoll_complete() check if we have deferred work and run that
separately with the appropriate lock grabbed.
Cc: stable@vger.kernel.org # 5.10, 5.15
Reported-by: dghost david <daviduniverse18@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Lin Yujun <linyujun809@huawei.com>
Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com>
---
io_uring/io_uring.c | 25 +++++++++++++++++++++----
1 file changed, 21 insertions(+), 4 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 3d35f5d13666..781af0b05d8c 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1521,6 +1521,8 @@ static void io_kill_timeout(struct io_kiocb *req, int status)
static void io_queue_deferred(struct io_ring_ctx *ctx)
{
+ lockdep_assert_held(&ctx->completion_lock);
+
while (!list_empty(&ctx->defer_list)) {
struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
struct io_defer_entry, list);
@@ -1572,14 +1574,24 @@ static void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
io_queue_deferred(ctx);
}
-static inline void io_commit_cqring(struct io_ring_ctx *ctx)
+static inline bool io_commit_needs_flush(struct io_ring_ctx *ctx)
+{
+ return ctx->off_timeout_used || ctx->drain_active;
+}
+
+static inline void __io_commit_cqring(struct io_ring_ctx *ctx)
{
- if (unlikely(ctx->off_timeout_used || ctx->drain_active))
- __io_commit_cqring_flush(ctx);
/* order cqe stores with ring update */
smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}
+static inline void io_commit_cqring(struct io_ring_ctx *ctx)
+{
+ if (unlikely(io_commit_needs_flush(ctx)))
+ __io_commit_cqring_flush(ctx);
+ __io_commit_cqring(ctx);
+}
+
static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
struct io_rings *r = ctx->rings;
@@ -2509,7 +2521,12 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
io_req_free_batch(&rb, req, &ctx->submit_state);
}
- io_commit_cqring(ctx);
+ if (io_commit_needs_flush(ctx)) {
+ spin_lock(&ctx->completion_lock);
+ __io_commit_cqring_flush(ctx);
+ spin_unlock(&ctx->completion_lock);
+ }
+ __io_commit_cqring(ctx);
io_cqring_ev_posted_iopoll(ctx);
io_req_free_batch_finish(ctx, &rb);
}
--
2.31.1