From: Laibin Qiu <qiulaibin@huawei.com>
hulk inclusion
category: bugfix
bugzilla: 186352, https://gitee.com/openeuler/kernel/issues/I4YADX
CVE: NA
--------------------------------
In the BLK_MQ_F_BLOCKING case, a per-hctx srcu structure is used to protect the dispatch critical section. However, blk_mq_alloc_hctx() does not notice when the memory allocation inside init_srcu_struct() fails, which later leads to an illegal-address BUG when the uninitialised srcu structure is used. Add return-value checking to avoid this problem. (A minimal standalone sketch of this error-handling pattern follows the diff below.)
Signed-off-by: Laibin Qiu <qiulaibin@huawei.com>
Reviewed-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 block/blk-mq.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c3beaca1f4fb..9ae1663348ac 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2852,12 +2852,16 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
 	if (!hctx->fq)
 		goto free_bitmap;
 
-	if (hctx->flags & BLK_MQ_F_BLOCKING)
-		init_srcu_struct(hctx->srcu);
+	if (hctx->flags & BLK_MQ_F_BLOCKING) {
+		if (init_srcu_struct(hctx->srcu) != 0)
+			goto free_flush_queue;
+	}
 	blk_mq_hctx_kobj_init(hctx);
 
 	return hctx;
 
+ free_flush_queue:
+	blk_free_flush_queue(hctx->fq);
  free_bitmap:
 	sbitmap_free(&hctx->ctx_map);
  free_ctxs:
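For readers following the fix above, the standalone sketch below illustrates the general pattern of checking the return value of init_srcu_struct() and unwinding earlier allocations on failure. It is illustrative only: struct my_ctx, my_ctx_alloc() and the allocation sizes are hypothetical and are not taken from blk-mq.

/*
 * Minimal sketch, assuming a context object that owns an SRCU structure
 * plus one earlier allocation.  init_srcu_struct() allocates per-CPU data
 * and can fail with -ENOMEM, so its return value must be checked and the
 * earlier allocations must be freed on the error path.
 */
#include <linux/srcu.h>
#include <linux/slab.h>

struct my_ctx {
	struct srcu_struct srcu;
	void *fq;			/* stands in for the flush queue */
};

static struct my_ctx *my_ctx_alloc(gfp_t gfp)
{
	struct my_ctx *ctx = kzalloc(sizeof(*ctx), gfp);

	if (!ctx)
		return NULL;

	ctx->fq = kzalloc(64, gfp);	/* earlier allocation to unwind */
	if (!ctx->fq)
		goto free_ctx;

	if (init_srcu_struct(&ctx->srcu) != 0)	/* the check the patch adds */
		goto free_fq;

	return ctx;

free_fq:
	kfree(ctx->fq);
free_ctx:
	kfree(ctx);
	return NULL;
}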
From: Yu Kuai <yukuai3@huawei.com>
hulk inclusion
category: bugfix
bugzilla: 186389, https://gitee.com/openeuler/kernel/issues/I4Y43S
CVE: NA
--------------------------------
blk_mq_realloc_hw_ctxs() frees the old 'queue_hw_ctx' array (e.g. when submit_queues is updated through configfs for null_blk) while it might still be in use from another context (e.g. while switching the elevator to none):
t1:                                        t2:
elevator_switch
  blk_mq_unquiesce_queue
    blk_mq_run_hw_queues
      queue_for_each_hw_ctx
        // assembly code for hctx = (q)->queue_hw_ctx[i]
        mov    0x48(%rbp),%rdx    -> read old queue_hw_ctx
                                           __blk_mq_update_nr_hw_queues
                                             blk_mq_realloc_hw_ctxs
                                               hctxs = q->queue_hw_ctx
                                               q->queue_hw_ctx = new_hctxs
                                               kfree(hctxs)
        movslq %ebx,%rax
        mov    (%rdx,%rax,8),%rdi -> uaf
Since the queue is frozen in __blk_mq_update_nr_hw_queues(), fix the problem by protecting 'queue_hw_ctx' with RCU on the paths that can access it without grabbing 'q_usage_counter'. (A standalone sketch of this RCU pattern follows the diff below.)
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Zhang Wensheng <zhangwensheng5@huawei.com>
Reviewed-by: Jason Yan <yanaijie@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 block/blk-mq.c         | 10 +++++++++-
 include/linux/blk-mq.h | 13 ++++++++++++-
 include/linux/blkdev.h |  2 +-
 3 files changed, 22 insertions(+), 3 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9ae1663348ac..cedc355218db 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3280,7 +3280,15 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 		if (hctxs)
 			memcpy(new_hctxs, hctxs, q->nr_hw_queues *
 			       sizeof(*hctxs));
-		q->queue_hw_ctx = new_hctxs;
+
+		rcu_assign_pointer(q->queue_hw_ctx, new_hctxs);
+		/*
+		 * Make sure that reading the old queue_hw_ctx from another
+		 * context concurrently won't trigger a uaf.  At startup time
+		 * there is no old array, so there is no need to sync rcu.
+		 */
+		if (hctxs)
+			synchronize_rcu();
 		kfree(hctxs);
 		hctxs = new_hctxs;
 	}
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index b2db9a5c10e8..c9210fb70e4d 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -612,9 +612,20 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
 	return rq + 1;
 }
 
+static inline struct blk_mq_hw_ctx *queue_hctx(struct request_queue *q, int id)
+{
+	struct blk_mq_hw_ctx *hctx;
+
+	rcu_read_lock();
+	hctx = *(rcu_dereference(q->queue_hw_ctx) + id);
+	rcu_read_unlock();
+
+	return hctx;
+}
+
 #define queue_for_each_hw_ctx(q, hctx, i)				\
 	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
-	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
+	     ({ hctx = queue_hctx((q), i); 1; }); (i)++)
 
 #define hctx_for_each_ctx(hctx, ctx, i)					\
 	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 433485f8b1cc..23dfe7608e79 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -421,7 +421,7 @@ struct request_queue {
 	unsigned int		queue_depth;
 
 	/* hw dispatch queues */
-	struct blk_mq_hw_ctx	**queue_hw_ctx;
+	struct blk_mq_hw_ctx	__rcu **queue_hw_ctx;
 	unsigned int		nr_hw_queues;
 
 	struct backing_dev_info	*backing_dev_info;
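The standalone sketch below shows the RCU pattern the fix above relies on: readers dereference the array pointer inside an rcu_read_lock() section, while the updater publishes the new array with rcu_assign_pointer() and waits for a grace period with synchronize_rcu() before freeing the old one. It is illustrative only: item_array, get_item() and resize_items() are hypothetical names, not the blk-mq code.

/*
 * Minimal sketch, assuming a global RCU-protected pointer array.
 * As in blk-mq, the lifetime of the pointed-to objects themselves is
 * guaranteed by other means; RCU only protects the array of pointers.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>

struct item;
static struct item __rcu **item_array;	/* hypothetical RCU-protected array */

/* Reader: safe against a concurrent realloc of the array. */
static struct item *get_item(int i)
{
	struct item *it;

	rcu_read_lock();
	it = *(rcu_dereference(item_array) + i);
	rcu_read_unlock();

	return it;
}

/* Updater: publish the new array, then wait before freeing the old one. */
static int resize_items(int old_nr, int new_nr, gfp_t gfp)
{
	struct item **old_arr = rcu_dereference_protected(item_array, true);
	struct item **new_arr = kcalloc(new_nr, sizeof(*new_arr), gfp);

	if (!new_arr)
		return -ENOMEM;

	if (old_arr)
		memcpy(new_arr, old_arr, old_nr * sizeof(*old_arr));

	rcu_assign_pointer(item_array, new_arr);
	if (old_arr)
		synchronize_rcu();	/* all readers of old_arr are done */
	kfree(old_arr);
	return 0;
}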
From: Trond Myklebust <trond.myklebust@hammerspace.com>
mainline inclusion
from mainline-v5.17-rc5
commit e0caaf75d443e02e55e146fd75fe2efc8aed5540
category: bugfix
bugzilla: 186205 https://gitee.com/openeuler/kernel/issues/I4YQRW
CVE: CVE-2022-24448
--------------------------------
Commit ac795161c936 (NFSv4: Handle case where the lookup of a directory fails) [1], part of Linux since 5.17-rc2, introduced a regression where a symbolic link on an NFS mount that points to a directory on another NFS mount does not resolve the first time it is accessed. Fix this by also accepting symbolic links in the LOOKUP_DIRECTORY checks in nfs_atomic_open(): the VFS will follow the link and reject a non-directory target itself. (A minimal sketch of the corrected check follows the diff below.)
Reported-by: Paul Menzel <pmenzel@molgen.mpg.de>
Fixes: ac795161c936 ("NFSv4: Handle case where the lookup of a directory fails")
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Tested-by: Donald Buczek <buczek@molgen.mpg.de>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Signed-off-by: ChenXiaoSong <chenxiaosong2@huawei.com>
Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 fs/nfs/dir.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 1276437b48de..8428e4135b0d 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1780,14 +1780,14 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
 	if (!res) {
 		inode = d_inode(dentry);
 		if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
-		    !S_ISDIR(inode->i_mode))
+		    !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)))
 			res = ERR_PTR(-ENOTDIR);
 		else if (inode && S_ISREG(inode->i_mode))
 			res = ERR_PTR(-EOPENSTALE);
 	} else if (!IS_ERR(res)) {
 		inode = d_inode(res);
 		if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
-		    !S_ISDIR(inode->i_mode)) {
+		    !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) {
 			dput(res);
 			res = ERR_PTR(-ENOTDIR);
 		} else if (inode && S_ISREG(inode->i_mode)) {
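As an illustration of the change above (not part of the patch), the corrected condition can be read as the predicate below; the helper name is hypothetical and only makes the intent of the check explicit.

#include <linux/stat.h>
#include <linux/types.h>

/*
 * Hypothetical helper equivalent to the corrected condition in
 * nfs_atomic_open(): for a LOOKUP_DIRECTORY lookup, a directory is
 * obviously acceptable, and so is a symlink, because the VFS will
 * follow the link and reject a non-directory target on its own.
 */
static inline bool nfs_lookup_directory_ok(umode_t mode)
{
	return S_ISDIR(mode) || S_ISLNK(mode);
}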
From: Jamie Hill-Daniel <jamie@hill-daniel.co.uk>
mainline inclusion
from mainline-v5.17-rc1
commit 722d94847de29310e8aa03fcbdb41fc92c521756
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4YD3S
CVE: CVE-2022-0185
--------------------------------
The "PAGE_SIZE - 2 - size" calculation in legacy_parse_param() is an unsigned type so a large value of "size" results in a high positive value instead of a negative value as expected. Fix this by getting rid of the subtraction.
Signed-off-by: Jamie Hill-Daniel <jamie@hill-daniel.co.uk>
Signed-off-by: William Liu <willsroot@protonmail.com>
Tested-by: Salvatore Bonaccorso <carnil@debian.org>
Tested-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Acked-by: Dan Carpenter <dan.carpenter@oracle.com>
Acked-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Luo Meng <luomeng12@huawei.com>
Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 fs/fs_context.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/fs/fs_context.c b/fs/fs_context.c
index b7e43a780a62..24ce12f0db32 100644
--- a/fs/fs_context.c
+++ b/fs/fs_context.c
@@ -548,7 +548,7 @@ static int legacy_parse_param(struct fs_context *fc, struct fs_parameter *param
 			      param->key);
 	}
 
-	if (len > PAGE_SIZE - 2 - size)
+	if (size + len + 2 > PAGE_SIZE)
 		return invalf(fc, "VFS: Legacy: Cumulative options too large");
 	if (strchr(param->key, ',') ||
 	    (param->type == fs_value_is_string &&
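To see why the old check could be bypassed, the small userspace sketch below (not kernel code; PAGE_SIZE and the sample values are illustrative) reproduces the unsigned wraparound: once "size" reaches PAGE_SIZE, "PAGE_SIZE - 2 - size" wraps to a huge value, so "len > PAGE_SIZE - 2 - size" is never true, whereas the rewritten "size + len + 2 > PAGE_SIZE" rejects the input.

/*
 * Userspace sketch of the unsigned wraparound behind CVE-2022-0185.
 * PAGE_SIZE and the sample values are illustrative only.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	size_t size = PAGE_SIZE;	/* cumulative option length so far */
	size_t len  = 128;		/* length of the next option */

	/* Old check: PAGE_SIZE - 2 - size wraps to a huge value here,
	 * so the "too large" branch is never taken. */
	if (len > PAGE_SIZE - 2 - size)
		printf("old check: rejected\n");
	else
		printf("old check: accepted (wrapped bound: %lu)\n",
		       PAGE_SIZE - 2 - size);

	/* New check: no subtraction, so the oversized input is caught. */
	if (size + len + 2 > PAGE_SIZE)
		printf("new check: rejected\n");
	else
		printf("new check: accepted\n");

	return 0;
}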