From: Bob Liu <bob.liu@oracle.com>
Subject: blk-mq: fix hang caused by freeze/unfreeze sequence
mainline inclusion
from mainline-v5.2-rc2
commit 7996a8b5511a72465b0b286763c2d8f412b8874a
category: bugfix
bugzilla: 173119
CVE: NA
-----------------------------------------------
The following is a description of a hang in blk_mq_freeze_queue_wait(). The hang happens when one task attempts to freeze a queue while another task is unfreezing it.
The root cause is an unserialized sequence of percpu_ref_kill() and percpu_ref_resurrect(): nothing orders the two calls against each other, so they can end up swapped:
CPU#0                           CPU#1
----------------                -----------------
q1 = blk_mq_init_queue(shared_tags)

                                q2 = blk_mq_init_queue(shared_tags):
                                  blk_mq_add_queue_tag_set(shared_tags):
                                    blk_mq_update_tag_set_depth(shared_tags):
                                      list_for_each_entry()
                                        blk_mq_freeze_queue(q1)
                                         > percpu_ref_kill()
                                         > blk_mq_freeze_queue_wait()

blk_cleanup_queue(q1)
 blk_mq_freeze_queue(q1)
  > percpu_ref_kill()
        ^^^^^^ freeze_depth can't guarantee the order

                                        > blk_mq_unfreeze_queue()
                                          > percpu_ref_resurrect()

 > blk_mq_freeze_queue_wait()
        ^^^^^^ Hang here!!!!
This wrong sequence raises a kernel warning:

 percpu_ref_kill_and_confirm called more than once on blk_queue_usage_counter_release!
 WARNING: CPU: 0 PID: 11854 at lib/percpu-refcount.c:336 percpu_ref_kill_and_confirm+0x99/0xb0
But the most unpleasant effect is the hang in blk_mq_freeze_queue_wait(): it waits for q_usage_counter to drop to zero, which never happens, because the percpu-ref was reinited (instead of being killed) and stays in PERCPU state forever.
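To make the ordering problem concrete, here is a minimal user-space C model of the old atomic-counter logic (illustration only, not part of the patch; race_model.c, freeze() and ref_state are hypothetical stand-ins for blk_freeze_queue_start()/blk_mq_unfreeze_queue() and the percpu-ref state). It replays the interleaving from the diagram above and ends with depth == 1 while the ref is back in PERCPU state, exactly the condition blk_mq_freeze_queue_wait() can never escape:

------
/* race_model.c - user-space sketch of the unsynchronized logic */
#include <stdatomic.h>
#include <stdio.h>

enum ref_state { PERCPU, DEAD };

static atomic_int freeze_depth;            /* was atomic_t mq_freeze_depth */
static enum ref_state ref_state = PERCPU;  /* stands in for q_usage_counter */

static void freeze(void)
{
    /* The increment decides WHO kills the ref, but nothing orders
     * the kill itself against a concurrent resurrect. */
    if (atomic_fetch_add(&freeze_depth, 1) + 1 == 1)
        ref_state = DEAD;                  /* percpu_ref_kill() */
}

int main(void)
{
    freeze();                              /* CPU#1: depth 0->1, ref killed */

    /* CPU#1 begins unfreeze: the decrement happens first ... */
    atomic_fetch_sub(&freeze_depth, 1);    /* depth 1->0 */

    /* ... CPU#0 slips in a whole freeze before the resurrect: */
    freeze();                              /* depth 0->1, kill runs AGAIN
                                              -> the WARNING above */

    /* ... CPU#1 finally resurrects, undoing CPU#0's kill: */
    ref_state = PERCPU;                    /* percpu_ref_resurrect() */

    /* CPU#0 now waits for the ref to drain, but it is back in
     * PERCPU state with depth == 1: the hang. */
    printf("depth=%d state=%s\n", atomic_load(&freeze_depth),
           ref_state == PERCPU ? "PERCPU (hang)" : "DEAD");
    return 0;
}
------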
How to reproduce:
 - "insmod null_blk.ko shared_tags=1 nr_devices=0 queue_mode=2"
 - cpu0: python Script.py 0; use taskset to pin the corresponding process to cpu0
 - cpu1: python Script.py 1; use taskset to pin the corresponding process to cpu1
Script.py:
------
#!/usr/bin/python3

import os
import sys

while True:
    on = "echo 1 > /sys/kernel/config/nullb/%s/power" % sys.argv[1]
    off = "echo 0 > /sys/kernel/config/nullb/%s/power" % sys.argv[1]
    os.system(on)
    os.system(off)
------
This bug was first reported and fixed by Roman; previous discussion:

[1] Message id: 1443287365-4244-7-git-send-email-akinobu.mita@gmail.com
[2] Message id: 1443563240-29306-6-git-send-email-tj@kernel.org
[3] https://patchwork.kernel.org/patch/9268199/
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Roman Pen <roman.penyaev@profitbricks.com>
Signed-off-by: Bob Liu <bob.liu@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 block/blk-core.c       |  3 ++-
 block/blk-mq.c         | 19 ++++++++++---------
 include/linux/blkdev.h |  7 ++++++-
 3 files changed, 18 insertions(+), 11 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 2f66d43c476f3..b67cb175b9737 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -955,7 +955,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 		smp_rmb();
 
 		wait_event(q->mq_freeze_wq,
-			   (atomic_read(&q->mq_freeze_depth) == 0 &&
+			   (!q->mq_freeze_depth &&
 			    (pm || !blk_queue_pm_only(q))) ||
 			   blk_queue_dying(q));
 		if (blk_queue_dying(q))
@@ -1071,6 +1071,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
 		queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q);
 
 	init_waitqueue_head(&q->mq_freeze_wq);
+	mutex_init(&q->mq_freeze_lock);
 
 	/*
 	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 607ee5dafa2e9..0de720aa544fa 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -136,13 +136,14 @@ void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
 
 void blk_freeze_queue_start(struct request_queue *q)
 {
-	int freeze_depth;
-
-	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
-	if (freeze_depth == 1) {
+	mutex_lock(&q->mq_freeze_lock);
+	if (++q->mq_freeze_depth == 1) {
 		percpu_ref_kill(&q->q_usage_counter);
+		mutex_unlock(&q->mq_freeze_lock);
 		if (q->mq_ops)
 			blk_mq_run_hw_queues(q, false);
+	} else {
+		mutex_unlock(&q->mq_freeze_lock);
 	}
 }
 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
@@ -193,14 +194,14 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
 
 void blk_mq_unfreeze_queue(struct request_queue *q)
 {
-	int freeze_depth;
-
-	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
-	WARN_ON_ONCE(freeze_depth < 0);
-	if (!freeze_depth) {
+	mutex_lock(&q->mq_freeze_lock);
+	q->mq_freeze_depth--;
+	WARN_ON_ONCE(q->mq_freeze_depth < 0);
+	if (!q->mq_freeze_depth) {
 		percpu_ref_reinit(&q->q_usage_counter);
 		wake_up_all(&q->mq_freeze_wq);
 	}
+	mutex_unlock(&q->mq_freeze_lock);
 }
 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ad2b10c4a0797..bb0ac8cbbb4c0 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -655,7 +655,7 @@ struct request_queue {
 	spinlock_t		unused_hctx_lock;
 
 	int			bypass_depth;
-	atomic_t		mq_freeze_depth;
+	int			mq_freeze_depth;
 
 #if defined(CONFIG_BLK_DEV_BSG)
 	bsg_job_fn		*bsg_job_fn;
@@ -668,6 +668,11 @@ struct request_queue {
 #endif
 	struct rcu_head		rcu_head;
 	wait_queue_head_t	mq_freeze_wq;
+	/*
+	 * Protect concurrent access to q_usage_counter by
+	 * percpu_ref_kill() and percpu_ref_reinit().
+	 */
+	struct mutex		mq_freeze_lock;
 	struct percpu_ref	q_usage_counter;
 	struct list_head	all_q_node;
 
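Illustration only (not from the patch): the same user-space model with the patched locking. fix_model.c, freeze(), unfreeze() and hammer() are hypothetical stand-ins; the asserts encode the invariant the mutex restores (a ref is never killed twice, depth never goes negative). Holding the lock across both the depth update and the kill/reinit makes them one indivisible step, so the decrement-then-resurrect of one task can no longer interleave with the increment-then-kill of another:

------
/* fix_model.c - user-space sketch of the mutex-serialized logic */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

enum ref_state { PERCPU, DEAD };

static pthread_mutex_t freeze_lock = PTHREAD_MUTEX_INITIALIZER;
static int freeze_depth;                   /* plain int, as in the patch */
static enum ref_state ref_state = PERCPU;

static void freeze(void)
{
    pthread_mutex_lock(&freeze_lock);
    if (++freeze_depth == 1) {
        assert(ref_state == PERCPU);       /* never killed twice */
        ref_state = DEAD;                  /* percpu_ref_kill() */
    }
    pthread_mutex_unlock(&freeze_lock);
}

static void unfreeze(void)
{
    pthread_mutex_lock(&freeze_lock);
    freeze_depth--;
    assert(freeze_depth >= 0);             /* WARN_ON_ONCE() in the patch */
    if (!freeze_depth)
        ref_state = PERCPU;                /* percpu_ref_reinit() */
    pthread_mutex_unlock(&freeze_lock);
}

static void *hammer(void *arg)
{
    (void)arg;
    for (int i = 0; i < 1000000; i++) {
        freeze();
        unfreeze();
    }
    return NULL;
}

int main(void)
{
    pthread_t t1, t2;

    pthread_create(&t1, NULL, hammer, NULL);
    pthread_create(&t2, NULL, hammer, NULL);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    printf("depth=%d state=%s\n", freeze_depth,
           ref_state == PERCPU ? "PERCPU" : "DEAD");
    return 0;
}
------

Compile with "cc -pthread fix_model.c"; the asserts never fire, because under the lock depth and ref state always change together. Note the patch unlocks before blk_mq_run_hw_queues() in the depth == 1 path but keeps percpu_ref_kill() itself inside the critical section; that is the part that closes the race.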
From: Yu Kuai <yukuai3@huawei.com>
hulk inclusion
category: bugfix
bugzilla: 173119
CVE: NA
-----------------------------------------------

Move 'mq_freeze_lock' and the new 'mq_freeze_depth' counter out of
struct request_queue and into a request_queue_wrapper, so that the
previous fix does not change the size or layout of struct request_queue
and break kabi. The old 'mq_freeze_depth' member is kept in place,
unused, for the same reason.
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 block/blk-core.c       | 17 ++++++++++-------
 block/blk-mq.c         | 24 +++++++++++++++---------
 block/blk-sysfs.c      |  2 +-
 block/blk.h            | 16 ++++++++++++++++
 include/linux/blkdev.h | 12 ++++++------
 5 files changed, 48 insertions(+), 23 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index b67cb175b9737..7760bebeabcd7 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -955,7 +955,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 		smp_rmb();
 
 		wait_event(q->mq_freeze_wq,
-			   (!q->mq_freeze_depth &&
+			   (!queue_to_wrapper(q)->mq_freeze_depth &&
 			    (pm || !blk_queue_pm_only(q))) ||
 			   blk_queue_dying(q));
 		if (blk_queue_dying(q))
@@ -1004,13 +1004,15 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
 					   spinlock_t *lock)
 {
 	struct request_queue *q;
+	struct request_queue_wrapper *q_wrapper;
 	int ret;
 
-	q = kmem_cache_alloc_node(blk_requestq_cachep,
-				  gfp_mask | __GFP_ZERO, node_id);
-	if (!q)
+	q_wrapper = kmem_cache_alloc_node(blk_requestq_cachep,
+					  gfp_mask | __GFP_ZERO, node_id);
+	if (!q_wrapper)
 		return NULL;
 
+	q = &q_wrapper->q;
 	INIT_LIST_HEAD(&q->queue_head);
 	q->last_merge = NULL;
 	q->end_sector = 0;
@@ -1071,7 +1073,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
 		queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q);
 
 	init_waitqueue_head(&q->mq_freeze_wq);
-	mutex_init(&q->mq_freeze_lock);
+	mutex_init(&q_wrapper->mq_freeze_lock);
 
 	/*
 	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
@@ -1098,7 +1100,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
 fail_id:
 	ida_simple_remove(&blk_queue_ida, q->id);
 fail_q:
-	kmem_cache_free(blk_requestq_cachep, q);
+	kmem_cache_free(blk_requestq_cachep, q_wrapper);
 	return NULL;
 }
 EXPORT_SYMBOL(blk_alloc_queue_node);
@@ -3961,7 +3963,8 @@ int __init blk_dev_init(void)
 			sizeof(struct request), 0, SLAB_PANIC, NULL);
 
 	blk_requestq_cachep = kmem_cache_create("request_queue",
-			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
+			sizeof(struct request_queue_wrapper), 0, SLAB_PANIC,
+			NULL);
 
 #ifdef CONFIG_DEBUG_FS
 	blk_debugfs_root = debugfs_create_dir("block", NULL);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0de720aa544fa..b6bc8769a6ffa 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -136,14 +136,16 @@ void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
 
 void blk_freeze_queue_start(struct request_queue *q)
 {
-	mutex_lock(&q->mq_freeze_lock);
-	if (++q->mq_freeze_depth == 1) {
+	struct request_queue_wrapper *q_wrapper = queue_to_wrapper(q);
+
+	mutex_lock(&q_wrapper->mq_freeze_lock);
+	if (++q_wrapper->mq_freeze_depth == 1) {
 		percpu_ref_kill(&q->q_usage_counter);
-		mutex_unlock(&q->mq_freeze_lock);
+		mutex_unlock(&q_wrapper->mq_freeze_lock);
 		if (q->mq_ops)
 			blk_mq_run_hw_queues(q, false);
 	} else {
-		mutex_unlock(&q->mq_freeze_lock);
+		mutex_unlock(&q_wrapper->mq_freeze_lock);
 	}
 }
 EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
@@ -194,14 +196,18 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
 
 void blk_mq_unfreeze_queue(struct request_queue *q)
 {
-	mutex_lock(&q->mq_freeze_lock);
-	q->mq_freeze_depth--;
-	WARN_ON_ONCE(q->mq_freeze_depth < 0);
-	if (!q->mq_freeze_depth) {
+	struct request_queue_wrapper *q_wrapper = queue_to_wrapper(q);
+
+	mutex_lock(&q_wrapper->mq_freeze_lock);
+	q_wrapper->mq_freeze_depth--;
+
+	WARN_ON_ONCE(q_wrapper->mq_freeze_depth < 0);
+	if (!q_wrapper->mq_freeze_depth) {
 		percpu_ref_reinit(&q->q_usage_counter);
 		wake_up_all(&q->mq_freeze_wq);
 	}
-	mutex_unlock(&q->mq_freeze_lock);
+
+	mutex_unlock(&q_wrapper->mq_freeze_lock);
 }
 EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
 
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 2d905a8b14730..a05b2844f01cf 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -792,7 +792,7 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
 {
 	struct request_queue *q = container_of(rcu_head, struct request_queue,
 					       rcu_head);
-	kmem_cache_free(blk_requestq_cachep, q);
+	kmem_cache_free(blk_requestq_cachep, queue_to_wrapper(q));
 }
 
 /**
diff --git a/block/blk.h b/block/blk.h
index 1a5b67b57e6b2..8bb450ef6659b 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -37,6 +37,22 @@ struct blk_flush_queue {
 	spinlock_t		mq_flush_lock;
 };
 
+/*
+ * The wrapper of request_queue to fix kabi while adding members.
+ */
+struct request_queue_wrapper {
+	struct request_queue	q;
+	/*
+	 * Protect concurrent access to q_usage_counter by
+	 * percpu_ref_kill() and percpu_ref_reinit().
+	 */
+	struct mutex		mq_freeze_lock;
+	int			mq_freeze_depth;
+};
+
+#define queue_to_wrapper(q) \
+	container_of(q, struct request_queue_wrapper, q)
+
 extern struct kmem_cache *blk_requestq_cachep;
 extern struct kmem_cache *request_cachep;
 extern struct kobj_type blk_queue_ktype;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index bb0ac8cbbb4c0..d60280e47a2ca 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -655,7 +655,12 @@ struct request_queue {
 	spinlock_t		unused_hctx_lock;
 
 	int			bypass_depth;
-	int			mq_freeze_depth;
+	/*
+	 * use 'mq_freeze_depth' in request_queue_wrapper instead of here, which
+	 * is protected by 'mq_freeze_lock' in request_queue_wrapper. The reason
+	 * to keep unused 'mq_freeze_depth' here is to avoid kabi broken.
+	 */
+	atomic_t		mq_freeze_depth;
 
 #if defined(CONFIG_BLK_DEV_BSG)
 	bsg_job_fn		*bsg_job_fn;
@@ -668,11 +673,6 @@ struct request_queue {
 #endif
 	struct rcu_head		rcu_head;
 	wait_queue_head_t	mq_freeze_wq;
-	/*
-	 * Protect concurrent access to q_usage_counter by
-	 * percpu_ref_kill() and percpu_ref_reinit().
-	 */
-	struct mutex		mq_freeze_lock;
 	struct percpu_ref	q_usage_counter;
 	struct list_head	all_q_node;
 
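For readers unfamiliar with the wrapper trick: adding members would change sizeof(struct request_queue) and break kabi, so the patch allocates a request_queue_wrapper, hands out only the embedded queue pointer, and recovers the wrapper with container_of(). A self-contained user-space sketch of the same pattern follows (illustration only; wrapper_model.c, my_queue, my_queue_wrapper and alloc_queue() are made-up names):

------
/* wrapper_model.c - sketch of the kabi-preserving wrapper pattern */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct my_queue {           /* stands in for struct request_queue:  */
    int id;                 /* its size and layout must not change  */
};

struct my_queue_wrapper {   /* stands in for request_queue_wrapper  */
    struct my_queue q;      /* embedded struct, handed out to users */
    int freeze_depth;       /* new members live only in the wrapper */
};

#define queue_to_wrapper(qp) container_of(qp, struct my_queue_wrapper, q)

static struct my_queue *alloc_queue(int id)
{
    /* allocate the wrapper, return only the inner pointer,
     * as blk_alloc_queue_node() does after the patch */
    struct my_queue_wrapper *w = calloc(1, sizeof(*w));

    if (!w)
        return NULL;
    w->q.id = id;
    return &w->q;
}

int main(void)
{
    struct my_queue *q = alloc_queue(7);

    /* existing code sees an unchanged struct my_queue ... */
    printf("id = %d\n", q->id);

    /* ... new code reaches the extra member via the wrapper */
    queue_to_wrapper(q)->freeze_depth++;
    printf("depth = %d\n", queue_to_wrapper(q)->freeze_depth);

    /* free the wrapper, as blk_free_queue_rcu() does */
    free(queue_to_wrapper(q));
    return 0;
}
------

The same reasoning explains the leftover atomic_t 'mq_freeze_depth' in blkdev.h above: deleting it would shift every later member of struct request_queue, so it stays behind as unused padding.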