From: Yang Yang <yang.yang@vivo.com>
mainline inclusion
from mainline-5.12-rc1
commit ffa772cfe9356ce94d3061335c2681f60e7c1c5b
category: bugfix
bugzilla: 182133
CVE: NA
-------------------------------------------------
A hang occurs when the user changes the scheduler queue depth by writing to
the 'nr_requests' sysfs file of that device.
The details of the environment in which we found the problem are as follows:
an eMMC block device
total driver tags: 16
default queue_depth: 32
kqd->async_depth initialized in kyber_init_sched() with queue_depth=32
Then we change queue_depth to 256 by writing to the 'nr_requests' sysfs
file, but kqd->async_depth is not updated after queue_depth changes.
Now the value of the async depth is too small for queue_depth=256, which may
cause a hang.
This patch introduces kyber_depth_updated() so that kyber can update the
async depth when the queue depth changes.
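For reference, the scenario above can be reproduced from userspace with a
small program along the lines of the sketch below. The device name
(mmcblk0) is only an assumption standing in for the eMMC device mentioned
above; any block device using the kyber scheduler should behave the same.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Assumed device name; the report only says "an eMMC block device". */
	const char *path = "/sys/block/mmcblk0/queue/nr_requests";
	const char *depth = "256";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open nr_requests");
		return 1;
	}

	/*
	 * Grow the scheduler queue depth from the default 32 to 256.
	 * Before this fix, kyber keeps the kqd->async_depth that was
	 * computed for the old depth, which can lead to the hang
	 * described above.
	 */
	if (write(fd, depth, strlen(depth)) < 0)
		perror("write nr_requests");

	close(fd);
	return 0;
}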
Signed-off-by: Yang Yang <yang.yang@vivo.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Conflict: block/kyber-iosched.c
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
block/kyber-iosched.c | 29 +++++++++++++----------------
1 file changed, 13 insertions(+), 16 deletions(-)
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index a1660bafc9124..9eeb60d97f0ca 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -288,15 +288,6 @@ static void kyber_stat_timer_fn(struct blk_stat_callback *cb)
 		blk_stat_activate_msecs(kqd->cb, 100);
 }
 
-static unsigned int kyber_sched_tags_shift(struct kyber_queue_data *kqd)
-{
-	/*
-	 * All of the hardware queues have the same depth, so we can just grab
-	 * the shift of the first one.
-	 */
-	return kqd->q->queue_hw_ctx[0]->sched_tags->bitmap_tags.sb.shift;
-}
-
 static int kyber_bucket_fn(const struct request *rq)
 {
 	return kyber_sched_domain(rq->cmd_flags);
@@ -306,7 +297,6 @@ static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
 {
 	struct kyber_queue_data *kqd;
 	unsigned int max_tokens;
-	unsigned int shift;
 	int ret = -ENOMEM;
 	int i;
 
@@ -341,9 +331,6 @@ static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
 		sbitmap_queue_resize(&kqd->domain_tokens[i], kyber_depth[i]);
 	}
 
-	shift = kyber_sched_tags_shift(kqd);
-	kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
-
 	kqd->read_lat_nsec = 2000000ULL;
 	kqd->write_lat_nsec = 10000000ULL;
 
@@ -403,9 +390,19 @@ static void kyber_ctx_queue_init(struct kyber_ctx_queue *kcq)
 		INIT_LIST_HEAD(&kcq->rq_list[i]);
 }
 
-static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+static void kyber_depth_updated(struct blk_mq_hw_ctx *hctx)
 {
 	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
+	struct blk_mq_tags *tags = hctx->sched_tags;
+	unsigned int shift = tags->bitmap_tags.sb.shift;
+
+	kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
+
+	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, kqd->async_depth);
+}
+
+static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+{
 	struct kyber_hctx_data *khd;
 	int i;
 
@@ -446,8 +443,7 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 	khd->batching = 0;
 
 	hctx->sched_data = khd;
-	sbitmap_queue_min_shallow_depth(&hctx->sched_tags->bitmap_tags,
-					kqd->async_depth);
+	kyber_depth_updated(hctx);
 
 	return 0;
 
@@ -966,6 +962,7 @@ static struct elevator_type kyber_sched = {
 		.completed_request = kyber_completed_request,
 		.dispatch_request = kyber_dispatch_request,
 		.has_work = kyber_has_work,
+		.depth_updated = kyber_depth_updated,
 	},
 	.uses_mq = true,
 #ifdef CONFIG_BLK_DEBUG_FS
--
2.25.1