hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IAGRKP
CVE: NA
--------------------------------
If blk-wbt is enabled, the following new debugfs entries will be created as well:
/sys/kernel/debug/block/sda/blk_io_hierarchy/
|-- wbt
|   |-- io_dump
|   |-- stats
|   `-- threshold
Users can use these entries to analyze how IO behaves in blk-wbt.
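For illustration only (not part of this patch), a minimal userspace sketch that dumps the new stats entry could look like the following; "sda" is just the example device from the tree above, and the file contents are treated as opaque text since their format is defined by the blk_io_hierarchy debugfs code:

/*
 * Illustrative sketch only, not part of this patch: dump the blk-wbt
 * hierarchy stats file. Assumes debugfs is mounted at /sys/kernel/debug
 * and CONFIG_HIERARCHY_WBT is enabled; "sda" is an example device.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path =
		"/sys/kernel/debug/block/sda/blk_io_hierarchy/wbt/stats";
	FILE *f = fopen(path, "r");
	char buf[4096];
	size_t n;

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}

	/* debugfs entries are plain text, so just echo the contents */
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);

	fclose(f);
	return 0;
}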
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
 block/blk-io-hierarchy/Kconfig   | 11 +++++++++++
 block/blk-io-hierarchy/debugfs.c |  3 +++
 block/blk-wbt.c                  | 12 ++++++++++--
 include/linux/blk_types.h        |  3 +++
 4 files changed, 27 insertions(+), 2 deletions(-)
diff --git a/block/blk-io-hierarchy/Kconfig b/block/blk-io-hierarchy/Kconfig
index 2c15b5a7a006..ad1b7abc7610 100644
--- a/block/blk-io-hierarchy/Kconfig
+++ b/block/blk-io-hierarchy/Kconfig
@@ -37,4 +37,15 @@ config HIERARCHY_THROTTLE
 	  If unsure, say N.
 
+config HIERARCHY_WBT
+	bool "Enable hierarchy stats layer blk-wbt"
+	default n
+	depends on BLK_WBT
+	help
+	  Enabling this lets blk hierarchy stats record additional information
+	  for blk-wbt. Such information can be helpful to debug performance
+	  and problems like io hang.
+
+	  If unsure, say N.
+
 endif
diff --git a/block/blk-io-hierarchy/debugfs.c b/block/blk-io-hierarchy/debugfs.c
index 5b61646553ae..327ed5c88edc 100644
--- a/block/blk-io-hierarchy/debugfs.c
+++ b/block/blk-io-hierarchy/debugfs.c
@@ -22,6 +22,9 @@ static const char *stage_name[NR_STAGE_GROUPS] = {
 #ifdef CONFIG_HIERARCHY_THROTTLE
 	[STAGE_THROTTLE] = "throtl",
 #endif
+#ifdef CONFIG_HIERARCHY_WBT
+	[STAGE_WBT] = "wbt",
+#endif
 };
 
 const char *hierarchy_stage_name(enum stage_group stage)
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index d8314038dc08..cf098d2a7262 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -30,6 +30,9 @@
#include "blk-wbt.h" #include "blk-rq-qos.h" +#ifndef __GENKSYMS__ +#include "blk-io-hierarchy/stats.h" +#endif
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/wbt.h>
@@ -534,11 +537,12 @@ static int wbt_wake_function(struct wait_queue_entry *curr, unsigned int mode,
  * the timer to kick off queuing again.
  */
 static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
-		       unsigned long rw, spinlock_t *lock)
+		       struct bio *bio, spinlock_t *lock)
 	__releases(lock)
 	__acquires(lock)
 {
 	struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
+	unsigned long rw = bio->bi_opf;
 	struct wbt_wait_data data = {
 		.wq = {
 			.func	= wbt_wake_function,
@@ -555,6 +559,7 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
 	if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
 		return;
 
+	bio_hierarchy_start_io_acct(bio, STAGE_WBT);
 	has_sleeper = !__prepare_to_wait_exclusive(&rqw->wait, &data.wq,
 						TASK_UNINTERRUPTIBLE);
 	do {
@@ -589,6 +594,7 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
 	} while (1);
 
 	finish_wait(&rqw->wait, &data.wq);
+	bio_hierarchy_end_io_acct(bio, STAGE_WBT);
 }
 
 static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
@@ -653,7 +659,7 @@ static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
 		return;
 	}
 
-	__wbt_wait(rwb, flags, bio->bi_opf, lock);
+	__wbt_wait(rwb, flags, bio, lock);
 
 	if (!blk_stat_is_active(rwb->cb))
 		rwb_arm_timer(rwb);
@@ -771,6 +777,7 @@ static void wbt_exit(struct rq_qos *rqos)
 	struct rq_wb *rwb = RQWB(rqos);
 	struct request_queue *q = rqos->q;
 
+	blk_mq_unregister_hierarchy(q, STAGE_WBT);
 	blk_stat_remove_callback(q, rwb->cb);
 	blk_stat_free_callback(rwb->cb);
 	kfree(rwb);
@@ -849,6 +856,7 @@ int wbt_init(struct request_queue *q)
 
 	blk_mq_unfreeze_queue(q);
 	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
+	blk_mq_register_hierarchy(q, STAGE_WBT);
 
 	return 0;
 }
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index ea5564ac6f11..7ffe59d6d64e 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -388,6 +388,9 @@ enum stat_group {
 enum stage_group {
 #ifdef CONFIG_BLK_DEV_THROTTLING
 	STAGE_THROTTLE,
+#endif
+#ifdef CONFIG_BLK_WBT
+	STAGE_WBT,
 #endif
 	STAGE_BIO_RESERVE,
 	NR_BIO_STAGE_GROUPS,