Currently, io_ticks is accounted based on sampling: update_io_ticks() will always account io_ticks by 1 jiffy from bdev_start_io_acct()/blk_account_io_start(), and the result can be inaccurate, for example (HZ is 250):
Test script: fio -filename=/dev/sda -bs=4k -rw=write -direct=1 -name=test -thinktime=4ms
Test result: util is about 90%, while the disk is in fact almost idle.
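(A back-of-the-envelope explanation, inferred from the numbers above: with HZ=250 one jiffy is 4ms, and with thinktime=4ms a new IO starts roughly once per jiffy, so the sampling logic charges one whole jiffy of io_ticks for nearly every jiffy. Each submit interval is slightly longer than 4ms, thinktime plus the IO itself, so a stamp update is occasionally missed, which is why util settles around 90% rather than 100% even though the disk is idle for most of every interval.)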
In order to account io_ticks precisely, update_io_ticks() must know whether any IO is already inflight, which adds a slight overhead; hence precise IO accounting is disabled by default, and users can enable it through a sysfs entry (or the precise_iostat= kernel parameter introduced below).
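For example, reusing sda from the test above, precise accounting can be enabled at runtime with:

echo 2 > /sys/block/sda/queue/iostats

where 0 disables iostats entirely and 1 selects the existing sampling-based mode; alternatively, booting with precise_iostat=1 makes newly allocated queues default to precise accounting.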
Note that for rq-based devices, part_stat_local_inc()/part_stat_local_dec() and part_in_flight() are used to track inflight requests instead of iterating tags, because iterating tags is not suited to the fast path: 'tags->lock' is grabbed in blk_mq_find_and_get_req().
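For reference, a sketch of the helper this patch exports (mirroring part_in_flight() in block/genhd.c; the loop body is quoted from memory, with part_stat_local_read_cpu() reading the per-cpu in_flight[read/write] counters):

unsigned int part_in_flight(struct block_device *part)
{
	unsigned int inflight = 0;
	int cpu;

	/* Slow path: sum the per-cpu read and write inflight counters. */
	for_each_possible_cpu(cpu) {
		inflight += part_stat_local_read_cpu(part, in_flight[0], cpu) +
			    part_stat_local_read_cpu(part, in_flight[1], cpu);
	}

	/*
	 * part_stat_local_inc()/dec() can race across CPUs, so the sum may
	 * transiently go negative; clamp it to zero.
	 */
	if ((int)inflight < 0)
		inflight = 0;

	return inflight;
}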
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
 Documentation/ABI/stable/sysfs-block |  8 ++++--
 block/blk-core.c                     | 27 ++++++++++++++++--
 block/blk-merge.c                    |  3 ++
 block/blk-mq-debugfs.c               |  2 ++
 block/blk-mq.c                       | 12 ++++++--
 block/blk-sysfs.c                    | 42 ++++++++++++++++++++++++++--
 block/blk.h                          |  1 +
 block/genhd.c                        |  2 +-
 include/linux/blk-mq.h               |  1 +
 include/linux/blkdev.h               |  3 ++
 10 files changed, 91 insertions(+), 10 deletions(-)
diff --git a/Documentation/ABI/stable/sysfs-block b/Documentation/ABI/stable/sysfs-block
index 1fe9a553c37b..e5fedecf7bdf 100644
--- a/Documentation/ABI/stable/sysfs-block
+++ b/Documentation/ABI/stable/sysfs-block
@@ -358,8 +358,12 @@
 What:		/sys/block/<disk>/queue/iostats
 Date:		January 2009
 Contact:	linux-block@vger.kernel.org
 Description:
-		[RW] This file is used to control (on/off) the iostats
-		accounting of the disk.
+		[RW] This file is used to control the iostats accounting of the
+		disk. If this value is 0, iostats accounting is disabled; if
+		this value is 1, iostats accounting is enabled, but io_ticks is
+		accounted by sampling and the result is not accurate; if this
+		value is 2, iostats accounting is enabled and io_ticks is
+		accounted precisely, but there will be a slight overhead.
 What:		/sys/block/<disk>/queue/logical_block_size
diff --git a/block/blk-core.c b/block/blk-core.c
index fdf25b8d6e78..13a27d1d8540 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -71,6 +71,21 @@ static struct kmem_cache *blk_requestq_cachep;
  */
 static struct workqueue_struct *kblockd_workqueue;
 
+static bool precise_iostat;
+
+static int __init precise_iostat_setup(char *str)
+{
+	bool precise;
+
+	if (!strtobool(str, &precise)) {
+		precise_iostat = precise;
+		pr_info("precise iostat %d\n", precise_iostat);
+	}
+
+	return 1;
+}
+__setup("precise_iostat=", precise_iostat_setup);
+
 /**
  * blk_queue_flag_set - atomically set a queue flag
  * @flag: flag to be set
@@ -441,6 +456,8 @@ struct request_queue *blk_alloc_queue(int node_id)
 
 	blk_set_default_limits(&q->limits);
 	q->nr_requests = BLKDEV_DEFAULT_RQ;
+	if (precise_iostat)
+		blk_queue_flag_set(QUEUE_FLAG_PRECISE_IO_STAT, q);
 
 	return q;
 
@@ -938,11 +955,15 @@ EXPORT_SYMBOL_GPL(iocb_bio_iopoll);
 void update_io_ticks(struct block_device *part, unsigned long now, bool end)
 {
 	unsigned long stamp;
+	bool precise = blk_queue_precise_io_stat(part->bd_queue);
 again:
 	stamp = READ_ONCE(part->bd_stamp);
-	if (unlikely(time_after(now, stamp))) {
-		if (likely(try_cmpxchg(&part->bd_stamp, &stamp, now)))
-			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
+	if (unlikely(time_after(now, stamp)) &&
+	    likely(try_cmpxchg(&part->bd_stamp, &stamp, now))) {
+		if (end || (precise && part_in_flight(part)))
+			__part_stat_add(part, io_ticks, now - stamp);
+		else if (!precise)
+			__part_stat_add(part, io_ticks, 1);
 	}
 	if (part->bd_partno) {
 		part = bdev_whole(part);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 65e75efa9bd3..5db8228c46fc 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -783,6 +783,9 @@ static void blk_account_io_merge_request(struct request *req)
 	if (blk_do_io_stat(req)) {
 		part_stat_lock();
 		part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
+		if (req->rq_flags & RQF_PRECISE_IO_STAT)
+			part_stat_local_dec(req->part,
+					    in_flight[op_is_write(req_op(req))]);
 		part_stat_unlock();
 	}
 }
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index aa7a1357c3e1..9ad108307344 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -86,6 +86,7 @@ static const char *const blk_queue_flag_name[] = {
 	QUEUE_FLAG_NAME(FAIL_IO),
 	QUEUE_FLAG_NAME(NONROT),
 	QUEUE_FLAG_NAME(IO_STAT),
+	QUEUE_FLAG_NAME(PRECISE_IO_STAT),
 	QUEUE_FLAG_NAME(NOXMERGES),
 	QUEUE_FLAG_NAME(ADD_RANDOM),
 	QUEUE_FLAG_NAME(SYNCHRONOUS),
@@ -254,6 +255,7 @@ static const char *const rqf_name[] = {
 	RQF_NAME(FAILED),
 	RQF_NAME(QUIET),
 	RQF_NAME(IO_STAT),
+	RQF_NAME(PRECISE_IO_STAT),
 	RQF_NAME(PM),
 	RQF_NAME(HASHED),
 	RQF_NAME(STATS),
diff --git a/block/blk-mq.c b/block/blk-mq.c
index dd7c9e3eca1b..9c894e0d0b19 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -360,8 +360,11 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 
 	if (data->flags & BLK_MQ_REQ_PM)
 		data->rq_flags |= RQF_PM;
-	if (blk_queue_io_stat(q))
+	if (blk_queue_io_stat(q)) {
 		data->rq_flags |= RQF_IO_STAT;
+		if (blk_queue_precise_io_stat(q))
+			data->rq_flags |= RQF_PRECISE_IO_STAT;
+	}
 	rq->rq_flags = data->rq_flags;
 
 	if (data->rq_flags & RQF_SCHED_TAGS) {
@@ -994,6 +997,9 @@ static inline void blk_account_io_done(struct request *req, u64 now)
 		update_io_ticks(req->part, jiffies, true);
 		part_stat_inc(req->part, ios[sgrp]);
 		part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
+		if (req->rq_flags & RQF_PRECISE_IO_STAT)
+			part_stat_local_dec(req->part,
+					    in_flight[op_is_write(req_op(req))]);
 		part_stat_unlock();
 	}
 }
@@ -1001,7 +1007,6 @@ static inline void blk_account_io_done(struct request *req, u64 now)
 static inline void blk_account_io_start(struct request *req)
 {
 	trace_block_io_start(req);
-
 	if (blk_do_io_stat(req)) {
 		/*
 		 * All non-passthrough requests are created from a bio with one
@@ -1016,6 +1021,9 @@ static inline void blk_account_io_start(struct request *req)
 
 		part_stat_lock();
 		update_io_ticks(req->part, jiffies, false);
+		if (req->rq_flags & RQF_PRECISE_IO_STAT)
+			part_stat_local_inc(req->part,
+					    in_flight[op_is_write(req_op(req))]);
 		part_stat_unlock();
 	}
 }
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 63e481262336..d498cf0942d3 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -303,7 +303,6 @@ queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
 
 QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
 QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
-QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
 QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
 #undef QUEUE_SYSFS_BIT_FNS
 
@@ -473,6 +472,45 @@ static ssize_t queue_dax_show(struct request_queue *q, char *page)
 	return queue_var_show(blk_queue_dax(q), page);
 }
 
+static ssize_t queue_iostats_show(struct request_queue *q, char *page)
+{
+	int val = 0;
+
+	if (blk_queue_io_stat(q))
+		val = blk_queue_precise_io_stat(q) ? 2 : 1;
+
+	return sprintf(page, "%u\n", val);
+}
+
+static ssize_t
+queue_iostats_store(struct request_queue *q, const char *page, size_t count)
+{
+	unsigned long nr;
+	int ret = queue_var_store(&nr, page, count);
+
+	if (ret < 0)
+		return ret;
+
+	switch (nr) {
+	case 0:
+		blk_queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
+		blk_queue_flag_clear(QUEUE_FLAG_PRECISE_IO_STAT, q);
+		break;
+	case 1:
+		blk_queue_flag_set(QUEUE_FLAG_IO_STAT, q);
+		blk_queue_flag_clear(QUEUE_FLAG_PRECISE_IO_STAT, q);
+		break;
+	case 2:
+		blk_queue_flag_set(QUEUE_FLAG_IO_STAT, q);
+		blk_queue_flag_set(QUEUE_FLAG_PRECISE_IO_STAT, q);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return count;
+}
+
 #define QUEUE_RO_ENTRY(_prefix, _name)			\
 static struct queue_sysfs_entry _prefix##_entry = {	\
 	.attr = { .name = _name, .mode = 0444 },	\
@@ -494,6 +532,7 @@ QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
 QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
 QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
 QUEUE_RW_ENTRY(elv_iosched, "scheduler");
+QUEUE_RW_ENTRY(queue_iostats, "iostats");
 
 QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
 QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
@@ -539,7 +578,6 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = {
 };
 
 QUEUE_RW_ENTRY(queue_nonrot, "rotational");
-QUEUE_RW_ENTRY(queue_iostats, "iostats");
 QUEUE_RW_ENTRY(queue_random, "add_random");
 QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");
 
diff --git a/block/blk.h b/block/blk.h
index 08a358bc0919..67915b04b3c1 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -344,6 +344,7 @@ static inline bool blk_do_io_stat(struct request *rq)
 }
 
 void update_io_ticks(struct block_device *part, unsigned long now, bool end);
+unsigned int part_in_flight(struct block_device *part);
 
 static inline void req_set_nomerge(struct request_queue *q, struct request *req)
 {
diff --git a/block/genhd.c b/block/genhd.c
index 4a16a424f57d..b3ff653f3e50 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -118,7 +118,7 @@ static void part_stat_read_all(struct block_device *part,
 	}
 }
 
-static unsigned int part_in_flight(struct block_device *part)
+unsigned int part_in_flight(struct block_device *part)
 {
 	unsigned int inflight = 0;
 	int cpu;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 3d74f3e5b995..9dfee0644f9c 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -46,6 +46,7 @@ typedef __u32 __bitwise req_flags_t;
 #define RQF_QUIET		((__force req_flags_t)(1 << 11))
 /* account into disk and partition IO statistics */
 #define RQF_IO_STAT		((__force req_flags_t)(1 << 13))
+#define RQF_PRECISE_IO_STAT	((__force req_flags_t)(1 << 14))
 /* runtime pm request */
 #define RQF_PM			((__force req_flags_t)(1 << 15))
 /* on IO scheduler merge hash */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 9f3bcbcb156d..bea0b5fdac74 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -536,6 +536,7 @@ struct request_queue {
 #define QUEUE_FLAG_NONROT	6	/* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
 #define QUEUE_FLAG_IO_STAT	7	/* do disk/partitions IO accounting */
+#define QUEUE_FLAG_PRECISE_IO_STAT 8	/* do disk/partitions IO accounting precisely */
 #define QUEUE_FLAG_NOXMERGES	9	/* No extended merges */
 #define QUEUE_FLAG_ADD_RANDOM	10	/* Contributes to random pool */
 #define QUEUE_FLAG_SYNCHRONOUS	11	/* always completes in submit context */
@@ -576,6 +577,8 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
 #define blk_queue_stable_writes(q)	\
 	test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
 #define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
+#define blk_queue_precise_io_stat(q)	\
+	test_bit(QUEUE_FLAG_PRECISE_IO_STAT, &(q)->queue_flags)
 #define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
 #define blk_queue_zone_resetall(q)	\
 	test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
Feedback: The patch(es) you sent to the kernel@openeuler.org mailing list have been converted to a pull request successfully! Pull request link: https://gitee.com/openeuler/kernel/pulls/3820 Mailing list address: https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/O...