From: Tejun Heo <tj@kernel.org>
mainline inclusion
from mainline-5.4-rc1
commit 25d41e4aadb0788b4fae8a8fca90f437b9ebd727
category: feature
bugzilla: 38688
CVE: NA
---------------------------
The vrate_adj tracepoint traces vrate changes; however, it fires only when busy_level is non-zero. busy_level turning to zero can sometimes be just as interesting an event. This patch also enables the vrate_adj tracepoint on other vrate-related events - busy_level changes and non-zero nr_lagging.
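For reference, the expanded trace coverage can be observed through the standard tracefs interface; a minimal sketch, assuming tracefs is mounted at /sys/kernel/tracing:

  # enable the iocost vrate_adj tracepoint
  echo 1 > /sys/kernel/tracing/events/iocost/iocost_ioc_vrate_adj/enable
  # stream events, now also emitted on busy_level changes and non-zero nr_lagging
  cat /sys/kernel/tracing/trace_pipe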
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 block/blk-iocost.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index 5a8d443ab08ee..3bc9b2f3e9897 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -1343,7 +1343,7 @@ static void ioc_timer_fn(struct timer_list *timer)
 	u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
 	u32 missed_ppm[2], rq_wait_pct;
 	u64 period_vtime;
-	int i;
+	int prev_busy_level, i;
 	/* how were the latencies during the period? */
 	ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
@@ -1531,6 +1531,7 @@ static void ioc_timer_fn(struct timer_list *timer)
 	 * and experiencing shortages but not surpluses, we're too stingy
 	 * and should increase vtime rate.
 	 */
+	prev_busy_level = ioc->busy_level;
 	if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
 	    missed_ppm[READ] > ppm_rthr ||
 	    missed_ppm[WRITE] > ppm_wthr) {
@@ -1592,6 +1593,10 @@ static void ioc_timer_fn(struct timer_list *timer)
 		atomic64_set(&ioc->vtime_rate, vrate);
 		ioc->inuse_margin_vtime = DIV64_U64_ROUND_UP(
 			ioc->period_us * vrate * INUSE_MARGIN_PCT, 100);
+	} else if (ioc->busy_level != prev_busy_level || nr_lagging) {
+		trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
+					   &missed_ppm, rq_wait_pct, nr_lagging,
+					   nr_shortages, nr_surpluses);
 	}
 
 	ioc_refresh_params(ioc, false);