From: Jens Axboe <axboe@kernel.dk>
mainline inclusion
from mainline-v6.9-rc1
commit 08420cf70cfb32eed2a0abfeb5c54c5651bd0c99
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IAGRKP
CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Convert any user of ktime_get_ns() to use blk_time_get_ns(), and ktime_get() to blk_time_get(), so we have a unified API for querying the current time in nanoseconds or as ktime.
No functional changes intended, this patch just wraps ktime_get_ns() and ktime_get() with a block helper.
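For illustration only (a sketch, not part of the diff itself): the helpers added
to block/blk.h are thin wrappers around the existing ktime interfaces, and each
converted call site simply swaps the function name. One such call site, taken
from blk-core.c in the diff below:

	/* block/blk.h: wrappers around the existing ktime interfaces */
	static inline u64 blk_time_get_ns(void)
	{
		return ktime_get_ns();
	}

	static inline ktime_t blk_time_get(void)
	{
		return ns_to_ktime(blk_time_get_ns());
	}

	/* a converted call site then reads: */
	rq->start_time_ns = blk_time_get_ns();	/* was ktime_get_ns() */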
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Conflicts:
	block/bfq-cgroup.c
	block/bfq-iosched.c
	block/blk-cgroup.c
	block/blk-flush.c
	block/blk-iocost.c
	block/blk-iolatency.c
	block/blk-core.c
	block/blk-mq-debugfs.h
	block/blk-mq.c
	block/blk-throttle.c
	block/blk-wbt.c
	block/blk.h
	block/kyber-iosched.c
	block/cfq-iosched.c
[Lots of conflicts in context; the patch is reimplemented for the current
context. Using the new helper in blk-wbt.c would cause kabi to change, hence
blk-wbt.c is not modified.]
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
 block/bfq-cgroup.c    | 15 ++++++-----
 block/bfq-iosched.c   | 23 +++++++++--------
 block/blk-cgroup.c    |  2 +-
 block/blk-core.c      |  8 +++---
 block/blk-iolatency.c |  6 ++---
 block/blk-mq.c        |  6 ++---
 block/blk-throttle.c  |  6 ++---
 block/blk.h           |  9 +++++++
 block/cfq-iosched.c   | 60 +++++++++++++++++++++----------------------
 block/kyber-iosched.c |  2 +-
 10 files changed, 74 insertions(+), 63 deletions(-)
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index b663cd8b9e46..25a407e5142d 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -22,6 +22,7 @@ #include <linux/sbitmap.h> #include <linux/delay.h>
+#include "blk.h" #include "bfq-iosched.h"
#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP) @@ -60,7 +61,7 @@ static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats) if (!bfqg_stats_waiting(stats)) return;
- now = ktime_get_ns(); + now = blk_time_get_ns(); if (now > stats->start_group_wait_time) blkg_stat_add(&stats->group_wait_time, now - stats->start_group_wait_time); @@ -77,7 +78,7 @@ static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg, return; if (bfqg == curr_bfqg) return; - stats->start_group_wait_time = ktime_get_ns(); + stats->start_group_wait_time = blk_time_get_ns(); bfqg_stats_mark_waiting(stats); }
@@ -89,7 +90,7 @@ static void bfqg_stats_end_empty_time(struct bfqg_stats *stats) if (!bfqg_stats_empty(stats)) return;
- now = ktime_get_ns(); + now = blk_time_get_ns(); if (now > stats->start_empty_time) blkg_stat_add(&stats->empty_time, now - stats->start_empty_time); @@ -116,7 +117,7 @@ void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) if (bfqg_stats_empty(stats)) return;
- stats->start_empty_time = ktime_get_ns(); + stats->start_empty_time = blk_time_get_ns(); bfqg_stats_mark_empty(stats); }
@@ -125,7 +126,7 @@ void bfqg_stats_update_idle_time(struct bfq_group *bfqg) struct bfqg_stats *stats = &bfqg->stats;
if (bfqg_stats_idling(stats)) { - u64 now = ktime_get_ns(); + u64 now = blk_time_get_ns();
if (now > stats->start_idle_time) blkg_stat_add(&stats->idle_time, @@ -138,7 +139,7 @@ void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { struct bfqg_stats *stats = &bfqg->stats;
- stats->start_idle_time = ktime_get_ns(); + stats->start_idle_time = blk_time_get_ns(); bfqg_stats_mark_idling(stats); }
@@ -175,7 +176,7 @@ void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns, u64 io_start_time_ns, unsigned int op) { struct bfqg_stats *stats = &bfqg->stats; - u64 now = ktime_get_ns(); + u64 now = blk_time_get_ns();
if (now > io_start_time_ns) blkg_rwstat_add(&stats->service_time, op, diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 473d9e31ff87..669c5bc92ba8 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -844,7 +844,7 @@ static struct request *bfq_check_fifo(struct bfq_queue *bfqq,
rq = rq_entry_fifo(bfqq->fifo.next);
- if (rq == last || ktime_get_ns() < rq->fifo_time) + if (rq == last || blk_time_get_ns() < rq->fifo_time) return NULL;
bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq); @@ -1566,7 +1566,7 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd, * bfq_bfqq_update_budg_for_activation for * details on the usage of the next variable. */ - arrived_in_time = ktime_get_ns() <= + arrived_in_time = blk_time_get_ns() <= bfqq->ttime.last_end_request + bfqd->bfq_slice_idle * 3;
@@ -2468,7 +2468,7 @@ static void bfq_set_budget_timeout(struct bfq_data *bfqd, else timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
- bfqd->last_budget_start = ktime_get(); + bfqd->last_budget_start = blk_time_get();
bfqq->budget_timeout = jiffies + bfqd->bfq_timeout * timeout_coeff; @@ -2568,7 +2568,7 @@ static void bfq_arm_slice_timer(struct bfq_data *bfqd) else if (bfqq->wr_coeff > 1) sl = max_t(u32, sl, 20ULL * NSEC_PER_MSEC);
- bfqd->last_idling_start = ktime_get(); + bfqd->last_idling_start = blk_time_get(); hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl), HRTIMER_MODE_REL); bfqg_stats_set_start_idle_time(bfqq_group(bfqq)); @@ -2605,7 +2605,7 @@ static void bfq_reset_rate_computation(struct bfq_data *bfqd, struct request *rq) { if (rq != NULL) { /* new rq dispatch now, reset accordingly */ - bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns(); + bfqd->last_dispatch = bfqd->first_dispatch = blk_time_get_ns(); bfqd->peak_rate_samples = 1; bfqd->sequential_samples = 0; bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size = @@ -2762,7 +2762,7 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq) */ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq) { - u64 now_ns = ktime_get_ns(); + u64 now_ns = blk_time_get_ns();
if (bfqd->peak_rate_samples == 0) { /* first dispatch */ bfq_log(bfqd, "update_peak_rate: goto reset, samples %d", @@ -3099,7 +3099,7 @@ static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq, if (compensate) delta_ktime = bfqd->last_idling_start; else - delta_ktime = ktime_get(); + delta_ktime = blk_time_get(); delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start); delta_usecs = ktime_to_us(delta_ktime);
@@ -4410,7 +4410,7 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, bfq_clear_bfqq_sync(bfqq);
/* set end request to minus infinity from now */ - bfqq->ttime.last_end_request = ktime_get_ns() + 1; + bfqq->ttime.last_end_request = blk_time_get_ns() + 1;
bfq_mark_bfqq_IO_bound(bfqq);
@@ -4528,7 +4528,7 @@ static void bfq_update_io_thinktime(struct bfq_data *bfqd, struct bfq_queue *bfqq) { struct bfq_ttime *ttime = &bfqq->ttime; - u64 elapsed = ktime_get_ns() - bfqq->ttime.last_end_request; + u64 elapsed = blk_time_get_ns() - bfqq->ttime.last_end_request;
elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle);
@@ -4697,7 +4697,8 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) bfq_add_request(rq); idle_timer_disabled = waiting && !bfq_bfqq_wait_request(bfqq);
- rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)]; + rq->fifo_time = blk_time_get_ns() + + bfqd->bfq_fifo_expire[rq_is_sync(rq)]; list_add_tail(&rq->queuelist, &bfqq->fifo);
bfq_rq_enqueued(bfqd, bfqq, rq); @@ -4853,7 +4854,7 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd) bfq_weights_tree_remove(bfqd, bfqq); }
- now_ns = ktime_get_ns(); + now_ns = blk_time_get_ns();
bfqq->ttime.last_end_request = now_ns;
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index d4a8d8fbe1a0..c0187bf00f71 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1729,7 +1729,7 @@ static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now) */ static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay) { - u64 now = ktime_to_ns(ktime_get()); + u64 now = blk_time_get_ns(); u64 exp; u64 delay_nsec = 0; int tok; diff --git a/block/blk-core.c b/block/blk-core.c index acf5585b0557..847fd7585952 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -454,7 +454,7 @@ void __blk_rq_init(struct request_queue *q, struct request *rq) RB_CLEAR_NODE(&rq->rb_node); rq->tag = -1; rq->internal_tag = -1; - rq->start_time_ns = ktime_get_ns(); + rq->start_time_ns = blk_time_get_ns(); rq->part = NULL; }
@@ -2952,7 +2952,7 @@ blk_status_t __blk_insert_cloned_request(struct request_queue *q, u64 now = 0;
if (blk_mq_need_time_stamp(rq)) - now = ktime_get_ns(); + now = blk_time_get_ns();
blk_account_io_done(rq, now); } @@ -3304,7 +3304,7 @@ void blk_start_request(struct request *req) blk_dequeue_request(req);
if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) { - req->io_start_time_ns = ktime_get_ns(); + req->io_start_time_ns = blk_time_get_ns(); #ifdef CONFIG_BLK_DEV_THROTTLING_LOW req->throtl_size = blk_rq_sectors(req); #endif @@ -3509,7 +3509,7 @@ EXPORT_SYMBOL_GPL(blk_unprep_request); void blk_finish_request(struct request *req, blk_status_t error) { struct request_queue *q = req->q; - u64 now = ktime_get_ns(); + u64 now = blk_time_get_ns();
lockdep_assert_held(req->q->queue_lock); WARN_ON_ONCE(q->mq_ops); diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c index 1baa3c49e2e3..6f81794eb6e6 100644 --- a/block/blk-iolatency.c +++ b/block/blk-iolatency.c @@ -557,7 +557,7 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio) struct rq_wait *rqw; struct iolatency_grp *iolat; u64 window_start; - u64 now = ktime_to_ns(ktime_get()); + u64 now = blk_time_get_ns(); bool issue_as_root = bio_issue_as_root_blkg(bio); bool enabled = false; int inflight = 0; @@ -624,7 +624,7 @@ static void blkiolatency_timer_fn(struct timer_list *t) struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer); struct blkcg_gq *blkg; struct cgroup_subsys_state *pos_css; - u64 now = ktime_to_ns(ktime_get()); + u64 now = blk_time_get_ns();
rcu_read_lock(); blkg_for_each_descendant_pre(blkg, pos_css, @@ -895,7 +895,7 @@ static void iolatency_pd_init(struct blkg_policy_data *pd) struct blkcg_gq *blkg = lat_to_blkg(iolat); struct rq_qos *rqos = blkcg_rq_qos(blkg->q); struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos); - u64 now = ktime_to_ns(ktime_get()); + u64 now = blk_time_get_ns(); int cpu;
for_each_possible_cpu(cpu) { diff --git a/block/blk-mq.c b/block/blk-mq.c index aa4b3c608249..76dd32ee6172 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -366,7 +366,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, RB_CLEAR_NODE(&rq->rb_node); rq->rq_disk = NULL; rq->part = NULL; - rq->start_time_ns = ktime_get_ns(); + rq->start_time_ns = blk_time_get_ns(); rq->io_start_time_ns = 0; rq->nr_phys_segments = 0; #if defined(CONFIG_BLK_DEV_INTEGRITY) @@ -576,7 +576,7 @@ EXPORT_SYMBOL_GPL(blk_mq_free_request);
inline void __blk_mq_end_request(struct request *rq, blk_status_t error) { - u64 now = ktime_get_ns(); + u64 now = blk_time_get_ns();
if (rq->rq_flags & RQF_STATS) { blk_mq_poll_stats_start(rq->q); @@ -724,7 +724,7 @@ void blk_mq_start_request(struct request *rq) trace_block_rq_issue(q, rq);
if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) { - rq->io_start_time_ns = ktime_get_ns(); + rq->io_start_time_ns = blk_time_get_ns(); #ifdef CONFIG_BLK_DEV_THROTTLING_LOW rq->throtl_size = blk_rq_sectors(rq); #endif diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 598191286557..0795935574d3 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -1910,7 +1910,7 @@ static bool throtl_tg_is_idle(struct throtl_grp *tg) time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold); ret = tg->latency_target == DFL_LATENCY_TARGET || tg->idletime_threshold == DFL_IDLE_THRESHOLD || - (ktime_get_ns() >> 10) - tg->last_finish_time > time || + (blk_time_get_ns() >> 10) - tg->last_finish_time > time || tg->avg_idletime > tg->idletime_threshold || (tg->latency_target && tg->bio_cnt && tg->bad_bio_cnt * 5 < tg->bio_cnt); @@ -2140,7 +2140,7 @@ static void throtl_downgrade_check(struct throtl_grp *tg)
static void blk_throtl_update_idletime(struct throtl_grp *tg) { - unsigned long now = ktime_get_ns() >> 10; + unsigned long now = blk_time_get_ns() >> 10; unsigned long last_finish_time = tg->last_finish_time;
if (now <= last_finish_time || last_finish_time == 0 || @@ -2403,7 +2403,7 @@ void blk_throtl_bio_endio(struct bio *bio) return; tg = blkg_to_tg(blkg);
- finish_time_ns = ktime_get_ns(); + finish_time_ns = blk_time_get_ns(); tg->last_finish_time = finish_time_ns >> 10;
 	start_time = bio_issue_time(&bio->bi_issue) >> 10;
diff --git a/block/blk.h b/block/blk.h
index 965e9c507654..afe220ee380c 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -479,4 +479,13 @@ static inline void blk_free_queue_dispatch_async(struct request_queue *q)
 }
 #endif
 
+static inline u64 blk_time_get_ns(void)
+{
+	return ktime_get_ns();
+}
+
+static inline ktime_t blk_time_get(void)
+{
+	return ns_to_ktime(blk_time_get_ns());
+}
 #endif /* BLK_INTERNAL_H */
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 130854ad8cdb..4988f4d23333 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -101,7 +101,7 @@ struct cfq_rb_root {
 };
 #define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT_CACHED, \
 			.rb_rightmost = NULL,			     \
-			.ttime = {.last_end_request = ktime_get_ns(),},}
+			.ttime = {.last_end_request = blk_time_get_ns(),},}
/* * Per process-grouping structure @@ -496,7 +496,7 @@ static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats) if (!cfqg_stats_waiting(stats)) return;
- now = ktime_get_ns(); + now = blk_time_get_ns(); if (now > stats->start_group_wait_time) blkg_stat_add(&stats->group_wait_time, now - stats->start_group_wait_time); @@ -513,7 +513,7 @@ static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, return; if (cfqg == curr_cfqg) return; - stats->start_group_wait_time = ktime_get_ns(); + stats->start_group_wait_time = blk_time_get_ns(); cfqg_stats_mark_waiting(stats); }
@@ -525,7 +525,7 @@ static void cfqg_stats_end_empty_time(struct cfqg_stats *stats) if (!cfqg_stats_empty(stats)) return;
- now = ktime_get_ns(); + now = blk_time_get_ns(); if (now > stats->start_empty_time) blkg_stat_add(&stats->empty_time, now - stats->start_empty_time); @@ -552,7 +552,7 @@ static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) if (cfqg_stats_empty(stats)) return;
- stats->start_empty_time = ktime_get_ns(); + stats->start_empty_time = blk_time_get_ns(); cfqg_stats_mark_empty(stats); }
@@ -561,7 +561,7 @@ static void cfqg_stats_update_idle_time(struct cfq_group *cfqg) struct cfqg_stats *stats = &cfqg->stats;
if (cfqg_stats_idling(stats)) { - u64 now = ktime_get_ns(); + u64 now = blk_time_get_ns();
if (now > stats->start_idle_time) blkg_stat_add(&stats->idle_time, @@ -576,7 +576,7 @@ static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
BUG_ON(cfqg_stats_idling(stats));
- stats->start_idle_time = ktime_get_ns(); + stats->start_idle_time = blk_time_get_ns(); cfqg_stats_mark_idling(stats); }
@@ -706,7 +706,7 @@ static inline void cfqg_stats_update_completion(struct cfq_group *cfqg, unsigned int op) { struct cfqg_stats *stats = &cfqg->stats; - u64 now = ktime_get_ns(); + u64 now = blk_time_get_ns();
if (now > io_start_time_ns) blkg_rwstat_add(&stats->service_time, op, @@ -1056,7 +1056,7 @@ static inline void cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) { u64 slice = cfq_scaled_cfqq_slice(cfqd, cfqq); - u64 now = ktime_get_ns(); + u64 now = blk_time_get_ns();
cfqq->slice_start = now; cfqq->slice_end = now + slice; @@ -1073,7 +1073,7 @@ static inline bool cfq_slice_used(struct cfq_queue *cfqq) { if (cfq_cfqq_slice_new(cfqq)) return false; - if (ktime_get_ns() < cfqq->slice_end) + if (blk_time_get_ns() < cfqq->slice_end) return false;
return true; @@ -1429,7 +1429,7 @@ static inline u64 cfq_cfqq_slice_usage(struct cfq_queue *cfqq, u64 *unaccounted_time) { u64 slice_used; - u64 now = ktime_get_ns(); + u64 now = blk_time_get_ns();
/* * Queue got expired before even a single request completed or @@ -1466,7 +1466,7 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg, int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg) - cfqg->service_tree_idle.count; unsigned int vfr; - u64 now = ktime_get_ns(); + u64 now = blk_time_get_ns();
BUG_ON(nr_sync < 0); used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl); @@ -1521,7 +1521,7 @@ static void cfq_init_cfqg_base(struct cfq_group *cfqg) *st = CFQ_RB_ROOT; RB_CLEAR_NODE(&cfqg->rb_node);
- cfqg->ttime.last_end_request = ktime_get_ns(); + cfqg->ttime.last_end_request = blk_time_get_ns(); }
#ifdef CONFIG_CFQ_GROUP_IOSCHED @@ -2208,7 +2208,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, struct cfq_rb_root *st; bool leftmost = true; int new_cfqq = 1; - u64 now = ktime_get_ns(); + u64 now = blk_time_get_ns();
st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq)); if (cfq_class_idle(cfqq)) { @@ -2622,7 +2622,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd, cfqd->serving_wl_class, cfqd->serving_wl_type); cfqg_stats_update_avg_queue_size(cfqq->cfqg); cfqq->slice_start = 0; - cfqq->dispatch_start = ktime_get_ns(); + cfqq->dispatch_start = blk_time_get_ns(); cfqq->allocated_slice = 0; cfqq->slice_end = 0; cfqq->slice_dispatch = 0; @@ -2671,7 +2671,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, if (cfq_cfqq_slice_new(cfqq)) cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq); else - cfqq->slice_resid = cfqq->slice_end - ktime_get_ns(); + cfqq->slice_resid = cfqq->slice_end - blk_time_get_ns(); cfq_log_cfqq(cfqd, cfqq, "resid=%lld", cfqq->slice_resid); }
@@ -2909,7 +2909,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) struct cfq_rb_root *st = cfqq->service_tree; struct cfq_io_cq *cic; u64 sl, group_idle = 0; - u64 now = ktime_get_ns(); + u64 now = blk_time_get_ns();
/* * SSD device without seek penalty, disable idling. But only do so @@ -3018,7 +3018,7 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq) return NULL;
rq = rq_entry_fifo(cfqq->fifo.next); - if (ktime_get_ns() < rq->fifo_time) + if (blk_time_get_ns() < rq->fifo_time) rq = NULL;
return rq; @@ -3120,7 +3120,7 @@ choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg) struct cfq_rb_root *st; u64 group_slice; enum wl_class_t original_class = cfqd->serving_wl_class; - u64 now = ktime_get_ns(); + u64 now = blk_time_get_ns();
/* Choose next priority. RT > BE > IDLE */ if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg)) @@ -3211,7 +3211,7 @@ static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd) static void cfq_choose_cfqg(struct cfq_data *cfqd) { struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd); - u64 now = ktime_get_ns(); + u64 now = blk_time_get_ns();
cfqd->serving_group = cfqg;
@@ -3233,7 +3233,7 @@ static void cfq_choose_cfqg(struct cfq_data *cfqd) static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) { struct cfq_queue *cfqq, *new_cfqq = NULL; - u64 now = ktime_get_ns(); + u64 now = blk_time_get_ns();
cfqq = cfqd->active_queue; if (!cfqq) @@ -3383,7 +3383,7 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd) static inline bool cfq_slice_used_soon(struct cfq_data *cfqd, struct cfq_queue *cfqq) { - u64 now = ktime_get_ns(); + u64 now = blk_time_get_ns();
/* the queue hasn't finished any request, can't estimate */ if (cfq_cfqq_slice_new(cfqq)) @@ -3466,7 +3466,7 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq) * based on the last sync IO we serviced */ if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) { - u64 last_sync = ktime_get_ns() - cfqd->last_delayed_sync; + u64 last_sync = blk_time_get_ns() - cfqd->last_delayed_sync; unsigned int depth;
depth = div64_u64(last_sync, cfqd->cfq_slice[1]); @@ -3557,7 +3557,7 @@ static int cfq_dispatch_requests(struct request_queue *q, int force) if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) && cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) || cfq_class_idle(cfqq))) { - cfqq->slice_end = ktime_get_ns() + 1; + cfqq->slice_end = blk_time_get_ns() + 1; cfq_slice_expired(cfqd, 0); }
@@ -3635,7 +3635,7 @@ static void cfq_init_icq(struct io_cq *icq) { struct cfq_io_cq *cic = icq_to_cic(icq);
- cic->ttime.last_end_request = ktime_get_ns(); + cic->ttime.last_end_request = blk_time_get_ns(); }
static void cfq_exit_icq(struct io_cq *icq) @@ -3866,7 +3866,7 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic, static void __cfq_update_io_thinktime(struct cfq_ttime *ttime, u64 slice_idle) { - u64 elapsed = ktime_get_ns() - ttime->last_end_request; + u64 elapsed = blk_time_get_ns() - ttime->last_end_request; elapsed = min(elapsed, 2UL * slice_idle);
ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8; @@ -4131,7 +4131,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq) cfq_log_cfqq(cfqd, cfqq, "insert_request"); cfq_init_prio_data(cfqq, RQ_CIC(rq));
- rq->fifo_time = ktime_get_ns() + cfqd->cfq_fifo_expire[rq_is_sync(rq)]; + rq->fifo_time = blk_time_get_ns() + cfqd->cfq_fifo_expire[rq_is_sync(rq)]; list_add_tail(&rq->queuelist, &cfqq->fifo); cfq_add_rq_rb(rq); cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group, @@ -4179,7 +4179,7 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd) static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq) { struct cfq_io_cq *cic = cfqd->active_cic; - u64 now = ktime_get_ns(); + u64 now = blk_time_get_ns();
/* If the queue already has requests, don't wait */ if (!RB_EMPTY_ROOT(&cfqq->sort_list)) @@ -4219,7 +4219,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) struct cfq_queue *cfqq = RQ_CFQQ(rq); struct cfq_data *cfqd = cfqq->cfqd; const int sync = rq_is_sync(rq); - u64 now = ktime_get_ns(); + u64 now = blk_time_get_ns();
cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", req_noidle(rq));
@@ -4657,7 +4657,7 @@ static int cfq_init_queue(struct request_queue *q, struct elevator_type *e) * we optimistically start assuming sync ops weren't delayed in last * second, in order to have larger depth for async operations. */ - cfqd->last_delayed_sync = ktime_get_ns() - NSEC_PER_SEC; + cfqd->last_delayed_sync = blk_time_get_ns() - NSEC_PER_SEC; return 0;
out_free: diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c index 833e9eaae640..f370d3e3f6e0 100644 --- a/block/kyber-iosched.c +++ b/block/kyber-iosched.c @@ -584,7 +584,7 @@ static void kyber_completed_request(struct request *rq) if (blk_stat_is_active(kqd->cb)) return;
- now = ktime_get_ns(); + now = blk_time_get_ns(); if (now < rq->io_start_time_ns) return;