From: Xu Wei <xuwei56@huawei.com>
euleros inclusion
category: feature
bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=327
CVE: NA
When the kernel config does not enable CONFIG_BCACHE, compiling the bcache module fails: struct task_struct declares its sequential_io and sequential_io_avg fields under #ifdef CONFIG_BCACHE in include/linux/sched.h, so any unguarded access to them breaks the build. This patch guards those accesses with the CONFIG_BCACHE macro so that the bcache module compiles successfully.
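A minimal sketch of the compile-out pattern the hunks below apply (seq_io_sectors() and its fallback parameter are illustrative names, not part of this patch; IS_ENABLED() evaluates to 1 for both =y and =m):

	#include <linux/kernel.h>
	#include <linux/sched.h>

	/*
	 * Sketch only: with CONFIG_BCACHE off, task_struct has no
	 * sequential_io fields, so the access must be compiled out
	 * rather than merely skipped at runtime.
	 */
	static inline unsigned int seq_io_sectors(unsigned int fallback)
	{
	#if IS_ENABLED(CONFIG_BCACHE)
		return max(current->sequential_io,
			   current->sequential_io_avg) >> 9;
	#else
		return fallback >> 9;
	#endif
	}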
Signed-off-by: qinghaixiang <xuweiqhx@163.com>
Signed-off-by: Xu Wei <xuwei56@huawei.com>
Acked-by: Xie XiuQi <xiexiuqi@huawei.com>
Reviewed-by: Li Ruilin <liruilin4@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 drivers/md/bcache/request.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 50303841dd45b..984f400d3908c 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -358,6 +358,7 @@ unsigned int bch_get_congested(const struct cache_set *c)
 	return i > 0 ? i : 1;
 }
 
+#if IS_ENABLED(CONFIG_BCACHE)
 static void add_sequential(struct task_struct *t)
 {
 	ewma_add(t->sequential_io_avg,
@@ -365,6 +366,7 @@ static void add_sequential(struct task_struct *t)
 
 	t->sequential_io = 0;
 }
+#endif
 
 static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
 {
@@ -376,7 +378,9 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 	struct cache_set *c = dc->disk.c;
 	unsigned int mode = cache_mode(dc);
 	unsigned int sectors, congested;
+#if IS_ENABLED(CONFIG_BCACHE)
 	struct task_struct *task = current;
+#endif
 	struct io *i;
 
 	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
@@ -432,7 +436,9 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 
 	i = list_first_entry(&dc->io_lru, struct io, lru);
 
+#if IS_ENABLED(CONFIG_BCACHE)
 	add_sequential(task);
+#endif
 	i->sequential = 0;
 found:
 	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
@@ -440,7 +446,9 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 
 	i->last = bio_end_sector(bio);
 	i->jiffies = jiffies + msecs_to_jiffies(5000);
+#if IS_ENABLED(CONFIG_BCACHE)
 	task->sequential_io = i->sequential;
+#endif
 
 	hlist_del(&i->hash);
 	hlist_add_head(&i->hash, iohash(dc, i->last));
@@ -448,8 +456,12 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 
 	spin_unlock(&dc->io_lock);
 
+#if IS_ENABLED(CONFIG_BCACHE)
 	sectors = max(task->sequential_io,
 		      task->sequential_io_avg) >> 9;
+#else
+	sectors = i->sequential >> 9;
+#endif
 
 	if (dc->sequential_cutoff &&
 	    sectors >= dc->sequential_cutoff >> 9) {