From: Li Ruilin <liruilin4@huawei.com>
euleros inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4LOJ6
CVE: NA
------------------------------
The recently pushed bugfix patch "Delay to invalidate cache data in
writearound write" has a stupid copy&paste bug, which causes a bypassed
write request to never invalidate the corresponding data in the cache
device, leading to data corruption. This patch fixes the corruption. It
also ensures that the writeback lock is released only after the data
insert has completed.
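To make the race concrete, here is an illustrative interleaving,
reconstructed from the comment this patch adds to struct search (the
timing is schematic, not a trace):

  bypassed write (writearound)        concurrent read of the same blocks
  ----------------------------        ----------------------------------
  submit write bio to backing device
  invalidate those blocks in the cache
                                      read misses in the cache
                                      reads old data from the backing
                                      device (write bio not ended yet)
                                      inserts the old data into the cache
  backing write bio ends
                                      cache now serves stale data

Deferring the invalidation until the backing bio has ended closes this
window: any stale data a concurrent read may have inserted into the
cache is invalidated afterwards.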
Fixes: 6a1d9c41b367 ("bcache: Delay to invalidate cache data in writearound write")
Signed-off-by: Li Ruilin <liruilin4@huawei.com>
Signed-off-by: Song Chao <chao.song@huawei.com>
Reviewed-by: Peng Junyi <pengjunyi1@huawei.com>
Acked-by: Xie Xiuqi <xiexiuqi@huawei.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Reviewed-by: Guangxing Deng <dengguangxing@huawei.com>
Reviewed-by: chao song <chao.song@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 drivers/md/bcache/request.c | 15 +++++++++++----
 drivers/md/bcache/request.h | 13 +++++++++++++
 2 files changed, 24 insertions(+), 4 deletions(-)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 04a779573fdd..bad70906e8a2 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -735,6 +735,8 @@ struct search *search_alloc(struct bio *bio,
 	/* Count on the bcache device */
 	s->start_time = part_start_io_acct(d->disk, &s->part, bio);
 	s->prefetch = prefetch;
+	s->write_inval_data_putoff = false;
+
 	s->iop.c = d->c;
 	s->iop.bio = NULL;
 	s->iop.inode = d->id;
@@ -755,6 +757,10 @@ static void cached_dev_bio_complete(struct closure *cl)
 	struct search *s = container_of(cl, struct search, cl);
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
+	/* ensure this lock is released after data_insert */
+	if (s->write_inval_data_putoff)
+		up_read_non_owner(&dc->writeback_lock);
+
 	cached_dev_put(dc);
 	search_free(cl);
 }
@@ -993,10 +999,10 @@ static void cached_dev_write_complete(struct closure *cl)
 	struct search *s = container_of(cl, struct search, cl);
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
-	if (!s->iop.bypass)
+	if (s->write_inval_data_putoff)
 		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
-
-	up_read_non_owner(&dc->writeback_lock);
+	else
+		up_read_non_owner(&dc->writeback_lock);
 
 	continue_at(cl, cached_dev_bio_complete, NULL);
 }
@@ -1048,6 +1054,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 		bio->bi_end_io = backing_request_endio;
 		closure_bio_submit(s->iop.c, bio, cl);
 
+		s->write_inval_data_putoff = true;
 	} else if (s->iop.writeback) {
 		bch_writeback_add(dc);
 		s->iop.bio = bio;
@@ -1080,7 +1087,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 	}
 
 insert_data:
-	if (!s->iop.bypass)
+	if (!s->write_inval_data_putoff)
 		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
 	continue_at(cl, cached_dev_write_complete, NULL);
 }
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 21678037d215..42bf280d4625 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -65,6 +65,19 @@ struct search {
 	unsigned long		start_time;
 	/* for prefetch, we do not need copy data to bio */
 	bool			prefetch;
+	/*
+	 * bch_data_insert() is invoked asynchronously once the bio has been
+	 * submitted to the backing block device, so a read request may be
+	 * submitted after bch_data_insert() has finished but before the
+	 * backing bio has ended. Such a read fetches the old data from the
+	 * backing device and inserts it into the cache device. In writearound
+	 * mode bcache will not invalidate that data again, so subsequent
+	 * reads hit the stale copy in the cache, causing data corruption.
+	 *
+	 * Therefore the invalidation must be put off until the backing bio
+	 * has ended. This flag marks such a deferred invalidation.
+	 */
+	bool			write_inval_data_putoff;
 	struct list_head	list_node;
 	wait_queue_head_t	wqh;
 	struct acache_info	smp;
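
For reviewers, the net effect of the request.c hunks can be condensed
as below. This is an illustrative sketch of the control flow after this
patch, not compilable code: the writeback and miss branches, error
paths, and everything elided as /* ... */ are omitted, and only the
identifiers touched by this patch are real driver names.

	static void cached_dev_write(struct cached_dev *dc, struct search *s)
	{
		/* ... */
		if (s->iop.bypass) {
			/* writearound: the write goes straight to the backing device */
			closure_bio_submit(s->iop.c, bio, cl);
			/* defer the cache invalidation until the backing bio ends */
			s->write_inval_data_putoff = true;
		}
		/* ... writeback / miss branches ... */
	insert_data:
		if (!s->write_inval_data_putoff)
			closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
		continue_at(cl, cached_dev_write_complete, NULL);
	}

	static void cached_dev_write_complete(struct closure *cl)
	{
		struct search *s = container_of(cl, struct search, cl);
		struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

		if (s->write_inval_data_putoff)
			/* backing bio has ended: invalidate the stale data now */
			closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
		else
			up_read_non_owner(&dc->writeback_lock);

		continue_at(cl, cached_dev_bio_complete, NULL);
	}

	static void cached_dev_bio_complete(struct closure *cl)
	{
		struct search *s = container_of(cl, struct search, cl);
		struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

		/* the deferred insert has run by now; release the lock last */
		if (s->write_inval_data_putoff)
			up_read_non_owner(&dc->writeback_lock);

		cached_dev_put(dc);
		search_free(cl);
	}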