From: Li Ruilin <liruilin4@huawei.com>
euleros inclusion
category: feature
bugzilla: https://bugzilla.openeuler.org/show_bug.cgi?id=26
CVE: NA
------------------------------
The recently pushed bugfix patch "Delay to invalidate cache data in writearound write" contains a copy&paste bug, which causes bypassed write requests to never invalidate the corresponding data in the cache device, leading to data corruption. This patch fixes the corruption and also ensures that the writeback lock is released only after the data insert has finished.
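To make the broken condition easier to see, here is a small standalone sketch (illustration only, not the driver code; struct fake_search, fake_data_insert() and fake_unlock() are made-up stand-ins for the bcache structures and helpers) contrasting the old check with the new one for a bypassed writearound write:

  #include <stdbool.h>
  #include <stdio.h>

  struct fake_search {
  	bool bypass;                  /* write bypasses the cache (writearound) */
  	bool write_inval_data_putoff; /* cache invalidation deferred to completion */
  };

  static void fake_data_insert(void) { puts("invalidate stale cache data"); }
  static void fake_unlock(void)      { puts("release writeback lock"); }

  /* Old check: for a bypassed write the invalidation never runs and the
   * lock is dropped regardless. */
  static void write_complete_old(struct fake_search *s)
  {
  	if (!s->bypass)
  		fake_data_insert();
  	fake_unlock();
  }

  /* New check: the deferred invalidation runs for bypassed writes; the
   * lock release is left to the final completion path. */
  static void write_complete_new(struct fake_search *s)
  {
  	if (s->write_inval_data_putoff)
  		fake_data_insert();
  	else
  		fake_unlock();
  }

  int main(void)
  {
  	struct fake_search s = { .bypass = true, .write_inval_data_putoff = true };

  	write_complete_old(&s); /* prints only "release writeback lock" */
  	write_complete_new(&s); /* prints "invalidate stale cache data" */
  	return 0;
  }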
Fixes: 5239e24e76ac ("bcache: Delay to invalidate cache data in writearound write")
Signed-off-by: Li Ruilin <liruilin4@huawei.com>
Signed-off-by: Song Chao <chao.song@huawei.com>
Reviewed-by: Peng Junyi <pengjunyi1@huawei.com>
Acked-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 drivers/md/bcache/request.c | 14 ++++++++++----
 drivers/md/bcache/request.h | 12 ++++++++++++
 2 files changed, 22 insertions(+), 4 deletions(-)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 18c8e5baa0116..2f3ecd8fc5801 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -734,6 +734,7 @@ struct search *search_alloc(struct bio *bio,
 	s->read_dirty_data = 0;
 	s->start_time = jiffies;
 	s->prefetch = prefetch;
+	s->write_inval_data_putoff = false;

 	s->iop.c = d->c;
 	s->iop.bio = NULL;

@@ -755,6 +756,10 @@ static void cached_dev_bio_complete(struct closure *cl)
 	struct search *s = container_of(cl, struct search, cl);
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+	/* ensure this lock is released after data_insert */
+	if (s->write_inval_data_putoff)
+		up_read_non_owner(&dc->writeback_lock);
+
 	cached_dev_put(dc);
 	search_free(cl);
 }

@@ -993,10 +998,10 @@ static void cached_dev_write_complete(struct closure *cl)
 	struct search *s = container_of(cl, struct search, cl);
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
-	if (!s->iop.bypass)
+	if (s->write_inval_data_putoff)
 		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
-
-	up_read_non_owner(&dc->writeback_lock);
+	else
+		up_read_non_owner(&dc->writeback_lock);

 	continue_at(cl, cached_dev_bio_complete, NULL);
 }

@@ -1048,6 +1053,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 		bio->bi_end_io = backing_request_endio;
 		closure_bio_submit(s->iop.c, bio, cl);
+		s->write_inval_data_putoff = true;

 	} else if (s->iop.writeback) {
 		bch_writeback_add(dc);
 		s->iop.bio = bio;

@@ -1080,7 +1086,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 	}
 insert_data:
-	if (!s->iop.bypass)
+	if (!s->write_inval_data_putoff)
 		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);

 	continue_at(cl, cached_dev_write_complete, NULL);
 }
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 6366b88619745..7682630db73f6 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -61,6 +61,18 @@ struct search {
 	unsigned long start_time;
 	/* for prefetch, we do not need copy data to bio */
 	bool prefetch;
+	/*
+	 * bch_data_insert() is invoked asynchronously while the bio is
+	 * submitted to the backend block device, so a read request may be
+	 * submitted after bch_data_insert() has finished but before the
+	 * backend bio has completed. Such a read fetches stale data from
+	 * the backend block device and inserts it into the cache device.
+	 * In writearound mode bcache will not invalidate that data again,
+	 * so later reads hit the stale copy in the cache, causing data
+	 * corruption. The invalidation must therefore be put off until the
+	 * backend bio completes; this switch marks requests that defer it.
+	 */
+	bool write_inval_data_putoff;
 	struct list_head list_node;
 	wait_queue_head_t wqh;
 	struct acache_info smp;