From: Mikulas Patocka <mpatocka@redhat.com>
stable inclusion
from linux-4.19.199
commit 797b950e8aaec3fc30312bb9a44d702c6cc9d25e
--------------------------------
commit 4134455f2aafdfeab50cabb4cccb35e916034b93 upstream.
Do not attempt to write any data beyond the end of the underlying data device while shrinking it.
The DM writecache device must be suspended when the underlying data device is shrunk.
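For reference, the fix works by capturing the size of the underlying data device, in 512-byte sectors, each time the target is resumed, and then treating that value as a hard upper bound during writeback. The short userspace sketch below models the persistent-memory-path check with plain integers; device_sectors() and may_append_block() are illustrative helpers, not symbols from the driver, and 512-byte sectors are assumed.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t sector_t;

	#define SECTOR_SHIFT 9	/* 512-byte sectors, as in the kernel */

	/* Captured at resume time: size of the underlying data device in sectors. */
	static sector_t device_sectors(uint64_t device_bytes)
	{
		return device_bytes >> SECTOR_SHIFT;
	}

	/*
	 * Model of the pmem-path guard: once the writeback bio already ends at or
	 * past the device boundary, no further blocks may be appended to it.
	 */
	static bool may_append_block(sector_t bio_end_sector, sector_t dev_sectors)
	{
		return bio_end_sector < dev_sectors;
	}

	int main(void)
	{
		sector_t dev = device_sectors(1ULL << 20);	/* 1 MiB device = 2048 sectors */

		printf("%d\n", may_append_block(2040, dev));	/* 1: still room before the end */
		printf("%d\n", may_append_block(2048, dev));	/* 0: bio already reaches the end */
		return 0;
	}

A writeback bio that ends up with zero sectors because of this guard is then completed with BLK_STS_OK instead of being submitted, as the pmem hunk below shows.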
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Cc: stable@vger.kernel.org
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 drivers/md/dm-writecache.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index c7ea0085fb474..6c6c23634bcfa 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -142,6 +142,7 @@ struct dm_writecache {
 	size_t metadata_sectors;
 	size_t n_blocks;
 	uint64_t seq_count;
+	sector_t data_device_sectors;
 	void *block_start;
 	struct wc_entry *entries;
 	unsigned block_size;
@@ -929,6 +930,8 @@ static void writecache_resume(struct dm_target *ti)
 
 	wc_lock(wc);
 
+	wc->data_device_sectors = i_size_read(wc->dev->bdev->bd_inode) >> SECTOR_SHIFT;
+
 	if (WC_MODE_PMEM(wc)) {
 		persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
 	} else {
@@ -1499,6 +1502,10 @@ static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e, gfp_t
 	void *address = memory_data(wc, e);
 
 	persistent_memory_flush_cache(address, block_size);
+
+	if (unlikely(bio_end_sector(&wb->bio) >= wc->data_device_sectors))
+		return true;
+
 	return bio_add_page(&wb->bio, persistent_memory_page(address),
 			    block_size, persistent_memory_page_offset(address)) != 0;
 }
@@ -1571,6 +1578,9 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
 		if (writecache_has_error(wc)) {
 			bio->bi_status = BLK_STS_IOERR;
 			bio_endio(&wb->bio);
+		} else if (unlikely(!bio_sectors(&wb->bio))) {
+			bio->bi_status = BLK_STS_OK;
+			bio_endio(&wb->bio);
 		} else {
 			submit_bio(&wb->bio);
 		}
@@ -1614,6 +1624,14 @@ static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writebac
 			e = f;
 		}
 
+		if (unlikely(to.sector + to.count > wc->data_device_sectors)) {
+			if (to.sector >= wc->data_device_sectors) {
+				writecache_copy_endio(0, 0, c);
+				continue;
+			}
+			from.count = to.count = wc->data_device_sectors - to.sector;
+		}
+
 		dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c);
 
 		__writeback_throttle(wc, wbl);
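On the SSD-mode path the patch clamps the kcopyd destination rather than skipping it outright: a region that starts at or beyond the new end of the device is completed immediately, while a region that merely straddles the boundary is trimmed so the copy stops at it. A minimal userspace model of that clamp follows; struct copy_region and clamp_region() are illustrative names, not the driver's.

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t sector_t;

	/* Destination region for a kcopyd-style copy: start sector and length. */
	struct copy_region {
		sector_t sector;
		sector_t count;
	};

	/*
	 * Clamp 'to' against the current data-device size.
	 * Returns 0 if the region should still be copied (possibly trimmed),
	 * 1 if it lies entirely past the end and should just be completed.
	 */
	static int clamp_region(struct copy_region *to, sector_t dev_sectors)
	{
		if (to->sector + to->count > dev_sectors) {
			if (to->sector >= dev_sectors)
				return 1;			/* nothing left to copy */
			to->count = dev_sectors - to->sector;	/* trim to the boundary */
		}
		return 0;
	}

	int main(void)
	{
		struct copy_region a = { .sector = 2040, .count = 16 };	/* straddles the end */
		struct copy_region b = { .sector = 4096, .count = 8 };	/* fully past the end */
		sector_t dev_sectors = 2048;				/* shrunk device: 1 MiB */

		printf("a: skip=%d count=%llu\n", clamp_region(&a, dev_sectors),
		       (unsigned long long)a.count);	/* skip=0 count=8 */
		printf("b: skip=%d count=%llu\n", clamp_region(&b, dev_sectors),
		       (unsigned long long)b.count);	/* skip=1 count=8 (untouched) */
		return 0;
	}

In the driver itself the "skip" case calls writecache_copy_endio() directly with a zero error, so the entry is treated as written back without ever touching the shrunk device.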