dm writecache: fix writing beyond end of underlying device when shrinking
author: Mikulas Patocka <mpatocka@redhat.com>
Tue, 9 Feb 2021 15:56:20 +0000 (10:56 -0500)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 4 Mar 2021 10:38:45 +0000 (11:38 +0100)
commit 4134455f2aafdfeab50cabb4cccb35e916034b93 upstream.

Do not attempt to write any data beyond the end of the underlying data
device while shrinking it.

The DM writecache device must be suspended when the underlying data
device is shrunk.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Cc: stable@vger.kernel.org
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/md/dm-writecache.c

index 3a01c1e..8628c4a 100644 (file)
@@ -148,6 +148,7 @@ struct dm_writecache {
        size_t metadata_sectors;
        size_t n_blocks;
        uint64_t seq_count;
+       sector_t data_device_sectors;
        void *block_start;
        struct wc_entry *entries;
        unsigned block_size;
@@ -977,6 +978,8 @@ static void writecache_resume(struct dm_target *ti)
 
        wc_lock(wc);
 
+       wc->data_device_sectors = i_size_read(wc->dev->bdev->bd_inode) >> SECTOR_SHIFT;
+
        if (WC_MODE_PMEM(wc)) {
                persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
        } else {
@@ -1646,6 +1649,10 @@ static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e, gfp_t
        void *address = memory_data(wc, e);
 
        persistent_memory_flush_cache(address, block_size);
+
+       if (unlikely(bio_end_sector(&wb->bio) >= wc->data_device_sectors))
+               return true;
+
        return bio_add_page(&wb->bio, persistent_memory_page(address),
                            block_size, persistent_memory_page_offset(address)) != 0;
 }
@@ -1717,6 +1724,9 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
                if (writecache_has_error(wc)) {
                        bio->bi_status = BLK_STS_IOERR;
                        bio_endio(bio);
+               } else if (unlikely(!bio_sectors(bio))) {
+                       bio->bi_status = BLK_STS_OK;
+                       bio_endio(bio);
                } else {
                        submit_bio(bio);
                }
@@ -1760,6 +1770,14 @@ static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writebac
                        e = f;
                }
 
+               if (unlikely(to.sector + to.count > wc->data_device_sectors)) {
+                       if (to.sector >= wc->data_device_sectors) {
+                               writecache_copy_endio(0, 0, c);
+                               continue;
+                       }
+                       from.count = to.count = wc->data_device_sectors - to.sector;
+               }
+
                dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c);
 
                __writeback_throttle(wc, wbl);