From 9eb7109a5bfc5b8226e9517e9f3cc6d414391884 Mon Sep 17 00:00:00 2001 From: Benjamin Marzinski Date: Thu, 10 Apr 2025 15:49:38 -0400 Subject: [PATCH 01/29] dm: don't change md if dm_table_set_restrictions() fails __bind was changing the disk capacity, geometry and mempools of the mapped device before calling dm_table_set_restrictions() which could fail, forcing dm to drop the new table. Failing here would leave the device using the old table but with the wrong capacity and mempools. Move dm_table_set_restrictions() earlier in __bind(). Since it needs the capacity to be set, save the old version and restore it on failure. Fixes: bb37d77239af2 ("dm: introduce zone append emulation") Reviewed-by: Damien Le Moal Tested-by: Damien Le Moal Signed-off-by: Benjamin Marzinski Signed-off-by: Mikulas Patocka --- drivers/md/dm.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 5ab7574c0c76..f5c5ccb6f8d2 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2421,21 +2421,29 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, struct queue_limits *limits) { struct dm_table *old_map; - sector_t size; + sector_t size, old_size; int ret; lockdep_assert_held(&md->suspend_lock); size = dm_table_get_size(t); + old_size = dm_get_size(md); + set_capacity(md->disk, size); + + ret = dm_table_set_restrictions(t, md->queue, limits); + if (ret) { + set_capacity(md->disk, old_size); + old_map = ERR_PTR(ret); + goto out; + } + /* * Wipe any geometry if the size of the table changed. */ - if (size != dm_get_size(md)) + if (size != old_size) memset(&md->geometry, 0, sizeof(md->geometry)); - set_capacity(md->disk, size); - dm_table_event_callback(t, event_callback, md); if (dm_table_request_based(t)) { @@ -2468,12 +2476,6 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, t->mempools = NULL; } - ret = dm_table_set_restrictions(t, md->queue, limits); - if (ret) { - old_map = ERR_PTR(ret); - goto out; - } - old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); rcu_assign_pointer(md->map, (void *)t); md->immutable_target_type = dm_table_get_immutable_target_type(t); From e8819e7f03470c5b468720630d9e4e1d5b99159e Mon Sep 17 00:00:00 2001 From: Benjamin Marzinski Date: Thu, 10 Apr 2025 15:49:39 -0400 Subject: [PATCH 02/29] dm: free table mempools if not used in __bind With request-based dm, the mempools don't need reloading when switching tables, but the unused table mempools are not freed until the active table is finally freed. Free them immediately if they are not needed. Fixes: 29dec90a0f1d9 ("dm: fix bio_set allocation") Reviewed-by: Damien Le Moal Tested-by: Damien Le Moal Signed-off-by: Benjamin Marzinski Signed-off-by: Mikulas Patocka --- drivers/md/dm.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index f5c5ccb6f8d2..292414da871d 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2461,10 +2461,10 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, * requests in the queue may refer to bio from the old bioset, * so you must walk through the queue to unprep. */ - if (!md->mempools) { + if (!md->mempools) md->mempools = t->mempools; - t->mempools = NULL; - } + else + dm_free_md_mempools(t->mempools); } else { /* * The md may already have mempools that need changing. 
@@ -2473,8 +2473,8 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, */ dm_free_md_mempools(md->mempools); md->mempools = t->mempools; - t->mempools = NULL; } + t->mempools = NULL; old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); rcu_assign_pointer(md->map, (void *)t); From 4ea30ec6fb3bb598bd1df04cdfab13b1140074d2 Mon Sep 17 00:00:00 2001 From: Benjamin Marzinski Date: Thu, 10 Apr 2025 15:49:40 -0400 Subject: [PATCH 03/29] dm: handle failures in dm_table_set_restrictions If dm_table_set_restrictions() fails while swapping tables, device-mapper will continue using the previous table. It must be sure to leave the mapped_device in its previous state on failure. Otherwise device-mapper could end up using the old table with settings from the unused table. Do not update the mapped device in dm_set_zones_restrictions(). Wait until after dm_table_set_restrictions() is sure to succeed to update the md zoned settings. Do the same with the dax settings, and if dm_revalidate_zones() fails, restore the original queue limits. Fixes: 7f91ccd8a608d ("dm: Call dm_revalidate_zones() after setting the queue limits") Reviewed-by: Damien Le Moal Tested-by: Damien Le Moal Signed-off-by: Benjamin Marzinski Signed-off-by: Mikulas Patocka --- drivers/md/dm-table.c | 26 +++++++++++++++++--------- drivers/md/dm-zone.c | 26 ++++++++++++++++++-------- drivers/md/dm.h | 1 + 3 files changed, 36 insertions(+), 17 deletions(-) diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 35100a435c88..279f1e024796 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1835,6 +1835,7 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, struct queue_limits *limits) { int r; + struct queue_limits old_limits; if (!dm_table_supports_nowait(t)) limits->features &= ~BLK_FEAT_NOWAIT; @@ -1861,16 +1862,11 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, if (dm_table_supports_flush(t)) limits->features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA; - if (dm_table_supports_dax(t, device_not_dax_capable)) { + if (dm_table_supports_dax(t, device_not_dax_capable)) limits->features |= BLK_FEAT_DAX; - if (dm_table_supports_dax(t, device_not_dax_synchronous_capable)) - set_dax_synchronous(t->md->dax_dev); - } else + else limits->features &= ~BLK_FEAT_DAX; - if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL)) - dax_write_cache(t->md->dax_dev, true); - /* For a zoned table, setup the zone related queue attributes.
*/ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && (limits->features & BLK_FEAT_ZONED)) { @@ -1882,7 +1878,8 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, if (dm_table_supports_atomic_writes(t)) limits->features |= BLK_FEAT_ATOMIC_WRITES; - r = queue_limits_set(q, limits); + old_limits = queue_limits_start_update(q); + r = queue_limits_commit_update(q, limits); if (r) return r; @@ -1893,10 +1890,21 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && (limits->features & BLK_FEAT_ZONED)) { r = dm_revalidate_zones(t, q); - if (r) + if (r) { + queue_limits_set(q, &old_limits); return r; + } } + if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) + dm_finalize_zone_settings(t, limits); + + if (dm_table_supports_dax(t, device_not_dax_synchronous_capable)) + set_dax_synchronous(t->md->dax_dev); + + if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL)) + dax_write_cache(t->md->dax_dev, true); + dm_update_crypto_profile(q, t); return 0; } diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c index 20edd3fabbab..681058feb63b 100644 --- a/drivers/md/dm-zone.c +++ b/drivers/md/dm-zone.c @@ -340,12 +340,8 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q, * mapped device queue as needing zone append emulation. */ WARN_ON_ONCE(queue_is_mq(q)); - if (dm_table_supports_zone_append(t)) { - clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags); - } else { - set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags); + if (!dm_table_supports_zone_append(t)) lim->max_hw_zone_append_sectors = 0; - } /* * Determine the max open and max active zone limits for the mapped @@ -383,9 +379,6 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q, lim->zone_write_granularity = 0; lim->chunk_sectors = 0; lim->features &= ~BLK_FEAT_ZONED; - clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags); - md->nr_zones = 0; - disk->nr_zones = 0; return 0; } @@ -408,6 +401,23 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q, return 0; } +void dm_finalize_zone_settings(struct dm_table *t, struct queue_limits *lim) +{ + struct mapped_device *md = t->md; + + if (lim->features & BLK_FEAT_ZONED) { + if (dm_table_supports_zone_append(t)) + clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags); + else + set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags); + } else { + clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags); + md->nr_zones = 0; + md->disk->nr_zones = 0; + } +} + + /* * IO completion callback called from clone_endio(). */ diff --git a/drivers/md/dm.h b/drivers/md/dm.h index a0a8ff119815..e5d3a9f46a91 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -102,6 +102,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t); int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q, struct queue_limits *lim); int dm_revalidate_zones(struct dm_table *t, struct request_queue *q); +void dm_finalize_zone_settings(struct dm_table *t, struct queue_limits *lim); void dm_zone_endio(struct dm_io *io, struct bio *clone); #ifdef CONFIG_BLK_DEV_ZONED int dm_blk_report_zones(struct gendisk *disk, sector_t sector, From 37f53a2c60d03743e0eacf7a0c01c279776fef4e Mon Sep 17 00:00:00 2001 From: Benjamin Marzinski Date: Thu, 10 Apr 2025 15:49:41 -0400 Subject: [PATCH 04/29] dm: fix dm_blk_report_zones If dm_get_live_table() returned NULL, dm_put_live_table() was never called. Also, it is possible that md->zone_revalidate_map will change while calling this function. 
Only read it once, so that we are always using the same value. Otherwise we might miss a call to dm_put_live_table(). Finally, while md->zone_revalidate_map is set and a process is calling blk_revalidate_disk_zones() to set up the zone append emulation resources, it is possible that another process, perhaps triggered by blkdev_report_zones_ioctl(), will call dm_blk_report_zones(). If blk_revalidate_disk_zones() fails, these resources can be freed while the other process is still using them, causing a use-after-free error. blk_revalidate_disk_zones() will only ever be called when initially setting up the zone append emulation resources, such as when setting up a zoned dm-crypt table for the first time. Further table swaps will not set md->zone_revalidate_map or call blk_revalidate_disk_zones(). However it must be called using the new table (referenced by md->zone_revalidate_map) and the new queue limits while the DM device is suspended. dm_blk_report_zones() needs some way to distinguish between a call from blk_revalidate_disk_zones(), which must be allowed to use md->zone_revalidate_map to access this not yet activated table, and all other calls to dm_blk_report_zones(), which should not be allowed while the device is suspended and cannot use md->zone_revalidate_map, since the zone resources might be freed by the process currently calling blk_revalidate_disk_zones(). Solve this by tracking the process that sets md->zone_revalidate_map in dm_revalidate_zones() and only allowing that process to make use of it in dm_blk_report_zones(). Fixes: f211268ed1f9b ("dm: Use the block layer zone append emulation") Reviewed-by: Damien Le Moal Tested-by: Damien Le Moal Signed-off-by: Benjamin Marzinski Signed-off-by: Mikulas Patocka --- drivers/md/dm-core.h | 1 + drivers/md/dm-zone.c | 25 +++++++++++++++++-------- 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 3637761f3585..f3a3f2ef6322 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -141,6 +141,7 @@ struct mapped_device { #ifdef CONFIG_BLK_DEV_ZONED unsigned int nr_zones; void *zone_revalidate_map; + struct task_struct *revalidate_map_task; #endif #ifdef CONFIG_IMA diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c index 681058feb63b..ff9a1a94eea9 100644 --- a/drivers/md/dm-zone.c +++ b/drivers/md/dm-zone.c @@ -56,24 +56,31 @@ int dm_blk_report_zones(struct gendisk *disk, sector_t sector, { struct mapped_device *md = disk->private_data; struct dm_table *map; - int srcu_idx, ret; + struct dm_table *zone_revalidate_map = md->zone_revalidate_map; + int srcu_idx, ret = -EIO; + bool put_table = false; - if (!md->zone_revalidate_map) { - /* Regular user context */ + if (!zone_revalidate_map || md->revalidate_map_task != current) { + /* + * Regular user context or + * Zone revalidation during __bind() is in progress, but this + * call is from a different process + */ if (dm_suspended_md(md)) return -EAGAIN; map = dm_get_live_table(md, &srcu_idx); - if (!map) - return -EIO; + put_table = true; } else { /* Zone revalidation during __bind() */ - map = md->zone_revalidate_map; + map = zone_revalidate_map; } - ret = dm_blk_do_report_zones(md, map, sector, nr_zones, cb, data); + if (map) + ret = dm_blk_do_report_zones(md, map, sector, nr_zones, cb, + data); - if (!md->zone_revalidate_map) + if (put_table) dm_put_live_table(md, srcu_idx); return ret; @@ -175,7 +182,9 @@ int dm_revalidate_zones(struct dm_table *t, struct request_queue *q) * our table for dm_blk_report_zones() to 
use directly. */ md->zone_revalidate_map = t; + md->revalidate_map_task = current; ret = blk_revalidate_disk_zones(disk); + md->revalidate_map_task = NULL; md->zone_revalidate_map = NULL; if (ret) { From 121218bef4c1df165181f5cd8fc3a2246bac817e Mon Sep 17 00:00:00 2001 From: Benjamin Marzinski Date: Thu, 10 Apr 2025 15:49:42 -0400 Subject: [PATCH 05/29] dm: limit swapping tables for devices with zone write plugs dm_revalidate_zones() only allowed new or previously unzoned devices to call blk_revalidate_disk_zones(). If the device was already zoned, disk->nr_zones would always equal md->nr_zones, so dm_revalidate_zones() returned without doing any work. This would make the zoned settings for the device not match the new table. If the device had zone write plug resources, it could run into errors like bdev_zone_is_seq() reading invalid memory because disk->conv_zones_bitmap was the wrong size. If the device doesn't have any zone write plug resources, calling blk_revalidate_disk_zones() will always correctly update the device. If blk_revalidate_disk_zones() fails, it can still overwrite or clear the current disk->nr_zones value. In this case, DM must restore the previous value of disk->nr_zones, so that the zoned settings continue to match the old table that the device fell back to. If the device already has zone write plug resources, blk_revalidate_disk_zones() will not correctly update them if it is called for arbitrary zoned device changes. Since there is not much need for this ability, the easiest solution is to disallow any table reloads that change the zoned settings for devices that already have zone plug resources. Specifically, if a device already has zone plug resources allocated, it can only switch to another zoned table that also emulates zone append. Also, it cannot change the device size or the zone size. A device can switch to an error target. Fixes: bb37d77239af2 ("dm: introduce zone append emulation") Reviewed-by: Damien Le Moal Tested-by: Damien Le Moal Signed-off-by: Benjamin Marzinski Signed-off-by: Mikulas Patocka --- drivers/md/dm-table.c | 41 ++++++++++++++++++++++++++++++++++++----- drivers/md/dm-zone.c | 35 ++++++++++++++++++++++++++--------- drivers/md/dm.c | 6 ++++++ drivers/md/dm.h | 5 +++++ 4 files changed, 73 insertions(+), 14 deletions(-) diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 279f1e024796..69d4a0e0e613 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1491,6 +1491,18 @@ bool dm_table_has_no_data_devices(struct dm_table *t) return true; } +bool dm_table_is_wildcard(struct dm_table *t) +{ + for (unsigned int i = 0; i < t->num_targets; i++) { + struct dm_target *ti = dm_table_get_target(t, i); + + if (!dm_target_is_wildcard(ti->type)) + return false; + } + + return true; +} + static int device_not_zoned(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { @@ -1831,6 +1843,19 @@ static bool dm_table_supports_atomic_writes(struct dm_table *t) return true; } +bool dm_table_supports_size_change(struct dm_table *t, sector_t old_size, + sector_t new_size) +{ + if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && dm_has_zone_plugs(t->md) && + old_size != new_size) { + DMWARN("%s: device has zone write plug resources. 
" + "Cannot change size", + dm_device_name(t->md)); + return false; + } + return true; +} + int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, struct queue_limits *limits) { @@ -1868,11 +1893,17 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, limits->features &= ~BLK_FEAT_DAX; /* For a zoned table, setup the zone related queue attributes. */ - if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && - (limits->features & BLK_FEAT_ZONED)) { - r = dm_set_zones_restrictions(t, q, limits); - if (r) - return r; + if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { + if (limits->features & BLK_FEAT_ZONED) { + r = dm_set_zones_restrictions(t, q, limits); + if (r) + return r; + } else if (dm_has_zone_plugs(t->md)) { + DMWARN("%s: device has zone write plug resources. " + "Cannot switch to non-zoned table.", + dm_device_name(t->md)); + return -EINVAL; + } } if (dm_table_supports_atomic_writes(t)) diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c index ff9a1a94eea9..4af78111d0b4 100644 --- a/drivers/md/dm-zone.c +++ b/drivers/md/dm-zone.c @@ -160,22 +160,22 @@ int dm_revalidate_zones(struct dm_table *t, struct request_queue *q) { struct mapped_device *md = t->md; struct gendisk *disk = md->disk; + unsigned int nr_zones = disk->nr_zones; int ret; if (!get_capacity(disk)) return 0; - /* Revalidate only if something changed. */ - if (!disk->nr_zones || disk->nr_zones != md->nr_zones) { - DMINFO("%s using %s zone append", - disk->disk_name, - queue_emulates_zone_append(q) ? "emulated" : "native"); - md->nr_zones = 0; - } - - if (md->nr_zones) + /* + * Do not revalidate if zone write plug resources have already + * been allocated. + */ + if (dm_has_zone_plugs(md)) return 0; + DMINFO("%s using %s zone append", disk->disk_name, + queue_emulates_zone_append(q) ? "emulated" : "native"); + /* * Our table is not live yet. So the call to dm_get_live_table() * in dm_blk_report_zones() will fail. Set a temporary pointer to @@ -189,6 +189,7 @@ int dm_revalidate_zones(struct dm_table *t, struct request_queue *q) if (ret) { DMERR("Revalidate zones failed %d", ret); + disk->nr_zones = nr_zones; return ret; } @@ -385,12 +386,28 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q, lim->max_open_zones = 0; lim->max_active_zones = 0; lim->max_hw_zone_append_sectors = 0; + lim->max_zone_append_sectors = 0; lim->zone_write_granularity = 0; lim->chunk_sectors = 0; lim->features &= ~BLK_FEAT_ZONED; return 0; } + if (get_capacity(disk) && dm_has_zone_plugs(t->md)) { + if (q->limits.chunk_sectors != lim->chunk_sectors) { + DMWARN("%s: device has zone write plug resources. " + "Cannot change zone size", + disk->disk_name); + return -EINVAL; + } + if (lim->max_hw_zone_append_sectors != 0 && + !dm_table_is_wildcard(t)) { + DMWARN("%s: device has zone write plug resources. 
" + "New table must emulate zone append", + disk->disk_name); + return -EINVAL; + } + } /* * Warn once (when the capacity is not yet set) if the mapped device is * partially using zone resources of the target devices as that leads to diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 292414da871d..240f6dab8dda 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2429,6 +2429,12 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, size = dm_table_get_size(t); old_size = dm_get_size(md); + + if (!dm_table_supports_size_change(t, old_size, size)) { + old_map = ERR_PTR(-EINVAL); + goto out; + } + set_capacity(md->disk, size); ret = dm_table_set_restrictions(t, md->queue, limits); diff --git a/drivers/md/dm.h b/drivers/md/dm.h index e5d3a9f46a91..245f52b59215 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -58,6 +58,7 @@ void dm_table_event_callback(struct dm_table *t, void (*fn)(void *), void *context); struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector); bool dm_table_has_no_data_devices(struct dm_table *table); +bool dm_table_is_wildcard(struct dm_table *t); int dm_calculate_queue_limits(struct dm_table *table, struct queue_limits *limits); int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, @@ -72,6 +73,8 @@ struct target_type *dm_table_get_immutable_target_type(struct dm_table *t); struct dm_target *dm_table_get_immutable_target(struct dm_table *t); struct dm_target *dm_table_get_wildcard_target(struct dm_table *t); bool dm_table_request_based(struct dm_table *t); +bool dm_table_supports_size_change(struct dm_table *t, sector_t old_size, + sector_t new_size); void dm_lock_md_type(struct mapped_device *md); void dm_unlock_md_type(struct mapped_device *md); @@ -111,12 +114,14 @@ bool dm_is_zone_write(struct mapped_device *md, struct bio *bio); int dm_zone_get_reset_bitmap(struct mapped_device *md, struct dm_table *t, sector_t sector, unsigned int nr_zones, unsigned long *need_reset); +#define dm_has_zone_plugs(md) ((md)->disk->zone_wplugs_hash != NULL) #else #define dm_blk_report_zones NULL static inline bool dm_is_zone_write(struct mapped_device *md, struct bio *bio) { return false; } +#define dm_has_zone_plugs(md) false #endif /* From ad320ae27661f91585e00c114d1b264130f1bebe Mon Sep 17 00:00:00 2001 From: Benjamin Marzinski Date: Thu, 10 Apr 2025 15:49:43 -0400 Subject: [PATCH 06/29] dm: fix native zone append devices on top of emulated ones If a DM device that can pass down zone append commands is stacked on top of a device that emulates zone append commands, it will allocate zone append emulation resources, even though it doesn't use them. This is because the underlying device will have max_hw_zone_append_sectors set to 0 to request zone append emulation. When the DM device is stacked on top of it, it will inherit that max_hw_zone_append_sectors limit, despite being able to pass down zone append bios. Solve this by making sure max_hw_zone_append_sectors is non-zero for DM devices that do not need zone append emulation. 
Reviewed-by: Damien Le Moal Tested-by: Damien Le Moal Signed-off-by: Benjamin Marzinski Signed-off-by: Mikulas Patocka --- drivers/md/dm-zone.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c index 4af78111d0b4..0d989651eb15 100644 --- a/drivers/md/dm-zone.c +++ b/drivers/md/dm-zone.c @@ -347,11 +347,15 @@ int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q, /* * Check if zone append is natively supported, and if not, set the - * mapped device queue as needing zone append emulation. + * mapped device queue as needing zone append emulation. If zone + * append is natively supported, make sure that + * max_hw_zone_append_sectors is not set to 0. */ WARN_ON_ONCE(queue_is_mq(q)); if (!dm_table_supports_zone_append(t)) lim->max_hw_zone_append_sectors = 0; + else if (lim->max_hw_zone_append_sectors == 0) + lim->max_hw_zone_append_sectors = lim->max_zone_append_sectors; /* * Determine the max open and max active zone limits for the mapped From 33304b75df651bda2d34394e59cd9ee4e3c07602 Mon Sep 17 00:00:00 2001 From: Benjamin Marzinski Date: Mon, 14 Apr 2025 15:28:36 +0200 Subject: [PATCH 07/29] dm-delay: don't busy-wait in kthread When using a kthread to delay the IOs, dm-delay would continuously loop, checking if IOs were ready to submit. It had a cond_resched() call in the loop, but might still loop hundreds of millions of times waiting for an IO that was scheduled to be submitted tens of milliseconds in the future. With the change to make dm-delay over zoned devices always use kthreads regardless of the length of the delay, this wasted work only gets worse. To solve this and still keep roughly the same precision for very short delays, dm-delay now calls fsleep() for 1/8th of the smallest non-zero delay it will place on IOs, or 1 ms, whichever is smaller. The reason that dm-delay doesn't just use the actual expiration time of the next delayed IO to calculate the sleep time is that delay_dtr() must wait for the kthread to finish before deleting the table. If a zoned device with a long delay queued an IO shortly before being suspended and removed, the IO would be flushed in delay_presuspend(), but removing the device would still have to wait for the remainder of the long delay. This time is now capped at 1 ms.
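A worked example of the sleep interval computed by the diff below (assuming, as in dm-delay's table syntax, that delays are given in milliseconds; SLEEP_SHIFT is 3):

	/*
	 * min_delay = 4:  4 >> 3 == 0  -> worker_sleep_us = (4 * 1000) >> 3 = 500
	 * min_delay = 8:  8 >> 3 != 0  -> worker_sleep_us = 1000 (the 1 ms cap)
	 */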
Signed-off-by: Benjamin Marzinski Reviewed-by: Damien Le Moal Tested-by: Damien Le Moal Signed-off-by: Mikulas Patocka --- drivers/md/dm-delay.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index d4cf0ac2a7aa..16d3d454fb0a 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -14,11 +14,14 @@ #include #include #include +#include #include #define DM_MSG_PREFIX "delay" +#define SLEEP_SHIFT 3 + struct delay_class { struct dm_dev *dev; sector_t start; @@ -34,6 +37,7 @@ struct delay_c { struct work_struct flush_expired_bios; struct list_head delayed_bios; struct task_struct *worker; + unsigned int worker_sleep_us; bool may_delay; struct delay_class read; @@ -136,6 +140,7 @@ static int flush_worker_fn(void *data) schedule(); } else { spin_unlock(&dc->delayed_bios_lock); + fsleep(dc->worker_sleep_us); cond_resched(); } } @@ -212,7 +217,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv) { struct delay_c *dc; int ret; - unsigned int max_delay; + unsigned int max_delay, min_delay; if (argc != 3 && argc != 6 && argc != 9) { ti->error = "Requires exactly 3, 6 or 9 arguments"; @@ -235,7 +240,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv) ret = delay_class_ctr(ti, &dc->read, argv); if (ret) goto bad; - max_delay = dc->read.delay; + min_delay = max_delay = dc->read.delay; if (argc == 3) { ret = delay_class_ctr(ti, &dc->write, argv); @@ -251,6 +256,7 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv) if (ret) goto bad; max_delay = max(max_delay, dc->write.delay); + min_delay = min_not_zero(min_delay, dc->write.delay); if (argc == 6) { ret = delay_class_ctr(ti, &dc->flush, argv + 3); @@ -263,9 +269,14 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv) if (ret) goto bad; max_delay = max(max_delay, dc->flush.delay); + min_delay = min_not_zero(min_delay, dc->flush.delay); out: if (max_delay < 50) { + if (min_delay >> SLEEP_SHIFT) + dc->worker_sleep_us = 1000; + else + dc->worker_sleep_us = (min_delay * 1000) >> SLEEP_SHIFT; /* * In case of small requested delays, use kthread instead of * timers and workqueue to achieve better latency. @@ -438,7 +449,7 @@ out: static struct target_type delay_target = { .name = "delay", - .version = {1, 4, 0}, + .version = {1, 5, 0}, .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM, .module = THIS_MODULE, .ctr = delay_ctr, From f1e24048edb4c51678948cc6a08d20b57cde1471 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Tue, 22 Apr 2025 21:19:30 +0200 Subject: [PATCH 08/29] dm: use generic functions instead of disable_discard and disable_write_zeroes A small code cleanup: use blk_queue_disable_discard and blk_queue_disable_write_zeroes instead of disable_discard and disable_write_zeroes. 
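Condensed from the diff below, each open-coded helper call becomes a call to the generic block-layer helper on the mapped device's queue:

	/* before: dm's private helper cleared the limit directly */
	disable_discard(tio->md);
	/* after: the equivalent generic helper */
	blk_queue_disable_discard(tio->md->queue);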
Signed-off-by: Mikulas Patocka --- drivers/md/dm-core.h | 3 --- drivers/md/dm-rq.c | 4 ++-- drivers/md/dm.c | 20 ++------------------ 3 files changed, 4 insertions(+), 23 deletions(-) diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index f3a3f2ef6322..c889332e533b 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -163,9 +163,6 @@ struct mapped_device { #define DMF_POST_SUSPENDING 8 #define DMF_EMULATE_ZONE_APPEND 9 -void disable_discard(struct mapped_device *md); -void disable_write_zeroes(struct mapped_device *md); - static inline sector_t dm_get_size(struct mapped_device *md) { return get_capacity(md->disk); diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index e23076f7ece2..a6ca92049c10 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -217,10 +217,10 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped) if (unlikely(error == BLK_STS_TARGET)) { if (req_op(clone) == REQ_OP_DISCARD && !clone->q->limits.max_discard_sectors) - disable_discard(tio->md); + blk_queue_disable_discard(tio->md->queue); else if (req_op(clone) == REQ_OP_WRITE_ZEROES && !clone->q->limits.max_write_zeroes_sectors) - disable_write_zeroes(tio->md); + blk_queue_disable_write_zeroes(tio->md->queue); } switch (r) { diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 240f6dab8dda..ccccc098b30e 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1082,22 +1082,6 @@ static inline struct queue_limits *dm_get_queue_limits(struct mapped_device *md) return &md->queue->limits; } -void disable_discard(struct mapped_device *md) -{ - struct queue_limits *limits = dm_get_queue_limits(md); - - /* device doesn't really support DISCARD, disable it */ - limits->max_hw_discard_sectors = 0; -} - -void disable_write_zeroes(struct mapped_device *md) -{ - struct queue_limits *limits = dm_get_queue_limits(md); - - /* device doesn't really support WRITE ZEROES, disable it */ - limits->max_write_zeroes_sectors = 0; -} - static bool swap_bios_limit(struct dm_target *ti, struct bio *bio) { return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios); @@ -1115,10 +1099,10 @@ static void clone_endio(struct bio *bio) if (unlikely(error == BLK_STS_TARGET)) { if (bio_op(bio) == REQ_OP_DISCARD && !bdev_max_discard_sectors(bio->bi_bdev)) - disable_discard(md); + blk_queue_disable_discard(md->queue); else if (bio_op(bio) == REQ_OP_WRITE_ZEROES && !bdev_write_zeroes_sectors(bio->bi_bdev)) - disable_write_zeroes(md); + blk_queue_disable_write_zeroes(md->queue); } if (static_branch_unlikely(&zoned_enabled) && From abb4cf2f4c1c1b637cad04d726f2e13fd3051e03 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Tue, 22 Apr 2025 21:17:36 +0200 Subject: [PATCH 09/29] dm: lock limits when reading them Lock queue limits when reading them, so that we don't read halfway modified values. 
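The pattern, as applied in dm_set_device_limits() and device_not_write_zeroes_capable() below, is to take the underlying queue's limits_lock around any read of its limits:

	mutex_lock(&q->limits_lock);
	b = !q->limits.max_write_zeroes_sectors;	/* read a consistent value */
	mutex_unlock(&q->limits_lock);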
Signed-off-by: Mikulas Patocka Cc: stable@vger.kernel.org --- drivers/md/dm-table.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 69d4a0e0e613..933e01f3fab4 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -431,6 +431,7 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, return 0; } + mutex_lock(&q->limits_lock); if (blk_stack_limits(limits, &q->limits, get_start_sect(bdev) + start) < 0) DMWARN("%s: adding target device %pg caused an alignment inconsistency: " @@ -448,6 +449,7 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, */ if (!dm_target_has_integrity(ti->type)) queue_limits_stack_integrity_bdev(limits, bdev); + mutex_unlock(&q->limits_lock); return 0; } @@ -1734,8 +1736,12 @@ static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev * sector_t start, sector_t len, void *data) { struct request_queue *q = bdev_get_queue(dev->bdev); + int b; - return !q->limits.max_write_zeroes_sectors; + mutex_lock(&q->limits_lock); + b = !q->limits.max_write_zeroes_sectors; + mutex_unlock(&q->limits_lock); + return b; } static bool dm_table_supports_write_zeroes(struct dm_table *t) From f9ed31214e2ac43cd38d1e517e774050b613b8da Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 22 Apr 2025 13:22:05 -0700 Subject: [PATCH 10/29] dm-verity: use softirq context only when !need_resched() Further limit verification in softirq (a.k.a. BH) context to cases where rescheduling of the interrupted task is not pending. This helps prevent the CPU from spending too long in softirq context. Note that handle_softirqs() in kernel/softirq.c already stops running softirqs in this same case. However, that check is too coarse-grained, since many I/O requests can be processed in a single BLOCK_SOFTIRQ. Signed-off-by: Eric Biggers Signed-off-by: Mikulas Patocka --- drivers/md/dm-verity-target.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 3c427f18a04b..4de2c226ac9d 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -682,7 +682,8 @@ static void verity_bh_work(struct work_struct *w) static inline bool verity_use_bh(unsigned int bytes, unsigned short ioprio) { return ioprio <= IOPRIO_CLASS_IDLE && - bytes <= READ_ONCE(dm_verity_use_bh_bytes[ioprio]); + bytes <= READ_ONCE(dm_verity_use_bh_bytes[ioprio]) && + !need_resched(); } static void verity_end_io(struct bio *bio) From 9769378133bbc4b77458ed0a520b008439a45488 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Tue, 22 Apr 2025 13:07:35 -0700 Subject: [PATCH 11/29] dm-bufio: remove maximum age based eviction Every 30 seconds, dm-bufio evicts all buffers that were not accessed within the last max_age_seconds, except those pinned in memory via retain_bytes. By default max_age_seconds is 300 (i.e. 5 minutes), and retain_bytes is 262144 (i.e. 256 KiB) per dm-bufio client. This eviction algorithm is much too eager and is also redundant with the shrinker based eviction. Testing on an Android phone shows that about 30 MB of dm-bufio buffers (from dm-verity Merkle tree blocks) are loaded at boot time, and then about 90% of them are suddenly thrown away 5 minutes after boot. This results in unnecessary Merkle tree I/O later. Meanwhile, if the system actually encounters memory pressure, testing also shows that the shrinker is effective at evicting the buffers.
Other major Linux kernel caches, such as the page cache, do not enforce a maximum age, instead relying on the shrinker. For these reasons, Android is now setting max_age_seconds to 86400 (i.e. 1 day), which mostly disables it; see https://android.googlesource.com/platform/system/core/+/cadad290a79d5b0a30add935aaadab7c1b1ef5e9%5E%21/ That is a much better default, but really the maximum age based eviction should not exist at all. Let's remove it. Note that this also eliminates the need to run work every 30 seconds, which is beneficial too. Signed-off-by: Eric Biggers Signed-off-by: Mikulas Patocka --- drivers/md/dm-bufio.c | 189 ++++++++---------------------------------- 1 file changed, 36 insertions(+), 153 deletions(-) diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 9c8ed65cd87e..8f5ac14bb3b7 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -40,16 +40,6 @@ #define DM_BUFIO_WRITEBACK_RATIO 3 #define DM_BUFIO_LOW_WATERMARK_RATIO 16 -/* - * Check buffer ages in this interval (seconds) - */ -#define DM_BUFIO_WORK_TIMER_SECS 30 - -/* - * Free buffers when they are older than this (seconds) - */ -#define DM_BUFIO_DEFAULT_AGE_SECS 300 - /* * The nr of bytes of cached data to keep around. */ @@ -1055,10 +1045,8 @@ static unsigned long dm_bufio_cache_size_latch; static DEFINE_SPINLOCK(global_spinlock); -/* - * Buffers are freed after this timeout - */ -static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS; +static unsigned int dm_bufio_max_age; /* No longer does anything */ + static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES; static unsigned long dm_bufio_peak_allocated; @@ -1086,7 +1074,6 @@ static LIST_HEAD(dm_bufio_all_clients); static DEFINE_MUTEX(dm_bufio_clients_lock); static struct workqueue_struct *dm_bufio_wq; -static struct delayed_work dm_bufio_cleanup_old_work; static struct work_struct dm_bufio_replacement_work; @@ -2673,130 +2660,6 @@ EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset); /*--------------------------------------------------------------*/ -static unsigned int get_max_age_hz(void) -{ - unsigned int max_age = READ_ONCE(dm_bufio_max_age); - - if (max_age > UINT_MAX / HZ) - max_age = UINT_MAX / HZ; - - return max_age * HZ; -} - -static bool older_than(struct dm_buffer *b, unsigned long age_hz) -{ - return time_after_eq(jiffies, READ_ONCE(b->last_accessed) + age_hz); -} - -struct evict_params { - gfp_t gfp; - unsigned long age_hz; - - /* - * This gets updated with the largest last_accessed (ie. most - * recently used) of the evicted buffers. It will not be reinitialised - * by __evict_many(), so you can use it across multiple invocations. - */ - unsigned long last_accessed; -}; - -/* - * We may not be able to evict this buffer if IO pending or the client - * is still using it. - * - * And if GFP_NOFS is used, we must not do any I/O because we hold - * dm_bufio_clients_lock and we would risk deadlock if the I/O gets - * rerouted to different bufio client. - */ -static enum evict_result select_for_evict(struct dm_buffer *b, void *context) -{ - struct evict_params *params = context; - - if (!(params->gfp & __GFP_FS) || - (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) { - if (test_bit_acquire(B_READING, &b->state) || - test_bit(B_WRITING, &b->state) || - test_bit(B_DIRTY, &b->state)) - return ER_DONT_EVICT; - } - - return older_than(b, params->age_hz) ? 
ER_EVICT : ER_STOP; -} - -static unsigned long __evict_many(struct dm_bufio_client *c, - struct evict_params *params, - int list_mode, unsigned long max_count) -{ - unsigned long count; - unsigned long last_accessed; - struct dm_buffer *b; - - for (count = 0; count < max_count; count++) { - b = cache_evict(&c->cache, list_mode, select_for_evict, params); - if (!b) - break; - - last_accessed = READ_ONCE(b->last_accessed); - if (time_after_eq(params->last_accessed, last_accessed)) - params->last_accessed = last_accessed; - - __make_buffer_clean(b); - __free_buffer_wake(b); - - cond_resched(); - } - - return count; -} - -static void evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz) -{ - struct evict_params params = {.gfp = 0, .age_hz = age_hz, .last_accessed = 0}; - unsigned long retain = get_retain_buffers(c); - unsigned long count; - LIST_HEAD(write_list); - - dm_bufio_lock(c); - - __check_watermark(c, &write_list); - if (unlikely(!list_empty(&write_list))) { - dm_bufio_unlock(c); - __flush_write_list(&write_list); - dm_bufio_lock(c); - } - - count = cache_total(&c->cache); - if (count > retain) - __evict_many(c, ¶ms, LIST_CLEAN, count - retain); - - dm_bufio_unlock(c); -} - -static void cleanup_old_buffers(void) -{ - unsigned long max_age_hz = get_max_age_hz(); - struct dm_bufio_client *c; - - mutex_lock(&dm_bufio_clients_lock); - - __cache_size_refresh(); - - list_for_each_entry(c, &dm_bufio_all_clients, client_list) - evict_old_buffers(c, max_age_hz); - - mutex_unlock(&dm_bufio_clients_lock); -} - -static void work_fn(struct work_struct *w) -{ - cleanup_old_buffers(); - - queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work, - DM_BUFIO_WORK_TIMER_SECS * HZ); -} - -/*--------------------------------------------------------------*/ - /* * Global cleanup tries to evict the oldest buffers from across _all_ * the clients. It does this by repeatedly evicting a few buffers from @@ -2834,27 +2697,51 @@ static void __insert_client(struct dm_bufio_client *new_client) list_add_tail(&new_client->client_list, h); } +static enum evict_result select_for_evict(struct dm_buffer *b, void *context) +{ + /* In no-sleep mode, we cannot wait on IO. 
*/ + if (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep) { + if (test_bit_acquire(B_READING, &b->state) || + test_bit(B_WRITING, &b->state) || + test_bit(B_DIRTY, &b->state)) + return ER_DONT_EVICT; + } + return ER_EVICT; +} + static unsigned long __evict_a_few(unsigned long nr_buffers) { - unsigned long count; struct dm_bufio_client *c; - struct evict_params params = { - .gfp = GFP_KERNEL, - .age_hz = 0, - /* set to jiffies in case there are no buffers in this client */ - .last_accessed = jiffies - }; + unsigned long oldest_buffer = jiffies; + unsigned long last_accessed; + unsigned long count; + struct dm_buffer *b; c = __pop_client(); if (!c) return 0; dm_bufio_lock(c); - count = __evict_many(c, ¶ms, LIST_CLEAN, nr_buffers); + + for (count = 0; count < nr_buffers; count++) { + b = cache_evict(&c->cache, LIST_CLEAN, select_for_evict, NULL); + if (!b) + break; + + last_accessed = READ_ONCE(b->last_accessed); + if (time_after_eq(oldest_buffer, last_accessed)) + oldest_buffer = last_accessed; + + __make_buffer_clean(b); + __free_buffer_wake(b); + + cond_resched(); + } + dm_bufio_unlock(c); if (count) - c->oldest_buffer = params.last_accessed; + c->oldest_buffer = oldest_buffer; __insert_client(c); return count; @@ -2937,10 +2824,7 @@ static int __init dm_bufio_init(void) if (!dm_bufio_wq) return -ENOMEM; - INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn); INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup); - queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work, - DM_BUFIO_WORK_TIMER_SECS * HZ); return 0; } @@ -2952,7 +2836,6 @@ static void __exit dm_bufio_exit(void) { int bug = 0; - cancel_delayed_work_sync(&dm_bufio_cleanup_old_work); destroy_workqueue(dm_bufio_wq); if (dm_bufio_client_count) { @@ -2989,7 +2872,7 @@ module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, 0644); MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache"); module_param_named(max_age_seconds, dm_bufio_max_age, uint, 0644); -MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds"); +MODULE_PARM_DESC(max_age_seconds, "No longer does anything"); module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, 0644); MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory"); From d90e7a500cb62fe0de2591b6f08b6a3483434a3e Mon Sep 17 00:00:00 2001 From: Benjamin Marzinski Date: Tue, 15 Apr 2025 00:17:15 -0400 Subject: [PATCH 12/29] dm: remove unneeded kvfree from alloc_targets alloc_targets() is always called with a newly initialized table where t->highs == NULL. Signed-off-by: Benjamin Marzinski Signed-off-by: Mikulas Patocka --- drivers/md/dm-table.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 933e01f3fab4..a937e1e12482 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -117,7 +117,6 @@ static int alloc_targets(struct dm_table *t, unsigned int num) n_targets = (struct dm_target *) (n_highs + num); memset(n_highs, -1, sizeof(*n_highs) * num); - kvfree(t->highs); t->num_allocated = num; t->highs = n_highs; From 19da6b2c9e8e22fa7a8e19f7ad9b5f91ccec0bfd Mon Sep 17 00:00:00 2001 From: Benjamin Marzinski Date: Tue, 22 Apr 2025 19:47:35 -0400 Subject: [PATCH 13/29] dm-flakey: Clean up parsing messages There were a number of cases where the error message for an invalid table line did not match the actual problem. Fix these. Additionally, error out when duplicate corrupt_bio_byte, random_read_corrupt, or random_write_corrupt features are present. 
Also, error_reads is incompatible with random_read_corrupt and corrupt_bio_byte with the READ flag set, so disallow that. Reported-by: Kent Overstreet Signed-off-by: Benjamin Marzinski Signed-off-by: Mikulas Patocka --- drivers/md/dm-flakey.c | 33 ++++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index b690905ab89f..f5f8d25d1b2d 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -128,8 +128,11 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, * corrupt_bio_byte */ if (!strcasecmp(arg_name, "corrupt_bio_byte")) { - if (!argc) { - ti->error = "Feature corrupt_bio_byte requires parameters"; + if (fc->corrupt_bio_byte) { + ti->error = "Feature corrupt_bio_byte duplicated"; + return -EINVAL; + } else if (argc < 4) { + ti->error = "Feature corrupt_bio_byte requires 4 parameters"; return -EINVAL; } @@ -176,7 +179,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, } if (!strcasecmp(arg_name, "random_read_corrupt")) { - if (!argc) { + if (fc->random_read_corrupt) { + ti->error = "Feature random_read_corrupt duplicated"; + return -EINVAL; + } else if (!argc) { ti->error = "Feature random_read_corrupt requires a parameter"; return -EINVAL; } @@ -189,7 +195,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, } if (!strcasecmp(arg_name, "random_write_corrupt")) { - if (!argc) { + if (fc->random_write_corrupt) { + ti->error = "Feature random_write_corrupt duplicated"; + return -EINVAL; + } else if (!argc) { ti->error = "Feature random_write_corrupt requires a parameter"; return -EINVAL; } @@ -205,12 +214,18 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, return -EINVAL; } - if (test_bit(DROP_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) { - ti->error = "drop_writes is incompatible with corrupt_bio_byte with the WRITE flag set"; + if (test_bit(DROP_WRITES, &fc->flags) && + (fc->corrupt_bio_rw == WRITE || fc->random_write_corrupt)) { + ti->error = "drop_writes is incompatible with random_write_corrupt or corrupt_bio_byte with the WRITE flag set"; return -EINVAL; - } else if (test_bit(ERROR_WRITES, &fc->flags) && (fc->corrupt_bio_rw == WRITE)) { - ti->error = "error_writes is incompatible with corrupt_bio_byte with the WRITE flag set"; + } else if (test_bit(ERROR_WRITES, &fc->flags) && + (fc->corrupt_bio_rw == WRITE || fc->random_write_corrupt)) { + ti->error = "error_writes is incompatible with random_write_corrupt or corrupt_bio_byte with the WRITE flag set"; + return -EINVAL; + } else if (test_bit(ERROR_READS, &fc->flags) && + (fc->corrupt_bio_rw == READ || fc->random_read_corrupt)) { + ti->error = "error_reads is incompatible with random_read_corrupt or corrupt_bio_byte with the READ flag set"; return -EINVAL; } @@ -278,7 +293,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv) if (r) goto bad; - r = dm_read_arg(_args, &as, &fc->down_interval, &ti->error); + r = dm_read_arg(_args + 1, &as, &fc->down_interval, &ti->error); if (r) goto bad; From 40ed054f39bc99eac09871c33198e501f4acdf24 Mon Sep 17 00:00:00 2001 From: Benjamin Marzinski Date: Tue, 22 Apr 2025 19:47:36 -0400 Subject: [PATCH 14/29] dm-flakey: error all IOs when num_features is absent dm-flakey would error all IOs if num_features was 0, but if it was absent, dm-flakey would never error any IO. Fix this so that an absent num_features works the same as num_features set to 0.
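Condensed, the reworked start of parse_features() in the diff below reads:

	if (as->argc && (r = dm_read_arg_group(_args, as, &argc, &ti->error)))
		return r;
	/* num_features absent, or present and zero: error all IO */
	if (!argc)
		goto error_all_io;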
Fixes: aa7d7bc99fed7 ("dm flakey: add an "error_reads" option") Reported-by: Kent Overstreet Signed-off-by: Benjamin Marzinski Signed-off-by: Mikulas Patocka --- drivers/md/dm-flakey.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index f5f8d25d1b2d..35f1708b62e8 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -53,8 +53,8 @@ struct per_bio_data { static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, struct dm_target *ti) { - int r; - unsigned int argc; + int r = 0; + unsigned int argc = 0; const char *arg_name; static const struct dm_arg _args[] = { @@ -65,14 +65,13 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, {0, PROBABILITY_BASE, "Invalid random corrupt argument"}, }; - /* No feature arguments supplied. */ - if (!as->argc) - return 0; - - r = dm_read_arg_group(_args, as, &argc, &ti->error); - if (r) + if (as->argc && (r = dm_read_arg_group(_args, as, &argc, &ti->error))) return r; + /* No feature arguments supplied. */ + if (!argc) + goto error_all_io; + while (argc) { arg_name = dm_shift_arg(as); argc--; @@ -232,6 +231,7 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, if (!fc->corrupt_bio_byte && !test_bit(ERROR_READS, &fc->flags) && !test_bit(DROP_WRITES, &fc->flags) && !test_bit(ERROR_WRITES, &fc->flags) && !fc->random_read_corrupt && !fc->random_write_corrupt) { +error_all_io: set_bit(ERROR_WRITES, &fc->flags); set_bit(ERROR_READS, &fc->flags); } From 4319f0aaa2354f14037f04456ed635983b379695 Mon Sep 17 00:00:00 2001 From: Benjamin Marzinski Date: Tue, 22 Apr 2025 19:47:37 -0400 Subject: [PATCH 15/29] dm-flakey: remove useless ERROR_READS check in flakey_end_io If ERROR_READS is set, flakey_map returns DM_MAPIO_KILL for read bios and flakey_end_io is never called, so there's no point in checking it there. Also clean up an incorrect comment about when read IOs are errored out. Signed-off-by: Benjamin Marzinski Signed-off-by: Mikulas Patocka --- drivers/md/dm-flakey.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index 35f1708b62e8..0421f9336680 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -511,8 +511,8 @@ static int flakey_map(struct dm_target *ti, struct bio *bio) pb->bio_submitted = true; /* - * Error reads if neither corrupt_bio_byte or drop_writes or error_writes are set. - * Otherwise, flakey_end_io() will decide if the reads should be modified. + * If ERROR_READS isn't set flakey_end_io() will decide if the + * reads should be modified. */ if (bio_data_dir(bio) == READ) { if (test_bit(ERROR_READS, &fc->flags)) @@ -590,13 +590,6 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, if (rem < fc->random_read_corrupt) corrupt_bio_random(bio); } - if (test_bit(ERROR_READS, &fc->flags)) { - /* - * Error read during the down_interval if drop_writes - * and error_writes were not configured. - */ - *error = BLK_STS_IOERR; - } } return DM_ENDIO_DONE; From 13e79076c89f6e96a6cca8f6df38b40d025907b4 Mon Sep 17 00:00:00 2001 From: Benjamin Marzinski Date: Tue, 22 Apr 2025 19:47:38 -0400 Subject: [PATCH 16/29] dm-flakey: make corrupting read bios work dm-flakey corrupts the read bios in the endio function. However, the corrupt_bio_* functions checked bio_has_data() to see if there was data to corrupt. Since this was the endio function, there was no data left to complete, so bio_has_data() was always false. 
Fix this by saving a copy of the bio's bi_iter in flakey_map(), and using this to initialize the iter for corrupting the read bios. This patch also skips cloning the bio for write bios with no data. Reported-by: Kent Overstreet Fixes: a3998799fb4df ("dm flakey: add corrupt_bio_byte feature") Signed-off-by: Benjamin Marzinski Signed-off-by: Mikulas Patocka --- drivers/md/dm-flakey.c | 54 ++++++++++++++++++++++-------------------- 1 file changed, 28 insertions(+), 26 deletions(-) diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index 0421f9336680..a8ee3df32d5f 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -47,7 +47,8 @@ enum feature_flag_bits { }; struct per_bio_data { - bool bio_submitted; + bool bio_can_corrupt; + struct bvec_iter saved_iter; }; static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, @@ -354,7 +355,8 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio) } static void corrupt_bio_common(struct bio *bio, unsigned int corrupt_bio_byte, - unsigned char corrupt_bio_value) + unsigned char corrupt_bio_value, + struct bvec_iter start) { struct bvec_iter iter; struct bio_vec bvec; @@ -363,7 +365,7 @@ static void corrupt_bio_common(struct bio *bio, unsigned int corrupt_bio_byte, * Overwrite the Nth byte of the bio's data, on whichever page * it falls. */ - bio_for_each_segment(bvec, bio, iter) { + __bio_for_each_segment(bvec, bio, iter, start) { if (bio_iter_len(bio, iter) > corrupt_bio_byte) { unsigned char *segment = bvec_kmap_local(&bvec); segment[corrupt_bio_byte] = corrupt_bio_value; @@ -372,36 +374,31 @@ static void corrupt_bio_common(struct bio *bio, unsigned int corrupt_bio_byte, "(rw=%c bi_opf=%u bi_sector=%llu size=%u)\n", bio, corrupt_bio_value, corrupt_bio_byte, (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf, - (unsigned long long)bio->bi_iter.bi_sector, - bio->bi_iter.bi_size); + (unsigned long long)start.bi_sector, + start.bi_size); break; } corrupt_bio_byte -= bio_iter_len(bio, iter); } } -static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) +static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc, + struct bvec_iter start) { unsigned int corrupt_bio_byte = fc->corrupt_bio_byte - 1; - if (!bio_has_data(bio)) - return; - - corrupt_bio_common(bio, corrupt_bio_byte, fc->corrupt_bio_value); + corrupt_bio_common(bio, corrupt_bio_byte, fc->corrupt_bio_value, start); } -static void corrupt_bio_random(struct bio *bio) +static void corrupt_bio_random(struct bio *bio, struct bvec_iter start) { unsigned int corrupt_byte; unsigned char corrupt_value; - if (!bio_has_data(bio)) - return; - - corrupt_byte = get_random_u32() % bio->bi_iter.bi_size; + corrupt_byte = get_random_u32() % start.bi_size; corrupt_value = get_random_u8(); - corrupt_bio_common(bio, corrupt_byte, corrupt_value); + corrupt_bio_common(bio, corrupt_byte, corrupt_value, start); } static void clone_free(struct bio *clone) @@ -496,7 +493,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio) unsigned int elapsed; struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); - pb->bio_submitted = false; + pb->bio_can_corrupt = false; if (op_is_zone_mgmt(bio_op(bio))) goto map_bio; @@ -505,10 +502,11 @@ static int flakey_map(struct dm_target *ti, struct bio *bio) elapsed = (jiffies - fc->start_time) / HZ; if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) { bool corrupt_fixed, corrupt_random; - /* - * Flag this bio as submitted while down. 
- */ - pb->bio_submitted = true; + + if (bio_has_data(bio)) { + pb->bio_can_corrupt = true; + pb->saved_iter = bio->bi_iter; + } /* * If ERROR_READS isn't set flakey_end_io() will decide if the @@ -531,6 +529,8 @@ static int flakey_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_SUBMITTED; } + if (!pb->bio_can_corrupt) + goto map_bio; /* * Corrupt matching writes. */ @@ -550,9 +550,11 @@ static int flakey_map(struct dm_target *ti, struct bio *bio) struct bio *clone = clone_bio(ti, fc, bio); if (clone) { if (corrupt_fixed) - corrupt_bio_data(clone, fc); + corrupt_bio_data(clone, fc, + clone->bi_iter); if (corrupt_random) - corrupt_bio_random(clone); + corrupt_bio_random(clone, + clone->bi_iter); submit_bio(clone); return DM_MAPIO_SUBMITTED; } @@ -574,21 +576,21 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, if (op_is_zone_mgmt(bio_op(bio))) return DM_ENDIO_DONE; - if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) { + if (!*error && pb->bio_can_corrupt && (bio_data_dir(bio) == READ)) { if (fc->corrupt_bio_byte) { if ((fc->corrupt_bio_rw == READ) && all_corrupt_bio_flags_match(bio, fc)) { /* * Corrupt successful matching READs while in down state. */ - corrupt_bio_data(bio, fc); + corrupt_bio_data(bio, fc, pb->saved_iter); } } if (fc->random_read_corrupt) { u64 rnd = get_random_u64(); u32 rem = do_div(rnd, PROBABILITY_BASE); if (rem < fc->random_read_corrupt) - corrupt_bio_random(bio); + corrupt_bio_random(bio, pb->saved_iter); } } From 4862c8861d902d43645a493e441c4478be1c6c44 Mon Sep 17 00:00:00 2001 From: Kevin Wolf Date: Tue, 29 Apr 2025 18:50:17 +0200 Subject: [PATCH 17/29] dm: Allow .prepare_ioctl to handle ioctls directly This adds a 'bool *forward' parameter to .prepare_ioctl, which allows device mapper targets to handle ioctls themselves instead of forwarding them to the underlying device. If the target already fully handled the ioctl, it sets *forward to false and device mapper won't forward it to the underlying device any more. In order for targets to actually know what the ioctl is about and how to handle it, also pass cmd and arg. As long as targets restrict themselves to interpreting ioctls of type DM_IOCTL, this is a backwards compatible change because previously, any such ioctl would have been passed down through all device mapper layers until it reached a device that couldn't understand the ioctl and would return an error.
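As a sketch of what this enables (a hypothetical target: the example_* names and context struct are assumptions, not part of this series), a target could now consume DM_IOCTL-type ioctls itself:

	static int example_prepare_ioctl(struct dm_target *ti,
					 struct block_device **bdev,
					 unsigned int cmd, unsigned long arg,
					 bool *forward)
	{
		struct example_c *ec = ti->private;	/* hypothetical per-target context */

		if (_IOC_TYPE(cmd) == DM_IOCTL) {
			/* fully handled here: tell DM core not to forward it */
			*forward = false;
			return example_handle_ioctl(ec, cmd, arg);	/* hypothetical */
		}

		/* anything else keeps the old behavior: forward to the underlying device */
		*bdev = ec->dev->bdev;
		return 0;
	}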
Signed-off-by: Kevin Wolf Reviewed-by: Benjamin Marzinski Signed-off-by: Mikulas Patocka --- drivers/md/dm-dust.c | 4 +++- drivers/md/dm-ebs-target.c | 3 ++- drivers/md/dm-flakey.c | 4 +++- drivers/md/dm-linear.c | 4 +++- drivers/md/dm-log-writes.c | 4 +++- drivers/md/dm-mpath.c | 4 +++- drivers/md/dm-switch.c | 4 +++- drivers/md/dm-verity-target.c | 4 +++- drivers/md/dm-zoned-target.c | 3 ++- drivers/md/dm.c | 17 +++++++++++------ include/linux/device-mapper.h | 9 ++++++++- 11 files changed, 44 insertions(+), 16 deletions(-) diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c index 1a33820c9f46..e75310232bbf 100644 --- a/drivers/md/dm-dust.c +++ b/drivers/md/dm-dust.c @@ -534,7 +534,9 @@ static void dust_status(struct dm_target *ti, status_type_t type, } } -static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev) +static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev, + unsigned int cmd, unsigned long arg, + bool *forward) { struct dust_device *dd = ti->private; struct dm_dev *dev = dd->dev; diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c index b19b0142a690..6abb31ca9662 100644 --- a/drivers/md/dm-ebs-target.c +++ b/drivers/md/dm-ebs-target.c @@ -415,7 +415,8 @@ static void ebs_status(struct dm_target *ti, status_type_t type, } } -static int ebs_prepare_ioctl(struct dm_target *ti, struct block_device **bdev) +static int ebs_prepare_ioctl(struct dm_target *ti, struct block_device **bdev, + unsigned int cmd, unsigned long arg, bool *forward) { struct ebs_c *ec = ti->private; struct dm_dev *dev = ec->dev; diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index a8ee3df32d5f..c711db6f8f5c 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -648,7 +648,9 @@ static void flakey_status(struct dm_target *ti, status_type_t type, } } -static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev) +static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev, + unsigned int cmd, unsigned long arg, + bool *forward) { struct flakey_c *fc = ti->private; diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 66318aba4bdb..15538ec58f8e 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -119,7 +119,9 @@ static void linear_status(struct dm_target *ti, status_type_t type, } } -static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev) +static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev, + unsigned int cmd, unsigned long arg, + bool *forward) { struct linear_c *lc = ti->private; struct dm_dev *dev = lc->dev; diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c index 8d7df8303d0a..d484e8e1d48a 100644 --- a/drivers/md/dm-log-writes.c +++ b/drivers/md/dm-log-writes.c @@ -818,7 +818,9 @@ static void log_writes_status(struct dm_target *ti, status_type_t type, } static int log_writes_prepare_ioctl(struct dm_target *ti, - struct block_device **bdev) + struct block_device **bdev, + unsigned int cmd, unsigned long arg, + bool *forward) { struct log_writes_c *lc = ti->private; struct dm_dev *dev = lc->dev; diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 6c98f4ae5ea9..909ed6890ba5 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -2022,7 +2022,9 @@ out: } static int multipath_prepare_ioctl(struct dm_target *ti, - struct block_device **bdev) + struct block_device **bdev, + unsigned int cmd, unsigned long arg, + bool *forward) { struct multipath *m = 
ti->private; struct pgpath *pgpath; diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c index dfd9fb52a6f3..bb1a70b5a215 100644 --- a/drivers/md/dm-switch.c +++ b/drivers/md/dm-switch.c @@ -517,7 +517,9 @@ static void switch_status(struct dm_target *ti, status_type_t type, * * Passthrough all ioctls to the path for sector 0 */ -static int switch_prepare_ioctl(struct dm_target *ti, struct block_device **bdev) +static int switch_prepare_ioctl(struct dm_target *ti, struct block_device **bdev, + unsigned int cmd, unsigned long arg, + bool *forward) { struct switch_ctx *sctx = ti->private; unsigned int path_nr; diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 4de2c226ac9d..34a9f9fbd0d1 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -994,7 +994,9 @@ static void verity_status(struct dm_target *ti, status_type_t type, } } -static int verity_prepare_ioctl(struct dm_target *ti, struct block_device **bdev) +static int verity_prepare_ioctl(struct dm_target *ti, struct block_device **bdev, + unsigned int cmd, unsigned long arg, + bool *forward) { struct dm_verity *v = ti->private; diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index 6141fc25d842..5da3db06da10 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -1015,7 +1015,8 @@ static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits) /* * Pass on ioctl to the backend device. */ -static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev) +static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev, + unsigned int cmd, unsigned long arg, bool *forward) { struct dmz_target *dmz = ti->private; struct dmz_dev *dev = &dmz->dev[0]; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index ccccc098b30e..1726f0f828cc 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -411,7 +411,8 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) } static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx, - struct block_device **bdev) + struct block_device **bdev, unsigned int cmd, + unsigned long arg, bool *forward) { struct dm_target *ti; struct dm_table *map; @@ -434,8 +435,8 @@ retry: if (dm_suspended_md(md)) return -EAGAIN; - r = ti->type->prepare_ioctl(ti, bdev); - if (r == -ENOTCONN && !fatal_signal_pending(current)) { + r = ti->type->prepare_ioctl(ti, bdev, cmd, arg, forward); + if (r == -ENOTCONN && *forward && !fatal_signal_pending(current)) { dm_put_live_table(md, *srcu_idx); fsleep(10000); goto retry; @@ -454,9 +455,10 @@ static int dm_blk_ioctl(struct block_device *bdev, blk_mode_t mode, { struct mapped_device *md = bdev->bd_disk->private_data; int r, srcu_idx; + bool forward = true; - r = dm_prepare_ioctl(md, &srcu_idx, &bdev); - if (r < 0) + r = dm_prepare_ioctl(md, &srcu_idx, &bdev, cmd, arg, &forward); + if (!forward || r < 0) goto out; if (r > 0) { @@ -3630,10 +3632,13 @@ static int dm_pr_clear(struct block_device *bdev, u64 key) struct mapped_device *md = bdev->bd_disk->private_data; const struct pr_ops *ops; int r, srcu_idx; + bool forward = true; - r = dm_prepare_ioctl(md, &srcu_idx, &bdev); + /* Not a real ioctl, but targets must not interpret non-DM ioctls */ + r = dm_prepare_ioctl(md, &srcu_idx, &bdev, 0, 0, &forward); if (r < 0) goto out; + WARN_ON_ONCE(!forward); ops = bdev->bd_disk->fops->pr_ops; if (ops && ops->pr_clear) diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index bcc6d7b69470..cb95951547ab 
100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -93,7 +93,14 @@ typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type, typedef int (*dm_message_fn) (struct dm_target *ti, unsigned int argc, char **argv, char *result, unsigned int maxlen); -typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev); +/* + * Called with *forward == true. If it remains true, the ioctl should be + * forwarded to bdev. If it is reset to false, the target already fully handled + * the ioctl and the return value is the return value for the whole ioctl. + */ +typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev, + unsigned int cmd, unsigned long arg, + bool *forward); #ifdef CONFIG_BLK_DEV_ZONED typedef int (*dm_report_zones_fn) (struct dm_target *ti, From 7734fb4ad98c3fdaf0fde82978ef8638195a5285 Mon Sep 17 00:00:00 2001 From: Kevin Wolf Date: Tue, 29 Apr 2025 18:50:18 +0200 Subject: [PATCH 18/29] dm mpath: Interface for explicit probing of active paths Multipath cannot directly provide failover for ioctls in the kernel because it doesn't know what each ioctl means and which result could indicate a path error. Userspace generally knows what the ioctl it issued means and whether it might be a path error, but neither does it know which path the ioctl took nor does it necessarily have the privileges to fail a path using the control device. In order to allow userspace to address this situation, implement a DM_MPATH_PROBE_PATHS ioctl that prompts the dm-mpath driver to probe all active paths in the current path group to see whether they still work, and fail them if not. If this returns success, userspace can retry the ioctl and expect that the previously hit bad path is now failed (or working again). The immediate motivation for this is the use of SG_IO in QEMU for SCSI passthrough. Following a failed SG_IO ioctl, QEMU will trigger probing to ensure that all active paths are actually alive, so that retrying SG_IO at least has a lower chance of failing due to a path error. However, the problem is broader than just SG_IO (it affects any ioctl), and if applications need failover support for other ioctls, the same probing can be used. This is not implemented on the DM control device, but on the DM mpath block devices, to allow all users who have access to such a block device to make use of this interface, specifically to implement failover for ioctls. For the same reason, it is also unprivileged. Its implementation is effectively just a bunch of reads, which could already be issued by userspace, just without any guarantee that all the right paths are selected. The probing implemented here is done fully synchronously path by path; probing all paths concurrently is left as an improvement for the future.
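To illustrate the intended use, a minimal userspace sketch (it assumes a kernel with this ioctl and a file descriptor opened on the dm-mpath block device; the wrapper name is made up):

#include <sys/ioctl.h>
#include <linux/dm-ioctl.h>

/* Retry an ioctl once after asking dm-mpath to probe its active paths. */
static int ioctl_with_path_probe(int fd, unsigned long cmd, void *argp)
{
	int r = ioctl(fd, cmd, argp);

	if (r < 0) {
		/* Probe and fail broken active paths; on failure (e.g. errno
		 * ENOTCONN, no valid path left) a retry is pointless. */
		if (ioctl(fd, DM_MPATH_PROBE_PATHS) < 0)
			return r;
		r = ioctl(fd, cmd, argp);	/* retry once */
	}
	return r;
}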
Co-developed-by: Hanna Czenczek Signed-off-by: Hanna Czenczek Signed-off-by: Kevin Wolf Reviewed-by: Benjamin Marzinski Signed-off-by: Benjamin Marzinski Signed-off-by: Mikulas Patocka --- drivers/md/dm-ioctl.c | 1 + drivers/md/dm-mpath.c | 100 +++++++++++++++++++++++++++++++++- include/uapi/linux/dm-ioctl.h | 9 ++- 3 files changed, 107 insertions(+), 3 deletions(-) diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index d42eac944eb5..4165fef4c170 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1885,6 +1885,7 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags) {DM_DEV_SET_GEOMETRY_CMD, 0, dev_set_geometry}, {DM_DEV_ARM_POLL_CMD, IOCTL_FLAGS_NO_PARAMS, dev_arm_poll}, {DM_GET_TARGET_VERSION_CMD, 0, get_target_version}, + {DM_MPATH_PROBE_PATHS_CMD, 0, NULL}, /* block device ioctl */ }; if (unlikely(cmd >= ARRAY_SIZE(_ioctls))) diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 909ed6890ba5..53861ad5dd1d 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -2021,6 +2021,94 @@ out: return r; } +/* + * Perform a minimal read from the given path to find out whether the + * path still works. If a path error occurs, fail it. + */ +static int probe_path(struct pgpath *pgpath) +{ + struct block_device *bdev = pgpath->path.dev->bdev; + unsigned int read_size = bdev_logical_block_size(bdev); + struct page *page; + struct bio *bio; + blk_status_t status; + int r = 0; + + if (WARN_ON_ONCE(read_size > PAGE_SIZE)) + return -EINVAL; + + page = alloc_page(GFP_KERNEL); + if (!page) + return -ENOMEM; + + /* Perform a minimal read: Sector 0, length read_size */ + bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL); + if (!bio) { + r = -ENOMEM; + goto out; + } + + bio->bi_iter.bi_sector = 0; + __bio_add_page(bio, page, read_size, 0); + submit_bio_wait(bio); + status = bio->bi_status; + bio_put(bio); + + if (status && blk_path_error(status)) + fail_path(pgpath); + +out: + __free_page(page); + return r; +} + +/* + * Probe all active paths in current_pg to find out whether they still work. + * Fail all paths that do not work. + * + * Return -ENOTCONN if no valid path is left (even outside of current_pg). We + * cannot probe paths in other pgs without switching current_pg, so if valid + * paths are only in different pgs, they may or may not work. Additionally + * we should not probe paths in a pathgroup that is in the process of + * Initializing. Userspace can submit a request and we'll switch and wait + * for the pathgroup to be initialized. If the request fails, it may need to + * probe again. 
+ */ +static int probe_active_paths(struct multipath *m) +{ + struct pgpath *pgpath; + struct priority_group *pg; + unsigned long flags; + int r = 0; + + mutex_lock(&m->work_mutex); + + spin_lock_irqsave(&m->lock, flags); + if (test_bit(MPATHF_QUEUE_IO, &m->flags)) + pg = NULL; + else + pg = m->current_pg; + spin_unlock_irqrestore(&m->lock, flags); + + if (pg) { + list_for_each_entry(pgpath, &pg->pgpaths, list) { + if (!pgpath->is_active) + continue; + + r = probe_path(pgpath); + if (r < 0) + goto out; + } + } + + if (!atomic_read(&m->nr_valid_paths)) + r = -ENOTCONN; + +out: + mutex_unlock(&m->work_mutex); + return r; +} + static int multipath_prepare_ioctl(struct dm_target *ti, struct block_device **bdev, unsigned int cmd, unsigned long arg, @@ -2031,6 +2119,16 @@ static int multipath_prepare_ioctl(struct dm_target *ti, unsigned long flags; int r; + if (_IOC_TYPE(cmd) == DM_IOCTL) { + *forward = false; + switch (cmd) { + case DM_MPATH_PROBE_PATHS: + return probe_active_paths(m); + default: + return -ENOTTY; + } + } + pgpath = READ_ONCE(m->current_pgpath); if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m)) pgpath = choose_pgpath(m, 0); @@ -2182,7 +2280,7 @@ static int multipath_busy(struct dm_target *ti) */ static struct target_type multipath_target = { .name = "multipath", - .version = {1, 14, 0}, + .version = {1, 15, 0}, .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE | DM_TARGET_PASSES_INTEGRITY, .module = THIS_MODULE, diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h index b08c7378164d..3225e025e30e 100644 --- a/include/uapi/linux/dm-ioctl.h +++ b/include/uapi/linux/dm-ioctl.h @@ -258,10 +258,12 @@ enum { DM_DEV_SET_GEOMETRY_CMD, DM_DEV_ARM_POLL_CMD, DM_GET_TARGET_VERSION_CMD, + DM_MPATH_PROBE_PATHS_CMD, }; #define DM_IOCTL 0xfd +/* Control device ioctls */ #define DM_VERSION _IOWR(DM_IOCTL, DM_VERSION_CMD, struct dm_ioctl) #define DM_REMOVE_ALL _IOWR(DM_IOCTL, DM_REMOVE_ALL_CMD, struct dm_ioctl) #define DM_LIST_DEVICES _IOWR(DM_IOCTL, DM_LIST_DEVICES_CMD, struct dm_ioctl) @@ -285,10 +287,13 @@ enum { #define DM_TARGET_MSG _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, struct dm_ioctl) #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) +/* Block device ioctls */ +#define DM_MPATH_PROBE_PATHS _IO(DM_IOCTL, DM_MPATH_PROBE_PATHS_CMD) + #define DM_VERSION_MAJOR 4 -#define DM_VERSION_MINOR 49 +#define DM_VERSION_MINOR 50 #define DM_VERSION_PATCHLEVEL 0 -#define DM_VERSION_EXTRA "-ioctl (2025-01-17)" +#define DM_VERSION_EXTRA "-ioctl (2025-04-28)" /* Status bits */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */ From b7c18b17a173087ce97e809cefd55e581121f19e Mon Sep 17 00:00:00 2001 From: John Garry Date: Sun, 4 May 2025 11:26:45 +0200 Subject: [PATCH 19/29] dm-table: Set BLK_FEAT_ATOMIC_WRITES for target queue limits Feature flag BLK_FEAT_ATOMIC_WRITES is not being properly set for the target queue limits, and this means that atomic writes are not being enabled for any dm personalities. When calling dm_set_device_limits() -> blk_stack_limits() -> ... -> blk_stack_atomic_writes_limits(), the bottom device limits (which corresponds to intermediate target queue limits) does not have BLK_FEAT_ATOMIC_WRITES set, and so atomic writes can never be enabled. Typically such a flag would be inherited from the stacked device in dm_set_device_limits() -> blk_stack_limits() via BLK_FEAT_INHERIT_MASK, but BLK_FEAT_ATOMIC_WRITES is not inherited as it's preferred to manually enable on a per-personality basis. 
Set BLK_FEAT_ATOMIC_WRITES manually for the intermediate target queue limits from the stacked device to get atomic writes working. Fixes: 3194e36488e2 ("dm-table: atomic writes support") Cc: stable@vger.kernel.org # v6.14 Signed-off-by: John Garry Signed-off-by: Mikulas Patocka --- drivers/md/dm-table.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index a937e1e12482..9da5193e4ca9 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -430,6 +430,12 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, return 0; } + /* + * BLK_FEAT_ATOMIC_WRITES is not inherited from the bottom device in + * blk_stack_limits(), so do it manually. + */ + limits->features |= (q->limits.features & BLK_FEAT_ATOMIC_WRITES); + mutex_lock(&q->limits_lock); if (blk_stack_limits(limits, &q->limits, get_start_sect(bdev) + start) < 0) From 025e138eeb752ae19e8eb0144d9f7cc87d579b45 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 1 May 2025 14:23:19 -0700 Subject: [PATCH 20/29] blk-crypto: export wrapped key functions Export blk_crypto_derive_sw_secret(), blk_crypto_import_key(), blk_crypto_generate_key(), and blk_crypto_prepare_key() so that they can be used by device-mapper when passing through wrapped key support. Signed-off-by: Eric Biggers Signed-off-by: Mikulas Patocka --- block/blk-crypto-profile.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/block/blk-crypto-profile.c b/block/blk-crypto-profile.c index 94a155912bf1..81918f6e0cae 100644 --- a/block/blk-crypto-profile.c +++ b/block/blk-crypto-profile.c @@ -501,6 +501,7 @@ int blk_crypto_derive_sw_secret(struct block_device *bdev, blk_crypto_hw_exit(profile); return err; } +EXPORT_SYMBOL_GPL(blk_crypto_derive_sw_secret); int blk_crypto_import_key(struct blk_crypto_profile *profile, const u8 *raw_key, size_t raw_key_size, @@ -520,6 +521,7 @@ int blk_crypto_import_key(struct blk_crypto_profile *profile, blk_crypto_hw_exit(profile); return ret; } +EXPORT_SYMBOL_GPL(blk_crypto_import_key); int blk_crypto_generate_key(struct blk_crypto_profile *profile, u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) @@ -537,6 +539,7 @@ int blk_crypto_generate_key(struct blk_crypto_profile *profile, blk_crypto_hw_exit(profile); return ret; } +EXPORT_SYMBOL_GPL(blk_crypto_generate_key); int blk_crypto_prepare_key(struct blk_crypto_profile *profile, const u8 *lt_key, size_t lt_key_size, @@ -556,6 +559,7 @@ int blk_crypto_prepare_key(struct blk_crypto_profile *profile, blk_crypto_hw_exit(profile); return ret; } +EXPORT_SYMBOL_GPL(blk_crypto_prepare_key); /** * blk_crypto_intersect_capabilities() - restrict supported crypto capabilities From e93912786e50804e7c53456da75d586cace8732f Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 1 May 2025 14:23:20 -0700 Subject: [PATCH 21/29] dm: pass through operations on wrapped inline crypto keys Make the device-mapper layer pass through the derive_sw_secret, import_key, generate_key, and prepare_key blk-crypto operations when all underlying devices support hardware-wrapped inline crypto keys and are passing through inline crypto support. Commit ebc4176551cd ("blk-crypto: add basic hardware-wrapped key support") already made BLK_CRYPTO_KEY_TYPE_HW_WRAPPED be passed through in the same way that the other crypto capabilities are. But the wrapped key support also includes additional operations in blk_crypto_ll_ops, and the dm layer needs to implement those to pass them through. 
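Condensed from the hunks below, the wiring in dm_table_construct_crypto_profile() reduces to four new ll_ops, installed only when wrapped-key support was detected on all underlying devices:

	if (profile->key_types_supported & BLK_CRYPTO_KEY_TYPE_HW_WRAPPED) {
		profile->ll_ops.derive_sw_secret = dm_derive_sw_secret;
		profile->ll_ops.import_key = dm_import_key;
		profile->ll_ops.generate_key = dm_generate_key;
		profile->ll_ops.prepare_key = dm_prepare_key;
	}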
derive_sw_secret is needed by fscrypt, while the other operations are needed for the new blk-crypto ioctls to work on device-mapper devices and not just the raw partitions. Signed-off-by: Eric Biggers Signed-off-by: Mikulas Patocka --- drivers/md/dm-table.c | 177 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 177 insertions(+) diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 9da5193e4ca9..982c8fdd619e 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1197,6 +1197,176 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile, return 0; } +enum dm_wrappedkey_op { + DERIVE_SW_SECRET, + IMPORT_KEY, + GENERATE_KEY, + PREPARE_KEY, +}; + +struct dm_wrappedkey_op_args { + enum dm_wrappedkey_op op; + int err; + union { + struct { + const u8 *eph_key; + size_t eph_key_size; + u8 *sw_secret; + } derive_sw_secret; + struct { + const u8 *raw_key; + size_t raw_key_size; + u8 *lt_key; + } import_key; + struct { + u8 *lt_key; + } generate_key; + struct { + const u8 *lt_key; + size_t lt_key_size; + u8 *eph_key; + } prepare_key; + }; +}; + +static int dm_wrappedkey_op_callback(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) +{ + struct dm_wrappedkey_op_args *args = data; + struct block_device *bdev = dev->bdev; + struct blk_crypto_profile *profile = + bdev_get_queue(bdev)->crypto_profile; + int err = -EOPNOTSUPP; + + if (!args->err) + return 0; + + switch (args->op) { + case DERIVE_SW_SECRET: + err = blk_crypto_derive_sw_secret( + bdev, + args->derive_sw_secret.eph_key, + args->derive_sw_secret.eph_key_size, + args->derive_sw_secret.sw_secret); + break; + case IMPORT_KEY: + err = blk_crypto_import_key(profile, + args->import_key.raw_key, + args->import_key.raw_key_size, + args->import_key.lt_key); + break; + case GENERATE_KEY: + err = blk_crypto_generate_key(profile, + args->generate_key.lt_key); + break; + case PREPARE_KEY: + err = blk_crypto_prepare_key(profile, + args->prepare_key.lt_key, + args->prepare_key.lt_key_size, + args->prepare_key.eph_key); + break; + } + args->err = err; + + /* Try another device in case this fails. */ + return 0; +} + +static int dm_exec_wrappedkey_op(struct blk_crypto_profile *profile, + struct dm_wrappedkey_op_args *args) +{ + struct mapped_device *md = + container_of(profile, struct dm_crypto_profile, profile)->md; + struct dm_target *ti; + struct dm_table *t; + int srcu_idx; + int i; + + args->err = -EOPNOTSUPP; + + t = dm_get_live_table(md, &srcu_idx); + if (!t) + goto out; + + /* + * blk-crypto currently has no support for multiple incompatible + * implementations of wrapped inline crypto keys on a single system. + * It was already checked earlier that support for wrapped keys was + * declared on all underlying devices. Thus, all the underlying devices + * should support all wrapped key operations and they should behave + * identically, i.e. work with the same keys. So, just executing the + * operation on the first device on which it works suffices for now. 
+ */ + for (i = 0; i < t->num_targets; i++) { + ti = dm_table_get_target(t, i); + if (!ti->type->iterate_devices) + continue; + ti->type->iterate_devices(ti, dm_wrappedkey_op_callback, args); + if (!args->err) + break; + } +out: + dm_put_live_table(md, srcu_idx); + return args->err; +} + +static int dm_derive_sw_secret(struct blk_crypto_profile *profile, + const u8 *eph_key, size_t eph_key_size, + u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]) +{ + struct dm_wrappedkey_op_args args = { + .op = DERIVE_SW_SECRET, + .derive_sw_secret = { + .eph_key = eph_key, + .eph_key_size = eph_key_size, + .sw_secret = sw_secret, + }, + }; + return dm_exec_wrappedkey_op(profile, &args); +} + +static int dm_import_key(struct blk_crypto_profile *profile, + const u8 *raw_key, size_t raw_key_size, + u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) +{ + struct dm_wrappedkey_op_args args = { + .op = IMPORT_KEY, + .import_key = { + .raw_key = raw_key, + .raw_key_size = raw_key_size, + .lt_key = lt_key, + }, + }; + return dm_exec_wrappedkey_op(profile, &args); +} + +static int dm_generate_key(struct blk_crypto_profile *profile, + u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) +{ + struct dm_wrappedkey_op_args args = { + .op = GENERATE_KEY, + .generate_key = { + .lt_key = lt_key, + }, + }; + return dm_exec_wrappedkey_op(profile, &args); +} + +static int dm_prepare_key(struct blk_crypto_profile *profile, + const u8 *lt_key, size_t lt_key_size, + u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) +{ + struct dm_wrappedkey_op_args args = { + .op = PREPARE_KEY, + .prepare_key = { + .lt_key = lt_key, + .lt_key_size = lt_key_size, + .eph_key = eph_key, + }, + }; + return dm_exec_wrappedkey_op(profile, &args); +} + static int device_intersect_crypto_capabilities(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) @@ -1271,6 +1441,13 @@ static int dm_table_construct_crypto_profile(struct dm_table *t) profile); } + if (profile->key_types_supported & BLK_CRYPTO_KEY_TYPE_HW_WRAPPED) { + profile->ll_ops.derive_sw_secret = dm_derive_sw_secret; + profile->ll_ops.import_key = dm_import_key; + profile->ll_ops.generate_key = dm_generate_key; + profile->ll_ops.prepare_key = dm_prepare_key; + } + if (t->md->queue && !blk_crypto_has_capabilities(profile, t->md->queue->crypto_profile)) { From 3da732687d72078e52cc7f334a482383e84ca156 Mon Sep 17 00:00:00 2001 From: Matthew Sakai Date: Mon, 12 May 2025 21:10:10 -0400 Subject: [PATCH 22/29] dm vdo indexer: don't read request structure after enqueuing The function get_volume_page_protected may place a request on a queue for another thread to process asynchronously. When this happens, the volume should not read the request from the original thread. This can not currently cause problems, due to the way request processing is handled, but it is not safe in general. 
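The rule being applied, as a minimal sketch (the function is invented for illustration; the page-cache calls match the signatures used in the diff below):

/* Save any needed request fields before a call that may queue the request
 * for another thread; afterwards, use only the saved copies. */
static void sketch_lookup(struct volume *volume, struct uds_request *request,
			  u32 physical_page)
{
	unsigned int zone_number = request->zone_number;	/* copy first */

	/* ...the request may now be enqueued and owned by a reader thread... */

	begin_pending_search(&volume->page_cache, physical_page, zone_number);
	/* request must not be dereferenced past the handoff point */
}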
Reviewed-by: Ken Raeburn Signed-off-by: Matthew Sakai Signed-off-by: Mikulas Patocka --- drivers/md/dm-vdo/indexer/volume.c | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/drivers/md/dm-vdo/indexer/volume.c b/drivers/md/dm-vdo/indexer/volume.c index 655453bb276b..425b3a74f4db 100644 --- a/drivers/md/dm-vdo/indexer/volume.c +++ b/drivers/md/dm-vdo/indexer/volume.c @@ -754,10 +754,11 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request * u32 physical_page, struct cached_page **page_ptr) { struct cached_page *page; + unsigned int zone_number = request->zone_number; get_page_from_cache(&volume->page_cache, physical_page, &page); if (page != NULL) { - if (request->zone_number == 0) { + if (zone_number == 0) { /* Only one zone is allowed to update the LRU. */ make_page_most_recent(&volume->page_cache, page); } @@ -767,7 +768,7 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request * } /* Prepare to enqueue a read for the page. */ - end_pending_search(&volume->page_cache, request->zone_number); + end_pending_search(&volume->page_cache, zone_number); mutex_lock(&volume->read_threads_mutex); /* @@ -787,8 +788,7 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request * * the order does not matter for correctness as it does below. */ mutex_unlock(&volume->read_threads_mutex); - begin_pending_search(&volume->page_cache, physical_page, - request->zone_number); + begin_pending_search(&volume->page_cache, physical_page, zone_number); return UDS_QUEUED; } @@ -797,7 +797,7 @@ static int get_volume_page_protected(struct volume *volume, struct uds_request * * "search pending" state in careful order so no other thread can mess with the data before * the caller gets to look at it. */ - begin_pending_search(&volume->page_cache, physical_page, request->zone_number); + begin_pending_search(&volume->page_cache, physical_page, zone_number); mutex_unlock(&volume->read_threads_mutex); *page_ptr = page; return UDS_SUCCESS; @@ -849,6 +849,7 @@ static int search_cached_index_page(struct volume *volume, struct uds_request *r { int result; struct cached_page *page = NULL; + unsigned int zone_number = request->zone_number; u32 physical_page = map_to_physical_page(volume->geometry, chapter, index_page_number); @@ -858,18 +859,18 @@ static int search_cached_index_page(struct volume *volume, struct uds_request *r * invalidation by the reader thread, before the reader thread has noticed that the * invalidate_counter has been incremented. 
*/ - begin_pending_search(&volume->page_cache, physical_page, request->zone_number); + begin_pending_search(&volume->page_cache, physical_page, zone_number); result = get_volume_page_protected(volume, request, physical_page, &page); if (result != UDS_SUCCESS) { - end_pending_search(&volume->page_cache, request->zone_number); + end_pending_search(&volume->page_cache, zone_number); return result; } result = uds_search_chapter_index_page(&page->index_page, volume->geometry, &request->record_name, record_page_number); - end_pending_search(&volume->page_cache, request->zone_number); + end_pending_search(&volume->page_cache, zone_number); return result; } @@ -882,6 +883,7 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req { struct cached_page *record_page; struct index_geometry *geometry = volume->geometry; + unsigned int zone_number = request->zone_number; int result; u32 physical_page, page_number; @@ -905,11 +907,11 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req * invalidation by the reader thread, before the reader thread has noticed that the * invalidate_counter has been incremented. */ - begin_pending_search(&volume->page_cache, physical_page, request->zone_number); + begin_pending_search(&volume->page_cache, physical_page, zone_number); result = get_volume_page_protected(volume, request, physical_page, &record_page); if (result != UDS_SUCCESS) { - end_pending_search(&volume->page_cache, request->zone_number); + end_pending_search(&volume->page_cache, zone_number); return result; } @@ -917,7 +919,7 @@ int uds_search_cached_record_page(struct volume *volume, struct uds_request *req &request->record_name, geometry, &request->old_metadata)) *found = true; - end_pending_search(&volume->page_cache, request->zone_number); + end_pending_search(&volume->page_cache, zone_number); return UDS_SUCCESS; } From 241b9b584d50e01f7eb50c0555aa570502bcc8c5 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 14 May 2025 13:50:33 -0700 Subject: [PATCH 23/29] dm-zone: Use bdev_*() helper functions where applicable Improve code readability by using bdev_is_zone_aligned() and bdev_offset_from_zone_start() where applicable. No functionality has been changed. This patch is a reworked version of a patch from Pankaj Raghav. See also https://lore.kernel.org/linux-block/20220923173618.6899-11-p.raghav@samsung.com/. Cc: Damien Le Moal Cc: Pankaj Raghav Signed-off-by: Bart Van Assche Reviewed-by: Damien Le Moal Signed-off-by: Mikulas Patocka --- drivers/md/dm-table.c | 4 ++-- drivers/md/dm-zone.c | 6 +++--- include/linux/blkdev.h | 7 +++++++ 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 982c8fdd619e..57573e8b5aa9 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -256,7 +256,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, if (bdev_is_zoned(bdev)) { unsigned int zone_sectors = bdev_zone_sectors(bdev); - if (start & (zone_sectors - 1)) { + if (!bdev_is_zone_aligned(bdev, start)) { DMERR("%s: start=%llu not aligned to h/w zone size %u of %pg", dm_device_name(ti->table->md), (unsigned long long)start, @@ -273,7 +273,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, * devices do not end up with a smaller zone in the middle of * the sector range. 
*/ - if (len & (zone_sectors - 1)) { + if (!bdev_is_zone_aligned(bdev, len)) { DMERR("%s: len=%llu not aligned to h/w zone size %u of %pg", dm_device_name(ti->table->md), (unsigned long long)len, diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c index 0d989651eb15..3d31b82e0730 100644 --- a/drivers/md/dm-zone.c +++ b/drivers/md/dm-zone.c @@ -463,9 +463,9 @@ void dm_zone_endio(struct dm_io *io, struct bio *clone) */ if (clone->bi_status == BLK_STS_OK && bio_op(clone) == REQ_OP_ZONE_APPEND) { - sector_t mask = bdev_zone_sectors(disk->part0) - 1; - - orig_bio->bi_iter.bi_sector += clone->bi_iter.bi_sector & mask; + orig_bio->bi_iter.bi_sector += + bdev_offset_from_zone_start(disk->part0, + clone->bi_iter.bi_sector); } return; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index e39c45bc0a97..edc941ec8fdd 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1419,6 +1419,13 @@ static inline bool bdev_is_zone_start(struct block_device *bdev, return bdev_offset_from_zone_start(bdev, sector) == 0; } +/* Check whether @sector is a multiple of the zone size. */ +static inline bool bdev_is_zone_aligned(struct block_device *bdev, + sector_t sector) +{ + return bdev_is_zone_start(bdev, sector); +} + /** * bdev_zone_is_seq - check if a sector belongs to a sequential write zone * @bdev: block device to check From 5c977f1023156938915c57d362fddde8fad2b052 Mon Sep 17 00:00:00 2001 From: Benjamin Marzinski Date: Thu, 15 May 2025 21:55:29 -0400 Subject: [PATCH 24/29] dm-mpath: Don't grab work_mutex while probing paths Grabbing the work_mutex keeps probe_active_paths() from running at the same time as multipath_message(). The only messages that could interfere with probing the paths are "disable_group", "enable_group", and "switch_group". These messages could force multipath to pick a new pathgroup while probe_active_paths() was probing the current pathgroup. If the multipath device has a hardware handler, and it switches active pathgroups while there is outstanding IO to a path device, it's possible that IO to the path will fail, even if the path would be usable if it was in the active pathgroup. To avoid this, do not clear the current pathgroup for the *_group messages while probe_active_paths() is running. Instead set a flag, and probe_active_paths() will clear the current pathgroup when it finishes probing the paths. For this to work correctly, multipath needs to check current_pg before next_pg in choose_pgpath(), but before this patch next_pg was only ever set when current_pg was cleared, so this doesn't change the current behavior when paths aren't being probed. Even with this change, it is still possible to switch pathgroups while the probe is running, but only if all the paths have failed, and the probe function will skip them as well in this case. If multiple DM_MPATH_PROBE_PATHS requests are received at once, there is no point in repeatedly issuing test IOs. Instead, the later probes should wait for the current probe to complete. If current pathgroup is still the same as the one that was just checked, the other probes should skip probing and just check the number of valid paths. Finally, probing the paths should quit early if the multipath device is trying to suspend, instead of continuing to issue test IOs, delaying the suspend. 
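The deferral at the heart of this is small; condensed from bypass_pg() and switch_pg_num() in the diff below:

	spin_lock_irqsave(&m->lock, flags);
	if (test_bit(MPATHF_DELAY_PG_SWITCH, &m->flags)) {
		/* a probe is in flight: let it perform the switch when done */
		set_bit(MPATHF_NEED_PG_SWITCH, &m->flags);
	} else {
		m->current_pgpath = NULL;
		m->current_pg = NULL;
	}
	spin_unlock_irqrestore(&m->lock, flags);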
While this patch will not change the behavior of existing multipath users which don't use the DM_MPATH_PROBE_PATHS ioctl, when that ioctl is used, the behavior of the "disable_group", "enable_group", and "switch_group" messages can change subtly. When these messages return, the next IO to the multipath device will no longer be guaranteed to choose a new pathgroup. Instead, choosing a new pathgroup could be delayed by an in-progress DM_MPATH_PROBE_PATHS ioctl. The userspace multipath tools make no assumptions about what will happen to IOs after sending these messages, so this change will not affect already released versions of them, even if the DM_MPATH_PROBE_PATHS ioctl is run alongside them. Signed-off-by: Benjamin Marzinski Signed-off-by: Mikulas Patocka --- drivers/md/dm-mpath.c | 128 +++++++++++++++++++++++++++--------------- 1 file changed, 83 insertions(+), 45 deletions(-) diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 53861ad5dd1d..12b7bcae490c 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -79,6 +79,7 @@ struct multipath { struct pgpath *current_pgpath; struct priority_group *current_pg; struct priority_group *next_pg; /* Switch to this PG if set */ + struct priority_group *last_probed_pg; atomic_t nr_valid_paths; /* Total number of usable paths */ unsigned int nr_priority_groups; @@ -87,6 +88,7 @@ struct multipath { const char *hw_handler_name; char *hw_handler_params; wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */ + wait_queue_head_t probe_wait; /* Wait for probing paths */ unsigned int pg_init_retries; /* Number of times to retry pg_init */ unsigned int pg_init_delay_msecs; /* Number of msecs before pg_init retry */ atomic_t pg_init_in_progress; /* Only one pg_init allowed at once */ @@ -100,6 +102,7 @@ struct multipath { struct bio_list queued_bios; struct timer_list nopath_timer; /* Timeout for queue_if_no_path */ + bool is_suspending; }; /* @@ -132,6 +135,8 @@ static void queue_if_no_path_timeout_work(struct timer_list *t); #define MPATHF_PG_INIT_DISABLED 4 /* pg_init is not currently allowed */ #define MPATHF_PG_INIT_REQUIRED 5 /* pg_init needs calling? */ #define MPATHF_PG_INIT_DELAY_RETRY 6 /* Delay pg_init retry? */ +#define MPATHF_DELAY_PG_SWITCH 7 /* Delay switching pg if it still has paths */ +#define MPATHF_NEED_PG_SWITCH 8 /* Need to switch pgs after the delay has ended */ static bool mpath_double_check_test_bit(int MPATHF_bit, struct multipath *m) { @@ -254,6 +259,7 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m) atomic_set(&m->pg_init_count, 0); m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT; init_waitqueue_head(&m->pg_init_wait); + init_waitqueue_head(&m->probe_wait); return 0; } @@ -413,23 +419,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes) goto failed; } - /* Were we instructed to switch PG? */ - if (READ_ONCE(m->next_pg)) { - spin_lock_irqsave(&m->lock, flags); - pg = m->next_pg; - if (!pg) { - spin_unlock_irqrestore(&m->lock, flags); - goto check_current_pg; - } - m->next_pg = NULL; - spin_unlock_irqrestore(&m->lock, flags); - pgpath = choose_path_in_pg(m, pg, nr_bytes); - if (!IS_ERR_OR_NULL(pgpath)) - return pgpath; - } - /* Don't change PG until it has no remaining paths */ -check_current_pg: pg = READ_ONCE(m->current_pg); if (pg) { pgpath = choose_path_in_pg(m, pg, nr_bytes); @@ -437,6 +427,21 @@ check_current_pg: return pgpath; } + /* Were we instructed to switch PG?
*/ + if (READ_ONCE(m->next_pg)) { + spin_lock_irqsave(&m->lock, flags); + pg = m->next_pg; + if (!pg) { + spin_unlock_irqrestore(&m->lock, flags); + goto check_all_pgs; + } + m->next_pg = NULL; + spin_unlock_irqrestore(&m->lock, flags); + pgpath = choose_path_in_pg(m, pg, nr_bytes); + if (!IS_ERR_OR_NULL(pgpath)) + return pgpath; + } +check_all_pgs: /* * Loop through priority groups until we find a valid path. * First time we skip PGs marked 'bypassed'. @@ -1439,15 +1444,19 @@ static int action_dev(struct multipath *m, dev_t dev, action_fn action) * Temporarily try to avoid having to use the specified PG */ static void bypass_pg(struct multipath *m, struct priority_group *pg, - bool bypassed) + bool bypassed, bool can_be_delayed) { unsigned long flags; spin_lock_irqsave(&m->lock, flags); pg->bypassed = bypassed; - m->current_pgpath = NULL; - m->current_pg = NULL; + if (can_be_delayed && test_bit(MPATHF_DELAY_PG_SWITCH, &m->flags)) + set_bit(MPATHF_NEED_PG_SWITCH, &m->flags); + else { + m->current_pgpath = NULL; + m->current_pg = NULL; + } spin_unlock_irqrestore(&m->lock, flags); @@ -1476,8 +1485,12 @@ static int switch_pg_num(struct multipath *m, const char *pgstr) if (--pgnum) continue; - m->current_pgpath = NULL; - m->current_pg = NULL; + if (test_bit(MPATHF_DELAY_PG_SWITCH, &m->flags)) + set_bit(MPATHF_NEED_PG_SWITCH, &m->flags); + else { + m->current_pgpath = NULL; + m->current_pg = NULL; + } m->next_pg = pg; } spin_unlock_irqrestore(&m->lock, flags); @@ -1507,7 +1520,7 @@ static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed) break; } - bypass_pg(m, pg, bypassed); + bypass_pg(m, pg, bypassed, true); return 0; } @@ -1561,7 +1574,7 @@ static void pg_init_done(void *data, int errors) * Probably doing something like FW upgrade on the * controller so try the other pg. */ - bypass_pg(m, pg, true); + bypass_pg(m, pg, true, false); break; case SCSI_DH_RETRY: /* Wait before retrying. */ @@ -1741,7 +1754,11 @@ done: static void multipath_presuspend(struct dm_target *ti) { struct multipath *m = ti->private; + unsigned long flags; + spin_lock_irqsave(&m->lock, flags); + m->is_suspending = true; + spin_unlock_irqrestore(&m->lock, flags); /* FIXME: bio-based shouldn't need to always disable queue_if_no_path */ if (m->queue_mode == DM_TYPE_BIO_BASED || !dm_noflush_suspending(m->ti)) queue_if_no_path(m, false, true, __func__); @@ -1765,6 +1782,7 @@ static void multipath_resume(struct dm_target *ti) unsigned long flags; spin_lock_irqsave(&m->lock, flags); + m->is_suspending = false; if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) { set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags); clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags); @@ -1845,10 +1863,10 @@ static void multipath_status(struct dm_target *ti, status_type_t type, DMEMIT("%u ", m->nr_priority_groups); - if (m->next_pg) - pg_num = m->next_pg->pg_num; - else if (m->current_pg) + if (m->current_pg) pg_num = m->current_pg->pg_num; + else if (m->next_pg) + pg_num = m->next_pg->pg_num; else pg_num = (m->nr_priority_groups ? 
1 : 0); @@ -2077,35 +2095,55 @@ out: static int probe_active_paths(struct multipath *m) { struct pgpath *pgpath; - struct priority_group *pg; + struct priority_group *pg = NULL; unsigned long flags; int r = 0; - mutex_lock(&m->work_mutex); - spin_lock_irqsave(&m->lock, flags); - if (test_bit(MPATHF_QUEUE_IO, &m->flags)) - pg = NULL; - else - pg = m->current_pg; + if (test_bit(MPATHF_DELAY_PG_SWITCH, &m->flags)) { + wait_event_lock_irq(m->probe_wait, + !test_bit(MPATHF_DELAY_PG_SWITCH, &m->flags), + m->lock); + /* + * if we waited because a probe was already in progress, + * and it probed the current active pathgroup, don't + * reprobe. Just return the number of valid paths + */ + if (m->current_pg == m->last_probed_pg) + goto skip_probe; + } + if (!m->current_pg || m->is_suspending || + test_bit(MPATHF_QUEUE_IO, &m->flags)) + goto skip_probe; + set_bit(MPATHF_DELAY_PG_SWITCH, &m->flags); + pg = m->last_probed_pg = m->current_pg; spin_unlock_irqrestore(&m->lock, flags); - if (pg) { - list_for_each_entry(pgpath, &pg->pgpaths, list) { - if (!pgpath->is_active) - continue; + list_for_each_entry(pgpath, &pg->pgpaths, list) { + if (pg != READ_ONCE(m->current_pg) || + READ_ONCE(m->is_suspending)) + goto out; + if (!pgpath->is_active) + continue; - r = probe_path(pgpath); - if (r < 0) - goto out; - } + r = probe_path(pgpath); + if (r < 0) + goto out; } - if (!atomic_read(&m->nr_valid_paths)) - r = -ENOTCONN; - out: - mutex_unlock(&m->work_mutex); + spin_lock_irqsave(&m->lock, flags); + clear_bit(MPATHF_DELAY_PG_SWITCH, &m->flags); + if (test_and_clear_bit(MPATHF_NEED_PG_SWITCH, &m->flags)) { + m->current_pgpath = NULL; + m->current_pg = NULL; + } +skip_probe: + if (r == 0 && !atomic_read(&m->nr_valid_paths)) + r = -ENOTCONN; + spin_unlock_irqrestore(&m->lock, flags); + if (pg) + wake_up(&m->probe_wait); return r; } From 050a3e71ce24c6f18d70679d68056f76375ff51c Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Mon, 19 May 2025 23:56:11 -0400 Subject: [PATCH 25/29] dm mpath: replace spin_lock_irqsave with spin_lock_irq Replace spin_lock_irqsave/spin_unlock_irqrestore with spin_lock_irq/spin_unlock_irq at places where it is known that interrupts are enabled. Signed-off-by: Mikulas Patocka Signed-off-by: Benjamin Marzinski --- drivers/md/dm-mpath.c | 75 ++++++++++++++++++------------------------- 1 file changed, 31 insertions(+), 44 deletions(-) diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 12b7bcae490c..81fec2e1e0ef 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -617,7 +617,6 @@ static void multipath_queue_bio(struct multipath *m, struct bio *bio) static struct pgpath *__map_bio(struct multipath *m, struct bio *bio) { struct pgpath *pgpath; - unsigned long flags; /* Do we need to select a new pgpath? 
*/ pgpath = READ_ONCE(m->current_pgpath); @@ -625,12 +624,12 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio) pgpath = choose_pgpath(m, bio->bi_iter.bi_size); if (!pgpath) { - spin_lock_irqsave(&m->lock, flags); + spin_lock_irq(&m->lock); if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { __multipath_queue_bio(m, bio); pgpath = ERR_PTR(-EAGAIN); } - spin_unlock_irqrestore(&m->lock, flags); + spin_unlock_irq(&m->lock); } else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) || mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) { @@ -693,7 +692,6 @@ static void process_queued_io_list(struct multipath *m) static void process_queued_bios(struct work_struct *work) { int r; - unsigned long flags; struct bio *bio; struct bio_list bios; struct blk_plug plug; @@ -702,16 +700,16 @@ static void process_queued_bios(struct work_struct *work) bio_list_init(&bios); - spin_lock_irqsave(&m->lock, flags); + spin_lock_irq(&m->lock); if (bio_list_empty(&m->queued_bios)) { - spin_unlock_irqrestore(&m->lock, flags); + spin_unlock_irq(&m->lock); return; } bio_list_merge_init(&bios, &m->queued_bios); - spin_unlock_irqrestore(&m->lock, flags); + spin_unlock_irq(&m->lock); blk_start_plug(&plug); while ((bio = bio_list_pop(&bios))) { @@ -1195,7 +1193,6 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc, char **argv) struct dm_arg_set as; unsigned int pg_count = 0; unsigned int next_pg_num; - unsigned long flags; as.argc = argc; as.argv = argv; @@ -1260,9 +1257,9 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad; } - spin_lock_irqsave(&m->lock, flags); + spin_lock_irq(&m->lock); enable_nopath_timeout(m); - spin_unlock_irqrestore(&m->lock, flags); + spin_unlock_irq(&m->lock); ti->num_flush_bios = 1; ti->num_discard_bios = 1; @@ -1297,23 +1294,21 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m) static void flush_multipath_work(struct multipath *m) { if (m->hw_handler_name) { - unsigned long flags; - if (!atomic_read(&m->pg_init_in_progress)) goto skip; - spin_lock_irqsave(&m->lock, flags); + spin_lock_irq(&m->lock); if (atomic_read(&m->pg_init_in_progress) && !test_and_set_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) { - spin_unlock_irqrestore(&m->lock, flags); + spin_unlock_irq(&m->lock); flush_workqueue(kmpath_handlerd); multipath_wait_for_pg_init_completion(m); - spin_lock_irqsave(&m->lock, flags); + spin_lock_irq(&m->lock); clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags); } - spin_unlock_irqrestore(&m->lock, flags); + spin_unlock_irq(&m->lock); } skip: if (m->queue_mode == DM_TYPE_BIO_BASED) @@ -1375,11 +1370,10 @@ out: static int reinstate_path(struct pgpath *pgpath) { int r = 0, run_queue = 0; - unsigned long flags; struct multipath *m = pgpath->pg->m; unsigned int nr_valid_paths; - spin_lock_irqsave(&m->lock, flags); + spin_lock_irq(&m->lock); if (pgpath->is_active) goto out; @@ -1409,7 +1403,7 @@ static int reinstate_path(struct pgpath *pgpath) schedule_work(&m->trigger_event); out: - spin_unlock_irqrestore(&m->lock, flags); + spin_unlock_irq(&m->lock); if (run_queue) { dm_table_run_md_queue_async(m->ti->table); process_queued_io_list(m); @@ -1470,7 +1464,6 @@ static int switch_pg_num(struct multipath *m, const char *pgstr) { struct priority_group *pg; unsigned int pgnum; - unsigned long flags; char dummy; if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum || @@ -1479,7 +1472,7 @@ static int switch_pg_num(struct multipath *m, const char *pgstr) return -EINVAL; } - 
spin_lock_irqsave(&m->lock, flags); + spin_lock_irq(&m->lock); list_for_each_entry(pg, &m->priority_groups, list) { pg->bypassed = false; if (--pgnum) @@ -1493,7 +1486,7 @@ static int switch_pg_num(struct multipath *m, const char *pgstr) } m->next_pg = pg; } - spin_unlock_irqrestore(&m->lock, flags); + spin_unlock_irq(&m->lock); schedule_work(&m->trigger_event); return 0; @@ -1754,11 +1747,10 @@ done: static void multipath_presuspend(struct dm_target *ti) { struct multipath *m = ti->private; - unsigned long flags; - spin_lock_irqsave(&m->lock, flags); + spin_lock_irq(&m->lock); m->is_suspending = true; - spin_unlock_irqrestore(&m->lock, flags); + spin_unlock_irq(&m->lock); /* FIXME: bio-based shouldn't need to always disable queue_if_no_path */ if (m->queue_mode == DM_TYPE_BIO_BASED || !dm_noflush_suspending(m->ti)) queue_if_no_path(m, false, true, __func__); @@ -1779,9 +1771,8 @@ static void multipath_postsuspend(struct dm_target *ti) static void multipath_resume(struct dm_target *ti) { struct multipath *m = ti->private; - unsigned long flags; - spin_lock_irqsave(&m->lock, flags); + spin_lock_irq(&m->lock); m->is_suspending = false; if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) { set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags); @@ -1793,7 +1784,7 @@ static void multipath_resume(struct dm_target *ti) test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags), test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)); - spin_unlock_irqrestore(&m->lock, flags); + spin_unlock_irq(&m->lock); } /* @@ -1816,14 +1807,13 @@ static void multipath_status(struct dm_target *ti, status_type_t type, unsigned int status_flags, char *result, unsigned int maxlen) { int sz = 0, pg_counter, pgpath_counter; - unsigned long flags; struct multipath *m = ti->private; struct priority_group *pg; struct pgpath *p; unsigned int pg_num; char state; - spin_lock_irqsave(&m->lock, flags); + spin_lock_irq(&m->lock); /* Features */ if (type == STATUSTYPE_INFO) @@ -1969,7 +1959,7 @@ static void multipath_status(struct dm_target *ti, status_type_t type, break; } - spin_unlock_irqrestore(&m->lock, flags); + spin_unlock_irq(&m->lock); } static int multipath_message(struct dm_target *ti, unsigned int argc, char **argv, @@ -1979,7 +1969,6 @@ static int multipath_message(struct dm_target *ti, unsigned int argc, char **arg dev_t dev; struct multipath *m = ti->private; action_fn action; - unsigned long flags; mutex_lock(&m->work_mutex); @@ -1991,9 +1980,9 @@ static int multipath_message(struct dm_target *ti, unsigned int argc, char **arg if (argc == 1) { if (!strcasecmp(argv[0], "queue_if_no_path")) { r = queue_if_no_path(m, true, false, __func__); - spin_lock_irqsave(&m->lock, flags); + spin_lock_irq(&m->lock); enable_nopath_timeout(m); - spin_unlock_irqrestore(&m->lock, flags); + spin_unlock_irq(&m->lock); goto out; } else if (!strcasecmp(argv[0], "fail_if_no_path")) { r = queue_if_no_path(m, false, false, __func__); @@ -2096,10 +2085,9 @@ static int probe_active_paths(struct multipath *m) { struct pgpath *pgpath; struct priority_group *pg = NULL; - unsigned long flags; int r = 0; - spin_lock_irqsave(&m->lock, flags); + spin_lock_irq(&m->lock); if (test_bit(MPATHF_DELAY_PG_SWITCH, &m->flags)) { wait_event_lock_irq(m->probe_wait, !test_bit(MPATHF_DELAY_PG_SWITCH, &m->flags), @@ -2117,7 +2105,7 @@ static int probe_active_paths(struct multipath *m) goto skip_probe; set_bit(MPATHF_DELAY_PG_SWITCH, &m->flags); pg = m->last_probed_pg = m->current_pg; - spin_unlock_irqrestore(&m->lock, flags); + spin_unlock_irq(&m->lock); list_for_each_entry(pgpath, 
&pg->pgpaths, list) { if (pg != READ_ONCE(m->current_pg) || @@ -2132,7 +2120,7 @@ static int probe_active_paths(struct multipath *m) } out: - spin_lock_irqsave(&m->lock, flags); + spin_lock_irq(&m->lock); clear_bit(MPATHF_DELAY_PG_SWITCH, &m->flags); if (test_and_clear_bit(MPATHF_NEED_PG_SWITCH, &m->flags)) { m->current_pgpath = NULL; @@ -2141,7 +2129,7 @@ out: skip_probe: if (r == 0 && !atomic_read(&m->nr_valid_paths)) r = -ENOTCONN; - spin_unlock_irqrestore(&m->lock, flags); + spin_unlock_irq(&m->lock); if (pg) wake_up(&m->probe_wait); return r; @@ -2154,7 +2142,6 @@ static int multipath_prepare_ioctl(struct dm_target *ti, { struct multipath *m = ti->private; struct pgpath *pgpath; - unsigned long flags; int r; if (_IOC_TYPE(cmd) == DM_IOCTL) { @@ -2182,10 +2169,10 @@ static int multipath_prepare_ioctl(struct dm_target *ti, } else { /* No path is available */ r = -EIO; - spin_lock_irqsave(&m->lock, flags); + spin_lock_irq(&m->lock); if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) r = -ENOTCONN; - spin_unlock_irqrestore(&m->lock, flags); + spin_unlock_irq(&m->lock); } if (r == -ENOTCONN) { @@ -2193,10 +2180,10 @@ static int multipath_prepare_ioctl(struct dm_target *ti, /* Path status changed, redo selection */ (void) choose_pgpath(m, 0); } - spin_lock_irqsave(&m->lock, flags); + spin_lock_irq(&m->lock); if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) (void) __pg_init_all_paths(m); - spin_unlock_irqrestore(&m->lock, flags); + spin_unlock_irq(&m->lock); dm_table_run_md_queue_async(m->ti->table); process_queued_io_list(m); } From 85f6d5b729eaace1549f1dcc284d9865f2c3ec02 Mon Sep 17 00:00:00 2001 From: Benjamin Marzinski Date: Fri, 30 May 2025 10:50:32 -0400 Subject: [PATCH 26/29] dm-table: check BLK_FEAT_ATOMIC_WRITES inside limits_lock dm_set_device_limits() should check q->limits.features for BLK_FEAT_ATOMIC_WRITES while holding q->limits_lock, like it does for the rest of the queue limits. Fixes: b7c18b17a173 ("dm-table: Set BLK_FEAT_ATOMIC_WRITES for target queue limits") Signed-off-by: Benjamin Marzinski Signed-off-by: Mikulas Patocka --- drivers/md/dm-table.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 57573e8b5aa9..9f95f77687ef 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -430,13 +430,13 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, return 0; } + mutex_lock(&q->limits_lock); /* * BLK_FEAT_ATOMIC_WRITES is not inherited from the bottom device in * blk_stack_limits(), so do it manually. */ limits->features |= (q->limits.features & BLK_FEAT_ATOMIC_WRITES); - mutex_lock(&q->limits_lock); if (blk_stack_limits(limits, &q->limits, get_start_sect(bdev) + start) < 0) DMWARN("%s: adding target device %pg caused an alignment inconsistency: " From 829451beaed6165eb11d7a9fb4e28eb17f489980 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Tue, 3 Jun 2025 18:53:17 +0200 Subject: [PATCH 27/29] dm-mirror: fix a tiny race condition There's a tiny race condition in dm-mirror. The functions queue_bio and write_callback grab a spinlock, add a bio to the list, drop the spinlock and wake up the mirrord thread that processes bios in the list. It may be possible that the mirrord thread processes the bio just after spin_unlock_irqrestore is called, before wakeup_mirrord. This spurious wake-up is normally harmless, however if the device mapper device is unloaded just after the bio was processed, it may be possible that wakeup_mirrord(ms) uses invalid "ms" pointer. 
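Schematically, the window looks like this (an illustration, not code from the patch):

/*
 *   queue_bio()				mirrord thread
 *   ----------------------------------	--------------------------------
 *   spin_lock_irqsave(&ms->lock, flags);
 *   bio_list_add(bl, bio);
 *   spin_unlock_irqrestore(...);
 *						processes the bio; the mirror
 *						set is torn down, freeing "ms"
 *   wakeup_mirrord(ms);	<-- use-after-free
 */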
Fix this bug by moving wakeup_mirrord inside the spinlock. Signed-off-by: Mikulas Patocka Cc: stable@vger.kernel.org --- drivers/md/dm-raid1.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 9e615b4f1f5e..785af4816584 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -133,10 +133,9 @@ static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw) spin_lock_irqsave(&ms->lock, flags); should_wake = !(bl->head); bio_list_add(bl, bio); - spin_unlock_irqrestore(&ms->lock, flags); - if (should_wake) wakeup_mirrord(ms); + spin_unlock_irqrestore(&ms->lock, flags); } static void dispatch_bios(void *context, struct bio_list *bio_list) @@ -646,9 +645,9 @@ static void write_callback(unsigned long error, void *context) if (!ms->failures.head) should_wake = 1; bio_list_add(&ms->failures, bio); - spin_unlock_irqrestore(&ms->lock, flags); if (should_wake) wakeup_mirrord(ms); + spin_unlock_irqrestore(&ms->lock, flags); } static void do_write(struct mirror_set *ms, struct bio *bio) From 66be40a14e496689e1f0add50118408e22c96169 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Tue, 3 Jun 2025 18:55:50 +0200 Subject: [PATCH 28/29] dm-verity: fix a memory leak if some arguments are specified multiple times If some of the arguments "check_at_most_once", "ignore_zero_blocks", "use_fec_from_device", "root_hash_sig_key_desc" were specified more than once on the target line, a memory leak would happen. This commit fixes the memory leak. It also fixes error handling in verity_verify_sig_parse_opt_args. Signed-off-by: Mikulas Patocka Cc: stable@vger.kernel.org --- drivers/md/dm-verity-fec.c | 4 ++++ drivers/md/dm-verity-target.c | 8 +++++++- drivers/md/dm-verity-verify-sig.c | 17 +++++++++++++---- 3 files changed, 24 insertions(+), 5 deletions(-) diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c index 0c41949db784..631a887b487c 100644 --- a/drivers/md/dm-verity-fec.c +++ b/drivers/md/dm-verity-fec.c @@ -593,6 +593,10 @@ int verity_fec_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v, (*argc)--; if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_DEV)) { + if (v->fec->dev) { + ti->error = "FEC device already specified"; + return -EINVAL; + } r = dm_get_device(ti, arg_value, BLK_OPEN_READ, &v->fec->dev); if (r) { ti->error = "FEC device lookup failed"; diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 34a9f9fbd0d1..81186bded1ce 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -1123,6 +1123,9 @@ static int verity_alloc_most_once(struct dm_verity *v) { struct dm_target *ti = v->ti; + if (v->validated_blocks) + return 0; + /* the bitset can only handle INT_MAX blocks */ if (v->data_blocks > INT_MAX) { ti->error = "device too large to use check_at_most_once"; @@ -1146,6 +1149,9 @@ static int verity_alloc_zero_digest(struct dm_verity *v) struct dm_verity_io *io; u8 *zero_data; + if (v->zero_digest) + return 0; + v->zero_digest = kmalloc(v->digest_size, GFP_KERNEL); if (!v->zero_digest) @@ -1580,7 +1586,7 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad; } - /* Root hash signature is a optional parameter*/ + /* Root hash signature is an optional parameter */ r = verity_verify_root_hash(root_hash_digest_to_validate, strlen(root_hash_digest_to_validate), verify_args.sig, diff --git a/drivers/md/dm-verity-verify-sig.c b/drivers/md/dm-verity-verify-sig.c index a9e2c6c0a33c..d5261a0e4232 100644 --- 
a/drivers/md/dm-verity-verify-sig.c +++ b/drivers/md/dm-verity-verify-sig.c @@ -71,9 +71,14 @@ int verity_verify_sig_parse_opt_args(struct dm_arg_set *as, const char *arg_name) { struct dm_target *ti = v->ti; - int ret = 0; + int ret; const char *sig_key = NULL; + if (v->signature_key_desc) { + ti->error = DM_VERITY_VERIFY_ERR("root_hash_sig_key_desc already specified"); + return -EINVAL; + } + if (!*argc) { ti->error = DM_VERITY_VERIFY_ERR("Signature key not specified"); return -EINVAL; @@ -83,14 +88,18 @@ int verity_verify_sig_parse_opt_args(struct dm_arg_set *as, (*argc)--; ret = verity_verify_get_sig_from_key(sig_key, sig_opts); - if (ret < 0) + if (ret < 0) { ti->error = DM_VERITY_VERIFY_ERR("Invalid key specified"); + return ret; + } v->signature_key_desc = kstrdup(sig_key, GFP_KERNEL); - if (!v->signature_key_desc) + if (!v->signature_key_desc) { + ti->error = DM_VERITY_VERIFY_ERR("Could not allocate memory for signature key"); return -ENOMEM; + } - return ret; + return 0; } /* From 9f2f6316d753fe80c764e924cee475306f38ceb6 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Tue, 3 Jun 2025 18:58:47 +0200 Subject: [PATCH 29/29] dm-stripe: small code cleanup This commit doesn't fix any bug, it is just code cleanup. Use the function format_dev_t instead of sprintf, because format_dev_t does the same thing. Remove the useless memset call. An unsigned integer can take at most 10 digits, so extend the array size to 22. (note that because the range of minor and major numbers is limited, the size 16 could not be exceeded, thus this function couldn't write beyond string end) Signed-off-by: Mikulas Patocka --- drivers/md/dm-stripe.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index a1b7535c508a..a7dc04bd55e5 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -405,7 +405,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error) { unsigned int i; - char major_minor[16]; + char major_minor[22]; struct stripe_c *sc = ti->private; if (!*error) @@ -417,8 +417,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, if (*error == BLK_STS_NOTSUPP) return DM_ENDIO_DONE; - memset(major_minor, 0, sizeof(major_minor)); - sprintf(major_minor, "%d:%d", MAJOR(bio_dev(bio)), MINOR(bio_dev(bio))); + format_dev_t(major_minor, bio_dev(bio)); /* * Test to see which stripe drive triggered the event