From c48cff39fba9a1a61c3887108610d3bfb69c97a1 Mon Sep 17 00:00:00 2001 From: Kenta Akagi Date: Mon, 15 Sep 2025 12:42:02 +0900 Subject: [PATCH 1/9] md/raid1,raid10: Set the LastDev flag when the configuration changes Currently, the LastDev flag is set on an rdev that failed a failfast metadata write and called md_error, but did not become Faulty. It is cleared when the metadata write retry succeeds. This has problems for the following reasons: * Despite its name, the flag is only set during a metadata write window. * Unlike when LastDev and Failfast was introduced, md_error on the last rdev of a RAID1/10 array now sets MD_BROKEN. Thus when LastDev is set, the array is already unwritable. A following commit will prevent failfast bios from breaking the array, which requires knowing from outside the personality whether an rdev is the last one. For that purpose, LastDev should be set on rdevs that must not be lost. This commit ensures that LastDev is set on the indispensable rdev in a degraded RAID1/10 array. Signed-off-by: Kenta Akagi --- drivers/md/md.c | 4 +--- drivers/md/md.h | 6 +++--- drivers/md/raid1.c | 34 +++++++++++++++++++++++++++++++++- drivers/md/raid10.c | 34 +++++++++++++++++++++++++++++++++- 4 files changed, 70 insertions(+), 8 deletions(-) diff --git a/drivers/md/md.c b/drivers/md/md.c index 09042b060086..61f82ea5643f 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -990,10 +990,8 @@ static void super_written(struct bio *bio) if (!test_bit(Faulty, &rdev->flags) && (bio->bi_opf & MD_FAILFAST)) { set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags); - set_bit(LastDev, &rdev->flags); } - } else - clear_bit(LastDev, &rdev->flags); + } bio_put(bio); diff --git a/drivers/md/md.h b/drivers/md/md.h index d45a9e6ead80..06ab6a382195 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -281,9 +281,9 @@ enum flag_bits { * It is expects that no bad block log * is present. */ - LastDev, /* Seems to be the last working dev as - * it didn't fail, so don't use FailFast - * any more for metadata + LastDev, /* This is the last working rdev. + * so don't use FailFast any more for + * metadata. */ CollisionCheck, /* * check if there is collision between raid1 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 64b8176907a9..c0ef39c56d19 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1735,6 +1735,33 @@ static void raid1_status(struct seq_file *seq, struct mddev *mddev) seq_printf(seq, "]"); } +/** + * update_lastdev - Set or clear LastDev flag for all rdevs in array + * @conf: pointer to r1conf + * + * Sets LastDev if the device is In_sync and cannot be lost for the array. + * Otherwise, clear it. + * + * Caller must hold ->device_lock. + */ +static void update_lastdev(struct r1conf *conf) +{ + int i; + int alive_disks = conf->raid_disks - conf->mddev->degraded; + + for (i = 0; i < conf->raid_disks; i++) { + struct md_rdev *rdev = conf->mirrors[i].rdev; + + if (rdev) { + if (test_bit(In_sync, &rdev->flags) && + alive_disks == 1) + set_bit(LastDev, &rdev->flags); + else + clear_bit(LastDev, &rdev->flags); + } + } +} + /** * raid1_error() - RAID1 error handler. * @mddev: affected md device. 
@@ -1769,8 +1796,10 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev) } } set_bit(Blocked, &rdev->flags); - if (test_and_clear_bit(In_sync, &rdev->flags)) + if (test_and_clear_bit(In_sync, &rdev->flags)) { mddev->degraded++; + update_lastdev(conf); + } set_bit(Faulty, &rdev->flags); spin_unlock_irqrestore(&conf->device_lock, flags); /* @@ -1866,6 +1895,7 @@ static int raid1_spare_active(struct mddev *mddev) } } mddev->degraded -= count; + update_lastdev(conf); spin_unlock_irqrestore(&conf->device_lock, flags); print_conf(conf); @@ -3298,6 +3328,7 @@ static int raid1_run(struct mddev *mddev) rcu_assign_pointer(conf->thread, NULL); mddev->private = conf; set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); + update_lastdev(conf); md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); @@ -3451,6 +3482,7 @@ static int raid1_reshape(struct mddev *mddev) spin_lock_irqsave(&conf->device_lock, flags); mddev->degraded += (raid_disks - conf->raid_disks); + update_lastdev(conf); spin_unlock_irqrestore(&conf->device_lock, flags); conf->raid_disks = mddev->raid_disks = raid_disks; mddev->delta_disks = 0; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index c9bd2005bfd0..b82b042510ae 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1983,6 +1983,33 @@ static int enough(struct r10conf *conf, int ignore) _enough(conf, 1, ignore); } +/** + * update_lastdev - Set or clear LastDev flag for all rdevs in array + * @conf: pointer to r10conf + * + * Sets LastDev if the device is In_sync and cannot be lost for the array. + * Otherwise, clear it. + * + * Caller must hold ->reconfig_mutex or ->device_lock. + */ +static void update_lastdev(struct r10conf *conf) +{ + int i; + int raid_disks = max(conf->geo.raid_disks, conf->prev.raid_disks); + + for (i = 0; i < raid_disks; i++) { + struct md_rdev *rdev = conf->mirrors[i].rdev; + + if (rdev) { + if (test_bit(In_sync, &rdev->flags) && + !enough(conf, i)) + set_bit(LastDev, &rdev->flags); + else + clear_bit(LastDev, &rdev->flags); + } + } +} + /** * raid10_error() - RAID10 error handler. * @mddev: affected md device. 
@@ -2013,8 +2040,10 @@ static void raid10_error(struct mddev *mddev, struct md_rdev *rdev) return; } } - if (test_and_clear_bit(In_sync, &rdev->flags)) + if (test_and_clear_bit(In_sync, &rdev->flags)) { mddev->degraded++; + update_lastdev(conf); + } set_bit(MD_RECOVERY_INTR, &mddev->recovery); set_bit(Blocked, &rdev->flags); @@ -2102,6 +2131,7 @@ static int raid10_spare_active(struct mddev *mddev) } spin_lock_irqsave(&conf->device_lock, flags); mddev->degraded -= count; + update_lastdev(conf); spin_unlock_irqrestore(&conf->device_lock, flags); print_conf(conf); @@ -4161,6 +4191,7 @@ static int raid10_run(struct mddev *mddev) md_set_array_sectors(mddev, size); mddev->resync_max_sectors = size; set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags); + update_lastdev(conf); if (md_integrity_register(mddev)) goto out_free_conf; @@ -4569,6 +4600,7 @@ static int raid10_start_reshape(struct mddev *mddev) */ spin_lock_irq(&conf->device_lock); mddev->degraded = calc_degraded(conf); + update_lastdev(conf); spin_unlock_irq(&conf->device_lock); mddev->raid_disks = conf->geo.raid_disks; mddev->reshape_position = conf->reshape_progress; From 7dba30d332b8c886bd4c071addc8cec122adb39a Mon Sep 17 00:00:00 2001 From: Kenta Akagi Date: Mon, 15 Sep 2025 12:42:03 +0900 Subject: [PATCH 2/9] md: serialize md_error() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit md_error is mainly called when a bio fails, so it can run in parallel. Each personality’s error_handler locks with device_lock, so concurrent calls are safe. However, RAID1 and RAID10 require changes for Failfast bio error handling, which needs a special helper for md_error. For that helper to work, the regular md_error must also be serialized. The helper function md_bio_failure_error for failfast will be introduced in a subsequent commit. This commit serializes md_error for all RAID personalities. While unnecessary for RAID levels other than 1 and 10, it has no performance impact as it is a cold path. 
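The resulting call structure is roughly as follows (simplified sketch for
illustration only; the exact change is in the diff below, and
md_bio_failure_error() is only introduced in the next commit):

	/* md_error() itself becomes a thin, serialized wrapper. */
	void md_error(struct mddev *mddev, struct md_rdev *rdev)
	{
		spin_lock(&mddev->error_handle_lock);
		_md_error(mddev, rdev);	/* the previous md_error() body */
		spin_unlock(&mddev->error_handle_lock);
	}

	/*
	 * The failfast helper added in the next commit takes the same lock,
	 * so its LastDev check cannot race with a concurrent md_error().
	 */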
Signed-off-by: Kenta Akagi --- drivers/md/md.c | 10 +++++++++- drivers/md/md.h | 4 ++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/md/md.c b/drivers/md/md.c index 61f82ea5643f..4accdba37e23 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -701,6 +701,7 @@ int mddev_init(struct mddev *mddev) atomic_set(&mddev->openers, 0); atomic_set(&mddev->sync_seq, 0); spin_lock_init(&mddev->lock); + spin_lock_init(&mddev->error_handle_lock); init_waitqueue_head(&mddev->sb_wait); init_waitqueue_head(&mddev->recovery_wait); mddev->reshape_position = MaxSector; @@ -8184,7 +8185,7 @@ void md_unregister_thread(struct mddev *mddev, struct md_thread __rcu **threadp) } EXPORT_SYMBOL(md_unregister_thread); -void md_error(struct mddev *mddev, struct md_rdev *rdev) +void _md_error(struct mddev *mddev, struct md_rdev *rdev) { if (!rdev || test_bit(Faulty, &rdev->flags)) return; @@ -8209,6 +8210,13 @@ void md_error(struct mddev *mddev, struct md_rdev *rdev) queue_work(md_misc_wq, &mddev->event_work); md_new_event(); } + +void md_error(struct mddev *mddev, struct md_rdev *rdev) +{ + spin_lock(&mddev->error_handle_lock); + _md_error(mddev, rdev); + spin_unlock(&mddev->error_handle_lock); +} EXPORT_SYMBOL(md_error); /* seq_file implementation /proc/mdstat */ diff --git a/drivers/md/md.h b/drivers/md/md.h index 06ab6a382195..f2e68fcd8297 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -619,6 +619,9 @@ struct mddev { /* The sequence number for sync thread */ atomic_t sync_seq; + /* Lock for serializing md_error */ + spinlock_t error_handle_lock; + bool has_superblocks:1; bool fail_last_dev:1; bool serialize_policy:1; @@ -879,6 +882,7 @@ extern void md_write_start(struct mddev *mddev, struct bio *bi); extern void md_write_inc(struct mddev *mddev, struct bio *bi); extern void md_write_end(struct mddev *mddev); extern void md_done_sync(struct mddev *mddev, int blocks, int ok); +void _md_error(struct mddev *mddev, struct md_rdev *rdev); extern void md_error(struct mddev *mddev, struct md_rdev *rdev); extern void md_finish_reshape(struct mddev *mddev); void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev, From 87b176d13ee1cf660a145a12a79b4ae14788d975 Mon Sep 17 00:00:00 2001 From: Kenta Akagi Date: Mon, 15 Sep 2025 12:42:04 +0900 Subject: [PATCH 3/9] md: introduce md_bio_failure_error() Add a new helper function md_bio_failure_error(). It is serialized with md_error() under the same lock and works almost the same, but with two differences: * Takes the failed bio as an argument * If MD_FAILFAST is set in bi_opf and the target rdev is LastDev, it does not mark the rdev faulty Failfast bios must not break the array, but in the current implementation this can happen. This is because MD_BROKEN was introduced in RAID1/RAID10 and is set when md_error() is called on an rdev required for mddev operation. At the time failfast was introduced, this was not the case. Before this commit, md_error() has already been serialized, and RAID1/RAID10 mark rdevs that must not be set Faulty by Failfast with the LastDev flag. The actual change in bio error handling will follow in a later commit. 
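For reference, the intended caller contract looks roughly like this
(simplified sketch; the real call-site conversions follow in the next
commit):

	/* e.g. in super_written(), once the later commit converts it: */
	if (bio->bi_status) {
		if (!md_bio_failure_error(mddev, rdev, bio))
			/* rdev was spared (LastDev + MD_FAILFAST bio):
			 * retry the metadata write instead of failing
			 * the device. */
			set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
	}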
Signed-off-by: Kenta Akagi
---
 drivers/md/md.c | 42 ++++++++++++++++++++++++++++++++++++++++++
 drivers/md/md.h |  4 +++-
 2 files changed, 45 insertions(+), 1 deletion(-)

diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4accdba37e23..2f7e06765181 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -8219,6 +8219,48 @@ void md_error(struct mddev *mddev, struct md_rdev *rdev)
 }
 EXPORT_SYMBOL(md_error);
 
+/** md_bio_failure_error() - md error handler for MD_FAILFAST bios
+ * @mddev: affected md device.
+ * @rdev: member device to fail.
+ * @bio: bio that triggered the device failure.
+ *
+ * This is almost the same as md_error(). That is, it is serialized at
+ * the same level as md_error, marks the rdev as Faulty, and changes
+ * the mddev status.
+ * However, if all of the following conditions are met, it does nothing.
+ * This is because MD_FAILFAST bios must not stop the array.
+ * * RAID1 or RAID10
+ * * LastDev - if rdev becomes Faulty, mddev will stop
+ * * The failed bio has MD_FAILFAST set
+ *
+ * Returns: true if _md_error() was called, false if not.
+ */
+bool md_bio_failure_error(struct mddev *mddev, struct md_rdev *rdev, struct bio *bio)
+{
+	bool do_md_error = true;
+
+	spin_lock(&mddev->error_handle_lock);
+	if (mddev->pers) {
+		if (mddev->pers->head.id == ID_RAID1 ||
+		    mddev->pers->head.id == ID_RAID10) {
+			if (test_bit(LastDev, &rdev->flags) &&
+			    test_bit(FailFast, &rdev->flags) &&
+			    bio != NULL && (bio->bi_opf & MD_FAILFAST))
+				do_md_error = false;
+		}
+	}
+
+	if (do_md_error)
+		_md_error(mddev, rdev);
+	else
+		pr_warn_ratelimited("md: %s: %s didn't do anything for %pg\n",
+				    mdname(mddev), __func__, rdev->bdev);
+
+	spin_unlock(&mddev->error_handle_lock);
+	return do_md_error;
+}
+EXPORT_SYMBOL(md_bio_failure_error);
+
 /* seq_file implementation /proc/mdstat */
 
 static void status_unused(struct seq_file *seq)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index f2e68fcd8297..6ea42f1d3e25 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -283,7 +283,8 @@ enum flag_bits {
			 */
	LastDev,	/* This is the last working rdev.
			 * so don't use FailFast any more for
-			 * metadata.
+			 * metadata and don't fail the rdev
+			 * on a FailFast bio failure.
			 */
	CollisionCheck,	/*
			 * check if there is collision between raid1
@@ -884,6 +885,7 @@ extern void md_write_end(struct mddev *mddev);
 extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
 void _md_error(struct mddev *mddev, struct md_rdev *rdev);
 extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
+extern bool md_bio_failure_error(struct mddev *mddev, struct md_rdev *rdev, struct bio *bio);
 extern void md_finish_reshape(struct mddev *mddev);
 void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
			struct bio *bio, sector_t start, sector_t size);

From a07263fc98b2beced998f6ad0c0bca9f8f752dd7 Mon Sep 17 00:00:00 2001
From: Kenta Akagi
Date: Mon, 15 Sep 2025 12:42:05 +0900
Subject: [PATCH 4/9] md/raid1,raid10: Don't set MD_BROKEN on failfast bio failure

Failfast is a feature implemented only for RAID1 and RAID10. It instructs
the block device providing the rdev to immediately return a bio error
without retrying if any issue occurs. This allows quickly detaching a
problematic rdev and minimizes IO latency.

Due to its nature, failfast bios can fail easily, and md must not mark an
essential rdev as Faulty or set MD_BROKEN on the array just because a
failfast bio failed. When failfast was introduced, RAID1 and RAID10 were
designed to continue operating normally even if md_error was called for
the last rdev.
However, with the introduction of MD_BROKEN in RAID1/RAID10 in commit 9631abdbf406 ("md: Set MD_BROKEN for RAID1 and RAID10"), calling md_error for the last rdev now prevents further writes to the array. Despite this, the current failfast error handler still assumes that calling md_error will not break the array. Normally, this is not an issue because MD_FAILFAST is not set when a bio is issued to the last rdev. However, if the array is not degraded and a bio with MD_FAILFAST has been issued, simultaneous failures could potentially break the array. This is unusual but can happen; for example, this can occur when using NVMe over TCP if all rdevs depend on a single Ethernet link. In other words, this becomes a problem under the following conditions: Preconditions: * Failfast is enabled on all rdevs. * All rdevs are In_sync - This is a requirement for bio to be submit with MD_FAILFAST. * At least one bio has been submitted but has not yet completed. Trigger condition: * All underlying devices of the rdevs return an error for their failfast bios. Whether the bio is a read or a write makes little difference to the outcome. In the write case, md_error is invoked on each rdev through its bi_end_io handler. In the read case, losing the first rdev triggers a metadata update. Next, md_super_write, unlike raid1_write_request, issues the bio with MD_FAILFAST if the rdev supports it, causing the bio to fail immediately - Before this patchset, LastDev was set only by the failure path in super_written. Consequently, super_written calls md_error on the remaining rdev. Prior to this commit, the following changes were introduced: * The helper function md_bio_failure_error() that skips the error handler if a failfast bio targets the last rdev. * Serialization md_error() and md_bio_failure_error(). * Setting the LastDev flag for rdevs that must not be lost. This commit uses md_bio_failure_error() instead of md_error() for failfast bio failures, ensuring that failfast bios do not stop array operations. Fixes: 9631abdbf406 ("md: Set MD_BROKEN for RAID1 and RAID10") Signed-off-by: Kenta Akagi --- drivers/md/md.c | 5 +---- drivers/md/raid1.c | 37 ++++++++++++++++++------------------- drivers/md/raid10.c | 9 +++++---- 3 files changed, 24 insertions(+), 27 deletions(-) diff --git a/drivers/md/md.c b/drivers/md/md.c index 2f7e06765181..9874f7052f9f 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -987,11 +987,8 @@ static void super_written(struct bio *bio) if (bio->bi_status) { pr_err("md: %s gets error=%d\n", __func__, blk_status_to_errno(bio->bi_status)); - md_error(mddev, rdev); - if (!test_bit(Faulty, &rdev->flags) - && (bio->bi_opf & MD_FAILFAST)) { + if (!md_bio_failure_error(mddev, rdev, bio)) set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags); - } } bio_put(bio); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index c0ef39c56d19..86ef54a170b3 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -471,7 +471,7 @@ static void raid1_end_write_request(struct bio *bio) (bio->bi_opf & MD_FAILFAST) && /* We never try FailFast to WriteMostly devices */ !test_bit(WriteMostly, &rdev->flags)) { - md_error(r1_bio->mddev, rdev); + md_bio_failure_error(r1_bio->mddev, rdev, bio); } /* @@ -2180,8 +2180,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio) if (test_bit(FailFast, &rdev->flags)) { /* Don't try recovering from here - just fail it * ... 
unless it is the last working device of course */ - md_error(mddev, rdev); - if (test_bit(Faulty, &rdev->flags)) + if (md_bio_failure_error(mddev, rdev, bio)) /* Don't try to read from here, but make sure * put_buf does it's thing */ @@ -2659,9 +2658,8 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio) static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) { struct mddev *mddev = conf->mddev; - struct bio *bio; + struct bio *bio, *updated_bio; struct md_rdev *rdev; - sector_t sector; clear_bit(R1BIO_ReadError, &r1_bio->state); /* we got a read error. Maybe the drive is bad. Maybe just @@ -2674,29 +2672,30 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) */ bio = r1_bio->bios[r1_bio->read_disk]; - bio_put(bio); - r1_bio->bios[r1_bio->read_disk] = NULL; + updated_bio = NULL; rdev = conf->mirrors[r1_bio->read_disk].rdev; - if (mddev->ro == 0 - && !test_bit(FailFast, &rdev->flags)) { - freeze_array(conf, 1); - fix_read_error(conf, r1_bio); - unfreeze_array(conf); - } else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) { - md_error(mddev, rdev); + if (mddev->ro == 0) { + if (!test_bit(FailFast, &rdev->flags)) { + freeze_array(conf, 1); + fix_read_error(conf, r1_bio); + unfreeze_array(conf); + } else { + md_bio_failure_error(mddev, rdev, bio); + } } else { - r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED; + updated_bio = IO_BLOCKED; } + bio_put(bio); + r1_bio->bios[r1_bio->read_disk] = updated_bio; + rdev_dec_pending(rdev, conf->mddev); - sector = r1_bio->sector; - bio = r1_bio->master_bio; /* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */ r1_bio->state = 0; - raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio); - allow_barrier(conf, sector); + raid1_read_request(mddev, r1_bio->master_bio, r1_bio->sectors, r1_bio); + allow_barrier(conf, r1_bio->sector); } static void raid1d(struct md_thread *thread) diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index b82b042510ae..06becc878ddc 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -488,7 +488,7 @@ static void raid10_end_write_request(struct bio *bio) dec_rdev = 0; if (test_bit(FailFast, &rdev->flags) && (bio->bi_opf & MD_FAILFAST)) { - md_error(rdev->mddev, rdev); + md_bio_failure_error(rdev->mddev, rdev, bio); } /* @@ -2443,7 +2443,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) continue; } else if (test_bit(FailFast, &rdev->flags)) { /* Just give up on this device */ - md_error(rdev->mddev, rdev); + md_bio_failure_error(rdev->mddev, rdev, tbio); continue; } /* Ok, we need to write this bio, either to correct an @@ -2898,8 +2898,9 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) freeze_array(conf, 1); fix_read_error(conf, mddev, r10_bio); unfreeze_array(conf); - } else - md_error(mddev, rdev); + } else { + md_bio_failure_error(mddev, rdev, bio); + } rdev_dec_pending(rdev, mddev); r10_bio->state = 0; From b9ce9bd46a76d080a303360d0e3aa5e556b58403 Mon Sep 17 00:00:00 2001 From: Kenta Akagi Date: Mon, 15 Sep 2025 12:42:06 +0900 Subject: [PATCH 5/9] md/raid1,raid10: Set R{1,10}BIO_Uptodate when successful retry of a failed bio In the current implementation, when a write bio fails, the retry flow is as follows: * In bi_end_io, e.g. raid1_end_write_request, R1BIO_WriteError is set on the r1bio. * The md thread calls handle_write_finished corresponding to this r1bio. * Inside handle_write_finished, narrow_write_error is invoked. 
* narrow_write_error rewrites the r1bio on a per-sector basis, marking any failed sectors as badblocks. It returns true if all sectors succeed, or if failed sectors are successfully recorded via rdev_set_badblock. It returns false if rdev_set_badblock fails or if badblocks are disabled. * handle_write_finished faults the rdev if it receives false from narrow_write_error. Otherwise, it does nothing. This can cause a problem where an r1bio that succeeded on retry is incorrectly reported as failed to the higher layer, for example in the following case: * Only one In_sync rdev exists, and * The write bio initially failed but all retries in narrow_write_error succeed. This commit ensures that if a write initially fails but all retries in narrow_write_error succeed, R1BIO_Uptodate or R10BIO_Uptodate is set and the higher layer receives a successful write status. Signed-off-by: Kenta Akagi --- drivers/md/raid1.c | 32 ++++++++++++++++++++++++++------ drivers/md/raid10.c | 21 +++++++++++++++++++++ 2 files changed, 47 insertions(+), 6 deletions(-) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 86ef54a170b3..9cd45f3bd60e 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2519,6 +2519,21 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio) } } +/** + * narrow_write_error() - Retry write and set badblock + * @r1_bio: the r1bio containing the write error + * @i: which device to retry + * + * Rewrites the bio, splitting it at the least common multiple of the logical + * block size and the badblock size. Blocks that fail to be written are marked + * as bad. If badblocks are disabled, no write is attempted and false is + * returned immediately. + * + * Return: + * * %true - all blocks were written or marked bad successfully + * * %false - bbl disabled or + * one or more blocks write failed and could not be marked bad + */ static bool narrow_write_error(struct r1bio *r1_bio, int i) { struct mddev *mddev = r1_bio->mddev; @@ -2616,9 +2631,9 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio) int m, idx; bool fail = false; - for (m = 0; m < conf->raid_disks * 2 ; m++) + for (m = 0; m < conf->raid_disks * 2 ; m++) { + struct md_rdev *rdev = conf->mirrors[m].rdev; if (r1_bio->bios[m] == IO_MADE_GOOD) { - struct md_rdev *rdev = conf->mirrors[m].rdev; rdev_clear_badblocks(rdev, r1_bio->sector, r1_bio->sectors, 0); @@ -2630,12 +2645,17 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio) */ fail = true; if (!narrow_write_error(r1_bio, m)) - md_error(conf->mddev, - conf->mirrors[m].rdev); + md_error(conf->mddev, rdev); /* an I/O failed, we can't clear the bitmap */ - rdev_dec_pending(conf->mirrors[m].rdev, - conf->mddev); + else if (test_bit(In_sync, &rdev->flags) && + !test_bit(Faulty, &rdev->flags) && + rdev_has_badblock(rdev, + r1_bio->sector, + r1_bio->sectors) == 0) + set_bit(R1BIO_Uptodate, &r1_bio->state); + rdev_dec_pending(rdev, conf->mddev); } + } if (fail) { spin_lock_irq(&conf->device_lock); list_add(&r1_bio->retry_list, &conf->bio_end_io_list); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 06becc878ddc..59d73d1403c2 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -2812,6 +2812,21 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 } } +/** + * narrow_write_error() - Retry write and set badblock + * @r10_bio: the r10bio containing the write error + * @i: which device to retry + * + * Rewrites the bio, splitting it at the least common multiple of the 
logical + * block size and the badblock size. Blocks that fail to be written are marked + * as bad. If badblocks are disabled, no write is attempted and false is + * returned immediately. + * + * Return: + * * %true - all blocks were written or marked bad successfully + * * %false - bbl disabled or + * one or more blocks write failed and could not be marked bad + */ static bool narrow_write_error(struct r10bio *r10_bio, int i) { struct bio *bio = r10_bio->master_bio; @@ -2978,6 +2993,12 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) fail = true; if (!narrow_write_error(r10_bio, m)) md_error(conf->mddev, rdev); + else if (test_bit(In_sync, &rdev->flags) && + !test_bit(Faulty, &rdev->flags) && + rdev_has_badblock(rdev, + r10_bio->devs[m].addr, + r10_bio->sectors) == 0) + set_bit(R10BIO_Uptodate, &r10_bio->state); rdev_dec_pending(rdev, conf->mddev); } bio = r10_bio->devs[m].repl_bio; From f75eea78e9fa02ea4e17e2393979999b1f7bbcb1 Mon Sep 17 00:00:00 2001 From: Kenta Akagi Date: Mon, 15 Sep 2025 12:42:07 +0900 Subject: [PATCH 6/9] md/raid1,raid10: Fix missing retries Failfast write bios on no-bbl rdevs In the current implementation, write failures are not retried on rdevs with badblocks disabled. This is because narrow_write_error, which issues retry bios, immediately returns when badblocks are disabled. As a result, a single write failure on such an rdev will immediately mark it as Faulty. The retry mechanism appears to have been implemented under the assumption that a bad block is involved in the failure. However, the retry after MD_FAILFAST write failure depend on this code, and a Failfast write request may fail for reasons unrelated to bad blocks. Consequently, if failfast is enabled and badblocks are disabled on all rdevs, and all rdevs encounter a failfast write bio failure at the same time, no retries will occur and the entire array can be lost. This commit adds a path in narrow_write_error to retry writes even on rdevs where bad blocks are disabled, and failed bios marked with MD_FAILFAST will use this path. For non-failfast cases, the behavior remains unchanged: no retry writes are attempted to rdevs with bad blocks disabled. Fixes: 1919cbb23bf1 ("md/raid10: add failfast handling for writes.") Fixes: 212e7eb7a340 ("md/raid1: add failfast handling for writes.") Signed-off-by: Kenta Akagi --- drivers/md/raid1.c | 44 +++++++++++++++++++++++++++++--------------- drivers/md/raid10.c | 37 ++++++++++++++++++++++++------------- 2 files changed, 53 insertions(+), 28 deletions(-) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 9cd45f3bd60e..47b646a4915a 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2523,18 +2523,19 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio) * narrow_write_error() - Retry write and set badblock * @r1_bio: the r1bio containing the write error * @i: which device to retry + * @force: Retry writing even if badblock is disabled * * Rewrites the bio, splitting it at the least common multiple of the logical * block size and the badblock size. Blocks that fail to be written are marked - * as bad. If badblocks are disabled, no write is attempted and false is - * returned immediately. + * as bad. If bbl disabled and @force is not set, no retry is attempted. + * If bbl disabled and @force is set, the write is retried in the same way. 
* * Return: * * %true - all blocks were written or marked bad successfully * * %false - bbl disabled or * one or more blocks write failed and could not be marked bad */ -static bool narrow_write_error(struct r1bio *r1_bio, int i) +static bool narrow_write_error(struct r1bio *r1_bio, int i, bool force) { struct mddev *mddev = r1_bio->mddev; struct r1conf *conf = mddev->private; @@ -2555,13 +2556,17 @@ static bool narrow_write_error(struct r1bio *r1_bio, int i) sector_t sector; int sectors; int sect_to_write = r1_bio->sectors; - bool ok = true; + bool write_ok = true; + bool setbad_ok = true; + bool bbl_enabled = !(rdev->badblocks.shift < 0); - if (rdev->badblocks.shift < 0) + if (!force && !bbl_enabled) return false; - block_sectors = roundup(1 << rdev->badblocks.shift, - bdev_logical_block_size(rdev->bdev) >> 9); + block_sectors = bdev_logical_block_size(rdev->bdev) >> 9; + if (bbl_enabled) + block_sectors = roundup(1 << rdev->badblocks.shift, + block_sectors); sector = r1_bio->sector; sectors = ((sector + block_sectors) & ~(sector_t)(block_sectors - 1)) @@ -2589,18 +2594,22 @@ static bool narrow_write_error(struct r1bio *r1_bio, int i) bio_trim(wbio, sector - r1_bio->sector, sectors); wbio->bi_iter.bi_sector += rdev->data_offset; - if (submit_bio_wait(wbio) < 0) + if (submit_bio_wait(wbio) < 0) { /* failure! */ - ok = rdev_set_badblocks(rdev, sector, - sectors, 0) - && ok; + write_ok = false; + if (bbl_enabled) + setbad_ok = rdev_set_badblocks(rdev, sector, + sectors, 0) + && setbad_ok; + } bio_put(wbio); sect_to_write -= sectors; sector += sectors; sectors = block_sectors; } - return ok; + return (write_ok || + (bbl_enabled && setbad_ok)); } static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio) @@ -2633,18 +2642,23 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio) for (m = 0; m < conf->raid_disks * 2 ; m++) { struct md_rdev *rdev = conf->mirrors[m].rdev; - if (r1_bio->bios[m] == IO_MADE_GOOD) { + struct bio *bio = r1_bio->bios[m]; + + if (bio == IO_MADE_GOOD) { rdev_clear_badblocks(rdev, r1_bio->sector, r1_bio->sectors, 0); rdev_dec_pending(rdev, conf->mddev); - } else if (r1_bio->bios[m] != NULL) { + } else if (bio != NULL) { /* This drive got a write error. We need to * narrow down and record precise write * errors. */ fail = true; - if (!narrow_write_error(r1_bio, m)) + if (!narrow_write_error( + r1_bio, m, + test_bit(FailFast, &rdev->flags) && + (bio->bi_opf & MD_FAILFAST))) md_error(conf->mddev, rdev); /* an I/O failed, we can't clear the bitmap */ else if (test_bit(In_sync, &rdev->flags) && diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 59d73d1403c2..ba93bc4b2c2d 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -2816,18 +2816,18 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 * narrow_write_error() - Retry write and set badblock * @r10_bio: the r10bio containing the write error * @i: which device to retry + * @force: Retry writing even if badblock is disabled * * Rewrites the bio, splitting it at the least common multiple of the logical * block size and the badblock size. Blocks that fail to be written are marked - * as bad. If badblocks are disabled, no write is attempted and false is - * returned immediately. + * as bad. If bbl disabled and @force is not set, no retry is attempted. 
* * Return: * * %true - all blocks were written or marked bad successfully * * %false - bbl disabled or * one or more blocks write failed and could not be marked bad */ -static bool narrow_write_error(struct r10bio *r10_bio, int i) +static bool narrow_write_error(struct r10bio *r10_bio, int i, bool force) { struct bio *bio = r10_bio->master_bio; struct mddev *mddev = r10_bio->mddev; @@ -2848,13 +2848,17 @@ static bool narrow_write_error(struct r10bio *r10_bio, int i) sector_t sector; int sectors; int sect_to_write = r10_bio->sectors; - bool ok = true; + bool write_ok = true; + bool setbad_ok = true; + bool bbl_enabled = !(rdev->badblocks.shift < 0); - if (rdev->badblocks.shift < 0) + if (!force && !bbl_enabled) return false; - block_sectors = roundup(1 << rdev->badblocks.shift, - bdev_logical_block_size(rdev->bdev) >> 9); + block_sectors = bdev_logical_block_size(rdev->bdev) >> 9; + if (bbl_enabled) + block_sectors = roundup(1 << rdev->badblocks.shift, + block_sectors); sector = r10_bio->sector; sectors = ((r10_bio->sector + block_sectors) & ~(sector_t)(block_sectors - 1)) @@ -2874,18 +2878,22 @@ static bool narrow_write_error(struct r10bio *r10_bio, int i) choose_data_offset(r10_bio, rdev); wbio->bi_opf = REQ_OP_WRITE; - if (submit_bio_wait(wbio) < 0) + if (submit_bio_wait(wbio) < 0) { /* Failure! */ - ok = rdev_set_badblocks(rdev, wsector, - sectors, 0) - && ok; + write_ok = false; + if (bbl_enabled) + setbad_ok = rdev_set_badblocks(rdev, wsector, + sectors, 0) + && setbad_ok; + } bio_put(wbio); sect_to_write -= sectors; sector += sectors; sectors = block_sectors; } - return ok; + return (write_ok || + (bbl_enabled && setbad_ok)); } static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) @@ -2991,7 +2999,10 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) rdev_dec_pending(rdev, conf->mddev); } else if (bio != NULL && bio->bi_status) { fail = true; - if (!narrow_write_error(r10_bio, m)) + if (!narrow_write_error( + r10_bio, m, + test_bit(FailFast, &rdev->flags) && + (bio->bi_opf & MD_FAILFAST))) md_error(conf->mddev, rdev); else if (test_bit(In_sync, &rdev->flags) && !test_bit(Faulty, &rdev->flags) && From 154cdc0abca689092319efa0f5a92d786142bfa1 Mon Sep 17 00:00:00 2001 From: Kenta Akagi Date: Mon, 15 Sep 2025 12:42:08 +0900 Subject: [PATCH 7/9] md/raid10: fix failfast read error not rescheduled raid10_end_read_request lacks a path to retry when a FailFast IO fails. As a result, when Failfast Read IOs fail on all rdevs, the upper layer receives EIO, without read rescheduled. Looking at the two commits below, it seems only raid10_end_read_request lacks the failfast read retry handling, while raid1_end_read_request has it. In RAID1, the retry works as expected. * commit 8d3ca83dcf9c ("md/raid10: add failfast handling for reads.") * commit 2e52d449bcec ("md/raid1: add failfast handling for reads.") I don't know why raid10_end_read_request lacks this, but it is probably just a simple oversight. This commit will make the failfast read bio for the last rdev in raid10 retry if it fails. Fixes: 8d3ca83dcf9c ("md/raid10: add failfast handling for reads.") Signed-off-by: Kenta Akagi --- drivers/md/raid10.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index ba93bc4b2c2d..87d1fcf82e71 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -399,6 +399,11 @@ static void raid10_end_read_request(struct bio *bio) * wait for the 'master' bio. 
		 */
		set_bit(R10BIO_Uptodate, &r10_bio->state);
+	} else if (test_bit(FailFast, &rdev->flags) &&
+		   test_bit(R10BIO_FailFast, &r10_bio->state)) {
+		/* This was a fail-fast read so we definitely
+		 * want to retry */
+		;
	} else if (!raid1_should_handle_error(bio)) {
		uptodate = 1;
	} else {

From da97ee29f5ac9892f41bddcdefb17236d5311c49 Mon Sep 17 00:00:00 2001
From: Kenta Akagi
Date: Mon, 15 Sep 2025 12:42:09 +0900
Subject: [PATCH 8/9] md/raid1,raid10: Add error message when setting MD_BROKEN

Once MD_BROKEN is set on an array, no further writes can be performed to
it. The user must be informed that the array cannot continue operation.

Signed-off-by: Kenta Akagi
---
 drivers/md/raid1.c  | 4 ++++
 drivers/md/raid10.c | 4 ++++
 2 files changed, 8 insertions(+)

diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 47b646a4915a..e75067c1529e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1788,6 +1788,10 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
	if (test_bit(In_sync, &rdev->flags) &&
	    (conf->raid_disks - mddev->degraded) == 1) {
		set_bit(MD_BROKEN, &mddev->flags);
+		pr_crit("md/raid1:%s: Disk failure on %pg, this is the last device.\n"
+			"md/raid1:%s: Cannot continue operation (%d/%d failed).\n",
+			mdname(mddev), rdev->bdev,
+			mdname(mddev), mddev->degraded + 1, conf->raid_disks);
 
		if (!mddev->fail_last_dev) {
			conf->recovery_disabled = mddev->recovery_disabled;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 87d1fcf82e71..4075b8ee5330 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2039,6 +2039,10 @@ static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
	if (test_bit(In_sync, &rdev->flags) &&
	    !enough(conf, rdev->raid_disk)) {
		set_bit(MD_BROKEN, &mddev->flags);
+		pr_crit("md/raid10:%s: Disk failure on %pg, this is the last device.\n"
+			"md/raid10:%s: Cannot continue operation (%d/%d failed).\n",
+			mdname(mddev), rdev->bdev,
+			mdname(mddev), mddev->degraded + 1, conf->geo.raid_disks);
 
		if (!mddev->fail_last_dev) {
			spin_unlock_irqrestore(&conf->device_lock, flags);

From 1539be0346e3ae90d17c448bd5e161c18ec36854 Mon Sep 17 00:00:00 2001
From: Kenta Akagi
Date: Mon, 15 Sep 2025 12:42:10 +0900
Subject: [PATCH 9/9] md/raid1,raid10: Fix: Operation continuing on 0 devices.

Since commit 9a567843f7ce ("md: allow last device to be forcibly removed
from RAID1/RAID10."), RAID1/10 arrays can now lose all rdevs. Before that
commit, the array could never lose its last rdev, and raid{1,10}_error
could never reach the end of the function without returning early.
However, both situations can occur in the current implementation.

As a result, when mddev->fail_last_dev is set, a spurious pr_crit message
can be printed. This patch prevents the "Operation continuing" message
from being printed when the array is not operational.

root@fedora:~# mdadm --create --verbose /dev/md0 --level=1 \
--raid-devices=2 /dev/loop0 /dev/loop1
mdadm: Note: this array has metadata at the start and
    may not be suitable as a boot device. If you plan to
    store '/boot' on this device please ensure that
    your boot-loader understands md/v1.x metadata, or use
    --metadata=0.90
mdadm: size set to 1046528K
Continue creating array? y
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md0 started.
root@fedora:~# echo 1 > /sys/block/md0/md/fail_last_dev root@fedora:~# mdadm --fail /dev/md0 loop0 mdadm: set loop0 faulty in /dev/md0 root@fedora:~# mdadm --fail /dev/md0 loop1 mdadm: set device faulty failed for loop1: Device or resource busy root@fedora:~# dmesg | tail -n 4 [ 1314.359674] md/raid1:md0: Disk failure on loop0, disabling device. md/raid1:md0: Operation continuing on 1 devices. [ 1315.506633] md/raid1:md0: Disk failure on loop1, disabling device. md/raid1:md0: Operation continuing on 0 devices. root@fedora:~# Fixes: 9a567843f7ce ("md: allow last device to be forcibly removed from RAID1/RAID10.") Signed-off-by: Kenta Akagi --- drivers/md/raid1.c | 9 +++++---- drivers/md/raid10.c | 9 +++++---- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index e75067c1529e..41f3ba11b823 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1805,6 +1805,11 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev) update_lastdev(conf); } set_bit(Faulty, &rdev->flags); + if ((conf->raid_disks - mddev->degraded) > 0) + pr_crit("md/raid1:%s: Disk failure on %pg, disabling device.\n" + "md/raid1:%s: Operation continuing on %d devices.\n", + mdname(mddev), rdev->bdev, + mdname(mddev), conf->raid_disks - mddev->degraded); spin_unlock_irqrestore(&conf->device_lock, flags); /* * if recovery is running, make sure it aborts. @@ -1812,10 +1817,6 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev) set_bit(MD_RECOVERY_INTR, &mddev->recovery); set_mask_bits(&mddev->sb_flags, 0, BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); - pr_crit("md/raid1:%s: Disk failure on %pg, disabling device.\n" - "md/raid1:%s: Operation continuing on %d devices.\n", - mdname(mddev), rdev->bdev, - mdname(mddev), conf->raid_disks - mddev->degraded); } static void print_conf(struct r1conf *conf) diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 4075b8ee5330..c5c35f37d739 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -2059,11 +2059,12 @@ static void raid10_error(struct mddev *mddev, struct md_rdev *rdev) set_bit(Faulty, &rdev->flags); set_mask_bits(&mddev->sb_flags, 0, BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); + if (enough(conf, -1)) + pr_crit("md/raid10:%s: Disk failure on %pg, disabling device.\n" + "md/raid10:%s: Operation continuing on %d devices.\n", + mdname(mddev), rdev->bdev, + mdname(mddev), conf->geo.raid_disks - mddev->degraded); spin_unlock_irqrestore(&conf->device_lock, flags); - pr_crit("md/raid10:%s: Disk failure on %pg, disabling device.\n" - "md/raid10:%s: Operation continuing on %d devices.\n", - mdname(mddev), rdev->bdev, - mdname(mddev), conf->geo.raid_disks - mddev->degraded); } static void print_conf(struct r10conf *conf)