diff --git a/drivers/md/md.c b/drivers/md/md.c
index 09042b060086..9874f7052f9f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -701,6 +701,7 @@ int mddev_init(struct mddev *mddev)
 	atomic_set(&mddev->openers, 0);
 	atomic_set(&mddev->sync_seq, 0);
 	spin_lock_init(&mddev->lock);
+	spin_lock_init(&mddev->error_handle_lock);
 	init_waitqueue_head(&mddev->sb_wait);
 	init_waitqueue_head(&mddev->recovery_wait);
 	mddev->reshape_position = MaxSector;
@@ -986,14 +987,9 @@ static void super_written(struct bio *bio)
 	if (bio->bi_status) {
 		pr_err("md: %s gets error=%d\n", __func__,
 		       blk_status_to_errno(bio->bi_status));
-		md_error(mddev, rdev);
-		if (!test_bit(Faulty, &rdev->flags)
-		    && (bio->bi_opf & MD_FAILFAST)) {
+		if (!md_bio_failure_error(mddev, rdev, bio))
 			set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags);
-			set_bit(LastDev, &rdev->flags);
-		}
-	} else
-		clear_bit(LastDev, &rdev->flags);
+	}
 
 	bio_put(bio);
 
@@ -8186,7 +8182,7 @@ void md_unregister_thread(struct mddev *mddev, struct md_thread __rcu **threadp)
 }
 EXPORT_SYMBOL(md_unregister_thread);
 
-void md_error(struct mddev *mddev, struct md_rdev *rdev)
+void _md_error(struct mddev *mddev, struct md_rdev *rdev)
 {
 	if (!rdev || test_bit(Faulty, &rdev->flags))
 		return;
@@ -8211,8 +8207,57 @@ void md_error(struct mddev *mddev, struct md_rdev *rdev)
 		queue_work(md_misc_wq, &mddev->event_work);
 	md_new_event();
 }
+
+void md_error(struct mddev *mddev, struct md_rdev *rdev)
+{
+	spin_lock(&mddev->error_handle_lock);
+	_md_error(mddev, rdev);
+	spin_unlock(&mddev->error_handle_lock);
+}
 EXPORT_SYMBOL(md_error);
 
+/**
+ * md_bio_failure_error() - md error handler for MD_FAILFAST bios
+ * @mddev: affected md device.
+ * @rdev: member device to fail.
+ * @bio: bio that triggered the device failure.
+ *
+ * This is almost the same as md_error(): it is serialized at the same
+ * level as md_error(), marks the rdev as Faulty and updates the mddev
+ * state. However, it does nothing when all of the following conditions
+ * are met, because MD_FAILFAST bios must not stop the array:
+ * * the personality is RAID1 or RAID10
+ * * LastDev is set - if this rdev becomes Faulty, the mddev will stop
+ * * the failed bio has MD_FAILFAST set
+ *
+ * Returns: true if _md_error() was called, false if not.
+ */
+bool md_bio_failure_error(struct mddev *mddev, struct md_rdev *rdev, struct bio *bio)
+{
+	bool do_md_error = true;
+
+	spin_lock(&mddev->error_handle_lock);
+	if (mddev->pers) {
+		if (mddev->pers->head.id == ID_RAID1 ||
+		    mddev->pers->head.id == ID_RAID10) {
+			if (test_bit(LastDev, &rdev->flags) &&
+			    test_bit(FailFast, &rdev->flags) &&
+			    bio != NULL && (bio->bi_opf & MD_FAILFAST))
+				do_md_error = false;
+		}
+	}
+
+	if (do_md_error)
+		_md_error(mddev, rdev);
+	else
+		pr_warn_ratelimited("md: %s: %s didn't do anything for %pg\n",
+				    mdname(mddev), __func__, rdev->bdev);
+
+	spin_unlock(&mddev->error_handle_lock);
+	return do_md_error;
+}
+EXPORT_SYMBOL(md_bio_failure_error);
+
 /* seq_file implementation /proc/mdstat */
 
 static void status_unused(struct seq_file *seq)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index d45a9e6ead80..6ea42f1d3e25 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -281,9 +281,10 @@ enum flag_bits {
 				 * It is expects that no bad block log
 				 * is present.
 				 */
-	LastDev,		/* Seems to be the last working dev as
-				 * it didn't fail, so don't use FailFast
-				 * any more for metadata
+	LastDev,		/* This is the last working rdev, so
+				 * don't use FailFast any more for
+				 * metadata and don't fail the rdev
+				 * on a FailFast bio failure.
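For reviewers, the gate added in md_bio_failure_error() reduces to a four-way predicate. Below is a minimal userspace sketch of that decision logic, not part of the patch; the kernel flag tests are collapsed into plain booleans for illustration:

#include <assert.h>
#include <stdbool.h>

/* Simplified model of the md_bio_failure_error() gate: the rdev is
 * spared only when the personality mirrors data (RAID1/RAID10), the
 * rdev is the last working device, it is marked FailFast, and the
 * failed bio itself was submitted with MD_FAILFAST. */
static bool should_fail_rdev(bool is_raid1_or_10, bool last_dev,
			     bool rdev_failfast, bool bio_failfast)
{
	if (is_raid1_or_10 && last_dev && rdev_failfast && bio_failfast)
		return false;	/* keep the array alive; caller retries */
	return true;		/* fall through to _md_error() */
}

int main(void)
{
	/* A failfast write to the last device must not stop the array. */
	assert(!should_fail_rdev(true, true, true, true));
	/* The same failure on a non-last device still fails the rdev. */
	assert(should_fail_rdev(true, false, true, true));
	/* A plain (non-failfast) bio failing the last device is fatal. */
	assert(should_fail_rdev(true, true, true, false));
	return 0;
}

This is also why super_written() above can simply request a superblock rewrite on a false return: the false path is exactly the "last device, failfast metadata write" case.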
 				 */
 	CollisionCheck,		/*
 				 * check if there is collision between raid1
@@ -619,6 +620,9 @@ struct mddev {
 	/* The sequence number for sync thread */
 	atomic_t sync_seq;
 
+	/* Lock for serializing md_error */
+	spinlock_t error_handle_lock;
+
 	bool	has_superblocks:1;
 	bool	fail_last_dev:1;
 	bool	serialize_policy:1;
@@ -879,7 +883,9 @@ extern void md_write_start(struct mddev *mddev, struct bio *bi);
 extern void md_write_inc(struct mddev *mddev, struct bio *bi);
 extern void md_write_end(struct mddev *mddev);
 extern void md_done_sync(struct mddev *mddev, int blocks, int ok);
+void _md_error(struct mddev *mddev, struct md_rdev *rdev);
 extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
+extern bool md_bio_failure_error(struct mddev *mddev, struct md_rdev *rdev, struct bio *bio);
 extern void md_finish_reshape(struct mddev *mddev);
 void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
 			   struct bio *bio, sector_t start, sector_t size);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 64b8176907a9..41f3ba11b823 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -471,7 +471,7 @@ static void raid1_end_write_request(struct bio *bio)
 		    (bio->bi_opf & MD_FAILFAST) &&
 		    /* We never try FailFast to WriteMostly devices */
 		    !test_bit(WriteMostly, &rdev->flags)) {
-			md_error(r1_bio->mddev, rdev);
+			md_bio_failure_error(r1_bio->mddev, rdev, bio);
 		}
 
 		/*
@@ -1735,6 +1735,33 @@ static void raid1_status(struct seq_file *seq, struct mddev *mddev)
 	seq_printf(seq, "]");
 }
 
+/**
+ * update_lastdev - Set or clear the LastDev flag for all rdevs in the array
+ * @conf: pointer to r1conf
+ *
+ * Sets LastDev on a device that is In_sync and that the array cannot
+ * survive losing. Otherwise, clears it.
+ *
+ * Caller must hold ->device_lock.
+ */
+static void update_lastdev(struct r1conf *conf)
+{
+	int i;
+	int alive_disks = conf->raid_disks - conf->mddev->degraded;
+
+	for (i = 0; i < conf->raid_disks; i++) {
+		struct md_rdev *rdev = conf->mirrors[i].rdev;
+
+		if (rdev) {
+			if (test_bit(In_sync, &rdev->flags) &&
+			    alive_disks == 1)
+				set_bit(LastDev, &rdev->flags);
+			else
+				clear_bit(LastDev, &rdev->flags);
+		}
+	}
+}
+
 /**
  * raid1_error() - RAID1 error handler.
  * @mddev: affected md device.
@@ -1761,6 +1788,10 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
 	if (test_bit(In_sync, &rdev->flags) &&
 	    (conf->raid_disks - mddev->degraded) == 1) {
 		set_bit(MD_BROKEN, &mddev->flags);
+		pr_crit("md/raid1:%s: Disk failure on %pg, this is the last device.\n"
+			"md/raid1:%s: Cannot continue operation (%d/%d failed).\n",
+			mdname(mddev), rdev->bdev,
+			mdname(mddev), mddev->degraded + 1, conf->raid_disks);
 
 		if (!mddev->fail_last_dev) {
 			conf->recovery_disabled = mddev->recovery_disabled;
@@ -1769,9 +1800,16 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
 		}
 	}
 	set_bit(Blocked, &rdev->flags);
-	if (test_and_clear_bit(In_sync, &rdev->flags))
+	if (test_and_clear_bit(In_sync, &rdev->flags)) {
 		mddev->degraded++;
+		update_lastdev(conf);
+	}
 	set_bit(Faulty, &rdev->flags);
+	if ((conf->raid_disks - mddev->degraded) > 0)
+		pr_crit("md/raid1:%s: Disk failure on %pg, disabling device.\n"
+			"md/raid1:%s: Operation continuing on %d devices.\n",
+			mdname(mddev), rdev->bdev,
+			mdname(mddev), conf->raid_disks - mddev->degraded);
 	spin_unlock_irqrestore(&conf->device_lock, flags);
 	/*
 	 * if recovery is running, make sure it aborts.
 	 */
@@ -1779,10 +1817,6 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 	set_mask_bits(&mddev->sb_flags, 0,
 		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
-	pr_crit("md/raid1:%s: Disk failure on %pg, disabling device.\n"
-		"md/raid1:%s: Operation continuing on %d devices.\n",
-		mdname(mddev), rdev->bdev,
-		mdname(mddev), conf->raid_disks - mddev->degraded);
 }
 
 static void print_conf(struct r1conf *conf)
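The raid1_error()/update_lastdev() interplay above is easiest to trace on a two-disk mirror. A toy model of the flag transitions (userspace sketch; the struct is an invented stand-in for the r1conf mirrors array, not the kernel type):

#include <assert.h>
#include <stdbool.h>

struct toy_rdev { bool in_sync; bool last_dev; };

/* Mirrors update_lastdev(): LastDev is set only while exactly one
 * In_sync device remains, i.e. while losing it would stop the array. */
static void toy_update_lastdev(struct toy_rdev *d, int n)
{
	int alive = 0;

	for (int i = 0; i < n; i++)
		alive += d[i].in_sync;
	for (int i = 0; i < n; i++)
		d[i].last_dev = d[i].in_sync && alive == 1;
}

int main(void)
{
	struct toy_rdev d[2] = { { true, false }, { true, false } };

	toy_update_lastdev(d, 2);		/* healthy mirror */
	assert(!d[0].last_dev && !d[1].last_dev);

	d[1].in_sync = false;			/* disk 1 fails */
	toy_update_lastdev(d, 2);
	assert(d[0].last_dev);			/* survivor is LastDev */

	d[1].in_sync = true;			/* spare rebuilt */
	toy_update_lastdev(d, 2);
	assert(!d[0].last_dev);			/* flag cleared again */
	return 0;
}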
@@ -1866,6 +1900,7 @@ static int raid1_spare_active(struct mddev *mddev)
 		}
 	}
 	mddev->degraded -= count;
+	update_lastdev(conf);
 	spin_unlock_irqrestore(&conf->device_lock, flags);
 
 	print_conf(conf);
@@ -2150,8 +2185,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
 		if (test_bit(FailFast, &rdev->flags)) {
 			/* Don't try recovering from here - just fail it
 			 * ... unless it is the last working device of course */
-			md_error(mddev, rdev);
-			if (test_bit(Faulty, &rdev->flags))
+			if (md_bio_failure_error(mddev, rdev, bio))
 				/* Don't try to read from here, but make sure
 				 * put_buf does it's thing
 				 */
@@ -2490,7 +2524,23 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 		}
 	}
 }
 
-static bool narrow_write_error(struct r1bio *r1_bio, int i)
+/**
+ * narrow_write_error() - Retry a failed write and record bad blocks
+ * @r1_bio: the r1bio containing the write error
+ * @i: which device to retry
+ * @force: retry the write even if the bad block log is disabled
+ *
+ * Rewrites the bio, splitting it at the least common multiple of the logical
+ * block size and the badblock size. Blocks that fail to be written are marked
+ * as bad. If the bbl is disabled and @force is not set, no retry is attempted.
+ * If the bbl is disabled and @force is set, the write is retried the same way.
+ *
+ * Return:
+ * * %true - all blocks were written or marked bad successfully
+ * * %false - the bbl is disabled, or one or more blocks failed to be
+ *	      written and could not be marked bad
+ */
+static bool narrow_write_error(struct r1bio *r1_bio, int i, bool force)
 {
 	struct mddev *mddev = r1_bio->mddev;
 	struct r1conf *conf = mddev->private;
@@ -2511,13 +2561,17 @@ static bool narrow_write_error(struct r1bio *r1_bio, int i)
 	sector_t sector;
 	int sectors;
 	int sect_to_write = r1_bio->sectors;
-	bool ok = true;
+	bool write_ok = true;
+	bool setbad_ok = true;
+	bool bbl_enabled = !(rdev->badblocks.shift < 0);
 
-	if (rdev->badblocks.shift < 0)
+	if (!force && !bbl_enabled)
 		return false;
 
-	block_sectors = roundup(1 << rdev->badblocks.shift,
-				bdev_logical_block_size(rdev->bdev) >> 9);
+	block_sectors = bdev_logical_block_size(rdev->bdev) >> 9;
+	if (bbl_enabled)
+		block_sectors = roundup(1 << rdev->badblocks.shift,
+					block_sectors);
 	sector = r1_bio->sector;
 	sectors = ((sector + block_sectors)
 		   & ~(sector_t)(block_sectors - 1))
@@ -2545,18 +2599,22 @@ static bool narrow_write_error(struct r1bio *r1_bio, int i)
 		bio_trim(wbio, sector - r1_bio->sector, sectors);
 		wbio->bi_iter.bi_sector += rdev->data_offset;
 
-		if (submit_bio_wait(wbio) < 0)
+		if (submit_bio_wait(wbio) < 0) {
 			/* failure! */
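The reworked block_sectors computation above picks the retry granularity: the badblock unit rounded up to the logical block size when the bbl is enabled, or the bare logical block size on a forced retry without one. A quick userspace check of that arithmetic, assuming a 512-byte logical block device and the common badblocks shift of 3 (illustrative values only):

#include <assert.h>
#include <stdint.h>

/* roundup() as in include/linux/math.h */
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
	int shift = 3;			/* badblocks.shift: 8-sector units */
	int lbs_sectors = 512 >> 9;	/* 512-byte logical blocks */
	uint64_t sector = 1234;		/* start of the failed range */

	/* bbl enabled: retry in badblock-sized, aligned chunks. */
	int block_sectors = roundup(1 << shift, lbs_sectors);
	assert(block_sectors == 8);

	/* The first chunk only reaches the next aligned boundary,
	 * matching the sectors computation in narrow_write_error(). */
	int first = (int)(((sector + block_sectors) &
			   ~(uint64_t)(block_sectors - 1)) - sector);
	assert(first == 6);		/* 1234..1239, boundary at 1240 */

	/* bbl disabled but @force set: fall back to the logical block
	 * size alone, here single 512-byte sectors. */
	block_sectors = lbs_sectors;
	assert(block_sectors == 1);
	return 0;
}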
-			ok = rdev_set_badblocks(rdev, sector,
-						sectors, 0)
-				&& ok;
+			write_ok = false;
+			if (bbl_enabled)
+				setbad_ok = rdev_set_badblocks(rdev, sector,
+							       sectors, 0)
+					&& setbad_ok;
+		}
 
 		bio_put(wbio);
 		sect_to_write -= sectors;
 		sector += sectors;
 		sectors = block_sectors;
 	}
-	return ok;
+	return (write_ok ||
+		(bbl_enabled && setbad_ok));
 }
 
 static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
@@ -2587,26 +2645,36 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
 	int m, idx;
 	bool fail = false;
 
-	for (m = 0; m < conf->raid_disks * 2 ; m++)
-		if (r1_bio->bios[m] == IO_MADE_GOOD) {
-			struct md_rdev *rdev = conf->mirrors[m].rdev;
+	for (m = 0; m < conf->raid_disks * 2 ; m++) {
+		struct md_rdev *rdev = conf->mirrors[m].rdev;
+		struct bio *bio = r1_bio->bios[m];
+
+		if (bio == IO_MADE_GOOD) {
 			rdev_clear_badblocks(rdev,
 					     r1_bio->sector,
 					     r1_bio->sectors, 0);
 			rdev_dec_pending(rdev, conf->mddev);
-		} else if (r1_bio->bios[m] != NULL) {
+		} else if (bio != NULL) {
 			/* This drive got a write error.  We need to
 			 * narrow down and record precise write
 			 * errors.
 			 */
 			fail = true;
-			if (!narrow_write_error(r1_bio, m))
-				md_error(conf->mddev,
-					 conf->mirrors[m].rdev);
+			if (!narrow_write_error(
+				    r1_bio, m,
+				    test_bit(FailFast, &rdev->flags) &&
+				    (bio->bi_opf & MD_FAILFAST)))
+				md_error(conf->mddev, rdev);
 			/* an I/O failed, we can't clear the bitmap */
-			rdev_dec_pending(conf->mirrors[m].rdev,
-					 conf->mddev);
+			else if (test_bit(In_sync, &rdev->flags) &&
+				 !test_bit(Faulty, &rdev->flags) &&
+				 rdev_has_badblock(rdev,
+						   r1_bio->sector,
+						   r1_bio->sectors) == 0)
+				set_bit(R1BIO_Uptodate, &r1_bio->state);
+			rdev_dec_pending(rdev, conf->mddev);
 		}
+	}
 	if (fail) {
 		spin_lock_irq(&conf->device_lock);
 		list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
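handle_write_finished() now fails the rdev only when narrow_write_error() returns false, so the return expression carries the whole policy. A compact model of "return (write_ok || (bbl_enabled && setbad_ok))" and the cases the caller distinguishes (illustrative only):

#include <assert.h>
#include <stdbool.h>

/* Mirrors narrow_write_error()'s result: success means either every
 * retried chunk was written, or the bbl is enabled and every failed
 * chunk was at least recorded as a bad block. */
static bool nwe_result(bool write_ok, bool bbl_enabled, bool setbad_ok)
{
	return write_ok || (bbl_enabled && setbad_ok);
}

int main(void)
{
	/* All rewrites succeeded: fine with or without a bbl. */
	assert(nwe_result(true, false, true));
	/* Rewrite failed but every bad range was logged: still ok. */
	assert(nwe_result(false, true, true));
	/* Rewrite failed and the bbl could not record it: the caller
	 * falls back to md_error() and the rdev is failed. */
	assert(!nwe_result(false, true, false));
	/* Forced retry without a bbl that still fails: same fallback. */
	assert(!nwe_result(false, false, true));
	return 0;
}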
@@ -2629,9 +2697,8 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 {
 	struct mddev *mddev = conf->mddev;
-	struct bio *bio;
+	struct bio *bio, *updated_bio;
 	struct md_rdev *rdev;
-	sector_t sector;
 
 	clear_bit(R1BIO_ReadError, &r1_bio->state);
 	/* we got a read error. Maybe the drive is bad.  Maybe just
@@ -2644,29 +2711,30 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
 	 */
 
 	bio = r1_bio->bios[r1_bio->read_disk];
-	bio_put(bio);
-	r1_bio->bios[r1_bio->read_disk] = NULL;
+	updated_bio = NULL;
 	rdev = conf->mirrors[r1_bio->read_disk].rdev;
-	if (mddev->ro == 0
-	    && !test_bit(FailFast, &rdev->flags)) {
-		freeze_array(conf, 1);
-		fix_read_error(conf, r1_bio);
-		unfreeze_array(conf);
-	} else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) {
-		md_error(mddev, rdev);
+	if (mddev->ro == 0) {
+		if (!test_bit(FailFast, &rdev->flags)) {
+			freeze_array(conf, 1);
+			fix_read_error(conf, r1_bio);
+			unfreeze_array(conf);
+		} else {
+			md_bio_failure_error(mddev, rdev, bio);
+		}
 	} else {
-		r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
+		updated_bio = IO_BLOCKED;
 	}
 
+	bio_put(bio);
+	r1_bio->bios[r1_bio->read_disk] = updated_bio;
+
 	rdev_dec_pending(rdev, conf->mddev);
-	sector = r1_bio->sector;
-	bio = r1_bio->master_bio;
 
 	/* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */
 	r1_bio->state = 0;
-	raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
-	allow_barrier(conf, sector);
+	raid1_read_request(mddev, r1_bio->master_bio, r1_bio->sectors, r1_bio);
+	allow_barrier(conf, r1_bio->sector);
 }
 
 static void raid1d(struct md_thread *thread)
@@ -3298,6 +3366,7 @@ static int raid1_run(struct mddev *mddev)
 	rcu_assign_pointer(conf->thread, NULL);
 	mddev->private = conf;
 	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
+	update_lastdev(conf);
 
 	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
 
@@ -3451,6 +3520,7 @@ static int raid1_reshape(struct mddev *mddev)
 
 	spin_lock_irqsave(&conf->device_lock, flags);
 	mddev->degraded += (raid_disks - conf->raid_disks);
+	update_lastdev(conf);
 	spin_unlock_irqrestore(&conf->device_lock, flags);
 	conf->raid_disks = mddev->raid_disks = raid_disks;
 	mddev->delta_disks = 0;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c9bd2005bfd0..c5c35f37d739 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -399,6 +399,11 @@ static void raid10_end_read_request(struct bio *bio)
 		 * wait for the 'master' bio.
 		 */
 		set_bit(R10BIO_Uptodate, &r10_bio->state);
+	} else if (test_bit(FailFast, &rdev->flags) &&
+		   test_bit(R10BIO_FailFast, &r10_bio->state)) {
+		/* This was a fail-fast read so we definitely
+		 * want to retry */
+		;
 	} else if (!raid1_should_handle_error(bio)) {
 		uptodate = 1;
 	} else {
@@ -488,7 +493,7 @@ static void raid10_end_write_request(struct bio *bio)
 		dec_rdev = 0;
 		if (test_bit(FailFast, &rdev->flags) &&
 		    (bio->bi_opf & MD_FAILFAST)) {
-			md_error(rdev->mddev, rdev);
+			md_bio_failure_error(rdev->mddev, rdev, bio);
 		}
 
 		/*
@@ -1983,6 +1988,33 @@ static int enough(struct r10conf *conf, int ignore)
 		_enough(conf, 1, ignore);
 }
 
+/**
+ * update_lastdev - Set or clear the LastDev flag for all rdevs in the array
+ * @conf: pointer to r10conf
+ *
+ * Sets LastDev on a device that is In_sync and that the array cannot
+ * survive losing. Otherwise, clears it.
+ *
+ * Caller must hold ->reconfig_mutex or ->device_lock.
+ */
+static void update_lastdev(struct r10conf *conf)
+{
+	int i;
+	int raid_disks = max(conf->geo.raid_disks, conf->prev.raid_disks);
+
+	for (i = 0; i < raid_disks; i++) {
+		struct md_rdev *rdev = conf->mirrors[i].rdev;
+
+		if (rdev) {
+			if (test_bit(In_sync, &rdev->flags) &&
+			    !enough(conf, i))
+				set_bit(LastDev, &rdev->flags);
+			else
+				clear_bit(LastDev, &rdev->flags);
+		}
+	}
+}
+
 /**
  * raid10_error() - RAID10 error handler.
  * @mddev: affected md device.
@@ -2007,25 +2039,32 @@ static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
 	if (test_bit(In_sync, &rdev->flags) &&
 	    !enough(conf, rdev->raid_disk)) {
 		set_bit(MD_BROKEN, &mddev->flags);
+		pr_crit("md/raid10:%s: Disk failure on %pg, this is the last device.\n"
+			"md/raid10:%s: Cannot continue operation (%d/%d failed).\n",
+			mdname(mddev), rdev->bdev,
+			mdname(mddev), mddev->degraded + 1, conf->geo.raid_disks);
 
 		if (!mddev->fail_last_dev) {
 			spin_unlock_irqrestore(&conf->device_lock, flags);
 			return;
 		}
 	}
-	if (test_and_clear_bit(In_sync, &rdev->flags))
+	if (test_and_clear_bit(In_sync, &rdev->flags)) {
 		mddev->degraded++;
+		update_lastdev(conf);
+	}
 	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 	set_bit(Blocked, &rdev->flags);
 	set_bit(Faulty, &rdev->flags);
 	set_mask_bits(&mddev->sb_flags, 0,
 		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
+	if (enough(conf, -1))
+		pr_crit("md/raid10:%s: Disk failure on %pg, disabling device.\n"
+			"md/raid10:%s: Operation continuing on %d devices.\n",
+			mdname(mddev), rdev->bdev,
+			mdname(mddev), conf->geo.raid_disks - mddev->degraded);
 	spin_unlock_irqrestore(&conf->device_lock, flags);
-	pr_crit("md/raid10:%s: Disk failure on %pg, disabling device.\n"
-		"md/raid10:%s: Operation continuing on %d devices.\n",
-		mdname(mddev), rdev->bdev,
-		mdname(mddev), conf->geo.raid_disks - mddev->degraded);
 }
 
 static void print_conf(struct r10conf *conf)
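Unlike RAID1, RAID10 cannot decide survivability from a disk count alone, which is why its update_lastdev() and raid10_error() defer to enough(). A deliberately simplified near-2 model of that check, for intuition only (a stand-in for the kernel's _enough(), which evaluates the real copy geometry):

#include <assert.h>
#include <stdbool.h>

/* Toy 4-disk near-2 layout: disks {0,1} and {2,3} each hold the two
 * copies of their stripes. The array survives losing disk 'lose' iff
 * every mirror pair keeps at least one working member. */
static bool toy_enough(const bool alive[4], int lose)
{
	for (int p = 0; p < 4; p += 2) {
		bool a = alive[p] && p != lose;
		bool b = alive[p + 1] && (p + 1) != lose;

		if (!a && !b)
			return false;
	}
	return true;
}

int main(void)
{
	bool alive[4] = { true, true, true, true };

	/* Healthy array: any single disk is expendable, so none of
	 * them would be flagged LastDev by update_lastdev(). */
	for (int i = 0; i < 4; i++)
		assert(toy_enough(alive, i));

	alive[1] = false;		/* disk 1 fails */
	/* Disk 0 now holds the sole copy of its stripes: losing it
	 * would stop the array, so it is the LastDev candidate. */
	assert(!toy_enough(alive, 0));
	assert(toy_enough(alive, 2));
	assert(toy_enough(alive, 3));
	return 0;
}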
@@ -2102,6 +2141,7 @@ static int raid10_spare_active(struct mddev *mddev)
 	}
 	spin_lock_irqsave(&conf->device_lock, flags);
 	mddev->degraded -= count;
+	update_lastdev(conf);
 	spin_unlock_irqrestore(&conf->device_lock, flags);
 
 	print_conf(conf);
@@ -2413,7 +2453,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 			continue;
 		} else if (test_bit(FailFast, &rdev->flags)) {
 			/* Just give up on this device */
-			md_error(rdev->mddev, rdev);
+			md_bio_failure_error(rdev->mddev, rdev, tbio);
 			continue;
 		}
 		/* Ok, we need to write this bio, either to correct an
@@ -2782,7 +2822,22 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
 	}
 }
 
-static bool narrow_write_error(struct r10bio *r10_bio, int i)
+/**
+ * narrow_write_error() - Retry a failed write and record bad blocks
+ * @r10_bio: the r10bio containing the write error
+ * @i: which device to retry
+ * @force: retry the write even if the bad block log is disabled
+ *
+ * Rewrites the bio, splitting it at the least common multiple of the logical
+ * block size and the badblock size. Blocks that fail to be written are marked
+ * as bad. If the bbl is disabled and @force is not set, no retry is attempted.
+ *
+ * Return:
+ * * %true - all blocks were written or marked bad successfully
+ * * %false - the bbl is disabled, or one or more blocks failed to be
+ *	      written and could not be marked bad
+ */
+static bool narrow_write_error(struct r10bio *r10_bio, int i, bool force)
 {
 	struct bio *bio = r10_bio->master_bio;
 	struct mddev *mddev = r10_bio->mddev;
@@ -2803,13 +2858,17 @@ static bool narrow_write_error(struct r10bio *r10_bio, int i)
 	sector_t sector;
 	int sectors;
 	int sect_to_write = r10_bio->sectors;
-	bool ok = true;
+	bool write_ok = true;
+	bool setbad_ok = true;
+	bool bbl_enabled = !(rdev->badblocks.shift < 0);
 
-	if (rdev->badblocks.shift < 0)
+	if (!force && !bbl_enabled)
 		return false;
 
-	block_sectors = roundup(1 << rdev->badblocks.shift,
-				bdev_logical_block_size(rdev->bdev) >> 9);
+	block_sectors = bdev_logical_block_size(rdev->bdev) >> 9;
+	if (bbl_enabled)
+		block_sectors = roundup(1 << rdev->badblocks.shift,
+					block_sectors);
 	sector = r10_bio->sector;
 	sectors = ((r10_bio->sector + block_sectors)
 		   & ~(sector_t)(block_sectors - 1))
@@ -2829,18 +2888,22 @@ static bool narrow_write_error(struct r10bio *r10_bio, int i)
 			choose_data_offset(r10_bio, rdev);
 		wbio->bi_opf = REQ_OP_WRITE;
 
-		if (submit_bio_wait(wbio) < 0)
+		if (submit_bio_wait(wbio) < 0) {
 			/* Failure! */
-			ok = rdev_set_badblocks(rdev, wsector,
-						sectors, 0)
-				&& ok;
+			write_ok = false;
+			if (bbl_enabled)
+				setbad_ok = rdev_set_badblocks(rdev, wsector,
+							       sectors, 0)
+					&& setbad_ok;
+		}
 
 		bio_put(wbio);
 		sect_to_write -= sectors;
 		sector += sectors;
 		sectors = block_sectors;
 	}
-	return ok;
+	return (write_ok ||
+		(bbl_enabled && setbad_ok));
 }
 
 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
@@ -2868,8 +2931,9 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
 		freeze_array(conf, 1);
 		fix_read_error(conf, mddev, r10_bio);
 		unfreeze_array(conf);
-	} else
-		md_error(mddev, rdev);
+	} else {
+		md_bio_failure_error(mddev, rdev, bio);
+	}
 
 	rdev_dec_pending(rdev, mddev);
 	r10_bio->state = 0;
@@ -2945,8 +3009,17 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
 				rdev_dec_pending(rdev, conf->mddev);
 			} else if (bio != NULL && bio->bi_status) {
 				fail = true;
-				if (!narrow_write_error(r10_bio, m))
+				if (!narrow_write_error(
+					    r10_bio, m,
+					    test_bit(FailFast, &rdev->flags) &&
+					    (bio->bi_opf & MD_FAILFAST)))
 					md_error(conf->mddev, rdev);
+				else if (test_bit(In_sync, &rdev->flags) &&
+					 !test_bit(Faulty, &rdev->flags) &&
+					 rdev_has_badblock(rdev,
+							   r10_bio->devs[m].addr,
+							   r10_bio->sectors) == 0)
+					set_bit(R10BIO_Uptodate, &r10_bio->state);
 				rdev_dec_pending(rdev, conf->mddev);
 			}
 			bio = r10_bio->devs[m].repl_bio;
@@ -4161,6 +4234,7 @@ static int raid10_run(struct mddev *mddev)
 	md_set_array_sectors(mddev, size);
 	mddev->resync_max_sectors = size;
 	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
+	update_lastdev(conf);
 
 	if (md_integrity_register(mddev))
 		goto out_free_conf;
@@ -4569,6 +4643,7 @@ static int raid10_start_reshape(struct mddev *mddev)
 	 */
 	spin_lock_irq(&conf->device_lock);
 	mddev->degraded = calc_degraded(conf);
+	update_lastdev(conf);
 	spin_unlock_irq(&conf->device_lock);
 	mddev->raid_disks = conf->geo.raid_disks;
 	mddev->reshape_position = conf->reshape_progress;
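For completeness, the relocated pr_crit() wording renders like this on a two-disk mirror losing both members in turn (hypothetical device and array names, derived from the format strings above):

md/raid1:md0: Disk failure on sdb1, disabling device.
md/raid1:md0: Operation continuing on 1 devices.
md/raid1:md0: Disk failure on sdc1, this is the last device.
md/raid1:md0: Cannot continue operation (2/2 failed).

Emitting the messages under device_lock, split by outcome, keeps the "continuing" text from ever appearing for a failure that actually stopped the array.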