/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-1 management functions.
 *
 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
 *
 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
 * bitmapped intelligence in resync:
 *
 * - bitmap marked during normal i/o
 * - bitmap used to skip nondirty blocks during sync
 *
 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
 * - persistent bitmap code
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include "md.h"
#include "raid1.h"
#include "bitmap.h"

#define DEBUG 0
#if DEBUG
#define PRINTK(x...) printk(x)
#else
#define PRINTK(x...)
#endif

/*
 * Number of guaranteed r1bios in case of extreme VM load:
 */
#define NR_RAID1_BIOS 256


static void unplug_slaves(mddev_t *mddev);

static void allow_barrier(conf_t *conf);
static void lower_barrier(conf_t *conf);

static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	r1bio_t *r1_bio;
	int size = offsetof(r1bio_t, bios[pi->raid_disks]);

	/* allocate a r1bio with room for raid_disks entries in the bios array */
	r1_bio = kzalloc(size, gfp_flags);
	if (!r1_bio && pi->mddev)
		unplug_slaves(pi->mddev);

	return r1_bio;
}

static void r1bio_pool_free(void *r1_bio, void *data)
{
	kfree(r1_bio);
}

#define RESYNC_BLOCK_SIZE (64*1024)
//#define RESYNC_BLOCK_SIZE PAGE_SIZE
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
#define RESYNC_WINDOW (2048*1024)

static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct pool_info *pi = data;
	struct page *page;
	r1bio_t *r1_bio;
	struct bio *bio;
	int i, j;

	r1_bio = r1bio_pool_alloc(gfp_flags, pi);
	if (!r1_bio) {
		unplug_slaves(pi->mddev);
		return NULL;
	}

	/*
	 * Allocate bios : 1 for reading, n-1 for writing
	 */
	for (j = pi->raid_disks ; j-- ; ) {
		bio = bio_alloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r1_bio->bios[j] = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them to
	 * the first bio.
	 * If this is a user-requested check/repair, allocate
	 * RESYNC_PAGES for each bio.
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
		j = pi->raid_disks;
	else
		j = 1;
	while(j--) {
		bio = r1_bio->bios[j];
		for (i = 0; i < RESYNC_PAGES; i++) {
			page = alloc_page(gfp_flags);
			if (unlikely(!page))
				goto out_free_pages;

			bio->bi_io_vec[i].bv_page = page;
			bio->bi_vcnt = i+1;
		}
	}
	/* If not user-requested, copy the page pointers to all bios */
	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
		for (i=0; i<RESYNC_PAGES ; i++)
			for (j=1; j<pi->raid_disks; j++)
				r1_bio->bios[j]->bi_io_vec[i].bv_page =
					r1_bio->bios[0]->bi_io_vec[i].bv_page;
	}

	r1_bio->master_bio = NULL;

	return r1_bio;

out_free_pages:
	for (j=0 ; j < pi->raid_disks; j++)
		for (i=0; i < r1_bio->bios[j]->bi_vcnt ; i++)
			put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
	j = -1;
out_free_bio:
	while ( ++j < pi->raid_disks )
		bio_put(r1_bio->bios[j]);
	r1bio_pool_free(r1_bio, data);
	return NULL;
}

static void r1buf_pool_free(void *__r1_bio, void *data)
{
	struct pool_info *pi = data;
	int i,j;
	r1bio_t *r1bio = __r1_bio;

	for (i = 0; i < RESYNC_PAGES; i++)
		for (j = pi->raid_disks; j-- ;) {
			if (j == 0 ||
			    r1bio->bios[j]->bi_io_vec[i].bv_page !=
			    r1bio->bios[0]->bi_io_vec[i].bv_page)
				safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page);
		}
	for (i=0 ; i < pi->raid_disks; i++)
		bio_put(r1bio->bios[i]);

	r1bio_pool_free(r1bio, data);
}

static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
{
	int i;

	for (i = 0; i < conf->raid_disks; i++) {
		struct bio **bio = r1_bio->bios + i;
		if (*bio && *bio != IO_BLOCKED)
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r1bio(r1bio_t *r1_bio)
{
	conf_t *conf = r1_bio->mddev->private;

	/*
	 * Wake up any possible resync thread that waits for the device
	 * to go idle.
	 */
	allow_barrier(conf);

	put_all_bios(conf, r1_bio);
	mempool_free(r1_bio, conf->r1bio_pool);
}

static void put_buf(r1bio_t *r1_bio)
{
	conf_t *conf = r1_bio->mddev->private;
	int i;

	for (i=0; i<conf->raid_disks; i++) {
		struct bio *bio = r1_bio->bios[i];
		if (bio->bi_end_io)
			rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
	}

	mempool_free(r1_bio, conf->r1buf_pool);

	lower_barrier(conf);
}

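/* Queue an r1bio that needs further attention (e.g. a failed or deferred
 * request) on conf->retry_list and wake the md thread to process it.
 */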
static void reschedule_retry(r1bio_t *r1_bio)
{
	unsigned long flags;
	mddev_t *mddev = r1_bio->mddev;
	conf_t *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r1_bio->retry_list, &conf->retry_list);
	conf->nr_queued ++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	wake_up(&conf->wait_barrier);
	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void raid_end_bio_io(r1bio_t *r1_bio)
{
	struct bio *bio = r1_bio->master_bio;

	/* if nobody has done the final endio yet, do it now */
	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
		PRINTK(KERN_DEBUG "raid1: sync end %s on sectors %llu-%llu\n",
			(bio_data_dir(bio) == WRITE) ? "write" : "read",
			(unsigned long long) bio->bi_sector,
			(unsigned long long) bio->bi_sector +
				(bio->bi_size >> 9) - 1);

		bio_endio(bio,
			test_bit(R1BIO_Uptodate, &r1_bio->state) ? 0 : -EIO);
	}
	free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, r1bio_t *r1_bio)
{
	conf_t *conf = r1_bio->mddev->private;

	conf->mirrors[disk].head_position =
		r1_bio->sector + (r1_bio->sectors);
}

static void raid1_end_read_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t *r1_bio = (r1bio_t *)(bio->bi_private);
	int mirror;
	conf_t *conf = r1_bio->mddev->private;

	mirror = r1_bio->read_disk;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(mirror, r1_bio);

	if (uptodate)
		set_bit(R1BIO_Uptodate, &r1_bio->state);
	else {
		/* If all other devices have failed, we want to return
		 * the error upwards rather than fail the last device.
		 * Here we redefine "uptodate" to mean "Don't want to retry"
		 */
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		if (r1_bio->mddev->degraded == conf->raid_disks ||
		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
		     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
			uptodate = 1;
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

	if (uptodate)
		raid_end_bio_io(r1_bio);
	else {
		/*
		 * oops, read error:
		 */
		char b[BDEVNAME_SIZE];
		if (printk_ratelimit())
			printk(KERN_ERR "raid1: %s: rescheduling sector %llu\n",
			       bdevname(conf->mirrors[mirror].rdev->bdev,b), (unsigned long long)r1_bio->sector);
		reschedule_retry(r1_bio);
	}

	rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
}

static void raid1_end_write_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	r1bio_t *r1_bio = (r1bio_t *)(bio->bi_private);
	int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
	conf_t *conf = r1_bio->mddev->private;
	struct bio *to_put = NULL;


	for (mirror = 0; mirror < conf->raid_disks; mirror++)
		if (r1_bio->bios[mirror] == bio)
			break;

	if (error == -EOPNOTSUPP && test_bit(R1BIO_Barrier, &r1_bio->state)) {
		set_bit(BarriersNotsupp, &conf->mirrors[mirror].rdev->flags);
		set_bit(R1BIO_BarrierRetry, &r1_bio->state);
		r1_bio->mddev->barriers_work = 0;
		/* Don't rdev_dec_pending in this branch - keep it for the retry */
	} else {
		/*
		 * this branch is our 'one mirror IO has finished' event handler:
		 */
		r1_bio->bios[mirror] = NULL;
		to_put = bio;
		if (!uptodate) {
			md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
			/* an I/O failed, we can't clear the bitmap */
			set_bit(R1BIO_Degraded, &r1_bio->state);
		} else
			/*
			 * Set R1BIO_Uptodate in our master bio, so that
			 * we will return a good error code to the higher
			 * levels even if IO on some other mirrored buffer fails.
			 *
			 * The 'master' represents the composite IO operation to
			 * user-side. So if something waits for IO, then it will
			 * wait for the 'master' bio.
			 */
			set_bit(R1BIO_Uptodate, &r1_bio->state);

		update_head_pos(mirror, r1_bio);

		if (behind) {
			if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
				atomic_dec(&r1_bio->behind_remaining);

			/* In behind mode, we ACK the master bio once the I/O has safely
			 * reached all non-writemostly disks. Setting the Returned bit
			 * ensures that this gets done only once -- we don't ever want to
			 * return -EIO here, instead we'll wait */

			if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
			    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
				/* Maybe we can return now */
				if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
					struct bio *mbio = r1_bio->master_bio;
					PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n",
					       (unsigned long long) mbio->bi_sector,
					       (unsigned long long) mbio->bi_sector +
					       (mbio->bi_size >> 9) - 1);
					bio_endio(mbio, 0);
				}
			}
		}
		rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
	}
	/*
	 *
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	if (atomic_dec_and_test(&r1_bio->remaining)) {
		if (test_bit(R1BIO_BarrierRetry, &r1_bio->state))
			reschedule_retry(r1_bio);
		else {
			/* it really is the end of this request */
			if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
				/* free extra copy of the data pages */
				int i = bio->bi_vcnt;
				while (i--)
					safe_put_page(bio->bi_io_vec[i].bv_page);
			}
			/* clear the bitmap if all writes complete successfully */
			bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
					r1_bio->sectors,
					!test_bit(R1BIO_Degraded, &r1_bio->state),
					behind);
			md_write_end(r1_bio->mddev);
			raid_end_bio_io(r1_bio);
		}
	}

	if (to_put)
		bio_put(to_put);
}

/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(conf_t *conf, r1bio_t *r1_bio)
{
	const unsigned long this_sector = r1_bio->sector;
	int new_disk = conf->last_used, disk = new_disk;
	int wonly_disk = -1;
	const int sectors = r1_bio->sectors;
	sector_t new_distance, current_distance;
	mdk_rdev_t *rdev;

	rcu_read_lock();
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on, or below the resync window.
	 * We take the first readable disk when above the resync window.
	 */
 retry:
	if (conf->mddev->recovery_cp < MaxSector &&
	    (this_sector + sectors >= conf->next_resync)) {
		/* Choose the first operational device, for consistency */
		new_disk = 0;

		for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
		     r1_bio->bios[new_disk] == IO_BLOCKED ||
		     !rdev || !test_bit(In_sync, &rdev->flags)
			     || test_bit(WriteMostly, &rdev->flags);
		     rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) {

			if (rdev && test_bit(In_sync, &rdev->flags) &&
			    r1_bio->bios[new_disk] != IO_BLOCKED)
				wonly_disk = new_disk;

			if (new_disk == conf->raid_disks - 1) {
				new_disk = wonly_disk;
				break;
			}
		}
		goto rb_out;
	}


	/* make sure the disk is operational */
	for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
	     r1_bio->bios[new_disk] == IO_BLOCKED ||
	     !rdev || !test_bit(In_sync, &rdev->flags) ||
		     test_bit(WriteMostly, &rdev->flags);
	     rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) {

		if (rdev && test_bit(In_sync, &rdev->flags) &&
		    r1_bio->bios[new_disk] != IO_BLOCKED)
			wonly_disk = new_disk;

		if (new_disk <= 0)
			new_disk = conf->raid_disks;
		new_disk--;
		if (new_disk == disk) {
			new_disk = wonly_disk;
			break;
		}
	}

	if (new_disk < 0)
		goto rb_out;

	disk = new_disk;
	/* now disk == new_disk == starting point for search */

	/*
	 * Don't change to another disk for sequential reads:
	 */
	if (conf->next_seq_sect == this_sector)
		goto rb_out;
	if (this_sector == conf->mirrors[new_disk].head_position)
		goto rb_out;

	current_distance = abs(this_sector - conf->mirrors[disk].head_position);

	/* Find the disk whose head is closest */

	do {
		if (disk <= 0)
			disk = conf->raid_disks;
		disk--;

		rdev = rcu_dereference(conf->mirrors[disk].rdev);

		if (!rdev || r1_bio->bios[disk] == IO_BLOCKED ||
		    !test_bit(In_sync, &rdev->flags) ||
		    test_bit(WriteMostly, &rdev->flags))
			continue;

		if (!atomic_read(&rdev->nr_pending)) {
			new_disk = disk;
			break;
		}
		new_distance = abs(this_sector - conf->mirrors[disk].head_position);
		if (new_distance < current_distance) {
			current_distance = new_distance;
			new_disk = disk;
		}
	} while (disk != conf->last_used);

 rb_out:


	if (new_disk >= 0) {
		rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
		if (!rdev)
			goto retry;
		atomic_inc(&rdev->nr_pending);
		if (!test_bit(In_sync, &rdev->flags)) {
			/* cannot risk returning a device that failed
			 * before we inc'ed nr_pending
			 */
			rdev_dec_pending(rdev, conf->mddev);
			goto retry;
		}
		conf->next_seq_sect = this_sector + sectors;
		conf->last_used = new_disk;
	}
	rcu_read_unlock();

	return new_disk;
}

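/* Unplug the request queue of every active member device so that any
 * I/O queued on the underlying disks gets dispatched.
 */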
static void unplug_slaves(mddev_t *mddev)
{
	conf_t *conf = mddev->private;
	int i;

	rcu_read_lock();
	for (i=0; i<mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);

			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();

			blk_unplug(r_queue);

			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}

static void raid1_unplug(struct request_queue *q)
{
	mddev_t *mddev = q->queuedata;

	unplug_slaves(mddev);
	md_wakeup_thread(mddev->thread);
}

static int raid1_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	conf_t *conf = mddev->private;
	int i, ret = 0;

	if (mddev_congested(mddev, bits))
		return 1;

	rcu_read_lock();
	for (i = 0; i < mddev->raid_disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			/* Note the '|| 1' - when read_balance prefers
			 * non-congested targets, it can be removed
			 */
			if ((bits & (1<<BDI_async_congested)) || 1)
				ret |= bdi_congested(&q->backing_dev_info, bits);
			else
				ret &= bdi_congested(&q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}


static int flush_pending_writes(conf_t *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 * We return 1 if any requests were actually submitted.
	 */
	int rv = 0;

	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct bio *bio;
		bio = bio_list_get(&conf->pending_bio_list);
		blk_remove_plug(conf->mddev->queue);
		spin_unlock_irq(&conf->device_lock);
		/* flush any pending bitmap writes to
		 * disk before proceeding w/ I/O */
		bitmap_unplug(conf->mddev->bitmap);

		while (bio) { /* submit pending writes */
			struct bio *next = bio->bi_next;
			bio->bi_next = NULL;
			generic_make_request(bio);
			bio = next;
		}
		rv = 1;
	} else
		spin_unlock_irq(&conf->device_lock);
	return rv;
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down. This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'. When that returns there
 *    is no background IO happening. It must arrange to call
 *    allow_barrier when it has finished its IO.
 * background IO calls must call raise_barrier. Once that returns
 *    there is no normal IO happening. It must arrange to call
 *    lower_barrier when the particular background IO completes.
 */
#define RESYNC_DEPTH 32

static void raise_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting */
	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
			    conf->resync_lock,
			    raid1_unplug(conf->mddev->queue));

	/* block any new IO from starting */
	conf->barrier++;

	/* Now wait for all pending IO to complete */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock,
			    raid1_unplug(conf->mddev->queue));

	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(conf_t *conf)
{
	unsigned long flags;
	BUG_ON(conf->barrier <= 0);
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

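/* Called at the start of every normal I/O request: wait for any active
 * barrier to be lowered, then register this request in nr_pending.
 */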
static void wait_barrier(conf_t *conf)
{
	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		conf->nr_waiting++;
		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
				    conf->resync_lock,
				    raid1_unplug(conf->mddev->queue));
		conf->nr_waiting--;
	}
	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
}

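/* Called when a normal I/O request finishes: drop nr_pending and wake
 * anyone (raise_barrier or freeze_array) waiting for pending I/O to drain.
 */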
static void allow_barrier(conf_t *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->nr_pending--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void freeze_array(conf_t *conf)
{
	/* stop syncio and normal IO and wait for everything to
	 * go quiet.
	 * We increment barrier and nr_waiting, and then
	 * wait until nr_pending matches nr_queued+1
	 * This is called in the context of one normal IO request
	 * that has failed. Thus any sync request that might be pending
	 * will be blocked by nr_pending, and we need to wait for
	 * pending IO requests to complete or be queued for re-try.
	 * Thus the number queued (nr_queued) plus this request (1)
	 * must match the number of pending IOs (nr_pending) before
	 * we continue.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier++;
	conf->nr_waiting++;
	wait_event_lock_irq(conf->wait_barrier,
			    conf->nr_pending == conf->nr_queued+1,
			    conf->resync_lock,
			    ({ flush_pending_writes(conf);
			       raid1_unplug(conf->mddev->queue); }));
	spin_unlock_irq(&conf->resync_lock);
}
static void unfreeze_array(conf_t *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier--;
	conf->nr_waiting--;
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);
}


/* duplicate the data pages for behind I/O */
static struct page **alloc_behind_pages(struct bio *bio)
{
	int i;
	struct bio_vec *bvec;
	struct page **pages = kzalloc(bio->bi_vcnt * sizeof(struct page *),
					GFP_NOIO);
	if (unlikely(!pages))
		goto do_sync_io;

	bio_for_each_segment(bvec, bio, i) {
		pages[i] = alloc_page(GFP_NOIO);
		if (unlikely(!pages[i]))
			goto do_sync_io;
		memcpy(kmap(pages[i]) + bvec->bv_offset,
			kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len);
		kunmap(pages[i]);
		kunmap(bvec->bv_page);
	}

	return pages;

do_sync_io:
	if (pages)
		for (i = 0; i < bio->bi_vcnt && pages[i]; i++)
			put_page(pages[i]);
	kfree(pages);
	PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
	return NULL;
}

static int make_request(struct request_queue *q, struct bio * bio)
{
	mddev_t *mddev = q->queuedata;
	conf_t *conf = mddev->private;
	mirror_info_t *mirror;
	r1bio_t *r1_bio;
	struct bio *read_bio;
	int i, targets = 0, disks;
	struct bitmap *bitmap;
	unsigned long flags;
	struct bio_list bl;
	struct page **behind_pages = NULL;
	const int rw = bio_data_dir(bio);
	const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
	int cpu;
	bool do_barriers;
	mdk_rdev_t *blocked_rdev;

	/*
	 * Register the new request and wait if the reconstruction
	 * thread has put up a bar for new requests.
	 * Continue immediately if no resync is active currently.
	 * We test barriers_work *after* md_write_start as md_write_start
	 * may cause the first superblock write, and that will check out
	 * if barriers work.
	 */

	md_write_start(mddev, bio); /* wait on superblock update early */

	if (bio_data_dir(bio) == WRITE &&
	    bio->bi_sector + bio->bi_size/512 > mddev->suspend_lo &&
	    bio->bi_sector < mddev->suspend_hi) {
		/* As the suspend_* range is controlled by
		 * userspace, we want an interruptible
		 * wait.
		 */
		DEFINE_WAIT(w);
		for (;;) {
			flush_signals(current);
			prepare_to_wait(&conf->wait_barrier,
					&w, TASK_INTERRUPTIBLE);
			if (bio->bi_sector + bio->bi_size/512 <= mddev->suspend_lo ||
			    bio->bi_sector >= mddev->suspend_hi)
				break;
			schedule();
		}
		finish_wait(&conf->wait_barrier, &w);
	}
	if (unlikely(!mddev->barriers_work &&
		     bio_rw_flagged(bio, BIO_RW_BARRIER))) {
		if (rw == WRITE)
			md_write_end(mddev);
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	wait_barrier(conf);

	bitmap = mddev->bitmap;

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
		      bio_sectors(bio));
	part_stat_unlock();

	/*
	 * make_request() can abort the operation when READA is being
	 * used and no empty request is available.
	 *
	 */
	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

	r1_bio->master_bio = bio;
	r1_bio->sectors = bio->bi_size >> 9;
	r1_bio->state = 0;
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_sector;

	if (rw == READ) {
		/*
		 * read balancing logic:
		 */
		int rdisk = read_balance(conf, r1_bio);

		if (rdisk < 0) {
			/* couldn't find anywhere to read from */
			raid_end_bio_io(r1_bio);
			return 0;
		}
		mirror = conf->mirrors + rdisk;

		r1_bio->read_disk = rdisk;

		read_bio = bio_clone(bio, GFP_NOIO);

		r1_bio->bios[rdisk] = read_bio;

		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
		read_bio->bi_bdev = mirror->rdev->bdev;
		read_bio->bi_end_io = raid1_end_read_request;
		read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
		read_bio->bi_private = r1_bio;

		generic_make_request(read_bio);
		return 0;
	}

	/*
	 * WRITE:
	 */
	/* first select target devices under spinlock and
	 * inc refcount on their rdev. Record them by setting
	 * bios[x] to bio
	 */
	disks = conf->raid_disks;
#if 0
	{ static int first=1;
	if (first) printk("First Write sector %llu disks %d\n",
			  (unsigned long long)r1_bio->sector, disks);
	first = 0;
	}
#endif
 retry_write:
	blocked_rdev = NULL;
	rcu_read_lock();
	for (i = 0; i < disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			atomic_inc(&rdev->nr_pending);
			blocked_rdev = rdev;
			break;
		}
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			atomic_inc(&rdev->nr_pending);
			if (test_bit(Faulty, &rdev->flags)) {
				rdev_dec_pending(rdev, mddev);
				r1_bio->bios[i] = NULL;
			} else
				r1_bio->bios[i] = bio;
			targets++;
		} else
			r1_bio->bios[i] = NULL;
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		/* Wait for this device to become unblocked */
		int j;

		for (j = 0; j < i; j++)
			if (r1_bio->bios[j])
				rdev_dec_pending(conf->mirrors[j].rdev, mddev);

		allow_barrier(conf);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		wait_barrier(conf);
		goto retry_write;
	}

	BUG_ON(targets == 0); /* we never fail the last device */

	if (targets < conf->raid_disks) {
		/* array is degraded, we will not clear the bitmap
		 * on I/O completion (see raid1_end_write_request) */
		set_bit(R1BIO_Degraded, &r1_bio->state);
	}

	/* do behind I/O ? */
	if (bitmap &&
	    (atomic_read(&bitmap->behind_writes)
	     < mddev->bitmap_info.max_write_behind) &&
	    (behind_pages = alloc_behind_pages(bio)) != NULL)
		set_bit(R1BIO_BehindIO, &r1_bio->state);

	atomic_set(&r1_bio->remaining, 0);
	atomic_set(&r1_bio->behind_remaining, 0);

	do_barriers = bio_rw_flagged(bio, BIO_RW_BARRIER);
	if (do_barriers)
		set_bit(R1BIO_Barrier, &r1_bio->state);

	bio_list_init(&bl);
	for (i = 0; i < disks; i++) {
		struct bio *mbio;
		if (!r1_bio->bios[i])
			continue;

		mbio = bio_clone(bio, GFP_NOIO);
		r1_bio->bios[i] = mbio;

		mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
		mbio->bi_end_io = raid1_end_write_request;
		mbio->bi_rw = WRITE | (do_barriers << BIO_RW_BARRIER) |
			(do_sync << BIO_RW_SYNCIO);
		mbio->bi_private = r1_bio;

		if (behind_pages) {
			struct bio_vec *bvec;
			int j;

			/* Yes, I really want the '__' version so that
			 * we clear any unused pointer in the io_vec, rather
			 * than leave them unchanged. This is important
			 * because when we come to free the pages, we won't
			 * know the original bi_idx, so we just free
			 * them all
			 */
			__bio_for_each_segment(bvec, mbio, j, 0)
				bvec->bv_page = behind_pages[j];
			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
				atomic_inc(&r1_bio->behind_remaining);
		}

		atomic_inc(&r1_bio->remaining);

		bio_list_add(&bl, mbio);
	}
	kfree(behind_pages); /* the behind pages are attached to the bios now */

	bitmap_startwrite(bitmap, bio->bi_sector, r1_bio->sectors,
				test_bit(R1BIO_BehindIO, &r1_bio->state));
	spin_lock_irqsave(&conf->device_lock, flags);
	bio_list_merge(&conf->pending_bio_list, &bl);
	bio_list_init(&bl);

	blk_plug_device(mddev->queue);
	spin_unlock_irqrestore(&conf->device_lock, flags);

	/* In case raid1d snuck into freeze_array */
	wake_up(&conf->wait_barrier);

	if (do_sync)
		md_wakeup_thread(mddev->thread);
#if 0
	while ((bio = bio_list_pop(&bl)) != NULL)
		generic_make_request(bio);
#endif

	return 0;
}
|
|
|
|
|
|
|
|
static void status(struct seq_file *seq, mddev_t *mddev)
|
|
|
|
{
|
2009-06-15 23:54:21 -07:00
|
|
|
conf_t *conf = mddev->private;
|
2005-04-16 15:20:36 -07:00
|
|
|
int i;
|
|
|
|
|
|
|
|
seq_printf(seq, " [%d/%d] [", conf->raid_disks,
|
2006-10-03 01:15:52 -07:00
|
|
|
conf->raid_disks - mddev->degraded);
|
2006-08-31 21:27:36 -07:00
|
|
|
rcu_read_lock();
|
|
|
|
for (i = 0; i < conf->raid_disks; i++) {
|
|
|
|
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
|
2005-04-16 15:20:36 -07:00
|
|
|
seq_printf(seq, "%s",
|
2006-08-31 21:27:36 -07:00
|
|
|
rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
|
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
2005-04-16 15:20:36 -07:00
|
|
|
seq_printf(seq, "]");
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void error(mddev_t *mddev, mdk_rdev_t *rdev)
|
|
|
|
{
|
|
|
|
char b[BDEVNAME_SIZE];
|
2009-06-15 23:54:21 -07:00
|
|
|
conf_t *conf = mddev->private;
|
2005-04-16 15:20:36 -07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If it is not operational, then we have already marked it as dead
|
|
|
|
* else if it is the last working disks, ignore the error, let the
|
|
|
|
* next level up know.
|
|
|
|
* else mark the drive as failed
|
|
|
|
*/
|
2005-11-08 22:39:31 -07:00
|
|
|
if (test_bit(In_sync, &rdev->flags)
|
2009-01-08 14:31:11 -07:00
|
|
|
&& (conf->raid_disks - mddev->degraded) == 1) {
|
2005-04-16 15:20:36 -07:00
|
|
|
/*
|
|
|
|
* Don't fail the drive, act as though we were just a
|
2009-01-08 14:31:11 -07:00
|
|
|
* normal single drive.
|
|
|
|
* However don't try a recovery from this drive as
|
|
|
|
* it is very likely to fail.
|
2005-04-16 15:20:36 -07:00
|
|
|
*/
|
2009-01-08 14:31:11 -07:00
|
|
|
mddev->recovery_disabled = 1;
|
2005-04-16 15:20:36 -07:00
|
|
|
return;
|
2009-01-08 14:31:11 -07:00
|
|
|
}
|
2006-10-03 01:15:53 -07:00
|
|
|
if (test_and_clear_bit(In_sync, &rdev->flags)) {
|
|
|
|
unsigned long flags;
|
|
|
|
spin_lock_irqsave(&conf->device_lock, flags);
|
2005-04-16 15:20:36 -07:00
|
|
|
mddev->degraded++;
|
2007-05-10 03:15:50 -07:00
|
|
|
set_bit(Faulty, &rdev->flags);
|
2006-10-03 01:15:53 -07:00
|
|
|
spin_unlock_irqrestore(&conf->device_lock, flags);
|
2005-04-16 15:20:36 -07:00
|
|
|
/*
|
|
|
|
* if recovery is running, make sure it aborts.
|
|
|
|
*/
|
md: restart recovery cleanly after device failure.
When we get any IO error during a recovery (rebuilding a spare), we abort
the recovery and restart it.
For RAID6 (and multi-drive RAID1) it may not be best to restart at the
beginning: when multiple failures can be tolerated, the recovery may be
able to continue and re-doing all that has already been done doesn't make
sense.
We already have the infrastructure to record where a recovery is up to
and restart from there, but it is not being used properly.
This is because:
- We sometimes abort with MD_RECOVERY_ERR rather than just MD_RECOVERY_INTR,
which causes the recovery not to be checkpointed.
- We remove spares and then re-add them, which loses important state
information.
The distinction between MD_RECOVERY_ERR and MD_RECOVERY_INTR really isn't
needed. If there is an error, the relevant drive will be marked as
Faulty, and that is enough to ensure correct handling of the error. So we
first remove MD_RECOVERY_ERR, changing some of the uses of it to
MD_RECOVERY_INTR.
Then we cause the attempt to remove a non-faulty device from an array to
fail (unless recovery is impossible as the array is too degraded). Then
when remove_and_add_spares attempts to remove the devices on which
recovery can continue, it will fail, they will remain in place, and
recovery will continue on them as desired.
Issue: If we are halfway through rebuilding a spare and another drive
fails, and a new spare is immediately available, do we want to:
1/ complete the current rebuild, then go back and rebuild the new spare or
2/ restart the rebuild from the start and rebuild both devices in
parallel.
Both options can be argued for. The code currently takes option 2 as
a/ this requires least code change
b/ this results in a minimally-degraded array in minimal time.
Cc: "Eivind Sarto" <ivan@kasenna.com>
Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-05-23 13:04:39 -07:00
|
|
|
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
|
2007-05-10 03:15:50 -07:00
|
|
|
} else
|
|
|
|
set_bit(Faulty, &rdev->flags);
|
2006-10-03 01:15:46 -07:00
|
|
|
set_bit(MD_CHANGE_DEVS, &mddev->flags);
|
2008-04-28 02:15:55 -07:00
|
|
|
printk(KERN_ALERT "raid1: Disk failure on %s, disabling device.\n"
|
|
|
|
"raid1: Operation continuing on %d devices.\n",
|
2006-10-03 01:15:52 -07:00
|
|
|
bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
|
2005-04-16 15:20:36 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
static void print_conf(conf_t *conf)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
printk("RAID1 conf printout:\n");
|
|
|
|
if (!conf) {
|
|
|
|
printk("(!conf)\n");
|
|
|
|
return;
|
|
|
|
}
|
2006-10-03 01:15:52 -07:00
|
|
|
printk(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
|
2005-04-16 15:20:36 -07:00
|
|
|
conf->raid_disks);
|
|
|
|
|
2006-08-31 21:27:36 -07:00
|
|
|
rcu_read_lock();
|
2005-04-16 15:20:36 -07:00
|
|
|
for (i = 0; i < conf->raid_disks; i++) {
|
|
|
|
char b[BDEVNAME_SIZE];
|
2006-08-31 21:27:36 -07:00
|
|
|
mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
|
|
|
|
if (rdev)
|
2005-04-16 15:20:36 -07:00
|
|
|
printk(" disk %d, wo:%d, o:%d, dev:%s\n",
|
2006-08-31 21:27:36 -07:00
|
|
|
i, !test_bit(In_sync, &rdev->flags),
|
|
|
|
!test_bit(Faulty, &rdev->flags),
|
|
|
|
bdevname(rdev->bdev,b));
|
2005-04-16 15:20:36 -07:00
|
|
|
}
|
2006-08-31 21:27:36 -07:00
|
|
|
rcu_read_unlock();
|
2005-04-16 15:20:36 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
static void close_sync(conf_t *conf)
|
|
|
|
{
|
2006-01-06 01:20:12 -07:00
|
|
|
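/* wait_barrier() only returns once no resync barrier is still raised,
 * i.e. all outstanding resync requests have completed, so it is safe
 * to destroy the resync buffer pool below.
 */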
wait_barrier(conf);
|
|
|
|
allow_barrier(conf);
|
2005-04-16 15:20:36 -07:00
|
|
|
|
|
|
|
mempool_destroy(conf->r1buf_pool);
|
|
|
|
conf->r1buf_pool = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int raid1_spare_active(mddev_t *mddev)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
conf_t *conf = mddev->private;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Find all failed disks within the RAID1 configuration
|
2006-08-31 21:27:36 -07:00
|
|
|
* and mark them readable.
|
|
|
|
* Called under mddev lock, so rcu protection not needed.
|
2005-04-16 15:20:36 -07:00
|
|
|
*/
|
|
|
|
for (i = 0; i < conf->raid_disks; i++) {
|
2006-08-31 21:27:36 -07:00
|
|
|
mdk_rdev_t *rdev = conf->mirrors[i].rdev;
|
|
|
|
if (rdev
|
|
|
|
&& !test_bit(Faulty, &rdev->flags)
|
2006-10-03 01:15:53 -07:00
|
|
|
&& !test_and_set_bit(In_sync, &rdev->flags)) {
|
|
|
|
unsigned long flags;
|
|
|
|
spin_lock_irqsave(&conf->device_lock, flags);
|
2005-04-16 15:20:36 -07:00
|
|
|
mddev->degraded--;
|
2006-10-03 01:15:53 -07:00
|
|
|
spin_unlock_irqrestore(&conf->device_lock, flags);
|
2005-04-16 15:20:36 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
print_conf(conf);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
|
|
|
|
{
|
|
|
|
conf_t *conf = mddev->private;
|
2008-06-27 15:31:33 -07:00
|
|
|
int err = -EEXIST;
|
2005-06-21 17:17:25 -07:00
|
|
|
int mirror = 0;
|
2005-04-16 15:20:36 -07:00
|
|
|
mirror_info_t *p;
|
2008-06-27 15:31:31 -07:00
|
|
|
int first = 0;
|
|
|
|
int last = mddev->raid_disks - 1;
|
2005-04-16 15:20:36 -07:00
|
|
|
|
2008-06-27 15:31:31 -07:00
|
|
|
if (rdev->raid_disk >= 0)
|
|
|
|
first = last = rdev->raid_disk;
|
|
|
|
|
|
|
|
for (mirror = first; mirror <= last; mirror++)
|
2005-04-16 15:20:36 -07:00
|
|
|
if ( !(p=conf->mirrors+mirror)->rdev) {
|
|
|
|
|
2009-06-30 18:13:45 -07:00
|
|
|
disk_stack_limits(mddev->gendisk, rdev->bdev,
|
|
|
|
rdev->data_offset << 9);
|
2005-04-16 15:20:36 -07:00
|
|
|
/* as we don't honour merge_bvec_fn, we must never risk
|
|
|
|
* violating it, so limit ->max_sector to one PAGE, as
|
|
|
|
* a one page request is never in violation.
|
|
|
|
*/
|
|
|
|
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
|
2009-05-22 14:17:50 -07:00
|
|
|
queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
|
2010-02-25 22:20:38 -07:00
|
|
|
blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
|
2005-04-16 15:20:36 -07:00
|
|
|
|
|
|
|
p->head_position = 0;
|
|
|
|
rdev->raid_disk = mirror;
|
2008-06-27 15:31:33 -07:00
|
|
|
err = 0;
|
2005-11-28 14:44:13 -07:00
|
|
|
/* As all devices are equivalent, we don't need a full recovery
|
|
|
|
* if this device was recently a member of the array
|
|
|
|
*/
|
|
|
|
if (rdev->saved_raid_disk < 0)
|
2005-06-21 17:17:25 -07:00
|
|
|
conf->fullsync = 1;
|
2005-11-08 22:39:27 -07:00
|
|
|
rcu_assign_pointer(p->rdev, rdev);
|
2005-04-16 15:20:36 -07:00
|
|
|
break;
|
|
|
|
}
|
2009-08-02 17:59:47 -07:00
|
|
|
md_integrity_add_rdev(rdev, mddev);
|
2005-04-16 15:20:36 -07:00
|
|
|
print_conf(conf);
|
2008-06-27 15:31:33 -07:00
|
|
|
return err;
|
2005-04-16 15:20:36 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
static int raid1_remove_disk(mddev_t *mddev, int number)
|
|
|
|
{
|
|
|
|
conf_t *conf = mddev->private;
|
|
|
|
int err = 0;
|
|
|
|
mdk_rdev_t *rdev;
|
|
|
|
mirror_info_t *p = conf->mirrors+ number;
|
|
|
|
|
|
|
|
print_conf(conf);
|
|
|
|
rdev = p->rdev;
|
|
|
|
if (rdev) {
|
2005-11-08 22:39:31 -07:00
|
|
|
if (test_bit(In_sync, &rdev->flags) ||
|
2005-04-16 15:20:36 -07:00
|
|
|
atomic_read(&rdev->nr_pending)) {
|
|
|
|
err = -EBUSY;
|
|
|
|
goto abort;
|
|
|
|
}
|
2008-05-23 13:04:39 -07:00
|
|
|
/* Only remove non-faulty devices if recovery
|
|
|
|
* is not possible.
|
|
|
|
*/
|
|
|
|
if (!test_bit(Faulty, &rdev->flags) &&
|
|
|
|
mddev->degraded < conf->raid_disks) {
|
|
|
|
err = -EBUSY;
|
|
|
|
goto abort;
|
|
|
|
}
|
2005-04-16 15:20:36 -07:00
|
|
|
p->rdev = NULL;
|
2005-05-01 08:59:04 -07:00
|
|
|
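/* Wait for an RCU grace period so no reader can still be using the
 * rdev pointer we just cleared before nr_pending is re-checked.
 */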
synchronize_rcu();
|
2005-04-16 15:20:36 -07:00
|
|
|
if (atomic_read(&rdev->nr_pending)) {
|
|
|
|
/* lost the race, try later */
|
|
|
|
err = -EBUSY;
|
|
|
|
p->rdev = rdev;
|
2009-08-02 17:59:47 -07:00
|
|
|
goto abort;
|
2005-04-16 15:20:36 -07:00
|
|
|
}
|
2009-08-02 17:59:47 -07:00
|
|
|
md_integrity_register(mddev);
|
2005-04-16 15:20:36 -07:00
|
|
|
}
|
|
|
|
abort:
|
|
|
|
|
|
|
|
print_conf(conf);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-09-27 03:47:43 -07:00
|
|
|
static void end_sync_read(struct bio *bio, int error)
|
2005-04-16 15:20:36 -07:00
|
|
|
{
|
|
|
|
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
|
2006-01-06 01:20:26 -07:00
|
|
|
int i;
|
2005-04-16 15:20:36 -07:00
|
|
|
|
2006-01-06 01:20:26 -07:00
|
|
|
for (i=r1_bio->mddev->raid_disks; i--; )
|
|
|
|
if (r1_bio->bios[i] == bio)
|
|
|
|
break;
|
|
|
|
BUG_ON(i < 0);
|
|
|
|
update_head_pos(i, r1_bio);
|
2005-04-16 15:20:36 -07:00
|
|
|
/*
|
|
|
|
* we have read a block, now it needs to be re-written,
|
|
|
|
* or re-read if the read failed.
|
|
|
|
* We don't do much here, just schedule handling by raid1d
|
|
|
|
*/
|
2006-01-06 01:20:22 -07:00
|
|
|
if (test_bit(BIO_UPTODATE, &bio->bi_flags))
|
2005-04-16 15:20:36 -07:00
|
|
|
set_bit(R1BIO_Uptodate, &r1_bio->state);
|
2006-01-06 01:20:26 -07:00
|
|
|
|
|
|
|
if (atomic_dec_and_test(&r1_bio->remaining))
|
|
|
|
reschedule_retry(r1_bio);
|
2005-04-16 15:20:36 -07:00
|
|
|
}
|
|
|
|
|
2007-09-27 03:47:43 -07:00
|
|
|
static void end_sync_write(struct bio *bio, int error)
|
2005-04-16 15:20:36 -07:00
|
|
|
{
|
|
|
|
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
|
|
|
|
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
|
|
|
|
mddev_t *mddev = r1_bio->mddev;
|
2009-06-15 23:54:21 -07:00
|
|
|
conf_t *conf = mddev->private;
|
2005-04-16 15:20:36 -07:00
|
|
|
int i;
|
|
|
|
int mirror=0;
|
|
|
|
|
|
|
|
for (i = 0; i < conf->raid_disks; i++)
|
|
|
|
if (r1_bio->bios[i] == bio) {
|
|
|
|
mirror = i;
|
|
|
|
break;
|
|
|
|
}
|
2006-03-31 03:31:57 -07:00
|
|
|
if (!uptodate) {
|
|
|
|
int sync_blocks = 0;
|
|
|
|
sector_t s = r1_bio->sector;
|
|
|
|
long sectors_to_go = r1_bio->sectors;
|
|
|
|
/* make sure these bits don't get cleared. */
|
|
|
|
do {
|
2006-07-10 04:44:18 -07:00
|
|
|
bitmap_end_sync(mddev->bitmap, s,
|
2006-03-31 03:31:57 -07:00
|
|
|
&sync_blocks, 1);
|
|
|
|
s += sync_blocks;
|
|
|
|
sectors_to_go -= sync_blocks;
|
|
|
|
} while (sectors_to_go > 0);
|
2005-04-16 15:20:36 -07:00
|
|
|
md_error(mddev, conf->mirrors[mirror].rdev);
|
2006-03-31 03:31:57 -07:00
|
|
|
}
|
2005-08-04 12:53:34 -07:00
|
|
|
|
2005-04-16 15:20:36 -07:00
|
|
|
update_head_pos(mirror, r1_bio);
|
|
|
|
|
|
|
|
if (atomic_dec_and_test(&r1_bio->remaining)) {
|
2009-02-24 19:18:47 -07:00
|
|
|
sector_t s = r1_bio->sectors;
|
2005-04-16 15:20:36 -07:00
|
|
|
put_buf(r1_bio);
|
2009-02-24 19:18:47 -07:00
|
|
|
md_done_sync(mddev, s, uptodate);
|
2005-04-16 15:20:36 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
|
|
|
|
{
|
2009-06-15 23:54:21 -07:00
|
|
|
conf_t *conf = mddev->private;
|
2005-04-16 15:20:36 -07:00
|
|
|
int i;
|
|
|
|
int disks = conf->raid_disks;
|
|
|
|
struct bio *bio, *wbio;
|
|
|
|
|
|
|
|
bio = r1_bio->bios[r1_bio->read_disk];
|
|
|
|
|
2006-01-06 01:20:22 -07:00
|
|
|
|
2006-01-06 01:20:26 -07:00
|
|
|
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
|
|
|
|
/* We have read all readable devices. If we haven't
|
|
|
|
* got the block, then there is no hope left.
|
|
|
|
* If we have, then we want to do a comparison
|
|
|
|
* and skip the write if everything is the same.
|
|
|
|
* If any blocks failed to read, then we need to
|
|
|
|
* attempt an over-write
|
|
|
|
*/
|
|
|
|
int primary;
|
|
|
|
if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
|
|
|
|
for (i=0; i<mddev->raid_disks; i++)
|
|
|
|
if (r1_bio->bios[i]->bi_end_io == end_sync_read)
|
|
|
|
md_error(mddev, conf->mirrors[i].rdev);
|
|
|
|
|
|
|
|
md_done_sync(mddev, r1_bio->sectors, 1);
|
|
|
|
put_buf(r1_bio);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
for (primary=0; primary<mddev->raid_disks; primary++)
|
|
|
|
if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
|
|
|
|
test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
|
|
|
|
r1_bio->bios[primary]->bi_end_io = NULL;
|
2006-01-06 01:20:46 -07:00
|
|
|
rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
|
2006-01-06 01:20:26 -07:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
r1_bio->read_disk = primary;
|
|
|
|
for (i=0; i<mddev->raid_disks; i++)
|
2007-06-16 10:16:07 -07:00
|
|
|
if (r1_bio->bios[i]->bi_end_io == end_sync_read) {
|
2006-01-06 01:20:26 -07:00
|
|
|
int j;
|
|
|
|
int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9);
|
|
|
|
struct bio *pbio = r1_bio->bios[primary];
|
|
|
|
struct bio *sbio = r1_bio->bios[i];
|
2007-06-16 10:16:07 -07:00
|
|
|
|
|
|
|
if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
|
|
|
|
for (j = vcnt; j-- ; ) {
|
|
|
|
struct page *p, *s;
|
|
|
|
p = pbio->bi_io_vec[j].bv_page;
|
|
|
|
s = sbio->bi_io_vec[j].bv_page;
|
|
|
|
if (memcmp(page_address(p),
|
|
|
|
page_address(s),
|
|
|
|
PAGE_SIZE))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
j = 0;
|
2006-01-06 01:20:26 -07:00
|
|
|
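/* j >= 0 means a page differed or the read itself failed, so count a
 * mismatch.  The copy is rewritten below unless the data matched, or
 * this is a check-only pass and the read succeeded.
 */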
if (j >= 0)
|
|
|
|
mddev->resync_mismatches += r1_bio->sectors;
|
2007-10-16 23:30:55 -07:00
|
|
|
if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
|
|
|
|
&& test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
|
2006-01-06 01:20:26 -07:00
|
|
|
sbio->bi_end_io = NULL;
|
2006-01-06 01:20:46 -07:00
|
|
|
rdev_dec_pending(conf->mirrors[i].rdev, mddev);
|
|
|
|
} else {
|
2006-01-06 01:20:26 -07:00
|
|
|
/* fixup the bio for reuse */
|
2008-05-23 13:04:35 -07:00
|
|
|
int size;
|
2006-01-06 01:20:26 -07:00
|
|
|
sbio->bi_vcnt = vcnt;
|
|
|
|
sbio->bi_size = r1_bio->sectors << 9;
|
|
|
|
sbio->bi_idx = 0;
|
|
|
|
sbio->bi_phys_segments = 0;
|
|
|
|
sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
|
|
|
|
sbio->bi_flags |= 1 << BIO_UPTODATE;
|
|
|
|
sbio->bi_next = NULL;
|
|
|
|
sbio->bi_sector = r1_bio->sector +
|
|
|
|
conf->mirrors[i].rdev->data_offset;
|
|
|
|
sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
|
2008-05-23 13:04:35 -07:00
|
|
|
size = sbio->bi_size;
|
|
|
|
for (j = 0; j < vcnt ; j++) {
|
|
|
|
struct bio_vec *bi;
|
|
|
|
bi = &sbio->bi_io_vec[j];
|
|
|
|
bi->bv_offset = 0;
|
|
|
|
if (size > PAGE_SIZE)
|
|
|
|
bi->bv_len = PAGE_SIZE;
|
|
|
|
else
|
|
|
|
bi->bv_len = size;
|
|
|
|
size -= PAGE_SIZE;
|
|
|
|
memcpy(page_address(bi->bv_page),
|
2007-01-26 01:57:01 -07:00
|
|
|
page_address(pbio->bi_io_vec[j].bv_page),
|
|
|
|
PAGE_SIZE);
|
2008-05-23 13:04:35 -07:00
|
|
|
}
|
2007-01-26 01:57:01 -07:00
|
|
|
|
2006-01-06 01:20:26 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2005-04-16 15:20:36 -07:00
|
|
|
if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) {
|
2006-01-06 01:20:22 -07:00
|
|
|
/* ouch - failed to read all of that.
|
|
|
|
* Try some synchronous reads of other devices to get
|
|
|
|
* good data, much like with normal read errors. Only
|
2006-08-31 21:27:36 -07:00
|
|
|
* read into the pages we already have so we don't
|
2006-01-06 01:20:22 -07:00
|
|
|
* need to re-issue the read request.
|
|
|
|
* We don't need to freeze the array, because being in an
|
|
|
|
* active sync request, there is no normal IO, and
|
|
|
|
* no overlapping syncs.
|
2005-04-16 15:20:36 -07:00
|
|
|
*/
|
2006-01-06 01:20:22 -07:00
|
|
|
sector_t sect = r1_bio->sector;
|
|
|
|
int sectors = r1_bio->sectors;
|
|
|
|
int idx = 0;
|
|
|
|
|
|
|
|
while(sectors) {
|
|
|
|
int s = sectors;
|
|
|
|
int d = r1_bio->read_disk;
|
|
|
|
int success = 0;
|
|
|
|
mdk_rdev_t *rdev;
|
|
|
|
|
|
|
|
if (s > (PAGE_SIZE>>9))
|
|
|
|
s = PAGE_SIZE >> 9;
|
|
|
|
do {
|
|
|
|
if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
|
2006-08-31 21:27:36 -07:00
|
|
|
/* No rcu protection needed here; devices
|
|
|
|
* can only be removed when no resync is
|
|
|
|
* active, and resync is currently active
|
|
|
|
*/
|
2006-01-06 01:20:22 -07:00
|
|
|
rdev = conf->mirrors[d].rdev;
|
|
|
|
if (sync_page_io(rdev->bdev,
|
|
|
|
sect + rdev->data_offset,
|
|
|
|
s<<9,
|
|
|
|
bio->bi_io_vec[idx].bv_page,
|
|
|
|
READ)) {
|
|
|
|
success = 1;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
d++;
|
|
|
|
if (d == conf->raid_disks)
|
|
|
|
d = 0;
|
|
|
|
} while (!success && d != r1_bio->read_disk);
|
|
|
|
|
|
|
|
if (success) {
|
2006-01-06 01:20:37 -07:00
|
|
|
int start = d;
|
2006-01-06 01:20:22 -07:00
|
|
|
/* write it back and re-read */
|
|
|
|
set_bit(R1BIO_Uptodate, &r1_bio->state);
|
|
|
|
while (d != r1_bio->read_disk) {
|
|
|
|
if (d == 0)
|
|
|
|
d = conf->raid_disks;
|
|
|
|
d--;
|
|
|
|
if (r1_bio->bios[d]->bi_end_io != end_sync_read)
|
|
|
|
continue;
|
|
|
|
rdev = conf->mirrors[d].rdev;
|
2006-01-06 01:20:52 -07:00
|
|
|
atomic_add(s, &rdev->corrected_errors);
|
2006-01-06 01:20:22 -07:00
|
|
|
if (sync_page_io(rdev->bdev,
|
|
|
|
sect + rdev->data_offset,
|
|
|
|
s<<9,
|
|
|
|
bio->bi_io_vec[idx].bv_page,
|
2006-01-06 01:20:37 -07:00
|
|
|
WRITE) == 0)
|
|
|
|
md_error(mddev, rdev);
|
|
|
|
}
|
|
|
|
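/* Second pass: re-read the freshly written data; a device that still
 * cannot read it is failed.
 */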
d = start;
|
|
|
|
while (d != r1_bio->read_disk) {
|
|
|
|
if (d == 0)
|
|
|
|
d = conf->raid_disks;
|
|
|
|
d--;
|
|
|
|
if (r1_bio->bios[d]->bi_end_io != end_sync_read)
|
|
|
|
continue;
|
|
|
|
rdev = conf->mirrors[d].rdev;
|
|
|
|
if (sync_page_io(rdev->bdev,
|
2006-01-06 01:20:22 -07:00
|
|
|
sect + rdev->data_offset,
|
|
|
|
s<<9,
|
|
|
|
bio->bi_io_vec[idx].bv_page,
|
2006-01-06 01:20:37 -07:00
|
|
|
READ) == 0)
|
2006-01-06 01:20:22 -07:00
|
|
|
md_error(mddev, rdev);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
char b[BDEVNAME_SIZE];
|
|
|
|
/* Cannot read from anywhere, array is toast */
|
|
|
|
md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev);
|
|
|
|
printk(KERN_ALERT "raid1: %s: unrecoverable I/O read error"
|
|
|
|
" for block %llu\n",
|
|
|
|
bdevname(bio->bi_bdev,b),
|
|
|
|
(unsigned long long)r1_bio->sector);
|
|
|
|
md_done_sync(mddev, r1_bio->sectors, 0);
|
|
|
|
put_buf(r1_bio);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
sectors -= s;
|
|
|
|
sect += s;
|
|
|
|
idx ++;
|
|
|
|
}
|
2005-04-16 15:20:36 -07:00
|
|
|
}
|
2006-01-06 01:20:26 -07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* schedule writes
|
|
|
|
*/
|
2005-04-16 15:20:36 -07:00
|
|
|
atomic_set(&r1_bio->remaining, 1);
|
|
|
|
for (i = 0; i < disks ; i++) {
|
|
|
|
wbio = r1_bio->bios[i];
|
2006-01-06 01:20:21 -07:00
|
|
|
if (wbio->bi_end_io == NULL ||
|
|
|
|
(wbio->bi_end_io == end_sync_read &&
|
|
|
|
(i == r1_bio->read_disk ||
|
|
|
|
!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
|
2005-04-16 15:20:36 -07:00
|
|
|
continue;
|
|
|
|
|
2006-01-06 01:20:21 -07:00
|
|
|
wbio->bi_rw = WRITE;
|
|
|
|
wbio->bi_end_io = end_sync_write;
|
2005-04-16 15:20:36 -07:00
|
|
|
atomic_inc(&r1_bio->remaining);
|
|
|
|
md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);
|
2005-06-21 17:17:23 -07:00
|
|
|
|
2005-04-16 15:20:36 -07:00
|
|
|
generic_make_request(wbio);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (atomic_dec_and_test(&r1_bio->remaining)) {
|
2005-06-21 17:17:23 -07:00
|
|
|
/* if we're here, all write(s) have completed, so clean up */
|
2005-04-16 15:20:36 -07:00
|
|
|
md_done_sync(mddev, r1_bio->sectors, 1);
|
|
|
|
put_buf(r1_bio);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is a kernel thread which:
|
|
|
|
*
|
|
|
|
* 1. Retries failed read operations on working mirrors.
|
|
|
|
* 2. Updates the raid superblock when problems are encountered.
|
|
|
|
* 3. Performs writes following reads for array synchronising.
|
|
|
|
*/
|
|
|
|
|
2006-10-03 01:15:51 -07:00
|
|
|
static void fix_read_error(conf_t *conf, int read_disk,
|
|
|
|
sector_t sect, int sectors)
|
|
|
|
{
|
|
|
|
mddev_t *mddev = conf->mddev;
|
|
|
|
while(sectors) {
|
|
|
|
int s = sectors;
|
|
|
|
int d = read_disk;
|
|
|
|
int success = 0;
|
|
|
|
int start;
|
|
|
|
mdk_rdev_t *rdev;
|
|
|
|
|
|
|
|
if (s > (PAGE_SIZE>>9))
|
|
|
|
s = PAGE_SIZE >> 9;
|
|
|
|
|
|
|
|
do {
|
|
|
|
/* Note: no rcu protection needed here
|
|
|
|
* as this is synchronous in the raid1d thread
|
|
|
|
* which is the thread that might remove
|
|
|
|
* a device. If raid1d ever becomes multi-threaded....
|
|
|
|
*/
|
|
|
|
rdev = conf->mirrors[d].rdev;
|
|
|
|
if (rdev &&
|
|
|
|
test_bit(In_sync, &rdev->flags) &&
|
|
|
|
sync_page_io(rdev->bdev,
|
|
|
|
sect + rdev->data_offset,
|
|
|
|
s<<9,
|
|
|
|
conf->tmppage, READ))
|
|
|
|
success = 1;
|
|
|
|
else {
|
|
|
|
d++;
|
|
|
|
if (d == conf->raid_disks)
|
|
|
|
d = 0;
|
|
|
|
}
|
|
|
|
} while (!success && d != read_disk);
|
|
|
|
|
|
|
|
if (!success) {
|
|
|
|
/* Cannot read from anywhere -- bye bye array */
|
|
|
|
md_error(mddev, conf->mirrors[read_disk].rdev);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
/* write it back and re-read */
|
|
|
|
start = d;
|
|
|
|
while (d != read_disk) {
|
|
|
|
if (d==0)
|
|
|
|
d = conf->raid_disks;
|
|
|
|
d--;
|
|
|
|
rdev = conf->mirrors[d].rdev;
|
|
|
|
if (rdev &&
|
|
|
|
test_bit(In_sync, &rdev->flags)) {
|
|
|
|
if (sync_page_io(rdev->bdev,
|
|
|
|
sect + rdev->data_offset,
|
|
|
|
s<<9, conf->tmppage, WRITE)
|
|
|
|
== 0)
|
|
|
|
/* Well, this device is dead */
|
|
|
|
md_error(mddev, rdev);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
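/* Second pass: re-read the sector from each in-sync device to confirm
 * the correction, counting corrected errors and failing any device
 * whose re-read still errors.
 */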
d = start;
|
|
|
|
while (d != read_disk) {
|
|
|
|
char b[BDEVNAME_SIZE];
|
|
|
|
if (d==0)
|
|
|
|
d = conf->raid_disks;
|
|
|
|
d--;
|
|
|
|
rdev = conf->mirrors[d].rdev;
|
|
|
|
if (rdev &&
|
|
|
|
test_bit(In_sync, &rdev->flags)) {
|
|
|
|
if (sync_page_io(rdev->bdev,
|
|
|
|
sect + rdev->data_offset,
|
|
|
|
s<<9, conf->tmppage, READ)
|
|
|
|
== 0)
|
|
|
|
/* Well, this device is dead */
|
|
|
|
md_error(mddev, rdev);
|
|
|
|
else {
|
|
|
|
atomic_add(s, &rdev->corrected_errors);
|
|
|
|
printk(KERN_INFO
|
|
|
|
"raid1:%s: read error corrected "
|
|
|
|
"(%d sectors at %llu on %s)\n",
|
|
|
|
mdname(mddev), s,
|
2006-10-28 10:38:32 -07:00
|
|
|
(unsigned long long)(sect +
|
|
|
|
rdev->data_offset),
|
2006-10-03 01:15:51 -07:00
|
|
|
bdevname(rdev->bdev, b));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
sectors -= s;
|
|
|
|
sect += s;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-04-16 15:20:36 -07:00
|
|
|
static void raid1d(mddev_t *mddev)
|
|
|
|
{
|
|
|
|
r1bio_t *r1_bio;
|
|
|
|
struct bio *bio;
|
|
|
|
unsigned long flags;
|
2009-06-15 23:54:21 -07:00
|
|
|
conf_t *conf = mddev->private;
|
2005-04-16 15:20:36 -07:00
|
|
|
struct list_head *head = &conf->retry_list;
|
|
|
|
int unplug=0;
|
|
|
|
mdk_rdev_t *rdev;
|
|
|
|
|
|
|
|
md_check_recovery(mddev);
|
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
char b[BDEVNAME_SIZE];
|
2005-06-21 17:17:23 -07:00
|
|
|
|
2008-03-04 15:29:29 -07:00
|
|
|
unplug += flush_pending_writes(conf);
|
2005-06-21 17:17:23 -07:00
|
|
|
|
2008-03-04 15:29:29 -07:00
|
|
|
spin_lock_irqsave(&conf->device_lock, flags);
|
|
|
|
if (list_empty(head)) {
|
|
|
|
spin_unlock_irqrestore(&conf->device_lock, flags);
|
2005-04-16 15:20:36 -07:00
|
|
|
break;
|
2008-03-04 15:29:29 -07:00
|
|
|
}
|
2005-04-16 15:20:36 -07:00
|
|
|
r1_bio = list_entry(head->prev, r1bio_t, retry_list);
|
|
|
|
list_del(head->prev);
|
2006-01-06 01:20:19 -07:00
|
|
|
conf->nr_queued--;
|
2005-04-16 15:20:36 -07:00
|
|
|
spin_unlock_irqrestore(&conf->device_lock, flags);
|
|
|
|
|
|
|
|
mddev = r1_bio->mddev;
|
2009-06-15 23:54:21 -07:00
|
|
|
conf = mddev->private;
|
2005-04-16 15:20:36 -07:00
|
|
|
if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
|
|
|
|
sync_request_write(mddev, r1_bio);
|
|
|
|
unplug = 1;
|
[PATCH] md: support BIO_RW_BARRIER for md/raid1
We can only accept BARRIER requests if all slaves handle
barriers, and that can, of course, change with time....
So we keep track of whether the whole array seems safe for barriers,
and also whether each individual rdev handles barriers.
We initially assume barriers are OK.
When writing the superblock we try a barrier, and if that fails, we flag
things for no-barriers. This will usually clear the flags fairly quickly.
If writing the superblock finds that BIO_RW_BARRIER is -ENOTSUPP, we need to
resubmit, so introduce function "md_super_wait" which waits for requests to
finish, and retries ENOTSUPP requests without the barrier flag.
When writing the real raid1, write requests which were BIO_RW_BARRIER but
which aren't supported need to be retried. So raid1d is enhanced to do this,
and when any bio write completes (i.e. no retry needed) we remove it from the
r1bio, so that devices needing retry are easy to find.
We should hardly ever get -ENOTSUPP errors when writing data to the raid.
It should only happen if:
1/ the device used to support BARRIER, but now doesn't. Few devices
change like this, though raid1 can!
or
2/ the array has no persistent superblock, so there was no opportunity to
pre-test for barriers when writing the superblock.
Signed-off-by: Neil Brown <neilb@cse.unsw.edu.au>
Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-11-08 22:39:34 -07:00
|
|
|
} else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
|
|
|
|
/* some requests in the r1bio were BIO_RW_BARRIER
|
2006-05-01 12:15:46 -07:00
|
|
|
* requests which failed with -EOPNOTSUPP. Hohumm..
|
2005-11-08 22:39:34 -07:00
|
|
|
* Better resubmit without the barrier.
|
|
|
|
* We know which devices to resubmit for, because
|
|
|
|
* all others have had their bios[] entry cleared.
|
2006-05-01 12:15:47 -07:00
|
|
|
* We already have a nr_pending reference on these rdevs.
|
2005-11-08 22:39:34 -07:00
|
|
|
*/
|
|
|
|
int i;
|
2009-09-11 05:32:04 -07:00
|
|
|
const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
|
2005-11-08 22:39:34 -07:00
|
|
|
clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
|
|
|
|
clear_bit(R1BIO_Barrier, &r1_bio->state);
|
2006-03-27 02:18:19 -07:00
|
|
|
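/* Take a reference on ->remaining for every bio that will be
 * resubmitted before issuing any of them, so the r1bio cannot
 * complete while this retry loop is still running.
 */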
for (i=0; i < conf->raid_disks; i++)
|
|
|
|
if (r1_bio->bios[i])
|
|
|
|
atomic_inc(&r1_bio->remaining);
|
2005-11-08 22:39:34 -07:00
|
|
|
for (i=0; i < conf->raid_disks; i++)
|
|
|
|
if (r1_bio->bios[i]) {
|
|
|
|
struct bio_vec *bvec;
|
|
|
|
int j;
|
|
|
|
|
|
|
|
bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
|
|
|
|
/* copy pages from the failed bio, as
|
|
|
|
* this might be a write-behind device */
|
|
|
|
__bio_for_each_segment(bvec, bio, j, 0)
|
|
|
|
bvec->bv_page = bio_iovec_idx(r1_bio->bios[i], j)->bv_page;
|
|
|
|
bio_put(r1_bio->bios[i]);
|
|
|
|
bio->bi_sector = r1_bio->sector +
|
|
|
|
conf->mirrors[i].rdev->data_offset;
|
|
|
|
bio->bi_bdev = conf->mirrors[i].rdev->bdev;
|
|
|
|
bio->bi_end_io = raid1_end_write_request;
|
2009-09-19 18:52:25 -07:00
|
|
|
bio->bi_rw = WRITE |
|
|
|
|
(do_sync << BIO_RW_SYNCIO);
|
2005-11-08 22:39:34 -07:00
|
|
|
bio->bi_private = r1_bio;
|
|
|
|
r1_bio->bios[i] = bio;
|
|
|
|
generic_make_request(bio);
|
|
|
|
}
|
2005-04-16 15:20:36 -07:00
|
|
|
} else {
|
|
|
|
int disk;
|
2006-01-06 01:20:19 -07:00
|
|
|
|
|
|
|
/* we got a read error. Maybe the drive is bad. Maybe just
|
|
|
|
* the block and we can fix it.
|
|
|
|
* We freeze all other IO, and try reading the block from
|
|
|
|
* other devices. When we find one, we re-write
|
|
|
|
* and check whether that fixes the read error.
|
|
|
|
* This is all done synchronously while the array is
|
|
|
|
* frozen
|
|
|
|
*/
|
2006-10-03 01:15:51 -07:00
|
|
|
if (mddev->ro == 0) {
|
|
|
|
freeze_array(conf);
|
|
|
|
fix_read_error(conf, r1_bio->read_disk,
|
|
|
|
r1_bio->sector,
|
|
|
|
r1_bio->sectors);
|
|
|
|
unfreeze_array(conf);
|
2009-11-30 23:30:59 -07:00
|
|
|
} else
|
|
|
|
md_error(mddev,
|
|
|
|
conf->mirrors[r1_bio->read_disk].rdev);
|
2006-01-06 01:20:19 -07:00
|
|
|
|
2005-04-16 15:20:36 -07:00
|
|
|
bio = r1_bio->bios[r1_bio->read_disk];
|
2009-11-30 23:30:59 -07:00
|
|
|
if ((disk=read_balance(conf, r1_bio)) == -1) {
|
2005-04-16 15:20:36 -07:00
|
|
|
printk(KERN_ALERT "raid1: %s: unrecoverable I/O"
|
|
|
|
" read error for block %llu\n",
|
|
|
|
bdevname(bio->bi_bdev,b),
|
|
|
|
(unsigned long long)r1_bio->sector);
|
|
|
|
raid_end_bio_io(r1_bio);
|
|
|
|
} else {
|
2009-09-11 05:32:04 -07:00
|
|
|
const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
|
2006-01-06 01:20:23 -07:00
|
|
|
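/* If the array is read-only the bad block could not be rewritten, so
 * mark the slot IO_BLOCKED to stop read_balance picking this device
 * again; otherwise just release the slot.
 */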
r1_bio->bios[r1_bio->read_disk] =
|
|
|
|
mddev->ro ? IO_BLOCKED : NULL;
|
2005-04-16 15:20:36 -07:00
|
|
|
r1_bio->read_disk = disk;
|
|
|
|
bio_put(bio);
|
|
|
|
bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
|
|
|
|
r1_bio->bios[r1_bio->read_disk] = bio;
|
|
|
|
rdev = conf->mirrors[disk].rdev;
|
|
|
|
if (printk_ratelimit())
|
|
|
|
printk(KERN_ERR "raid1: %s: redirecting sector %llu to"
|
|
|
|
" another mirror\n",
|
|
|
|
bdevname(rdev->bdev,b),
|
|
|
|
(unsigned long long)r1_bio->sector);
|
|
|
|
bio->bi_sector = r1_bio->sector + rdev->data_offset;
|
|
|
|
bio->bi_bdev = rdev->bdev;
|
|
|
|
bio->bi_end_io = raid1_end_read_request;
|
2009-09-19 18:52:25 -07:00
|
|
|
bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
|
2005-04-16 15:20:36 -07:00
|
|
|
bio->bi_private = r1_bio;
|
|
|
|
unplug = 1;
|
|
|
|
generic_make_request(bio);
|
|
|
|
}
|
|
|
|
}
|
2009-10-15 21:55:32 -07:00
|
|
|
cond_resched();
|
2005-04-16 15:20:36 -07:00
|
|
|
}
|
|
|
|
if (unplug)
|
|
|
|
unplug_slaves(mddev);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int init_resync(conf_t *conf)
|
|
|
|
{
|
|
|
|
int buffs;
|
|
|
|
|
|
|
|
buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
|
2006-03-31 16:08:49 -07:00
|
|
|
BUG_ON(conf->r1buf_pool);
|
2005-04-16 15:20:36 -07:00
|
|
|
conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc, r1buf_pool_free,
|
|
|
|
conf->poolinfo);
|
|
|
|
if (!conf->r1buf_pool)
|
|
|
|
return -ENOMEM;
|
|
|
|
conf->next_resync = 0;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* perform a "sync" on one "block"
|
|
|
|
*
|
|
|
|
* We need to make sure that no normal I/O request - particularly write
|
|
|
|
* requests - conflict with active sync requests.
|
|
|
|
*
|
|
|
|
* This is achieved by tracking pending requests and a 'barrier' concept
|
|
|
|
* that can be installed to exclude normal IO requests.
|
|
|
|
*/
|
|
|
|
|
2005-06-21 17:17:13 -07:00
|
|
|
static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
|
2005-04-16 15:20:36 -07:00
|
|
|
{
|
2009-06-15 23:54:21 -07:00
|
|
|
conf_t *conf = mddev->private;
|
2005-04-16 15:20:36 -07:00
|
|
|
r1bio_t *r1_bio;
|
|
|
|
struct bio *bio;
|
|
|
|
sector_t max_sector, nr_sectors;
|
2006-01-06 01:20:21 -07:00
|
|
|
int disk = -1;
|
2005-04-16 15:20:36 -07:00
|
|
|
int i;
|
2006-01-06 01:20:21 -07:00
|
|
|
int wonly = -1;
|
|
|
|
int write_targets = 0, read_targets = 0;
|
2005-06-21 17:17:23 -07:00
|
|
|
int sync_blocks;
|
2005-08-04 12:53:34 -07:00
|
|
|
int still_degraded = 0;
|
2005-04-16 15:20:36 -07:00
|
|
|
|
|
|
|
if (!conf->r1buf_pool)
|
2005-06-21 17:17:23 -07:00
|
|
|
{
|
|
|
|
/*
|
|
|
|
printk("sync start - bitmap %p\n", mddev->bitmap);
|
|
|
|
*/
|
2005-04-16 15:20:36 -07:00
|
|
|
if (init_resync(conf))
|
2005-06-21 17:17:13 -07:00
|
|
|
return 0;
|
2005-06-21 17:17:23 -07:00
|
|
|
}
|
2005-04-16 15:20:36 -07:00
|
|
|
|
2009-03-30 20:33:13 -07:00
|
|
|
max_sector = mddev->dev_sectors;
|
2005-04-16 15:20:36 -07:00
|
|
|
if (sector_nr >= max_sector) {
|
2005-06-21 17:17:23 -07:00
|
|
|
/* If we aborted, we need to abort the
|
|
|
|
* sync on the 'current' bitmap chunk (there will
|
|
|
|
* only be one in raid1 resync).
|
|
|
|
* We can find the current address in mddev->curr_resync
|
|
|
|
*/
|
2005-07-15 03:56:35 -07:00
|
|
|
if (mddev->curr_resync < max_sector) /* aborted */
|
|
|
|
bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
|
2005-06-21 17:17:23 -07:00
|
|
|
&sync_blocks, 1);
|
2005-07-15 03:56:35 -07:00
|
|
|
else /* completed sync */
|
2005-06-21 17:17:23 -07:00
|
|
|
conf->fullsync = 0;
|
2005-07-15 03:56:35 -07:00
|
|
|
|
|
|
|
bitmap_close_sync(mddev->bitmap);
|
2005-04-16 15:20:36 -07:00
|
|
|
close_sync(conf);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2006-06-26 00:27:56 -07:00
|
|
|
if (mddev->bitmap == NULL &&
|
|
|
|
mddev->recovery_cp == MaxSector &&
|
2006-08-27 01:23:50 -07:00
|
|
|
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
|
2006-06-26 00:27:56 -07:00
|
|
|
conf->fullsync == 0) {
|
|
|
|
*skipped = 1;
|
|
|
|
return max_sector - sector_nr;
|
|
|
|
}
|
2006-08-27 01:23:50 -07:00
|
|
|
/* before building a request, check if we can skip these blocks..
|
|
|
|
* This call to bitmap_start_sync doesn't actually record anything
|
|
|
|
*/
|
2005-08-04 12:53:34 -07:00
|
|
|
if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
|
2005-11-08 22:39:38 -07:00
|
|
|
!conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
|
2005-06-21 17:17:23 -07:00
|
|
|
/* We can skip this block, and probably several more */
|
|
|
|
*skipped = 1;
|
|
|
|
return sync_blocks;
|
|
|
|
}
|
2005-04-16 15:20:36 -07:00
|
|
|
/*
|
2006-01-06 01:20:12 -07:00
|
|
|
* If there is non-resync activity waiting for a turn,
|
|
|
|
* and resync is going fast enough,
|
|
|
|
* then let it through before starting on this new sync request.
|
2005-04-16 15:20:36 -07:00
|
|
|
*/
|
2006-01-06 01:20:12 -07:00
|
|
|
if (!go_faster && conf->nr_waiting)
|
2005-04-16 15:20:36 -07:00
|
|
|
msleep_interruptible(1000);
|
2006-01-06 01:20:12 -07:00
|
|
|
|
2008-02-06 02:39:50 -07:00
|
|
|
bitmap_cond_end_sync(mddev->bitmap, sector_nr);
|
2006-01-06 01:20:12 -07:00
|
|
|
raise_barrier(conf);
|
|
|
|
|
|
|
|
conf->next_resync = sector_nr;
|
2005-04-16 15:20:36 -07:00
|
|
|
|
2006-01-06 01:20:21 -07:00
|
|
|
r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
|
|
|
|
rcu_read_lock();
|
2005-04-16 15:20:36 -07:00
|
|
|
/*
|
2006-01-06 01:20:21 -07:00
|
|
|
* If we get a correctable read error during resync or recovery,
|
|
|
|
* we might want to read from a different device. So we
|
|
|
|
* flag all drives that could conceivably be read from for READ,
|
|
|
|
* and any others (which will be non-In_sync devices) for WRITE.
|
|
|
|
* If a read fails, we try reading from something else for which READ
|
|
|
|
* is OK.
|
2005-04-16 15:20:36 -07:00
|
|
|
*/
|
|
|
|
|
|
|
|
r1_bio->mddev = mddev;
|
|
|
|
r1_bio->sector = sector_nr;
|
2005-06-21 17:17:23 -07:00
|
|
|
r1_bio->state = 0;
|
2005-04-16 15:20:36 -07:00
|
|
|
set_bit(R1BIO_IsSync, &r1_bio->state);
|
|
|
|
|
|
|
|
for (i=0; i < conf->raid_disks; i++) {
|
2006-01-06 01:20:21 -07:00
|
|
|
mdk_rdev_t *rdev;
|
2005-04-16 15:20:36 -07:00
|
|
|
bio = r1_bio->bios[i];
|
|
|
|
|
|
|
|
/* take from bio_init */
|
|
|
|
bio->bi_next = NULL;
|
|
|
|
bio->bi_flags |= 1 << BIO_UPTODATE;
|
2006-12-13 01:34:13 -07:00
|
|
|
bio->bi_rw = READ;
|
2005-04-16 15:20:36 -07:00
|
|
|
bio->bi_vcnt = 0;
|
|
|
|
bio->bi_idx = 0;
|
|
|
|
bio->bi_phys_segments = 0;
|
|
|
|
bio->bi_size = 0;
|
|
|
|
bio->bi_end_io = NULL;
|
|
|
|
bio->bi_private = NULL;
|
|
|
|
|
2006-01-06 01:20:21 -07:00
|
|
|
rdev = rcu_dereference(conf->mirrors[i].rdev);
|
|
|
|
if (rdev == NULL ||
|
|
|
|
test_bit(Faulty, &rdev->flags)) {
|
2005-08-04 12:53:34 -07:00
|
|
|
still_degraded = 1;
|
|
|
|
continue;
|
2006-01-06 01:20:21 -07:00
|
|
|
} else if (!test_bit(In_sync, &rdev->flags)) {
|
2005-04-16 15:20:36 -07:00
|
|
|
bio->bi_rw = WRITE;
|
|
|
|
bio->bi_end_io = end_sync_write;
|
|
|
|
write_targets ++;
|
2006-01-06 01:20:21 -07:00
|
|
|
} else {
|
|
|
|
/* may need to read from here */
|
|
|
|
bio->bi_rw = READ;
|
|
|
|
bio->bi_end_io = end_sync_read;
|
|
|
|
if (test_bit(WriteMostly, &rdev->flags)) {
|
|
|
|
if (wonly < 0)
|
|
|
|
wonly = i;
|
|
|
|
} else {
|
|
|
|
if (disk < 0)
|
|
|
|
disk = i;
|
|
|
|
}
|
|
|
|
read_targets++;
|
|
|
|
}
|
|
|
|
atomic_inc(&rdev->nr_pending);
|
|
|
|
bio->bi_sector = sector_nr + rdev->data_offset;
|
|
|
|
bio->bi_bdev = rdev->bdev;
|
2005-04-16 15:20:36 -07:00
|
|
|
bio->bi_private = r1_bio;
|
|
|
|
}
|
2006-01-06 01:20:21 -07:00
|
|
|
rcu_read_unlock();
|
|
|
|
if (disk < 0)
|
|
|
|
disk = wonly;
|
|
|
|
r1_bio->read_disk = disk;
|
2005-06-21 17:17:23 -07:00
|
|
|
|
2006-01-06 01:20:21 -07:00
|
|
|
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
|
|
|
|
/* extra read targets are also write targets */
|
|
|
|
write_targets += read_targets-1;
|
|
|
|
|
|
|
|
if (write_targets == 0 || read_targets == 0) {
|
2005-04-16 15:20:36 -07:00
|
|
|
/* There is nowhere to write, so all non-sync
|
|
|
|
* drives must be failed - so we are finished
|
|
|
|
*/
|
2005-06-21 17:17:13 -07:00
|
|
|
sector_t rv = max_sector - sector_nr;
|
|
|
|
*skipped = 1;
|
2005-04-16 15:20:36 -07:00
|
|
|
put_buf(r1_bio);
|
|
|
|
return rv;
|
|
|
|
}
|
|
|
|
|
2008-02-06 02:39:52 -07:00
|
|
|
if (max_sector > mddev->resync_max)
|
|
|
|
max_sector = mddev->resync_max; /* Don't do IO beyond here */
|
2005-04-16 15:20:36 -07:00
|
|
|
nr_sectors = 0;
|
2005-06-21 17:17:24 -07:00
|
|
|
sync_blocks = 0;
|
2005-04-16 15:20:36 -07:00
|
|
|
do {
|
|
|
|
struct page *page;
|
|
|
|
int len = PAGE_SIZE;
|
|
|
|
if (sector_nr + (len>>9) > max_sector)
|
|
|
|
len = (max_sector - sector_nr) << 9;
|
|
|
|
if (len == 0)
|
|
|
|
break;
|
2005-07-15 03:56:35 -07:00
|
|
|
if (sync_blocks == 0) {
|
|
|
|
if (!bitmap_start_sync(mddev->bitmap, sector_nr,
|
2005-11-08 22:39:38 -07:00
|
|
|
&sync_blocks, still_degraded) &&
|
|
|
|
!conf->fullsync &&
|
|
|
|
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
|
2005-07-15 03:56:35 -07:00
|
|
|
break;
|
2006-03-31 16:08:49 -07:00
|
|
|
BUG_ON(sync_blocks < (PAGE_SIZE>>9));
|
2005-07-15 03:56:35 -07:00
|
|
|
if (len > (sync_blocks<<9))
|
|
|
|
len = sync_blocks<<9;
|
2005-06-21 17:17:23 -07:00
|
|
|
}
|
2005-06-21 17:17:23 -07:00
|
|
|
|
2005-04-16 15:20:36 -07:00
|
|
|
for (i=0 ; i < conf->raid_disks; i++) {
|
|
|
|
bio = r1_bio->bios[i];
|
|
|
|
if (bio->bi_end_io) {
|
2006-01-06 01:20:26 -07:00
|
|
|
page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
|
2005-04-16 15:20:36 -07:00
|
|
|
if (bio_add_page(bio, page, len, 0) == 0) {
|
|
|
|
/* stop here */
|
2006-01-06 01:20:26 -07:00
|
|
|
bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
|
2005-04-16 15:20:36 -07:00
|
|
|
while (i > 0) {
|
|
|
|
i--;
|
|
|
|
bio = r1_bio->bios[i];
|
2005-07-15 03:56:35 -07:00
|
|
|
if (bio->bi_end_io==NULL)
|
|
|
|
continue;
|
2005-04-16 15:20:36 -07:00
|
|
|
/* remove last page from this bio */
|
|
|
|
bio->bi_vcnt--;
|
|
|
|
bio->bi_size -= len;
|
|
|
|
bio->bi_flags &= ~(1<< BIO_SEG_VALID);
|
|
|
|
}
|
|
|
|
goto bio_full;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
nr_sectors += len>>9;
|
|
|
|
sector_nr += len>>9;
|
2005-06-21 17:17:23 -07:00
|
|
|
sync_blocks -= (len>>9);
|
2005-04-16 15:20:36 -07:00
|
|
|
} while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
|
|
|
|
bio_full:
|
|
|
|
r1_bio->sectors = nr_sectors;
|
|
|
|
|
2006-01-06 01:20:26 -07:00
|
|
|
/* For a user-requested sync, we read all readable devices and do a
|
|
|
|
* compare
|
|
|
|
*/
|
|
|
|
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
|
|
|
|
atomic_set(&r1_bio->remaining, read_targets);
|
|
|
|
for (i=0; i<conf->raid_disks; i++) {
|
|
|
|
bio = r1_bio->bios[i];
|
|
|
|
if (bio->bi_end_io == end_sync_read) {
|
2006-08-31 21:27:36 -07:00
|
|
|
md_sync_acct(bio->bi_bdev, nr_sectors);
|
2006-01-06 01:20:26 -07:00
|
|
|
generic_make_request(bio);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
atomic_set(&r1_bio->remaining, 1);
|
|
|
|
bio = r1_bio->bios[r1_bio->read_disk];
|
2006-08-31 21:27:36 -07:00
|
|
|
md_sync_acct(bio->bi_bdev, nr_sectors);
|
2006-01-06 01:20:26 -07:00
|
|
|
generic_make_request(bio);
|
2005-04-16 15:20:36 -07:00
|
|
|
|
2006-01-06 01:20:26 -07:00
|
|
|
}
|
2005-04-16 15:20:36 -07:00
|
|
|
return nr_sectors;
|
|
|
|
}
|
|
|
|
|
2009-03-17 18:10:40 -07:00
|
|
|
static sector_t raid1_size(mddev_t *mddev, sector_t sectors, int raid_disks)
|
|
|
|
{
|
|
|
|
if (sectors)
|
|
|
|
return sectors;
|
|
|
|
|
|
|
|
return mddev->dev_sectors;
|
|
|
|
}
|
|
|
|
|
2009-12-13 18:49:51 -07:00
|
|
|
static conf_t *setup_conf(mddev_t *mddev)
|
2005-04-16 15:20:36 -07:00
|
|
|
{
|
|
|
|
conf_t *conf;
|
2009-12-13 18:49:51 -07:00
|
|
|
int i;
|
2005-04-16 15:20:36 -07:00
|
|
|
mirror_info_t *disk;
|
|
|
|
mdk_rdev_t *rdev;
|
2009-12-13 18:49:51 -07:00
|
|
|
int err = -ENOMEM;
|
2005-04-16 15:20:36 -07:00
|
|
|
|
2006-01-06 01:20:32 -07:00
|
|
|
conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
|
2005-04-16 15:20:36 -07:00
|
|
|
if (!conf)
|
2009-12-13 18:49:51 -07:00
|
|
|
goto abort;
|
2005-04-16 15:20:36 -07:00
|
|
|
|
2006-01-06 01:20:32 -07:00
|
|
|
conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
|
2005-04-16 15:20:36 -07:00
|
|
|
GFP_KERNEL);
|
|
|
|
if (!conf->mirrors)
|
2009-12-13 18:49:51 -07:00
|
|
|
goto abort;
|
2005-04-16 15:20:36 -07:00
|
|
|
|
2006-01-06 01:20:19 -07:00
|
|
|
conf->tmppage = alloc_page(GFP_KERNEL);
|
|
|
|
if (!conf->tmppage)
|
2009-12-13 18:49:51 -07:00
|
|
|
goto abort;
|
2006-01-06 01:20:19 -07:00
|
|
|
|
2009-12-13 18:49:51 -07:00
|
|
|
conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
|
2005-04-16 15:20:36 -07:00
|
|
|
if (!conf->poolinfo)
|
2009-12-13 18:49:51 -07:00
|
|
|
goto abort;
|
2005-04-16 15:20:36 -07:00
|
|
|
conf->poolinfo->raid_disks = mddev->raid_disks;
|
|
|
|
conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
|
|
|
|
r1bio_pool_free,
|
|
|
|
conf->poolinfo);
|
|
|
|
if (!conf->r1bio_pool)
|
2009-12-13 18:49:51 -07:00
|
|
|
goto abort;
|
|
|
|
|
2009-10-15 21:55:44 -07:00
|
|
|
conf->poolinfo->mddev = mddev;
|
2005-04-16 15:20:36 -07:00
|
|
|
|
2008-05-14 16:05:54 -07:00
|
|
|
spin_lock_init(&conf->device_lock);
|
2009-01-08 14:31:08 -07:00
|
|
|
list_for_each_entry(rdev, &mddev->disks, same_set) {
|
2009-12-13 18:49:51 -07:00
|
|
|
int disk_idx = rdev->raid_disk;
|
2005-04-16 15:20:36 -07:00
|
|
|
if (disk_idx >= mddev->raid_disks
|
|
|
|
|| disk_idx < 0)
|
|
|
|
continue;
|
|
|
|
disk = conf->mirrors + disk_idx;
|
|
|
|
|
|
|
|
disk->rdev = rdev;
|
|
|
|
|
|
|
|
disk->head_position = 0;
|
|
|
|
}
|
|
|
|
conf->raid_disks = mddev->raid_disks;
|
|
|
|
conf->mddev = mddev;
|
|
|
|
INIT_LIST_HEAD(&conf->retry_list);
|
|
|
|
|
|
|
|
spin_lock_init(&conf->resync_lock);
|
2006-01-06 01:20:12 -07:00
|
|
|
init_waitqueue_head(&conf->wait_barrier);
|
2005-04-16 15:20:36 -07:00
|
|
|
|
2005-06-21 17:17:23 -07:00
|
|
|
bio_list_init(&conf->pending_bio_list);
|
|
|
|
bio_list_init(&conf->flushing_bio_list);
|
|
|
|
|
2009-12-13 18:49:51 -07:00
|
|
|
conf->last_used = -1;
|
2005-04-16 15:20:36 -07:00
|
|
|
for (i = 0; i < conf->raid_disks; i++) {
|
|
|
|
|
|
|
|
disk = conf->mirrors + i;
|
|
|
|
|
2006-06-26 00:27:40 -07:00
|
|
|
if (!disk->rdev ||
|
|
|
|
!test_bit(In_sync, &disk->rdev->flags)) {
|
2005-04-16 15:20:36 -07:00
|
|
|
disk->head_position = 0;
|
2007-08-22 14:01:52 -07:00
|
|
|
if (disk->rdev)
|
|
|
|
conf->fullsync = 1;
|
2009-12-13 18:49:51 -07:00
|
|
|
} else if (conf->last_used < 0)
|
|
|
|
/*
|
|
|
|
* The first working device is used as a
|
|
|
|
* starting point for read balancing.
|
|
|
|
*/
|
|
|
|
conf->last_used = i;
|
2005-04-16 15:20:36 -07:00
|
|
|
}
|
2009-12-13 18:49:51 -07:00
|
|
|
|
|
|
|
err = -EIO;
|
|
|
|
if (conf->last_used < 0) {
|
2006-10-03 01:15:52 -07:00
|
|
|
printk(KERN_ERR "raid1: no operational mirrors for %s\n",
|
2009-12-13 18:49:51 -07:00
|
|
|
mdname(mddev));
|
|
|
|
goto abort;
|
|
|
|
}
|
|
|
|
err = -ENOMEM;
|
|
|
|
conf->thread = md_register_thread(raid1d, mddev, NULL);
|
|
|
|
if (!conf->thread) {
|
|
|
|
printk(KERN_ERR
|
|
|
|
"raid1: couldn't allocate thread for %s\n",
|
|
|
|
mdname(mddev));
|
|
|
|
goto abort;
|
2006-10-03 01:15:52 -07:00
|
|
|
}
|
2005-04-16 15:20:36 -07:00
|
|
|
|
2009-12-13 18:49:51 -07:00
|
|
|
return conf;
|
|
|
|
|
|
|
|
abort:
|
|
|
|
if (conf) {
|
|
|
|
if (conf->r1bio_pool)
|
|
|
|
mempool_destroy(conf->r1bio_pool);
|
|
|
|
kfree(conf->mirrors);
|
|
|
|
safe_put_page(conf->tmppage);
|
|
|
|
kfree(conf->poolinfo);
|
|
|
|
kfree(conf);
|
|
|
|
}
|
|
|
|
return ERR_PTR(err);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int run(mddev_t *mddev)
|
|
|
|
{
|
|
|
|
conf_t *conf;
|
|
|
|
int i;
|
|
|
|
mdk_rdev_t *rdev;
|
|
|
|
|
|
|
|
if (mddev->level != 1) {
|
|
|
|
printk("raid1: %s: raid level not set to mirroring (%d)\n",
|
|
|
|
mdname(mddev), mddev->level);
|
|
|
|
return -EIO;
|
|
|
|
}
|
|
|
|
if (mddev->reshape_position != MaxSector) {
|
|
|
|
printk("raid1: %s: reshape_position set but not supported\n",
|
|
|
|
mdname(mddev));
|
|
|
|
return -EIO;
|
|
|
|
}
|
2005-04-16 15:20:36 -07:00
|
|
|
/*
|
2009-12-13 18:49:51 -07:00
|
|
|
* copy the already verified devices into our private RAID1
|
|
|
|
* bookkeeping area. [whatever we allocate in run(),
|
|
|
|
* should be freed in stop()]
|
2005-04-16 15:20:36 -07:00
|
|
|
*/
|
2009-12-13 18:49:51 -07:00
|
|
|
if (mddev->private == NULL)
|
|
|
|
conf = setup_conf(mddev);
|
|
|
|
else
|
|
|
|
conf = mddev->private;
|
2005-04-16 15:20:36 -07:00
|
|
|
|
2009-12-13 18:49:51 -07:00
|
|
|
if (IS_ERR(conf))
|
|
|
|
return PTR_ERR(conf);
|
2005-04-16 15:20:36 -07:00
|
|
|
|
2009-12-13 18:49:51 -07:00
|
|
|
mddev->queue->queue_lock = &conf->device_lock;
|
|
|
|
list_for_each_entry(rdev, &mddev->disks, same_set) {
|
|
|
|
disk_stack_limits(mddev->gendisk, rdev->bdev,
|
|
|
|
rdev->data_offset << 9);
|
|
|
|
/* as we don't honour merge_bvec_fn, we must never risk
|
|
|
|
* violating it, so limit ->max_sector to one PAGE, as
|
|
|
|
* a one page request is never in violation.
|
|
|
|
*/
|
|
|
|
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
|
|
|
|
queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
|
2010-02-25 22:20:38 -07:00
|
|
|
blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
|
2005-04-16 15:20:36 -07:00
|
|
|
}
|
2005-06-21 17:17:23 -07:00
|
|
|
|
2009-12-13 18:49:51 -07:00
|
|
|
mddev->degraded = 0;
|
|
|
|
for (i=0; i < conf->raid_disks; i++)
|
|
|
|
if (conf->mirrors[i].rdev == NULL ||
|
|
|
|
!test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
|
|
|
|
test_bit(Faulty, &conf->mirrors[i].rdev->flags))
|
|
|
|
mddev->degraded++;
|
|
|
|
|
|
|
|
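/* With only one working device there is nothing to resync against,
 * so treat the array as clean.
 */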
if (conf->raid_disks - mddev->degraded == 1)
|
|
|
|
mddev->recovery_cp = MaxSector;
|
|
|
|
|
2009-06-17 15:48:06 -07:00
|
|
|
if (mddev->recovery_cp != MaxSector)
|
|
|
|
printk(KERN_NOTICE "raid1: %s is not clean"
|
|
|
|
" -- starting background reconstruction\n",
|
|
|
|
mdname(mddev));
|
2005-04-16 15:20:36 -07:00
|
|
|
printk(KERN_INFO
|
|
|
|
"raid1: raid set %s active with %d out of %d mirrors\n",
|
|
|
|
mdname(mddev), mddev->raid_disks - mddev->degraded,
|
|
|
|
mddev->raid_disks);
|
2009-12-13 18:49:51 -07:00
|
|
|
|
2005-04-16 15:20:36 -07:00
|
|
|
/*
|
|
|
|
* Ok, everything is just fine now
|
|
|
|
*/
|
2009-12-13 18:49:51 -07:00
|
|
|
mddev->thread = conf->thread;
|
|
|
|
conf->thread = NULL;
|
|
|
|
mddev->private = conf;
|
|
|
|
|
2009-03-30 20:59:03 -07:00
|
|
|
md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
|
2005-04-16 15:20:36 -07:00
|
|
|
|
2005-05-16 21:53:16 -07:00
|
|
|
mddev->queue->unplug_fn = raid1_unplug;
|
2006-10-03 01:15:54 -07:00
|
|
|
mddev->queue->backing_dev_info.congested_fn = raid1_congested;
|
|
|
|
mddev->queue->backing_dev_info.congested_data = mddev;
|
2009-08-02 17:59:47 -07:00
|
|
|
md_integrity_register(mddev);
|
2005-04-16 15:20:36 -07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int stop(mddev_t *mddev)
|
|
|
|
{
|
2009-06-15 23:54:21 -07:00
|
|
|
conf_t *conf = mddev->private;
|
2005-09-09 16:23:47 -07:00
|
|
|
struct bitmap *bitmap = mddev->bitmap;
|
|
|
|
int behind_wait = 0;
|
|
|
|
|
|
|
|
/* wait for behind writes to complete */
|
|
|
|
while (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
|
|
|
|
behind_wait++;
|
|
|
|
printk(KERN_INFO "raid1: behind writes in progress on device %s, waiting to stop (%d)\n", mdname(mddev), behind_wait);
|
|
|
|
set_current_state(TASK_UNINTERRUPTIBLE);
|
|
|
|
schedule_timeout(HZ); /* wait a second */
|
|
|
|
/* need to kick something here to make sure I/O goes? */
|
|
|
|
}
|
2005-04-16 15:20:36 -07:00
|
|
|
|
2009-03-30 20:39:39 -07:00
|
|
|
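/* Raising and immediately lowering the barrier waits for in-flight
 * normal IO to drain before tearing the array down.
 */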
raise_barrier(conf);
|
|
|
|
lower_barrier(conf);
|
|
|
|
|
2005-04-16 15:20:36 -07:00
|
|
|
md_unregister_thread(mddev->thread);
|
|
|
|
mddev->thread = NULL;
|
|
|
|
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
|
|
|
|
if (conf->r1bio_pool)
|
|
|
|
mempool_destroy(conf->r1bio_pool);
|
2005-06-21 17:17:30 -07:00
|
|
|
kfree(conf->mirrors);
|
|
|
|
kfree(conf->poolinfo);
|
2005-04-16 15:20:36 -07:00
|
|
|
kfree(conf);
|
|
|
|
mddev->private = NULL;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int raid1_resize(mddev_t *mddev, sector_t sectors)
{
	/* no resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	md_set_array_sectors(mddev, raid1_size(mddev, sectors, 0));
	if (mddev->array_sectors > raid1_size(mddev, sectors, 0))
		return -EINVAL;
	set_capacity(mddev->gendisk, mddev->array_sectors);
	mddev->changed = 1;
	revalidate_disk(mddev->gendisk);
	if (sectors > mddev->dev_sectors &&
	    mddev->recovery_cp == MaxSector) {
		mddev->recovery_cp = mddev->dev_sectors;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->dev_sectors = sectors;
	mddev->resync_max_sectors = sectors;
	return 0;
}

static int raid1_reshape(mddev_t *mddev)
{
	/* We need to:
	 * 1/ resize the r1bio_pool
	 * 2/ resize conf->mirrors
	 *
	 * We allocate a new r1bio_pool if we can.
	 * Then raise a device barrier and wait until all IO stops.
	 * Then resize conf->mirrors and swap in the new r1bio pool.
	 *
	 * At the same time, we "pack" the devices so that all the missing
	 * devices have the higher raid_disk numbers.
	 */
	mempool_t *newpool, *oldpool;
	struct pool_info *newpoolinfo;
	mirror_info_t *newmirrors;
	conf_t *conf = mddev->private;
	int cnt, raid_disks;
	unsigned long flags;
	int d, d2, err;

	/* Cannot change chunk_size, layout, or level */
	if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
	    mddev->layout != mddev->new_layout ||
	    mddev->level != mddev->new_level) {
		mddev->new_chunk_sectors = mddev->chunk_sectors;
		mddev->new_layout = mddev->layout;
		mddev->new_level = mddev->level;
		return -EINVAL;
	}

	err = md_allow_write(mddev);
	if (err)
		return err;

	raid_disks = mddev->raid_disks + mddev->delta_disks;

	if (raid_disks < conf->raid_disks) {
		cnt=0;
		for (d= 0; d < conf->raid_disks; d++)
			if (conf->mirrors[d].rdev)
				cnt++;
		if (cnt > raid_disks)
			return -EBUSY;
	}

	newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
	if (!newpoolinfo)
		return -ENOMEM;
	newpoolinfo->mddev = mddev;
	newpoolinfo->raid_disks = raid_disks;

	newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
				 r1bio_pool_free, newpoolinfo);
	if (!newpool) {
		kfree(newpoolinfo);
		return -ENOMEM;
	}
	newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL);
	if (!newmirrors) {
		kfree(newpoolinfo);
		mempool_destroy(newpool);
		return -ENOMEM;
	}

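	/* block new I/O and wait for everything in flight to complete */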
	raise_barrier(conf);

	/* ok, everything is stopped */
	oldpool = conf->r1bio_pool;
	conf->r1bio_pool = newpool;

	for (d = d2 = 0; d < conf->raid_disks; d++) {
		mdk_rdev_t *rdev = conf->mirrors[d].rdev;
		if (rdev && rdev->raid_disk != d2) {
			char nm[20];
			sprintf(nm, "rd%d", rdev->raid_disk);
			sysfs_remove_link(&mddev->kobj, nm);
			rdev->raid_disk = d2;
			sprintf(nm, "rd%d", rdev->raid_disk);
			sysfs_remove_link(&mddev->kobj, nm);
			if (sysfs_create_link(&mddev->kobj,
					      &rdev->kobj, nm))
				printk(KERN_WARNING
				       "md/raid1: cannot register "
				       "%s for %s\n",
				       nm, mdname(mddev));
		}
		if (rdev)
			newmirrors[d2++].rdev = rdev;
	}
	kfree(conf->mirrors);
	conf->mirrors = newmirrors;
	kfree(conf->poolinfo);
	conf->poolinfo = newpoolinfo;

	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded += (raid_disks - conf->raid_disks);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	conf->raid_disks = mddev->raid_disks = raid_disks;
	mddev->delta_disks = 0;

	conf->last_used = 0; /* just make sure it is in-range */
	lower_barrier(conf);

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);

	mempool_destroy(oldpool);
	return 0;
}

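/*
 * Quiesce the array: state 1 blocks new I/O and waits for anything in
 * flight to drain, state 0 lets I/O flow again, and state 2 just wakes
 * processes waiting on the barrier (used around array suspend).
 */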
static void raid1_quiesce(mddev_t *mddev, int state)
{
	conf_t *conf = mddev->private;

	switch(state) {
	case 2: /* wake for suspend */
		wake_up(&conf->wait_barrier);
		break;
	case 1:
		raise_barrier(conf);
		break;
	case 0:
		lower_barrier(conf);
		break;
	}
}

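/*
 * A 2-device raid5 holds identical data on both devices (the parity of
 * a single data block is the block itself), so it can be taken over as
 * raid1 in place.  The returned conf has its barrier raised so the new
 * array starts out quiesced until md completes the level change.
 */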
static void *raid1_takeover(mddev_t *mddev)
{
	/* raid1 can take over:
	 *  raid5 with 2 devices, any layout or chunk size
	 */
	if (mddev->level == 5 && mddev->raid_disks == 2) {
		conf_t *conf;
		mddev->new_level = 1;
		mddev->new_layout = 0;
		mddev->new_chunk_sectors = 0;
		conf = setup_conf(mddev);
		if (!IS_ERR(conf))
			conf->barrier = 1;
		return conf;
	}
	return ERR_PTR(-EINVAL);
}

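/*
 * Operations raid1 registers with the md core.
 */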
static struct mdk_personality raid1_personality =
{
	.name		= "raid1",
	.level		= 1,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid1_add_disk,
	.hot_remove_disk= raid1_remove_disk,
	.spare_active	= raid1_spare_active,
	.sync_request	= sync_request,
	.resize		= raid1_resize,
	.size		= raid1_size,
	.check_reshape	= raid1_reshape,
	.quiesce	= raid1_quiesce,
	.takeover	= raid1_takeover,
};

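/* module load/unload just (un)registers the raid1 personality */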
static int __init raid_init(void)
{
	return register_md_personality(&raid1_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&raid1_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
MODULE_ALIAS("md-personality-3"); /* RAID1 */
MODULE_ALIAS("md-raid1");
MODULE_ALIAS("md-level-1");