2005-04-17 02:20:36 +04:00
# ifndef _RAID10_H
# define _RAID10_H
struct mirror_info {
2011-12-23 03:17:54 +04:00
struct md_rdev * rdev , * replacement ;
2005-04-17 02:20:36 +04:00
sector_t head_position ;
2011-07-27 05:00:36 +04:00
int recovery_disabled ; /* matches
* mddev - > recovery_disabled
* when we shouldn ' t try
* recovering this device .
*/
2005-04-17 02:20:36 +04:00
} ;
2011-10-11 09:49:02 +04:00
struct r10conf {
2011-10-11 09:47:53 +04:00
struct mddev * mddev ;
2011-10-11 09:48:46 +04:00
struct mirror_info * mirrors ;
2005-04-17 02:20:36 +04:00
int raid_disks ;
spinlock_t device_lock ;
/* geometry */
2011-12-23 03:17:54 +04:00
int near_copies ; /* number of copies laid out
* raid0 style */
2011-03-31 05:57:33 +04:00
int far_copies ; /* number of copies laid out
2005-04-17 02:20:36 +04:00
* at large strides across drives
*/
2011-12-23 03:17:54 +04:00
int far_offset ; /* far_copies are offset by 1
* stripe instead of many
2006-06-26 11:27:41 +04:00
*/
2005-04-17 02:20:36 +04:00
int copies ; /* near_copies * far_copies.
* must be < = raid_disks
*/
sector_t stride ; /* distance between far copies.
2006-06-26 11:27:41 +04:00
* This is size / far_copies unless
* far_offset , in which case it is
* 1 stripe .
2005-04-17 02:20:36 +04:00
*/
2011-12-23 03:17:54 +04:00
sector_t dev_sectors ; /* temp copy of
* mddev - > dev_sectors */
2010-03-08 08:02:45 +03:00
2011-12-23 03:17:54 +04:00
int chunk_shift ; /* shift from chunks to sectors */
sector_t chunk_mask ;
2005-04-17 02:20:36 +04:00
struct list_head retry_list ;
2006-01-06 11:20:16 +03:00
/* queue pending writes and submit them on unplug */
struct bio_list pending_bio_list ;
2011-10-11 09:50:01 +04:00
int pending_count ;
2005-04-17 02:20:36 +04:00
spinlock_t resync_lock ;
2011-12-23 03:17:54 +04:00
int nr_pending ;
int nr_waiting ;
int nr_queued ;
int barrier ;
2005-04-17 02:20:36 +04:00
sector_t next_resync ;
2006-01-06 11:20:16 +03:00
int fullsync ; /* set to 1 if a full sync is needed,
* ( fresh device added ) .
* Cleared when a sync completes .
*/
2011-12-23 03:17:54 +04:00
int have_replacement ; /* There is at least one
* replacement device .
*/
2006-01-06 11:20:13 +03:00
wait_queue_head_t wait_barrier ;
2005-04-17 02:20:36 +04:00
2011-12-23 03:17:54 +04:00
mempool_t * r10bio_pool ;
mempool_t * r10buf_pool ;
2006-01-06 11:20:28 +03:00
struct page * tmppage ;
2010-03-08 08:02:45 +03:00
/* When taking over an array from a different personality, we store
* the new thread here until we fully activate the array .
*/
2011-10-11 09:48:23 +04:00
struct md_thread * thread ;
2005-04-17 02:20:36 +04:00
} ;
/*
* this is our ' private ' RAID10 bio .
*
* it contains information about what kind of IO operations were started
* for this RAID10 operation , and about their status :
*/
2011-10-11 09:48:43 +04:00
struct r10bio {
2005-04-17 02:20:36 +04:00
atomic_t remaining ; /* 'have we finished' count,
* used from IRQ handlers
*/
sector_t sector ; /* virtual sector number */
int sectors ;
unsigned long state ;
2011-10-11 09:47:53 +04:00
struct mddev * mddev ;
2005-04-17 02:20:36 +04:00
/*
* original bio going to / dev / mdx
*/
struct bio * master_bio ;
/*
* if the IO is in READ direction , then this is where we read
*/
int read_slot ;
struct list_head retry_list ;
/*
* if the IO is in WRITE direction , then multiple bios are used ,
* one for each copy .
* When resyncing we also use one for each copy .
* When reconstructing , we use 2 bios , one for read , one for write .
* We choose the number when they are allocated .
2011-12-23 03:17:54 +04:00
* We sometimes need an extra bio to write to the replacement .
2005-04-17 02:20:36 +04:00
*/
struct {
2011-12-23 03:17:54 +04:00
struct bio * bio ;
union {
struct bio * repl_bio ; /* used for resync and
* writes */
struct md_rdev * rdev ; /* used for reads
* ( read_slot > = 0 ) */
} ;
sector_t addr ;
int devnum ;
2005-04-17 02:20:36 +04:00
} devs [ 0 ] ;
} ;
/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error. To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context. So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

/* True for the sentinel values above (and for NULL); the argument is
 * parenthesized so any pointer expression can be passed safely.
 */
#define BIO_SPECIAL(bio) ((unsigned long)(bio) <= 2)
/* bits for r10bio.state */
enum r10bio_state {
	R10BIO_Uptodate,
	R10BIO_IsSync,
	R10BIO_IsRecover,
	R10BIO_Degraded,
	/* Set ReadError on bios that experience a read error
	 * so that raid10d knows what to do with them.
	 */
	R10BIO_ReadError,
	/* If a write for this request means we can clear some
	 * known-bad-block records, we set this flag.
	 */
	R10BIO_MadeGood,
	R10BIO_WriteError,
};
#endif