2005-04-17 02:20:36 +04:00
# ifndef _RAID10_H
# define _RAID10_H
2012-07-31 04:03:52 +04:00
struct raid10_info {
2011-12-23 03:17:54 +04:00
struct md_rdev * rdev , * replacement ;
2005-04-17 02:20:36 +04:00
sector_t head_position ;
2011-07-27 05:00:36 +04:00
int recovery_disabled ; /* matches
* mddev - > recovery_disabled
* when we shouldn ' t try
* recovering this device .
*/
2005-04-17 02:20:36 +04:00
} ;
2011-10-11 09:49:02 +04:00
struct r10conf {
2011-10-11 09:47:53 +04:00
struct mddev * mddev ;
2012-07-31 04:03:52 +04:00
struct raid10_info * mirrors ;
struct raid10_info * mirrors_new , * mirrors_old ;
2005-04-17 02:20:36 +04:00
spinlock_t device_lock ;
/* geometry */
2012-05-21 03:28:20 +04:00
struct geom {
int raid_disks ;
int near_copies ; /* number of copies laid out
2011-12-23 03:17:54 +04:00
* raid0 style */
2012-05-21 03:28:20 +04:00
int far_copies ; /* number of copies laid out
2005-04-17 02:20:36 +04:00
* at large strides across drives
*/
2012-05-21 03:28:20 +04:00
int far_offset ; /* far_copies are offset by 1
2011-12-23 03:17:54 +04:00
* stripe instead of many
2006-06-26 11:27:41 +04:00
*/
2012-05-21 03:28:20 +04:00
sector_t stride ; /* distance between far copies.
2006-06-26 11:27:41 +04:00
* This is size / far_copies unless
* far_offset , in which case it is
* 1 stripe .
2005-04-17 02:20:36 +04:00
*/
2012-05-21 03:28:20 +04:00
int chunk_shift ; /* shift from chunks to sectors */
sector_t chunk_mask ;
2012-05-21 03:28:33 +04:00
} prev , geo ;
2012-05-21 03:28:20 +04:00
int copies ; /* near_copies * far_copies.
* must be < = raid_disks
*/
2005-04-17 02:20:36 +04:00
2011-12-23 03:17:54 +04:00
sector_t dev_sectors ; /* temp copy of
* mddev - > dev_sectors */
2012-05-21 03:28:33 +04:00
sector_t reshape_progress ;
2012-05-22 07:53:47 +04:00
sector_t reshape_safe ;
unsigned long reshape_checkpoint ;
sector_t offset_diff ;
2010-03-08 08:02:45 +03:00
2005-04-17 02:20:36 +04:00
struct list_head retry_list ;
2006-01-06 11:20:16 +03:00
/* queue pending writes and submit them on unplug */
struct bio_list pending_bio_list ;
2011-10-11 09:50:01 +04:00
int pending_count ;
2005-04-17 02:20:36 +04:00
spinlock_t resync_lock ;
2011-12-23 03:17:54 +04:00
int nr_pending ;
int nr_waiting ;
int nr_queued ;
int barrier ;
2005-04-17 02:20:36 +04:00
sector_t next_resync ;
2006-01-06 11:20:16 +03:00
int fullsync ; /* set to 1 if a full sync is needed,
* ( fresh device added ) .
* Cleared when a sync completes .
*/
2011-12-23 03:17:54 +04:00
int have_replacement ; /* There is at least one
* replacement device .
*/
2006-01-06 11:20:13 +03:00
wait_queue_head_t wait_barrier ;
2005-04-17 02:20:36 +04:00
2011-12-23 03:17:54 +04:00
mempool_t * r10bio_pool ;
mempool_t * r10buf_pool ;
2006-01-06 11:20:28 +03:00
struct page * tmppage ;
2010-03-08 08:02:45 +03:00
/* When taking over an array from a different personality, we store
* the new thread here until we fully activate the array .
*/
2011-10-11 09:48:23 +04:00
struct md_thread * thread ;
2005-04-17 02:20:36 +04:00
} ;
/*
* this is our ' private ' RAID10 bio .
*
* it contains information about what kind of IO operations were started
* for this RAID10 operation , and about their status :
*/
2011-10-11 09:48:43 +04:00
struct r10bio {
2005-04-17 02:20:36 +04:00
atomic_t remaining ; /* 'have we finished' count,
* used from IRQ handlers
*/
sector_t sector ; /* virtual sector number */
int sectors ;
unsigned long state ;
2011-10-11 09:47:53 +04:00
struct mddev * mddev ;
2005-04-17 02:20:36 +04:00
/*
* original bio going to / dev / mdx
*/
struct bio * master_bio ;
/*
* if the IO is in READ direction , then this is where we read
*/
int read_slot ;
struct list_head retry_list ;
/*
* if the IO is in WRITE direction , then multiple bios are used ,
* one for each copy .
* When resyncing we also use one for each copy .
* When reconstructing , we use 2 bios , one for read , one for write .
* We choose the number when they are allocated .
2011-12-23 03:17:54 +04:00
* We sometimes need an extra bio to write to the replacement .
2005-04-17 02:20:36 +04:00
*/
2012-08-18 03:51:42 +04:00
struct r10dev {
2011-12-23 03:17:54 +04:00
struct bio * bio ;
union {
struct bio * repl_bio ; /* used for resync and
* writes */
struct md_rdev * rdev ; /* used for reads
* ( read_slot > = 0 ) */
} ;
sector_t addr ;
int devnum ;
2005-04-17 02:20:36 +04:00
} devs [ 0 ] ;
} ;
/* bits for r10bio.state */
enum r10bio_state {
	R10BIO_Uptodate,
	R10BIO_IsSync,
	R10BIO_IsRecover,
	R10BIO_IsReshape,
	R10BIO_Degraded,
	/* Set ReadError on bios that experience a read error
	 * so that raid10d knows what to do with them.
	 */
	R10BIO_ReadError,
	/* If a write for this request means we can clear some
	 * known-bad-block records, we set this flag.
	 */
	R10BIO_MadeGood,
	R10BIO_WriteError,
	/* During a reshape we might be performing IO on the
	 * 'previous' part of the array, in which case this
	 * flag is set
	 */
	R10BIO_Previous,
};
/* NOTE(review): defined in raid10.c; presumably reports backing-device
 * congestion for the array — confirm against the definition. */
extern int md_raid10_congested(struct mddev *mddev, int bits);
2005-04-17 02:20:36 +04:00
# endif