Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  cciss: fix cciss_revalidate panic
  block: max hardware sectors limit wrapper
  block: Deprecate QUEUE_FLAG_CLUSTER and use queue_limits instead
  blk-throttle: Correct the placement of smp_rmb()
  blk-throttle: Trim/adjust slice_end once a bio has been dispatched
  block: check for proper length of iov entries earlier in blk_rq_map_user_iov()
  drbd: fix for spin_lock_irqsave in endio callback
  drbd: don't recvmsg with zero length
commit 7f8635cc9e
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	for (i = 0; i < iov_count; i++) {
 		unsigned long uaddr = (unsigned long)iov[i].iov_base;
 
+		if (!iov[i].iov_len)
+			return -EINVAL;
+
 		if (uaddr & queue_dma_alignment(q)) {
 			unaligned = 1;
 			break;
 		}
-		if (!iov[i].iov_len)
-			return -EINVAL;
 	}
 
 	if (unaligned || (q->dma_pad_mask & len) || map_data)
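The point of the reordering: the old loop could hit an unaligned address and break out before ever checking later entries, so a zero-length iovec slipped through to the mapping path. Checking iov_len first rejects it up front. A small userspace model of the new check order, for illustration only (the function and parameter names below are not kernel API):

#include <errno.h>
#include <sys/uio.h>

/* Model of the reordered validation: reject any zero-length entry before
 * the alignment test, so a "break" on an unaligned address can no longer
 * skip the length check for entries that follow it. */
int validate_iov(const struct iovec *iov, int iov_count,
		 unsigned long dma_align_mask, int *unaligned)
{
	int i;

	*unaligned = 0;
	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (!iov[i].iov_len)
			return -EINVAL;

		if (uaddr & dma_align_mask) {
			*unaligned = 1;
			break;
		}
	}
	return 0;
}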
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -21,7 +21,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 		return 0;
 
 	fbio = bio;
-	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+	cluster = blk_queue_cluster(q);
 	seg_size = 0;
 	nr_phys_segs = 0;
 	for_each_bio(bio) {
@@ -87,7 +87,7 @@ EXPORT_SYMBOL(blk_recount_segments);
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 				   struct bio *nxt)
 {
-	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+	if (!blk_queue_cluster(q))
 		return 0;
 
 	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
@@ -123,7 +123,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	int nsegs, cluster;
 
 	nsegs = 0;
-	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+	cluster = blk_queue_cluster(q);
 
 	/*
 	 * for each bio in rq
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -126,7 +126,7 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->alignment_offset = 0;
 	lim->io_opt = 0;
 	lim->misaligned = 0;
-	lim->no_cluster = 0;
+	lim->cluster = 1;
 }
 EXPORT_SYMBOL(blk_set_default_limits);
 
@@ -229,8 +229,8 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
- * blk_queue_max_hw_sectors - set max sectors for a request for this queue
- * @q:  the request queue for the device
+ * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
+ * @limits: the queue limits
  * @max_hw_sectors:  max hardware sectors in the usual 512b unit
  *
  * Description:
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
  *    The soft limit can not exceed max_hw_sectors.
  **/
-void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
+void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
 {
 	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
@@ -252,9 +252,23 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
 		       __func__, max_hw_sectors);
 	}
 
-	q->limits.max_hw_sectors = max_hw_sectors;
-	q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
-				      BLK_DEF_MAX_SECTORS);
+	limits->max_hw_sectors = max_hw_sectors;
+	limits->max_sectors = min_t(unsigned int, max_hw_sectors,
+				    BLK_DEF_MAX_SECTORS);
+}
+EXPORT_SYMBOL(blk_limits_max_hw_sectors);
+
+/**
+ * blk_queue_max_hw_sectors - set max sectors for a request for this queue
+ * @q:  the request queue for the device
+ * @max_hw_sectors:  max hardware sectors in the usual 512b unit
+ *
+ * Description:
+ *    See description for blk_limits_max_hw_sectors().
+ **/
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
+{
+	blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
@@ -464,15 +478,6 @@ EXPORT_SYMBOL(blk_queue_io_opt);
 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
 	blk_stack_limits(&t->limits, &b->limits, 0);
-
-	if (!t->queue_lock)
-		WARN_ON_ONCE(1);
-	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
-		unsigned long flags;
-		spin_lock_irqsave(t->queue_lock, flags);
-		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
-		spin_unlock_irqrestore(t->queue_lock, flags);
-	}
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
@@ -545,7 +550,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	t->io_min = max(t->io_min, b->io_min);
 	t->io_opt = lcm(t->io_opt, b->io_opt);
 
-	t->no_cluster |= b->no_cluster;
+	t->cluster &= b->cluster;
 	t->discard_zeroes_data &= b->discard_zeroes_data;
 
 	/* Physical block size a multiple of the logical block size? */
@@ -641,7 +646,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
 		       sector_t offset)
 {
 	struct request_queue *t = disk->queue;
-	struct request_queue *b = bdev_get_queue(bdev);
 
 	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
 		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
@@ -652,17 +656,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
 		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
 		       top, bottom);
 	}
-
-	if (!t->queue_lock)
-		WARN_ON_ONCE(1);
-	else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(t->queue_lock, flags);
-		if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
-			queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
-		spin_unlock_irqrestore(t->queue_lock, flags);
-	}
 }
 EXPORT_SYMBOL(disk_stack_limits);
 
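With this split, the clamping of the hard and soft sector limits lives in blk_limits_max_hw_sectors(), which takes a bare queue_limits, so code that assembles limits without a request_queue at hand (as dm-table does later in this series) can call it directly; blk_queue_max_hw_sectors() becomes a thin wrapper around it. A compilable userspace sketch of that behaviour; the struct, the constant's value and all names here are illustrative, not the kernel's:

#include <stdio.h>

#define BLK_DEF_MAX_SECTORS_MODEL 1024u		/* illustrative value only */

struct limits_model {
	unsigned int max_hw_sectors;
	unsigned int max_sectors;
	unsigned int cluster;
};

/* Model of the new split: the limits-level helper touches only the
 * passed-in structure, so callers that build limits before any queue
 * exists can reuse it; the queue-level call just forwards &q->limits. */
static void limits_max_hw_sectors(struct limits_model *l, unsigned int max_hw)
{
	l->max_hw_sectors = max_hw;
	l->max_sectors = max_hw < BLK_DEF_MAX_SECTORS_MODEL ?
				max_hw : BLK_DEF_MAX_SECTORS_MODEL;
}

int main(void)
{
	struct limits_model l = { .cluster = 1 };

	limits_max_hw_sectors(&l, 2048);
	printf("hw=%u soft=%u\n", l.max_hw_sectors, l.max_sectors);	/* hw=2048 soft=1024 */
	return 0;
}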
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -119,7 +119,7 @@ static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
 
 static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
 {
-	if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+	if (blk_queue_cluster(q))
 		return queue_var_show(queue_max_segment_size(q), (page));
 
 	return queue_var_show(PAGE_CACHE_SIZE, (page));
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -355,6 +355,12 @@ throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 			tg->slice_end[rw], jiffies);
 }
 
+static inline void throtl_set_slice_end(struct throtl_data *td,
+		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
+{
+	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
+}
+
 static inline void throtl_extend_slice(struct throtl_data *td,
 		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
 {
@@ -391,6 +397,16 @@ throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
 	if (throtl_slice_used(td, tg, rw))
 		return;
 
+	/*
+	 * A bio has been dispatched. Also adjust slice_end. It might happen
+	 * that initially cgroup limit was very low resulting in high
+	 * slice_end, but later limit was bumped up and bio was dispached
+	 * sooner, then we need to reduce slice_end. A high bogus slice_end
+	 * is bad because it does not allow new slice to start.
+	 */
+
+	throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);
+
 	time_elapsed = jiffies - tg->slice_start[rw];
 
 	nr_slices = time_elapsed / throtl_slice;
@@ -709,26 +725,21 @@ static void throtl_process_limit_change(struct throtl_data *td)
 	struct throtl_grp *tg;
 	struct hlist_node *pos, *n;
 
-	/*
-	 * Make sure atomic_inc() effects from
-	 * throtl_update_blkio_group_read_bps(), group of functions are
-	 * visible.
-	 * Is this required or smp_mb__after_atomic_inc() was suffcient
-	 * after the atomic_inc().
-	 */
-	smp_rmb();
 	if (!atomic_read(&td->limits_changed))
 		return;
 
 	throtl_log(td, "limit changed =%d", atomic_read(&td->limits_changed));
 
-	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
-		/*
-		 * Do I need an smp_rmb() here to make sure tg->limits_changed
-		 * update is visible. I am relying on smp_rmb() at the
-		 * beginning of function and not putting a new one here.
-		 */
+	/*
+	 * Make sure updates from throtl_update_blkio_group_read_bps() group
+	 * of functions to tg->limits_changed are visible. We do not
+	 * want update td->limits_changed to be visible but update to
+	 * tg->limits_changed not being visible yet on this cpu. Hence
+	 * the read barrier.
+	 */
+	smp_rmb();
 
+	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
 		if (throtl_tg_on_rr(tg) && tg->limits_changed) {
 			throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
 				" riops=%u wiops=%u", tg->bps[READ],
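The slice_end trim relies on simple jiffy arithmetic: on every dispatch the slice end is pulled back to roundup(jiffies + throtl_slice, throtl_slice), so a huge slice_end left over from an earlier, much lower limit cannot keep a new slice from starting. A standalone userspace sketch of that arithmetic, with an assumed slice length (the real throtl_slice is internal to blk-throttle):

#include <stdio.h>

#define THROTL_SLICE_MODEL 100UL	/* assumed slice length in jiffies */

static unsigned long roundup_to(unsigned long x, unsigned long y)
{
	return ((x + y - 1) / y) * y;
}

int main(void)
{
	unsigned long jiffies = 10000;		/* "now" */
	unsigned long slice_end = 25000;	/* bogus leftover from a very low limit */

	printf("old slice_end = %lu\n", slice_end);
	/* what the new throtl_set_slice_end() call does on dispatch */
	slice_end = roundup_to(jiffies + THROTL_SLICE_MODEL, THROTL_SLICE_MODEL);
	printf("trimmed slice_end = %lu\n", slice_end);	/* prints 10100 */
	return 0;
}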
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -2834,6 +2834,8 @@ static int cciss_revalidate(struct gendisk *disk)
 	InquiryData_struct *inq_buff = NULL;
 
 	for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
+		if (!h->drv[logvol])
+			continue;
 		if (memcmp(h->drv[logvol]->LunID, drv->LunID,
 			sizeof(drv->LunID)) == 0) {
 			FOUND = 1;
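The panic fix is a plain NULL check: slots in the controller's logical-drive array can be empty (for example after a drive has been deleted), and the old loop dereferenced them unconditionally. A userspace sketch of the guarded lookup, with invented types and sizes:

#include <string.h>

#define MAX_LUN_MODEL 16

struct drive_model { unsigned char LunID[8]; };

/* Skip holes in the array before touching LunID, mirroring the
 * "if (!h->drv[logvol]) continue;" added above. */
int find_drive(struct drive_model *drv[MAX_LUN_MODEL], const unsigned char *lun_id)
{
	int i;

	for (i = 0; i < MAX_LUN_MODEL; i++) {
		if (!drv[i])
			continue;
		if (memcmp(drv[i]->LunID, lun_id, sizeof(drv[i]->LunID)) == 0)
			return i;
	}
	return -1;
}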
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -3627,17 +3627,19 @@ static void drbdd(struct drbd_conf *mdev)
 		}
 
 		shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
-		rv = drbd_recv(mdev, &header->h80.payload, shs);
-		if (unlikely(rv != shs)) {
-			dev_err(DEV, "short read while reading sub header: rv=%d\n", rv);
-			goto err_out;
-		}
-
 		if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
 			dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
 			goto err_out;
 		}
 
+		if (shs) {
+			rv = drbd_recv(mdev, &header->h80.payload, shs);
+			if (unlikely(rv != shs)) {
+				dev_err(DEV, "short read while reading sub header: rv=%d\n", rv);
+				goto err_out;
+			}
+		}
+
 		rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
 
 		if (unlikely(!rv)) {
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -339,7 +339,8 @@ static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
 }
 
 /* completion of master bio is outside of spinlock.
- * If you need it irqsave, do it your self! */
+ * If you need it irqsave, do it your self!
+ * Which means: don't use from bio endio callback. */
 static inline int req_mod(struct drbd_request *req,
 		enum drbd_req_event what)
 {
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -193,8 +193,10 @@ void drbd_endio_sec(struct bio *bio, int error)
  */
 void drbd_endio_pri(struct bio *bio, int error)
 {
+	unsigned long flags;
 	struct drbd_request *req = bio->bi_private;
 	struct drbd_conf *mdev = req->mdev;
+	struct bio_and_error m;
 	enum drbd_req_event what;
 	int uptodate = bio_flagged(bio, BIO_UPTODATE);
 
@@ -220,7 +222,13 @@ void drbd_endio_pri(struct bio *bio, int error)
 	bio_put(req->private_bio);
 	req->private_bio = ERR_PTR(error);
 
-	req_mod(req, what);
+	/* not req_mod(), we need irqsave here! */
+	spin_lock_irqsave(&mdev->req_lock, flags);
+	__req_mod(req, what, &m);
+	spin_unlock_irqrestore(&mdev->req_lock, flags);
+
+	if (m.bio)
+		complete_master_bio(mdev, &m);
 }
 
 int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
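The endio fix follows the updated req_mod() comment above: a bio endio callback may run with interrupts disabled, so the request-state change is done under spin_lock_irqsave() via __req_mod(), and the master-bio completion it hands back runs only after the lock is dropped. The pthread sketch below models just that second half, deferring completion work until after the lock is released; the irqsave part has no userspace equivalent, and all names here are invented:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;

struct completion_work {
	int pending;	/* models "m.bio != NULL" */
};

/* State transition happens with the lock held; any follow-up completion
 * is only recorded here and executed later, outside the lock. */
static void mod_request_locked(struct completion_work *m)
{
	m->pending = 1;
}

static void endio(void)
{
	struct completion_work m = { 0 };

	pthread_mutex_lock(&req_lock);
	mod_request_locked(&m);
	pthread_mutex_unlock(&req_lock);

	if (m.pending)
		printf("completing master bio outside the lock\n");
}

int main(void)
{
	endio();
	return 0;
}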
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -517,9 +517,8 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
 	 */
 
 	if (q->merge_bvec_fn && !ti->type->merge)
-		limits->max_sectors =
-			min_not_zero(limits->max_sectors,
-				     (unsigned int) (PAGE_SIZE >> 9));
+		blk_limits_max_hw_sectors(limits,
+					  (unsigned int) (PAGE_SIZE >> 9));
 	return 0;
 }
 EXPORT_SYMBOL_GPL(dm_set_device_limits);
@@ -1131,11 +1130,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	 */
 	q->limits = *limits;
 
-	if (limits->no_cluster)
-		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
-	else
-		queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
-
 	if (!dm_table_supports_discards(t))
 		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
 	else
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -4295,9 +4295,6 @@ static int md_alloc(dev_t dev, char *name)
 		goto abort;
 	mddev->queue->queuedata = mddev;
 
-	/* Can be unlocked because the queue is new: no concurrency */
-	queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue);
-
 	blk_queue_make_request(mddev->queue, md_make_request);
 
 	disk = alloc_disk(1 << shift);
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1637,9 +1637,8 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
 
 	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
 
-	/* New queue, no concurrency on queue_flags */
 	if (!shost->use_clustering)
-		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
+		q->limits.cluster = 0;
 
 	/*
 	 * set a reasonable default alignment on word boundaries: the
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -250,7 +250,7 @@ struct queue_limits {
 
 	unsigned char		misaligned;
 	unsigned char		discard_misaligned;
-	unsigned char		no_cluster;
+	unsigned char		cluster;
 	signed char		discard_zeroes_data;
 };
 
@@ -380,7 +380,6 @@ struct request_queue
 #endif
 };
 
-#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
 #define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
@@ -403,7 +402,6 @@ struct request_queue
 #define QUEUE_FLAG_SECDISCARD  19	/* supports SECDISCARD */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
-				 (1 << QUEUE_FLAG_CLUSTER) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
 				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
 				 (1 << QUEUE_FLAG_ADD_RANDOM))
@@ -510,6 +508,11 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 
 #define rq_data_dir(rq)		((rq)->cmd_flags & 1)
 
+static inline unsigned int blk_queue_cluster(struct request_queue *q)
+{
+	return q->limits.cluster;
+}
+
 /*
  * We regard a request as sync, if either a read or a sync write
 */
@@ -805,6 +808,7 @@ extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
+extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
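Clustering is now a stacked limit instead of a queue flag: blk_set_default_limits() starts every queue with cluster = 1, a driver can simply clear q->limits.cluster (as scsi_lib does above), and blk_stack_limits() combines limits with t->cluster &= b->cluster, so a stacked device clusters only if every bottom device does. A compilable userspace model of that AND propagation, with an invented struct:

#include <stdio.h>

struct limits_model {
	unsigned int cluster;
};

/* Model of the stacking rule from blk_stack_limits(): the top device may
 * merge adjacent segments only if every underlying device allows it. */
static void stack_cluster(struct limits_model *top, const struct limits_model *bottom)
{
	top->cluster &= bottom->cluster;
}

int main(void)
{
	struct limits_model top = { .cluster = 1 };		/* default: clustering on */
	struct limits_model no_cluster_dev = { .cluster = 0 };	/* e.g. !shost->use_clustering */

	stack_cluster(&top, &no_cluster_dev);
	printf("stacked cluster = %u\n", top.cluster);	/* prints 0 */
	return 0;
}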