block-5.15-2021-09-05
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmE1hQcQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgprjfEACdG+medwQOPpKNSoAvQmYyQnZRMbPjiruv
A4nW2L6MKaExO59qQLVbBYHaH2+ng2UR/p5jNi2AKm+hrQEYllxlNvuCkRBIn97J
r45R48mzBbHjR4kE3Fdu1mOFpBWOuU9JrtzHI+JF/Sl/qPIxKYNHf5E66T6l90Fz
0hJkorAoVB7+hQYixdmkM9quZy11D5SY3aM+bG8r2uNjZTBEHMfmOen8o1giR0vC
EOHzObuC6WLjLGQInNW+Cq2//vVVybQa79mhOUMp93z5nhDMtwUu7MH4B4kmGpix
GLjDa1DukUZe7nGcnsRKmjjXQ+BpG6YF52Z2RfVZpWZn83t5c4YQsq++TPZ8KfpK
4NAFFuSbGM/+QWwEiiyWu00syvpzrEJ4ZIJyZX3FYEeKyKWVRGHqlMDcS9LstYOk
4OfgQUcJ7f/fXeedwi0OGJS1BLr6fi8RnazIafCNIIJLe1XIwTsNufPCNxWYqDAi
0XhH+uYGD38VoUiR5JymZku6frwY4kxssA1khPPE5jWbzCZXiHprwwzaP4hBNNeZ
c5cn9/1ZQSoTE3ebrX9pzTn5wRZwAL+iDhZ2SpLlN2Ji1BJ4EM9H8qFGj3U/CSM4
OWKY2c0VwJYQUhjO4QDBx0MblJgNy8HsvmqGETuxUlk56j3Q1Mx3ViPV43amP9eM
OM4mGige3Q==
=4SCA
-----END PGP SIGNATURE-----

Merge tag 'block-5.15-2021-09-05' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Was going to send this one in later this week, but given that -Werror
  is now enabled (or at least available), the mq-deadline fix really
  should go in for the folks hitting that.

   - Ensure dd_queued() is only there if needed (Geert)

   - Fix a kerneldoc warning for bio_alloc_kiocb()

   - BFQ fix for queue merging

   - loop locking fix (Tetsuo)"

* tag 'block-5.15-2021-09-05' of git://git.kernel.dk/linux-block:
  loop: reduce the loop_ctl_mutex scope
  bio: fix kerneldoc documentation for bio_alloc_kiocb()
  block, bfq: honor already-setup queue merges
  block/mq-deadline: Move dd_queued() to fix defined but not used warning
commit 1dbe7e386f
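For context on the -Werror remark in the message above: a static function whose only callers are compiled out trips gcc's -Wunused-function, and -Werror turns that warning into a hard build failure. A minimal, hypothetical illustration (file and macro names invented, not from the kernel tree):

/* unused.c - build with: gcc -Wall -Werror -c unused.c */

/* #define CONFIG_DEBUG_STATS */	/* the only caller is behind this */

static int helper(void)	/* error: 'helper' defined but not used */
{
	return 42;
}

#ifdef CONFIG_DEBUG_STATS
int debug_stats(void)
{
	return helper();
}
#endif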
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -2662,6 +2662,15 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
 	 * are likely to increase the throughput.
 	 */
 	bfqq->new_bfqq = new_bfqq;
+	/*
+	 * The above assignment schedules the following redirections:
+	 * each time some I/O for bfqq arrives, the process that
+	 * generated that I/O is disassociated from bfqq and
+	 * associated with new_bfqq. Here we increase new_bfqq->ref
+	 * in advance, adding the number of processes that are
+	 * expected to be associated with new_bfqq as they happen to
+	 * issue I/O.
+	 */
 	new_bfqq->ref += process_refs;
 	return new_bfqq;
 }
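In schematic form, the accounting the new comment describes: when a merge of one queue into another is scheduled, the target takes one reference per process still attached to the source, before any of those processes is actually redirected. A simplified sketch with hypothetical names, not the BFQ code:

struct queue {
	int ref;                 /* total reference count */
	int process_refs;        /* references held by attached processes */
	struct queue *new_queue; /* scheduled merge target, if any */
};

static void schedule_merge(struct queue *q1, struct queue *q2)
{
	q1->new_queue = q2;          /* future I/O for q1 is redirected to q2 */
	q2->ref += q1->process_refs; /* count the processes that will move over */
}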
@@ -2724,6 +2733,10 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 {
 	struct bfq_queue *in_service_bfqq, *new_bfqq;
 
+	/* if a merge has already been set up, then proceed with that first */
+	if (bfqq->new_bfqq)
+		return bfqq->new_bfqq;
+
 	/*
 	 * Check delayed stable merge for rotational or non-queueing
 	 * devs. For this branch to be executed, bfqq must not be
@@ -2825,9 +2838,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 	if (bfq_too_late_for_merging(bfqq))
 		return NULL;
 
-	if (bfqq->new_bfqq)
-		return bfqq->new_bfqq;
-
 	if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
 		return NULL;
 
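The point of the reordering, reduced to a schematic (hypothetical names, not the BFQ code): once a merge has been set up and references taken in advance, a later invocation must follow through on that merge rather than select a new target, or the advance references are never consumed:

struct queue { struct queue *new_queue; };

struct queue *find_stable_merge(struct queue *q); /* hypothetical helper */

struct queue *setup_cooperator(struct queue *q)
{
	/* Honor an already-scheduled merge before considering a new one. */
	if (q->new_queue)
		return q->new_queue;
	return find_stable_merge(q);
}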
--- a/block/bio.c
+++ b/block/bio.c
@@ -1688,7 +1688,7 @@ EXPORT_SYMBOL(bioset_init_from_src);
 /**
  * bio_alloc_kiocb - Allocate a bio from bio_set based on kiocb
  * @kiocb: kiocb describing the IO
- * @nr_iovecs: number of iovecs to pre-allocate
+ * @nr_vecs: number of iovecs to pre-allocate
  * @bs: bio_set to allocate from
  *
  * Description:
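The warning this fixes comes from scripts/kernel-doc, which checks that every @name in a kerneldoc header matches a parameter of the function it documents. A minimal illustration with a hypothetical function, not from the tree:

/**
 * example_alloc - illustrate kerneldoc parameter matching
 * @nr_vecs: must match the name in the prototype below; documenting a
 *           stale name such as nr_iovecs instead produces a warning
 *           like "Excess function parameter 'nr_iovecs' description"
 *
 * Return: always 0 in this sketch.
 */
static int example_alloc(unsigned short nr_vecs)
{
	return 0;
}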
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -270,12 +270,6 @@ deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
 	deadline_remove_request(rq->q, per_prio, rq);
 }
 
-/* Number of requests queued for a given priority level. */
-static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
-{
-	return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
-}
-
 /*
  * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
  * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
@@ -953,6 +947,12 @@ static int dd_async_depth_show(void *data, struct seq_file *m)
 	return 0;
 }
 
+/* Number of requests queued for a given priority level. */
+static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
+{
+	return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
+}
+
 static int dd_queued_show(void *data, struct seq_file *m)
 {
 	struct request_queue *q = data;
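The move fixes the "defined but not used" warning because dd_queued()'s only callers are the debugfs show() callbacks later in the file, which are compiled only when the debugfs support is configured in. The shape of the fix, sketched under the assumption that the surrounding region is guarded by CONFIG_BLK_DEBUG_FS:

#ifdef CONFIG_BLK_DEBUG_FS
/* Defined next to its only users: when the debugfs code is compiled
 * out, the helper disappears with it, so no -Wunused-function warning
 * (a hard error under -Werror) is emitted. */
static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
{
	return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
}

/* ... debugfs show() callbacks that call dd_queued() ... */
#endif /* CONFIG_BLK_DEBUG_FS */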
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -2111,18 +2111,6 @@ int loop_register_transfer(struct loop_func_table *funcs)
 	return 0;
 }
 
-static int unregister_transfer_cb(int id, void *ptr, void *data)
-{
-	struct loop_device *lo = ptr;
-	struct loop_func_table *xfer = data;
-
-	mutex_lock(&lo->lo_mutex);
-	if (lo->lo_encryption == xfer)
-		loop_release_xfer(lo);
-	mutex_unlock(&lo->lo_mutex);
-	return 0;
-}
-
 int loop_unregister_transfer(int number)
 {
 	unsigned int n = number;
@@ -2130,9 +2118,20 @@ int loop_unregister_transfer(int number)
 
 	if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
 		return -EINVAL;
+	/*
+	 * This function is called only from cleanup_cryptoloop().
+	 * Given that each loop device that has a transfer enabled holds a
+	 * reference to the module implementing it, we should never get here
+	 * with a transfer that is set (unless forced module unloading is
+	 * requested). Thus, check the module's refcount and warn if this is
+	 * not a clean unloading.
+	 */
+#ifdef CONFIG_MODULE_UNLOAD
+	if (xfer->owner && module_refcount(xfer->owner) != -1)
+		pr_err("Danger! Unregistering an in use transfer function.\n");
+#endif
+
 	xfer_funcs[n] = NULL;
-	idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer);
 	return 0;
 }
 
@@ -2323,8 +2322,9 @@ static int loop_add(int i)
 	} else {
 		err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
 	}
+	mutex_unlock(&loop_ctl_mutex);
 	if (err < 0)
-		goto out_unlock;
+		goto out_free_dev;
 	i = err;
 
 	err = -ENOMEM;
@@ -2393,15 +2393,19 @@ static int loop_add(int i)
 	disk->events = DISK_EVENT_MEDIA_CHANGE;
 	disk->event_flags = DISK_EVENT_FLAG_UEVENT;
 	sprintf(disk->disk_name, "loop%d", i);
+	/* Make this loop device reachable from pathname. */
 	add_disk(disk);
+	/* Show this loop device. */
+	mutex_lock(&loop_ctl_mutex);
+	lo->idr_visible = true;
 	mutex_unlock(&loop_ctl_mutex);
 	return i;
 
 out_cleanup_tags:
 	blk_mq_free_tag_set(&lo->tag_set);
 out_free_idr:
+	mutex_lock(&loop_ctl_mutex);
 	idr_remove(&loop_index_idr, i);
-out_unlock:
 	mutex_unlock(&loop_ctl_mutex);
 out_free_dev:
 	kfree(lo);
@@ -2411,9 +2415,14 @@ out:
 
 static void loop_remove(struct loop_device *lo)
 {
+	/* Make this loop device unreachable from pathname. */
 	del_gendisk(lo->lo_disk);
 	blk_cleanup_disk(lo->lo_disk);
 	blk_mq_free_tag_set(&lo->tag_set);
+	mutex_lock(&loop_ctl_mutex);
+	idr_remove(&loop_index_idr, lo->lo_number);
+	mutex_unlock(&loop_ctl_mutex);
+	/* There is no route which can find this loop device. */
 	mutex_destroy(&lo->lo_mutex);
 	kfree(lo);
 }
@@ -2437,31 +2446,40 @@ static int loop_control_remove(int idx)
 		return -EINVAL;
 	}
 
+	/* Hide this loop device for serialization. */
 	ret = mutex_lock_killable(&loop_ctl_mutex);
 	if (ret)
 		return ret;
-
 	lo = idr_find(&loop_index_idr, idx);
-	if (!lo) {
+	if (!lo || !lo->idr_visible)
 		ret = -ENODEV;
-		goto out_unlock_ctrl;
-	}
+	else
+		lo->idr_visible = false;
+	mutex_unlock(&loop_ctl_mutex);
+	if (ret)
+		return ret;
 
+	/* Check whether this loop device can be removed. */
 	ret = mutex_lock_killable(&lo->lo_mutex);
 	if (ret)
-		goto out_unlock_ctrl;
+		goto mark_visible;
 	if (lo->lo_state != Lo_unbound ||
 	    atomic_read(&lo->lo_refcnt) > 0) {
 		mutex_unlock(&lo->lo_mutex);
 		ret = -EBUSY;
-		goto out_unlock_ctrl;
+		goto mark_visible;
 	}
+	/* Mark this loop device no longer open()-able. */
 	lo->lo_state = Lo_deleting;
 	mutex_unlock(&lo->lo_mutex);
 
-	idr_remove(&loop_index_idr, lo->lo_number);
 	loop_remove(lo);
-out_unlock_ctrl:
+	return 0;
+
+mark_visible:
+	/* Show this loop device again. */
+	mutex_lock(&loop_ctl_mutex);
+	lo->idr_visible = true;
+	mutex_unlock(&loop_ctl_mutex);
 	return ret;
 }
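The protocol these hunks implement can be modeled in a few lines: an object is published in the index only after it is fully initialized, and removal first hides it under the index lock, then validates it under the object's own lock, re-publishing it if removal has to be aborted. A userspace sketch of the same idea (hypothetical types and names, not the loop driver):

#include <pthread.h>
#include <stdbool.h>

struct entry {
	pthread_mutex_t lock;	/* protects busy */
	bool visible;		/* protected by index_lock */
	bool busy;
};

static pthread_mutex_t index_lock = PTHREAD_MUTEX_INITIALIZER;

static int try_remove(struct entry *e)
{
	/* Hide the entry so concurrent removers and lookups skip it. */
	pthread_mutex_lock(&index_lock);
	if (!e->visible) {
		pthread_mutex_unlock(&index_lock);
		return -1;		/* already being removed */
	}
	e->visible = false;
	pthread_mutex_unlock(&index_lock);

	/* Validate while holding only the entry's own lock. */
	pthread_mutex_lock(&e->lock);
	if (e->busy) {
		pthread_mutex_unlock(&e->lock);
		pthread_mutex_lock(&index_lock);
		e->visible = true;	/* abort: re-publish the entry */
		pthread_mutex_unlock(&index_lock);
		return -1;
	}
	pthread_mutex_unlock(&e->lock);
	/* No path can reach the entry anymore; free it here. */
	return 0;
}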
@@ -2475,7 +2493,8 @@ static int loop_control_get_free(int idx)
 	if (ret)
 		return ret;
 	idr_for_each_entry(&loop_index_idr, lo, id) {
-		if (lo->lo_state == Lo_unbound)
+		/* Hitting a race results in creating a new loop device which is harmless. */
+		if (lo->idr_visible && data_race(lo->lo_state) == Lo_unbound)
 			goto found;
 	}
 	mutex_unlock(&loop_ctl_mutex);
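data_race() marks a memory access that is racy by design so that KCSAN, the kernel's concurrency sanitizer, does not flag it; the adjacent comment records why the race is tolerable. In userspace C11 terms the closest analogue is a relaxed atomic load; the following is an illustrative sketch of that analogue, with an invented variable, not the driver's code:

#include <stdatomic.h>

static _Atomic int lo_state;	/* written concurrently by other threads */

/* A relaxed load documents that a possibly stale value is acceptable,
 * because the worst outcome (allocating a fresh device) is harmless. */
static int peek_state(void)
{
	return atomic_load_explicit(&lo_state, memory_order_relaxed);
}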
@@ -2591,10 +2610,14 @@ static void __exit loop_exit(void)
 	unregister_blkdev(LOOP_MAJOR, "loop");
 	misc_deregister(&loop_misc);
 
-	mutex_lock(&loop_ctl_mutex);
+	/*
+	 * There is no need to use loop_ctl_mutex here, for nobody else can
+	 * access loop_index_idr when this module is unloading (unless forced
+	 * module unloading is requested). If this is not a clean unloading,
+	 * we have no means to avoid a kernel crash.
+	 */
 	idr_for_each_entry(&loop_index_idr, lo, id)
 		loop_remove(lo);
-	mutex_unlock(&loop_ctl_mutex);
 
 	idr_destroy(&loop_index_idr);
 }
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -68,6 +68,7 @@ struct loop_device {
 	struct blk_mq_tag_set tag_set;
 	struct gendisk *lo_disk;
 	struct mutex lo_mutex;
+	bool idr_visible;
 };
 
 struct loop_cmd {