blk-cgroup: pass a gendisk to the blkg allocation helpers

Prepare for storing the blkcg information in the gendisk instead of
the request_queue.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Andreas Herrmann <aherrmann@suse.de>
Acked-by: Tejun Heo <tj@kernel.org>
Link: https://lore.kernel.org/r/20220921180501.1539876-18-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Christoph Hellwig 2022-09-21 20:05:01 +02:00 committed by Jens Axboe
parent de185b56e8
commit 99e6038743

View File

@ -202,19 +202,19 @@ static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
/** /**
* blkg_alloc - allocate a blkg * blkg_alloc - allocate a blkg
* @blkcg: block cgroup the new blkg is associated with * @blkcg: block cgroup the new blkg is associated with
* @q: request_queue the new blkg is associated with * @disk: gendisk the new blkg is associated with
* @gfp_mask: allocation mask to use * @gfp_mask: allocation mask to use
* *
 * Allocate a new blkg associating @blkcg and @q. * Allocate a new blkg associating @blkcg and @disk.
*/ */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q, static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
gfp_t gfp_mask) gfp_t gfp_mask)
{ {
struct blkcg_gq *blkg; struct blkcg_gq *blkg;
int i, cpu; int i, cpu;
/* alloc and init base part */ /* alloc and init base part */
blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node); blkg = kzalloc_node(sizeof(*blkg), gfp_mask, disk->queue->node);
if (!blkg) if (!blkg)
return NULL; return NULL;
@ -225,10 +225,10 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
if (!blkg->iostat_cpu) if (!blkg->iostat_cpu)
goto err_free; goto err_free;
if (!blk_get_queue(q)) if (!blk_get_queue(disk->queue))
goto err_free; goto err_free;
blkg->q = q; blkg->q = disk->queue;
INIT_LIST_HEAD(&blkg->q_node); INIT_LIST_HEAD(&blkg->q_node);
spin_lock_init(&blkg->async_bio_lock); spin_lock_init(&blkg->async_bio_lock);
bio_list_init(&blkg->async_bios); bio_list_init(&blkg->async_bios);
@ -243,11 +243,11 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
struct blkcg_policy *pol = blkcg_policy[i]; struct blkcg_policy *pol = blkcg_policy[i];
struct blkg_policy_data *pd; struct blkg_policy_data *pd;
if (!blkcg_policy_enabled(q, pol)) if (!blkcg_policy_enabled(disk->queue, pol))
continue; continue;
/* alloc per-policy data and attach it to blkg */ /* alloc per-policy data and attach it to blkg */
pd = pol->pd_alloc_fn(gfp_mask, q, blkcg); pd = pol->pd_alloc_fn(gfp_mask, disk->queue, blkcg);
if (!pd) if (!pd)
goto err_free; goto err_free;
@ -275,17 +275,16 @@ static void blkg_update_hint(struct blkcg *blkcg, struct blkcg_gq *blkg)
* If @new_blkg is %NULL, this function tries to allocate a new one as * If @new_blkg is %NULL, this function tries to allocate a new one as
* necessary using %GFP_NOWAIT. @new_blkg is always consumed on return. * necessary using %GFP_NOWAIT. @new_blkg is always consumed on return.
*/ */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg, static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct gendisk *disk,
struct request_queue *q,
struct blkcg_gq *new_blkg) struct blkcg_gq *new_blkg)
{ {
struct blkcg_gq *blkg; struct blkcg_gq *blkg;
int i, ret; int i, ret;
lockdep_assert_held(&q->queue_lock); lockdep_assert_held(&disk->queue->queue_lock);
/* request_queue is dying, do not create/recreate a blkg */ /* request_queue is dying, do not create/recreate a blkg */
if (blk_queue_dying(q)) { if (blk_queue_dying(disk->queue)) {
ret = -ENODEV; ret = -ENODEV;
goto err_free_blkg; goto err_free_blkg;
} }
@ -298,7 +297,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
/* allocate */ /* allocate */
if (!new_blkg) { if (!new_blkg) {
new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN); new_blkg = blkg_alloc(blkcg, disk, GFP_NOWAIT | __GFP_NOWARN);
if (unlikely(!new_blkg)) { if (unlikely(!new_blkg)) {
ret = -ENOMEM; ret = -ENOMEM;
goto err_put_css; goto err_put_css;
@ -308,7 +307,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
/* link parent */ /* link parent */
if (blkcg_parent(blkcg)) { if (blkcg_parent(blkcg)) {
blkg->parent = blkg_lookup(blkcg_parent(blkcg), q); blkg->parent = blkg_lookup(blkcg_parent(blkcg), disk->queue);
if (WARN_ON_ONCE(!blkg->parent)) { if (WARN_ON_ONCE(!blkg->parent)) {
ret = -ENODEV; ret = -ENODEV;
goto err_put_css; goto err_put_css;
@ -326,10 +325,10 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
/* insert */ /* insert */
spin_lock(&blkcg->lock); spin_lock(&blkcg->lock);
ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg); ret = radix_tree_insert(&blkcg->blkg_tree, disk->queue->id, blkg);
if (likely(!ret)) { if (likely(!ret)) {
hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list); hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
list_add(&blkg->q_node, &q->blkg_list); list_add(&blkg->q_node, &disk->queue->blkg_list);
for (i = 0; i < BLKCG_MAX_POLS; i++) { for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i]; struct blkcg_policy *pol = blkcg_policy[i];
@ -358,19 +357,20 @@ err_free_blkg:
/** /**
* blkg_lookup_create - lookup blkg, try to create one if not there * blkg_lookup_create - lookup blkg, try to create one if not there
* @blkcg: blkcg of interest * @blkcg: blkcg of interest
* @q: request_queue of interest * @disk: gendisk of interest
* *
* Lookup blkg for the @blkcg - @q pair. If it doesn't exist, try to * Lookup blkg for the @blkcg - @disk pair. If it doesn't exist, try to
* create one. blkg creation is performed recursively from blkcg_root such * create one. blkg creation is performed recursively from blkcg_root such
* that all non-root blkg's have access to the parent blkg. This function * that all non-root blkg's have access to the parent blkg. This function
* should be called under RCU read lock and takes @q->queue_lock. * should be called under RCU read lock and takes @disk->queue->queue_lock.
* *
* Returns the blkg or the closest blkg if blkg_create() fails as it walks * Returns the blkg or the closest blkg if blkg_create() fails as it walks
* down from root. * down from root.
*/ */
static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
struct request_queue *q) struct gendisk *disk)
{ {
struct request_queue *q = disk->queue;
struct blkcg_gq *blkg; struct blkcg_gq *blkg;
unsigned long flags; unsigned long flags;
@ -408,7 +408,7 @@ static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
parent = blkcg_parent(parent); parent = blkcg_parent(parent);
} }
blkg = blkg_create(pos, q, NULL); blkg = blkg_create(pos, disk, NULL);
if (IS_ERR(blkg)) { if (IS_ERR(blkg)) {
blkg = ret_blkg; blkg = ret_blkg;
break; break;
@ -652,6 +652,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
__acquires(rcu) __acquires(&bdev->bd_queue->queue_lock) __acquires(rcu) __acquires(&bdev->bd_queue->queue_lock)
{ {
struct block_device *bdev; struct block_device *bdev;
struct gendisk *disk;
struct request_queue *q; struct request_queue *q;
struct blkcg_gq *blkg; struct blkcg_gq *blkg;
int ret; int ret;
@ -659,8 +660,8 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
bdev = blkcg_conf_open_bdev(&input); bdev = blkcg_conf_open_bdev(&input);
if (IS_ERR(bdev)) if (IS_ERR(bdev))
return PTR_ERR(bdev); return PTR_ERR(bdev);
disk = bdev->bd_disk;
q = bdev_get_queue(bdev); q = disk->queue;
/* /*
* blkcg_deactivate_policy() requires queue to be frozen, we can grab * blkcg_deactivate_policy() requires queue to be frozen, we can grab
@ -703,7 +704,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
spin_unlock_irq(&q->queue_lock); spin_unlock_irq(&q->queue_lock);
rcu_read_unlock(); rcu_read_unlock();
new_blkg = blkg_alloc(pos, q, GFP_KERNEL); new_blkg = blkg_alloc(pos, disk, GFP_KERNEL);
if (unlikely(!new_blkg)) { if (unlikely(!new_blkg)) {
ret = -ENOMEM; ret = -ENOMEM;
goto fail_exit_queue; goto fail_exit_queue;
@ -729,7 +730,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
blkg_update_hint(pos, blkg); blkg_update_hint(pos, blkg);
blkg_free(new_blkg); blkg_free(new_blkg);
} else { } else {
blkg = blkg_create(pos, q, new_blkg); blkg = blkg_create(pos, disk, new_blkg);
if (IS_ERR(blkg)) { if (IS_ERR(blkg)) {
ret = PTR_ERR(blkg); ret = PTR_ERR(blkg);
goto fail_preloaded; goto fail_preloaded;
@ -1234,7 +1235,7 @@ int blkcg_init_disk(struct gendisk *disk)
INIT_LIST_HEAD(&q->blkg_list); INIT_LIST_HEAD(&q->blkg_list);
new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL); new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
if (!new_blkg) if (!new_blkg)
return -ENOMEM; return -ENOMEM;
@ -1243,7 +1244,7 @@ int blkcg_init_disk(struct gendisk *disk)
/* Make sure the root blkg exists. */ /* Make sure the root blkg exists. */
/* spin_lock_irq can serve as RCU read-side critical section. */ /* spin_lock_irq can serve as RCU read-side critical section. */
spin_lock_irq(&q->queue_lock); spin_lock_irq(&q->queue_lock);
blkg = blkg_create(&blkcg_root, q, new_blkg); blkg = blkg_create(&blkcg_root, disk, new_blkg);
if (IS_ERR(blkg)) if (IS_ERR(blkg))
goto err_unlock; goto err_unlock;
q->root_blkg = blkg; q->root_blkg = blkg;
@ -1860,8 +1861,7 @@ static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio,
struct blkcg_gq *blkg, *ret_blkg = NULL; struct blkcg_gq *blkg, *ret_blkg = NULL;
rcu_read_lock(); rcu_read_lock();
blkg = blkg_lookup_create(css_to_blkcg(css), blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_bdev->bd_disk);
bdev_get_queue(bio->bi_bdev));
while (blkg) { while (blkg) {
if (blkg_tryget(blkg)) { if (blkg_tryget(blkg)) {
ret_blkg = blkg; ret_blkg = blkg;