cfq-iosched: remove dead_key from cfq_io_context

Remove the ->dead_key field from cfq_io_context to shrink its size to
128 bytes (64 bytes on 32-bit hosts).

Use the lower bit of ->key as the dead mark instead of moving the key to
a separate field. After this, a dead cfq_io_context automatically has
cic->key != cfqd, so the io_context's last-hit cache keeps working
without changes.

Now, to check that a cfq_io_context is not dead, compare ->key with cfqd
instead of checking ->key for a non-NULL value as before.

Also remove the obsolete race protection in cfq_cic_lookup(); that race
has been gone since v2.6.24-1728-g4ac845a.

Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
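
The trick this relies on is low-bit pointer tagging: struct cfq_data is
allocated with at least word alignment, so bit 0 of a live ->key pointer is
always zero and can double as a "dead" flag within the same field. Below is a
minimal user-space sketch of the idea, assuming only that pointers fit in an
unsigned long and are at least 2-byte aligned; the names (queue_data,
dead_key(), key_to_qd()) are illustrative stand-ins, not the helpers added by
this patch.

/* Minimal, self-contained sketch of the low-bit "dead key" trick. */
#include <assert.h>
#include <stdio.h>

#define DEAD_KEY 1ul

struct queue_data { int dummy; };        /* stand-in for struct cfq_data */

/* Mark a key as dead by setting the otherwise-unused low bit. */
static void *dead_key(struct queue_data *qd)
{
	return (void *)((unsigned long)qd | DEAD_KEY);
}

/* Decode a key: NULL if the dead bit is set, the original pointer otherwise. */
static struct queue_data *key_to_qd(void *key)
{
	if ((unsigned long)key & DEAD_KEY)
		return NULL;
	return key;
}

int main(void)
{
	struct queue_data qd;
	void *key = &qd;                        /* live key */

	assert(key_to_qd(key) == &qd);          /* decodes back to the queue */

	key = dead_key(&qd);                    /* mark dead, no extra field needed */
	assert(key_to_qd(key) == NULL);         /* dead keys decode to NULL */
	assert(key != (void *)&qd);             /* ...and never equal the live pointer */

	printf("dead-key tagging behaves as expected\n");
	return 0;
}

Because a tagged key never compares equal to any live cfqd pointer, the single
check cic->key == cfqd covers both dead entries and entries that belong to
another queue, which is exactly what the updated cfq_cic_lookup() below relies
on.
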
Konstantin Khlebnikov, 2010-05-20 23:21:34 +04:00 (committed by Jens Axboe)
commit bca4b914b5, parent f4b87dee92
2 changed files with 28 additions and 14 deletions

block/cfq-iosched.c

@@ -430,6 +430,23 @@ static inline void cic_set_cfqq(struct cfq_io_context *cic,
 	cic->cfqq[is_sync] = cfqq;
 }
 
+#define CIC_DEAD_KEY	1ul
+
+static inline void *cfqd_dead_key(struct cfq_data *cfqd)
+{
+	return (void *)((unsigned long) cfqd | CIC_DEAD_KEY);
+}
+
+static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
+{
+	struct cfq_data *cfqd = cic->key;
+
+	if (unlikely((unsigned long) cfqd & CIC_DEAD_KEY))
+		return NULL;
+
+	return cfqd;
+}
+
 /*
  * We regard a request as SYNC, if it's either a read or has the SYNC bit
  * set (in which case it could also be direct WRITE).
@@ -2510,11 +2527,12 @@ static void cfq_cic_free(struct cfq_io_context *cic)
 static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
 {
 	unsigned long flags;
+	unsigned long dead_key = (unsigned long) cic->key;
 
-	BUG_ON(!cic->dead_key);
+	BUG_ON(!(dead_key & CIC_DEAD_KEY));
 
 	spin_lock_irqsave(&ioc->lock, flags);
-	radix_tree_delete(&ioc->radix_root, cic->dead_key);
+	radix_tree_delete(&ioc->radix_root, dead_key & ~CIC_DEAD_KEY);
 	hlist_del_rcu(&cic->cic_list);
 	spin_unlock_irqrestore(&ioc->lock, flags);
 
@@ -2573,11 +2591,10 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
 	list_del_init(&cic->queue_list);
 
 	/*
-	 * Make sure key == NULL is seen for dead queues
+	 * Make sure dead mark is seen for dead queues
 	 */
 	smp_wmb();
-	cic->dead_key = (unsigned long) cic->key;
-	cic->key = NULL;
+	cic->key = cfqd_dead_key(cfqd);
 
 	if (ioc->ioc_data == cic)
 		rcu_assign_pointer(ioc->ioc_data, NULL);
@@ -2596,7 +2613,7 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
 static void cfq_exit_single_io_context(struct io_context *ioc,
 				       struct cfq_io_context *cic)
 {
-	struct cfq_data *cfqd = cic->key;
+	struct cfq_data *cfqd = cic_to_cfqd(cic);
 
 	if (cfqd) {
 		struct request_queue *q = cfqd->queue;
@@ -2609,7 +2626,7 @@ static void cfq_exit_single_io_context(struct io_context *ioc,
 		 * race between exiting task and queue
 		 */
 		smp_read_barrier_depends();
-		if (cic->key)
+		if (cic->key == cfqd)
 			__cfq_exit_single_io_context(cfqd, cic);
 
 		spin_unlock_irqrestore(q->queue_lock, flags);
@@ -2689,7 +2706,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
 
 static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
 {
-	struct cfq_data *cfqd = cic->key;
+	struct cfq_data *cfqd = cic_to_cfqd(cic);
 	struct cfq_queue *cfqq;
 	unsigned long flags;
 
@@ -2746,7 +2763,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 static void changed_cgroup(struct io_context *ioc, struct cfq_io_context *cic)
 {
 	struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
-	struct cfq_data *cfqd = cic->key;
+	struct cfq_data *cfqd = cic_to_cfqd(cic);
 	unsigned long flags;
 	struct request_queue *q;
 
@@ -2883,6 +2900,7 @@ cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
 	unsigned long flags;
 
 	WARN_ON(!list_empty(&cic->queue_list));
+	BUG_ON(cic->key != cfqd_dead_key(cfqd));
 
 	spin_lock_irqsave(&ioc->lock, flags);
 
@@ -2900,7 +2918,6 @@ cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
 {
 	struct cfq_io_context *cic;
 	unsigned long flags;
-	void *k;
 
 	if (unlikely(!ioc))
 		return NULL;
@@ -2921,9 +2938,7 @@ cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
 		rcu_read_unlock();
 		if (!cic)
 			break;
-		/* ->key must be copied to avoid race with cfq_exit_queue() */
-		k = cic->key;
-		if (unlikely(!k)) {
+		if (unlikely(cic->key != cfqd)) {
 			cfq_drop_dead_cic(cfqd, ioc, cic);
 			rcu_read_lock();
 			continue;

include/linux/iocontext.h

@@ -7,7 +7,6 @@
 struct cfq_queue;
 struct cfq_io_context {
 	void *key;
-	unsigned long dead_key;
 
 	struct cfq_queue *cfqq[2];
 