Merge git://git.kvack.org/~bcrl/aio-next

Pull aio fix and cleanups from Ben LaHaise:
 "This consists of a couple of code cleanups plus a minor bug fix"

* git://git.kvack.org/~bcrl/aio-next:
  aio: cleanup: flatten kill_ioctx()
  aio: report error from io_destroy() when threads race in io_destroy()
  fs/aio.c: Remove ctx parameter in kiocb_cancel
Linus Torvalds 2014-06-14 19:43:27 -05:00
commit a311c48038
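The user-visible part of this pull is the io_destroy() change listed above: when two threads race to destroy the same AIO context, the loser now gets -EINVAL instead of a successful return that skips waiting for in-flight requests. Below is a minimal userspace sketch of that situation (an illustration only, not part of the patch; it assumes nothing beyond the raw io_setup(2)/io_destroy(2) syscalls and pthreads):

/*
 * Illustration only: race two io_destroy() calls on the same AIO context.
 * With this series, whichever caller loses the race sees -EINVAL, while
 * the winner waits for outstanding requests before returning 0.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/aio_abi.h>

static aio_context_t ctx;	/* must start out zeroed for io_setup() */

static void *destroyer(void *name)
{
	long ret = syscall(SYS_io_destroy, ctx);

	printf("%s: io_destroy() = %ld (%s)\n", (char *)name, ret,
	       ret ? strerror(errno) : "ok");
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	if (syscall(SYS_io_setup, 32, &ctx)) {
		perror("io_setup");
		return 1;
	}

	/* Destroy the same context from two threads at roughly the same time. */
	pthread_create(&a, NULL, destroyer, "thread A");
	pthread_create(&b, NULL, destroyer, "thread B");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Before this series, both callers could see io_destroy() return 0 even though only one of them actually waited for the outstanding requests to drain.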

fs/aio.c

@@ -477,7 +477,7 @@ void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
 }
 EXPORT_SYMBOL(kiocb_set_cancel_fn);
 
-static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb)
+static int kiocb_cancel(struct kiocb *kiocb)
 {
 	kiocb_cancel_fn *old, *cancel;
@@ -538,7 +538,7 @@ static void free_ioctx_users(struct percpu_ref *ref)
 				       struct kiocb, ki_list);
 
 		list_del_init(&req->ki_list);
-		kiocb_cancel(ctx, req);
+		kiocb_cancel(req);
 	}
 
 	spin_unlock_irq(&ctx->ctx_lock);
@@ -727,42 +727,42 @@ err:
  *	when the processes owning a context have all exited to encourage
  *	the rapid destruction of the kioctx.
  */
-static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
+static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
 		struct completion *requests_done)
 {
-	if (!atomic_xchg(&ctx->dead, 1)) {
-		struct kioctx_table *table;
+	struct kioctx_table *table;
 
-		spin_lock(&mm->ioctx_lock);
-		rcu_read_lock();
-		table = rcu_dereference(mm->ioctx_table);
+	if (atomic_xchg(&ctx->dead, 1))
+		return -EINVAL;
 
-		WARN_ON(ctx != table->table[ctx->id]);
-		table->table[ctx->id] = NULL;
-		rcu_read_unlock();
-		spin_unlock(&mm->ioctx_lock);
-
-		/* percpu_ref_kill() will do the necessary call_rcu() */
-		wake_up_all(&ctx->wait);
+	spin_lock(&mm->ioctx_lock);
+	rcu_read_lock();
+	table = rcu_dereference(mm->ioctx_table);
 
-		/*
-		 * It'd be more correct to do this in free_ioctx(), after all
-		 * the outstanding kiocbs have finished - but by then io_destroy
-		 * has already returned, so io_setup() could potentially return
-		 * -EAGAIN with no ioctxs actually in use (as far as userspace
-		 * could tell).
-		 */
-		aio_nr_sub(ctx->max_reqs);
+	WARN_ON(ctx != table->table[ctx->id]);
+	table->table[ctx->id] = NULL;
+	rcu_read_unlock();
+	spin_unlock(&mm->ioctx_lock);
 
-		if (ctx->mmap_size)
-			vm_munmap(ctx->mmap_base, ctx->mmap_size);
+	/* percpu_ref_kill() will do the necessary call_rcu() */
+	wake_up_all(&ctx->wait);
 
-		ctx->requests_done = requests_done;
-		percpu_ref_kill(&ctx->users);
-	} else {
-		if (requests_done)
-			complete(requests_done);
-	}
+	/*
+	 * It'd be more correct to do this in free_ioctx(), after all
+	 * the outstanding kiocbs have finished - but by then io_destroy
+	 * has already returned, so io_setup() could potentially return
+	 * -EAGAIN with no ioctxs actually in use (as far as userspace
+	 * could tell).
+	 */
+	aio_nr_sub(ctx->max_reqs);
+
+	if (ctx->mmap_size)
+		vm_munmap(ctx->mmap_base, ctx->mmap_size);
+
+	ctx->requests_done = requests_done;
+	percpu_ref_kill(&ctx->users);
+	return 0;
 }
/* wait_on_sync_kiocb:
@@ -1219,21 +1219,23 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
 	if (likely(NULL != ioctx)) {
 		struct completion requests_done =
 			COMPLETION_INITIALIZER_ONSTACK(requests_done);
+		int ret;
 
 		/* Pass requests_done to kill_ioctx() where it can be set
 		 * in a thread-safe way. If we try to set it here then we have
 		 * a race condition if two io_destroy() called simultaneously.
 		 */
-		kill_ioctx(current->mm, ioctx, &requests_done);
+		ret = kill_ioctx(current->mm, ioctx, &requests_done);
 		percpu_ref_put(&ioctx->users);
 
 		/* Wait until all IO for the context are done. Otherwise kernel
 		 * keep using user-space buffers even if user thinks the context
 		 * is destroyed.
 		 */
-		wait_for_completion(&requests_done);
+		if (!ret)
+			wait_for_completion(&requests_done);
 
-		return 0;
+		return ret;
 	}
 	pr_debug("EINVAL: io_destroy: invalid context id\n");
 	return -EINVAL;
@@ -1595,7 +1597,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 	kiocb = lookup_kiocb(ctx, iocb, key);
 	if (kiocb)
-		ret = kiocb_cancel(ctx, kiocb);
+		ret = kiocb_cancel(kiocb);
 	else
 		ret = -EINVAL;
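Read end to end, the flattened kill_ioctx() from the hunk above (this is just the added lines collected into one listing, not new code) checks and sets ctx->dead up front and bails out with -EINVAL if another caller got there first:

static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
		struct completion *requests_done)
{
	struct kioctx_table *table;

	/* Only one caller gets to tear the context down. */
	if (atomic_xchg(&ctx->dead, 1))
		return -EINVAL;

	spin_lock(&mm->ioctx_lock);
	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);

	WARN_ON(ctx != table->table[ctx->id]);
	table->table[ctx->id] = NULL;
	rcu_read_unlock();
	spin_unlock(&mm->ioctx_lock);

	/* percpu_ref_kill() will do the necessary call_rcu() */
	wake_up_all(&ctx->wait);

	/*
	 * It'd be more correct to do this in free_ioctx(), after all
	 * the outstanding kiocbs have finished - but by then io_destroy
	 * has already returned, so io_setup() could potentially return
	 * -EAGAIN with no ioctxs actually in use (as far as userspace
	 * could tell).
	 */
	aio_nr_sub(ctx->max_reqs);

	if (ctx->mmap_size)
		vm_munmap(ctx->mmap_base, ctx->mmap_size);

	ctx->requests_done = requests_done;
	percpu_ref_kill(&ctx->users);
	return 0;
}

io_destroy() then simply propagates that return value, only waiting on requests_done when kill_ioctx() actually tore the context down (ret == 0).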