9p: Add refcount to p9_req_t
To avoid use-after-free(s), use a refcount to keep track of the usable
references to any instantiated struct p9_req_t.

This commit adds p9_req_put(), p9_req_get() and p9_req_try_get() as
wrappers to kref_put(), kref_get() and kref_get_unless_zero(). These are
used by the client and the transports to keep track of valid requests'
references. p9_free_req() is added back and used as callback by
kref_put().

Add SLAB_TYPESAFE_BY_RCU as it ensures that the memory freed by
kmem_cache_free() will not be reused for another type until the RCU
synchronisation period is over, so an address gotten under rcu_read_lock()
is safe to inc_ref() without corrupting random memory while the lock is
held.

Link: http://lkml.kernel.org/r/1535626341-20693-1-git-send-email-asmadeus@codewreck.org
Co-developed-by: Dominique Martinet <dominique.martinet@cea.fr>
Signed-off-by: Tomas Bortoli <tomasbortoli@gmail.com>
Reported-by: syzbot+467050c1ce275af2a5b8@syzkaller.appspotmail.com
Signed-off-by: Dominique Martinet <dominique.martinet@cea.fr>
parent 43cbcbee99
commit 728356dede
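For readers skimming the diff, the heart of the change is the lookup path: a
request found under rcu_read_lock() is only trusted after a speculative
reference has been taken and the tag re-checked. The sketch below is
reassembled from the p9_tag_lookup() hunk in net/9p/client.c further down; it
illustrates the pattern and is not a verbatim copy of the tree (surrounding
code and whitespace are approximate).

/* Sketch: RCU lookup made safe by SLAB_TYPESAFE_BY_RCU + kref_get_unless_zero(),
 * reassembled from the p9_tag_lookup() hunk below.
 */
struct p9_req_t *p9_tag_lookup(struct p9_client *c, u16 tag)
{
	struct p9_req_t *req;

	rcu_read_lock();
again:
	req = idr_find(&c->reqs, tag);
	if (req) {
		/* SLAB_TYPESAFE_BY_RCU guarantees the slot is still a
		 * struct p9_req_t (possibly recycled), so the speculative
		 * p9_req_try_get() cannot corrupt unrelated memory; the tag
		 * is re-checked once the reference is held, and the lookup
		 * retries on a mismatch or on a request already at zero.
		 */
		if (!p9_req_try_get(req))
			goto again;
		if (req->tc.tag != tag) {
			p9_req_put(req);
			goto again;
		}
	}
	rcu_read_unlock();

	return req;
}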
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
@@ -94,6 +94,7 @@ enum p9_req_status_t {
 struct p9_req_t {
 	int status;
 	int t_err;
+	struct kref refcount;
 	wait_queue_head_t wq;
 	struct p9_fcall tc;
 	struct p9_fcall rc;
@@ -233,6 +234,19 @@ int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status);
 int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *fl);
 void p9_fcall_fini(struct p9_fcall *fc);
 struct p9_req_t *p9_tag_lookup(struct p9_client *, u16);
+
+static inline void p9_req_get(struct p9_req_t *r)
+{
+	kref_get(&r->refcount);
+}
+
+static inline int p9_req_try_get(struct p9_req_t *r)
+{
+	return kref_get_unless_zero(&r->refcount);
+}
+
+int p9_req_put(struct p9_req_t *r);
+
 void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status);
 
 int p9_parse_header(struct p9_fcall *, int32_t *, int8_t *, int16_t *, int);
diff --git a/net/9p/client.c b/net/9p/client.c
@@ -307,6 +307,18 @@ p9_tag_alloc(struct p9_client *c, int8_t type, unsigned int max_size)
 	if (tag < 0)
 		goto free;
 
+	/* Init ref to two because in the general case there is one ref
+	 * that is put asynchronously by a writer thread, one ref
+	 * temporarily given by p9_tag_lookup and put by p9_client_cb
+	 * in the recv thread, and one ref put by p9_tag_remove in the
+	 * main thread. The only exception is virtio that does not use
+	 * p9_tag_lookup but does not have a writer thread either
+	 * (the write happens synchronously in the request/zc_request
+	 * callback), so p9_client_cb eats the second ref there
+	 * as the pointer is duplicated directly by virtqueue_add_sgs()
+	 */
+	refcount_set(&req->refcount.refcount, 2);
+
 	return req;
 
 free:
@@ -330,10 +342,21 @@ struct p9_req_t *p9_tag_lookup(struct p9_client *c, u16 tag)
 	struct p9_req_t *req;
 
 	rcu_read_lock();
+again:
 	req = idr_find(&c->reqs, tag);
-	/* There's no refcount on the req; a malicious server could cause
-	 * us to dereference a NULL pointer
-	 */
+	if (req) {
+		/* We have to be careful with the req found under rcu_read_lock
+		 * Thanks to SLAB_TYPESAFE_BY_RCU we can safely try to get the
+		 * ref again without corrupting other data, then check again
+		 * that the tag matches once we have the ref
+		 */
+		if (!p9_req_try_get(req))
+			goto again;
+		if (req->tc.tag != tag) {
+			p9_req_put(req);
+			goto again;
+		}
+	}
 	rcu_read_unlock();
 
 	return req;
@@ -347,7 +370,7 @@ EXPORT_SYMBOL(p9_tag_lookup);
  *
  * Context: Any context.
  */
-static void p9_tag_remove(struct p9_client *c, struct p9_req_t *r)
+static int p9_tag_remove(struct p9_client *c, struct p9_req_t *r)
 {
 	unsigned long flags;
 	u16 tag = r->tc.tag;
@@ -356,11 +379,23 @@ static void p9_tag_remove(struct p9_client *c, struct p9_req_t *r)
 	spin_lock_irqsave(&c->lock, flags);
 	idr_remove(&c->reqs, tag);
 	spin_unlock_irqrestore(&c->lock, flags);
+	return p9_req_put(r);
+}
+
+static void p9_req_free(struct kref *ref)
+{
+	struct p9_req_t *r = container_of(ref, struct p9_req_t, refcount);
 	p9_fcall_fini(&r->tc);
 	p9_fcall_fini(&r->rc);
 	kmem_cache_free(p9_req_cache, r);
 }
+
+int p9_req_put(struct p9_req_t *r)
+{
+	return kref_put(&r->refcount, p9_req_free);
+}
+EXPORT_SYMBOL(p9_req_put);
 
 /**
  * p9_tag_cleanup - cleans up tags structure and reclaims resources
  * @c: v9fs client struct
@@ -376,7 +411,9 @@ static void p9_tag_cleanup(struct p9_client *c)
 	rcu_read_lock();
 	idr_for_each_entry(&c->reqs, req, id) {
 		pr_info("Tag %d still in use\n", id);
-		p9_tag_remove(c, req);
+		if (p9_tag_remove(c, req) == 0)
+			pr_warn("Packet with tag %d has still references",
+				req->tc.tag);
 	}
 	rcu_read_unlock();
 }
@@ -400,6 +437,7 @@ void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status)
 
 	wake_up(&req->wq);
 	p9_debug(P9_DEBUG_MUX, "wakeup: %d\n", req->tc.tag);
+	p9_req_put(req);
 }
 EXPORT_SYMBOL(p9_client_cb);
 
@@ -640,9 +678,10 @@ static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq)
 	 * if we haven't received a response for oldreq,
 	 * remove it from the list
 	 */
-	if (oldreq->status == REQ_STATUS_SENT)
+	if (oldreq->status == REQ_STATUS_SENT) {
 		if (c->trans_mod->cancelled)
 			c->trans_mod->cancelled(c, oldreq);
+	}
 
 	p9_tag_remove(c, req);
 	return 0;
@@ -679,6 +718,8 @@ static struct p9_req_t *p9_client_prepare_req(struct p9_client *c,
 	return req;
 reterr:
 	p9_tag_remove(c, req);
+	/* We have to put also the 2nd reference as it won't be used */
+	p9_req_put(req);
 	return ERR_PTR(err);
 }
 
@@ -713,6 +754,8 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
 
 	err = c->trans_mod->request(c, req);
 	if (err < 0) {
+		/* write won't happen */
+		p9_req_put(req);
 		if (err != -ERESTARTSYS && err != -EFAULT)
 			c->status = Disconnected;
 		goto recalc_sigpending;
@@ -2238,7 +2281,7 @@ EXPORT_SYMBOL(p9_client_readlink);
 
 int __init p9_client_init(void)
 {
-	p9_req_cache = KMEM_CACHE(p9_req_t, 0);
+	p9_req_cache = KMEM_CACHE(p9_req_t, SLAB_TYPESAFE_BY_RCU);
 	return p9_req_cache ? 0 : -ENOMEM;
 }
 
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
@@ -132,6 +132,7 @@ struct p9_conn {
 	struct list_head req_list;
 	struct list_head unsent_req_list;
 	struct p9_req_t *req;
+	struct p9_req_t *wreq;
 	char tmp_buf[7];
 	struct p9_fcall rc;
 	int wpos;
@@ -383,6 +384,7 @@ static void p9_read_work(struct work_struct *work)
 		m->rc.sdata = NULL;
 		m->rc.offset = 0;
 		m->rc.capacity = 0;
+		p9_req_put(m->req);
 		m->req = NULL;
 	}
 
@@ -472,6 +474,8 @@ static void p9_write_work(struct work_struct *work)
 		m->wbuf = req->tc.sdata;
 		m->wsize = req->tc.size;
 		m->wpos = 0;
+		p9_req_get(req);
+		m->wreq = req;
 		spin_unlock(&m->client->lock);
 	}
 
@@ -492,8 +496,11 @@ static void p9_write_work(struct work_struct *work)
 	}
 
 	m->wpos += err;
-	if (m->wpos == m->wsize)
+	if (m->wpos == m->wsize) {
 		m->wpos = m->wsize = 0;
+		p9_req_put(m->wreq);
+		m->wreq = NULL;
+	}
 
 end_clear:
 	clear_bit(Wworksched, &m->wsched);
@@ -694,6 +701,7 @@ static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
 	if (req->status == REQ_STATUS_UNSENT) {
 		list_del(&req->req_list);
 		req->status = REQ_STATUS_FLSHD;
+		p9_req_put(req);
 		ret = 0;
 	}
 	spin_unlock(&client->lock);
@@ -711,6 +719,7 @@ static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
 	spin_lock(&client->lock);
 	list_del(&req->req_list);
 	spin_unlock(&client->lock);
+	p9_req_put(req);
 
 	return 0;
 }
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
@@ -365,6 +365,7 @@ send_done(struct ib_cq *cq, struct ib_wc *wc)
 			    c->busa, c->req->tc.size,
 			    DMA_TO_DEVICE);
 	up(&rdma->sq_sem);
+	p9_req_put(c->req);
 	kfree(c);
 }
 
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
@@ -207,6 +207,13 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
 	return 1;
 }
 
+/* Reply won't come, so drop req ref */
+static int p9_virtio_cancelled(struct p9_client *client, struct p9_req_t *req)
+{
+	p9_req_put(req);
+	return 0;
+}
+
 /**
  * pack_sg_list_p - Just like pack_sg_list. Instead of taking a buffer,
  * this takes a list of pages.
@@ -404,6 +411,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
 	struct scatterlist *sgs[4];
 	size_t offs;
 	int need_drop = 0;
+	int kicked = 0;
 
 	p9_debug(P9_DEBUG_TRANS, "virtio request\n");
 
@@ -411,8 +419,10 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
 		__le32 sz;
 		int n = p9_get_mapped_pages(chan, &out_pages, uodata,
 					    outlen, &offs, &need_drop);
-		if (n < 0)
-			return n;
+		if (n < 0) {
+			err = n;
+			goto err_out;
+		}
 		out_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE);
 		if (n != outlen) {
 			__le32 v = cpu_to_le32(n);
@@ -428,8 +438,10 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
 	} else if (uidata) {
 		int n = p9_get_mapped_pages(chan, &in_pages, uidata,
 					    inlen, &offs, &need_drop);
-		if (n < 0)
-			return n;
+		if (n < 0) {
+			err = n;
+			goto err_out;
+		}
 		in_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE);
 		if (n != inlen) {
 			__le32 v = cpu_to_le32(n);
@@ -498,6 +510,7 @@ req_retry_pinned:
 	}
 	virtqueue_kick(chan->vq);
 	spin_unlock_irqrestore(&chan->lock, flags);
+	kicked = 1;
 	p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
 	err = wait_event_killable(req->wq, req->status >= REQ_STATUS_RCVD);
 	/*
@@ -518,6 +531,10 @@ err_out:
 	}
 	kvfree(in_pages);
 	kvfree(out_pages);
+	if (!kicked) {
+		/* reply won't come */
+		p9_req_put(req);
+	}
 	return err;
 }
 
@@ -750,6 +767,7 @@ static struct p9_trans_module p9_virtio_trans = {
 	.request = p9_virtio_request,
 	.zc_request = p9_virtio_zc_request,
 	.cancel = p9_virtio_cancel,
+	.cancelled = p9_virtio_cancelled,
 	/*
 	 * We leave one entry for input and one entry for response
 	 * headers. We also skip one more entry to accomodate, address
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
@@ -185,6 +185,7 @@ again:
 	ring->intf->out_prod = prod;
 	spin_unlock_irqrestore(&ring->lock, flags);
 	notify_remote_via_irq(ring->irq);
+	p9_req_put(p9_req);
 
 	return 0;
 }
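Taken together, the transport hunks follow one ownership rule: whatever may
still touch req->tc or req->rc asynchronously holds its own reference and
drops it exactly once, either when the I/O completes or when it becomes
certain the I/O will never happen (the cancel/cancelled paths). A minimal
sketch of that pairing, using a hypothetical demo_writer structure rather
than the real struct p9_conn from trans_fd.c:

#include <net/9p/9p.h>
#include <net/9p/client.h>

/* Hypothetical per-connection writer state; only the fields needed for
 * the sketch are shown (trans_fd.c keeps the equivalents in struct p9_conn).
 */
struct demo_writer {
	struct p9_req_t *wreq;	/* request currently being written */
	char *wbuf;
	int wsize;
};

static void demo_start_write(struct demo_writer *w, struct p9_req_t *req)
{
	p9_req_get(req);		/* writer's own reference */
	w->wreq = req;
	w->wbuf = req->tc.sdata;	/* safe: the reference pins the buffers */
	w->wsize = req->tc.size;
}

static void demo_write_done(struct demo_writer *w)
{
	p9_req_put(w->wreq);		/* writer no longer touches the request */
	w->wreq = NULL;
	w->wbuf = NULL;
	w->wsize = 0;
}

If the write never starts, the same reference is dropped on the cancel path
instead, which is what the p9_fd_cancel()/p9_fd_cancelled() and
p9_virtio_cancelled() hunks above do.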