ceph: clean up inode work queueing
Add a generic function for taking an inode reference, setting the I_WORK
bit and queueing i_work. Turn the ceph_queue_* functions into static
inline wrappers that pass in the right bit.

Signed-off-by: Jeff Layton <jlayton@kernel.org>
Reviewed-by: Ilya Dryomov <idryomov@gmail.com>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
commit 64f28c627a
parent 64f36da562
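The diff below only shows the producer side: a caller sets a bit in i_work_mask and queues ci->i_work. For context, here is a rough sketch of how such a handler consumes those bits. It is modeled on the existing ceph_inode_work() worker and is not part of this change; the field and helper names it uses (vfs_inode, filemap_fdatawrite(), ceph_do_invalidate_pages(), __ceph_do_pending_vmtruncate()) are assumptions about the surrounding ceph code, not something this patch touches.

/* Illustrative sketch only -- not part of this patch. */
static void example_inode_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_work);
	struct inode *inode = &ci->vfs_inode;

	/* Handle every deferred operation whose bit was set before queueing. */
	if (test_and_clear_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask))
		filemap_fdatawrite(&inode->i_data);
	if (test_and_clear_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask))
		ceph_do_invalidate_pages(inode);
	if (test_and_clear_bit(CEPH_I_WORK_VMTRUNCATE, &ci->i_work_mask))
		__ceph_do_pending_vmtruncate(inode);

	/* Drop the reference taken by ceph_queue_inode_work(). */
	iput(inode);
}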
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1816,60 +1816,17 @@ void ceph_async_iput(struct inode *inode)
 	}
 }
 
-/*
- * Write back inode data in a worker thread.  (This can't be done
- * in the message handler context.)
- */
-void ceph_queue_writeback(struct inode *inode)
+void ceph_queue_inode_work(struct inode *inode, int work_bit)
 {
+	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 	struct ceph_inode_info *ci = ceph_inode(inode);
-	set_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask);
+	set_bit(work_bit, &ci->i_work_mask);
 
 	ihold(inode);
-	if (queue_work(ceph_inode_to_client(inode)->inode_wq,
-		       &ci->i_work)) {
-		dout("ceph_queue_writeback %p\n", inode);
+	if (queue_work(fsc->inode_wq, &ci->i_work)) {
+		dout("queue_inode_work %p, mask=%lx\n", inode, ci->i_work_mask);
 	} else {
-		dout("ceph_queue_writeback %p already queued, mask=%lx\n",
-		     inode, ci->i_work_mask);
-		iput(inode);
-	}
-}
-
-/*
- * queue an async invalidation
- */
-void ceph_queue_invalidate(struct inode *inode)
-{
-	struct ceph_inode_info *ci = ceph_inode(inode);
-	set_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask);
-
-	ihold(inode);
-	if (queue_work(ceph_inode_to_client(inode)->inode_wq,
-		       &ceph_inode(inode)->i_work)) {
-		dout("ceph_queue_invalidate %p\n", inode);
-	} else {
-		dout("ceph_queue_invalidate %p already queued, mask=%lx\n",
-		     inode, ci->i_work_mask);
-		iput(inode);
-	}
-}
-
-/*
- * Queue an async vmtruncate. If we fail to queue work, we will handle
- * the truncation the next time we call __ceph_do_pending_vmtruncate.
- */
-void ceph_queue_vmtruncate(struct inode *inode)
-{
-	struct ceph_inode_info *ci = ceph_inode(inode);
-	set_bit(CEPH_I_WORK_VMTRUNCATE, &ci->i_work_mask);
-
-	ihold(inode);
-	if (queue_work(ceph_inode_to_client(inode)->inode_wq,
-		       &ci->i_work)) {
-		dout("ceph_queue_vmtruncate %p\n", inode);
-	} else {
-		dout("ceph_queue_vmtruncate %p already queued, mask=%lx\n",
+		dout("queue_inode_work %p already queued, mask=%lx\n",
 		     inode, ci->i_work_mask);
 		iput(inode);
 	}
 }
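A note on the reference counting the new helper preserves from the old functions: ihold() pins the inode before the work is queued, and when queue_work() reports that i_work is already pending, the extra reference is dropped right away because the earlier queueing already holds one. Since the bit is set in i_work_mask before queue_work() is called, a still-pending run of the handler will pick the new bit up as well.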
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -962,11 +962,26 @@ extern int ceph_inode_holds_cap(struct inode *inode, int mask);
 
 extern bool ceph_inode_set_size(struct inode *inode, loff_t size);
 extern void __ceph_do_pending_vmtruncate(struct inode *inode);
-extern void ceph_queue_vmtruncate(struct inode *inode);
-extern void ceph_queue_invalidate(struct inode *inode);
-extern void ceph_queue_writeback(struct inode *inode);
 extern void ceph_async_iput(struct inode *inode);
 
+void ceph_queue_inode_work(struct inode *inode, int work_bit);
+
+static inline void ceph_queue_vmtruncate(struct inode *inode)
+{
+	ceph_queue_inode_work(inode, CEPH_I_WORK_VMTRUNCATE);
+}
+
+static inline void ceph_queue_invalidate(struct inode *inode)
+{
+	ceph_queue_inode_work(inode, CEPH_I_WORK_INVALIDATE_PAGES);
+}
+
+static inline void ceph_queue_writeback(struct inode *inode)
+{
+	ceph_queue_inode_work(inode, CEPH_I_WORK_WRITEBACK);
+}
+
 extern int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
 			     int mask, bool force);
 static inline int ceph_do_getattr(struct inode *inode, int mask, bool force)
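One practical consequence of routing everything through ceph_queue_inode_work() is that another deferred inode operation now only needs a new bit plus a one-line wrapper. A hypothetical sketch (CEPH_I_WORK_EXAMPLE and ceph_queue_example() are made-up names, not part of this patch or of ceph):

/* Hypothetical: a new deferred operation would need a new bit in
 * i_work_mask, a wrapper like this, and a matching branch in the
 * work handler that consumes the bit. */
static inline void ceph_queue_example(struct inode *inode)
{
	ceph_queue_inode_work(inode, CEPH_I_WORK_EXAMPLE);
}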